author     Linus Torvalds <torvalds@linux-foundation.org>  2010-03-04 10:49:37 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-03-04 10:49:37 -0500
commit     03a2c4d76c9e99b80d74ab8a4f344e135a5ae44b (patch)
tree       7fd7940a4f87dc1ace1c1bdeb1fb0d90ac3beb13 /drivers
parent     a27341cd5fcb7cf2d2d4726e9f324009f7162c00 (diff)
parent     d424b925f7092b9d95e0a8556872349abe79d9b6 (diff)
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (151 commits)
  vga_switcheroo: disable default y by new rules.
  drm/nouveau: fix *staging* driver build with switcheroo off.
  drm/radeon: fix typo in Makefile
  vga_switcheroo: fix build on platforms with no ACPI
  drm/radeon: Fix printf type warning in 64bit system.
  drm/radeon/kms: bump the KMS version number for square tiling support.
  vga_switcheroo: initial implementation (v15)
  drm/radeon/kms: do not disable audio engine twice
  Revert "drm/radeon/kms: disable HDMI audio for now on rv710/rv730"
  drm/radeon/kms: do not preset audio stuff and start timer when not using audio
  drm/radeon: r100/r200 ums: block ability for userspace app to trash 0 page and beyond
  drm/ttm: fix function prototype to match implementation
  drm/radeon: use ALIGN instead of open coding it
  drm/radeon/kms: initialize set_surface_reg reg for rs600 asic
  drm/i915: Use a dmi quirk to skip a broken SDVO TV output.
  drm/i915: enable/disable LVDS port at DPMS time
  drm/i915: check for multiple write domains in pin_and_relocate
  drm/i915: clean-up i915_gem_flush_gpu_write_domain
  drm/i915: reuse i915_gpu_idle helper
  drm/i915: ensure lru ordering of fence_list
  ...

Fixed trivial conflicts in drivers/gpu/vga/Kconfig
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/agp/intel-agp.c | 123
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/drm_buffer.c | 184
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 6
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 44
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 30
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 26
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 70
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 253
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 326
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 69
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 430
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 169
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 313
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 170
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 216
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 160
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 339
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 126
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_calc.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 167
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 108
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 53
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 508
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_tv.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 74
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_grctx.c | 2367
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 9
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 7300
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 456
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 64
-rw-r--r--  drivers/gpu/drm/radeon/avivod.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 767
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 176
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 176
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 157
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 280
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 100
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 190
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_shaders.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 262
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 831
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 467
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 167
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 172
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 435
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 257
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 290
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 235
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 332
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 354
-rw-r--r--  drivers/gpu/drm/radeon/radeon_family.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 768
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 29
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 399
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 67
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 203
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600 | 837
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 39
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 41
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 259
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 18
-rw-r--r--  drivers/gpu/vga/Kconfig | 11
-rw-r--r--  drivers/gpu/vga/Makefile | 1
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 450
-rw-r--r--  drivers/video/console/fbcon.c | 18
-rw-r--r--  drivers/video/fbmem.c | 1
136 files changed, 17868 insertions(+), 6316 deletions(-)
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 8a713f1e9653..919a28558d36 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -11,6 +11,9 @@
11#include <asm/smp.h> 11#include <asm/smp.h>
12#include "agp.h" 12#include "agp.h"
13 13
14int intel_agp_enabled;
15EXPORT_SYMBOL(intel_agp_enabled);
16
14/* 17/*
15 * If we have Intel graphics, we're not going to have anything other than 18 * If we have Intel graphics, we're not going to have anything other than
16 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent 19 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
@@ -65,6 +68,10 @@
65#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 68#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
66#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a 69#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
67#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 70#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
71#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
72#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
73#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
74#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
68 75
69/* cover 915 and 945 variants */ 76/* cover 915 and 945 variants */
70#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ 77#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -99,7 +106,9 @@
99 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ 106 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
100 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ 107 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
101 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ 108 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
102 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB) 109 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
110 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
111 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
103 112
104extern int agp_memory_reserved; 113extern int agp_memory_reserved;
105 114
@@ -148,6 +157,25 @@ extern int agp_memory_reserved;
148#define INTEL_I7505_AGPCTRL 0x70 157#define INTEL_I7505_AGPCTRL 0x70
149#define INTEL_I7505_MCHCFG 0x50 158#define INTEL_I7505_MCHCFG 0x50
150 159
160#define SNB_GMCH_CTRL 0x50
161#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
162#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
163#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
164#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
165#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
166#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
167#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
168#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
169#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
170#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
171#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
172#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
173#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
174#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
175#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
176#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
177#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
178
151static const struct aper_size_info_fixed intel_i810_sizes[] = 179static const struct aper_size_info_fixed intel_i810_sizes[] =
152{ 180{
153 {64, 16384, 4}, 181 {64, 16384, 4},
@@ -294,6 +322,13 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
294 off_t pg_start, int mask_type) 322 off_t pg_start, int mask_type)
295{ 323{
296 int i, j; 324 int i, j;
325 u32 cache_bits = 0;
326
327 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
328 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
329 {
330 cache_bits = I830_PTE_SYSTEM_CACHED;
331 }
297 332
298 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 333 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
299 writel(agp_bridge->driver->mask_memory(agp_bridge, 334 writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -614,7 +649,7 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
614static void intel_i830_init_gtt_entries(void) 649static void intel_i830_init_gtt_entries(void)
615{ 650{
616 u16 gmch_ctrl; 651 u16 gmch_ctrl;
617 int gtt_entries; 652 int gtt_entries = 0;
618 u8 rdct; 653 u8 rdct;
619 int local = 0; 654 int local = 0;
620 static const int ddt[4] = { 0, 16, 32, 64 }; 655 static const int ddt[4] = { 0, 16, 32, 64 };
@@ -706,6 +741,63 @@ static void intel_i830_init_gtt_entries(void)
706 gtt_entries = 0; 741 gtt_entries = 0;
707 break; 742 break;
708 } 743 }
744 } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
745 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
746 /*
747 * SandyBridge has new memory control reg at 0x50.w
748 */
749 u16 snb_gmch_ctl;
750 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
751 switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
752 case SNB_GMCH_GMS_STOLEN_32M:
753 gtt_entries = MB(32) - KB(size);
754 break;
755 case SNB_GMCH_GMS_STOLEN_64M:
756 gtt_entries = MB(64) - KB(size);
757 break;
758 case SNB_GMCH_GMS_STOLEN_96M:
759 gtt_entries = MB(96) - KB(size);
760 break;
761 case SNB_GMCH_GMS_STOLEN_128M:
762 gtt_entries = MB(128) - KB(size);
763 break;
764 case SNB_GMCH_GMS_STOLEN_160M:
765 gtt_entries = MB(160) - KB(size);
766 break;
767 case SNB_GMCH_GMS_STOLEN_192M:
768 gtt_entries = MB(192) - KB(size);
769 break;
770 case SNB_GMCH_GMS_STOLEN_224M:
771 gtt_entries = MB(224) - KB(size);
772 break;
773 case SNB_GMCH_GMS_STOLEN_256M:
774 gtt_entries = MB(256) - KB(size);
775 break;
776 case SNB_GMCH_GMS_STOLEN_288M:
777 gtt_entries = MB(288) - KB(size);
778 break;
779 case SNB_GMCH_GMS_STOLEN_320M:
780 gtt_entries = MB(320) - KB(size);
781 break;
782 case SNB_GMCH_GMS_STOLEN_352M:
783 gtt_entries = MB(352) - KB(size);
784 break;
785 case SNB_GMCH_GMS_STOLEN_384M:
786 gtt_entries = MB(384) - KB(size);
787 break;
788 case SNB_GMCH_GMS_STOLEN_416M:
789 gtt_entries = MB(416) - KB(size);
790 break;
791 case SNB_GMCH_GMS_STOLEN_448M:
792 gtt_entries = MB(448) - KB(size);
793 break;
794 case SNB_GMCH_GMS_STOLEN_480M:
795 gtt_entries = MB(480) - KB(size);
796 break;
797 case SNB_GMCH_GMS_STOLEN_512M:
798 gtt_entries = MB(512) - KB(size);
799 break;
800 }
709 } else { 801 } else {
710 switch (gmch_ctrl & I855_GMCH_GMS_MASK) { 802 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
711 case I855_GMCH_GMS_STOLEN_1M: 803 case I855_GMCH_GMS_STOLEN_1M:
@@ -1357,6 +1449,8 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1357 case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: 1449 case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
1358 case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: 1450 case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
1359 case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: 1451 case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
1452 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
1453 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
1360 *gtt_offset = *gtt_size = MB(2); 1454 *gtt_offset = *gtt_size = MB(2);
1361 break; 1455 break;
1362 default: 1456 default:
@@ -2338,9 +2432,9 @@ static const struct intel_driver_description {
2338 NULL, &intel_g33_driver }, 2432 NULL, &intel_g33_driver },
2339 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", 2433 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
2340 NULL, &intel_g33_driver }, 2434 NULL, &intel_g33_driver },
2341 { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview", 2435 { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150",
2342 NULL, &intel_g33_driver }, 2436 NULL, &intel_g33_driver },
2343 { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview", 2437 { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150",
2344 NULL, &intel_g33_driver }, 2438 NULL, &intel_g33_driver },
2345 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, 2439 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
2346 "GM45", NULL, &intel_i965_driver }, 2440 "GM45", NULL, &intel_i965_driver },
@@ -2355,13 +2449,17 @@ static const struct intel_driver_description {
2355 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, 2449 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
2356 "G41", NULL, &intel_i965_driver }, 2450 "G41", NULL, &intel_i965_driver },
2357 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0, 2451 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
2358 "Ironlake/D", NULL, &intel_i965_driver }, 2452 "HD Graphics", NULL, &intel_i965_driver },
2359 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, 2453 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
2360 "Ironlake/M", NULL, &intel_i965_driver }, 2454 "HD Graphics", NULL, &intel_i965_driver },
2361 { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, 2455 { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
2362 "Ironlake/MA", NULL, &intel_i965_driver }, 2456 "HD Graphics", NULL, &intel_i965_driver },
2363 { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, 2457 { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
2364 "Ironlake/MC2", NULL, &intel_i965_driver }, 2458 "HD Graphics", NULL, &intel_i965_driver },
2459 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0,
2460 "Sandybridge", NULL, &intel_i965_driver },
2461 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0,
2462 "Sandybridge", NULL, &intel_i965_driver },
2365 { 0, 0, 0, NULL, NULL, NULL } 2463 { 0, 0, 0, NULL, NULL, NULL }
2366}; 2464};
2367 2465
@@ -2371,7 +2469,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2371 struct agp_bridge_data *bridge; 2469 struct agp_bridge_data *bridge;
2372 u8 cap_ptr = 0; 2470 u8 cap_ptr = 0;
2373 struct resource *r; 2471 struct resource *r;
2374 int i; 2472 int i, err;
2375 2473
2376 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); 2474 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
2377 2475
@@ -2463,7 +2561,10 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2463 } 2561 }
2464 2562
2465 pci_set_drvdata(pdev, bridge); 2563 pci_set_drvdata(pdev, bridge);
2466 return agp_add_bridge(bridge); 2564 err = agp_add_bridge(bridge);
2565 if (!err)
2566 intel_agp_enabled = 1;
2567 return err;
2467} 2568}
2468 2569
2469static void __devexit agp_intel_remove(struct pci_dev *pdev) 2570static void __devexit agp_intel_remove(struct pci_dev *pdev)
@@ -2568,6 +2669,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
2568 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), 2669 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
2569 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), 2670 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
2570 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), 2671 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
2672 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
2673 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
2571 { } 2674 { }
2572}; 2675};
2573 2676
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 39c5aa75b8f1..abe3f446ca48 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -4,7 +4,7 @@
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6 6
7drm-y := drm_auth.o drm_bufs.o drm_cache.o \ 7drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
8 drm_context.o drm_dma.o drm_drawable.o \ 8 drm_context.o drm_dma.o drm_drawable.o \
9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ 9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
new file mode 100644
index 000000000000..55d03ed05000
--- /dev/null
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -0,0 +1,184 @@
1/**************************************************************************
2 *
3 * Copyright 2010 Pauli Nieminen.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
29 * Multipart buffer for coping data which is larger than the page size.
30 *
31 * Authors:
32 * Pauli Nieminen <suokkos-at-gmail-dot-com>
33 */
34
35#include "drm_buffer.h"
36
37/**
38 * Allocate the drm buffer object.
39 *
40 * buf: Pointer to a pointer where the object is stored.
41 * size: The number of bytes to allocate.
42 */
43int drm_buffer_alloc(struct drm_buffer **buf, int size)
44{
45 int nr_pages = size / PAGE_SIZE + 1;
46 int idx;
47
48 /* Allocating pointer table to end of structure makes drm_buffer
49 * variable sized */
50 *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
51 GFP_KERNEL);
52
53 if (*buf == NULL) {
54 DRM_ERROR("Failed to allocate drm buffer object to hold"
55 " %d bytes in %d pages.\n",
56 size, nr_pages);
57 return -ENOMEM;
58 }
59
60 (*buf)->size = size;
61
62 for (idx = 0; idx < nr_pages; ++idx) {
63
64 (*buf)->data[idx] =
65 kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
66 GFP_KERNEL);
67
68
69 if ((*buf)->data[idx] == NULL) {
70 DRM_ERROR("Failed to allocate %dth page for drm"
71 " buffer with %d bytes and %d pages.\n",
72 idx + 1, size, nr_pages);
73 goto error_out;
74 }
75
76 }
77
78 return 0;
79
80error_out:
81
82 /* Only last element can be null pointer so check for it first. */
83 if ((*buf)->data[idx])
84 kfree((*buf)->data[idx]);
85
86 for (--idx; idx >= 0; --idx)
87 kfree((*buf)->data[idx]);
88
89 kfree(*buf);
90 return -ENOMEM;
91}
92EXPORT_SYMBOL(drm_buffer_alloc);
93
94/**
95 * Copy the user data to the begin of the buffer and reset the processing
96 * iterator.
97 *
98 * user_data: A pointer the data that is copied to the buffer.
99 * size: The Number of bytes to copy.
100 */
101extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
102 void __user *user_data, int size)
103{
104 int nr_pages = size / PAGE_SIZE + 1;
105 int idx;
106
107 if (size > buf->size) {
108 DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
109 " %d bytes space\n",
110 size, buf->size);
111 return -EFAULT;
112 }
113
114 for (idx = 0; idx < nr_pages; ++idx) {
115
116 if (DRM_COPY_FROM_USER(buf->data[idx],
117 user_data + idx * PAGE_SIZE,
118 min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
119 DRM_ERROR("Failed to copy user data (%p) to drm buffer"
120 " (%p) %dth page.\n",
121 user_data, buf, idx);
122 return -EFAULT;
123
124 }
125 }
126 buf->iterator = 0;
127 return 0;
128}
129EXPORT_SYMBOL(drm_buffer_copy_from_user);
130
131/**
132 * Free the drm buffer object
133 */
134void drm_buffer_free(struct drm_buffer *buf)
135{
136
137 if (buf != NULL) {
138
139 int nr_pages = buf->size / PAGE_SIZE + 1;
140 int idx;
141 for (idx = 0; idx < nr_pages; ++idx)
142 kfree(buf->data[idx]);
143
144 kfree(buf);
145 }
146}
147EXPORT_SYMBOL(drm_buffer_free);
148
149/**
150 * Read an object from buffer that may be split to multiple parts. If object
151 * is not split function just returns the pointer to object in buffer. But in
152 * case of split object data is copied to given stack object that is suplied
153 * by caller.
154 *
155 * The processing location of the buffer is also advanced to the next byte
156 * after the object.
157 *
158 * objsize: The size of the objet in bytes.
159 * stack_obj: A pointer to a memory location where object can be copied.
160 */
161void *drm_buffer_read_object(struct drm_buffer *buf,
162 int objsize, void *stack_obj)
163{
164 int idx = drm_buffer_index(buf);
165 int page = drm_buffer_page(buf);
166 void *obj = 0;
167
168 if (idx + objsize <= PAGE_SIZE) {
169 obj = &buf->data[page][idx];
170 } else {
171 /* The object is split which forces copy to temporary object.*/
172 int beginsz = PAGE_SIZE - idx;
173 memcpy(stack_obj, &buf->data[page][idx], beginsz);
174
175 memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
176 objsize - beginsz);
177
178 obj = stack_obj;
179 }
180
181 drm_buffer_advance(buf, objsize);
182 return obj;
183}
184EXPORT_SYMBOL(drm_buffer_read_object);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7d0f00a935fa..f2aaf39be398 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -836,11 +836,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
836 mode_changed = true; 836 mode_changed = true;
837 } else if (set->fb == NULL) { 837 } else if (set->fb == NULL) {
838 mode_changed = true; 838 mode_changed = true;
839 } else if ((set->fb->bits_per_pixel != 839 } else
840 set->crtc->fb->bits_per_pixel) ||
841 set->fb->depth != set->crtc->fb->depth)
842 fb_changed = true;
843 else
844 fb_changed = true; 840 fb_changed = true;
845 } 841 }
846 842
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 766c46875a20..f3c58e2bd75c 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -125,28 +125,28 @@ static struct drm_ioctl_desc drm_ioctls[] = {
125 125
126 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 126 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
127 127
128 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0), 128 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
129 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), 129 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
130 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), 130 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
131 131
132 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW), 132 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
133 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW), 133 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
134 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW), 134 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
135 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 135 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
136 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER), 136 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
137 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER), 137 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
138 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW), 138 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
139 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW), 139 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
140 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 140 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
141 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 141 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
142 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW), 142 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
143 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 143 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
144 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 144 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
145 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW), 145 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
146 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), 146 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
147 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), 147 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
148 DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 148 DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
149 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW) 149 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
150}; 150};
151 151
152#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 152#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ab6c97330412..f97e7c42ac8e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -60,8 +60,7 @@
60#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5) 60#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
61/* use +hsync +vsync for detailed mode */ 61/* use +hsync +vsync for detailed mode */
62#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) 62#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
63/* define the number of Extension EDID block */ 63
64#define MAX_EDID_EXT_NUM 4
65 64
66#define LEVEL_DMT 0 65#define LEVEL_DMT 0
67#define LEVEL_GTF 1 66#define LEVEL_GTF 1
@@ -114,14 +113,14 @@ static const u8 edid_header[] = {
114}; 113};
115 114
116/** 115/**
117 * edid_is_valid - sanity check EDID data 116 * drm_edid_is_valid - sanity check EDID data
118 * @edid: EDID data 117 * @edid: EDID data
119 * 118 *
120 * Sanity check the EDID block by looking at the header, the version number 119 * Sanity check the EDID block by looking at the header, the version number
121 * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's 120 * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
122 * valid. 121 * valid.
123 */ 122 */
124static bool edid_is_valid(struct edid *edid) 123bool drm_edid_is_valid(struct edid *edid)
125{ 124{
126 int i, score = 0; 125 int i, score = 0;
127 u8 csum = 0; 126 u8 csum = 0;
@@ -163,6 +162,7 @@ bad:
163 } 162 }
164 return 0; 163 return 0;
165} 164}
165EXPORT_SYMBOL(drm_edid_is_valid);
166 166
167/** 167/**
168 * edid_vendor - match a string against EDID's obfuscated vendor field 168 * edid_vendor - match a string against EDID's obfuscated vendor field
@@ -1112,8 +1112,8 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
1112 } 1112 }
1113 1113
1114 /* Chose real EDID extension number */ 1114 /* Chose real EDID extension number */
1115 edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ? 1115 edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
1116 MAX_EDID_EXT_NUM : edid->extensions; 1116 DRM_MAX_EDID_EXT_NUM : edid->extensions;
1117 1117
1118 /* Find CEA extension */ 1118 /* Find CEA extension */
1119 for (i = 0; i < edid_ext_num; i++) { 1119 for (i = 0; i < edid_ext_num; i++) {
@@ -1195,7 +1195,7 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
1195 for (i = 0; i < 4; i++) { 1195 for (i = 0; i < 4; i++) {
1196 if (drm_do_probe_ddc_edid(adapter, buf, len)) 1196 if (drm_do_probe_ddc_edid(adapter, buf, len))
1197 return -1; 1197 return -1;
1198 if (edid_is_valid((struct edid *)buf)) 1198 if (drm_edid_is_valid((struct edid *)buf))
1199 return 0; 1199 return 0;
1200 } 1200 }
1201 1201
@@ -1220,7 +1220,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
1220 int ret; 1220 int ret;
1221 struct edid *edid; 1221 struct edid *edid;
1222 1222
1223 edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1), 1223 edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
1224 GFP_KERNEL); 1224 GFP_KERNEL);
1225 if (edid == NULL) { 1225 if (edid == NULL) {
1226 dev_warn(&connector->dev->pdev->dev, 1226 dev_warn(&connector->dev->pdev->dev,
@@ -1238,14 +1238,14 @@ struct edid *drm_get_edid(struct drm_connector *connector,
1238 if (edid->extensions != 0) { 1238 if (edid->extensions != 0) {
1239 int edid_ext_num = edid->extensions; 1239 int edid_ext_num = edid->extensions;
1240 1240
1241 if (edid_ext_num > MAX_EDID_EXT_NUM) { 1241 if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
1242 dev_warn(&connector->dev->pdev->dev, 1242 dev_warn(&connector->dev->pdev->dev,
1243 "The number of extension(%d) is " 1243 "The number of extension(%d) is "
1244 "over max (%d), actually read number (%d)\n", 1244 "over max (%d), actually read number (%d)\n",
1245 edid_ext_num, MAX_EDID_EXT_NUM, 1245 edid_ext_num, DRM_MAX_EDID_EXT_NUM,
1246 MAX_EDID_EXT_NUM); 1246 DRM_MAX_EDID_EXT_NUM);
1247 /* Reset EDID extension number to be read */ 1247 /* Reset EDID extension number to be read */
1248 edid_ext_num = MAX_EDID_EXT_NUM; 1248 edid_ext_num = DRM_MAX_EDID_EXT_NUM;
1249 } 1249 }
1250 /* Read EDID including extensions too */ 1250 /* Read EDID including extensions too */
1251 ret = drm_ddc_read_edid(connector, adapter, (char *)edid, 1251 ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
@@ -1288,8 +1288,8 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
1288 goto end; 1288 goto end;
1289 1289
1290 /* Chose real EDID extension number */ 1290 /* Chose real EDID extension number */
1291 edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ? 1291 edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
1292 MAX_EDID_EXT_NUM : edid->extensions; 1292 DRM_MAX_EDID_EXT_NUM : edid->extensions;
1293 1293
1294 /* Find CEA extension */ 1294 /* Find CEA extension */
1295 for (i = 0; i < edid_ext_num; i++) { 1295 for (i = 0; i < edid_ext_num; i++) {
@@ -1346,7 +1346,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
1346 if (edid == NULL) { 1346 if (edid == NULL) {
1347 return 0; 1347 return 0;
1348 } 1348 }
1349 if (!edid_is_valid(edid)) { 1349 if (!drm_edid_is_valid(edid)) {
1350 dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", 1350 dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
1351 drm_get_connector_name(connector)); 1351 drm_get_connector_name(connector));
1352 return 0; 1352 return 0;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0f9e90552dc4..50549703584f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,6 +27,7 @@
27 * Dave Airlie <airlied@linux.ie> 27 * Dave Airlie <airlied@linux.ie>
28 * Jesse Barnes <jesse.barnes@intel.com> 28 * Jesse Barnes <jesse.barnes@intel.com>
29 */ 29 */
30#include <linux/kernel.h>
30#include <linux/sysrq.h> 31#include <linux/sysrq.h>
31#include <linux/fb.h> 32#include <linux/fb.h>
32#include "drmP.h" 33#include "drmP.h"
@@ -50,21 +51,6 @@ int drm_fb_helper_add_connector(struct drm_connector *connector)
50} 51}
51EXPORT_SYMBOL(drm_fb_helper_add_connector); 52EXPORT_SYMBOL(drm_fb_helper_add_connector);
52 53
53static int my_atoi(const char *name)
54{
55 int val = 0;
56
57 for (;; name++) {
58 switch (*name) {
59 case '0' ... '9':
60 val = 10*val+(*name-'0');
61 break;
62 default:
63 return val;
64 }
65 }
66}
67
68/** 54/**
69 * drm_fb_helper_connector_parse_command_line - parse command line for connector 55 * drm_fb_helper_connector_parse_command_line - parse command line for connector
70 * @connector - connector to parse line for 56 * @connector - connector to parse line for
@@ -111,7 +97,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
111 namelen = i; 97 namelen = i;
112 if (!refresh_specified && !bpp_specified && 98 if (!refresh_specified && !bpp_specified &&
113 !yres_specified) { 99 !yres_specified) {
114 refresh = my_atoi(&name[i+1]); 100 refresh = simple_strtol(&name[i+1], NULL, 10);
115 refresh_specified = 1; 101 refresh_specified = 1;
116 if (cvt || rb) 102 if (cvt || rb)
117 cvt = 0; 103 cvt = 0;
@@ -121,7 +107,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
121 case '-': 107 case '-':
122 namelen = i; 108 namelen = i;
123 if (!bpp_specified && !yres_specified) { 109 if (!bpp_specified && !yres_specified) {
124 bpp = my_atoi(&name[i+1]); 110 bpp = simple_strtol(&name[i+1], NULL, 10);
125 bpp_specified = 1; 111 bpp_specified = 1;
126 if (cvt || rb) 112 if (cvt || rb)
127 cvt = 0; 113 cvt = 0;
@@ -130,7 +116,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
130 break; 116 break;
131 case 'x': 117 case 'x':
132 if (!yres_specified) { 118 if (!yres_specified) {
133 yres = my_atoi(&name[i+1]); 119 yres = simple_strtol(&name[i+1], NULL, 10);
134 yres_specified = 1; 120 yres_specified = 1;
135 } else 121 } else
136 goto done; 122 goto done;
@@ -170,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
170 } 156 }
171 } 157 }
172 if (i < 0 && yres_specified) { 158 if (i < 0 && yres_specified) {
173 xres = my_atoi(name); 159 xres = simple_strtol(name, NULL, 10);
174 res_specified = 1; 160 res_specified = 1;
175 } 161 }
176done: 162done:
@@ -694,7 +680,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
694 int i; 680 int i;
695 681
696 if (var->pixclock != 0) { 682 if (var->pixclock != 0) {
697 DRM_ERROR("PIXEL CLCOK SET\n"); 683 DRM_ERROR("PIXEL CLOCK SET\n");
698 return -EINVAL; 684 return -EINVAL;
699 } 685 }
700 686
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8bf3770f294e..aa89d4b0b4c4 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -192,9 +192,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
192 idr_remove(&filp->object_idr, handle); 192 idr_remove(&filp->object_idr, handle);
193 spin_unlock(&filp->table_lock); 193 spin_unlock(&filp->table_lock);
194 194
195 mutex_lock(&dev->struct_mutex); 195 drm_gem_object_handle_unreference_unlocked(obj);
196 drm_gem_object_handle_unreference(obj);
197 mutex_unlock(&dev->struct_mutex);
198 196
199 return 0; 197 return 0;
200} 198}
@@ -325,9 +323,7 @@ again:
325 } 323 }
326 324
327err: 325err:
328 mutex_lock(&dev->struct_mutex); 326 drm_gem_object_unreference_unlocked(obj);
329 drm_gem_object_unreference(obj);
330 mutex_unlock(&dev->struct_mutex);
331 return ret; 327 return ret;
332} 328}
333 329
@@ -358,9 +354,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
358 return -ENOENT; 354 return -ENOENT;
359 355
360 ret = drm_gem_handle_create(file_priv, obj, &handle); 356 ret = drm_gem_handle_create(file_priv, obj, &handle);
361 mutex_lock(&dev->struct_mutex); 357 drm_gem_object_unreference_unlocked(obj);
362 drm_gem_object_unreference(obj);
363 mutex_unlock(&dev->struct_mutex);
364 if (ret) 358 if (ret)
365 return ret; 359 return ret;
366 360
@@ -390,7 +384,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
390{ 384{
391 struct drm_gem_object *obj = ptr; 385 struct drm_gem_object *obj = ptr;
392 386
393 drm_gem_object_handle_unreference(obj); 387 drm_gem_object_handle_unreference_unlocked(obj);
394 388
395 return 0; 389 return 0;
396} 390}
@@ -403,16 +397,25 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
403void 397void
404drm_gem_release(struct drm_device *dev, struct drm_file *file_private) 398drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
405{ 399{
406 mutex_lock(&dev->struct_mutex);
407 idr_for_each(&file_private->object_idr, 400 idr_for_each(&file_private->object_idr,
408 &drm_gem_object_release_handle, NULL); 401 &drm_gem_object_release_handle, NULL);
409 402
410 idr_destroy(&file_private->object_idr); 403 idr_destroy(&file_private->object_idr);
411 mutex_unlock(&dev->struct_mutex); 404}
405
406static void
407drm_gem_object_free_common(struct drm_gem_object *obj)
408{
409 struct drm_device *dev = obj->dev;
410 fput(obj->filp);
411 atomic_dec(&dev->object_count);
412 atomic_sub(obj->size, &dev->object_memory);
413 kfree(obj);
412} 414}
413 415
414/** 416/**
415 * Called after the last reference to the object has been lost. 417 * Called after the last reference to the object has been lost.
418 * Must be called holding struct_ mutex
416 * 419 *
417 * Frees the object 420 * Frees the object
418 */ 421 */
@@ -427,14 +430,40 @@ drm_gem_object_free(struct kref *kref)
427 if (dev->driver->gem_free_object != NULL) 430 if (dev->driver->gem_free_object != NULL)
428 dev->driver->gem_free_object(obj); 431 dev->driver->gem_free_object(obj);
429 432
430 fput(obj->filp); 433 drm_gem_object_free_common(obj);
431 atomic_dec(&dev->object_count);
432 atomic_sub(obj->size, &dev->object_memory);
433 kfree(obj);
434} 434}
435EXPORT_SYMBOL(drm_gem_object_free); 435EXPORT_SYMBOL(drm_gem_object_free);
436 436
437/** 437/**
438 * Called after the last reference to the object has been lost.
439 * Must be called without holding struct_mutex
440 *
441 * Frees the object
442 */
443void
444drm_gem_object_free_unlocked(struct kref *kref)
445{
446 struct drm_gem_object *obj = (struct drm_gem_object *) kref;
447 struct drm_device *dev = obj->dev;
448
449 if (dev->driver->gem_free_object_unlocked != NULL)
450 dev->driver->gem_free_object_unlocked(obj);
451 else if (dev->driver->gem_free_object != NULL) {
452 mutex_lock(&dev->struct_mutex);
453 dev->driver->gem_free_object(obj);
454 mutex_unlock(&dev->struct_mutex);
455 }
456
457 drm_gem_object_free_common(obj);
458}
459EXPORT_SYMBOL(drm_gem_object_free_unlocked);
460
461static void drm_gem_object_ref_bug(struct kref *list_kref)
462{
463 BUG();
464}
465
466/**
438 * Called after the last handle to the object has been closed 467 * Called after the last handle to the object has been closed
439 * 468 *
440 * Removes any name for the object. Note that this must be 469 * Removes any name for the object. Note that this must be
@@ -458,8 +487,10 @@ drm_gem_object_handle_free(struct kref *kref)
458 /* 487 /*
459 * The object name held a reference to this object, drop 488 * The object name held a reference to this object, drop
460 * that now. 489 * that now.
490 *
491 * This cannot be the last reference, since the handle holds one too.
461 */ 492 */
462 drm_gem_object_unreference(obj); 493 kref_put(&obj->refcount, drm_gem_object_ref_bug);
463 } else 494 } else
464 spin_unlock(&dev->object_name_lock); 495 spin_unlock(&dev->object_name_lock);
465 496
@@ -477,11 +508,8 @@ EXPORT_SYMBOL(drm_gem_vm_open);
477void drm_gem_vm_close(struct vm_area_struct *vma) 508void drm_gem_vm_close(struct vm_area_struct *vma)
478{ 509{
479 struct drm_gem_object *obj = vma->vm_private_data; 510 struct drm_gem_object *obj = vma->vm_private_data;
480 struct drm_device *dev = obj->dev;
481 511
482 mutex_lock(&dev->struct_mutex); 512 drm_gem_object_unreference_unlocked(obj);
483 drm_gem_object_unreference(obj);
484 mutex_unlock(&dev->struct_mutex);
485} 513}
486EXPORT_SYMBOL(drm_gem_vm_close); 514EXPORT_SYMBOL(drm_gem_vm_close);
487 515
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a894ade03093..1376dfe44c95 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -162,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
162 struct drm_device *dev = node->minor->dev; 162 struct drm_device *dev = node->minor->dev;
163 drm_i915_private_t *dev_priv = dev->dev_private; 163 drm_i915_private_t *dev_priv = dev->dev_private;
164 164
165 if (!IS_IRONLAKE(dev)) { 165 if (!HAS_PCH_SPLIT(dev)) {
166 seq_printf(m, "Interrupt enable: %08x\n", 166 seq_printf(m, "Interrupt enable: %08x\n",
167 I915_READ(IER)); 167 I915_READ(IER));
168 seq_printf(m, "Interrupt identity: %08x\n", 168 seq_printf(m, "Interrupt identity: %08x\n",
@@ -350,6 +350,36 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
350 return 0; 350 return 0;
351} 351}
352 352
353static const char *pin_flag(int pinned)
354{
355 if (pinned > 0)
356 return " P";
357 else if (pinned < 0)
358 return " p";
359 else
360 return "";
361}
362
363static const char *tiling_flag(int tiling)
364{
365 switch (tiling) {
366 default:
367 case I915_TILING_NONE: return "";
368 case I915_TILING_X: return " X";
369 case I915_TILING_Y: return " Y";
370 }
371}
372
373static const char *dirty_flag(int dirty)
374{
375 return dirty ? " dirty" : "";
376}
377
378static const char *purgeable_flag(int purgeable)
379{
380 return purgeable ? " purgeable" : "";
381}
382
353static int i915_error_state(struct seq_file *m, void *unused) 383static int i915_error_state(struct seq_file *m, void *unused)
354{ 384{
355 struct drm_info_node *node = (struct drm_info_node *) m->private; 385 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -357,6 +387,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
357 drm_i915_private_t *dev_priv = dev->dev_private; 387 drm_i915_private_t *dev_priv = dev->dev_private;
358 struct drm_i915_error_state *error; 388 struct drm_i915_error_state *error;
359 unsigned long flags; 389 unsigned long flags;
390 int i, page, offset, elt;
360 391
361 spin_lock_irqsave(&dev_priv->error_lock, flags); 392 spin_lock_irqsave(&dev_priv->error_lock, flags);
362 if (!dev_priv->first_error) { 393 if (!dev_priv->first_error) {
@@ -368,6 +399,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
368 399
369 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 400 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
370 error->time.tv_usec); 401 error->time.tv_usec);
402 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
371 seq_printf(m, "EIR: 0x%08x\n", error->eir); 403 seq_printf(m, "EIR: 0x%08x\n", error->eir);
372 seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); 404 seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
373 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); 405 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
@@ -379,6 +411,59 @@ static int i915_error_state(struct seq_file *m, void *unused)
379 seq_printf(m, " INSTPS: 0x%08x\n", error->instps); 411 seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
380 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); 412 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
381 } 413 }
414 seq_printf(m, "seqno: 0x%08x\n", error->seqno);
415
416 if (error->active_bo_count) {
417 seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
418
419 for (i = 0; i < error->active_bo_count; i++) {
420 seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
421 error->active_bo[i].gtt_offset,
422 error->active_bo[i].size,
423 error->active_bo[i].read_domains,
424 error->active_bo[i].write_domain,
425 error->active_bo[i].seqno,
426 pin_flag(error->active_bo[i].pinned),
427 tiling_flag(error->active_bo[i].tiling),
428 dirty_flag(error->active_bo[i].dirty),
429 purgeable_flag(error->active_bo[i].purgeable));
430
431 if (error->active_bo[i].name)
432 seq_printf(m, " (name: %d)", error->active_bo[i].name);
433 if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
434 seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
435
436 seq_printf(m, "\n");
437 }
438 }
439
440 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
441 if (error->batchbuffer[i]) {
442 struct drm_i915_error_object *obj = error->batchbuffer[i];
443
444 seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
445 offset = 0;
446 for (page = 0; page < obj->page_count; page++) {
447 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
448 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
449 offset += 4;
450 }
451 }
452 }
453 }
454
455 if (error->ringbuffer) {
456 struct drm_i915_error_object *obj = error->ringbuffer;
457
458 seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
459 offset = 0;
460 for (page = 0; page < obj->page_count; page++) {
461 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
462 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
463 offset += 4;
464 }
465 }
466 }
382 467
383out: 468out:
384 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 469 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -386,6 +471,165 @@ out:
386 return 0; 471 return 0;
387} 472}
388 473
474static int i915_rstdby_delays(struct seq_file *m, void *unused)
475{
476 struct drm_info_node *node = (struct drm_info_node *) m->private;
477 struct drm_device *dev = node->minor->dev;
478 drm_i915_private_t *dev_priv = dev->dev_private;
479 u16 crstanddelay = I915_READ16(CRSTANDVID);
480
481 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
482
483 return 0;
484}
485
486static int i915_cur_delayinfo(struct seq_file *m, void *unused)
487{
488 struct drm_info_node *node = (struct drm_info_node *) m->private;
489 struct drm_device *dev = node->minor->dev;
490 drm_i915_private_t *dev_priv = dev->dev_private;
491 u16 rgvswctl = I915_READ16(MEMSWCTL);
492
493 seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
494 seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
495 seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
496 rgvswctl & 0x3f);
497
498 return 0;
499}
500
501static int i915_delayfreq_table(struct seq_file *m, void *unused)
502{
503 struct drm_info_node *node = (struct drm_info_node *) m->private;
504 struct drm_device *dev = node->minor->dev;
505 drm_i915_private_t *dev_priv = dev->dev_private;
506 u32 delayfreq;
507 int i;
508
509 for (i = 0; i < 16; i++) {
510 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
511 seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
512 }
513
514 return 0;
515}
516
517static inline int MAP_TO_MV(int map)
518{
519 return 1250 - (map * 25);
520}
521
522static int i915_inttoext_table(struct seq_file *m, void *unused)
523{
524 struct drm_info_node *node = (struct drm_info_node *) m->private;
525 struct drm_device *dev = node->minor->dev;
526 drm_i915_private_t *dev_priv = dev->dev_private;
527 u32 inttoext;
528 int i;
529
530 for (i = 1; i <= 32; i++) {
531 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
532 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
533 }
534
535 return 0;
536}
537
538static int i915_drpc_info(struct seq_file *m, void *unused)
539{
540 struct drm_info_node *node = (struct drm_info_node *) m->private;
541 struct drm_device *dev = node->minor->dev;
542 drm_i915_private_t *dev_priv = dev->dev_private;
543 u32 rgvmodectl = I915_READ(MEMMODECTL);
544
545 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
546 "yes" : "no");
547 seq_printf(m, "Boost freq: %d\n",
548 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
549 MEMMODE_BOOST_FREQ_SHIFT);
550 seq_printf(m, "HW control enabled: %s\n",
551 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
552 seq_printf(m, "SW control enabled: %s\n",
553 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
554 seq_printf(m, "Gated voltage change: %s\n",
555 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
556 seq_printf(m, "Starting frequency: P%d\n",
557 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
558 seq_printf(m, "Max frequency: P%d\n",
559 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
560 seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
561
562 return 0;
563}
564
565static int i915_fbc_status(struct seq_file *m, void *unused)
566{
567 struct drm_info_node *node = (struct drm_info_node *) m->private;
568 struct drm_device *dev = node->minor->dev;
569 struct drm_crtc *crtc;
570 drm_i915_private_t *dev_priv = dev->dev_private;
571 bool fbc_enabled = false;
572
573 if (!dev_priv->display.fbc_enabled) {
574 seq_printf(m, "FBC unsupported on this chipset\n");
575 return 0;
576 }
577
578 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
579 if (!crtc->enabled)
580 continue;
581 if (dev_priv->display.fbc_enabled(crtc))
582 fbc_enabled = true;
583 }
584
585 if (fbc_enabled) {
586 seq_printf(m, "FBC enabled\n");
587 } else {
588 seq_printf(m, "FBC disabled: ");
589 switch (dev_priv->no_fbc_reason) {
590 case FBC_STOLEN_TOO_SMALL:
591 seq_printf(m, "not enough stolen memory");
592 break;
593 case FBC_UNSUPPORTED_MODE:
594 seq_printf(m, "mode not supported");
595 break;
596 case FBC_MODE_TOO_LARGE:
597 seq_printf(m, "mode too large");
598 break;
599 case FBC_BAD_PLANE:
600 seq_printf(m, "FBC unsupported on plane");
601 break;
602 case FBC_NOT_TILED:
603 seq_printf(m, "scanout buffer not tiled");
604 break;
605 default:
606 seq_printf(m, "unknown reason");
607 }
608 seq_printf(m, "\n");
609 }
610 return 0;
611}
612
613static int i915_sr_status(struct seq_file *m, void *unused)
614{
615 struct drm_info_node *node = (struct drm_info_node *) m->private;
616 struct drm_device *dev = node->minor->dev;
617 drm_i915_private_t *dev_priv = dev->dev_private;
618 bool sr_enabled = false;
619
620 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev))
621 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
622 else if (IS_I915GM(dev))
623 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
624 else if (IS_PINEVIEW(dev))
625 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
626
627 seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
628 "disabled");
629
630 return 0;
631}
632
389static int 633static int
390i915_wedged_open(struct inode *inode, 634i915_wedged_open(struct inode *inode,
391 struct file *filp) 635 struct file *filp)
@@ -503,6 +747,13 @@ static struct drm_info_list i915_debugfs_list[] = {
503 {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, 747 {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
504 {"i915_batchbuffers", i915_batchbuffer_info, 0}, 748 {"i915_batchbuffers", i915_batchbuffer_info, 0},
505 {"i915_error_state", i915_error_state, 0}, 749 {"i915_error_state", i915_error_state, 0},
750 {"i915_rstdby_delays", i915_rstdby_delays, 0},
751 {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
752 {"i915_delayfreq_table", i915_delayfreq_table, 0},
753 {"i915_inttoext_table", i915_inttoext_table, 0},
754 {"i915_drpc_info", i915_drpc_info, 0},
755 {"i915_fbc_status", i915_fbc_status, 0},
756 {"i915_sr_status", i915_sr_status, 0},
506}; 757};
507#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 758#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
508 759
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2307f98349f7..8bfc0bbf13e6 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -35,6 +35,9 @@
35#include "i915_drv.h" 35#include "i915_drv.h"
36#include "i915_trace.h" 36#include "i915_trace.h"
37#include <linux/vgaarb.h> 37#include <linux/vgaarb.h>
38#include <linux/acpi.h>
39#include <linux/pnp.h>
40#include <linux/vga_switcheroo.h>
38 41
39/* Really want an OS-independent resettable timer. Would like to have 42/* Really want an OS-independent resettable timer. Would like to have
40 * this loop run for (eg) 3 sec, but have the timer reset every time 43 * this loop run for (eg) 3 sec, but have the timer reset every time
@@ -933,6 +936,120 @@ static int i915_get_bridge_dev(struct drm_device *dev)
933 return 0; 936 return 0;
934} 937}
935 938
939#define MCHBAR_I915 0x44
940#define MCHBAR_I965 0x48
941#define MCHBAR_SIZE (4*4096)
942
943#define DEVEN_REG 0x54
944#define DEVEN_MCHBAR_EN (1 << 28)
945
946/* Allocate space for the MCH regs if needed, return nonzero on error */
947static int
948intel_alloc_mchbar_resource(struct drm_device *dev)
949{
950 drm_i915_private_t *dev_priv = dev->dev_private;
951 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
952 u32 temp_lo, temp_hi = 0;
953 u64 mchbar_addr;
954 int ret = 0;
955
956 if (IS_I965G(dev))
957 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
958 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
959 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
960
961 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
962#ifdef CONFIG_PNP
963 if (mchbar_addr &&
964 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
965 ret = 0;
966 goto out;
967 }
968#endif
969
970 /* Get some space for it */
971 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
972 MCHBAR_SIZE, MCHBAR_SIZE,
973 PCIBIOS_MIN_MEM,
974 0, pcibios_align_resource,
975 dev_priv->bridge_dev);
976 if (ret) {
977 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
978 dev_priv->mch_res.start = 0;
979 goto out;
980 }
981
982 if (IS_I965G(dev))
983 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
984 upper_32_bits(dev_priv->mch_res.start));
985
986 pci_write_config_dword(dev_priv->bridge_dev, reg,
987 lower_32_bits(dev_priv->mch_res.start));
988out:
989 return ret;
990}
991
 992/* Set up MCHBAR if possible; set mchbar_need_disable if it must be turned off again on teardown */
993static void
994intel_setup_mchbar(struct drm_device *dev)
995{
996 drm_i915_private_t *dev_priv = dev->dev_private;
997 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
998 u32 temp;
999 bool enabled;
1000
1001 dev_priv->mchbar_need_disable = false;
1002
1003 if (IS_I915G(dev) || IS_I915GM(dev)) {
1004 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
1005 enabled = !!(temp & DEVEN_MCHBAR_EN);
1006 } else {
1007 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1008 enabled = temp & 1;
1009 }
1010
1011 /* If it's already enabled, don't have to do anything */
1012 if (enabled)
1013 return;
1014
1015 if (intel_alloc_mchbar_resource(dev))
1016 return;
1017
1018 dev_priv->mchbar_need_disable = true;
1019
1020 /* Space is allocated or reserved, so enable it. */
1021 if (IS_I915G(dev) || IS_I915GM(dev)) {
1022 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
1023 temp | DEVEN_MCHBAR_EN);
1024 } else {
1025 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1026 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
1027 }
1028}
1029
1030static void
1031intel_teardown_mchbar(struct drm_device *dev)
1032{
1033 drm_i915_private_t *dev_priv = dev->dev_private;
1034 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
1035 u32 temp;
1036
1037 if (dev_priv->mchbar_need_disable) {
1038 if (IS_I915G(dev) || IS_I915GM(dev)) {
1039 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
1040 temp &= ~DEVEN_MCHBAR_EN;
1041 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
1042 } else {
1043 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1044 temp &= ~1;
1045 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
1046 }
1047 }
1048
1049 if (dev_priv->mch_res.start)
1050 release_resource(&dev_priv->mch_res);
1051}
1052
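intel_alloc_mchbar_resource() above reads MCHBAR as two 32-bit config dwords (the high dword only on i965-class bridges), stitches them into a 64-bit address, and only allocates a window when neither PNP nor the BIOS already reserves one; intel_setup_mchbar() then checks the enable bit (DEVEN_MCHBAR_EN on i915G/GM, bit 0 of MCHBAR otherwise). A small user-space sketch of just the address/enable arithmetic follows; the sample register values are made up for illustration.

/* mchbar_decode.c - sketch of the hi/lo dword -> 64-bit MCHBAR arithmetic. */
#include <stdint.h>
#include <stdio.h>

#define MCHBAR_SIZE (4 * 4096)

int main(void)
{
        /* Stand-in config-space reads; the driver gets these from the bridge. */
        uint32_t temp_lo = 0xfed10001;  /* low dword; bit 0 is the enable bit */
        uint32_t temp_hi = 0x00000000;  /* high dword, i965-class bridges only */

        uint64_t mchbar_addr = ((uint64_t)temp_hi << 32) | temp_lo;

        printf("MCHBAR raw 0x%llx, %s, window %u bytes\n",
               (unsigned long long)mchbar_addr,
               (temp_lo & 1) ? "enabled" : "disabled", MCHBAR_SIZE);
        return 0;
}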
936/** 1053/**
937 * i915_probe_agp - get AGP bootup configuration 1054 * i915_probe_agp - get AGP bootup configuration
938 * @pdev: PCI device 1055 * @pdev: PCI device
@@ -978,59 +1095,123 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
978 * Some of the preallocated space is taken by the GTT 1095 * Some of the preallocated space is taken by the GTT
979 * and popup. GTT is 1K per MB of aperture size, and popup is 4K. 1096 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
980 */ 1097 */
981 if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev)) 1098 if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
982 overhead = 4096; 1099 overhead = 4096;
983 else 1100 else
984 overhead = (*aperture_size / 1024) + 4096; 1101 overhead = (*aperture_size / 1024) + 4096;
985 1102
986 switch (tmp & INTEL_GMCH_GMS_MASK) { 1103 if (IS_GEN6(dev)) {
987 case INTEL_855_GMCH_GMS_DISABLED: 1104 /* SNB has memory control reg at 0x50.w */
988 DRM_ERROR("video memory is disabled\n"); 1105 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
989 return -1; 1106
990 case INTEL_855_GMCH_GMS_STOLEN_1M: 1107 switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
991 stolen = 1 * 1024 * 1024; 1108 case INTEL_855_GMCH_GMS_DISABLED:
992 break; 1109 DRM_ERROR("video memory is disabled\n");
993 case INTEL_855_GMCH_GMS_STOLEN_4M: 1110 return -1;
994 stolen = 4 * 1024 * 1024; 1111 case SNB_GMCH_GMS_STOLEN_32M:
995 break; 1112 stolen = 32 * 1024 * 1024;
996 case INTEL_855_GMCH_GMS_STOLEN_8M: 1113 break;
997 stolen = 8 * 1024 * 1024; 1114 case SNB_GMCH_GMS_STOLEN_64M:
998 break; 1115 stolen = 64 * 1024 * 1024;
999 case INTEL_855_GMCH_GMS_STOLEN_16M: 1116 break;
1000 stolen = 16 * 1024 * 1024; 1117 case SNB_GMCH_GMS_STOLEN_96M:
1001 break; 1118 stolen = 96 * 1024 * 1024;
1002 case INTEL_855_GMCH_GMS_STOLEN_32M: 1119 break;
1003 stolen = 32 * 1024 * 1024; 1120 case SNB_GMCH_GMS_STOLEN_128M:
1004 break; 1121 stolen = 128 * 1024 * 1024;
1005 case INTEL_915G_GMCH_GMS_STOLEN_48M: 1122 break;
1006 stolen = 48 * 1024 * 1024; 1123 case SNB_GMCH_GMS_STOLEN_160M:
1007 break; 1124 stolen = 160 * 1024 * 1024;
1008 case INTEL_915G_GMCH_GMS_STOLEN_64M: 1125 break;
1009 stolen = 64 * 1024 * 1024; 1126 case SNB_GMCH_GMS_STOLEN_192M:
1010 break; 1127 stolen = 192 * 1024 * 1024;
1011 case INTEL_GMCH_GMS_STOLEN_128M: 1128 break;
1012 stolen = 128 * 1024 * 1024; 1129 case SNB_GMCH_GMS_STOLEN_224M:
1013 break; 1130 stolen = 224 * 1024 * 1024;
1014 case INTEL_GMCH_GMS_STOLEN_256M: 1131 break;
1015 stolen = 256 * 1024 * 1024; 1132 case SNB_GMCH_GMS_STOLEN_256M:
1016 break; 1133 stolen = 256 * 1024 * 1024;
1017 case INTEL_GMCH_GMS_STOLEN_96M: 1134 break;
1018 stolen = 96 * 1024 * 1024; 1135 case SNB_GMCH_GMS_STOLEN_288M:
1019 break; 1136 stolen = 288 * 1024 * 1024;
1020 case INTEL_GMCH_GMS_STOLEN_160M: 1137 break;
1021 stolen = 160 * 1024 * 1024; 1138 case SNB_GMCH_GMS_STOLEN_320M:
1022 break; 1139 stolen = 320 * 1024 * 1024;
1023 case INTEL_GMCH_GMS_STOLEN_224M: 1140 break;
1024 stolen = 224 * 1024 * 1024; 1141 case SNB_GMCH_GMS_STOLEN_352M:
1025 break; 1142 stolen = 352 * 1024 * 1024;
1026 case INTEL_GMCH_GMS_STOLEN_352M: 1143 break;
1027 stolen = 352 * 1024 * 1024; 1144 case SNB_GMCH_GMS_STOLEN_384M:
1028 break; 1145 stolen = 384 * 1024 * 1024;
1029 default: 1146 break;
1030 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", 1147 case SNB_GMCH_GMS_STOLEN_416M:
1031 tmp & INTEL_GMCH_GMS_MASK); 1148 stolen = 416 * 1024 * 1024;
1032 return -1; 1149 break;
1150 case SNB_GMCH_GMS_STOLEN_448M:
1151 stolen = 448 * 1024 * 1024;
1152 break;
1153 case SNB_GMCH_GMS_STOLEN_480M:
1154 stolen = 480 * 1024 * 1024;
1155 break;
1156 case SNB_GMCH_GMS_STOLEN_512M:
1157 stolen = 512 * 1024 * 1024;
1158 break;
1159 default:
1160 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
1161 tmp & SNB_GMCH_GMS_STOLEN_MASK);
1162 return -1;
1163 }
1164 } else {
1165 switch (tmp & INTEL_GMCH_GMS_MASK) {
1166 case INTEL_855_GMCH_GMS_DISABLED:
1167 DRM_ERROR("video memory is disabled\n");
1168 return -1;
1169 case INTEL_855_GMCH_GMS_STOLEN_1M:
1170 stolen = 1 * 1024 * 1024;
1171 break;
1172 case INTEL_855_GMCH_GMS_STOLEN_4M:
1173 stolen = 4 * 1024 * 1024;
1174 break;
1175 case INTEL_855_GMCH_GMS_STOLEN_8M:
1176 stolen = 8 * 1024 * 1024;
1177 break;
1178 case INTEL_855_GMCH_GMS_STOLEN_16M:
1179 stolen = 16 * 1024 * 1024;
1180 break;
1181 case INTEL_855_GMCH_GMS_STOLEN_32M:
1182 stolen = 32 * 1024 * 1024;
1183 break;
1184 case INTEL_915G_GMCH_GMS_STOLEN_48M:
1185 stolen = 48 * 1024 * 1024;
1186 break;
1187 case INTEL_915G_GMCH_GMS_STOLEN_64M:
1188 stolen = 64 * 1024 * 1024;
1189 break;
1190 case INTEL_GMCH_GMS_STOLEN_128M:
1191 stolen = 128 * 1024 * 1024;
1192 break;
1193 case INTEL_GMCH_GMS_STOLEN_256M:
1194 stolen = 256 * 1024 * 1024;
1195 break;
1196 case INTEL_GMCH_GMS_STOLEN_96M:
1197 stolen = 96 * 1024 * 1024;
1198 break;
1199 case INTEL_GMCH_GMS_STOLEN_160M:
1200 stolen = 160 * 1024 * 1024;
1201 break;
1202 case INTEL_GMCH_GMS_STOLEN_224M:
1203 stolen = 224 * 1024 * 1024;
1204 break;
1205 case INTEL_GMCH_GMS_STOLEN_352M:
1206 stolen = 352 * 1024 * 1024;
1207 break;
1208 default:
1209 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
1210 tmp & INTEL_GMCH_GMS_MASK);
1211 return -1;
1212 }
1033 } 1213 }
1214
1034 *preallocated_size = stolen - overhead; 1215 *preallocated_size = stolen - overhead;
1035 *start = overhead; 1216 *start = overhead;
1036 1217
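The Sandybridge branch above reads the GMCH control word at 0x50 and decodes stolen memory in 32 MB steps from 32 MB up to 512 MB, where the older chipsets use the irregular 1/4/8/.../352 MB table. How the raw SNB_GMCH_CTRL field maps onto the named cases is hidden behind the defines, but the size progression the switch encodes reduces to a multiply, as this stand-alone sketch shows.

/* snb_stolen.c - sketch: the SNB stolen-size cases step in 32 MB increments. */
#include <stdio.h>

int main(void)
{
        /* gms 1..16 corresponds to SNB_GMCH_GMS_STOLEN_32M .. _512M above. */
        unsigned gms;

        for (gms = 1; gms <= 16; gms++)
                printf("gms=%2u -> %3u MB stolen\n", gms, gms * 32);
        return 0;
}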
@@ -1064,7 +1245,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
1064 int gtt_offset, gtt_size; 1245 int gtt_offset, gtt_size;
1065 1246
1066 if (IS_I965G(dev)) { 1247 if (IS_I965G(dev)) {
1067 if (IS_G4X(dev) || IS_IRONLAKE(dev)) { 1248 if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
1068 gtt_offset = 2*1024*1024; 1249 gtt_offset = 2*1024*1024;
1069 gtt_size = 2*1024*1024; 1250 gtt_size = 2*1024*1024;
1070 } else { 1251 } else {
@@ -1133,6 +1314,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1133 /* Leave 1M for line length buffer & misc. */ 1314 /* Leave 1M for line length buffer & misc. */
1134 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); 1315 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
1135 if (!compressed_fb) { 1316 if (!compressed_fb) {
1317 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1136 i915_warn_stolen(dev); 1318 i915_warn_stolen(dev);
1137 return; 1319 return;
1138 } 1320 }
@@ -1140,6 +1322,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1140 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); 1322 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
1141 if (!compressed_fb) { 1323 if (!compressed_fb) {
1142 i915_warn_stolen(dev); 1324 i915_warn_stolen(dev);
1325 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1143 return; 1326 return;
1144 } 1327 }
1145 1328
@@ -1199,6 +1382,32 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
1199 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 1382 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1200} 1383}
1201 1384
1385static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1386{
1387 struct drm_device *dev = pci_get_drvdata(pdev);
1388 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
1389 if (state == VGA_SWITCHEROO_ON) {
 1390 printk(KERN_INFO "i915: switched on\n");
1391 /* i915 resume handler doesn't set to D0 */
1392 pci_set_power_state(dev->pdev, PCI_D0);
1393 i915_resume(dev);
1394 } else {
 1395 printk(KERN_INFO "i915: switched off\n");
1396 i915_suspend(dev, pmm);
1397 }
1398}
1399
1400static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1401{
1402 struct drm_device *dev = pci_get_drvdata(pdev);
1403 bool can_switch;
1404
1405 spin_lock(&dev->count_lock);
1406 can_switch = (dev->open_count == 0);
1407 spin_unlock(&dev->count_lock);
1408 return can_switch;
1409}
1410
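Registering the two callbacks above (done in i915_load_modeset_init() below) plugs i915 into vga_switcheroo: set_state suspends or resumes the device when the mux flips, and can_switch refuses the switch while any DRM client still holds the device open. A minimal user-space sketch of requesting a switch follows; the debugfs path and the "OFF" command string are assumptions to check against the vga_switcheroo documentation, and root is required.

/* switcheroo_off.c - sketch: ask vga_switcheroo to power down the idle GPU. */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/vgaswitcheroo/switch"; /* assumed path */
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);   /* needs root and a registered switcheroo handler */
                return 1;
        }
        fputs("OFF\n", f);      /* assumed command: power off the card not in use */
        fclose(f);
        return 0;
}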
1202static int i915_load_modeset_init(struct drm_device *dev, 1411static int i915_load_modeset_init(struct drm_device *dev,
1203 unsigned long prealloc_start, 1412 unsigned long prealloc_start,
1204 unsigned long prealloc_size, 1413 unsigned long prealloc_size,
@@ -1260,6 +1469,12 @@ static int i915_load_modeset_init(struct drm_device *dev,
1260 if (ret) 1469 if (ret)
1261 goto destroy_ringbuffer; 1470 goto destroy_ringbuffer;
1262 1471
1472 ret = vga_switcheroo_register_client(dev->pdev,
1473 i915_switcheroo_set_state,
1474 i915_switcheroo_can_switch);
1475 if (ret)
1476 goto destroy_ringbuffer;
1477
1263 intel_modeset_init(dev); 1478 intel_modeset_init(dev);
1264 1479
1265 ret = drm_irq_install(dev); 1480 ret = drm_irq_install(dev);
@@ -1281,7 +1496,9 @@ static int i915_load_modeset_init(struct drm_device *dev,
1281 return 0; 1496 return 0;
1282 1497
1283destroy_ringbuffer: 1498destroy_ringbuffer:
1499 mutex_lock(&dev->struct_mutex);
1284 i915_gem_cleanup_ringbuffer(dev); 1500 i915_gem_cleanup_ringbuffer(dev);
1501 mutex_unlock(&dev->struct_mutex);
1285out: 1502out:
1286 return ret; 1503 return ret;
1287} 1504}
@@ -1445,11 +1662,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1445 1662
1446 dev->driver->get_vblank_counter = i915_get_vblank_counter; 1663 dev->driver->get_vblank_counter = i915_get_vblank_counter;
1447 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 1664 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
1448 if (IS_G4X(dev) || IS_IRONLAKE(dev)) { 1665 if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
1449 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 1666 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
1450 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 1667 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
1451 } 1668 }
1452 1669
1670 /* Try to make sure MCHBAR is enabled before poking at it */
1671 intel_setup_mchbar(dev);
1672
1453 i915_gem_load(dev); 1673 i915_gem_load(dev);
1454 1674
1455 /* Init HWS */ 1675 /* Init HWS */
@@ -1523,6 +1743,8 @@ int i915_driver_unload(struct drm_device *dev)
1523{ 1743{
1524 struct drm_i915_private *dev_priv = dev->dev_private; 1744 struct drm_i915_private *dev_priv = dev->dev_private;
1525 1745
1746 i915_destroy_error_state(dev);
1747
1526 destroy_workqueue(dev_priv->wq); 1748 destroy_workqueue(dev_priv->wq);
1527 del_timer_sync(&dev_priv->hangcheck_timer); 1749 del_timer_sync(&dev_priv->hangcheck_timer);
1528 1750
@@ -1544,6 +1766,7 @@ int i915_driver_unload(struct drm_device *dev)
1544 dev_priv->child_dev_num = 0; 1766 dev_priv->child_dev_num = 0;
1545 } 1767 }
1546 drm_irq_uninstall(dev); 1768 drm_irq_uninstall(dev);
1769 vga_switcheroo_unregister_client(dev->pdev);
1547 vga_client_register(dev->pdev, NULL, NULL, NULL); 1770 vga_client_register(dev->pdev, NULL, NULL, NULL);
1548 } 1771 }
1549 1772
@@ -1569,6 +1792,8 @@ int i915_driver_unload(struct drm_device *dev)
1569 intel_cleanup_overlay(dev); 1792 intel_cleanup_overlay(dev);
1570 } 1793 }
1571 1794
1795 intel_teardown_mchbar(dev);
1796
1572 pci_dev_put(dev_priv->bridge_dev); 1797 pci_dev_put(dev_priv->bridge_dev);
1573 kfree(dev->dev_private); 1798 kfree(dev->dev_private);
1574 1799
@@ -1611,6 +1836,7 @@ void i915_driver_lastclose(struct drm_device * dev)
1611 1836
1612 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { 1837 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
1613 drm_fb_helper_restore(); 1838 drm_fb_helper_restore();
1839 vga_switcheroo_process_delayed_switch();
1614 return; 1840 return;
1615 } 1841 }
1616 1842
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cf4cb3e9a0c2..1b2e95455c05 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,7 @@ unsigned int i915_lvds_downclock = 0;
49module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 49module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
50 50
51static struct drm_driver driver; 51static struct drm_driver driver;
52extern int intel_agp_enabled;
52 53
53#define INTEL_VGA_DEVICE(id, info) { \ 54#define INTEL_VGA_DEVICE(id, info) { \
54 .class = PCI_CLASS_DISPLAY_VGA << 8, \ 55 .class = PCI_CLASS_DISPLAY_VGA << 8, \
@@ -136,6 +137,16 @@ const static struct intel_device_info intel_ironlake_m_info = {
136 .has_hotplug = 1, 137 .has_hotplug = 1,
137}; 138};
138 139
140const static struct intel_device_info intel_sandybridge_d_info = {
141 .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
142 .has_hotplug = 1,
143};
144
145const static struct intel_device_info intel_sandybridge_m_info = {
146 .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
147 .has_hotplug = 1,
148};
149
139const static struct pci_device_id pciidlist[] = { 150const static struct pci_device_id pciidlist[] = {
140 INTEL_VGA_DEVICE(0x3577, &intel_i830_info), 151 INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
141 INTEL_VGA_DEVICE(0x2562, &intel_845g_info), 152 INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
@@ -167,6 +178,8 @@ const static struct pci_device_id pciidlist[] = {
167 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), 178 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
168 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), 179 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
169 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), 180 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
181 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
182 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
170 {0, 0, 0} 183 {0, 0, 0}
171}; 184};
172 185
@@ -201,7 +214,7 @@ static int i915_drm_freeze(struct drm_device *dev)
201 return 0; 214 return 0;
202} 215}
203 216
204static int i915_suspend(struct drm_device *dev, pm_message_t state) 217int i915_suspend(struct drm_device *dev, pm_message_t state)
205{ 218{
206 int error; 219 int error;
207 220
@@ -255,7 +268,7 @@ static int i915_drm_thaw(struct drm_device *dev)
255 return error; 268 return error;
256} 269}
257 270
258static int i915_resume(struct drm_device *dev) 271int i915_resume(struct drm_device *dev)
259{ 272{
260 if (pci_enable_device(dev->pdev)) 273 if (pci_enable_device(dev->pdev))
261 return -EIO; 274 return -EIO;
@@ -546,6 +559,11 @@ static struct drm_driver driver = {
546 559
547static int __init i915_init(void) 560static int __init i915_init(void)
548{ 561{
562 if (!intel_agp_enabled) {
563 DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
564 return -ENODEV;
565 }
566
549 driver.num_ioctls = i915_max_ioctl; 567 driver.num_ioctls = i915_max_ioctl;
550 568
551 i915_gem_shrinker_init(); 569 i915_gem_shrinker_init();
@@ -571,6 +589,11 @@ static int __init i915_init(void)
571 driver.driver_features &= ~DRIVER_MODESET; 589 driver.driver_features &= ~DRIVER_MODESET;
572#endif 590#endif
573 591
592 if (!(driver.driver_features & DRIVER_MODESET)) {
593 driver.suspend = i915_suspend;
594 driver.resume = i915_resume;
595 }
596
574 return drm_init(&driver); 597 return drm_init(&driver);
575} 598}
576 599
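i915_init() now bails out early when intel_agp has not claimed the GMCH, and falls back to the legacy suspend/resume hooks when KMS is compiled out or disabled. A rough user-space check for the first condition is sketched below; scanning /proc/modules is only an approximation, since intel_agp may be built into the kernel and then never appears there.

/* agp_check.c - sketch: is intel_agp present as a loadable module? */
#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/proc/modules", "r");
        char line[256];
        int found = 0;

        if (!f) {
                perror("/proc/modules");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                if (!strncmp(line, "intel_agp ", 10))
                        found = 1;
        fclose(f);
        printf("intel_agp %s\n", found ? "loaded" : "not listed (possibly built in)");
        return 0;
}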
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b99b6a841d95..979439cfb827 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -150,7 +150,27 @@ struct drm_i915_error_state {
150 u32 instps; 150 u32 instps;
151 u32 instdone1; 151 u32 instdone1;
152 u32 seqno; 152 u32 seqno;
153 u64 bbaddr;
153 struct timeval time; 154 struct timeval time;
155 struct drm_i915_error_object {
156 int page_count;
157 u32 gtt_offset;
158 u32 *pages[0];
159 } *ringbuffer, *batchbuffer[2];
160 struct drm_i915_error_buffer {
161 size_t size;
162 u32 name;
163 u32 seqno;
164 u32 gtt_offset;
165 u32 read_domains;
166 u32 write_domain;
167 u32 fence_reg;
168 s32 pinned:2;
169 u32 tiling:2;
170 u32 dirty:1;
171 u32 purgeable:1;
172 } *active_bo;
173 u32 active_bo_count;
154}; 174};
155 175
156struct drm_i915_display_funcs { 176struct drm_i915_display_funcs {
@@ -192,6 +212,14 @@ struct intel_device_info {
192 u8 cursor_needs_physical : 1; 212 u8 cursor_needs_physical : 1;
193}; 213};
194 214
215enum no_fbc_reason {
216 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
 217 FBC_UNSUPPORTED_MODE, /* interlaced or doublescan mode */
218 FBC_MODE_TOO_LARGE, /* mode too large for compression */
219 FBC_BAD_PLANE, /* fbc not supported on plane */
220 FBC_NOT_TILED, /* buffer not tiled */
221};
222
195typedef struct drm_i915_private { 223typedef struct drm_i915_private {
196 struct drm_device *dev; 224 struct drm_device *dev;
197 225
@@ -452,6 +480,7 @@ typedef struct drm_i915_private {
452 u32 savePIPEB_DATA_N1; 480 u32 savePIPEB_DATA_N1;
453 u32 savePIPEB_LINK_M1; 481 u32 savePIPEB_LINK_M1;
454 u32 savePIPEB_LINK_N1; 482 u32 savePIPEB_LINK_N1;
483 u32 saveMCHBAR_RENDER_STANDBY;
455 484
456 struct { 485 struct {
457 struct drm_mm gtt_space; 486 struct drm_mm gtt_space;
@@ -590,6 +619,14 @@ typedef struct drm_i915_private {
590 int child_dev_num; 619 int child_dev_num;
591 struct child_device_config *child_dev; 620 struct child_device_config *child_dev;
592 struct drm_connector *int_lvds_connector; 621 struct drm_connector *int_lvds_connector;
622
623 bool mchbar_need_disable;
624
625 u8 cur_delay;
626 u8 min_delay;
627 u8 max_delay;
628
629 enum no_fbc_reason no_fbc_reason;
593} drm_i915_private_t; 630} drm_i915_private_t;
594 631
595/** driver private structure attached to each drm_gem_object */ 632/** driver private structure attached to each drm_gem_object */
@@ -736,6 +773,8 @@ extern unsigned int i915_fbpercrtc;
736extern unsigned int i915_powersave; 773extern unsigned int i915_powersave;
737extern unsigned int i915_lvds_downclock; 774extern unsigned int i915_lvds_downclock;
738 775
776extern int i915_suspend(struct drm_device *dev, pm_message_t state);
777extern int i915_resume(struct drm_device *dev);
739extern void i915_save_display(struct drm_device *dev); 778extern void i915_save_display(struct drm_device *dev);
740extern void i915_restore_display(struct drm_device *dev); 779extern void i915_restore_display(struct drm_device *dev);
741extern int i915_master_create(struct drm_device *dev, struct drm_master *master); 780extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
@@ -761,6 +800,7 @@ extern int i965_reset(struct drm_device *dev, u8 flags);
761 800
762/* i915_irq.c */ 801/* i915_irq.c */
763void i915_hangcheck_elapsed(unsigned long data); 802void i915_hangcheck_elapsed(unsigned long data);
803void i915_destroy_error_state(struct drm_device *dev);
764extern int i915_irq_emit(struct drm_device *dev, void *data, 804extern int i915_irq_emit(struct drm_device *dev, void *data,
765 struct drm_file *file_priv); 805 struct drm_file *file_priv);
766extern int i915_irq_wait(struct drm_device *dev, void *data, 806extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -897,7 +937,8 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
897void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); 937void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
898bool i915_tiling_ok(struct drm_device *dev, int stride, int size, 938bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
899 int tiling_mode); 939 int tiling_mode);
900bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj); 940bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
941 int tiling_mode);
901 942
902/* i915_gem_debug.c */ 943/* i915_gem_debug.c */
903void i915_gem_dump_object(struct drm_gem_object *obj, int len, 944void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -1026,7 +1067,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1026#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1067#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1027#define IS_I85X(dev) ((dev)->pci_device == 0x3582) 1068#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
1028#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1069#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1029#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx) 1070#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
1030#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 1071#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1031#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1072#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1032#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1073#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
@@ -1045,8 +1086,29 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1045#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) 1086#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
1046#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1087#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1047 1088
1089#define IS_GEN3(dev) (IS_I915G(dev) || \
1090 IS_I915GM(dev) || \
1091 IS_I945G(dev) || \
1092 IS_I945GM(dev) || \
1093 IS_G33(dev) || \
1094 IS_PINEVIEW(dev))
1095#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
1096 (dev)->pci_device == 0x2982 || \
1097 (dev)->pci_device == 0x2992 || \
1098 (dev)->pci_device == 0x29A2 || \
1099 (dev)->pci_device == 0x2A02 || \
1100 (dev)->pci_device == 0x2A12 || \
1101 (dev)->pci_device == 0x2E02 || \
1102 (dev)->pci_device == 0x2E12 || \
1103 (dev)->pci_device == 0x2E22 || \
1104 (dev)->pci_device == 0x2E32 || \
1105 (dev)->pci_device == 0x2A42 || \
1106 (dev)->pci_device == 0x2E42)
1107
1048#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1108#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1049 1109
1110#define IS_GEN6(dev) ((dev)->pci_device == 0x0102)
1111
1050/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1112/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1051 * rows, which changed the alignment requirements and fence programming. 1113 * rows, which changed the alignment requirements and fence programming.
1052 */ 1114 */
@@ -1067,6 +1129,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1067#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1129#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1068#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) 1130#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
1069 1131
1132#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
1133 IS_GEN6(dev))
1134
1070#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1135#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1071 1136
1072#endif 1137#endif
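The new IS_GEN4/IS_GEN6 macros above classify hardware purely by PCI device ID, while IS_GEN3 and HAS_PCH_SPLIT lean on the other IS_* macros and the static device info. The stand-alone sketch below mirrors only the pure ID checks, using IDs that appear in this diff (0x0106, the mobile Sandybridge ID from the new pciidlist entries, is included even though IS_GEN6 itself only tests 0x0102); anything else falls through as unclassified.

/* i915_gen_lookup.c - sketch: classify a PCI device ID like the macros above. */
#include <stdio.h>

static int i915_gen(unsigned id)
{
        static const unsigned gen4[] = {
                0x2972, 0x2982, 0x2992, 0x29A2, 0x2A02, 0x2A12,
                0x2E02, 0x2E12, 0x2E22, 0x2E32, 0x2A42, 0x2E42,
        };
        unsigned i;

        if (id == 0x0102 || id == 0x0106)       /* Sandybridge desktop/mobile */
                return 6;
        for (i = 0; i < sizeof(gen4) / sizeof(gen4[0]); i++)
                if (id == gen4[i])
                        return 4;
        return 0;       /* not covered by the pure-ID macros */
}

int main(void)
{
        const unsigned ids[] = { 0x0102, 0x2E22, 0x2562 };
        unsigned i;

        for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
                printf("0x%04x -> gen %d\n", ids[i], i915_gen(ids[i]));
        return 0;
}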
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ec8a0d7ffa39..fba37e9f775d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -128,9 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
128 return -ENOMEM; 128 return -ENOMEM;
129 129
130 ret = drm_gem_handle_create(file_priv, obj, &handle); 130 ret = drm_gem_handle_create(file_priv, obj, &handle);
131 mutex_lock(&dev->struct_mutex); 131 drm_gem_object_handle_unreference_unlocked(obj);
132 drm_gem_object_handle_unreference(obj);
133 mutex_unlock(&dev->struct_mutex);
134 132
135 if (ret) 133 if (ret)
136 return ret; 134 return ret;
@@ -488,7 +486,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
488 */ 486 */
489 if (args->offset > obj->size || args->size > obj->size || 487 if (args->offset > obj->size || args->size > obj->size ||
490 args->offset + args->size > obj->size) { 488 args->offset + args->size > obj->size) {
491 drm_gem_object_unreference(obj); 489 drm_gem_object_unreference_unlocked(obj);
492 return -EINVAL; 490 return -EINVAL;
493 } 491 }
494 492
@@ -501,7 +499,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
501 file_priv); 499 file_priv);
502 } 500 }
503 501
504 drm_gem_object_unreference(obj); 502 drm_gem_object_unreference_unlocked(obj);
505 503
506 return ret; 504 return ret;
507} 505}
@@ -961,7 +959,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
961 */ 959 */
962 if (args->offset > obj->size || args->size > obj->size || 960 if (args->offset > obj->size || args->size > obj->size ||
963 args->offset + args->size > obj->size) { 961 args->offset + args->size > obj->size) {
964 drm_gem_object_unreference(obj); 962 drm_gem_object_unreference_unlocked(obj);
965 return -EINVAL; 963 return -EINVAL;
966 } 964 }
967 965
@@ -995,7 +993,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
995 DRM_INFO("pwrite failed %d\n", ret); 993 DRM_INFO("pwrite failed %d\n", ret);
996#endif 994#endif
997 995
998 drm_gem_object_unreference(obj); 996 drm_gem_object_unreference_unlocked(obj);
999 997
1000 return ret; 998 return ret;
1001} 999}
@@ -1138,9 +1136,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1138 PROT_READ | PROT_WRITE, MAP_SHARED, 1136 PROT_READ | PROT_WRITE, MAP_SHARED,
1139 args->offset); 1137 args->offset);
1140 up_write(&current->mm->mmap_sem); 1138 up_write(&current->mm->mmap_sem);
1141 mutex_lock(&dev->struct_mutex); 1139 drm_gem_object_unreference_unlocked(obj);
1142 drm_gem_object_unreference(obj);
1143 mutex_unlock(&dev->struct_mutex);
1144 if (IS_ERR((void *)addr)) 1140 if (IS_ERR((void *)addr))
1145 return addr; 1141 return addr;
1146 1142
@@ -1562,6 +1558,38 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1562 i915_verify_inactive(dev, __FILE__, __LINE__); 1558 i915_verify_inactive(dev, __FILE__, __LINE__);
1563} 1559}
1564 1560
1561static void
1562i915_gem_process_flushing_list(struct drm_device *dev,
1563 uint32_t flush_domains, uint32_t seqno)
1564{
1565 drm_i915_private_t *dev_priv = dev->dev_private;
1566 struct drm_i915_gem_object *obj_priv, *next;
1567
1568 list_for_each_entry_safe(obj_priv, next,
1569 &dev_priv->mm.gpu_write_list,
1570 gpu_write_list) {
1571 struct drm_gem_object *obj = obj_priv->obj;
1572
1573 if ((obj->write_domain & flush_domains) ==
1574 obj->write_domain) {
1575 uint32_t old_write_domain = obj->write_domain;
1576
1577 obj->write_domain = 0;
1578 list_del_init(&obj_priv->gpu_write_list);
1579 i915_gem_object_move_to_active(obj, seqno);
1580
1581 /* update the fence lru list */
1582 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1583 list_move_tail(&obj_priv->fence_list,
1584 &dev_priv->mm.fence_list);
1585
1586 trace_i915_gem_object_change_domain(obj,
1587 obj->read_domains,
1588 old_write_domain);
1589 }
1590 }
1591}
1592
1565/** 1593/**
1566 * Creates a new sequence number, emitting a write of it to the status page 1594 * Creates a new sequence number, emitting a write of it to the status page
1567 * plus an interrupt, which will trigger i915_user_interrupt_handler. 1595 * plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1620,29 +1648,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1620 /* Associate any objects on the flushing list matching the write 1648 /* Associate any objects on the flushing list matching the write
1621 * domain we're flushing with our flush. 1649 * domain we're flushing with our flush.
1622 */ 1650 */
1623 if (flush_domains != 0) { 1651 if (flush_domains != 0)
1624 struct drm_i915_gem_object *obj_priv, *next; 1652 i915_gem_process_flushing_list(dev, flush_domains, seqno);
1625
1626 list_for_each_entry_safe(obj_priv, next,
1627 &dev_priv->mm.gpu_write_list,
1628 gpu_write_list) {
1629 struct drm_gem_object *obj = obj_priv->obj;
1630
1631 if ((obj->write_domain & flush_domains) ==
1632 obj->write_domain) {
1633 uint32_t old_write_domain = obj->write_domain;
1634
1635 obj->write_domain = 0;
1636 list_del_init(&obj_priv->gpu_write_list);
1637 i915_gem_object_move_to_active(obj, seqno);
1638
1639 trace_i915_gem_object_change_domain(obj,
1640 obj->read_domains,
1641 old_write_domain);
1642 }
1643 }
1644
1645 }
1646 1653
1647 if (!dev_priv->mm.suspended) { 1654 if (!dev_priv->mm.suspended) {
1648 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 1655 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1822,7 +1829,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1822 return -EIO; 1829 return -EIO;
1823 1830
1824 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1831 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1825 if (IS_IRONLAKE(dev)) 1832 if (HAS_PCH_SPLIT(dev))
1826 ier = I915_READ(DEIER) | I915_READ(GTIER); 1833 ier = I915_READ(DEIER) | I915_READ(GTIER);
1827 else 1834 else
1828 ier = I915_READ(IER); 1835 ier = I915_READ(IER);
@@ -1991,6 +1998,7 @@ int
1991i915_gem_object_unbind(struct drm_gem_object *obj) 1998i915_gem_object_unbind(struct drm_gem_object *obj)
1992{ 1999{
1993 struct drm_device *dev = obj->dev; 2000 struct drm_device *dev = obj->dev;
2001 drm_i915_private_t *dev_priv = dev->dev_private;
1994 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2002 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1995 int ret = 0; 2003 int ret = 0;
1996 2004
@@ -2046,8 +2054,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2046 } 2054 }
2047 2055
2048 /* Remove ourselves from the LRU list if present. */ 2056 /* Remove ourselves from the LRU list if present. */
2057 spin_lock(&dev_priv->mm.active_list_lock);
2049 if (!list_empty(&obj_priv->list)) 2058 if (!list_empty(&obj_priv->list))
2050 list_del_init(&obj_priv->list); 2059 list_del_init(&obj_priv->list);
2060 spin_unlock(&dev_priv->mm.active_list_lock);
2051 2061
2052 if (i915_gem_object_is_purgeable(obj_priv)) 2062 if (i915_gem_object_is_purgeable(obj_priv))
2053 i915_gem_object_truncate(obj); 2063 i915_gem_object_truncate(obj);
@@ -2085,11 +2095,34 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
2085} 2095}
2086 2096
2087static int 2097static int
2098i915_gpu_idle(struct drm_device *dev)
2099{
2100 drm_i915_private_t *dev_priv = dev->dev_private;
2101 bool lists_empty;
2102 uint32_t seqno;
2103
2104 spin_lock(&dev_priv->mm.active_list_lock);
2105 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
2106 list_empty(&dev_priv->mm.active_list);
2107 spin_unlock(&dev_priv->mm.active_list_lock);
2108
2109 if (lists_empty)
2110 return 0;
2111
2112 /* Flush everything onto the inactive list. */
2113 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2114 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
2115 if (seqno == 0)
2116 return -ENOMEM;
2117
2118 return i915_wait_request(dev, seqno);
2119}
2120
2121static int
2088i915_gem_evict_everything(struct drm_device *dev) 2122i915_gem_evict_everything(struct drm_device *dev)
2089{ 2123{
2090 drm_i915_private_t *dev_priv = dev->dev_private; 2124 drm_i915_private_t *dev_priv = dev->dev_private;
2091 int ret; 2125 int ret;
2092 uint32_t seqno;
2093 bool lists_empty; 2126 bool lists_empty;
2094 2127
2095 spin_lock(&dev_priv->mm.active_list_lock); 2128 spin_lock(&dev_priv->mm.active_list_lock);
@@ -2102,12 +2135,7 @@ i915_gem_evict_everything(struct drm_device *dev)
2102 return -ENOSPC; 2135 return -ENOSPC;
2103 2136
2104 /* Flush everything (on to the inactive lists) and evict */ 2137 /* Flush everything (on to the inactive lists) and evict */
2105 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 2138 ret = i915_gpu_idle(dev);
2106 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
2107 if (seqno == 0)
2108 return -ENOMEM;
2109
2110 ret = i915_wait_request(dev, seqno);
2111 if (ret) 2139 if (ret)
2112 return ret; 2140 return ret;
2113 2141
@@ -2265,6 +2293,28 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
2265 return 0; 2293 return 0;
2266} 2294}
2267 2295
2296static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
2297{
2298 struct drm_gem_object *obj = reg->obj;
2299 struct drm_device *dev = obj->dev;
2300 drm_i915_private_t *dev_priv = dev->dev_private;
2301 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2302 int regnum = obj_priv->fence_reg;
2303 uint64_t val;
2304
2305 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2306 0xfffff000) << 32;
2307 val |= obj_priv->gtt_offset & 0xfffff000;
2308 val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2309 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2310
2311 if (obj_priv->tiling_mode == I915_TILING_Y)
2312 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2313 val |= I965_FENCE_REG_VALID;
2314
2315 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2316}
2317
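sandybridge_write_fence_reg() above packs the whole fence into one 64-bit write: the 4 KB-aligned end address in the upper dword, the start address in the lower dword, the pitch in 128-byte units in between, plus the Y-tiling flag and the valid bit. The sketch below repeats that packing in user space so the layout can be inspected; the three constants are stand-ins whose values are assumed from i915_reg.h rather than taken from this diff.

/* snb_fence_pack.c - sketch of the Sandybridge fence register encoding. */
#include <stdint.h>
#include <stdio.h>

/* Assumed values for the i915_reg.h constants used above. */
#define SANDYBRIDGE_FENCE_PITCH_SHIFT   32
#define I965_FENCE_TILING_Y_SHIFT       1
#define I965_FENCE_REG_VALID            (1 << 0)

static uint64_t snb_fence_value(uint32_t gtt_offset, uint32_t size,
                                uint32_t stride, int tiling_y)
{
        uint64_t val;

        val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
        val |= gtt_offset & 0xfffff000;
        val |= (uint64_t)((stride / 128) - 1) << SANDYBRIDGE_FENCE_PITCH_SHIFT;
        if (tiling_y)
                val |= 1 << I965_FENCE_TILING_Y_SHIFT;
        val |= I965_FENCE_REG_VALID;
        return val;
}

int main(void)
{
        /* Example: 1 MB object at GTT offset 16 MB, 512-byte stride, X-tiled. */
        printf("fence = 0x%016llx\n",
               (unsigned long long)snb_fence_value(16u << 20, 1u << 20, 512, 0));
        return 0;
}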
2268static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) 2318static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2269{ 2319{
2270 struct drm_gem_object *obj = reg->obj; 2320 struct drm_gem_object *obj = reg->obj;
@@ -2361,6 +2411,58 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2361 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); 2411 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2362} 2412}
2363 2413
2414static int i915_find_fence_reg(struct drm_device *dev)
2415{
2416 struct drm_i915_fence_reg *reg = NULL;
2417 struct drm_i915_gem_object *obj_priv = NULL;
2418 struct drm_i915_private *dev_priv = dev->dev_private;
2419 struct drm_gem_object *obj = NULL;
2420 int i, avail, ret;
2421
2422 /* First try to find a free reg */
2423 avail = 0;
2424 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2425 reg = &dev_priv->fence_regs[i];
2426 if (!reg->obj)
2427 return i;
2428
2429 obj_priv = reg->obj->driver_private;
2430 if (!obj_priv->pin_count)
2431 avail++;
2432 }
2433
2434 if (avail == 0)
2435 return -ENOSPC;
2436
2437 /* None available, try to steal one or wait for a user to finish */
2438 i = I915_FENCE_REG_NONE;
2439 list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
2440 fence_list) {
2441 obj = obj_priv->obj;
2442
2443 if (obj_priv->pin_count)
2444 continue;
2445
2446 /* found one! */
2447 i = obj_priv->fence_reg;
2448 break;
2449 }
2450
2451 BUG_ON(i == I915_FENCE_REG_NONE);
2452
2453 /* We only have a reference on obj from the active list. put_fence_reg
2454 * might drop that one, causing a use-after-free in it. So hold a
2455 * private reference to obj like the other callers of put_fence_reg
2456 * (set_tiling ioctl) do. */
2457 drm_gem_object_reference(obj);
2458 ret = i915_gem_object_put_fence_reg(obj);
2459 drm_gem_object_unreference(obj);
2460 if (ret != 0)
2461 return ret;
2462
2463 return i;
2464}
2465
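i915_find_fence_reg() above splits fence allocation into two passes: hand out a completely free register if one exists, report -ENOSPC when every register is pinned, and otherwise steal the least-recently-used unpinned one off mm.fence_list (taking a temporary object reference around put_fence_reg so the object cannot vanish underneath it). The sketch below keeps only the selection policy, with a timestamp standing in for position on the LRU list.

/* fence_pick.c - sketch of "free register first, else steal the LRU unpinned". */
#include <stdio.h>

struct fence {
        int in_use;     /* reg->obj != NULL in the driver */
        int pinned;     /* obj_priv->pin_count in the driver */
        int last_use;   /* stands in for position on mm.fence_list */
};

static int find_fence(const struct fence *regs, int n)
{
        int i, avail = 0, victim = -1;

        for (i = 0; i < n; i++) {               /* first pass: any free register? */
                if (!regs[i].in_use)
                        return i;
                if (!regs[i].pinned)
                        avail++;
        }
        if (!avail)
                return -1;                      /* -ENOSPC in the driver */

        for (i = 0; i < n; i++)                 /* second pass: oldest unpinned user */
                if (!regs[i].pinned &&
                    (victim < 0 || regs[i].last_use < regs[victim].last_use))
                        victim = i;
        return victim;
}

int main(void)
{
        const struct fence regs[] = { { 1, 1, 5 }, { 1, 0, 2 }, { 1, 0, 7 } };

        printf("chose fence reg %d\n", find_fence(regs, 3));
        return 0;
}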
2364/** 2466/**
2365 * i915_gem_object_get_fence_reg - set up a fence reg for an object 2467 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2366 * @obj: object to map through a fence reg 2468 * @obj: object to map through a fence reg
@@ -2381,8 +2483,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2381 struct drm_i915_private *dev_priv = dev->dev_private; 2483 struct drm_i915_private *dev_priv = dev->dev_private;
2382 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2484 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2383 struct drm_i915_fence_reg *reg = NULL; 2485 struct drm_i915_fence_reg *reg = NULL;
2384 struct drm_i915_gem_object *old_obj_priv = NULL; 2486 int ret;
2385 int i, ret, avail;
2386 2487
2387 /* Just update our place in the LRU if our fence is getting used. */ 2488 /* Just update our place in the LRU if our fence is getting used. */
2388 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { 2489 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -2410,86 +2511,27 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2410 break; 2511 break;
2411 } 2512 }
2412 2513
2413 /* First try to find a free reg */ 2514 ret = i915_find_fence_reg(dev);
2414 avail = 0; 2515 if (ret < 0)
2415 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { 2516 return ret;
2416 reg = &dev_priv->fence_regs[i];
2417 if (!reg->obj)
2418 break;
2419
2420 old_obj_priv = reg->obj->driver_private;
2421 if (!old_obj_priv->pin_count)
2422 avail++;
2423 }
2424
2425 /* None available, try to steal one or wait for a user to finish */
2426 if (i == dev_priv->num_fence_regs) {
2427 struct drm_gem_object *old_obj = NULL;
2428
2429 if (avail == 0)
2430 return -ENOSPC;
2431
2432 list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
2433 fence_list) {
2434 old_obj = old_obj_priv->obj;
2435
2436 if (old_obj_priv->pin_count)
2437 continue;
2438
2439 /* Take a reference, as otherwise the wait_rendering
2440 * below may cause the object to get freed out from
2441 * under us.
2442 */
2443 drm_gem_object_reference(old_obj);
2444
2445 /* i915 uses fences for GPU access to tiled buffers */
2446 if (IS_I965G(dev) || !old_obj_priv->active)
2447 break;
2448
2449 /* This brings the object to the head of the LRU if it
2450 * had been written to. The only way this should
2451 * result in us waiting longer than the expected
2452 * optimal amount of time is if there was a
2453 * fence-using buffer later that was read-only.
2454 */
2455 i915_gem_object_flush_gpu_write_domain(old_obj);
2456 ret = i915_gem_object_wait_rendering(old_obj);
2457 if (ret != 0) {
2458 drm_gem_object_unreference(old_obj);
2459 return ret;
2460 }
2461
2462 break;
2463 }
2464
2465 /*
2466 * Zap this virtual mapping so we can set up a fence again
2467 * for this object next time we need it.
2468 */
2469 i915_gem_release_mmap(old_obj);
2470
2471 i = old_obj_priv->fence_reg;
2472 reg = &dev_priv->fence_regs[i];
2473
2474 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
2475 list_del_init(&old_obj_priv->fence_list);
2476
2477 drm_gem_object_unreference(old_obj);
2478 }
2479 2517
2480 obj_priv->fence_reg = i; 2518 obj_priv->fence_reg = ret;
2519 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2481 list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); 2520 list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2482 2521
2483 reg->obj = obj; 2522 reg->obj = obj;
2484 2523
2485 if (IS_I965G(dev)) 2524 if (IS_GEN6(dev))
2525 sandybridge_write_fence_reg(reg);
2526 else if (IS_I965G(dev))
2486 i965_write_fence_reg(reg); 2527 i965_write_fence_reg(reg);
2487 else if (IS_I9XX(dev)) 2528 else if (IS_I9XX(dev))
2488 i915_write_fence_reg(reg); 2529 i915_write_fence_reg(reg);
2489 else 2530 else
2490 i830_write_fence_reg(reg); 2531 i830_write_fence_reg(reg);
2491 2532
2492 trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode); 2533 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2534 obj_priv->tiling_mode);
2493 2535
2494 return 0; 2536 return 0;
2495} 2537}
@@ -2508,9 +2550,12 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2508 drm_i915_private_t *dev_priv = dev->dev_private; 2550 drm_i915_private_t *dev_priv = dev->dev_private;
2509 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2551 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2510 2552
2511 if (IS_I965G(dev)) 2553 if (IS_GEN6(dev)) {
2554 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2555 (obj_priv->fence_reg * 8), 0);
2556 } else if (IS_I965G(dev)) {
2512 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); 2557 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2513 else { 2558 } else {
2514 uint32_t fence_reg; 2559 uint32_t fence_reg;
2515 2560
2516 if (obj_priv->fence_reg < 8) 2561 if (obj_priv->fence_reg < 8)
@@ -2544,6 +2589,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2544 if (obj_priv->fence_reg == I915_FENCE_REG_NONE) 2589 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2545 return 0; 2590 return 0;
2546 2591
2592 /* If we've changed tiling, GTT-mappings of the object
2593 * need to re-fault to ensure that the correct fence register
2594 * setup is in place.
2595 */
2596 i915_gem_release_mmap(obj);
2597
2547 /* On the i915, GPU access to tiled buffers is via a fence, 2598 /* On the i915, GPU access to tiled buffers is via a fence,
2548 * therefore we must wait for any outstanding access to complete 2599 * therefore we must wait for any outstanding access to complete
2549 * before clearing the fence. 2600 * before clearing the fence.
@@ -2552,12 +2603,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2552 int ret; 2603 int ret;
2553 2604
2554 i915_gem_object_flush_gpu_write_domain(obj); 2605 i915_gem_object_flush_gpu_write_domain(obj);
2555 i915_gem_object_flush_gtt_write_domain(obj);
2556 ret = i915_gem_object_wait_rendering(obj); 2606 ret = i915_gem_object_wait_rendering(obj);
2557 if (ret != 0) 2607 if (ret != 0)
2558 return ret; 2608 return ret;
2559 } 2609 }
2560 2610
2611 i915_gem_object_flush_gtt_write_domain(obj);
2561 i915_gem_clear_fence_reg (obj); 2612 i915_gem_clear_fence_reg (obj);
2562 2613
2563 return 0; 2614 return 0;
@@ -2697,7 +2748,6 @@ static void
2697i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) 2748i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2698{ 2749{
2699 struct drm_device *dev = obj->dev; 2750 struct drm_device *dev = obj->dev;
2700 uint32_t seqno;
2701 uint32_t old_write_domain; 2751 uint32_t old_write_domain;
2702 2752
2703 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) 2753 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
@@ -2706,9 +2756,8 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2706 /* Queue the GPU write cache flushing we need. */ 2756 /* Queue the GPU write cache flushing we need. */
2707 old_write_domain = obj->write_domain; 2757 old_write_domain = obj->write_domain;
2708 i915_gem_flush(dev, 0, obj->write_domain); 2758 i915_gem_flush(dev, 0, obj->write_domain);
2709 seqno = i915_add_request(dev, NULL, obj->write_domain); 2759 (void) i915_add_request(dev, NULL, obj->write_domain);
2710 BUG_ON(obj->write_domain); 2760 BUG_ON(obj->write_domain);
2711 i915_gem_object_move_to_active(obj, seqno);
2712 2761
2713 trace_i915_gem_object_change_domain(obj, 2762 trace_i915_gem_object_change_domain(obj,
2714 obj->read_domains, 2763 obj->read_domains,
@@ -3247,7 +3296,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3247 obj_priv->tiling_mode != I915_TILING_NONE; 3296 obj_priv->tiling_mode != I915_TILING_NONE;
3248 3297
3249 /* Check fence reg constraints and rebind if necessary */ 3298 /* Check fence reg constraints and rebind if necessary */
3250 if (need_fence && !i915_obj_fenceable(dev, obj)) 3299 if (need_fence && !i915_gem_object_fence_offset_ok(obj,
3300 obj_priv->tiling_mode))
3251 i915_gem_object_unbind(obj); 3301 i915_gem_object_unbind(obj);
3252 3302
3253 /* Choose the GTT offset for our buffer and put it there. */ 3303 /* Choose the GTT offset for our buffer and put it there. */
@@ -3317,6 +3367,16 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3317 } 3367 }
3318 3368
3319 /* Validate that the target is in a valid r/w GPU domain */ 3369 /* Validate that the target is in a valid r/w GPU domain */
3370 if (reloc->write_domain & (reloc->write_domain - 1)) {
3371 DRM_ERROR("reloc with multiple write domains: "
3372 "obj %p target %d offset %d "
3373 "read %08x write %08x",
3374 obj, reloc->target_handle,
3375 (int) reloc->offset,
3376 reloc->read_domains,
3377 reloc->write_domain);
3378 return -EINVAL;
3379 }
3320 if (reloc->write_domain & I915_GEM_DOMAIN_CPU || 3380 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3321 reloc->read_domains & I915_GEM_DOMAIN_CPU) { 3381 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3322 DRM_ERROR("reloc with read/write CPU domains: " 3382 DRM_ERROR("reloc with read/write CPU domains: "
@@ -4445,8 +4505,7 @@ int
4445i915_gem_idle(struct drm_device *dev) 4505i915_gem_idle(struct drm_device *dev)
4446{ 4506{
4447 drm_i915_private_t *dev_priv = dev->dev_private; 4507 drm_i915_private_t *dev_priv = dev->dev_private;
4448 uint32_t seqno, cur_seqno, last_seqno; 4508 int ret;
4449 int stuck, ret;
4450 4509
4451 mutex_lock(&dev->struct_mutex); 4510 mutex_lock(&dev->struct_mutex);
4452 4511
@@ -4455,115 +4514,36 @@ i915_gem_idle(struct drm_device *dev)
4455 return 0; 4514 return 0;
4456 } 4515 }
4457 4516
4458 /* Hack! Don't let anybody do execbuf while we don't control the chip. 4517 ret = i915_gpu_idle(dev);
4459 * We need to replace this with a semaphore, or something. 4518 if (ret) {
4460 */
4461 dev_priv->mm.suspended = 1;
4462 del_timer(&dev_priv->hangcheck_timer);
4463
4464 /* Cancel the retire work handler, wait for it to finish if running
4465 */
4466 mutex_unlock(&dev->struct_mutex);
4467 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4468 mutex_lock(&dev->struct_mutex);
4469
4470 i915_kernel_lost_context(dev);
4471
4472 /* Flush the GPU along with all non-CPU write domains
4473 */
4474 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
4475 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
4476
4477 if (seqno == 0) {
4478 mutex_unlock(&dev->struct_mutex); 4519 mutex_unlock(&dev->struct_mutex);
4479 return -ENOMEM; 4520 return ret;
4480 } 4521 }
4481 4522
4482 dev_priv->mm.waiting_gem_seqno = seqno; 4523 /* Under UMS, be paranoid and evict. */
4483 last_seqno = 0; 4524 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
4484 stuck = 0; 4525 ret = i915_gem_evict_from_inactive_list(dev);
4485 for (;;) { 4526 if (ret) {
4486 cur_seqno = i915_get_gem_seqno(dev); 4527 mutex_unlock(&dev->struct_mutex);
4487 if (i915_seqno_passed(cur_seqno, seqno)) 4528 return ret;
4488 break;
4489 if (last_seqno == cur_seqno) {
4490 if (stuck++ > 100) {
4491 DRM_ERROR("hardware wedged\n");
4492 atomic_set(&dev_priv->mm.wedged, 1);
4493 DRM_WAKEUP(&dev_priv->irq_queue);
4494 break;
4495 }
4496 } 4529 }
4497 msleep(10);
4498 last_seqno = cur_seqno;
4499 }
4500 dev_priv->mm.waiting_gem_seqno = 0;
4501
4502 i915_gem_retire_requests(dev);
4503
4504 spin_lock(&dev_priv->mm.active_list_lock);
4505 if (!atomic_read(&dev_priv->mm.wedged)) {
4506 /* Active and flushing should now be empty as we've
4507 * waited for a sequence higher than any pending execbuffer
4508 */
4509 WARN_ON(!list_empty(&dev_priv->mm.active_list));
4510 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
4511 /* Request should now be empty as we've also waited
4512 * for the last request in the list
4513 */
4514 WARN_ON(!list_empty(&dev_priv->mm.request_list));
4515 } 4530 }
4516 4531
4517 /* Empty the active and flushing lists to inactive. If there's 4532 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4518 * anything left at this point, it means that we're wedged and 4533 * We need to replace this with a semaphore, or something.
4519 * nothing good's going to happen by leaving them there. So strip 4534 * And not confound mm.suspended!
4520 * the GPU domains and just stuff them onto inactive.
4521 */ 4535 */
4522 while (!list_empty(&dev_priv->mm.active_list)) { 4536 dev_priv->mm.suspended = 1;
4523 struct drm_gem_object *obj; 4537 del_timer(&dev_priv->hangcheck_timer);
4524 uint32_t old_write_domain;
4525
4526 obj = list_first_entry(&dev_priv->mm.active_list,
4527 struct drm_i915_gem_object,
4528 list)->obj;
4529 old_write_domain = obj->write_domain;
4530 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4531 i915_gem_object_move_to_inactive(obj);
4532
4533 trace_i915_gem_object_change_domain(obj,
4534 obj->read_domains,
4535 old_write_domain);
4536 }
4537 spin_unlock(&dev_priv->mm.active_list_lock);
4538
4539 while (!list_empty(&dev_priv->mm.flushing_list)) {
4540 struct drm_gem_object *obj;
4541 uint32_t old_write_domain;
4542
4543 obj = list_first_entry(&dev_priv->mm.flushing_list,
4544 struct drm_i915_gem_object,
4545 list)->obj;
4546 old_write_domain = obj->write_domain;
4547 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4548 i915_gem_object_move_to_inactive(obj);
4549
4550 trace_i915_gem_object_change_domain(obj,
4551 obj->read_domains,
4552 old_write_domain);
4553 }
4554
4555
4556 /* Move all inactive buffers out of the GTT. */
4557 ret = i915_gem_evict_from_inactive_list(dev);
4558 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
4559 if (ret) {
4560 mutex_unlock(&dev->struct_mutex);
4561 return ret;
4562 }
4563 4538
4539 i915_kernel_lost_context(dev);
4564 i915_gem_cleanup_ringbuffer(dev); 4540 i915_gem_cleanup_ringbuffer(dev);
4541
4565 mutex_unlock(&dev->struct_mutex); 4542 mutex_unlock(&dev->struct_mutex);
4566 4543
4544 /* Cancel the retire work handler, which should be idle now. */
4545 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4546
4567 return 0; 4547 return 0;
4568} 4548}
4569 4549
@@ -4607,8 +4587,13 @@ i915_gem_init_hws(struct drm_device *dev)
4607 } 4587 }
4608 dev_priv->hws_obj = obj; 4588 dev_priv->hws_obj = obj;
4609 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 4589 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4610 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 4590 if (IS_GEN6(dev)) {
4611 I915_READ(HWS_PGA); /* posting read */ 4591 I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
4592 I915_READ(HWS_PGA_GEN6); /* posting read */
4593 } else {
4594 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4595 I915_READ(HWS_PGA); /* posting read */
4596 }
4612 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); 4597 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4613 4598
4614 return 0; 4599 return 0;
@@ -4850,7 +4835,8 @@ i915_gem_load(struct drm_device *dev)
4850 spin_unlock(&shrink_list_lock); 4835 spin_unlock(&shrink_list_lock);
4851 4836
4852 /* Old X drivers will take 0-2 for front, back, depth buffers */ 4837 /* Old X drivers will take 0-2 for front, back, depth buffers */
4853 dev_priv->fence_reg_start = 3; 4838 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4839 dev_priv->fence_reg_start = 3;
4854 4840
4855 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 4841 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4856 dev_priv->num_fence_regs = 16; 4842 dev_priv->num_fence_regs = 16;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index df278b2685bf..b5c55d88ff76 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,8 +25,6 @@
25 * 25 *
26 */ 26 */
27 27
28#include <linux/acpi.h>
29#include <linux/pnp.h>
30#include "linux/string.h" 28#include "linux/string.h"
31#include "linux/bitops.h" 29#include "linux/bitops.h"
32#include "drmP.h" 30#include "drmP.h"
@@ -83,120 +81,6 @@
83 * to match what the GPU expects. 81 * to match what the GPU expects.
84 */ 82 */
85 83
86#define MCHBAR_I915 0x44
87#define MCHBAR_I965 0x48
88#define MCHBAR_SIZE (4*4096)
89
90#define DEVEN_REG 0x54
91#define DEVEN_MCHBAR_EN (1 << 28)
92
93/* Allocate space for the MCH regs if needed, return nonzero on error */
94static int
95intel_alloc_mchbar_resource(struct drm_device *dev)
96{
97 drm_i915_private_t *dev_priv = dev->dev_private;
98 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
99 u32 temp_lo, temp_hi = 0;
100 u64 mchbar_addr;
101 int ret = 0;
102
103 if (IS_I965G(dev))
104 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
105 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
106 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
107
108 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
109#ifdef CONFIG_PNP
110 if (mchbar_addr &&
111 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
112 ret = 0;
113 goto out;
114 }
115#endif
116
117 /* Get some space for it */
118 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
119 MCHBAR_SIZE, MCHBAR_SIZE,
120 PCIBIOS_MIN_MEM,
121 0, pcibios_align_resource,
122 dev_priv->bridge_dev);
123 if (ret) {
124 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
125 dev_priv->mch_res.start = 0;
126 goto out;
127 }
128
129 if (IS_I965G(dev))
130 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
131 upper_32_bits(dev_priv->mch_res.start));
132
133 pci_write_config_dword(dev_priv->bridge_dev, reg,
134 lower_32_bits(dev_priv->mch_res.start));
135out:
136 return ret;
137}
138
139/* Setup MCHBAR if possible, return true if we should disable it again */
140static bool
141intel_setup_mchbar(struct drm_device *dev)
142{
143 drm_i915_private_t *dev_priv = dev->dev_private;
144 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
145 u32 temp;
146 bool need_disable = false, enabled;
147
148 if (IS_I915G(dev) || IS_I915GM(dev)) {
149 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
150 enabled = !!(temp & DEVEN_MCHBAR_EN);
151 } else {
152 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
153 enabled = temp & 1;
154 }
155
156 /* If it's already enabled, don't have to do anything */
157 if (enabled)
158 goto out;
159
160 if (intel_alloc_mchbar_resource(dev))
161 goto out;
162
163 need_disable = true;
164
165 /* Space is allocated or reserved, so enable it. */
166 if (IS_I915G(dev) || IS_I915GM(dev)) {
167 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
168 temp | DEVEN_MCHBAR_EN);
169 } else {
170 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
171 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
172 }
173out:
174 return need_disable;
175}
176
177static void
178intel_teardown_mchbar(struct drm_device *dev, bool disable)
179{
180 drm_i915_private_t *dev_priv = dev->dev_private;
181 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
182 u32 temp;
183
184 if (disable) {
185 if (IS_I915G(dev) || IS_I915GM(dev)) {
186 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
187 temp &= ~DEVEN_MCHBAR_EN;
188 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
189 } else {
190 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
191 temp &= ~1;
192 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
193 }
194 }
195
196 if (dev_priv->mch_res.start)
197 release_resource(&dev_priv->mch_res);
198}
199
200/** 84/**
201 * Detects bit 6 swizzling of address lookup between IGD access and CPU 85 * Detects bit 6 swizzling of address lookup between IGD access and CPU
202 * access through main memory. 86 * access through main memory.
@@ -207,9 +91,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
207 drm_i915_private_t *dev_priv = dev->dev_private; 91 drm_i915_private_t *dev_priv = dev->dev_private;
208 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
209 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
210 bool need_disable;
211 94
212 if (IS_IRONLAKE(dev)) { 95 if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
213 /* On Ironlake whatever DRAM config, GPU always do 96 /* On Ironlake whatever DRAM config, GPU always do
214 * same swizzling setup. 97 * same swizzling setup.
215 */ 98 */
@@ -224,9 +107,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
224 } else if (IS_MOBILE(dev)) { 107 } else if (IS_MOBILE(dev)) {
225 uint32_t dcc; 108 uint32_t dcc;
226 109
227 /* Try to make sure MCHBAR is enabled before poking at it */
228 need_disable = intel_setup_mchbar(dev);
229
230 /* On mobile 9xx chipsets, channel interleave by the CPU is 110 /* On mobile 9xx chipsets, channel interleave by the CPU is
231 * determined by DCC. For single-channel, neither the CPU 111 * determined by DCC. For single-channel, neither the CPU
232 * nor the GPU do swizzling. For dual channel interleaved, 112 * nor the GPU do swizzling. For dual channel interleaved,
@@ -266,8 +146,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
266 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 146 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
267 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 147 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
268 } 148 }
269
270 intel_teardown_mchbar(dev, need_disable);
271 } else { 149 } else {
272 /* The 965, G33, and newer, have a very flexible memory 150 /* The 965, G33, and newer, have a very flexible memory
273 * configuration. It will enable dual-channel mode 151 * configuration. It will enable dual-channel mode
@@ -302,39 +180,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
302 dev_priv->mm.bit_6_swizzle_y = swizzle_y; 180 dev_priv->mm.bit_6_swizzle_y = swizzle_y;
303} 181}
304 182
305
306/**
307 * Returns whether an object is currently fenceable. If not, it may need
308 * to be unbound and have its pitch adjusted.
309 */
310bool
311i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
312{
313 struct drm_i915_gem_object *obj_priv = obj->driver_private;
314
315 if (IS_I965G(dev)) {
316 /* The 965 can have fences at any page boundary. */
317 if (obj->size & 4095)
318 return false;
319 return true;
320 } else if (IS_I9XX(dev)) {
321 if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
322 return false;
323 } else {
324 if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
325 return false;
326 }
327
328 /* Power of two sized... */
329 if (obj->size & (obj->size - 1))
330 return false;
331
332 /* Objects must be size aligned as well */
333 if (obj_priv->gtt_offset & (obj->size - 1))
334 return false;
335 return true;
336}
337
 338/* Check pitch constraints for all chips & tiling formats */ 183/* Check pitch constraints for all chips & tiling formats */
339bool 184bool
340i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) 185i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
@@ -391,7 +236,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
391 return true; 236 return true;
392} 237}
393 238
394static bool 239bool
395i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) 240i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
396{ 241{
397 struct drm_device *dev = obj->dev; 242 struct drm_device *dev = obj->dev;
@@ -438,9 +283,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
438 obj_priv = obj->driver_private; 283 obj_priv = obj->driver_private;
439 284
440 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { 285 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
441 mutex_lock(&dev->struct_mutex); 286 drm_gem_object_unreference_unlocked(obj);
442 drm_gem_object_unreference(obj);
443 mutex_unlock(&dev->struct_mutex);
444 return -EINVAL; 287 return -EINVAL;
445 } 288 }
446 289
@@ -493,12 +336,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
493 goto err; 336 goto err;
494 } 337 }
495 338
496 /* If we've changed tiling, GTT-mappings of the object
497 * need to re-fault to ensure that the correct fence register
498 * setup is in place.
499 */
500 i915_gem_release_mmap(obj);
501
502 obj_priv->tiling_mode = args->tiling_mode; 339 obj_priv->tiling_mode = args->tiling_mode;
503 obj_priv->stride = args->stride; 340 obj_priv->stride = args->stride;
504 } 341 }
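
The error path in i915_gem_set_tiling() above is one of several spots in this pull (the cursor and framebuffer paths in intel_display.c further down are others) where an open-coded lock/unreference/unlock sequence is collapsed into drm_gem_object_unreference_unlocked(). A minimal self-contained analogue of what the helper does for its caller, assuming nothing about the real drm_gem.c implementation beyond the semantics visible in these hunks:

#include <pthread.h>

struct obj { int refcount; };

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold struct_mutex, as with drm_gem_object_unreference() */
static void obj_unreference(struct obj *o)
{
        o->refcount--;
}

/* the lock/drop/unlock dance the callers used to open-code */
static void obj_unreference_unlocked(struct obj *o)
{
        pthread_mutex_lock(&struct_mutex);
        obj_unreference(o);
        pthread_mutex_unlock(&struct_mutex);
}
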
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a17d6bdfe63e..5388354da0d1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -166,7 +166,7 @@ void intel_enable_asle (struct drm_device *dev)
166{ 166{
167 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 167 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
168 168
169 if (IS_IRONLAKE(dev)) 169 if (HAS_PCH_SPLIT(dev))
170 ironlake_enable_display_irq(dev_priv, DE_GSE); 170 ironlake_enable_display_irq(dev_priv, DE_GSE);
171 else 171 else
172 i915_enable_pipestat(dev_priv, 1, 172 i915_enable_pipestat(dev_priv, 1,
@@ -269,6 +269,57 @@ static void i915_hotplug_work_func(struct work_struct *work)
269 drm_sysfs_hotplug_event(dev); 269 drm_sysfs_hotplug_event(dev);
270} 270}
271 271
272static void i915_handle_rps_change(struct drm_device *dev)
273{
274 drm_i915_private_t *dev_priv = dev->dev_private;
275 u32 busy_up, busy_down, max_avg, min_avg;
276 u16 rgvswctl;
277 u8 new_delay = dev_priv->cur_delay;
278
279 I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
280 busy_up = I915_READ(RCPREVBSYTUPAVG);
281 busy_down = I915_READ(RCPREVBSYTDNAVG);
282 max_avg = I915_READ(RCBMAXAVG);
283 min_avg = I915_READ(RCBMINAVG);
284
285 /* Handle RCS change request from hw */
286 if (busy_up > max_avg) {
287 if (dev_priv->cur_delay != dev_priv->max_delay)
288 new_delay = dev_priv->cur_delay - 1;
289 if (new_delay < dev_priv->max_delay)
290 new_delay = dev_priv->max_delay;
291 } else if (busy_down < min_avg) {
292 if (dev_priv->cur_delay != dev_priv->min_delay)
293 new_delay = dev_priv->cur_delay + 1;
294 if (new_delay > dev_priv->min_delay)
295 new_delay = dev_priv->min_delay;
296 }
297
298 DRM_DEBUG("rps change requested: %d -> %d\n",
299 dev_priv->cur_delay, new_delay);
300
301 rgvswctl = I915_READ(MEMSWCTL);
302 if (rgvswctl & MEMCTL_CMD_STS) {
303 DRM_ERROR("gpu busy, RCS change rejected\n");
304 return; /* still busy with another command */
305 }
306
307 /* Program the new state */
308 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
309 (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
310 I915_WRITE(MEMSWCTL, rgvswctl);
311 POSTING_READ(MEMSWCTL);
312
313 rgvswctl |= MEMCTL_CMD_STS;
314 I915_WRITE(MEMSWCTL, rgvswctl);
315
316 dev_priv->cur_delay = new_delay;
317
318 DRM_DEBUG("rps changed\n");
319
320 return;
321}
322
272irqreturn_t ironlake_irq_handler(struct drm_device *dev) 323irqreturn_t ironlake_irq_handler(struct drm_device *dev)
273{ 324{
274 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 325 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -331,6 +382,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
331 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 382 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
332 } 383 }
333 384
385 if (de_iir & DE_PCU_EVENT) {
386 I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
387 i915_handle_rps_change(dev);
388 }
389
334 /* should clear PCH hotplug event before clear CPU irq */ 390 /* should clear PCH hotplug event before clear CPU irq */
335 I915_WRITE(SDEIIR, pch_iir); 391 I915_WRITE(SDEIIR, pch_iir);
336 I915_WRITE(GTIIR, gt_iir); 392 I915_WRITE(GTIIR, gt_iir);
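
The new i915_handle_rps_change() above nudges a frequency "delay" value one step at a time based on the hardware's busy-up/busy-down averages. Note the inverted naming: a smaller delay means a higher frequency, so max_delay is the numeric lower bound and min_delay the numeric upper bound of the clamp. A self-contained sketch of just that step-and-clamp logic (names and the test values are illustrative, not the driver's):

#include <stdio.h>

static unsigned char rps_new_delay(unsigned char cur,
                                   unsigned char max_delay, unsigned char min_delay,
                                   unsigned int busy_up, unsigned int busy_down,
                                   unsigned int max_avg, unsigned int min_avg)
{
        unsigned char new_delay = cur;

        if (busy_up > max_avg) {                /* busier than threshold: speed up */
                if (cur != max_delay)
                        new_delay = cur - 1;
                if (new_delay < max_delay)
                        new_delay = max_delay;
        } else if (busy_down < min_avg) {       /* mostly idle: slow down */
                if (cur != min_delay)
                        new_delay = cur + 1;
                if (new_delay > min_delay)
                        new_delay = min_delay;
        }
        return new_delay;
}

int main(void)
{
        /* cur=5 with bounds 2..10: a busy sample steps toward 2, an idle one toward 10 */
        printf("%u\n", rps_new_delay(5, 2, 10, 900, 0, 800, 100));      /* 4 */
        printf("%u\n", rps_new_delay(5, 2, 10, 0, 50, 800, 100));       /* 6 */
        return 0;
}
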
@@ -376,6 +432,121 @@ static void i915_error_work_func(struct work_struct *work)
376 } 432 }
377} 433}
378 434
435static struct drm_i915_error_object *
436i915_error_object_create(struct drm_device *dev,
437 struct drm_gem_object *src)
438{
439 struct drm_i915_error_object *dst;
440 struct drm_i915_gem_object *src_priv;
441 int page, page_count;
442
443 if (src == NULL)
444 return NULL;
445
446 src_priv = src->driver_private;
447 if (src_priv->pages == NULL)
448 return NULL;
449
450 page_count = src->size / PAGE_SIZE;
451
452 dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
453 if (dst == NULL)
454 return NULL;
455
456 for (page = 0; page < page_count; page++) {
457 void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
458 if (d == NULL)
459 goto unwind;
460 s = kmap_atomic(src_priv->pages[page], KM_USER0);
461 memcpy(d, s, PAGE_SIZE);
462 kunmap_atomic(s, KM_USER0);
463 dst->pages[page] = d;
464 }
465 dst->page_count = page_count;
466 dst->gtt_offset = src_priv->gtt_offset;
467
468 return dst;
469
470unwind:
471 while (page--)
472 kfree(dst->pages[page]);
473 kfree(dst);
474 return NULL;
475}
476
477static void
478i915_error_object_free(struct drm_i915_error_object *obj)
479{
480 int page;
481
482 if (obj == NULL)
483 return;
484
485 for (page = 0; page < obj->page_count; page++)
486 kfree(obj->pages[page]);
487
488 kfree(obj);
489}
490
491static void
492i915_error_state_free(struct drm_device *dev,
493 struct drm_i915_error_state *error)
494{
495 i915_error_object_free(error->batchbuffer[0]);
496 i915_error_object_free(error->batchbuffer[1]);
497 i915_error_object_free(error->ringbuffer);
498 kfree(error->active_bo);
499 kfree(error);
500}
501
502static u32
503i915_get_bbaddr(struct drm_device *dev, u32 *ring)
504{
505 u32 cmd;
506
507 if (IS_I830(dev) || IS_845G(dev))
508 cmd = MI_BATCH_BUFFER;
509 else if (IS_I965G(dev))
510 cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
511 MI_BATCH_NON_SECURE_I965);
512 else
513 cmd = (MI_BATCH_BUFFER_START | (2 << 6));
514
515 return ring[0] == cmd ? ring[1] : 0;
516}
517
518static u32
519i915_ringbuffer_last_batch(struct drm_device *dev)
520{
521 struct drm_i915_private *dev_priv = dev->dev_private;
522 u32 head, bbaddr;
523 u32 *ring;
524
525 /* Locate the current position in the ringbuffer and walk back
526 * to find the most recently dispatched batch buffer.
527 */
528 bbaddr = 0;
529 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
530 ring = (u32 *)(dev_priv->ring.virtual_start + head);
531
532 while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
533 bbaddr = i915_get_bbaddr(dev, ring);
534 if (bbaddr)
535 break;
536 }
537
538 if (bbaddr == 0) {
539 ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
540 while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
541 bbaddr = i915_get_bbaddr(dev, ring);
542 if (bbaddr)
543 break;
544 }
545 }
546
547 return bbaddr;
548}
549
379/** 550/**
380 * i915_capture_error_state - capture an error record for later analysis 551 * i915_capture_error_state - capture an error record for later analysis
381 * @dev: drm device 552 * @dev: drm device
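
i915_error_object_create() above snapshots a GEM object's backing pages from interrupt context, so every allocation is GFP_ATOMIC and a failure part-way through has to unwind the pages already copied. A self-contained user-space sketch of that allocate/copy/unwind shape (malloc and memcpy stand in for the atomic kmalloc and kmap_atomic; nothing here is driver API):

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct snapshot {
        int page_count;
        void *pages[];                  /* flexible array, as in the error object */
};

static struct snapshot *snapshot_create(void * const *src, int page_count)
{
        struct snapshot *dst;
        int page;

        dst = malloc(sizeof(*dst) + page_count * sizeof(void *));
        if (dst == NULL)
                return NULL;

        for (page = 0; page < page_count; page++) {
                void *d = malloc(PAGE_SIZE);
                if (d == NULL)
                        goto unwind;
                memcpy(d, src[page], PAGE_SIZE);
                dst->pages[page] = d;
        }
        dst->page_count = page_count;
        return dst;

unwind:
        while (page--)                  /* free only the pages already copied */
                free(dst->pages[page]);
        free(dst);
        return NULL;
}
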
@@ -388,19 +559,26 @@ static void i915_error_work_func(struct work_struct *work)
388static void i915_capture_error_state(struct drm_device *dev) 559static void i915_capture_error_state(struct drm_device *dev)
389{ 560{
390 struct drm_i915_private *dev_priv = dev->dev_private; 561 struct drm_i915_private *dev_priv = dev->dev_private;
562 struct drm_i915_gem_object *obj_priv;
391 struct drm_i915_error_state *error; 563 struct drm_i915_error_state *error;
564 struct drm_gem_object *batchbuffer[2];
392 unsigned long flags; 565 unsigned long flags;
566 u32 bbaddr;
567 int count;
393 568
394 spin_lock_irqsave(&dev_priv->error_lock, flags); 569 spin_lock_irqsave(&dev_priv->error_lock, flags);
395 if (dev_priv->first_error) 570 error = dev_priv->first_error;
396 goto out; 571 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
572 if (error)
573 return;
397 574
398 error = kmalloc(sizeof(*error), GFP_ATOMIC); 575 error = kmalloc(sizeof(*error), GFP_ATOMIC);
399 if (!error) { 576 if (!error) {
400 DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n"); 577 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
401 goto out; 578 return;
402 } 579 }
403 580
581 error->seqno = i915_get_gem_seqno(dev);
404 error->eir = I915_READ(EIR); 582 error->eir = I915_READ(EIR);
405 error->pgtbl_er = I915_READ(PGTBL_ER); 583 error->pgtbl_er = I915_READ(PGTBL_ER);
406 error->pipeastat = I915_READ(PIPEASTAT); 584 error->pipeastat = I915_READ(PIPEASTAT);
@@ -411,6 +589,7 @@ static void i915_capture_error_state(struct drm_device *dev)
411 error->ipehr = I915_READ(IPEHR); 589 error->ipehr = I915_READ(IPEHR);
412 error->instdone = I915_READ(INSTDONE); 590 error->instdone = I915_READ(INSTDONE);
413 error->acthd = I915_READ(ACTHD); 591 error->acthd = I915_READ(ACTHD);
592 error->bbaddr = 0;
414 } else { 593 } else {
415 error->ipeir = I915_READ(IPEIR_I965); 594 error->ipeir = I915_READ(IPEIR_I965);
416 error->ipehr = I915_READ(IPEHR_I965); 595 error->ipehr = I915_READ(IPEHR_I965);
@@ -418,14 +597,101 @@ static void i915_capture_error_state(struct drm_device *dev)
418 error->instps = I915_READ(INSTPS); 597 error->instps = I915_READ(INSTPS);
419 error->instdone1 = I915_READ(INSTDONE1); 598 error->instdone1 = I915_READ(INSTDONE1);
420 error->acthd = I915_READ(ACTHD_I965); 599 error->acthd = I915_READ(ACTHD_I965);
600 error->bbaddr = I915_READ64(BB_ADDR);
421 } 601 }
422 602
423 do_gettimeofday(&error->time); 603 bbaddr = i915_ringbuffer_last_batch(dev);
604
605 /* Grab the current batchbuffer, most likely to have crashed. */
606 batchbuffer[0] = NULL;
607 batchbuffer[1] = NULL;
608 count = 0;
609 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
610 struct drm_gem_object *obj = obj_priv->obj;
611
612 if (batchbuffer[0] == NULL &&
613 bbaddr >= obj_priv->gtt_offset &&
614 bbaddr < obj_priv->gtt_offset + obj->size)
615 batchbuffer[0] = obj;
616
617 if (batchbuffer[1] == NULL &&
618 error->acthd >= obj_priv->gtt_offset &&
619 error->acthd < obj_priv->gtt_offset + obj->size &&
620 batchbuffer[0] != obj)
621 batchbuffer[1] = obj;
622
623 count++;
624 }
424 625
425 dev_priv->first_error = error; 626 /* We need to copy these to an anonymous buffer as the simplest
 627 * method to avoid being overwritten by userspace.
628 */
629 error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
630 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
631
632 /* Record the ringbuffer */
633 error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
634
635 /* Record buffers on the active list. */
636 error->active_bo = NULL;
637 error->active_bo_count = 0;
638
639 if (count)
640 error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
641 GFP_ATOMIC);
642
643 if (error->active_bo) {
644 int i = 0;
645 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
646 struct drm_gem_object *obj = obj_priv->obj;
647
648 error->active_bo[i].size = obj->size;
649 error->active_bo[i].name = obj->name;
650 error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
651 error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
652 error->active_bo[i].read_domains = obj->read_domains;
653 error->active_bo[i].write_domain = obj->write_domain;
654 error->active_bo[i].fence_reg = obj_priv->fence_reg;
655 error->active_bo[i].pinned = 0;
656 if (obj_priv->pin_count > 0)
657 error->active_bo[i].pinned = 1;
658 if (obj_priv->user_pin_count > 0)
659 error->active_bo[i].pinned = -1;
660 error->active_bo[i].tiling = obj_priv->tiling_mode;
661 error->active_bo[i].dirty = obj_priv->dirty;
662 error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
663
664 if (++i == count)
665 break;
666 }
667 error->active_bo_count = i;
668 }
669
670 do_gettimeofday(&error->time);
426 671
427out: 672 spin_lock_irqsave(&dev_priv->error_lock, flags);
673 if (dev_priv->first_error == NULL) {
674 dev_priv->first_error = error;
675 error = NULL;
676 }
428 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 677 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
678
679 if (error)
680 i915_error_state_free(dev, error);
681}
682
683void i915_destroy_error_state(struct drm_device *dev)
684{
685 struct drm_i915_private *dev_priv = dev->dev_private;
686 struct drm_i915_error_state *error;
687
688 spin_lock(&dev_priv->error_lock);
689 error = dev_priv->first_error;
690 dev_priv->first_error = NULL;
691 spin_unlock(&dev_priv->error_lock);
692
693 if (error)
694 i915_error_state_free(dev, error);
429} 695}
430 696
431/** 697/**
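
The rework above also changes the locking shape of i915_capture_error_state(): error_lock is no longer held while the (now much larger) snapshot is built. The function peeks at first_error, drops the lock, does the expensive copying, then re-takes the lock and only installs its snapshot if nobody beat it to it, freeing the losing copy. A compact pthread-based sketch of that check/build/re-check pattern (stand-in types and names, not driver code; the driver uses a spinlock with interrupts disabled):

#include <pthread.h>
#include <stdlib.h>

struct error_state { int placeholder; };

static pthread_mutex_t error_lock = PTHREAD_MUTEX_INITIALIZER;
static struct error_state *first_error;

static void capture_error_state(void)
{
        struct error_state *error;

        pthread_mutex_lock(&error_lock);
        error = first_error;                    /* already captured one? */
        pthread_mutex_unlock(&error_lock);
        if (error)
                return;

        error = calloc(1, sizeof(*error));      /* expensive work, lock dropped */
        if (error == NULL)
                return;

        pthread_mutex_lock(&error_lock);
        if (first_error == NULL) {
                first_error = error;            /* we won the race */
                error = NULL;
        }
        pthread_mutex_unlock(&error_lock);

        free(error);                            /* lost the race: discard our copy */
}
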
@@ -576,7 +842,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
576 842
577 atomic_inc(&dev_priv->irq_received); 843 atomic_inc(&dev_priv->irq_received);
578 844
579 if (IS_IRONLAKE(dev)) 845 if (HAS_PCH_SPLIT(dev))
580 return ironlake_irq_handler(dev); 846 return ironlake_irq_handler(dev);
581 847
582 iir = I915_READ(IIR); 848 iir = I915_READ(IIR);
@@ -737,7 +1003,7 @@ void i915_user_irq_get(struct drm_device *dev)
737 1003
738 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1004 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
739 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { 1005 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
740 if (IS_IRONLAKE(dev)) 1006 if (HAS_PCH_SPLIT(dev))
741 ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 1007 ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
742 else 1008 else
743 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 1009 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -753,7 +1019,7 @@ void i915_user_irq_put(struct drm_device *dev)
753 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1019 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
754 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 1020 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
755 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { 1021 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
756 if (IS_IRONLAKE(dev)) 1022 if (HAS_PCH_SPLIT(dev))
757 ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 1023 ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
758 else 1024 else
759 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 1025 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -861,7 +1127,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
861 return -EINVAL; 1127 return -EINVAL;
862 1128
863 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1129 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
864 if (IS_IRONLAKE(dev)) 1130 if (HAS_PCH_SPLIT(dev))
865 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1131 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
866 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1132 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
867 else if (IS_I965G(dev)) 1133 else if (IS_I965G(dev))
@@ -883,7 +1149,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
883 unsigned long irqflags; 1149 unsigned long irqflags;
884 1150
885 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1151 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
886 if (IS_IRONLAKE(dev)) 1152 if (HAS_PCH_SPLIT(dev))
887 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1153 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
888 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1154 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
889 else 1155 else
@@ -897,7 +1163,7 @@ void i915_enable_interrupt (struct drm_device *dev)
897{ 1163{
898 struct drm_i915_private *dev_priv = dev->dev_private; 1164 struct drm_i915_private *dev_priv = dev->dev_private;
899 1165
900 if (!IS_IRONLAKE(dev)) 1166 if (!HAS_PCH_SPLIT(dev))
901 opregion_enable_asle(dev); 1167 opregion_enable_asle(dev);
902 dev_priv->irq_enabled = 1; 1168 dev_priv->irq_enabled = 1;
903} 1169}
@@ -973,7 +1239,11 @@ void i915_hangcheck_elapsed(unsigned long data)
973 struct drm_device *dev = (struct drm_device *)data; 1239 struct drm_device *dev = (struct drm_device *)data;
974 drm_i915_private_t *dev_priv = dev->dev_private; 1240 drm_i915_private_t *dev_priv = dev->dev_private;
975 uint32_t acthd; 1241 uint32_t acthd;
976 1242
1243 /* No reset support on this chip yet. */
1244 if (IS_GEN6(dev))
1245 return;
1246
977 if (!IS_I965G(dev)) 1247 if (!IS_I965G(dev))
978 acthd = I915_READ(ACTHD); 1248 acthd = I915_READ(ACTHD);
979 else 1249 else
@@ -1064,6 +1334,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1064 I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); 1334 I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
1065 (void) I915_READ(SDEIER); 1335 (void) I915_READ(SDEIER);
1066 1336
1337 if (IS_IRONLAKE_M(dev)) {
1338 /* Clear & enable PCU event interrupts */
1339 I915_WRITE(DEIIR, DE_PCU_EVENT);
1340 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1341 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1342 }
1343
1067 return 0; 1344 return 0;
1068} 1345}
1069 1346
@@ -1076,7 +1353,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1076 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 1353 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1077 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 1354 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1078 1355
1079 if (IS_IRONLAKE(dev)) { 1356 if (HAS_PCH_SPLIT(dev)) {
1080 ironlake_irq_preinstall(dev); 1357 ironlake_irq_preinstall(dev);
1081 return; 1358 return;
1082 } 1359 }
@@ -1108,7 +1385,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1108 1385
1109 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1386 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1110 1387
1111 if (IS_IRONLAKE(dev)) 1388 if (HAS_PCH_SPLIT(dev))
1112 return ironlake_irq_postinstall(dev); 1389 return ironlake_irq_postinstall(dev);
1113 1390
1114 /* Unmask the interrupts that we always want on. */ 1391 /* Unmask the interrupts that we always want on. */
@@ -1196,7 +1473,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
1196 1473
1197 dev_priv->vblank_pipe = 0; 1474 dev_priv->vblank_pipe = 0;
1198 1475
1199 if (IS_IRONLAKE(dev)) { 1476 if (HAS_PCH_SPLIT(dev)) {
1200 ironlake_irq_uninstall(dev); 1477 ironlake_irq_uninstall(dev);
1201 return; 1478 return;
1202 } 1479 }
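
Most of the mechanical churn in this file (and in intel_crt.c and intel_display.c below) is the IS_IRONLAKE() to HAS_PCH_SPLIT() conversion, so the existing Ironlake paths also cover the new Sandybridge (gen6) parts, whose display block likewise sits behind a PCH. The macro itself is added to i915_drv.h elsewhere in this pull; as a hedged stand-in for the intent rather than the exact definition:

/* hypothetical stand-in types, only to show what the predicate is meant to answer */
struct chip_info {
        int gen;
        int is_ironlake;
};

static int has_pch_split(const struct chip_info *c)
{
        return c->is_ironlake || c->gen >= 6;
}
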
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ab1bd2d3d3b6..3d59862c7ccd 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -53,6 +53,25 @@
53#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) 53#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
54#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) 54#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
55 55
56#define SNB_GMCH_CTRL 0x50
57#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
58#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
59#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
60#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
61#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
62#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
63#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
64#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
65#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
66#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
67#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
68#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
69#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
70#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
71#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
72#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
73#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
74
56/* PCI config space */ 75/* PCI config space */
57 76
58#define HPLLCC 0xc0 /* 855 only */ 77#define HPLLCC 0xc0 /* 855 only */
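
The SNB_GMCH_* block above amounts to a linear encoding: the field in bits 7:3 of the gen6 GMCH control word counts stolen graphics memory in 32MB units (1 = 32MB up to 0x10 = 512MB). An illustrative decoder; the helper name and its use are assumptions, only the register layout comes from the defines above:

#include <stdint.h>

#define SNB_GMCH_GMS_STOLEN_MASK        0xF8
#define SNB_GMCH_GMS_SHIFT              3

static uint32_t snb_stolen_size_mb(uint8_t gmch_ctrl)
{
        uint32_t gms = (gmch_ctrl & SNB_GMCH_GMS_STOLEN_MASK) >> SNB_GMCH_GMS_SHIFT;

        return gms * 32;        /* 0 = none, 1 = 32MB, ... 0x10 = 512MB */
}
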
@@ -61,6 +80,7 @@
61#define GC_CLOCK_100_200 (1 << 0) 80#define GC_CLOCK_100_200 (1 << 0)
62#define GC_CLOCK_100_133 (2 << 0) 81#define GC_CLOCK_100_133 (2 << 0)
63#define GC_CLOCK_166_250 (3 << 0) 82#define GC_CLOCK_166_250 (3 << 0)
83#define GCFGC2 0xda
64#define GCFGC 0xf0 /* 915+ only */ 84#define GCFGC 0xf0 /* 915+ only */
65#define GC_LOW_FREQUENCY_ENABLE (1 << 7) 85#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
66#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) 86#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
@@ -234,6 +254,9 @@
234#define I965_FENCE_REG_VALID (1<<0) 254#define I965_FENCE_REG_VALID (1<<0)
235#define I965_FENCE_MAX_PITCH_VAL 0x0400 255#define I965_FENCE_MAX_PITCH_VAL 0x0400
236 256
257#define FENCE_REG_SANDYBRIDGE_0 0x100000
258#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
259
237/* 260/*
238 * Instruction and interrupt control regs 261 * Instruction and interrupt control regs
239 */ 262 */
@@ -265,6 +288,7 @@
265#define INSTDONE1 0x0207c /* 965+ only */ 288#define INSTDONE1 0x0207c /* 965+ only */
266#define ACTHD_I965 0x02074 289#define ACTHD_I965 0x02074
267#define HWS_PGA 0x02080 290#define HWS_PGA 0x02080
291#define HWS_PGA_GEN6 0x04080
268#define HWS_ADDRESS_MASK 0xfffff000 292#define HWS_ADDRESS_MASK 0xfffff000
269#define HWS_START_ADDRESS_SHIFT 4 293#define HWS_START_ADDRESS_SHIFT 4
270#define PWRCTXA 0x2088 /* 965GM+ only */ 294#define PWRCTXA 0x2088 /* 965GM+ only */
@@ -282,7 +306,7 @@
282#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 306#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
283#define I915_DISPLAY_PORT_INTERRUPT (1<<17) 307#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
284#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) 308#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
285#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) 309#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
286#define I915_HWB_OOM_INTERRUPT (1<<13) 310#define I915_HWB_OOM_INTERRUPT (1<<13)
287#define I915_SYNC_STATUS_INTERRUPT (1<<12) 311#define I915_SYNC_STATUS_INTERRUPT (1<<12)
288#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) 312#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
@@ -306,11 +330,14 @@
306#define I915_ERROR_MEMORY_REFRESH (1<<1) 330#define I915_ERROR_MEMORY_REFRESH (1<<1)
307#define I915_ERROR_INSTRUCTION (1<<0) 331#define I915_ERROR_INSTRUCTION (1<<0)
308#define INSTPM 0x020c0 332#define INSTPM 0x020c0
333#define INSTPM_SELF_EN (1<<12) /* 915GM only */
309#define ACTHD 0x020c8 334#define ACTHD 0x020c8
310#define FW_BLC 0x020d8 335#define FW_BLC 0x020d8
311#define FW_BLC2 0x020dc 336#define FW_BLC2 0x020dc
312#define FW_BLC_SELF 0x020e0 /* 915+ only */ 337#define FW_BLC_SELF 0x020e0 /* 915+ only */
313#define FW_BLC_SELF_EN (1<<15) 338#define FW_BLC_SELF_EN_MASK (1<<31)
339#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
340#define FW_BLC_SELF_EN (1<<15) /* 945 only */
314#define MM_BURST_LENGTH 0x00700000 341#define MM_BURST_LENGTH 0x00700000
315#define MM_FIFO_WATERMARK 0x0001F000 342#define MM_FIFO_WATERMARK 0x0001F000
316#define LM_BURST_LENGTH 0x00000700 343#define LM_BURST_LENGTH 0x00000700
@@ -324,6 +351,7 @@
324#define CM0_COLOR_EVICT_DISABLE (1<<3) 351#define CM0_COLOR_EVICT_DISABLE (1<<3)
325#define CM0_DEPTH_WRITE_DISABLE (1<<1) 352#define CM0_DEPTH_WRITE_DISABLE (1<<1)
326#define CM0_RC_OP_FLUSH_DISABLE (1<<0) 353#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
354#define BB_ADDR 0x02140 /* 8 bytes */
327#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 355#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
328 356
329 357
@@ -784,10 +812,144 @@
784#define CLKCFG_MEM_800 (3 << 4) 812#define CLKCFG_MEM_800 (3 << 4)
785#define CLKCFG_MEM_MASK (7 << 4) 813#define CLKCFG_MEM_MASK (7 << 4)
786 814
787/** GM965 GM45 render standby register */ 815#define CRSTANDVID 0x11100
788#define MCHBAR_RENDER_STANDBY 0x111B8 816#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
817#define PXVFREQ_PX_MASK 0x7f000000
818#define PXVFREQ_PX_SHIFT 24
819#define VIDFREQ_BASE 0x11110
820#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
821#define VIDFREQ2 0x11114
822#define VIDFREQ3 0x11118
823#define VIDFREQ4 0x1111c
824#define VIDFREQ_P0_MASK 0x1f000000
825#define VIDFREQ_P0_SHIFT 24
826#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
827#define VIDFREQ_P0_CSCLK_SHIFT 20
828#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
829#define VIDFREQ_P0_CRCLK_SHIFT 16
830#define VIDFREQ_P1_MASK 0x00001f00
831#define VIDFREQ_P1_SHIFT 8
832#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
833#define VIDFREQ_P1_CSCLK_SHIFT 4
834#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
835#define INTTOEXT_BASE_ILK 0x11300
836#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */
837#define INTTOEXT_MAP3_SHIFT 24
838#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
839#define INTTOEXT_MAP2_SHIFT 16
840#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
841#define INTTOEXT_MAP1_SHIFT 8
842#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
843#define INTTOEXT_MAP0_SHIFT 0
844#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
845#define MEMSWCTL 0x11170 /* Ironlake only */
846#define MEMCTL_CMD_MASK 0xe000
847#define MEMCTL_CMD_SHIFT 13
848#define MEMCTL_CMD_RCLK_OFF 0
849#define MEMCTL_CMD_RCLK_ON 1
850#define MEMCTL_CMD_CHFREQ 2
851#define MEMCTL_CMD_CHVID 3
852#define MEMCTL_CMD_VMMOFF 4
853#define MEMCTL_CMD_VMMON 5
854#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears
855 when command complete */
856#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
857#define MEMCTL_FREQ_SHIFT 8
858#define MEMCTL_SFCAVM (1<<7)
859#define MEMCTL_TGT_VID_MASK 0x007f
860#define MEMIHYST 0x1117c
861#define MEMINTREN 0x11180 /* 16 bits */
862#define MEMINT_RSEXIT_EN (1<<8)
863#define MEMINT_CX_SUPR_EN (1<<7)
864#define MEMINT_CONT_BUSY_EN (1<<6)
865#define MEMINT_AVG_BUSY_EN (1<<5)
866#define MEMINT_EVAL_CHG_EN (1<<4)
867#define MEMINT_MON_IDLE_EN (1<<3)
868#define MEMINT_UP_EVAL_EN (1<<2)
869#define MEMINT_DOWN_EVAL_EN (1<<1)
870#define MEMINT_SW_CMD_EN (1<<0)
871#define MEMINTRSTR 0x11182 /* 16 bits */
872#define MEM_RSEXIT_MASK 0xc000
873#define MEM_RSEXIT_SHIFT 14
874#define MEM_CONT_BUSY_MASK 0x3000
875#define MEM_CONT_BUSY_SHIFT 12
876#define MEM_AVG_BUSY_MASK 0x0c00
877#define MEM_AVG_BUSY_SHIFT 10
878#define MEM_EVAL_CHG_MASK 0x0300
879#define MEM_EVAL_BUSY_SHIFT 8
880#define MEM_MON_IDLE_MASK 0x00c0
881#define MEM_MON_IDLE_SHIFT 6
882#define MEM_UP_EVAL_MASK 0x0030
883#define MEM_UP_EVAL_SHIFT 4
884#define MEM_DOWN_EVAL_MASK 0x000c
885#define MEM_DOWN_EVAL_SHIFT 2
886#define MEM_SW_CMD_MASK 0x0003
887#define MEM_INT_STEER_GFX 0
888#define MEM_INT_STEER_CMR 1
889#define MEM_INT_STEER_SMI 2
890#define MEM_INT_STEER_SCI 3
891#define MEMINTRSTS 0x11184
892#define MEMINT_RSEXIT (1<<7)
893#define MEMINT_CONT_BUSY (1<<6)
894#define MEMINT_AVG_BUSY (1<<5)
895#define MEMINT_EVAL_CHG (1<<4)
896#define MEMINT_MON_IDLE (1<<3)
897#define MEMINT_UP_EVAL (1<<2)
898#define MEMINT_DOWN_EVAL (1<<1)
899#define MEMINT_SW_CMD (1<<0)
900#define MEMMODECTL 0x11190
901#define MEMMODE_BOOST_EN (1<<31)
902#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
903#define MEMMODE_BOOST_FREQ_SHIFT 24
904#define MEMMODE_IDLE_MODE_MASK 0x00030000
905#define MEMMODE_IDLE_MODE_SHIFT 16
906#define MEMMODE_IDLE_MODE_EVAL 0
907#define MEMMODE_IDLE_MODE_CONT 1
908#define MEMMODE_HWIDLE_EN (1<<15)
909#define MEMMODE_SWMODE_EN (1<<14)
910#define MEMMODE_RCLK_GATE (1<<13)
911#define MEMMODE_HW_UPDATE (1<<12)
912#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
913#define MEMMODE_FSTART_SHIFT 8
914#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
915#define MEMMODE_FMAX_SHIFT 4
916#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
917#define RCBMAXAVG 0x1119c
918#define MEMSWCTL2 0x1119e /* Cantiga only */
919#define SWMEMCMD_RENDER_OFF (0 << 13)
920#define SWMEMCMD_RENDER_ON (1 << 13)
921#define SWMEMCMD_SWFREQ (2 << 13)
922#define SWMEMCMD_TARVID (3 << 13)
923#define SWMEMCMD_VRM_OFF (4 << 13)
924#define SWMEMCMD_VRM_ON (5 << 13)
925#define CMDSTS (1<<12)
926#define SFCAVM (1<<11)
927#define SWFREQ_MASK 0x0380 /* P0-7 */
928#define SWFREQ_SHIFT 7
929#define TARVID_MASK 0x001f
930#define MEMSTAT_CTG 0x111a0
931#define RCBMINAVG 0x111a0
932#define RCUPEI 0x111b0
933#define RCDNEI 0x111b4
934#define MCHBAR_RENDER_STANDBY 0x111b8
789#define RCX_SW_EXIT (1<<23) 935#define RCX_SW_EXIT (1<<23)
790#define RSX_STATUS_MASK 0x00700000 936#define RSX_STATUS_MASK 0x00700000
937#define VIDCTL 0x111c0
938#define VIDSTS 0x111c8
939#define VIDSTART 0x111cc /* 8 bits */
940#define MEMSTAT_ILK 0x111f8
941#define MEMSTAT_VID_MASK 0x7f00
942#define MEMSTAT_VID_SHIFT 8
943#define MEMSTAT_PSTATE_MASK 0x00f8
944#define MEMSTAT_PSTATE_SHIFT 3
945#define MEMSTAT_MON_ACTV (1<<2)
946#define MEMSTAT_SRC_CTL_MASK 0x0003
947#define MEMSTAT_SRC_CTL_CORE 0
948#define MEMSTAT_SRC_CTL_TRB 1
949#define MEMSTAT_SRC_CTL_THM 2
950#define MEMSTAT_SRC_CTL_STDBY 3
951#define RCPREVBSYTUPAVG 0x113b8
952#define RCPREVBSYTDNAVG 0x113bc
791#define PEG_BAND_GAP_DATA 0x14d68 953#define PEG_BAND_GAP_DATA 0x14d68
792 954
793/* 955/*
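
The register block just added is what the Ironlake dynamic-performance code in this pull programs; the MEMSWCTL fields in particular are the ones composed in i915_handle_rps_change() earlier in this diff. An illustrative helper showing how command, jitter/frequency and SFCAVM combine into a single value (the driver then sets MEMCTL_CMD_STS in a follow-up write to fire the command, and hardware clears that bit on completion):

#include <stdint.h>

#define MEMCTL_CMD_SHIFT        13
#define MEMCTL_CMD_CHFREQ       2
#define MEMCTL_FREQ_SHIFT       8
#define MEMCTL_SFCAVM           (1u << 7)

static uint16_t memswctl_chfreq(uint8_t delay)
{
        return (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
               ((uint16_t)delay << MEMCTL_FREQ_SHIFT) |
               MEMCTL_SFCAVM;
}
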
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a3b90c9561dc..ac0d1a73ac22 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -682,6 +682,8 @@ void i915_restore_display(struct drm_device *dev)
682 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); 682 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
683 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); 683 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
684 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); 684 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
685 I915_WRITE(MCHBAR_RENDER_STANDBY,
686 dev_priv->saveMCHBAR_RENDER_STANDBY);
685 } else { 687 } else {
686 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); 688 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
687 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); 689 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
@@ -745,11 +747,16 @@ int i915_save_state(struct drm_device *dev)
745 dev_priv->saveGTIMR = I915_READ(GTIMR); 747 dev_priv->saveGTIMR = I915_READ(GTIMR);
746 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); 748 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
747 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); 749 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
750 dev_priv->saveMCHBAR_RENDER_STANDBY =
751 I915_READ(MCHBAR_RENDER_STANDBY);
748 } else { 752 } else {
749 dev_priv->saveIER = I915_READ(IER); 753 dev_priv->saveIER = I915_READ(IER);
750 dev_priv->saveIMR = I915_READ(IMR); 754 dev_priv->saveIMR = I915_READ(IMR);
751 } 755 }
752 756
757 if (IS_IRONLAKE_M(dev))
758 ironlake_disable_drps(dev);
759
753 /* Cache mode state */ 760 /* Cache mode state */
754 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 761 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
755 762
@@ -820,6 +827,9 @@ int i915_restore_state(struct drm_device *dev)
820 /* Clock gating state */ 827 /* Clock gating state */
821 intel_init_clock_gating(dev); 828 intel_init_clock_gating(dev);
822 829
830 if (IS_IRONLAKE_M(dev))
831 ironlake_enable_drps(dev);
832
823 /* Cache mode state */ 833 /* Cache mode state */
824 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 834 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
825 835
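
The suspend/resume hunks above bracket the new Ironlake-M dynamic render p-state support: the save path tears DRPS down before the chip state is stashed, and the restore path re-arms it after clock gating is re-initialised (ironlake_disable_drps()/ironlake_enable_drps() are added to intel_display.c in this same pull). A shape-only sketch of that bracketing, with hypothetical hooks standing in for the real routines:

struct pm_hooks {
        void (*disable_drps)(void);
        void (*enable_drps)(void);
};

static void save_state(const struct pm_hooks *h, int is_ironlake_m)
{
        /* ... register saves ... */
        if (is_ironlake_m)
                h->disable_drps();      /* quiesce RPS before the chip sleeps */
}

static void restore_state(const struct pm_hooks *h, int is_ironlake_m)
{
        /* ... register restores and clock-gating re-init ... */
        if (is_ironlake_m)
                h->enable_drps();       /* re-arm RPS after restore */
}
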
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 15fbc1b5a83e..70c9d4ba7042 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -247,6 +247,7 @@ static void
247parse_general_features(struct drm_i915_private *dev_priv, 247parse_general_features(struct drm_i915_private *dev_priv,
248 struct bdb_header *bdb) 248 struct bdb_header *bdb)
249{ 249{
250 struct drm_device *dev = dev_priv->dev;
250 struct bdb_general_features *general; 251 struct bdb_general_features *general;
251 252
252 /* Set sensible defaults in case we can't find the general block */ 253 /* Set sensible defaults in case we can't find the general block */
@@ -263,7 +264,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
263 if (IS_I85X(dev_priv->dev)) 264 if (IS_I85X(dev_priv->dev))
264 dev_priv->lvds_ssc_freq = 265 dev_priv->lvds_ssc_freq =
265 general->ssc_freq ? 66 : 48; 266 general->ssc_freq ? 66 : 48;
266 else if (IS_IRONLAKE(dev_priv->dev)) 267 else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
267 dev_priv->lvds_ssc_freq = 268 dev_priv->lvds_ssc_freq =
268 general->ssc_freq ? 100 : 120; 269 general->ssc_freq ? 100 : 120;
269 else 270 else
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 79dd4026586f..fccf07470c8f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
39 struct drm_i915_private *dev_priv = dev->dev_private; 39 struct drm_i915_private *dev_priv = dev->dev_private;
40 u32 temp, reg; 40 u32 temp, reg;
41 41
42 if (IS_IRONLAKE(dev)) 42 if (HAS_PCH_SPLIT(dev))
43 reg = PCH_ADPA; 43 reg = PCH_ADPA;
44 else 44 else
45 reg = ADPA; 45 reg = ADPA;
@@ -113,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
113 else 113 else
114 dpll_md_reg = DPLL_B_MD; 114 dpll_md_reg = DPLL_B_MD;
115 115
116 if (IS_IRONLAKE(dev)) 116 if (HAS_PCH_SPLIT(dev))
117 adpa_reg = PCH_ADPA; 117 adpa_reg = PCH_ADPA;
118 else 118 else
119 adpa_reg = ADPA; 119 adpa_reg = ADPA;
@@ -122,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
122 * Disable separate mode multiplier used when cloning SDVO to CRT 122 * Disable separate mode multiplier used when cloning SDVO to CRT
123 * XXX this needs to be adjusted when we really are cloning 123 * XXX this needs to be adjusted when we really are cloning
124 */ 124 */
125 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { 125 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
126 dpll_md = I915_READ(dpll_md_reg); 126 dpll_md = I915_READ(dpll_md_reg);
127 I915_WRITE(dpll_md_reg, 127 I915_WRITE(dpll_md_reg,
128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); 128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -136,11 +136,11 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
136 136
137 if (intel_crtc->pipe == 0) { 137 if (intel_crtc->pipe == 0) {
138 adpa |= ADPA_PIPE_A_SELECT; 138 adpa |= ADPA_PIPE_A_SELECT;
139 if (!IS_IRONLAKE(dev)) 139 if (!HAS_PCH_SPLIT(dev))
140 I915_WRITE(BCLRPAT_A, 0); 140 I915_WRITE(BCLRPAT_A, 0);
141 } else { 141 } else {
142 adpa |= ADPA_PIPE_B_SELECT; 142 adpa |= ADPA_PIPE_B_SELECT;
143 if (!IS_IRONLAKE(dev)) 143 if (!HAS_PCH_SPLIT(dev))
144 I915_WRITE(BCLRPAT_B, 0); 144 I915_WRITE(BCLRPAT_B, 0);
145 } 145 }
146 146
@@ -202,7 +202,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
202 u32 hotplug_en; 202 u32 hotplug_en;
203 int i, tries = 0; 203 int i, tries = 0;
204 204
205 if (IS_IRONLAKE(dev)) 205 if (HAS_PCH_SPLIT(dev))
206 return intel_ironlake_crt_detect_hotplug(connector); 206 return intel_ironlake_crt_detect_hotplug(connector);
207 207
208 /* 208 /*
@@ -524,7 +524,7 @@ void intel_crt_init(struct drm_device *dev)
524 &intel_output->enc); 524 &intel_output->enc);
525 525
526 /* Set up the DDC bus. */ 526 /* Set up the DDC bus. */
527 if (IS_IRONLAKE(dev)) 527 if (HAS_PCH_SPLIT(dev))
528 i2c_reg = PCH_GPIOA; 528 i2c_reg = PCH_GPIOA;
529 else { 529 else {
530 i2c_reg = GPIOA; 530 i2c_reg = GPIOA;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b27202d23ebc..9cd6de5f9906 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -232,7 +232,7 @@ struct intel_limit {
232#define G4X_P2_DISPLAY_PORT_FAST 10 232#define G4X_P2_DISPLAY_PORT_FAST 10
233#define G4X_P2_DISPLAY_PORT_LIMIT 0 233#define G4X_P2_DISPLAY_PORT_LIMIT 0
234 234
235/* Ironlake */ 235/* Ironlake / Sandybridge */
236/* as we calculate clock using (register_value + 2) for 236/* as we calculate clock using (register_value + 2) for
237 N/M1/M2, so here the range value for them is (actual_value-2). 237 N/M1/M2, so here the range value for them is (actual_value-2).
238 */ 238 */
@@ -690,7 +690,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
690 struct drm_device *dev = crtc->dev; 690 struct drm_device *dev = crtc->dev;
691 const intel_limit_t *limit; 691 const intel_limit_t *limit;
692 692
693 if (IS_IRONLAKE(dev)) 693 if (HAS_PCH_SPLIT(dev))
694 limit = intel_ironlake_limit(crtc); 694 limit = intel_ironlake_limit(crtc);
695 else if (IS_G4X(dev)) { 695 else if (IS_G4X(dev)) {
696 limit = intel_g4x_limit(crtc); 696 limit = intel_g4x_limit(crtc);
@@ -886,7 +886,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
886 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 886 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
887 int lvds_reg; 887 int lvds_reg;
888 888
889 if (IS_IRONLAKE(dev)) 889 if (HAS_PCH_SPLIT(dev))
890 lvds_reg = PCH_LVDS; 890 lvds_reg = PCH_LVDS;
891 else 891 else
892 lvds_reg = LVDS; 892 lvds_reg = LVDS;
@@ -1188,25 +1188,30 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1188 if (intel_fb->obj->size > dev_priv->cfb_size) { 1188 if (intel_fb->obj->size > dev_priv->cfb_size) {
1189 DRM_DEBUG_KMS("framebuffer too large, disabling " 1189 DRM_DEBUG_KMS("framebuffer too large, disabling "
1190 "compression\n"); 1190 "compression\n");
1191 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1191 goto out_disable; 1192 goto out_disable;
1192 } 1193 }
1193 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 1194 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
1194 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { 1195 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
1195 DRM_DEBUG_KMS("mode incompatible with compression, " 1196 DRM_DEBUG_KMS("mode incompatible with compression, "
1196 "disabling\n"); 1197 "disabling\n");
1198 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1197 goto out_disable; 1199 goto out_disable;
1198 } 1200 }
1199 if ((mode->hdisplay > 2048) || 1201 if ((mode->hdisplay > 2048) ||
1200 (mode->vdisplay > 1536)) { 1202 (mode->vdisplay > 1536)) {
1201 DRM_DEBUG_KMS("mode too large for compression, disabling\n"); 1203 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1204 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1202 goto out_disable; 1205 goto out_disable;
1203 } 1206 }
1204 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { 1207 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
1205 DRM_DEBUG_KMS("plane not 0, disabling compression\n"); 1208 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1209 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1206 goto out_disable; 1210 goto out_disable;
1207 } 1211 }
1208 if (obj_priv->tiling_mode != I915_TILING_X) { 1212 if (obj_priv->tiling_mode != I915_TILING_X) {
1209 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); 1213 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
1214 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1210 goto out_disable; 1215 goto out_disable;
1211 } 1216 }
1212 1217
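
intel_update_fbc() now records why frame-buffer compression was rejected instead of only printing a debug message, stashing a reason code in dev_priv->no_fbc_reason for later reporting. The codes referenced in this hunk, gathered into an illustrative enum with a human-readable decoder (the real enum lives in the driver's private header and may carry more values than the ones visible here):

enum fbc_no_reason {
        FBC_STOLEN_TOO_SMALL,
        FBC_UNSUPPORTED_MODE,
        FBC_MODE_TOO_LARGE,
        FBC_BAD_PLANE,
        FBC_NOT_TILED,
};

static const char *fbc_no_reason_str(enum fbc_no_reason r)
{
        switch (r) {
        case FBC_STOLEN_TOO_SMALL:      return "stolen memory too small for the framebuffer";
        case FBC_UNSUPPORTED_MODE:      return "interlaced or doublescan mode";
        case FBC_MODE_TOO_LARGE:        return "mode larger than 2048x1536";
        case FBC_BAD_PLANE:             return "compression limited to plane A on this chip";
        case FBC_NOT_TILED:             return "framebuffer not X-tiled";
        }
        return "unknown";
}
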
@@ -1366,7 +1371,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1366 dspcntr &= ~DISPPLANE_TILED; 1371 dspcntr &= ~DISPPLANE_TILED;
1367 } 1372 }
1368 1373
1369 if (IS_IRONLAKE(dev)) 1374 if (HAS_PCH_SPLIT(dev))
1370 /* must disable */ 1375 /* must disable */
1371 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 1376 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1372 1377
@@ -1427,7 +1432,7 @@ static void i915_disable_vga (struct drm_device *dev)
1427 u8 sr1; 1432 u8 sr1;
1428 u32 vga_reg; 1433 u32 vga_reg;
1429 1434
1430 if (IS_IRONLAKE(dev)) 1435 if (HAS_PCH_SPLIT(dev))
1431 vga_reg = CPU_VGACNTRL; 1436 vga_reg = CPU_VGACNTRL;
1432 else 1437 else
1433 vga_reg = VGACNTRL; 1438 vga_reg = VGACNTRL;
@@ -2111,7 +2116,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
2111 struct drm_display_mode *adjusted_mode) 2116 struct drm_display_mode *adjusted_mode)
2112{ 2117{
2113 struct drm_device *dev = crtc->dev; 2118 struct drm_device *dev = crtc->dev;
2114 if (IS_IRONLAKE(dev)) { 2119 if (HAS_PCH_SPLIT(dev)) {
2115 /* FDI link clock is fixed at 2.7G */ 2120 /* FDI link clock is fixed at 2.7G */
2116 if (mode->clock * 3 > 27000 * 4) 2121 if (mode->clock * 3 > 27000 * 4)
2117 return MODE_CLOCK_HIGH; 2122 return MODE_CLOCK_HIGH;
@@ -2757,11 +2762,22 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2757 srwm = total_size - sr_entries; 2762 srwm = total_size - sr_entries;
2758 if (srwm < 0) 2763 if (srwm < 0)
2759 srwm = 1; 2764 srwm = 1;
2760 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2765
2766 if (IS_I945G(dev) || IS_I945GM(dev))
2767 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2768 else if (IS_I915GM(dev)) {
2769 /* 915M has a smaller SRWM field */
2770 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
2771 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
2772 }
2761 } else { 2773 } else {
2762 /* Turn off self refresh if both pipes are enabled */ 2774 /* Turn off self refresh if both pipes are enabled */
2763 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 2775 if (IS_I945G(dev) || IS_I945GM(dev)) {
2764 & ~FW_BLC_SELF_EN); 2776 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2777 & ~FW_BLC_SELF_EN);
2778 } else if (IS_I915GM(dev)) {
2779 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
2780 }
2765 } 2781 }
2766 2782
2767 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2783 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -2967,7 +2983,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2967 refclk / 1000); 2983 refclk / 1000);
2968 } else if (IS_I9XX(dev)) { 2984 } else if (IS_I9XX(dev)) {
2969 refclk = 96000; 2985 refclk = 96000;
2970 if (IS_IRONLAKE(dev)) 2986 if (HAS_PCH_SPLIT(dev))
2971 refclk = 120000; /* 120Mhz refclk */ 2987 refclk = 120000; /* 120Mhz refclk */
2972 } else { 2988 } else {
2973 refclk = 48000; 2989 refclk = 48000;
@@ -3025,7 +3041,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3025 } 3041 }
3026 3042
3027 /* FDI link */ 3043 /* FDI link */
3028 if (IS_IRONLAKE(dev)) { 3044 if (HAS_PCH_SPLIT(dev)) {
3029 int lane, link_bw, bpp; 3045 int lane, link_bw, bpp;
3030 /* eDP doesn't require FDI link, so just set DP M/N 3046 /* eDP doesn't require FDI link, so just set DP M/N
3031 according to current link config */ 3047 according to current link config */
@@ -3102,7 +3118,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3102 * PCH B stepping, previous chipset stepping should be 3118 * PCH B stepping, previous chipset stepping should be
3103 * ignoring this setting. 3119 * ignoring this setting.
3104 */ 3120 */
3105 if (IS_IRONLAKE(dev)) { 3121 if (HAS_PCH_SPLIT(dev)) {
3106 temp = I915_READ(PCH_DREF_CONTROL); 3122 temp = I915_READ(PCH_DREF_CONTROL);
3107 /* Always enable nonspread source */ 3123 /* Always enable nonspread source */
3108 temp &= ~DREF_NONSPREAD_SOURCE_MASK; 3124 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -3149,7 +3165,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3149 reduced_clock.m2; 3165 reduced_clock.m2;
3150 } 3166 }
3151 3167
3152 if (!IS_IRONLAKE(dev)) 3168 if (!HAS_PCH_SPLIT(dev))
3153 dpll = DPLL_VGA_MODE_DIS; 3169 dpll = DPLL_VGA_MODE_DIS;
3154 3170
3155 if (IS_I9XX(dev)) { 3171 if (IS_I9XX(dev)) {
@@ -3162,7 +3178,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3162 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3178 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3163 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 3179 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3164 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 3180 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
3165 else if (IS_IRONLAKE(dev)) 3181 else if (HAS_PCH_SPLIT(dev))
3166 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 3182 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
3167 } 3183 }
3168 if (is_dp) 3184 if (is_dp)
@@ -3174,7 +3190,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3174 else { 3190 else {
3175 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 3191 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3176 /* also FPA1 */ 3192 /* also FPA1 */
3177 if (IS_IRONLAKE(dev)) 3193 if (HAS_PCH_SPLIT(dev))
3178 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3194 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
3179 if (IS_G4X(dev) && has_reduced_clock) 3195 if (IS_G4X(dev) && has_reduced_clock)
3180 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3196 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -3193,7 +3209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3193 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 3209 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
3194 break; 3210 break;
3195 } 3211 }
3196 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) 3212 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
3197 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 3213 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
3198 } else { 3214 } else {
3199 if (is_lvds) { 3215 if (is_lvds) {
@@ -3227,7 +3243,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3227 3243
3228 /* Ironlake's plane is forced to pipe, bit 24 is to 3244 /* Ironlake's plane is forced to pipe, bit 24 is to
3229 enable color space conversion */ 3245 enable color space conversion */
3230 if (!IS_IRONLAKE(dev)) { 3246 if (!HAS_PCH_SPLIT(dev)) {
3231 if (pipe == 0) 3247 if (pipe == 0)
3232 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 3248 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
3233 else 3249 else
@@ -3254,14 +3270,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3254 3270
3255 3271
3256 /* Disable the panel fitter if it was on our pipe */ 3272 /* Disable the panel fitter if it was on our pipe */
3257 if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe) 3273 if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
3258 I915_WRITE(PFIT_CONTROL, 0); 3274 I915_WRITE(PFIT_CONTROL, 0);
3259 3275
3260 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 3276 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
3261 drm_mode_debug_printmodeline(mode); 3277 drm_mode_debug_printmodeline(mode);
3262 3278
3263 /* assign to Ironlake registers */ 3279 /* assign to Ironlake registers */
3264 if (IS_IRONLAKE(dev)) { 3280 if (HAS_PCH_SPLIT(dev)) {
3265 fp_reg = pch_fp_reg; 3281 fp_reg = pch_fp_reg;
3266 dpll_reg = pch_dpll_reg; 3282 dpll_reg = pch_dpll_reg;
3267 } 3283 }
@@ -3282,7 +3298,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3282 if (is_lvds) { 3298 if (is_lvds) {
3283 u32 lvds; 3299 u32 lvds;
3284 3300
3285 if (IS_IRONLAKE(dev)) 3301 if (HAS_PCH_SPLIT(dev))
3286 lvds_reg = PCH_LVDS; 3302 lvds_reg = PCH_LVDS;
3287 3303
3288 lvds = I915_READ(lvds_reg); 3304 lvds = I915_READ(lvds_reg);
@@ -3304,12 +3320,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3304 /* set the dithering flag */ 3320 /* set the dithering flag */
3305 if (IS_I965G(dev)) { 3321 if (IS_I965G(dev)) {
3306 if (dev_priv->lvds_dither) { 3322 if (dev_priv->lvds_dither) {
3307 if (IS_IRONLAKE(dev)) 3323 if (HAS_PCH_SPLIT(dev))
3308 pipeconf |= PIPE_ENABLE_DITHER; 3324 pipeconf |= PIPE_ENABLE_DITHER;
3309 else 3325 else
3310 lvds |= LVDS_ENABLE_DITHER; 3326 lvds |= LVDS_ENABLE_DITHER;
3311 } else { 3327 } else {
3312 if (IS_IRONLAKE(dev)) 3328 if (HAS_PCH_SPLIT(dev))
3313 pipeconf &= ~PIPE_ENABLE_DITHER; 3329 pipeconf &= ~PIPE_ENABLE_DITHER;
3314 else 3330 else
3315 lvds &= ~LVDS_ENABLE_DITHER; 3331 lvds &= ~LVDS_ENABLE_DITHER;
@@ -3328,7 +3344,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3328 /* Wait for the clocks to stabilize. */ 3344 /* Wait for the clocks to stabilize. */
3329 udelay(150); 3345 udelay(150);
3330 3346
3331 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { 3347 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
3332 if (is_sdvo) { 3348 if (is_sdvo) {
3333 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3349 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3334 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 3350 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3375,14 +3391,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3375 /* pipesrc and dspsize control the size that is scaled from, which should 3391 /* pipesrc and dspsize control the size that is scaled from, which should
3376 * always be the user's requested size. 3392 * always be the user's requested size.
3377 */ 3393 */
3378 if (!IS_IRONLAKE(dev)) { 3394 if (!HAS_PCH_SPLIT(dev)) {
3379 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | 3395 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
3380 (mode->hdisplay - 1)); 3396 (mode->hdisplay - 1));
3381 I915_WRITE(dsppos_reg, 0); 3397 I915_WRITE(dsppos_reg, 0);
3382 } 3398 }
3383 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 3399 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
3384 3400
3385 if (IS_IRONLAKE(dev)) { 3401 if (HAS_PCH_SPLIT(dev)) {
3386 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); 3402 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
3387 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); 3403 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
3388 I915_WRITE(link_m1_reg, m_n.link_m); 3404 I915_WRITE(link_m1_reg, m_n.link_m);
@@ -3438,7 +3454,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
3438 return; 3454 return;
3439 3455
3440 /* use legacy palette for Ironlake */ 3456 /* use legacy palette for Ironlake */
3441 if (IS_IRONLAKE(dev)) 3457 if (HAS_PCH_SPLIT(dev))
3442 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : 3458 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
3443 LGC_PALETTE_B; 3459 LGC_PALETTE_B;
3444 3460
@@ -3553,11 +3569,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3553 intel_crtc->cursor_bo = bo; 3569 intel_crtc->cursor_bo = bo;
3554 3570
3555 return 0; 3571 return 0;
3556fail:
3557 mutex_lock(&dev->struct_mutex);
3558fail_locked: 3572fail_locked:
3559 drm_gem_object_unreference(bo);
3560 mutex_unlock(&dev->struct_mutex); 3573 mutex_unlock(&dev->struct_mutex);
3574fail:
3575 drm_gem_object_unreference_unlocked(bo);
3561 return ret; 3576 return ret;
3562} 3577}
3563 3578
@@ -3922,7 +3937,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
3922 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3937 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3923 int dpll = I915_READ(dpll_reg); 3938 int dpll = I915_READ(dpll_reg);
3924 3939
3925 if (IS_IRONLAKE(dev)) 3940 if (HAS_PCH_SPLIT(dev))
3926 return; 3941 return;
3927 3942
3928 if (!dev_priv->lvds_downclock_avail) 3943 if (!dev_priv->lvds_downclock_avail)
@@ -3961,7 +3976,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
3961 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3976 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3962 int dpll = I915_READ(dpll_reg); 3977 int dpll = I915_READ(dpll_reg);
3963 3978
3964 if (IS_IRONLAKE(dev)) 3979 if (HAS_PCH_SPLIT(dev))
3965 return; 3980 return;
3966 3981
3967 if (!dev_priv->lvds_downclock_avail) 3982 if (!dev_priv->lvds_downclock_avail)
@@ -4011,6 +4026,11 @@ static void intel_idle_update(struct work_struct *work)
4011 4026
4012 mutex_lock(&dev->struct_mutex); 4027 mutex_lock(&dev->struct_mutex);
4013 4028
4029 if (IS_I945G(dev) || IS_I945GM(dev)) {
4030 DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
4031 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4032 }
4033
4014 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4034 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4015 /* Skip inactive CRTCs */ 4035 /* Skip inactive CRTCs */
4016 if (!crtc->fb) 4036 if (!crtc->fb)
@@ -4044,9 +4064,17 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
4044 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4064 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4045 return; 4065 return;
4046 4066
4047 if (!dev_priv->busy) 4067 if (!dev_priv->busy) {
4068 if (IS_I945G(dev) || IS_I945GM(dev)) {
4069 u32 fw_blc_self;
4070
4071 DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
4072 fw_blc_self = I915_READ(FW_BLC_SELF);
4073 fw_blc_self &= ~FW_BLC_SELF_EN;
4074 I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
4075 }
4048 dev_priv->busy = true; 4076 dev_priv->busy = true;
4049 else 4077 } else
4050 mod_timer(&dev_priv->idle_timer, jiffies + 4078 mod_timer(&dev_priv->idle_timer, jiffies +
4051 msecs_to_jiffies(GPU_IDLE_TIMEOUT)); 4079 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
4052 4080
@@ -4058,6 +4086,14 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
4058 intel_fb = to_intel_framebuffer(crtc->fb); 4086 intel_fb = to_intel_framebuffer(crtc->fb);
4059 if (intel_fb->obj == obj) { 4087 if (intel_fb->obj == obj) {
4060 if (!intel_crtc->busy) { 4088 if (!intel_crtc->busy) {
4089 if (IS_I945G(dev) || IS_I945GM(dev)) {
4090 u32 fw_blc_self;
4091
4092 DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
4093 fw_blc_self = I915_READ(FW_BLC_SELF);
4094 fw_blc_self &= ~FW_BLC_SELF_EN;
4095 I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
4096 }
4061 /* Non-busy -> busy, upclock */ 4097 /* Non-busy -> busy, upclock */
4062 intel_increase_pllclock(crtc, true); 4098 intel_increase_pllclock(crtc, true);
4063 intel_crtc->busy = true; 4099 intel_crtc->busy = true;
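
Note: the two hunks above form a matched pair. intel_idle_update() re-enables memory self-refresh on 945G/945GM once the GPU has gone idle, and the first intel_mark_busy() call afterwards clears it again before upclocking. A minimal sketch of the shared register pattern, assuming only the FW_BLC_SELF definitions seen in the hunks; the helper itself is hypothetical and not part of the patch:

/*
 * Hypothetical helper condensing the pattern above; I915_READ/I915_WRITE
 * expect a local dev_priv as elsewhere in i915, and FW_BLC_SELF_EN_MASK
 * appears to act as the write-enable for the FW_BLC_SELF_EN bit, which is
 * why it is set on every write.
 */
static void i945_self_refresh(struct drm_i915_private *dev_priv, bool enable)
{
        u32 fw_blc_self = I915_READ(FW_BLC_SELF);

        if (enable)
                fw_blc_self |= FW_BLC_SELF_EN;
        else
                fw_blc_self &= ~FW_BLC_SELF_EN;

        I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
}
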
@@ -4382,7 +4418,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4382 if (IS_MOBILE(dev) && !IS_I830(dev)) 4418 if (IS_MOBILE(dev) && !IS_I830(dev))
4383 intel_lvds_init(dev); 4419 intel_lvds_init(dev);
4384 4420
4385 if (IS_IRONLAKE(dev)) { 4421 if (HAS_PCH_SPLIT(dev)) {
4386 int found; 4422 int found;
4387 4423
4388 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) 4424 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
@@ -4451,7 +4487,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4451 DRM_DEBUG_KMS("probing DP_D\n"); 4487 DRM_DEBUG_KMS("probing DP_D\n");
4452 intel_dp_init(dev, DP_D); 4488 intel_dp_init(dev, DP_D);
4453 } 4489 }
4454 } else if (IS_I8XX(dev)) 4490 } else if (IS_GEN2(dev))
4455 intel_dvo_init(dev); 4491 intel_dvo_init(dev);
4456 4492
4457 if (SUPPORTS_TV(dev)) 4493 if (SUPPORTS_TV(dev))
@@ -4476,9 +4512,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
4476 intelfb_remove(dev, fb); 4512 intelfb_remove(dev, fb);
4477 4513
4478 drm_framebuffer_cleanup(fb); 4514 drm_framebuffer_cleanup(fb);
4479 mutex_lock(&dev->struct_mutex); 4515 drm_gem_object_unreference_unlocked(intel_fb->obj);
4480 drm_gem_object_unreference(intel_fb->obj);
4481 mutex_unlock(&dev->struct_mutex);
4482 4516
4483 kfree(intel_fb); 4517 kfree(intel_fb);
4484} 4518}
@@ -4541,9 +4575,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
4541 4575
4542 ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); 4576 ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
4543 if (ret) { 4577 if (ret) {
4544 mutex_lock(&dev->struct_mutex); 4578 drm_gem_object_unreference_unlocked(obj);
4545 drm_gem_object_unreference(obj);
4546 mutex_unlock(&dev->struct_mutex);
4547 return NULL; 4579 return NULL;
4548 } 4580 }
4549 4581
@@ -4591,6 +4623,91 @@ err_unref:
4591 return NULL; 4623 return NULL;
4592} 4624}
4593 4625
4626void ironlake_enable_drps(struct drm_device *dev)
4627{
4628 struct drm_i915_private *dev_priv = dev->dev_private;
4629 u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
4630 u8 fmax, fmin, fstart, vstart;
4631 int i = 0;
4632
4633 /* 100ms RC evaluation intervals */
4634 I915_WRITE(RCUPEI, 100000);
4635 I915_WRITE(RCDNEI, 100000);
4636
4637 /* Set max/min thresholds to 90ms and 80ms respectively */
4638 I915_WRITE(RCBMAXAVG, 90000);
4639 I915_WRITE(RCBMINAVG, 80000);
4640
4641 I915_WRITE(MEMIHYST, 1);
4642
4643 /* Set up min, max, and cur for interrupt handling */
4644 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4645 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4646 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4647 MEMMODE_FSTART_SHIFT;
4648 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
4649 PXVFREQ_PX_SHIFT;
4650
4651 dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
4652 dev_priv->min_delay = fmin;
4653 dev_priv->cur_delay = fstart;
4654
4655 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4656
4657 /*
4658 * Interrupts will be enabled in ironlake_irq_postinstall
4659 */
4660
4661 I915_WRITE(VIDSTART, vstart);
4662 POSTING_READ(VIDSTART);
4663
4664 rgvmodectl |= MEMMODE_SWMODE_EN;
4665 I915_WRITE(MEMMODECTL, rgvmodectl);
4666
4667 while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
4668 if (i++ > 100) {
4669 DRM_ERROR("stuck trying to change perf mode\n");
4670 break;
4671 }
4672 msleep(1);
4673 }
4674 msleep(1);
4675
4676 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4677 (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4678 I915_WRITE(MEMSWCTL, rgvswctl);
4679 POSTING_READ(MEMSWCTL);
4680
4681 rgvswctl |= MEMCTL_CMD_STS;
4682 I915_WRITE(MEMSWCTL, rgvswctl);
4683}
4684
4685void ironlake_disable_drps(struct drm_device *dev)
4686{
4687 struct drm_i915_private *dev_priv = dev->dev_private;
4688 u32 rgvswctl;
4689 u8 fstart;
4690
4691 /* Ack interrupts, disable EFC interrupt */
4692 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4693 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4694 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4695 I915_WRITE(DEIIR, DE_PCU_EVENT);
4696 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4697
4698 /* Go back to the starting frequency */
4699 fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
4700 MEMMODE_FSTART_SHIFT;
4701 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4702 (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4703 I915_WRITE(MEMSWCTL, rgvswctl);
4704 msleep(1);
4705 rgvswctl |= MEMCTL_CMD_STS;
4706 I915_WRITE(MEMSWCTL, rgvswctl);
4707 msleep(1);
4708
4709}
4710
4594void intel_init_clock_gating(struct drm_device *dev) 4711void intel_init_clock_gating(struct drm_device *dev)
4595{ 4712{
4596 struct drm_i915_private *dev_priv = dev->dev_private; 4713 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4599,7 +4716,7 @@ void intel_init_clock_gating(struct drm_device *dev)
4599 * Disable clock gating reported to work incorrectly according to the 4716 * Disable clock gating reported to work incorrectly according to the
4600 * specs, but enable as much else as we can. 4717 * specs, but enable as much else as we can.
4601 */ 4718 */
4602 if (IS_IRONLAKE(dev)) { 4719 if (HAS_PCH_SPLIT(dev)) {
4603 return; 4720 return;
4604 } else if (IS_G4X(dev)) { 4721 } else if (IS_G4X(dev)) {
4605 uint32_t dspclk_gate; 4722 uint32_t dspclk_gate;
@@ -4672,7 +4789,7 @@ static void intel_init_display(struct drm_device *dev)
4672 struct drm_i915_private *dev_priv = dev->dev_private; 4789 struct drm_i915_private *dev_priv = dev->dev_private;
4673 4790
4674 /* We always want a DPMS function */ 4791 /* We always want a DPMS function */
4675 if (IS_IRONLAKE(dev)) 4792 if (HAS_PCH_SPLIT(dev))
4676 dev_priv->display.dpms = ironlake_crtc_dpms; 4793 dev_priv->display.dpms = ironlake_crtc_dpms;
4677 else 4794 else
4678 dev_priv->display.dpms = i9xx_crtc_dpms; 4795 dev_priv->display.dpms = i9xx_crtc_dpms;
@@ -4715,7 +4832,7 @@ static void intel_init_display(struct drm_device *dev)
4715 i830_get_display_clock_speed; 4832 i830_get_display_clock_speed;
4716 4833
4717 /* For FIFO watermark updates */ 4834 /* For FIFO watermark updates */
4718 if (IS_IRONLAKE(dev)) 4835 if (HAS_PCH_SPLIT(dev))
4719 dev_priv->display.update_wm = NULL; 4836 dev_priv->display.update_wm = NULL;
4720 else if (IS_G4X(dev)) 4837 else if (IS_G4X(dev))
4721 dev_priv->display.update_wm = g4x_update_wm; 4838 dev_priv->display.update_wm = g4x_update_wm;
@@ -4774,11 +4891,6 @@ void intel_modeset_init(struct drm_device *dev)
4774 DRM_DEBUG_KMS("%d display pipe%s available.\n", 4891 DRM_DEBUG_KMS("%d display pipe%s available.\n",
4775 num_pipe, num_pipe > 1 ? "s" : ""); 4892 num_pipe, num_pipe > 1 ? "s" : "");
4776 4893
4777 if (IS_I85X(dev))
4778 pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock);
4779 else if (IS_I9XX(dev) || IS_G4X(dev))
4780 pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock);
4781
4782 for (i = 0; i < num_pipe; i++) { 4894 for (i = 0; i < num_pipe; i++) {
4783 intel_crtc_init(dev, i); 4895 intel_crtc_init(dev, i);
4784 } 4896 }
@@ -4787,6 +4899,9 @@ void intel_modeset_init(struct drm_device *dev)
4787 4899
4788 intel_init_clock_gating(dev); 4900 intel_init_clock_gating(dev);
4789 4901
4902 if (IS_IRONLAKE_M(dev))
4903 ironlake_enable_drps(dev);
4904
4790 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 4905 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
4791 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 4906 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
4792 (unsigned long)dev); 4907 (unsigned long)dev);
@@ -4834,6 +4949,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
4834 drm_gem_object_unreference(dev_priv->pwrctx); 4949 drm_gem_object_unreference(dev_priv->pwrctx);
4835 } 4950 }
4836 4951
4952 if (IS_IRONLAKE_M(dev))
4953 ironlake_disable_drps(dev);
4954
4837 mutex_unlock(&dev->struct_mutex); 4955 mutex_unlock(&dev->struct_mutex);
4838 4956
4839 drm_mode_config_cleanup(dev); 4957 drm_mode_config_cleanup(dev);
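
Two themes dominate the intel_display.c changes. Most hunks swap IS_IRONLAKE() for HAS_PCH_SPLIT(), so the PCH-based display paths are also taken on newer parts that share the Ironlake PCH layout. The genuinely new code is the Ironlake-M DRPS support: ironlake_enable_drps() programs the RC evaluation windows and derives fmin/fstart/fmax plus the starting voltage from MEMMODECTL and PXVFREQ, while ironlake_disable_drps() masks the PCU event interrupt and steps back to the start frequency; intel_modeset_init() and intel_modeset_cleanup() call them under IS_IRONLAKE_M(). Both functions end with the same MEMSWCTL sequence, sketched below; the helper name is illustrative and the comment about MEMCTL_CMD_STS is an inference from the code, not documentation:

/* Illustrative refactoring, not part of the patch. */
static void ironlake_request_drps_freq(struct drm_i915_private *dev_priv, u8 fstart)
{
        u32 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
                       (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;

        I915_WRITE(MEMSWCTL, rgvswctl);
        POSTING_READ(MEMSWCTL);

        /* the second write, with MEMCTL_CMD_STS set, appears to kick off
         * the frequency change request */
        I915_WRITE(MEMSWCTL, rgvswctl | MEMCTL_CMD_STS);
}
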
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 439506cefc14..3ef3a0d0edd0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -231,7 +231,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
231 */ 231 */
232 if (IS_eDP(intel_output)) 232 if (IS_eDP(intel_output))
233 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 233 aux_clock_divider = 225; /* eDP input clock at 450Mhz */
234 else if (IS_IRONLAKE(dev)) 234 else if (HAS_PCH_SPLIT(dev))
235 aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ 235 aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
236 else 236 else
237 aux_clock_divider = intel_hrawclk(dev) / 2; 237 aux_clock_divider = intel_hrawclk(dev) / 2;
@@ -584,7 +584,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
584 intel_dp_compute_m_n(3, lane_count, 584 intel_dp_compute_m_n(3, lane_count,
585 mode->clock, adjusted_mode->clock, &m_n); 585 mode->clock, adjusted_mode->clock, &m_n);
586 586
587 if (IS_IRONLAKE(dev)) { 587 if (HAS_PCH_SPLIT(dev)) {
588 if (intel_crtc->pipe == 0) { 588 if (intel_crtc->pipe == 0) {
589 I915_WRITE(TRANSA_DATA_M1, 589 I915_WRITE(TRANSA_DATA_M1,
590 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 590 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -1176,7 +1176,7 @@ intel_dp_detect(struct drm_connector *connector)
1176 1176
1177 dp_priv->has_audio = false; 1177 dp_priv->has_audio = false;
1178 1178
1179 if (IS_IRONLAKE(dev)) 1179 if (HAS_PCH_SPLIT(dev))
1180 return ironlake_dp_detect(connector); 1180 return ironlake_dp_detect(connector);
1181 1181
1182 temp = I915_READ(PORT_HOTPLUG_EN); 1182 temp = I915_READ(PORT_HOTPLUG_EN);
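
Note on the aux_clock_divider values above: the divider is chosen so the AUX channel runs at roughly 2 MHz, hence 450 MHz / 225 = 2 MHz for the eDP input clock, 125 MHz / 62 = 2.016 MHz for the fixed PCH reference clock, and intel_hrawclk(dev) / 2 gives the same 2 MHz target for a raw clock expressed in MHz. Switching the check to HAS_PCH_SPLIT() simply makes every PCH-based part use the fixed 125 MHz divider.
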
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a51573da1ff6..3a467ca57857 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -209,6 +209,8 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
209extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 209extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
210 u16 *blue, int regno); 210 u16 *blue, int regno);
211extern void intel_init_clock_gating(struct drm_device *dev); 211extern void intel_init_clock_gating(struct drm_device *dev);
212extern void ironlake_enable_drps(struct drm_device *dev);
213extern void ironlake_disable_drps(struct drm_device *dev);
212 214
213extern int intel_framebuffer_create(struct drm_device *dev, 215extern int intel_framebuffer_create(struct drm_device *dev,
214 struct drm_mode_fb_cmd *mode_cmd, 216 struct drm_mode_fb_cmd *mode_cmd,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index aaabbcbe5905..8cd791dc5b29 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/fb.h> 36#include <linux/fb.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/vga_switcheroo.h>
38 39
39#include "drmP.h" 40#include "drmP.h"
40#include "drm.h" 41#include "drm.h"
@@ -235,6 +236,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
235 obj_priv->gtt_offset, fbo); 236 obj_priv->gtt_offset, fbo);
236 237
237 mutex_unlock(&dev->struct_mutex); 238 mutex_unlock(&dev->struct_mutex);
239 vga_switcheroo_client_fb_set(dev->pdev, info);
238 return 0; 240 return 0;
239 241
240out_unpin: 242out_unpin:
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0e268deed761..a30f8bfc1985 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
82 /* HW workaround, need to toggle enable bit off and on for 12bpc, but 82 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
83 * we do this anyway which shows more stable in testing. 83 * we do this anyway which shows more stable in testing.
84 */ 84 */
85 if (IS_IRONLAKE(dev)) { 85 if (HAS_PCH_SPLIT(dev)) {
86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); 86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
87 POSTING_READ(hdmi_priv->sdvox_reg); 87 POSTING_READ(hdmi_priv->sdvox_reg);
88 } 88 }
@@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
99 /* HW workaround, need to write this twice for issue that may result 99 /* HW workaround, need to write this twice for issue that may result
100 * in first write getting masked. 100 * in first write getting masked.
101 */ 101 */
102 if (IS_IRONLAKE(dev)) { 102 if (HAS_PCH_SPLIT(dev)) {
103 I915_WRITE(hdmi_priv->sdvox_reg, temp); 103 I915_WRITE(hdmi_priv->sdvox_reg, temp);
104 POSTING_READ(hdmi_priv->sdvox_reg); 104 POSTING_READ(hdmi_priv->sdvox_reg);
105 } 105 }
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8673c735b8ab..fcc753ca5d94 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -128,7 +128,7 @@ intel_i2c_reset_gmbus(struct drm_device *dev)
128{ 128{
129 struct drm_i915_private *dev_priv = dev->dev_private; 129 struct drm_i915_private *dev_priv = dev->dev_private;
130 130
131 if (IS_IRONLAKE(dev)) { 131 if (HAS_PCH_SPLIT(dev)) {
132 I915_WRITE(PCH_GMBUS0, 0); 132 I915_WRITE(PCH_GMBUS0, 0);
133 } else { 133 } else {
134 I915_WRITE(GMBUS0, 0); 134 I915_WRITE(GMBUS0, 0);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c2e8a45780d5..14e516fdc2dd 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
56 struct drm_i915_private *dev_priv = dev->dev_private; 56 struct drm_i915_private *dev_priv = dev->dev_private;
57 u32 blc_pwm_ctl, reg; 57 u32 blc_pwm_ctl, reg;
58 58
59 if (IS_IRONLAKE(dev)) 59 if (HAS_PCH_SPLIT(dev))
60 reg = BLC_PWM_CPU_CTL; 60 reg = BLC_PWM_CPU_CTL;
61 else 61 else
62 reg = BLC_PWM_CTL; 62 reg = BLC_PWM_CTL;
@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
74 struct drm_i915_private *dev_priv = dev->dev_private; 74 struct drm_i915_private *dev_priv = dev->dev_private;
75 u32 reg; 75 u32 reg;
76 76
77 if (IS_IRONLAKE(dev)) 77 if (HAS_PCH_SPLIT(dev))
78 reg = BLC_PWM_PCH_CTL2; 78 reg = BLC_PWM_PCH_CTL2;
79 else 79 else
80 reg = BLC_PWM_CTL; 80 reg = BLC_PWM_CTL;
@@ -89,17 +89,22 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
89static void intel_lvds_set_power(struct drm_device *dev, bool on) 89static void intel_lvds_set_power(struct drm_device *dev, bool on)
90{ 90{
91 struct drm_i915_private *dev_priv = dev->dev_private; 91 struct drm_i915_private *dev_priv = dev->dev_private;
92 u32 pp_status, ctl_reg, status_reg; 92 u32 pp_status, ctl_reg, status_reg, lvds_reg;
93 93
94 if (IS_IRONLAKE(dev)) { 94 if (HAS_PCH_SPLIT(dev)) {
95 ctl_reg = PCH_PP_CONTROL; 95 ctl_reg = PCH_PP_CONTROL;
96 status_reg = PCH_PP_STATUS; 96 status_reg = PCH_PP_STATUS;
97 lvds_reg = PCH_LVDS;
97 } else { 98 } else {
98 ctl_reg = PP_CONTROL; 99 ctl_reg = PP_CONTROL;
99 status_reg = PP_STATUS; 100 status_reg = PP_STATUS;
101 lvds_reg = LVDS;
100 } 102 }
101 103
102 if (on) { 104 if (on) {
105 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
106 POSTING_READ(lvds_reg);
107
103 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | 108 I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
104 POWER_TARGET_ON); 109 POWER_TARGET_ON);
105 do { 110 do {
@@ -115,6 +120,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
115 do { 120 do {
116 pp_status = I915_READ(status_reg); 121 pp_status = I915_READ(status_reg);
117 } while (pp_status & PP_ON); 122 } while (pp_status & PP_ON);
123
124 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
125 POSTING_READ(lvds_reg);
118 } 126 }
119} 127}
120 128
@@ -137,7 +145,7 @@ static void intel_lvds_save(struct drm_connector *connector)
137 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; 145 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
138 u32 pwm_ctl_reg; 146 u32 pwm_ctl_reg;
139 147
140 if (IS_IRONLAKE(dev)) { 148 if (HAS_PCH_SPLIT(dev)) {
141 pp_on_reg = PCH_PP_ON_DELAYS; 149 pp_on_reg = PCH_PP_ON_DELAYS;
142 pp_off_reg = PCH_PP_OFF_DELAYS; 150 pp_off_reg = PCH_PP_OFF_DELAYS;
143 pp_ctl_reg = PCH_PP_CONTROL; 151 pp_ctl_reg = PCH_PP_CONTROL;
@@ -174,7 +182,7 @@ static void intel_lvds_restore(struct drm_connector *connector)
174 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; 182 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
175 u32 pwm_ctl_reg; 183 u32 pwm_ctl_reg;
176 184
177 if (IS_IRONLAKE(dev)) { 185 if (HAS_PCH_SPLIT(dev)) {
178 pp_on_reg = PCH_PP_ON_DELAYS; 186 pp_on_reg = PCH_PP_ON_DELAYS;
179 pp_off_reg = PCH_PP_OFF_DELAYS; 187 pp_off_reg = PCH_PP_OFF_DELAYS;
180 pp_ctl_reg = PCH_PP_CONTROL; 188 pp_ctl_reg = PCH_PP_CONTROL;
@@ -297,7 +305,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
297 } 305 }
298 306
299 /* full screen scale for now */ 307 /* full screen scale for now */
300 if (IS_IRONLAKE(dev)) 308 if (HAS_PCH_SPLIT(dev))
301 goto out; 309 goto out;
302 310
303 /* 965+ wants fuzzy fitting */ 311 /* 965+ wants fuzzy fitting */
@@ -327,7 +335,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
327 * to register description and PRM. 335 * to register description and PRM.
328 * Change the value here to see the borders for debugging 336 * Change the value here to see the borders for debugging
329 */ 337 */
330 if (!IS_IRONLAKE(dev)) { 338 if (!HAS_PCH_SPLIT(dev)) {
331 I915_WRITE(BCLRPAT_A, 0); 339 I915_WRITE(BCLRPAT_A, 0);
332 I915_WRITE(BCLRPAT_B, 0); 340 I915_WRITE(BCLRPAT_B, 0);
333 } 341 }
@@ -548,7 +556,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
548 struct drm_i915_private *dev_priv = dev->dev_private; 556 struct drm_i915_private *dev_priv = dev->dev_private;
549 u32 reg; 557 u32 reg;
550 558
551 if (IS_IRONLAKE(dev)) 559 if (HAS_PCH_SPLIT(dev))
552 reg = BLC_PWM_CPU_CTL; 560 reg = BLC_PWM_CPU_CTL;
553 else 561 else
554 reg = BLC_PWM_CTL; 562 reg = BLC_PWM_CTL;
@@ -587,7 +595,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
587 * settings. 595 * settings.
588 */ 596 */
589 597
590 if (IS_IRONLAKE(dev)) 598 if (HAS_PCH_SPLIT(dev))
591 return; 599 return;
592 600
593 /* 601 /*
@@ -655,8 +663,15 @@ static const struct dmi_system_id bad_lid_status[] = {
655 */ 663 */
656static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) 664static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
657{ 665{
666 struct drm_device *dev = connector->dev;
658 enum drm_connector_status status = connector_status_connected; 667 enum drm_connector_status status = connector_status_connected;
659 668
669 /* ACPI lid methods were generally unreliable in this generation, so
670 * don't even bother.
671 */
672 if (IS_GEN2(dev))
673 return connector_status_connected;
674
660 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) 675 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
661 status = connector_status_disconnected; 676 status = connector_status_disconnected;
662 677
@@ -1020,7 +1035,7 @@ void intel_lvds_init(struct drm_device *dev)
1020 return; 1035 return;
1021 } 1036 }
1022 1037
1023 if (IS_IRONLAKE(dev)) { 1038 if (HAS_PCH_SPLIT(dev)) {
1024 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) 1039 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
1025 return; 1040 return;
1026 if (dev_priv->edp_support) { 1041 if (dev_priv->edp_support) {
@@ -1123,7 +1138,7 @@ void intel_lvds_init(struct drm_device *dev)
1123 */ 1138 */
1124 1139
1125 /* Ironlake: FIXME if still fail, not try pipe mode now */ 1140 /* Ironlake: FIXME if still fail, not try pipe mode now */
1126 if (IS_IRONLAKE(dev)) 1141 if (HAS_PCH_SPLIT(dev))
1127 goto failed; 1142 goto failed;
1128 1143
1129 lvds = I915_READ(LVDS); 1144 lvds = I915_READ(LVDS);
@@ -1144,7 +1159,7 @@ void intel_lvds_init(struct drm_device *dev)
1144 goto failed; 1159 goto failed;
1145 1160
1146out: 1161out:
1147 if (IS_IRONLAKE(dev)) { 1162 if (HAS_PCH_SPLIT(dev)) {
1148 u32 pwm; 1163 u32 pwm;
1149 /* make sure PWM is enabled */ 1164 /* make sure PWM is enabled */
1150 pwm = I915_READ(BLC_PWM_CPU_CTL2); 1165 pwm = I915_READ(BLC_PWM_CPU_CTL2);
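
Two functional changes hide among the intel_lvds.c macro swaps: intel_lvds_detect() now skips the ACPI lid check entirely on gen2 (the comment notes the lid methods are unreliable there), and intel_lvds_set_power() now toggles the LVDS port itself around the panel-power sequence. The ordering is the point of that hunk; a condensed, hypothetical restatement follows, with the register names taken from the hunk:

/* Hypothetical condensed restatement of the hunk above; not driver code. */
static void lvds_panel_power(struct drm_i915_private *dev_priv,
                             u32 ctl_reg, u32 status_reg, u32 lvds_reg, bool on)
{
        u32 pp_status;

        if (on) {
                /* enable the port first, then raise panel power, wait for PP_ON */
                I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
                POSTING_READ(lvds_reg);
                I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
                do {
                        pp_status = I915_READ(status_reg);
                } while ((pp_status & PP_ON) == 0);
        } else {
                /* drop panel power, wait for PP_ON to clear, then disable the port */
                I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
                do {
                        pp_status = I915_READ(status_reg);
                } while (pp_status & PP_ON);
                I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
                POSTING_READ(lvds_reg);
        }
}
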
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 2639591c72e9..d355d1d527e7 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -172,7 +172,7 @@ struct overlay_registers {
172#define OFC_UPDATE 0x1 172#define OFC_UPDATE 0x1
173 173
174#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) 174#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
175#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev)) 175#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
176 176
177 177
178static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 178static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
@@ -199,16 +199,11 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
199 199
200static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) 200static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
201{ 201{
202 struct drm_device *dev = overlay->dev;
203 drm_i915_private_t *dev_priv = dev->dev_private;
204
205 if (OVERLAY_NONPHYSICAL(overlay->dev)) 202 if (OVERLAY_NONPHYSICAL(overlay->dev))
206 io_mapping_unmap_atomic(overlay->virt_addr); 203 io_mapping_unmap_atomic(overlay->virt_addr);
207 204
208 overlay->virt_addr = NULL; 205 overlay->virt_addr = NULL;
209 206
210 I915_READ(OVADD); /* flush wc cashes */
211
212 return; 207 return;
213} 208}
214 209
@@ -225,9 +220,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
225 overlay->active = 1; 220 overlay->active = 1;
226 overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP; 221 overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
227 222
228 BEGIN_LP_RING(6); 223 BEGIN_LP_RING(4);
229 OUT_RING(MI_FLUSH);
230 OUT_RING(MI_NOOP);
231 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); 224 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
232 OUT_RING(overlay->flip_addr | OFC_UPDATE); 225 OUT_RING(overlay->flip_addr | OFC_UPDATE);
233 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 226 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -267,9 +260,7 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
267 if (tmp & (1 << 17)) 260 if (tmp & (1 << 17))
268 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); 261 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
269 262
270 BEGIN_LP_RING(4); 263 BEGIN_LP_RING(2);
271 OUT_RING(MI_FLUSH);
272 OUT_RING(MI_NOOP);
273 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 264 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
274 OUT_RING(flip_addr); 265 OUT_RING(flip_addr);
275 ADVANCE_LP_RING(); 266 ADVANCE_LP_RING();
@@ -338,9 +329,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
338 /* wait for overlay to go idle */ 329 /* wait for overlay to go idle */
339 overlay->hw_wedged = SWITCH_OFF_STAGE_1; 330 overlay->hw_wedged = SWITCH_OFF_STAGE_1;
340 331
341 BEGIN_LP_RING(6); 332 BEGIN_LP_RING(4);
342 OUT_RING(MI_FLUSH);
343 OUT_RING(MI_NOOP);
344 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 333 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
345 OUT_RING(flip_addr); 334 OUT_RING(flip_addr);
346 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 335 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -358,9 +347,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
358 /* turn overlay off */ 347 /* turn overlay off */
359 overlay->hw_wedged = SWITCH_OFF_STAGE_2; 348 overlay->hw_wedged = SWITCH_OFF_STAGE_2;
360 349
361 BEGIN_LP_RING(6); 350 BEGIN_LP_RING(4);
362 OUT_RING(MI_FLUSH);
363 OUT_RING(MI_NOOP);
364 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); 351 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
365 OUT_RING(flip_addr); 352 OUT_RING(flip_addr);
366 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 353 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -435,9 +422,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
435 422
436 overlay->hw_wedged = SWITCH_OFF_STAGE_2; 423 overlay->hw_wedged = SWITCH_OFF_STAGE_2;
437 424
438 BEGIN_LP_RING(6); 425 BEGIN_LP_RING(4);
439 OUT_RING(MI_FLUSH);
440 OUT_RING(MI_NOOP);
441 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); 426 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
442 OUT_RING(flip_addr); 427 OUT_RING(flip_addr);
443 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 428 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -1179,7 +1164,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1179out_unlock: 1164out_unlock:
1180 mutex_unlock(&dev->struct_mutex); 1165 mutex_unlock(&dev->struct_mutex);
1181 mutex_unlock(&dev->mode_config.mutex); 1166 mutex_unlock(&dev->mode_config.mutex);
1182 drm_gem_object_unreference(new_bo); 1167 drm_gem_object_unreference_unlocked(new_bo);
1183 kfree(params); 1168 kfree(params);
1184 1169
1185 return ret; 1170 return ret;
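
Note on the intel_overlay.c hunks: each of the five ring emissions loses its leading MI_FLUSH/MI_NOOP pair, and the BEGIN_LP_RING() reservation shrinks by exactly two dwords to match (6 becomes 4 on the flip-on, flip-off and recover paths, 4 becomes 2 on the continue path), since the reservation must equal the number of OUT_RING() calls before ADVANCE_LP_RING(). The same cleanup drops the write-combine flush read from the unmap path, restricts OVERLAY_EXISTS() to pre-gen6 hardware, and switches the final unreference to drm_gem_object_unreference_unlocked() now that struct_mutex is released first.
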
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 82678d30ab06..48daee5c9c63 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -35,6 +35,7 @@
35#include "i915_drm.h" 35#include "i915_drm.h"
36#include "i915_drv.h" 36#include "i915_drv.h"
37#include "intel_sdvo_regs.h" 37#include "intel_sdvo_regs.h"
38#include <linux/dmi.h>
38 39
39static char *tv_format_names[] = { 40static char *tv_format_names[] = {
40 "NTSC_M" , "NTSC_J" , "NTSC_443", 41 "NTSC_M" , "NTSC_J" , "NTSC_443",
@@ -2283,6 +2284,25 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
2283 return 0x72; 2284 return 0x72;
2284} 2285}
2285 2286
2287static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
2288{
2289 DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
2290 return 1;
2291}
2292
2293static struct dmi_system_id intel_sdvo_bad_tv[] = {
2294 {
2295 .callback = intel_sdvo_bad_tv_callback,
2296 .ident = "IntelG45/ICH10R/DME1737",
2297 .matches = {
2298 DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
2299 DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
2300 },
2301 },
2302
2303 { } /* terminating entry */
2304};
2305
2286static bool 2306static bool
2287intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) 2307intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2288{ 2308{
@@ -2323,7 +2343,8 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2323 (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2343 (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2324 (1 << INTEL_ANALOG_CLONE_BIT); 2344 (1 << INTEL_ANALOG_CLONE_BIT);
2325 } 2345 }
2326 } else if (flags & SDVO_OUTPUT_SVID0) { 2346 } else if ((flags & SDVO_OUTPUT_SVID0) &&
2347 !dmi_check_system(intel_sdvo_bad_tv)) {
2327 2348
2328 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; 2349 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
2329 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; 2350 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
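
The intel_sdvo.c change adds a DMI-keyed quirk list: dmi_check_system() walks the table, runs the callback on a match and returns the number of matches, so a non-zero result suppresses the SVID0 TV output on the listed IBM system. Further machines would be added as extra entries before the terminator; a purely illustrative one is shown below (the vendor and product strings are placeholders, not a real quirk):

        {
                .callback = intel_sdvo_bad_tv_callback,
                .ident = "Example board with a phantom SDVO TV connector",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE VENDOR"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "EXAMPLE PRODUCT"),
                },
        },
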
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 48c290b5da8c..32db806f3b5a 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -16,7 +16,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
16 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ 16 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
17 nv04_graph.o nv10_graph.o nv20_graph.o \ 17 nv04_graph.o nv10_graph.o nv20_graph.o \
18 nv40_graph.o nv50_graph.o \ 18 nv40_graph.o nv50_graph.o \
19 nv40_grctx.o \ 19 nv40_grctx.o nv50_grctx.o \
20 nv04_instmem.o nv50_instmem.o \ 20 nv04_instmem.o nv50_instmem.o \
21 nv50_crtc.o nv50_dac.o nv50_sor.o \ 21 nv50_crtc.o nv50_dac.o nv50_sor.o \
22 nv50_cursor.o nv50_display.o nv50_fbcon.o \ 22 nv50_cursor.o nv50_display.o nv50_fbcon.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 48227e744753..0e0730a53137 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -11,6 +11,8 @@
11#include "nouveau_drm.h" 11#include "nouveau_drm.h"
12#include "nv50_display.h" 12#include "nv50_display.h"
13 13
14#include <linux/vga_switcheroo.h>
15
14#define NOUVEAU_DSM_SUPPORTED 0x00 16#define NOUVEAU_DSM_SUPPORTED 0x00
15#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00 17#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
16 18
@@ -28,31 +30,30 @@
28#define NOUVEAU_DSM_POWER_SPEED 0x01 30#define NOUVEAU_DSM_POWER_SPEED 0x01
29#define NOUVEAU_DSM_POWER_STAMINA 0x02 31#define NOUVEAU_DSM_POWER_STAMINA 0x02
30 32
31static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result) 33static struct nouveau_dsm_priv {
32{ 34 bool dsm_detected;
33 static char muid[] = { 35 acpi_handle dhandle;
34 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, 36 acpi_handle dsm_handle;
35 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, 37} nouveau_dsm_priv;
36 }; 38
39static const char nouveau_dsm_muid[] = {
40 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
41 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
42};
37 43
38 struct pci_dev *pdev = dev->pdev; 44static int nouveau_dsm(acpi_handle handle, int func, int arg, int *result)
39 struct acpi_handle *handle; 45{
40 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; 46 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
41 struct acpi_object_list input; 47 struct acpi_object_list input;
42 union acpi_object params[4]; 48 union acpi_object params[4];
43 union acpi_object *obj; 49 union acpi_object *obj;
44 int err; 50 int err;
45 51
46 handle = DEVICE_ACPI_HANDLE(&pdev->dev);
47
48 if (!handle)
49 return -ENODEV;
50
51 input.count = 4; 52 input.count = 4;
52 input.pointer = params; 53 input.pointer = params;
53 params[0].type = ACPI_TYPE_BUFFER; 54 params[0].type = ACPI_TYPE_BUFFER;
54 params[0].buffer.length = sizeof(muid); 55 params[0].buffer.length = sizeof(nouveau_dsm_muid);
55 params[0].buffer.pointer = (char *)muid; 56 params[0].buffer.pointer = (char *)nouveau_dsm_muid;
56 params[1].type = ACPI_TYPE_INTEGER; 57 params[1].type = ACPI_TYPE_INTEGER;
57 params[1].integer.value = 0x00000102; 58 params[1].integer.value = 0x00000102;
58 params[2].type = ACPI_TYPE_INTEGER; 59 params[2].type = ACPI_TYPE_INTEGER;
@@ -62,7 +63,7 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
62 63
63 err = acpi_evaluate_object(handle, "_DSM", &input, &output); 64 err = acpi_evaluate_object(handle, "_DSM", &input, &output);
64 if (err) { 65 if (err) {
65 NV_INFO(dev, "failed to evaluate _DSM: %d\n", err); 66 printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
66 return err; 67 return err;
67 } 68 }
68 69
@@ -86,40 +87,119 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
86 return 0; 87 return 0;
87} 88}
88 89
89int nouveau_hybrid_setup(struct drm_device *dev) 90static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
90{ 91{
91 int result; 92 return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL);
92 93}
93 if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE, 94
94 &result)) 95static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state)
95 return -ENODEV; 96{
96 97 int arg;
97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); 98 if (state == VGA_SWITCHEROO_ON)
98 99 arg = NOUVEAU_DSM_POWER_SPEED;
99 if (result) { /* Ensure that the external GPU is enabled */ 100 else
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL); 101 arg = NOUVEAU_DSM_POWER_STAMINA;
101 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED, 102 nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL);
102 NULL); 103 return 0;
103 } else { /* Stamina mode - disable the external GPU */ 104}
104 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, 105
105 NULL); 106static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, 107{
107 NULL); 108 if (id == VGA_SWITCHEROO_IGD)
108 } 109 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA);
110 else
111 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED);
112}
109 113
114static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
115 enum vga_switcheroo_state state)
116{
117 if (id == VGA_SWITCHEROO_IGD)
118 return 0;
119
120 return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state);
121}
122
123static int nouveau_dsm_init(void)
124{
110 return 0; 125 return 0;
111} 126}
112 127
113bool nouveau_dsm_probe(struct drm_device *dev) 128static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
114{ 129{
115 int support = 0; 130 if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
131 return VGA_SWITCHEROO_IGD;
132 else
133 return VGA_SWITCHEROO_DIS;
134}
135
136static struct vga_switcheroo_handler nouveau_dsm_handler = {
137 .switchto = nouveau_dsm_switchto,
138 .power_state = nouveau_dsm_power_state,
139 .init = nouveau_dsm_init,
140 .get_client_id = nouveau_dsm_get_client_id,
141};
116 142
117 if (nouveau_dsm(dev, NOUVEAU_DSM_SUPPORTED, 143static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
118 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &support)) 144{
145 acpi_handle dhandle, nvidia_handle;
146 acpi_status status;
147 int ret;
148 uint32_t result;
149
150 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
151 if (!dhandle)
152 return false;
153 status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
154 if (ACPI_FAILURE(status)) {
119 return false; 155 return false;
156 }
120 157
121 if (!support) 158 ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED,
159 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
160 if (ret < 0)
122 return false; 161 return false;
123 162
163 nouveau_dsm_priv.dhandle = dhandle;
164 nouveau_dsm_priv.dsm_handle = nvidia_handle;
124 return true; 165 return true;
125} 166}
167
168static bool nouveau_dsm_detect(void)
169{
170 char acpi_method_name[255] = { 0 };
171 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
172 struct pci_dev *pdev = NULL;
173 int has_dsm = 0;
174 int vga_count = 0;
175 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
176 vga_count++;
177
178 has_dsm |= (nouveau_dsm_pci_probe(pdev) == true);
179 }
180
181 if (vga_count == 2 && has_dsm) {
182 acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer);
183 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
184 acpi_method_name);
185 nouveau_dsm_priv.dsm_detected = true;
186 return true;
187 }
188 return false;
189}
190
191void nouveau_register_dsm_handler(void)
192{
193 bool r;
194
195 r = nouveau_dsm_detect();
196 if (!r)
197 return;
198
199 vga_switcheroo_register_handler(&nouveau_dsm_handler);
200}
201
202void nouveau_unregister_dsm_handler(void)
203{
204 vga_switcheroo_unregister_handler();
205}
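
nouveau_acpi.c is reworked from the old nouveau_hybrid_setup()/nouveau_dsm_probe() pair into a vga_switcheroo handler: detection scans the PCI VGA class for a device exposing the NVIDIA _DSM, caches its ACPI handles, and registers .switchto/.power_state callbacks that route through nouveau_dsm(). The registration entry points are meant to be called from driver setup and teardown; a minimal sketch of that wiring is shown below, where the init/exit function names are illustrative only, since the real call sites are elsewhere in the series and not visible in this hunk:

static int __init nouveau_example_init(void)
{
        /* becomes a no-op when nouveau_dsm_detect() finds no switching _DSM */
        nouveau_register_dsm_handler();
        return 0;
}

static void __exit nouveau_example_exit(void)
{
        nouveau_unregister_dsm_handler();
}
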
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0e9cd1d49130..71247da17da5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -311,11 +311,11 @@ valid_reg(struct nvbios *bios, uint32_t reg)
311 311
312 /* C51 has misaligned regs on purpose. Marvellous */ 312 /* C51 has misaligned regs on purpose. Marvellous */
313 if (reg & 0x2 || 313 if (reg & 0x2 ||
314 (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) 314 (reg & 0x1 && dev_priv->vbios.chip_version != 0x51))
315 NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg); 315 NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
316 316
317 /* warn on C51 regs that haven't been verified accessible in tracing */ 317 /* warn on C51 regs that haven't been verified accessible in tracing */
318 if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 && 318 if (reg & 0x1 && dev_priv->vbios.chip_version == 0x51 &&
319 reg != 0x130d && reg != 0x1311 && reg != 0x60081d) 319 reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
320 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n", 320 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
321 reg); 321 reg);
@@ -420,7 +420,7 @@ bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
420 LOG_OLD_VALUE(bios_rd32(bios, reg)); 420 LOG_OLD_VALUE(bios_rd32(bios, reg));
421 BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data); 421 BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
422 422
423 if (dev_priv->VBIOS.execute) { 423 if (dev_priv->vbios.execute) {
424 still_alive(); 424 still_alive();
425 nv_wr32(bios->dev, reg, data); 425 nv_wr32(bios->dev, reg, data);
426 } 426 }
@@ -647,7 +647,7 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
647 reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16); 647 reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
648 reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1; 648 reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
649 649
650 if (dev_priv->VBIOS.execute) { 650 if (dev_priv->vbios.execute) {
651 still_alive(); 651 still_alive();
652 nv_wr32(dev, reg + 4, reg1); 652 nv_wr32(dev, reg + 4, reg1);
653 nv_wr32(dev, reg + 0, reg0); 653 nv_wr32(dev, reg + 0, reg0);
@@ -689,7 +689,7 @@ setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
689static int dcb_entry_idx_from_crtchead(struct drm_device *dev) 689static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
690{ 690{
691 struct drm_nouveau_private *dev_priv = dev->dev_private; 691 struct drm_nouveau_private *dev_priv = dev->dev_private;
692 struct nvbios *bios = &dev_priv->VBIOS; 692 struct nvbios *bios = &dev_priv->vbios;
693 693
694 /* 694 /*
695 * For the results of this function to be correct, CR44 must have been 695 * For the results of this function to be correct, CR44 must have been
@@ -700,7 +700,7 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
700 700
701 uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0); 701 uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
702 702
703 if (dcb_entry > bios->bdcb.dcb.entries) { 703 if (dcb_entry > bios->dcb.entries) {
704 NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently " 704 NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
705 "(%02X)\n", dcb_entry); 705 "(%02X)\n", dcb_entry);
706 dcb_entry = 0x7f; /* unused / invalid marker */ 706 dcb_entry = 0x7f; /* unused / invalid marker */
@@ -713,25 +713,26 @@ static struct nouveau_i2c_chan *
713init_i2c_device_find(struct drm_device *dev, int i2c_index) 713init_i2c_device_find(struct drm_device *dev, int i2c_index)
714{ 714{
715 struct drm_nouveau_private *dev_priv = dev->dev_private; 715 struct drm_nouveau_private *dev_priv = dev->dev_private;
716 struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb; 716 struct dcb_table *dcb = &dev_priv->vbios.dcb;
717 717
718 if (i2c_index == 0xff) { 718 if (i2c_index == 0xff) {
719 /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */ 719 /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
720 int idx = dcb_entry_idx_from_crtchead(dev), shift = 0; 720 int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
721 int default_indices = bdcb->i2c_default_indices; 721 int default_indices = dcb->i2c_default_indices;
722 722
723 if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default) 723 if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
724 shift = 4; 724 shift = 4;
725 725
726 i2c_index = (default_indices >> shift) & 0xf; 726 i2c_index = (default_indices >> shift) & 0xf;
727 } 727 }
728 if (i2c_index == 0x80) /* g80+ */ 728 if (i2c_index == 0x80) /* g80+ */
729 i2c_index = bdcb->i2c_default_indices & 0xf; 729 i2c_index = dcb->i2c_default_indices & 0xf;
730 730
731 return nouveau_i2c_find(dev, i2c_index); 731 return nouveau_i2c_find(dev, i2c_index);
732} 732}
733 733
734static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv) 734static uint32_t
735get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
735{ 736{
736 /* 737 /*
737 * For mlv < 0x80, it is an index into a table of TMDS base addresses. 738 * For mlv < 0x80, it is an index into a table of TMDS base addresses.
@@ -744,6 +745,7 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
744 */ 745 */
745 746
746 struct drm_nouveau_private *dev_priv = dev->dev_private; 747 struct drm_nouveau_private *dev_priv = dev->dev_private;
748 struct nvbios *bios = &dev_priv->vbios;
747 const int pramdac_offset[13] = { 749 const int pramdac_offset[13] = {
748 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 }; 750 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
749 const uint32_t pramdac_table[4] = { 751 const uint32_t pramdac_table[4] = {
@@ -756,13 +758,12 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
756 dcb_entry = dcb_entry_idx_from_crtchead(dev); 758 dcb_entry = dcb_entry_idx_from_crtchead(dev);
757 if (dcb_entry == 0x7f) 759 if (dcb_entry == 0x7f)
758 return 0; 760 return 0;
759 dacoffset = pramdac_offset[ 761 dacoffset = pramdac_offset[bios->dcb.entry[dcb_entry].or];
760 dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or];
761 if (mlv == 0x81) 762 if (mlv == 0x81)
762 dacoffset ^= 8; 763 dacoffset ^= 8;
763 return 0x6808b0 + dacoffset; 764 return 0x6808b0 + dacoffset;
764 } else { 765 } else {
765 if (mlv > ARRAY_SIZE(pramdac_table)) { 766 if (mlv >= ARRAY_SIZE(pramdac_table)) {
766 NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n", 767 NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
767 mlv); 768 mlv);
768 return 0; 769 return 0;
@@ -2574,19 +2575,19 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2574 2575
2575 const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; 2576 const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
2576 const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; 2577 const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
2577 const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr]; 2578 const uint8_t *gpio_table = &bios->data[bios->dcb.gpio_table_ptr];
2578 const uint8_t *gpio_entry; 2579 const uint8_t *gpio_entry;
2579 int i; 2580 int i;
2580 2581
2581 if (!iexec->execute) 2582 if (!iexec->execute)
2582 return 1; 2583 return 1;
2583 2584
2584 if (bios->bdcb.version != 0x40) { 2585 if (bios->dcb.version != 0x40) {
2585 NV_ERROR(bios->dev, "DCB table not version 4.0\n"); 2586 NV_ERROR(bios->dev, "DCB table not version 4.0\n");
2586 return 0; 2587 return 0;
2587 } 2588 }
2588 2589
2589 if (!bios->bdcb.gpio_table_ptr) { 2590 if (!bios->dcb.gpio_table_ptr) {
2590 NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n"); 2591 NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
2591 return 0; 2592 return 0;
2592 } 2593 }
@@ -3123,7 +3124,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
3123 struct dcb_entry *dcbent, int head, bool dl) 3124 struct dcb_entry *dcbent, int head, bool dl)
3124{ 3125{
3125 struct drm_nouveau_private *dev_priv = dev->dev_private; 3126 struct drm_nouveau_private *dev_priv = dev->dev_private;
3126 struct nvbios *bios = &dev_priv->VBIOS; 3127 struct nvbios *bios = &dev_priv->vbios;
3127 struct init_exec iexec = {true, false}; 3128 struct init_exec iexec = {true, false};
3128 3129
3129 NV_TRACE(dev, "0x%04X: Parsing digital output script table\n", 3130 NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
@@ -3140,7 +3141,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
3140static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script) 3141static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
3141{ 3142{
3142 struct drm_nouveau_private *dev_priv = dev->dev_private; 3143 struct drm_nouveau_private *dev_priv = dev->dev_private;
3143 struct nvbios *bios = &dev_priv->VBIOS; 3144 struct nvbios *bios = &dev_priv->vbios;
3144 uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0); 3145 uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
3145 uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]); 3146 uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
3146 3147
@@ -3194,7 +3195,7 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
3194 * of a list of pxclks and script pointers. 3195 * of a list of pxclks and script pointers.
3195 */ 3196 */
3196 struct drm_nouveau_private *dev_priv = dev->dev_private; 3197 struct drm_nouveau_private *dev_priv = dev->dev_private;
3197 struct nvbios *bios = &dev_priv->VBIOS; 3198 struct nvbios *bios = &dev_priv->vbios;
3198 unsigned int outputset = (dcbent->or == 4) ? 1 : 0; 3199 unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
3199 uint16_t scriptptr = 0, clktable; 3200 uint16_t scriptptr = 0, clktable;
3200 uint8_t clktableptr = 0; 3201 uint8_t clktableptr = 0;
@@ -3261,7 +3262,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
3261 */ 3262 */
3262 3263
3263 struct drm_nouveau_private *dev_priv = dev->dev_private; 3264 struct drm_nouveau_private *dev_priv = dev->dev_private;
3264 struct nvbios *bios = &dev_priv->VBIOS; 3265 struct nvbios *bios = &dev_priv->vbios;
3265 uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; 3266 uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
3266 uint32_t sel_clk_binding, sel_clk; 3267 uint32_t sel_clk_binding, sel_clk;
3267 int ret; 3268 int ret;
@@ -3395,7 +3396,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
3395#ifndef __powerpc__ 3396#ifndef __powerpc__
3396 NV_ERROR(dev, "Pointer to flat panel table invalid\n"); 3397 NV_ERROR(dev, "Pointer to flat panel table invalid\n");
3397#endif 3398#endif
3398 bios->pub.digital_min_front_porch = 0x4b; 3399 bios->digital_min_front_porch = 0x4b;
3399 return 0; 3400 return 0;
3400 } 3401 }
3401 3402
@@ -3428,7 +3429,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
3428 * fptable[4] is the minimum 3429 * fptable[4] is the minimum
3429 * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap 3430 * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
3430 */ 3431 */
3431 bios->pub.digital_min_front_porch = fptable[4]; 3432 bios->digital_min_front_porch = fptable[4];
3432 ofs = -7; 3433 ofs = -7;
3433 break; 3434 break;
3434 default: 3435 default:
@@ -3467,7 +3468,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
3467 3468
3468 /* nv4x cards need both a strap value and fpindex of 0xf to use DDC */ 3469 /* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
3469 if (lth.lvds_ver > 0x10) 3470 if (lth.lvds_ver > 0x10)
3470 bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf; 3471 bios->fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
3471 3472
3472 /* 3473 /*
3473 * If either the strap or xlated fpindex value are 0xf there is no 3474 * If either the strap or xlated fpindex value are 0xf there is no
@@ -3491,7 +3492,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
3491bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode) 3492bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
3492{ 3493{
3493 struct drm_nouveau_private *dev_priv = dev->dev_private; 3494 struct drm_nouveau_private *dev_priv = dev->dev_private;
3494 struct nvbios *bios = &dev_priv->VBIOS; 3495 struct nvbios *bios = &dev_priv->vbios;
3495 uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr]; 3496 uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
3496 3497
3497 if (!mode) /* just checking whether we can produce a mode */ 3498 if (!mode) /* just checking whether we can produce a mode */
@@ -3562,11 +3563,11 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
3562 * until later, when this function should be called with non-zero pxclk 3563 * until later, when this function should be called with non-zero pxclk
3563 */ 3564 */
3564 struct drm_nouveau_private *dev_priv = dev->dev_private; 3565 struct drm_nouveau_private *dev_priv = dev->dev_private;
3565 struct nvbios *bios = &dev_priv->VBIOS; 3566 struct nvbios *bios = &dev_priv->vbios;
3566 int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0; 3567 int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
3567 struct lvdstableheader lth; 3568 struct lvdstableheader lth;
3568 uint16_t lvdsofs; 3569 uint16_t lvdsofs;
3569 int ret, chip_version = bios->pub.chip_version; 3570 int ret, chip_version = bios->chip_version;
3570 3571
3571 ret = parse_lvds_manufacturer_table_header(dev, bios, &lth); 3572 ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
3572 if (ret) 3573 if (ret)
@@ -3682,7 +3683,7 @@ bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
3682 uint16_t record, int record_len, int record_nr) 3683 uint16_t record, int record_len, int record_nr)
3683{ 3684{
3684 struct drm_nouveau_private *dev_priv = dev->dev_private; 3685 struct drm_nouveau_private *dev_priv = dev->dev_private;
3685 struct nvbios *bios = &dev_priv->VBIOS; 3686 struct nvbios *bios = &dev_priv->vbios;
3686 uint32_t entry; 3687 uint32_t entry;
3687 uint16_t table; 3688 uint16_t table;
3688 int i, v; 3689 int i, v;
@@ -3716,7 +3717,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
3716 int *length) 3717 int *length)
3717{ 3718{
3718 struct drm_nouveau_private *dev_priv = dev->dev_private; 3719 struct drm_nouveau_private *dev_priv = dev->dev_private;
3719 struct nvbios *bios = &dev_priv->VBIOS; 3720 struct nvbios *bios = &dev_priv->vbios;
3720 uint8_t *table; 3721 uint8_t *table;
3721 3722
3722 if (!bios->display.dp_table_ptr) { 3723 if (!bios->display.dp_table_ptr) {
@@ -3725,7 +3726,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
3725 } 3726 }
3726 table = &bios->data[bios->display.dp_table_ptr]; 3727 table = &bios->data[bios->display.dp_table_ptr];
3727 3728
3728 if (table[0] != 0x21) { 3729 if (table[0] != 0x20 && table[0] != 0x21) {
3729 NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n", 3730 NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
3730 table[0]); 3731 table[0]);
3731 return NULL; 3732 return NULL;
@@ -3765,7 +3766,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3765 */ 3766 */
3766 3767
3767 struct drm_nouveau_private *dev_priv = dev->dev_private; 3768 struct drm_nouveau_private *dev_priv = dev->dev_private;
3768 struct nvbios *bios = &dev_priv->VBIOS; 3769 struct nvbios *bios = &dev_priv->vbios;
3769 uint8_t *table = &bios->data[bios->display.script_table_ptr]; 3770 uint8_t *table = &bios->data[bios->display.script_table_ptr];
3770 uint8_t *otable = NULL; 3771 uint8_t *otable = NULL;
3771 uint16_t script; 3772 uint16_t script;
@@ -3918,8 +3919,8 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
3918 */ 3919 */
3919 3920
3920 struct drm_nouveau_private *dev_priv = dev->dev_private; 3921 struct drm_nouveau_private *dev_priv = dev->dev_private;
3921 struct nvbios *bios = &dev_priv->VBIOS; 3922 struct nvbios *bios = &dev_priv->vbios;
3922 int cv = bios->pub.chip_version; 3923 int cv = bios->chip_version;
3923 uint16_t clktable = 0, scriptptr; 3924 uint16_t clktable = 0, scriptptr;
3924 uint32_t sel_clk_binding, sel_clk; 3925 uint32_t sel_clk_binding, sel_clk;
3925 3926
@@ -3978,8 +3979,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
3978 */ 3979 */
3979 3980
3980 struct drm_nouveau_private *dev_priv = dev->dev_private; 3981 struct drm_nouveau_private *dev_priv = dev->dev_private;
3981 struct nvbios *bios = &dev_priv->VBIOS; 3982 struct nvbios *bios = &dev_priv->vbios;
3982 int cv = bios->pub.chip_version, pllindex = 0; 3983 int cv = bios->chip_version, pllindex = 0;
3983 uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0; 3984 uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
3984 uint32_t crystal_strap_mask, crystal_straps; 3985 uint32_t crystal_strap_mask, crystal_straps;
3985 3986
@@ -4332,7 +4333,7 @@ static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint
4332 */ 4333 */
4333 4334
4334 bios->major_version = bios->data[offset + 3]; 4335 bios->major_version = bios->data[offset + 3];
4335 bios->pub.chip_version = bios->data[offset + 2]; 4336 bios->chip_version = bios->data[offset + 2];
4336 NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n", 4337 NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
4337 bios->data[offset + 3], bios->data[offset + 2], 4338 bios->data[offset + 3], bios->data[offset + 2],
4338 bios->data[offset + 1], bios->data[offset]); 4339 bios->data[offset + 1], bios->data[offset]);
@@ -4402,7 +4403,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
4402 } 4403 }
4403 4404
4404 /* First entry is normal dac, 2nd tv-out perhaps? */ 4405 /* First entry is normal dac, 2nd tv-out perhaps? */
4405 bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff; 4406 bios->dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
4406 4407
4407 return 0; 4408 return 0;
4408} 4409}
@@ -4526,8 +4527,8 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
4526 return -ENOSYS; 4527 return -ENOSYS;
4527 } 4528 }
4528 4529
4529 bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]); 4530 bios->dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
4530 bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]); 4531 bios->tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
4531 4532
4532 return 0; 4533 return 0;
4533} 4534}
@@ -4796,11 +4797,11 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
4796 uint16_t legacy_scripts_offset, legacy_i2c_offset; 4797 uint16_t legacy_scripts_offset, legacy_i2c_offset;
4797 4798
4798 /* load needed defaults in case we can't parse this info */ 4799 /* load needed defaults in case we can't parse this info */
4799 bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX; 4800 bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
4800 bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX; 4801 bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
4801 bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX; 4802 bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
4802 bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX; 4803 bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
4803 bios->pub.digital_min_front_porch = 0x4b; 4804 bios->digital_min_front_porch = 0x4b;
4804 bios->fmaxvco = 256000; 4805 bios->fmaxvco = 256000;
4805 bios->fminvco = 128000; 4806 bios->fminvco = 128000;
4806 bios->fp.duallink_transition_clk = 90000; 4807 bios->fp.duallink_transition_clk = 90000;
@@ -4907,10 +4908,10 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
4907 bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset]; 4908 bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
4908 bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1]; 4909 bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
4909 bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2]; 4910 bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
4910 bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4]; 4911 bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
4911 bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5]; 4912 bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
4912 bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6]; 4913 bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
4913 bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7]; 4914 bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
4914 4915
4915 if (bmplength > 74) { 4916 if (bmplength > 74) {
4916 bios->fmaxvco = ROM32(bmp[67]); 4917 bios->fmaxvco = ROM32(bmp[67]);
@@ -4984,7 +4985,8 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
4984 else 4985 else
4985 NV_WARN(dev, 4986 NV_WARN(dev,
4986 "DCB I2C table has more entries than indexable " 4987 "DCB I2C table has more entries than indexable "
4987 "(%d entries, max index 15)\n", i2ctable[2]); 4988 "(%d entries, max %d)\n", i2ctable[2],
4989 DCB_MAX_NUM_I2C_ENTRIES);
4988 entry_len = i2ctable[3]; 4990 entry_len = i2ctable[3];
4989 /* [4] is i2c_default_indices, read in parse_dcb_table() */ 4991 /* [4] is i2c_default_indices, read in parse_dcb_table() */
4990 } 4992 }
@@ -5000,8 +5002,8 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
5000 5002
5001 if (index == 0xf) 5003 if (index == 0xf)
5002 return 0; 5004 return 0;
5003 if (index > i2c_entries) { 5005 if (index >= i2c_entries) {
5004 NV_ERROR(dev, "DCB I2C index too big (%d > %d)\n", 5006 NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
5005 index, i2ctable[2]); 5007 index, i2ctable[2]);
5006 return -ENOENT; 5008 return -ENOENT;
5007 } 5009 }
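
Editor's note: the hunk above tightens the DCB I2C index check from ">" to ">=". With i2c_entries valid slots the usable indices are 0 .. i2c_entries-1, so index == i2c_entries is already out of range and must be rejected too. A minimal standalone sketch of the same guard, using illustrative names of my own rather than the driver's:

	#include <stdio.h>

	#define MAX_ENTRIES 16	/* hypothetical table size, not the DCB constant */

	static int lookup_entry(const int *table, int entries, int index)
	{
		/*
		 * Valid indices run 0 .. entries-1, so the guard must also
		 * reject index == entries -- hence ">=" rather than ">".
		 */
		if (index >= entries) {
			fprintf(stderr, "index too big (%d >= %d)\n", index, entries);
			return -1;
		}
		return table[index];
	}

	int main(void)
	{
		int table[MAX_ENTRIES] = { 10, 20, 30 };

		printf("%d\n", lookup_entry(table, 3, 2));	/* ok: 30 */
		printf("%d\n", lookup_entry(table, 3, 3));	/* rejected */
		return 0;
	}
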
@@ -5036,7 +5038,7 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
5036static struct dcb_gpio_entry * 5038static struct dcb_gpio_entry *
5037new_gpio_entry(struct nvbios *bios) 5039new_gpio_entry(struct nvbios *bios)
5038{ 5040{
5039 struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio; 5041 struct dcb_gpio_table *gpio = &bios->dcb.gpio;
5040 5042
5041 return &gpio->entry[gpio->entries++]; 5043 return &gpio->entry[gpio->entries++];
5042} 5044}
@@ -5045,14 +5047,14 @@ struct dcb_gpio_entry *
5045nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag) 5047nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
5046{ 5048{
5047 struct drm_nouveau_private *dev_priv = dev->dev_private; 5049 struct drm_nouveau_private *dev_priv = dev->dev_private;
5048 struct nvbios *bios = &dev_priv->VBIOS; 5050 struct nvbios *bios = &dev_priv->vbios;
5049 int i; 5051 int i;
5050 5052
5051 for (i = 0; i < bios->bdcb.gpio.entries; i++) { 5053 for (i = 0; i < bios->dcb.gpio.entries; i++) {
5052 if (bios->bdcb.gpio.entry[i].tag != tag) 5054 if (bios->dcb.gpio.entry[i].tag != tag)
5053 continue; 5055 continue;
5054 5056
5055 return &bios->bdcb.gpio.entry[i]; 5057 return &bios->dcb.gpio.entry[i];
5056 } 5058 }
5057 5059
5058 return NULL; 5060 return NULL;
@@ -5100,7 +5102,7 @@ static void
5100parse_dcb_gpio_table(struct nvbios *bios) 5102parse_dcb_gpio_table(struct nvbios *bios)
5101{ 5103{
5102 struct drm_device *dev = bios->dev; 5104 struct drm_device *dev = bios->dev;
5103 uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr; 5105 uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr;
5104 uint8_t *gpio_table = &bios->data[gpio_table_ptr]; 5106 uint8_t *gpio_table = &bios->data[gpio_table_ptr];
5105 int header_len = gpio_table[1], 5107 int header_len = gpio_table[1],
5106 entries = gpio_table[2], 5108 entries = gpio_table[2],
@@ -5108,7 +5110,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
5108 void (*parse_entry)(struct nvbios *, uint16_t) = NULL; 5110 void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
5109 int i; 5111 int i;
5110 5112
5111 if (bios->bdcb.version >= 0x40) { 5113 if (bios->dcb.version >= 0x40) {
5112 if (gpio_table_ptr && entry_len != 4) { 5114 if (gpio_table_ptr && entry_len != 4) {
5113 NV_WARN(dev, "Invalid DCB GPIO table entry length.\n"); 5115 NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
5114 return; 5116 return;
@@ -5116,7 +5118,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
5116 5118
5117 parse_entry = parse_dcb40_gpio_entry; 5119 parse_entry = parse_dcb40_gpio_entry;
5118 5120
5119 } else if (bios->bdcb.version >= 0x30) { 5121 } else if (bios->dcb.version >= 0x30) {
5120 if (gpio_table_ptr && entry_len != 2) { 5122 if (gpio_table_ptr && entry_len != 2) {
5121 NV_WARN(dev, "Invalid DCB GPIO table entry length.\n"); 5123 NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
5122 return; 5124 return;
@@ -5124,7 +5126,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
5124 5126
5125 parse_entry = parse_dcb30_gpio_entry; 5127 parse_entry = parse_dcb30_gpio_entry;
5126 5128
5127 } else if (bios->bdcb.version >= 0x22) { 5129 } else if (bios->dcb.version >= 0x22) {
5128 /* 5130 /*
5129 * DCBs older than v3.0 don't really have a GPIO 5131 * DCBs older than v3.0 don't really have a GPIO
5130 * table, instead they keep some GPIO info at fixed 5132 * table, instead they keep some GPIO info at fixed
@@ -5158,30 +5160,67 @@ struct dcb_connector_table_entry *
5158nouveau_bios_connector_entry(struct drm_device *dev, int index) 5160nouveau_bios_connector_entry(struct drm_device *dev, int index)
5159{ 5161{
5160 struct drm_nouveau_private *dev_priv = dev->dev_private; 5162 struct drm_nouveau_private *dev_priv = dev->dev_private;
5161 struct nvbios *bios = &dev_priv->VBIOS; 5163 struct nvbios *bios = &dev_priv->vbios;
5162 struct dcb_connector_table_entry *cte; 5164 struct dcb_connector_table_entry *cte;
5163 5165
5164 if (index >= bios->bdcb.connector.entries) 5166 if (index >= bios->dcb.connector.entries)
5165 return NULL; 5167 return NULL;
5166 5168
5167 cte = &bios->bdcb.connector.entry[index]; 5169 cte = &bios->dcb.connector.entry[index];
5168 if (cte->type == 0xff) 5170 if (cte->type == 0xff)
5169 return NULL; 5171 return NULL;
5170 5172
5171 return cte; 5173 return cte;
5172} 5174}
5173 5175
5176static enum dcb_connector_type
5177divine_connector_type(struct nvbios *bios, int index)
5178{
5179 struct dcb_table *dcb = &bios->dcb;
5180 unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
5181 int i;
5182
5183 for (i = 0; i < dcb->entries; i++) {
5184 if (dcb->entry[i].connector == index)
5185 encoders |= (1 << dcb->entry[i].type);
5186 }
5187
5188 if (encoders & (1 << OUTPUT_DP)) {
5189 if (encoders & (1 << OUTPUT_TMDS))
5190 type = DCB_CONNECTOR_DP;
5191 else
5192 type = DCB_CONNECTOR_eDP;
5193 } else
5194 if (encoders & (1 << OUTPUT_TMDS)) {
5195 if (encoders & (1 << OUTPUT_ANALOG))
5196 type = DCB_CONNECTOR_DVI_I;
5197 else
5198 type = DCB_CONNECTOR_DVI_D;
5199 } else
5200 if (encoders & (1 << OUTPUT_ANALOG)) {
5201 type = DCB_CONNECTOR_VGA;
5202 } else
5203 if (encoders & (1 << OUTPUT_LVDS)) {
5204 type = DCB_CONNECTOR_LVDS;
5205 } else
5206 if (encoders & (1 << OUTPUT_TV)) {
5207 type = DCB_CONNECTOR_TV_0;
5208 }
5209
5210 return type;
5211}
5212
5174static void 5213static void
5175parse_dcb_connector_table(struct nvbios *bios) 5214parse_dcb_connector_table(struct nvbios *bios)
5176{ 5215{
5177 struct drm_device *dev = bios->dev; 5216 struct drm_device *dev = bios->dev;
5178 struct dcb_connector_table *ct = &bios->bdcb.connector; 5217 struct dcb_connector_table *ct = &bios->dcb.connector;
5179 struct dcb_connector_table_entry *cte; 5218 struct dcb_connector_table_entry *cte;
5180 uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr]; 5219 uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
5181 uint8_t *entry; 5220 uint8_t *entry;
5182 int i; 5221 int i;
5183 5222
5184 if (!bios->bdcb.connector_table_ptr) { 5223 if (!bios->dcb.connector_table_ptr) {
5185 NV_DEBUG_KMS(dev, "No DCB connector table present\n"); 5224 NV_DEBUG_KMS(dev, "No DCB connector table present\n");
5186 return; 5225 return;
5187 } 5226 }
@@ -5203,6 +5242,7 @@ parse_dcb_connector_table(struct nvbios *bios)
5203 cte->entry = ROM16(entry[0]); 5242 cte->entry = ROM16(entry[0]);
5204 else 5243 else
5205 cte->entry = ROM32(entry[0]); 5244 cte->entry = ROM32(entry[0]);
5245
5206 cte->type = (cte->entry & 0x000000ff) >> 0; 5246 cte->type = (cte->entry & 0x000000ff) >> 0;
5207 cte->index = (cte->entry & 0x00000f00) >> 8; 5247 cte->index = (cte->entry & 0x00000f00) >> 8;
5208 switch (cte->entry & 0x00033000) { 5248 switch (cte->entry & 0x00033000) {
@@ -5228,10 +5268,33 @@ parse_dcb_connector_table(struct nvbios *bios)
5228 5268
5229 NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n", 5269 NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
5230 i, cte->entry, cte->type, cte->index, cte->gpio_tag); 5270 i, cte->entry, cte->type, cte->index, cte->gpio_tag);
5271
5272 /* check for known types, fallback to guessing the type
5273 * from attached encoders if we hit an unknown.
5274 */
5275 switch (cte->type) {
5276 case DCB_CONNECTOR_VGA:
5277 case DCB_CONNECTOR_TV_0:
5278 case DCB_CONNECTOR_TV_1:
5279 case DCB_CONNECTOR_TV_3:
5280 case DCB_CONNECTOR_DVI_I:
5281 case DCB_CONNECTOR_DVI_D:
5282 case DCB_CONNECTOR_LVDS:
5283 case DCB_CONNECTOR_DP:
5284 case DCB_CONNECTOR_eDP:
5285 case DCB_CONNECTOR_HDMI_0:
5286 case DCB_CONNECTOR_HDMI_1:
5287 break;
5288 default:
5289 cte->type = divine_connector_type(bios, cte->index);
5290 NV_WARN(dev, "unknown type, using 0x%02x", cte->type);
5291 break;
5292 }
5293
5231 } 5294 }
5232} 5295}
5233 5296
5234static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb) 5297static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
5235{ 5298{
5236 struct dcb_entry *entry = &dcb->entry[dcb->entries]; 5299 struct dcb_entry *entry = &dcb->entry[dcb->entries];
5237 5300
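
Editor's note: the new divine_connector_type() helper and the whitelist added to parse_dcb_connector_table() above let the driver fall back to guessing a connector type from the encoders wired to the same connector index whenever the BIOS reports a type it does not recognise. The sketch below reproduces that decision order as a standalone program; the enum names and printed values are simplified stand-ins of my own, not the driver's definitions:

	#include <stdio.h>

	/* Simplified stand-ins for the DCB encoder and connector types. */
	enum out { OUT_ANALOG = 0, OUT_TV = 1, OUT_TMDS = 2, OUT_LVDS = 3, OUT_DP = 6 };
	enum conn {
		CONN_VGA, CONN_TV, CONN_DVI_I, CONN_DVI_D,
		CONN_LVDS, CONN_DP, CONN_eDP, CONN_NONE
	};

	/* Guess a connector type from the bitmask of attached encoder types. */
	static enum conn guess_connector(unsigned encoders)
	{
		if (encoders & (1 << OUT_DP))
			/* DP with a TMDS sibling -> DP connector; DP alone -> eDP. */
			return (encoders & (1 << OUT_TMDS)) ? CONN_DP : CONN_eDP;
		if (encoders & (1 << OUT_TMDS))
			/* TMDS with analog -> DVI-I; TMDS alone -> DVI-D. */
			return (encoders & (1 << OUT_ANALOG)) ? CONN_DVI_I : CONN_DVI_D;
		if (encoders & (1 << OUT_ANALOG))
			return CONN_VGA;
		if (encoders & (1 << OUT_LVDS))
			return CONN_LVDS;
		if (encoders & (1 << OUT_TV))
			return CONN_TV;
		return CONN_NONE;
	}

	int main(void)
	{
		printf("%d\n", guess_connector((1 << OUT_TMDS) | (1 << OUT_ANALOG))); /* DVI-I */
		printf("%d\n", guess_connector(1 << OUT_DP));                         /* eDP */
		return 0;
	}
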
@@ -5241,7 +5304,7 @@ static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
5241 return entry; 5304 return entry;
5242} 5305}
5243 5306
5244static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads) 5307static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
5245{ 5308{
5246 struct dcb_entry *entry = new_dcb_entry(dcb); 5309 struct dcb_entry *entry = new_dcb_entry(dcb);
5247 5310
@@ -5252,7 +5315,7 @@ static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
5252 /* "or" mostly unused in early gen crt modesetting, 0 is fine */ 5315 /* "or" mostly unused in early gen crt modesetting, 0 is fine */
5253} 5316}
5254 5317
5255static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads) 5318static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
5256{ 5319{
5257 struct dcb_entry *entry = new_dcb_entry(dcb); 5320 struct dcb_entry *entry = new_dcb_entry(dcb);
5258 5321
@@ -5279,7 +5342,7 @@ static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
5279#endif 5342#endif
5280} 5343}
5281 5344
5282static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads) 5345static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
5283{ 5346{
5284 struct dcb_entry *entry = new_dcb_entry(dcb); 5347 struct dcb_entry *entry = new_dcb_entry(dcb);
5285 5348
@@ -5290,13 +5353,13 @@ static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
5290} 5353}
5291 5354
5292static bool 5355static bool
5293parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, 5356parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
5294 uint32_t conn, uint32_t conf, struct dcb_entry *entry) 5357 uint32_t conn, uint32_t conf, struct dcb_entry *entry)
5295{ 5358{
5296 entry->type = conn & 0xf; 5359 entry->type = conn & 0xf;
5297 entry->i2c_index = (conn >> 4) & 0xf; 5360 entry->i2c_index = (conn >> 4) & 0xf;
5298 entry->heads = (conn >> 8) & 0xf; 5361 entry->heads = (conn >> 8) & 0xf;
5299 if (bdcb->version >= 0x40) 5362 if (dcb->version >= 0x40)
5300 entry->connector = (conn >> 12) & 0xf; 5363 entry->connector = (conn >> 12) & 0xf;
5301 entry->bus = (conn >> 16) & 0xf; 5364 entry->bus = (conn >> 16) & 0xf;
5302 entry->location = (conn >> 20) & 0x3; 5365 entry->location = (conn >> 20) & 0x3;
@@ -5314,7 +5377,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5314 * Although the rest of a CRT conf dword is usually 5377 * Although the rest of a CRT conf dword is usually
5315 * zeros, mac biosen have stuff there so we must mask 5378 * zeros, mac biosen have stuff there so we must mask
5316 */ 5379 */
5317 entry->crtconf.maxfreq = (bdcb->version < 0x30) ? 5380 entry->crtconf.maxfreq = (dcb->version < 0x30) ?
5318 (conf & 0xffff) * 10 : 5381 (conf & 0xffff) * 10 :
5319 (conf & 0xff) * 10000; 5382 (conf & 0xff) * 10000;
5320 break; 5383 break;
@@ -5323,7 +5386,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5323 uint32_t mask; 5386 uint32_t mask;
5324 if (conf & 0x1) 5387 if (conf & 0x1)
5325 entry->lvdsconf.use_straps_for_mode = true; 5388 entry->lvdsconf.use_straps_for_mode = true;
5326 if (bdcb->version < 0x22) { 5389 if (dcb->version < 0x22) {
5327 mask = ~0xd; 5390 mask = ~0xd;
5328 /* 5391 /*
5329 * The laptop in bug 14567 lies and claims to not use 5392 * The laptop in bug 14567 lies and claims to not use
@@ -5347,7 +5410,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5347 * Until we even try to use these on G8x, it's 5410 * Until we even try to use these on G8x, it's
5348 * useless reporting unknown bits. They all are. 5411 * useless reporting unknown bits. They all are.
5349 */ 5412 */
5350 if (bdcb->version >= 0x40) 5413 if (dcb->version >= 0x40)
5351 break; 5414 break;
5352 5415
5353 NV_ERROR(dev, "Unknown LVDS configuration bits, " 5416 NV_ERROR(dev, "Unknown LVDS configuration bits, "
@@ -5357,7 +5420,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5357 } 5420 }
5358 case OUTPUT_TV: 5421 case OUTPUT_TV:
5359 { 5422 {
5360 if (bdcb->version >= 0x30) 5423 if (dcb->version >= 0x30)
5361 entry->tvconf.has_component_output = conf & (0x8 << 4); 5424 entry->tvconf.has_component_output = conf & (0x8 << 4);
5362 else 5425 else
5363 entry->tvconf.has_component_output = false; 5426 entry->tvconf.has_component_output = false;
@@ -5384,8 +5447,10 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5384 break; 5447 break;
5385 case 0xe: 5448 case 0xe:
5386 /* weird g80 mobile type that "nv" treats as a terminator */ 5449 /* weird g80 mobile type that "nv" treats as a terminator */
5387 bdcb->dcb.entries--; 5450 dcb->entries--;
5388 return false; 5451 return false;
5452 default:
5453 break;
5389 } 5454 }
5390 5455
5391 /* unsure what DCB version introduces this, 3.0? */ 5456 /* unsure what DCB version introduces this, 3.0? */
@@ -5396,7 +5461,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5396} 5461}
5397 5462
5398static bool 5463static bool
5399parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, 5464parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
5400 uint32_t conn, uint32_t conf, struct dcb_entry *entry) 5465 uint32_t conn, uint32_t conf, struct dcb_entry *entry)
5401{ 5466{
5402 switch (conn & 0x0000000f) { 5467 switch (conn & 0x0000000f) {
@@ -5462,27 +5527,27 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
5462 return true; 5527 return true;
5463} 5528}
5464 5529
5465static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, 5530static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
5466 uint32_t conn, uint32_t conf) 5531 uint32_t conn, uint32_t conf)
5467{ 5532{
5468 struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb); 5533 struct dcb_entry *entry = new_dcb_entry(dcb);
5469 bool ret; 5534 bool ret;
5470 5535
5471 if (bdcb->version >= 0x20) 5536 if (dcb->version >= 0x20)
5472 ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry); 5537 ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
5473 else 5538 else
5474 ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry); 5539 ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
5475 if (!ret) 5540 if (!ret)
5476 return ret; 5541 return ret;
5477 5542
5478 read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table, 5543 read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
5479 entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]); 5544 entry->i2c_index, &dcb->i2c[entry->i2c_index]);
5480 5545
5481 return true; 5546 return true;
5482} 5547}
5483 5548
5484static 5549static
5485void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb) 5550void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
5486{ 5551{
5487 /* 5552 /*
5488 * DCB v2.0 lists each output combination separately. 5553 * DCB v2.0 lists each output combination separately.
@@ -5534,8 +5599,7 @@ static int
5534parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) 5599parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5535{ 5600{
5536 struct drm_nouveau_private *dev_priv = dev->dev_private; 5601 struct drm_nouveau_private *dev_priv = dev->dev_private;
5537 struct bios_parsed_dcb *bdcb = &bios->bdcb; 5602 struct dcb_table *dcb = &bios->dcb;
5538 struct parsed_dcb *dcb;
5539 uint16_t dcbptr = 0, i2ctabptr = 0; 5603 uint16_t dcbptr = 0, i2ctabptr = 0;
5540 uint8_t *dcbtable; 5604 uint8_t *dcbtable;
5541 uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES; 5605 uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
@@ -5543,9 +5607,6 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5543 int recordlength = 8, confofs = 4; 5607 int recordlength = 8, confofs = 4;
5544 int i; 5608 int i;
5545 5609
5546 dcb = bios->pub.dcb = &bdcb->dcb;
5547 dcb->entries = 0;
5548
5549 /* get the offset from 0x36 */ 5610 /* get the offset from 0x36 */
5550 if (dev_priv->card_type > NV_04) { 5611 if (dev_priv->card_type > NV_04) {
5551 dcbptr = ROM16(bios->data[0x36]); 5612 dcbptr = ROM16(bios->data[0x36]);
@@ -5567,21 +5628,21 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5567 dcbtable = &bios->data[dcbptr]; 5628 dcbtable = &bios->data[dcbptr];
5568 5629
5569 /* get DCB version */ 5630 /* get DCB version */
5570 bdcb->version = dcbtable[0]; 5631 dcb->version = dcbtable[0];
5571 NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n", 5632 NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
5572 bdcb->version >> 4, bdcb->version & 0xf); 5633 dcb->version >> 4, dcb->version & 0xf);
5573 5634
5574 if (bdcb->version >= 0x20) { /* NV17+ */ 5635 if (dcb->version >= 0x20) { /* NV17+ */
5575 uint32_t sig; 5636 uint32_t sig;
5576 5637
5577 if (bdcb->version >= 0x30) { /* NV40+ */ 5638 if (dcb->version >= 0x30) { /* NV40+ */
5578 headerlen = dcbtable[1]; 5639 headerlen = dcbtable[1];
5579 entries = dcbtable[2]; 5640 entries = dcbtable[2];
5580 recordlength = dcbtable[3]; 5641 recordlength = dcbtable[3];
5581 i2ctabptr = ROM16(dcbtable[4]); 5642 i2ctabptr = ROM16(dcbtable[4]);
5582 sig = ROM32(dcbtable[6]); 5643 sig = ROM32(dcbtable[6]);
5583 bdcb->gpio_table_ptr = ROM16(dcbtable[10]); 5644 dcb->gpio_table_ptr = ROM16(dcbtable[10]);
5584 bdcb->connector_table_ptr = ROM16(dcbtable[20]); 5645 dcb->connector_table_ptr = ROM16(dcbtable[20]);
5585 } else { 5646 } else {
5586 i2ctabptr = ROM16(dcbtable[2]); 5647 i2ctabptr = ROM16(dcbtable[2]);
5587 sig = ROM32(dcbtable[4]); 5648 sig = ROM32(dcbtable[4]);
@@ -5593,7 +5654,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5593 "signature (%08X)\n", sig); 5654 "signature (%08X)\n", sig);
5594 return -EINVAL; 5655 return -EINVAL;
5595 } 5656 }
5596 } else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */ 5657 } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
5597 char sig[8] = { 0 }; 5658 char sig[8] = { 0 };
5598 5659
5599 strncpy(sig, (char *)&dcbtable[-7], 7); 5660 strncpy(sig, (char *)&dcbtable[-7], 7);
@@ -5641,14 +5702,11 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5641 if (!i2ctabptr) 5702 if (!i2ctabptr)
5642 NV_WARN(dev, "No pointer to DCB I2C port table\n"); 5703 NV_WARN(dev, "No pointer to DCB I2C port table\n");
5643 else { 5704 else {
5644 bdcb->i2c_table = &bios->data[i2ctabptr]; 5705 dcb->i2c_table = &bios->data[i2ctabptr];
5645 if (bdcb->version >= 0x30) 5706 if (dcb->version >= 0x30)
5646 bdcb->i2c_default_indices = bdcb->i2c_table[4]; 5707 dcb->i2c_default_indices = dcb->i2c_table[4];
5647 } 5708 }
5648 5709
5649 parse_dcb_gpio_table(bios);
5650 parse_dcb_connector_table(bios);
5651
5652 if (entries > DCB_MAX_NUM_ENTRIES) 5710 if (entries > DCB_MAX_NUM_ENTRIES)
5653 entries = DCB_MAX_NUM_ENTRIES; 5711 entries = DCB_MAX_NUM_ENTRIES;
5654 5712
@@ -5673,7 +5731,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5673 NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n", 5731 NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
5674 dcb->entries, connection, config); 5732 dcb->entries, connection, config);
5675 5733
5676 if (!parse_dcb_entry(dev, bdcb, connection, config)) 5734 if (!parse_dcb_entry(dev, dcb, connection, config))
5677 break; 5735 break;
5678 } 5736 }
5679 5737
@@ -5681,18 +5739,22 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5681 * apart for v2.1+ not being known for requiring merging, this 5739 * apart for v2.1+ not being known for requiring merging, this
5682 * guarantees dcbent->index is the index of the entry in the rom image 5740 * guarantees dcbent->index is the index of the entry in the rom image
5683 */ 5741 */
5684 if (bdcb->version < 0x21) 5742 if (dcb->version < 0x21)
5685 merge_like_dcb_entries(dev, dcb); 5743 merge_like_dcb_entries(dev, dcb);
5686 5744
5687 return dcb->entries ? 0 : -ENXIO; 5745 if (!dcb->entries)
5746 return -ENXIO;
5747
5748 parse_dcb_gpio_table(bios);
5749 parse_dcb_connector_table(bios);
5750 return 0;
5688} 5751}
5689 5752
5690static void 5753static void
5691fixup_legacy_connector(struct nvbios *bios) 5754fixup_legacy_connector(struct nvbios *bios)
5692{ 5755{
5693 struct bios_parsed_dcb *bdcb = &bios->bdcb; 5756 struct dcb_table *dcb = &bios->dcb;
5694 struct parsed_dcb *dcb = &bdcb->dcb; 5757 int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
5695 int high = 0, i;
5696 5758
5697 /* 5759 /*
5698 * DCB 3.0 also has the table in most cases, but there are some cards 5760 * DCB 3.0 also has the table in most cases, but there are some cards
@@ -5700,9 +5762,11 @@ fixup_legacy_connector(struct nvbios *bios)
5700 * indices are all 0. We don't need the connector indices on pre-G80 5762 * indices are all 0. We don't need the connector indices on pre-G80
5701 * chips (yet?) so limit the use to DCB 4.0 and above. 5763 * chips (yet?) so limit the use to DCB 4.0 and above.
5702 */ 5764 */
5703 if (bdcb->version >= 0x40) 5765 if (dcb->version >= 0x40)
5704 return; 5766 return;
5705 5767
5768 dcb->connector.entries = 0;
5769
5706 /* 5770 /*
5707 * No known connector info before v3.0, so make it up. the rule here 5771 * No known connector info before v3.0, so make it up. the rule here
5708 * is: anything on the same i2c bus is considered to be on the same 5772 * is: anything on the same i2c bus is considered to be on the same
@@ -5710,37 +5774,38 @@ fixup_legacy_connector(struct nvbios *bios)
5710 * its own unique connector index. 5774 * its own unique connector index.
5711 */ 5775 */
5712 for (i = 0; i < dcb->entries; i++) { 5776 for (i = 0; i < dcb->entries; i++) {
5713 if (dcb->entry[i].i2c_index == 0xf)
5714 continue;
5715
5716 /* 5777 /*
5717 * Ignore the I2C index for on-chip TV-out, as there 5778 * Ignore the I2C index for on-chip TV-out, as there
5718 * are cards with bogus values (nv31m in bug 23212), 5779 * are cards with bogus values (nv31m in bug 23212),
5719 * and it's otherwise useless. 5780 * and it's otherwise useless.
5720 */ 5781 */
5721 if (dcb->entry[i].type == OUTPUT_TV && 5782 if (dcb->entry[i].type == OUTPUT_TV &&
5722 dcb->entry[i].location == DCB_LOC_ON_CHIP) { 5783 dcb->entry[i].location == DCB_LOC_ON_CHIP)
5723 dcb->entry[i].i2c_index = 0xf; 5784 dcb->entry[i].i2c_index = 0xf;
5785 i2c = dcb->entry[i].i2c_index;
5786
5787 if (i2c_conn[i2c]) {
5788 dcb->entry[i].connector = i2c_conn[i2c] - 1;
5724 continue; 5789 continue;
5725 } 5790 }
5726 5791
5727 dcb->entry[i].connector = dcb->entry[i].i2c_index; 5792 dcb->entry[i].connector = dcb->connector.entries++;
5728 if (dcb->entry[i].connector > high) 5793 if (i2c != 0xf)
5729 high = dcb->entry[i].connector; 5794 i2c_conn[i2c] = dcb->connector.entries;
5730 } 5795 }
5731 5796
5732 for (i = 0; i < dcb->entries; i++) { 5797 /* Fake the connector table as well as just connector indices */
5733 if (dcb->entry[i].i2c_index != 0xf) 5798 for (i = 0; i < dcb->connector.entries; i++) {
5734 continue; 5799 dcb->connector.entry[i].index = i;
5735 5800 dcb->connector.entry[i].type = divine_connector_type(bios, i);
5736 dcb->entry[i].connector = ++high; 5801 dcb->connector.entry[i].gpio_tag = 0xff;
5737 } 5802 }
5738} 5803}
5739 5804
5740static void 5805static void
5741fixup_legacy_i2c(struct nvbios *bios) 5806fixup_legacy_i2c(struct nvbios *bios)
5742{ 5807{
5743 struct parsed_dcb *dcb = &bios->bdcb.dcb; 5808 struct dcb_table *dcb = &bios->dcb;
5744 int i; 5809 int i;
5745 5810
5746 for (i = 0; i < dcb->entries; i++) { 5811 for (i = 0; i < dcb->entries; i++) {
@@ -5826,7 +5891,7 @@ static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
5826uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev) 5891uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
5827{ 5892{
5828 struct drm_nouveau_private *dev_priv = dev->dev_private; 5893 struct drm_nouveau_private *dev_priv = dev->dev_private;
5829 struct nvbios *bios = &dev_priv->VBIOS; 5894 struct nvbios *bios = &dev_priv->vbios;
5830 const uint8_t edid_sig[] = { 5895 const uint8_t edid_sig[] = {
5831 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; 5896 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
5832 uint16_t offset = 0; 5897 uint16_t offset = 0;
@@ -5859,7 +5924,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
5859 struct dcb_entry *dcbent) 5924 struct dcb_entry *dcbent)
5860{ 5925{
5861 struct drm_nouveau_private *dev_priv = dev->dev_private; 5926 struct drm_nouveau_private *dev_priv = dev->dev_private;
5862 struct nvbios *bios = &dev_priv->VBIOS; 5927 struct nvbios *bios = &dev_priv->vbios;
5863 struct init_exec iexec = { true, false }; 5928 struct init_exec iexec = { true, false };
5864 5929
5865 mutex_lock(&bios->lock); 5930 mutex_lock(&bios->lock);
@@ -5872,7 +5937,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
5872static bool NVInitVBIOS(struct drm_device *dev) 5937static bool NVInitVBIOS(struct drm_device *dev)
5873{ 5938{
5874 struct drm_nouveau_private *dev_priv = dev->dev_private; 5939 struct drm_nouveau_private *dev_priv = dev->dev_private;
5875 struct nvbios *bios = &dev_priv->VBIOS; 5940 struct nvbios *bios = &dev_priv->vbios;
5876 5941
5877 memset(bios, 0, sizeof(struct nvbios)); 5942 memset(bios, 0, sizeof(struct nvbios));
5878 mutex_init(&bios->lock); 5943 mutex_init(&bios->lock);
@@ -5888,7 +5953,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
5888static int nouveau_parse_vbios_struct(struct drm_device *dev) 5953static int nouveau_parse_vbios_struct(struct drm_device *dev)
5889{ 5954{
5890 struct drm_nouveau_private *dev_priv = dev->dev_private; 5955 struct drm_nouveau_private *dev_priv = dev->dev_private;
5891 struct nvbios *bios = &dev_priv->VBIOS; 5956 struct nvbios *bios = &dev_priv->vbios;
5892 const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' }; 5957 const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
5893 const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 }; 5958 const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
5894 int offset; 5959 int offset;
@@ -5915,7 +5980,7 @@ int
5915nouveau_run_vbios_init(struct drm_device *dev) 5980nouveau_run_vbios_init(struct drm_device *dev)
5916{ 5981{
5917 struct drm_nouveau_private *dev_priv = dev->dev_private; 5982 struct drm_nouveau_private *dev_priv = dev->dev_private;
5918 struct nvbios *bios = &dev_priv->VBIOS; 5983 struct nvbios *bios = &dev_priv->vbios;
5919 int i, ret = 0; 5984 int i, ret = 0;
5920 5985
5921 NVLockVgaCrtcs(dev, false); 5986 NVLockVgaCrtcs(dev, false);
@@ -5946,9 +6011,9 @@ nouveau_run_vbios_init(struct drm_device *dev)
5946 } 6011 }
5947 6012
5948 if (dev_priv->card_type >= NV_50) { 6013 if (dev_priv->card_type >= NV_50) {
5949 for (i = 0; i < bios->bdcb.dcb.entries; i++) { 6014 for (i = 0; i < bios->dcb.entries; i++) {
5950 nouveau_bios_run_display_table(dev, 6015 nouveau_bios_run_display_table(dev,
5951 &bios->bdcb.dcb.entry[i], 6016 &bios->dcb.entry[i],
5952 0, 0); 6017 0, 0);
5953 } 6018 }
5954 } 6019 }
@@ -5962,11 +6027,11 @@ static void
5962nouveau_bios_i2c_devices_takedown(struct drm_device *dev) 6027nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
5963{ 6028{
5964 struct drm_nouveau_private *dev_priv = dev->dev_private; 6029 struct drm_nouveau_private *dev_priv = dev->dev_private;
5965 struct nvbios *bios = &dev_priv->VBIOS; 6030 struct nvbios *bios = &dev_priv->vbios;
5966 struct dcb_i2c_entry *entry; 6031 struct dcb_i2c_entry *entry;
5967 int i; 6032 int i;
5968 6033
5969 entry = &bios->bdcb.dcb.i2c[0]; 6034 entry = &bios->dcb.i2c[0];
5970 for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++) 6035 for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
5971 nouveau_i2c_fini(dev, entry); 6036 nouveau_i2c_fini(dev, entry);
5972} 6037}
@@ -5975,13 +6040,11 @@ int
5975nouveau_bios_init(struct drm_device *dev) 6040nouveau_bios_init(struct drm_device *dev)
5976{ 6041{
5977 struct drm_nouveau_private *dev_priv = dev->dev_private; 6042 struct drm_nouveau_private *dev_priv = dev->dev_private;
5978 struct nvbios *bios = &dev_priv->VBIOS; 6043 struct nvbios *bios = &dev_priv->vbios;
5979 uint32_t saved_nv_pextdev_boot_0; 6044 uint32_t saved_nv_pextdev_boot_0;
5980 bool was_locked; 6045 bool was_locked;
5981 int ret; 6046 int ret;
5982 6047
5983 dev_priv->vbios = &bios->pub;
5984
5985 if (!NVInitVBIOS(dev)) 6048 if (!NVInitVBIOS(dev))
5986 return -ENODEV; 6049 return -ENODEV;
5987 6050
@@ -6023,10 +6086,8 @@ nouveau_bios_init(struct drm_device *dev)
6023 bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0); 6086 bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);
6024 6087
6025 ret = nouveau_run_vbios_init(dev); 6088 ret = nouveau_run_vbios_init(dev);
6026 if (ret) { 6089 if (ret)
6027 dev_priv->vbios = NULL;
6028 return ret; 6090 return ret;
6029 }
6030 6091
6031 /* feature_byte on BMP is poor, but init always sets CR4B */ 6092 /* feature_byte on BMP is poor, but init always sets CR4B */
6032 was_locked = NVLockVgaCrtcs(dev, false); 6093 was_locked = NVLockVgaCrtcs(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index fd94bd6dc264..9f688aa9a655 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -34,9 +34,67 @@
34 34
35#define DCB_LOC_ON_CHIP 0 35#define DCB_LOC_ON_CHIP 0
36 36
37struct dcb_i2c_entry {
38 uint8_t port_type;
39 uint8_t read, write;
40 struct nouveau_i2c_chan *chan;
41};
42
43enum dcb_gpio_tag {
44 DCB_GPIO_TVDAC0 = 0xc,
45 DCB_GPIO_TVDAC1 = 0x2d,
46};
47
48struct dcb_gpio_entry {
49 enum dcb_gpio_tag tag;
50 int line;
51 bool invert;
52};
53
54struct dcb_gpio_table {
55 int entries;
56 struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
57};
58
59enum dcb_connector_type {
60 DCB_CONNECTOR_VGA = 0x00,
61 DCB_CONNECTOR_TV_0 = 0x10,
62 DCB_CONNECTOR_TV_1 = 0x11,
63 DCB_CONNECTOR_TV_3 = 0x13,
64 DCB_CONNECTOR_DVI_I = 0x30,
65 DCB_CONNECTOR_DVI_D = 0x31,
66 DCB_CONNECTOR_LVDS = 0x40,
67 DCB_CONNECTOR_DP = 0x46,
68 DCB_CONNECTOR_eDP = 0x47,
69 DCB_CONNECTOR_HDMI_0 = 0x60,
70 DCB_CONNECTOR_HDMI_1 = 0x61,
71 DCB_CONNECTOR_NONE = 0xff
72};
73
74struct dcb_connector_table_entry {
75 uint32_t entry;
76 enum dcb_connector_type type;
77 uint8_t index;
78 uint8_t gpio_tag;
79};
80
81struct dcb_connector_table {
82 int entries;
83 struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
84};
85
86enum dcb_type {
87 OUTPUT_ANALOG = 0,
88 OUTPUT_TV = 1,
89 OUTPUT_TMDS = 2,
90 OUTPUT_LVDS = 3,
91 OUTPUT_DP = 6,
92 OUTPUT_ANY = -1
93};
94
37struct dcb_entry { 95struct dcb_entry {
38 int index; /* may not be raw dcb index if merging has happened */ 96 int index; /* may not be raw dcb index if merging has happened */
39 uint8_t type; 97 enum dcb_type type;
40 uint8_t i2c_index; 98 uint8_t i2c_index;
41 uint8_t heads; 99 uint8_t heads;
42 uint8_t connector; 100 uint8_t connector;
@@ -71,69 +129,22 @@ struct dcb_entry {
71 bool i2c_upper_default; 129 bool i2c_upper_default;
72}; 130};
73 131
74struct dcb_i2c_entry { 132struct dcb_table {
75 uint8_t port_type; 133 uint8_t version;
76 uint8_t read, write;
77 struct nouveau_i2c_chan *chan;
78};
79 134
80struct parsed_dcb {
81 int entries; 135 int entries;
82 struct dcb_entry entry[DCB_MAX_NUM_ENTRIES]; 136 struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
83 struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
84};
85
86enum dcb_gpio_tag {
87 DCB_GPIO_TVDAC0 = 0xc,
88 DCB_GPIO_TVDAC1 = 0x2d,
89};
90
91struct dcb_gpio_entry {
92 enum dcb_gpio_tag tag;
93 int line;
94 bool invert;
95};
96
97struct parsed_dcb_gpio {
98 int entries;
99 struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
100};
101
102struct dcb_connector_table_entry {
103 uint32_t entry;
104 uint8_t type;
105 uint8_t index;
106 uint8_t gpio_tag;
107};
108
109struct dcb_connector_table {
110 int entries;
111 struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
112};
113
114struct bios_parsed_dcb {
115 uint8_t version;
116
117 struct parsed_dcb dcb;
118 137
119 uint8_t *i2c_table; 138 uint8_t *i2c_table;
120 uint8_t i2c_default_indices; 139 uint8_t i2c_default_indices;
140 struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
121 141
122 uint16_t gpio_table_ptr; 142 uint16_t gpio_table_ptr;
123 struct parsed_dcb_gpio gpio; 143 struct dcb_gpio_table gpio;
124 uint16_t connector_table_ptr; 144 uint16_t connector_table_ptr;
125 struct dcb_connector_table connector; 145 struct dcb_connector_table connector;
126}; 146};
127 147
128enum nouveau_encoder_type {
129 OUTPUT_ANALOG = 0,
130 OUTPUT_TV = 1,
131 OUTPUT_TMDS = 2,
132 OUTPUT_LVDS = 3,
133 OUTPUT_DP = 6,
134 OUTPUT_ANY = -1
135};
136
137enum nouveau_or { 148enum nouveau_or {
138 OUTPUT_A = (1 << 0), 149 OUTPUT_A = (1 << 0),
139 OUTPUT_B = (1 << 1), 150 OUTPUT_B = (1 << 1),
@@ -190,8 +201,8 @@ struct pll_lims {
190 int refclk; 201 int refclk;
191}; 202};
192 203
193struct nouveau_bios_info { 204struct nvbios {
194 struct parsed_dcb *dcb; 205 struct drm_device *dev;
195 206
196 uint8_t chip_version; 207 uint8_t chip_version;
197 208
@@ -199,11 +210,6 @@ struct nouveau_bios_info {
199 uint32_t tvdactestval; 210 uint32_t tvdactestval;
200 uint8_t digital_min_front_porch; 211 uint8_t digital_min_front_porch;
201 bool fp_no_ddc; 212 bool fp_no_ddc;
202};
203
204struct nvbios {
205 struct drm_device *dev;
206 struct nouveau_bios_info pub;
207 213
208 struct mutex lock; 214 struct mutex lock;
209 215
@@ -234,7 +240,7 @@ struct nvbios {
234 uint16_t some_script_ptr; /* BIT I + 14 */ 240 uint16_t some_script_ptr; /* BIT I + 14 */
235 uint16_t init96_tbl_ptr; /* BIT I + 16 */ 241 uint16_t init96_tbl_ptr; /* BIT I + 16 */
236 242
237 struct bios_parsed_dcb bdcb; 243 struct dcb_table dcb;
238 244
239 struct { 245 struct {
240 int crtchead; 246 int crtchead;
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
index ee2b84504d05..88f9bc0941eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -274,7 +274,7 @@ getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
274 * returns calculated clock 274 * returns calculated clock
275 */ 275 */
276 struct drm_nouveau_private *dev_priv = dev->dev_private; 276 struct drm_nouveau_private *dev_priv = dev->dev_private;
277 int cv = dev_priv->vbios->chip_version; 277 int cv = dev_priv->vbios.chip_version;
278 int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq; 278 int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
279 int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m; 279 int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
280 int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n; 280 int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
@@ -373,7 +373,7 @@ getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
373 * returns calculated clock 373 * returns calculated clock
374 */ 374 */
375 struct drm_nouveau_private *dev_priv = dev->dev_private; 375 struct drm_nouveau_private *dev_priv = dev->dev_private;
376 int chip_version = dev_priv->vbios->chip_version; 376 int chip_version = dev_priv->vbios.chip_version;
377 int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq; 377 int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
378 int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq; 378 int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
379 int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq; 379 int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 2281f99da7fc..6dfb425cbae9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -35,22 +35,27 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
35 struct drm_nouveau_private *dev_priv = dev->dev_private; 35 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_bo *pb = chan->pushbuf_bo; 36 struct nouveau_bo *pb = chan->pushbuf_bo;
37 struct nouveau_gpuobj *pushbuf = NULL; 37 struct nouveau_gpuobj *pushbuf = NULL;
38 uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
39 int ret; 38 int ret;
40 39
40 if (dev_priv->card_type >= NV_50) {
41 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
42 dev_priv->vm_end, NV_DMA_ACCESS_RO,
43 NV_DMA_TARGET_AGP, &pushbuf);
44 chan->pushbuf_base = pb->bo.offset;
45 } else
41 if (pb->bo.mem.mem_type == TTM_PL_TT) { 46 if (pb->bo.mem.mem_type == TTM_PL_TT) {
42 ret = nouveau_gpuobj_gart_dma_new(chan, 0, 47 ret = nouveau_gpuobj_gart_dma_new(chan, 0,
43 dev_priv->gart_info.aper_size, 48 dev_priv->gart_info.aper_size,
44 NV_DMA_ACCESS_RO, &pushbuf, 49 NV_DMA_ACCESS_RO, &pushbuf,
45 NULL); 50 NULL);
46 chan->pushbuf_base = start; 51 chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
47 } else 52 } else
48 if (dev_priv->card_type != NV_04) { 53 if (dev_priv->card_type != NV_04) {
49 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, 54 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
50 dev_priv->fb_available_size, 55 dev_priv->fb_available_size,
51 NV_DMA_ACCESS_RO, 56 NV_DMA_ACCESS_RO,
52 NV_DMA_TARGET_VIDMEM, &pushbuf); 57 NV_DMA_TARGET_VIDMEM, &pushbuf);
53 chan->pushbuf_base = start; 58 chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
54 } else { 59 } else {
55 /* NV04 cmdbuf hack, from original ddx.. not sure of it's 60 /* NV04 cmdbuf hack, from original ddx.. not sure of it's
56 * exact reason for existing :) PCI access to cmdbuf in 61 * exact reason for existing :) PCI access to cmdbuf in
@@ -61,7 +66,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
61 dev_priv->fb_available_size, 66 dev_priv->fb_available_size,
62 NV_DMA_ACCESS_RO, 67 NV_DMA_ACCESS_RO,
63 NV_DMA_TARGET_PCI, &pushbuf); 68 NV_DMA_TARGET_PCI, &pushbuf);
64 chan->pushbuf_base = start; 69 chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
65 } 70 }
66 71
67 ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf); 72 ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
@@ -275,9 +280,18 @@ nouveau_channel_free(struct nouveau_channel *chan)
275 */ 280 */
276 nouveau_fence_fini(chan); 281 nouveau_fence_fini(chan);
277 282
278 /* Ensure the channel is no longer active on the GPU */ 283 /* This will prevent pfifo from switching channels. */
279 pfifo->reassign(dev, false); 284 pfifo->reassign(dev, false);
280 285
286 /* We want to give pgraph a chance to idle and get rid of all potential
287 * errors. We need to do this before the lock, otherwise the irq handler
288 * is unable to process them.
289 */
290 if (pgraph->channel(dev) == chan)
291 nouveau_wait_for_idle(dev);
292
293 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
294
281 pgraph->fifo_access(dev, false); 295 pgraph->fifo_access(dev, false);
282 if (pgraph->channel(dev) == chan) 296 if (pgraph->channel(dev) == chan)
283 pgraph->unload_context(dev); 297 pgraph->unload_context(dev);
@@ -293,6 +307,8 @@ nouveau_channel_free(struct nouveau_channel *chan)
293 307
294 pfifo->reassign(dev, true); 308 pfifo->reassign(dev, true);
295 309
310 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
311
296 /* Release the channel's resources */ 312 /* Release the channel's resources */
297 nouveau_gpuobj_ref_del(dev, &chan->pushbuf); 313 nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
298 if (chan->pushbuf_bo) { 314 if (chan->pushbuf_bo) {
@@ -369,6 +385,14 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
369 return ret; 385 return ret;
370 init->channel = chan->id; 386 init->channel = chan->id;
371 387
388 if (chan->dma.ib_max)
389 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
390 NOUVEAU_GEM_DOMAIN_GART;
391 else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
392 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
393 else
394 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
395
372 init->subchan[0].handle = NvM2MF; 396 init->subchan[0].handle = NvM2MF;
373 if (dev_priv->card_type < NV_50) 397 if (dev_priv->card_type < NV_50)
374 init->subchan[0].grclass = 0x0039; 398 init->subchan[0].grclass = 0x0039;
@@ -408,7 +432,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
408 ***********************************/ 432 ***********************************/
409 433
410struct drm_ioctl_desc nouveau_ioctls[] = { 434struct drm_ioctl_desc nouveau_ioctls[] = {
411 DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
412 DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), 435 DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
413 DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 436 DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
414 DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), 437 DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
@@ -418,13 +441,9 @@ struct drm_ioctl_desc nouveau_ioctls[] = {
418 DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), 441 DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
419 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), 442 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
420 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), 443 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
421 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
422 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
423 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
424 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), 444 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
425 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), 445 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
426 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), 446 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
427 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
428}; 447};
429 448
430int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); 449int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
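
Editor's note: among the nouveau_channel.c changes above, the fifo_alloc ioctl now reports which memory domains the pushbuffer may live in: both VRAM and GART when indirect buffers are in use (chan->dma.ib_max set), otherwise whichever placement the buffer actually got. A compact sketch of that selection, with made-up constant and field names standing in for the GEM domain flags and TTM placements:

	#include <stdio.h>

	#define DOMAIN_VRAM 0x1	/* illustrative flag values only */
	#define DOMAIN_GART 0x2

	enum placement { PLACE_VRAM, PLACE_TT };

	struct demo_chan {
		int ib_max;		/* > 0 when indirect pushbuffers are used */
		enum placement pushbuf;	/* where the pushbuffer was placed */
	};

	static unsigned pushbuf_domains(const struct demo_chan *chan)
	{
		if (chan->ib_max)
			/* Indirect buffers may be submitted from either domain. */
			return DOMAIN_VRAM | DOMAIN_GART;
		if (chan->pushbuf == PLACE_VRAM)
			return DOMAIN_VRAM;
		return DOMAIN_GART;
	}

	int main(void)
	{
		struct demo_chan a = { .ib_max = 64, .pushbuf = PLACE_TT };
		struct demo_chan b = { .ib_max = 0, .pushbuf = PLACE_VRAM };

		printf("0x%x 0x%x\n", pushbuf_domains(&a), pushbuf_domains(&b));
		return 0;
	}
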
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index d2f63353ea97..24327f468c4b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -218,7 +218,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
218 connector->interlace_allowed = true; 218 connector->interlace_allowed = true;
219 } 219 }
220 220
221 if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { 221 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
222 drm_connector_property_set_value(connector, 222 drm_connector_property_set_value(connector,
223 dev->mode_config.dvi_i_subconnector_property, 223 dev->mode_config.dvi_i_subconnector_property,
224 nv_encoder->dcb->type == OUTPUT_TMDS ? 224 nv_encoder->dcb->type == OUTPUT_TMDS ?
@@ -236,15 +236,17 @@ nouveau_connector_detect(struct drm_connector *connector)
236 struct nouveau_i2c_chan *i2c; 236 struct nouveau_i2c_chan *i2c;
237 int type, flags; 237 int type, flags;
238 238
239 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) 239 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS)
240 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); 240 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
241 if (nv_encoder && nv_connector->native_mode) { 241 if (nv_encoder && nv_connector->native_mode) {
242 unsigned status = connector_status_connected;
243
242#ifdef CONFIG_ACPI 244#ifdef CONFIG_ACPI
243 if (!nouveau_ignorelid && !acpi_lid_open()) 245 if (!nouveau_ignorelid && !acpi_lid_open())
244 return connector_status_disconnected; 246 status = connector_status_unknown;
245#endif 247#endif
246 nouveau_connector_set_encoder(connector, nv_encoder); 248 nouveau_connector_set_encoder(connector, nv_encoder);
247 return connector_status_connected; 249 return status;
248 } 250 }
249 251
250 /* Cleanup the previous EDID block. */ 252 /* Cleanup the previous EDID block. */
@@ -279,7 +281,7 @@ nouveau_connector_detect(struct drm_connector *connector)
279 * same i2c channel so the value returned from ddc_detect 281 * same i2c channel so the value returned from ddc_detect
280 * isn't necessarily correct. 282 * isn't necessarily correct.
281 */ 283 */
282 if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { 284 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
283 if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL) 285 if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
284 type = OUTPUT_TMDS; 286 type = OUTPUT_TMDS;
285 else 287 else
@@ -321,11 +323,11 @@ detect_analog:
321static void 323static void
322nouveau_connector_force(struct drm_connector *connector) 324nouveau_connector_force(struct drm_connector *connector)
323{ 325{
324 struct drm_device *dev = connector->dev; 326 struct nouveau_connector *nv_connector = nouveau_connector(connector);
325 struct nouveau_encoder *nv_encoder; 327 struct nouveau_encoder *nv_encoder;
326 int type; 328 int type;
327 329
328 if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { 330 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
329 if (connector->force == DRM_FORCE_ON_DIGITAL) 331 if (connector->force == DRM_FORCE_ON_DIGITAL)
330 type = OUTPUT_TMDS; 332 type = OUTPUT_TMDS;
331 else 333 else
@@ -335,7 +337,7 @@ nouveau_connector_force(struct drm_connector *connector)
335 337
336 nv_encoder = find_encoder_by_type(connector, type); 338 nv_encoder = find_encoder_by_type(connector, type);
337 if (!nv_encoder) { 339 if (!nv_encoder) {
338 NV_ERROR(dev, "can't find encoder to force %s on!\n", 340 NV_ERROR(connector->dev, "can't find encoder to force %s on!\n",
339 drm_get_connector_name(connector)); 341 drm_get_connector_name(connector));
340 connector->status = connector_status_disconnected; 342 connector->status = connector_status_disconnected;
341 return; 343 return;
@@ -369,7 +371,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
369 } 371 }
370 372
371 /* LVDS always needs gpu scaling */ 373 /* LVDS always needs gpu scaling */
372 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && 374 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS &&
373 value == DRM_MODE_SCALE_NONE) 375 value == DRM_MODE_SCALE_NONE)
374 return -EINVAL; 376 return -EINVAL;
375 377
@@ -535,7 +537,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
535 /* If we're not LVDS, destroy the previous native mode, the attached 537 /* If we're not LVDS, destroy the previous native mode, the attached
536 * monitor could have changed. 538 * monitor could have changed.
537 */ 539 */
538 if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && 540 if (nv_connector->dcb->type != DCB_CONNECTOR_LVDS &&
539 nv_connector->native_mode) { 541 nv_connector->native_mode) {
540 drm_mode_destroy(dev, nv_connector->native_mode); 542 drm_mode_destroy(dev, nv_connector->native_mode);
541 nv_connector->native_mode = NULL; 543 nv_connector->native_mode = NULL;
@@ -563,7 +565,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
563 ret = get_slave_funcs(nv_encoder)-> 565 ret = get_slave_funcs(nv_encoder)->
564 get_modes(to_drm_encoder(nv_encoder), connector); 566 get_modes(to_drm_encoder(nv_encoder), connector);
565 567
566 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) 568 if (nv_encoder->dcb->type == OUTPUT_LVDS)
567 ret += nouveau_connector_scaler_modes_add(connector); 569 ret += nouveau_connector_scaler_modes_add(connector);
568 570
569 return ret; 571 return ret;
@@ -613,6 +615,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
613 615
614 clock *= 3; 616 clock *= 3;
615 break; 617 break;
618 default:
619 BUG_ON(1);
620 return MODE_BAD;
616 } 621 }
617 622
618 if (clock < min_clock) 623 if (clock < min_clock)
@@ -680,7 +685,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
680 /* Firstly try getting EDID over DDC, if allowed and I2C channel 685 /* Firstly try getting EDID over DDC, if allowed and I2C channel
681 * is available. 686 * is available.
682 */ 687 */
683 if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf) 688 if (!dev_priv->vbios.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
684 i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); 689 i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
685 690
686 if (i2c) { 691 if (i2c) {
@@ -695,7 +700,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
695 */ 700 */
696 if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) && 701 if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
697 (nv_encoder->dcb->lvdsconf.use_straps_for_mode || 702 (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
698 dev_priv->VBIOS.pub.fp_no_ddc)) { 703 dev_priv->vbios.fp_no_ddc)) {
699 nv_connector->native_mode = drm_mode_duplicate(dev, &native); 704 nv_connector->native_mode = drm_mode_duplicate(dev, &native);
700 goto out; 705 goto out;
701 } 706 }
@@ -704,7 +709,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
704 * stored for the panel stored in them. 709 * stored for the panel stored in them.
705 */ 710 */
706 if (!nv_connector->edid && !nv_connector->native_mode && 711 if (!nv_connector->edid && !nv_connector->native_mode &&
707 !dev_priv->VBIOS.pub.fp_no_ddc) { 712 !dev_priv->vbios.fp_no_ddc) {
708 struct edid *edid = 713 struct edid *edid =
709 (struct edid *)nouveau_bios_embedded_edid(dev); 714 (struct edid *)nouveau_bios_embedded_edid(dev);
710 if (edid) { 715 if (edid) {
@@ -739,46 +744,66 @@ out:
739} 744}
740 745
741int 746int
742nouveau_connector_create(struct drm_device *dev, int index, int type) 747nouveau_connector_create(struct drm_device *dev,
748 struct dcb_connector_table_entry *dcb)
743{ 749{
744 struct drm_nouveau_private *dev_priv = dev->dev_private; 750 struct drm_nouveau_private *dev_priv = dev->dev_private;
745 struct nouveau_connector *nv_connector = NULL; 751 struct nouveau_connector *nv_connector = NULL;
746 struct drm_connector *connector; 752 struct drm_connector *connector;
747 struct drm_encoder *encoder; 753 struct drm_encoder *encoder;
748 int ret; 754 int ret, type;
749 755
750 NV_DEBUG_KMS(dev, "\n"); 756 NV_DEBUG_KMS(dev, "\n");
751 757
752 nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); 758 switch (dcb->type) {
753 if (!nv_connector) 759 case DCB_CONNECTOR_NONE:
754 return -ENOMEM; 760 return 0;
755 nv_connector->dcb = nouveau_bios_connector_entry(dev, index); 761 case DCB_CONNECTOR_VGA:
756 connector = &nv_connector->base;
757
758 switch (type) {
759 case DRM_MODE_CONNECTOR_VGA:
760 NV_INFO(dev, "Detected a VGA connector\n"); 762 NV_INFO(dev, "Detected a VGA connector\n");
763 type = DRM_MODE_CONNECTOR_VGA;
761 break; 764 break;
762 case DRM_MODE_CONNECTOR_DVID: 765 case DCB_CONNECTOR_TV_0:
763 NV_INFO(dev, "Detected a DVI-D connector\n"); 766 case DCB_CONNECTOR_TV_1:
767 case DCB_CONNECTOR_TV_3:
768 NV_INFO(dev, "Detected a TV connector\n");
769 type = DRM_MODE_CONNECTOR_TV;
764 break; 770 break;
765 case DRM_MODE_CONNECTOR_DVII: 771 case DCB_CONNECTOR_DVI_I:
766 NV_INFO(dev, "Detected a DVI-I connector\n"); 772 NV_INFO(dev, "Detected a DVI-I connector\n");
773 type = DRM_MODE_CONNECTOR_DVII;
767 break; 774 break;
768 case DRM_MODE_CONNECTOR_LVDS: 775 case DCB_CONNECTOR_DVI_D:
769 NV_INFO(dev, "Detected a LVDS connector\n"); 776 NV_INFO(dev, "Detected a DVI-D connector\n");
777 type = DRM_MODE_CONNECTOR_DVID;
770 break; 778 break;
771 case DRM_MODE_CONNECTOR_TV: 779 case DCB_CONNECTOR_HDMI_0:
772 NV_INFO(dev, "Detected a TV connector\n"); 780 case DCB_CONNECTOR_HDMI_1:
781 NV_INFO(dev, "Detected a HDMI connector\n");
782 type = DRM_MODE_CONNECTOR_HDMIA;
783 break;
784 case DCB_CONNECTOR_LVDS:
785 NV_INFO(dev, "Detected a LVDS connector\n");
786 type = DRM_MODE_CONNECTOR_LVDS;
773 break; 787 break;
774 case DRM_MODE_CONNECTOR_DisplayPort: 788 case DCB_CONNECTOR_DP:
775 NV_INFO(dev, "Detected a DisplayPort connector\n"); 789 NV_INFO(dev, "Detected a DisplayPort connector\n");
790 type = DRM_MODE_CONNECTOR_DisplayPort;
776 break; 791 break;
777 default: 792 case DCB_CONNECTOR_eDP:
778 NV_ERROR(dev, "Unknown connector, this is not good.\n"); 793 NV_INFO(dev, "Detected an eDP connector\n");
794 type = DRM_MODE_CONNECTOR_eDP;
779 break; 795 break;
796 default:
797 NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
798 return -EINVAL;
780 } 799 }
781 800
801 nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
802 if (!nv_connector)
803 return -ENOMEM;
804 nv_connector->dcb = dcb;
805 connector = &nv_connector->base;
806
782 /* defaults, will get overridden in detect() */ 807 /* defaults, will get overridden in detect() */
783 connector->interlace_allowed = false; 808 connector->interlace_allowed = false;
784 connector->doublescan_allowed = false; 809 connector->doublescan_allowed = false;
@@ -786,55 +811,65 @@ nouveau_connector_create(struct drm_device *dev, int index, int type)
 	drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
 	drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
 
+	/* attach encoders */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+		if (nv_encoder->dcb->connector != dcb->index)
+			continue;
+
+		if (get_slave_funcs(nv_encoder))
+			get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
+
+		drm_mode_connector_attach_encoder(connector, encoder);
+	}
+
+	if (!connector->encoder_ids[0]) {
+		NV_WARN(dev, " no encoders, ignoring\n");
+		drm_connector_cleanup(connector);
+		kfree(connector);
+		return 0;
+	}
+
 	/* Init DVI-I specific properties */
-	if (type == DRM_MODE_CONNECTOR_DVII) {
+	if (dcb->type == DCB_CONNECTOR_DVI_I) {
 		drm_mode_create_dvi_i_properties(dev);
 		drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
 		drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
 	}
 
-	if (type != DRM_MODE_CONNECTOR_LVDS)
+	if (dcb->type != DCB_CONNECTOR_LVDS)
 		nv_connector->use_dithering = false;
 
-	if (type == DRM_MODE_CONNECTOR_DVID ||
-	    type == DRM_MODE_CONNECTOR_DVII ||
-	    type == DRM_MODE_CONNECTOR_LVDS ||
-	    type == DRM_MODE_CONNECTOR_DisplayPort) {
-		nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
-
-		drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property,
-					      nv_connector->scaling_mode);
-		drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property,
-					      nv_connector->use_dithering ? DRM_MODE_DITHERING_ON
-					      : DRM_MODE_DITHERING_OFF);
-
-	} else {
-		nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
-
-		if (type == DRM_MODE_CONNECTOR_VGA &&
-		    dev_priv->card_type >= NV_50) {
+	switch (dcb->type) {
+	case DCB_CONNECTOR_VGA:
+		if (dev_priv->card_type >= NV_50) {
 			drm_connector_attach_property(connector,
 					dev->mode_config.scaling_mode_property,
 					nv_connector->scaling_mode);
 		}
-	}
-
-	/* attach encoders */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
-		if (nv_encoder->dcb->connector != index)
-			continue;
-
-		if (get_slave_funcs(nv_encoder))
-			get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
+		/* fall-through */
+	case DCB_CONNECTOR_TV_0:
+	case DCB_CONNECTOR_TV_1:
+	case DCB_CONNECTOR_TV_3:
+		nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
+		break;
+	default:
+		nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
 
-		drm_mode_connector_attach_encoder(connector, encoder);
+		drm_connector_attach_property(connector,
+				dev->mode_config.scaling_mode_property,
+				nv_connector->scaling_mode);
+		drm_connector_attach_property(connector,
+				dev->mode_config.dithering_mode_property,
+				nv_connector->use_dithering ?
+				DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
+		break;
 	}
 
 	drm_sysfs_connector_add(connector);
 
-	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+	if (dcb->type == DCB_CONNECTOR_LVDS) {
 		ret = nouveau_connector_create_lvds(dev, connector);
 		if (ret) {
 			connector->funcs->destroy(connector);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 728b8090e5ff..4ef38abc2d9c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -49,6 +49,7 @@ static inline struct nouveau_connector *nouveau_connector(
 	return container_of(con, struct nouveau_connector, base);
 }
 
-int nouveau_connector_create(struct drm_device *dev, int i2c_index, int type);
+int nouveau_connector_create(struct drm_device *,
+			     struct dcb_connector_table_entry *);
 
 #endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index d79db3698f16..8ff9ef5d4b47 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -47,12 +47,23 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
47 seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2); 47 seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
48 seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2); 48 seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
49 seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2); 49 seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);
50 if (chan->dma.ib_max) {
51 seq_printf(m, " ib max: 0x%08x\n", chan->dma.ib_max);
52 seq_printf(m, " ib put: 0x%08x\n", chan->dma.ib_put);
53 seq_printf(m, " ib free: 0x%08x\n", chan->dma.ib_free);
54 }
50 55
51 seq_printf(m, "gpu fifo state:\n"); 56 seq_printf(m, "gpu fifo state:\n");
52 seq_printf(m, " get: 0x%08x\n", 57 seq_printf(m, " get: 0x%08x\n",
53 nvchan_rd32(chan, chan->user_get)); 58 nvchan_rd32(chan, chan->user_get));
54 seq_printf(m, " put: 0x%08x\n", 59 seq_printf(m, " put: 0x%08x\n",
55 nvchan_rd32(chan, chan->user_put)); 60 nvchan_rd32(chan, chan->user_put));
61 if (chan->dma.ib_max) {
62 seq_printf(m, " ib get: 0x%08x\n",
63 nvchan_rd32(chan, 0x88));
64 seq_printf(m, " ib put: 0x%08x\n",
65 nvchan_rd32(chan, 0x8c));
66 }
56 67
57 seq_printf(m, "last fence : %d\n", chan->fence.sequence); 68 seq_printf(m, "last fence : %d\n", chan->fence.sequence);
58 seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack); 69 seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
@@ -133,9 +144,22 @@ nouveau_debugfs_memory_info(struct seq_file *m, void *data)
133 return 0; 144 return 0;
134} 145}
135 146
147static int
148nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
149{
150 struct drm_info_node *node = (struct drm_info_node *) m->private;
151 struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
152 int i;
153
154 for (i = 0; i < dev_priv->vbios.length; i++)
155 seq_printf(m, "%c", dev_priv->vbios.data[i]);
156 return 0;
157}
158
136static struct drm_info_list nouveau_debugfs_list[] = { 159static struct drm_info_list nouveau_debugfs_list[] = {
137 { "chipset", nouveau_debugfs_chipset_info, 0, NULL }, 160 { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
138 { "memory", nouveau_debugfs_memory_info, 0, NULL }, 161 { "memory", nouveau_debugfs_memory_info, 0, NULL },
162 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
139}; 163};
140#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) 164#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
141 165
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index dfc94391d71e..cf1c5c0a0abe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -39,11 +39,8 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
 	if (drm_fb->fbdev)
 		nouveau_fbcon_remove(dev, drm_fb);
 
-	if (fb->nvbo) {
-		mutex_lock(&dev->struct_mutex);
-		drm_gem_object_unreference(fb->nvbo->gem);
-		mutex_unlock(&dev->struct_mutex);
-	}
+	if (fb->nvbo)
+		drm_gem_object_unreference_unlocked(fb->nvbo->gem);
 
 	drm_framebuffer_cleanup(drm_fb);
 	kfree(fb);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 50d9e67745af..c8482a108a78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -32,7 +32,22 @@
 void
 nouveau_dma_pre_init(struct nouveau_channel *chan)
 {
-	chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nouveau_bo *pushbuf = chan->pushbuf_bo;
+
+	if (dev_priv->card_type == NV_50) {
+		const int ib_size = pushbuf->bo.mem.size / 2;
+
+		chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
+		chan->dma.ib_max = (ib_size / 8) - 1;
+		chan->dma.ib_put = 0;
+		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
+
+		chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
+	} else {
+		chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
+	}
+
 	chan->dma.put = 0;
 	chan->dma.cur = chan->dma.put;
 	chan->dma.free = chan->dma.max - chan->dma.cur;
@@ -162,12 +177,101 @@ READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
162 return (val - chan->pushbuf_base) >> 2; 177 return (val - chan->pushbuf_base) >> 2;
163} 178}
164 179
180void
181nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
182 int delta, int length)
183{
184 struct nouveau_bo *pb = chan->pushbuf_bo;
185 uint64_t offset = bo->bo.offset + delta;
186 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
187
188 BUG_ON(chan->dma.ib_free < 1);
189 nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
190 nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
191
192 chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
193 nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
194 chan->dma.ib_free--;
195}
196
197static int
198nv50_dma_push_wait(struct nouveau_channel *chan, int count)
199{
200 uint32_t cnt = 0, prev_get = 0;
201
202 while (chan->dma.ib_free < count) {
203 uint32_t get = nvchan_rd32(chan, 0x88);
204 if (get != prev_get) {
205 prev_get = get;
206 cnt = 0;
207 }
208
209 if ((++cnt & 0xff) == 0) {
210 DRM_UDELAY(1);
211 if (cnt > 100000)
212 return -EBUSY;
213 }
214
215 chan->dma.ib_free = get - chan->dma.ib_put;
216 if (chan->dma.ib_free <= 0)
217 chan->dma.ib_free += chan->dma.ib_max + 1;
218 }
219
220 return 0;
221}
222
223static int
224nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
225{
226 uint32_t cnt = 0, prev_get = 0;
227 int ret;
228
229 ret = nv50_dma_push_wait(chan, slots + 1);
230 if (unlikely(ret))
231 return ret;
232
233 while (chan->dma.free < count) {
234 int get = READ_GET(chan, &prev_get, &cnt);
235 if (unlikely(get < 0)) {
236 if (get == -EINVAL)
237 continue;
238
239 return get;
240 }
241
242 if (get <= chan->dma.cur) {
243 chan->dma.free = chan->dma.max - chan->dma.cur;
244 if (chan->dma.free >= count)
245 break;
246
247 FIRE_RING(chan);
248 do {
249 get = READ_GET(chan, &prev_get, &cnt);
250 if (unlikely(get < 0)) {
251 if (get == -EINVAL)
252 continue;
253 return get;
254 }
255 } while (get == 0);
256 chan->dma.cur = 0;
257 chan->dma.put = 0;
258 }
259
260 chan->dma.free = get - chan->dma.cur - 1;
261 }
262
263 return 0;
264}
265
 int
-nouveau_dma_wait(struct nouveau_channel *chan, int size)
+nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
 {
 	uint32_t prev_get = 0, cnt = 0;
 	int get;
 
+	if (chan->dma.ib_max)
+		return nv50_dma_wait(chan, slots, size);
+
 	while (chan->dma.free < size) {
 		get = READ_GET(chan, &prev_get, &cnt);
 		if (unlikely(get == -EBUSY))
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index dabfd655f93e..8b05c15866d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -31,6 +31,9 @@
31#define NOUVEAU_DMA_DEBUG 0 31#define NOUVEAU_DMA_DEBUG 0
32#endif 32#endif
33 33
34void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
35 int delta, int length);
36
34/* 37/*
35 * There's a hw race condition where you can't jump to your PUT offset, 38 * There's a hw race condition where you can't jump to your PUT offset,
36 * to avoid this we jump to offset + SKIPS and fill the difference with 39 * to avoid this we jump to offset + SKIPS and fill the difference with
@@ -96,13 +99,11 @@ enum {
 static __must_check inline int
 RING_SPACE(struct nouveau_channel *chan, int size)
 {
-	if (chan->dma.free < size) {
-		int ret;
+	int ret;
 
-		ret = nouveau_dma_wait(chan, size);
+	ret = nouveau_dma_wait(chan, 1, size);
 	if (ret)
 		return ret;
-	}
 
 	chan->dma.free -= size;
 	return 0;
@@ -146,7 +147,13 @@ FIRE_RING(struct nouveau_channel *chan)
 		return;
 	chan->accel_done = true;
 
-	WRITE_PUT(chan->dma.cur);
+	if (chan->dma.ib_max) {
+		nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2,
+			      (chan->dma.cur - chan->dma.put) << 2);
+	} else {
+		WRITE_PUT(chan->dma.cur);
+	}
+
 	chan->dma.put = chan->dma.cur;
 }
152 159
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index da3b93b84502..30cc09e8a709 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -75,11 +75,11 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
 int nouveau_ignorelid = 0;
 module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
 
-MODULE_PARM_DESC(noagp, "Disable all acceleration");
+MODULE_PARM_DESC(noaccel, "Disable all acceleration");
 int nouveau_noaccel = 0;
 module_param_named(noaccel, nouveau_noaccel, int, 0400);
 
-MODULE_PARM_DESC(noagp, "Disable fbcon acceleration");
+MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
 int nouveau_nofbaccel = 0;
 module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
 
@@ -135,7 +135,7 @@ nouveau_pci_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
-static int
+int
 nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
@@ -233,7 +233,7 @@ out_abort:
 	return ret;
 }
 
-static int
+int
 nouveau_pci_resume(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
@@ -402,8 +402,10 @@ static int __init nouveau_init(void)
 		nouveau_modeset = 1;
 	}
 
-	if (nouveau_modeset == 1)
+	if (nouveau_modeset == 1) {
 		driver.driver_features |= DRIVER_MODESET;
+		nouveau_register_dsm_handler();
+	}
 
 	return drm_init(&driver);
 }
@@ -411,6 +413,7 @@ static int __init nouveau_init(void)
411static void __exit nouveau_exit(void) 413static void __exit nouveau_exit(void)
412{ 414{
413 drm_exit(&driver); 415 drm_exit(&driver);
416 nouveau_unregister_dsm_handler();
414} 417}
415 418
416module_init(nouveau_init); 419module_init(nouveau_init);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c15ef37b71c..5f8d987af363 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -34,7 +34,7 @@
 
 #define DRIVER_MAJOR 0
 #define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 15
+#define DRIVER_PATCHLEVEL 16
 
 #define NOUVEAU_FAMILY 0x0000FFFF
 #define NOUVEAU_FLAGS 0xFFFF0000
@@ -83,6 +83,7 @@ struct nouveau_bo {
83 struct drm_file *reserved_by; 83 struct drm_file *reserved_by;
84 struct list_head entry; 84 struct list_head entry;
85 int pbbo_index; 85 int pbbo_index;
86 bool validate_mapped;
86 87
87 struct nouveau_channel *channel; 88 struct nouveau_channel *channel;
88 89
@@ -239,6 +240,11 @@ struct nouveau_channel {
239 int cur; 240 int cur;
240 int put; 241 int put;
241 /* access via pushbuf_bo */ 242 /* access via pushbuf_bo */
243
244 int ib_base;
245 int ib_max;
246 int ib_free;
247 int ib_put;
242 } dma; 248 } dma;
243 249
244 uint32_t sw_subchannel[8]; 250 uint32_t sw_subchannel[8];
@@ -533,6 +539,9 @@ struct drm_nouveau_private {
533 struct nouveau_engine engine; 539 struct nouveau_engine engine;
534 struct nouveau_channel *channel; 540 struct nouveau_channel *channel;
535 541
542 /* For PFIFO and PGRAPH. */
543 spinlock_t context_switch_lock;
544
536 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ 545 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
537 struct nouveau_gpuobj *ramht; 546 struct nouveau_gpuobj *ramht;
538 uint32_t ramin_rsvd_vram; 547 uint32_t ramin_rsvd_vram;
@@ -596,8 +605,7 @@ struct drm_nouveau_private {
 
 	struct list_head gpuobj_list;
 
-	struct nvbios VBIOS;
-	struct nouveau_bios_info *vbios;
+	struct nvbios vbios;
 
 	struct nv04_mode_state mode_reg;
 	struct nv04_mode_state saved_reg;
@@ -614,7 +622,6 @@ struct drm_nouveau_private {
614 } susres; 622 } susres;
615 623
616 struct backlight_device *backlight; 624 struct backlight_device *backlight;
617 bool acpi_dsm;
618 625
619 struct nouveau_channel *evo; 626 struct nouveau_channel *evo;
620 627
@@ -682,6 +689,9 @@ extern int nouveau_ignorelid;
682extern int nouveau_nofbaccel; 689extern int nouveau_nofbaccel;
683extern int nouveau_noaccel; 690extern int nouveau_noaccel;
684 691
692extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
693extern int nouveau_pci_resume(struct pci_dev *pdev);
694
685/* nouveau_state.c */ 695/* nouveau_state.c */
686extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); 696extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
687extern int nouveau_load(struct drm_device *, unsigned long flags); 697extern int nouveau_load(struct drm_device *, unsigned long flags);
@@ -696,12 +706,6 @@ extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
696 uint32_t reg, uint32_t mask, uint32_t val); 706 uint32_t reg, uint32_t mask, uint32_t val);
697extern bool nouveau_wait_for_idle(struct drm_device *); 707extern bool nouveau_wait_for_idle(struct drm_device *);
698extern int nouveau_card_init(struct drm_device *); 708extern int nouveau_card_init(struct drm_device *);
699extern int nouveau_ioctl_card_init(struct drm_device *, void *data,
700 struct drm_file *);
701extern int nouveau_ioctl_suspend(struct drm_device *, void *data,
702 struct drm_file *);
703extern int nouveau_ioctl_resume(struct drm_device *, void *data,
704 struct drm_file *);
705 709
706/* nouveau_mem.c */ 710/* nouveau_mem.c */
707extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start, 711extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
@@ -845,21 +849,15 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
 /* nouveau_dma.c */
 extern void nouveau_dma_pre_init(struct nouveau_channel *);
 extern int nouveau_dma_init(struct nouveau_channel *);
-extern int nouveau_dma_wait(struct nouveau_channel *, int size);
+extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
 
 /* nouveau_acpi.c */
-#ifdef CONFIG_ACPI
-extern int nouveau_hybrid_setup(struct drm_device *dev);
-extern bool nouveau_dsm_probe(struct drm_device *dev);
+#if defined(CONFIG_ACPI)
+void nouveau_register_dsm_handler(void);
+void nouveau_unregister_dsm_handler(void);
 #else
-static inline int nouveau_hybrid_setup(struct drm_device *dev)
-{
-	return 0;
-}
-static inline bool nouveau_dsm_probe(struct drm_device *dev)
-{
-	return false;
-}
+static inline void nouveau_register_dsm_handler(void) {}
+static inline void nouveau_unregister_dsm_handler(void) {}
 #endif
 
 /* nouveau_backlight.c */
@@ -1027,6 +1025,7 @@ extern void nv50_graph_destroy_context(struct nouveau_channel *);
1027extern int nv50_graph_load_context(struct nouveau_channel *); 1025extern int nv50_graph_load_context(struct nouveau_channel *);
1028extern int nv50_graph_unload_context(struct drm_device *); 1026extern int nv50_graph_unload_context(struct drm_device *);
1029extern void nv50_graph_context_switch(struct drm_device *); 1027extern void nv50_graph_context_switch(struct drm_device *);
1028extern int nv50_grctx_init(struct nouveau_grctx *);
1030 1029
1031/* nouveau_grctx.c */ 1030/* nouveau_grctx.c */
1032extern int nouveau_grctx_prog_load(struct drm_device *); 1031extern int nouveau_grctx_prog_load(struct drm_device *);
@@ -1152,16 +1151,6 @@ extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
1152 struct drm_file *); 1151 struct drm_file *);
1153extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *, 1152extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
1154 struct drm_file *); 1153 struct drm_file *);
1155extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
1156 struct drm_file *);
1157extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
1158 struct drm_file *);
1159extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
1160 struct drm_file *);
1161extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
1162 struct drm_file *);
1163extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
1164 struct drm_file *);
1165extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *, 1154extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
1166 struct drm_file *); 1155 struct drm_file *);
1167extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *, 1156extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index ea879a2efef3..68cedd9194fe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -36,6 +36,7 @@
36#include <linux/fb.h> 36#include <linux/fb.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/screen_info.h> 38#include <linux/screen_info.h>
39#include <linux/vga_switcheroo.h>
39 40
40#include "drmP.h" 41#include "drmP.h"
41#include "drm.h" 42#include "drm.h"
@@ -370,6 +371,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
370 nvbo->bo.offset, nvbo); 371 nvbo->bo.offset, nvbo);
371 372
372 mutex_unlock(&dev->struct_mutex); 373 mutex_unlock(&dev->struct_mutex);
374 vga_switcheroo_client_fb_set(dev->pdev, info);
373 return 0; 375 return 0;
374 376
375out_unref: 377out_unref:
@@ -401,10 +403,8 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
 
 	unregister_framebuffer(info);
 	nouveau_bo_unmap(nouveau_fb->nvbo);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(nouveau_fb->nvbo->gem);
+	drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
 	nouveau_fb->nvbo = NULL;
-	mutex_unlock(&dev->struct_mutex);
 	if (par)
 		drm_fb_helper_free(&par->helper);
 	framebuffer_release(info);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 70cc30803e3b..0d22f66f1c79 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -167,12 +167,10 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 
 	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference(nvbo->gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference_unlocked(nvbo->gem);
 
 	if (ret)
-		drm_gem_object_unreference(nvbo->gem);
+		drm_gem_object_unreference_unlocked(nvbo->gem);
 	return ret;
 }
 
@@ -243,6 +241,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
243 nouveau_fence_unref((void *)&prev_fence); 241 nouveau_fence_unref((void *)&prev_fence);
244 } 242 }
245 243
244 if (unlikely(nvbo->validate_mapped)) {
245 ttm_bo_kunmap(&nvbo->kmap);
246 nvbo->validate_mapped = false;
247 }
248
246 list_del(&nvbo->entry); 249 list_del(&nvbo->entry);
247 nvbo->reserved_by = NULL; 250 nvbo->reserved_by = NULL;
248 ttm_bo_unreserve(&nvbo->bo); 251 ttm_bo_unreserve(&nvbo->bo);
@@ -302,11 +305,14 @@ retry:
 			if (ret == -EAGAIN)
 				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
 			drm_gem_object_unreference(gem);
-			if (ret)
+			if (ret) {
+				NV_ERROR(dev, "fail reserve\n");
 				return ret;
+			}
 			goto retry;
 		}
 
+		b->user_priv = (uint64_t)(unsigned long)nvbo;
 		nvbo->reserved_by = file_priv;
 		nvbo->pbbo_index = i;
 		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
@@ -336,8 +342,10 @@ retry:
 			}
 
 			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
-			if (ret)
+			if (ret) {
+				NV_ERROR(dev, "fail wait_cpu\n");
 				return ret;
+			}
 			goto retry;
 		}
 	}
@@ -351,6 +359,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
351{ 359{
352 struct drm_nouveau_gem_pushbuf_bo __user *upbbo = 360 struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
353 (void __force __user *)(uintptr_t)user_pbbo_ptr; 361 (void __force __user *)(uintptr_t)user_pbbo_ptr;
362 struct drm_device *dev = chan->dev;
354 struct nouveau_bo *nvbo; 363 struct nouveau_bo *nvbo;
355 int ret, relocs = 0; 364 int ret, relocs = 0;
356 365
@@ -362,39 +371,46 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 			spin_lock(&nvbo->bo.lock);
 			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
 			spin_unlock(&nvbo->bo.lock);
-			if (unlikely(ret))
+			if (unlikely(ret)) {
+				NV_ERROR(dev, "fail wait other chan\n");
 				return ret;
+			}
 		}
 
 		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
 					     b->write_domains,
 					     b->valid_domains);
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail set_domain\n");
 			return ret;
+		}
 
 		nvbo->channel = chan;
 		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
 				      false, false);
 		nvbo->channel = NULL;
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail ttm_validate\n");
 			return ret;
+		}
 
-		if (nvbo->bo.offset == b->presumed_offset &&
+		if (nvbo->bo.offset == b->presumed.offset &&
 		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
-		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
 		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
-		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
+		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 			continue;
 
 		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-			b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
+			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 		else
-			b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
-		b->presumed_offset = nvbo->bo.offset;
-		b->presumed_ok = 0;
+			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+		b->presumed.offset = nvbo->bo.offset;
+		b->presumed.valid = 0;
 		relocs++;
 
-		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
+		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+				     &b->presumed, sizeof(b->presumed)))
 			return -EFAULT;
 	}
 
@@ -408,6 +424,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
408 uint64_t user_buffers, int nr_buffers, 424 uint64_t user_buffers, int nr_buffers,
409 struct validate_op *op, int *apply_relocs) 425 struct validate_op *op, int *apply_relocs)
410{ 426{
427 struct drm_device *dev = chan->dev;
411 int ret, relocs = 0; 428 int ret, relocs = 0;
412 429
413 INIT_LIST_HEAD(&op->vram_list); 430 INIT_LIST_HEAD(&op->vram_list);
@@ -418,11 +435,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
418 return 0; 435 return 0;
419 436
420 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); 437 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
421 if (unlikely(ret)) 438 if (unlikely(ret)) {
439 NV_ERROR(dev, "validate_init\n");
422 return ret; 440 return ret;
441 }
423 442
424 ret = validate_list(chan, &op->vram_list, pbbo, user_buffers); 443 ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
425 if (unlikely(ret < 0)) { 444 if (unlikely(ret < 0)) {
445 NV_ERROR(dev, "validate vram_list\n");
426 validate_fini(op, NULL); 446 validate_fini(op, NULL);
427 return ret; 447 return ret;
428 } 448 }
@@ -430,6 +450,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
430 450
431 ret = validate_list(chan, &op->gart_list, pbbo, user_buffers); 451 ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
432 if (unlikely(ret < 0)) { 452 if (unlikely(ret < 0)) {
453 NV_ERROR(dev, "validate gart_list\n");
433 validate_fini(op, NULL); 454 validate_fini(op, NULL);
434 return ret; 455 return ret;
435 } 456 }
@@ -437,6 +458,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
437 458
438 ret = validate_list(chan, &op->both_list, pbbo, user_buffers); 459 ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
439 if (unlikely(ret < 0)) { 460 if (unlikely(ret < 0)) {
461 NV_ERROR(dev, "validate both_list\n");
440 validate_fini(op, NULL); 462 validate_fini(op, NULL);
441 return ret; 463 return ret;
442 } 464 }
@@ -465,59 +487,82 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 }
 
 static int
-nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
-				struct drm_nouveau_gem_pushbuf_bo *bo,
-				unsigned nr_relocs, uint64_t ptr_relocs,
-				unsigned nr_dwords, unsigned first_dword,
-				uint32_t *pushbuf, bool is_iomem)
+nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
+				struct drm_nouveau_gem_pushbuf *req,
+				struct drm_nouveau_gem_pushbuf_bo *bo)
 {
 	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
-	struct drm_device *dev = chan->dev;
 	int ret = 0;
 	unsigned i;
 
-	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
+	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
 	if (IS_ERR(reloc))
 		return PTR_ERR(reloc);
 
-	for (i = 0; i < nr_relocs; i++) {
+	for (i = 0; i < req->nr_relocs; i++) {
 		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
 		struct drm_nouveau_gem_pushbuf_bo *b;
+		struct nouveau_bo *nvbo;
 		uint32_t data;
 
-		if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
-		    r->reloc_index >= first_dword + nr_dwords) {
-			NV_ERROR(dev, "Bad relocation %d\n", i);
-			NV_ERROR(dev, " bo: %d max %d\n", r->bo_index, nr_bo);
-			NV_ERROR(dev, " id: %d max %d\n", r->reloc_index, nr_dwords);
+		if (unlikely(r->bo_index > req->nr_buffers)) {
+			NV_ERROR(dev, "reloc bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
 
 		b = &bo[r->bo_index];
-		if (b->presumed_ok)
+		if (b->presumed.valid)
 			continue;
 
+		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+			NV_ERROR(dev, "reloc container bo index invalid\n");
+			ret = -EINVAL;
+			break;
+		}
+		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
+
+		if (unlikely(r->reloc_bo_offset + 4 >
+			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
+			NV_ERROR(dev, "reloc outside of bo\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		if (!nvbo->kmap.virtual) {
+			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+					  &nvbo->kmap);
+			if (ret) {
+				NV_ERROR(dev, "failed kmap for reloc\n");
+				break;
+			}
+			nvbo->validate_mapped = true;
+		}
+
 		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
-			data = b->presumed_offset + r->data;
+			data = b->presumed.offset + r->data;
 		else
 		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
-			data = (b->presumed_offset + r->data) >> 32;
+			data = (b->presumed.offset + r->data) >> 32;
 		else
 			data = r->data;
 
 		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
-			if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
+			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
 				data |= r->tor;
 			else
 				data |= r->vor;
 		}
 
-		if (is_iomem)
-			iowrite32_native(data, (void __force __iomem *)
-					 &pushbuf[r->reloc_index]);
-		else
-			pushbuf[r->reloc_index] = data;
+		spin_lock(&nvbo->bo.lock);
+		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+		spin_unlock(&nvbo->bo.lock);
+		if (ret) {
+			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
+			break;
+		}
+
+		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 	}
 
 	kfree(reloc);
@@ -528,127 +573,50 @@ int
528nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, 573nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
529 struct drm_file *file_priv) 574 struct drm_file *file_priv)
530{ 575{
576 struct drm_nouveau_private *dev_priv = dev->dev_private;
531 struct drm_nouveau_gem_pushbuf *req = data; 577 struct drm_nouveau_gem_pushbuf *req = data;
532 struct drm_nouveau_gem_pushbuf_bo *bo = NULL; 578 struct drm_nouveau_gem_pushbuf_push *push;
579 struct drm_nouveau_gem_pushbuf_bo *bo;
533 struct nouveau_channel *chan; 580 struct nouveau_channel *chan;
534 struct validate_op op; 581 struct validate_op op;
535 struct nouveau_fence* fence = 0; 582 struct nouveau_fence *fence = 0;
536 uint32_t *pushbuf = NULL; 583 int i, j, ret = 0, do_reloc = 0;
537 int ret = 0, do_reloc = 0, i;
538 584
539 NOUVEAU_CHECK_INITIALISED_WITH_RETURN; 585 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
540 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); 586 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
541 587
542 if (req->nr_dwords >= chan->dma.max || 588 req->vram_available = dev_priv->fb_aper_free;
543 req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS || 589 req->gart_available = dev_priv->gart_info.aper_free;
544 req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) { 590 if (unlikely(req->nr_push == 0))
545 NV_ERROR(dev, "Pushbuf config exceeds limits:\n"); 591 goto out_next;
546 NV_ERROR(dev, " dwords : %d max %d\n", req->nr_dwords,
547 chan->dma.max - 1);
548 NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
549 NOUVEAU_GEM_MAX_BUFFERS);
550 NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
551 NOUVEAU_GEM_MAX_RELOCS);
552 return -EINVAL;
553 }
554
555 pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
556 if (IS_ERR(pushbuf))
557 return PTR_ERR(pushbuf);
558
559 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
560 if (IS_ERR(bo)) {
561 kfree(pushbuf);
562 return PTR_ERR(bo);
563 }
564
565 mutex_lock(&dev->struct_mutex);
566
567 /* Validate buffer list */
568 ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
569 req->nr_buffers, &op, &do_reloc);
570 if (ret)
571 goto out;
572
573 /* Apply any relocations that are required */
574 if (do_reloc) {
575 ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
576 bo, req->nr_relocs,
577 req->relocs,
578 req->nr_dwords, 0,
579 pushbuf, false);
580 if (ret)
581 goto out;
582 }
583
584 /* Emit push buffer to the hw
585 */
586 ret = RING_SPACE(chan, req->nr_dwords);
587 if (ret)
588 goto out;
589
590 OUT_RINGp(chan, pushbuf, req->nr_dwords);
591 592
592 ret = nouveau_fence_new(chan, &fence, true); 593 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
593 if (ret) { 594 NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
594 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); 595 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
595 WIND_RING(chan); 596 return -EINVAL;
596 goto out;
597 } 597 }
598 598
599 if (nouveau_gem_pushbuf_sync(chan)) { 599 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
600 ret = nouveau_fence_wait(fence, NULL, false, false); 600 NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
601 if (ret) { 601 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
602 for (i = 0; i < req->nr_dwords; i++) 602 return -EINVAL;
603 NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
604 NV_ERROR(dev, "^^ above push buffer is fail :(\n");
605 }
606 } 603 }
607 604
608out: 605 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
609 validate_fini(&op, fence); 606 NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
610 nouveau_fence_unref((void**)&fence); 607 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
611 mutex_unlock(&dev->struct_mutex);
612 kfree(pushbuf);
613 kfree(bo);
614 return ret;
615}
616
617#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)
618
619int
620nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
621 struct drm_file *file_priv)
622{
623 struct drm_nouveau_private *dev_priv = dev->dev_private;
624 struct drm_nouveau_gem_pushbuf_call *req = data;
625 struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
626 struct nouveau_channel *chan;
627 struct drm_gem_object *gem;
628 struct nouveau_bo *pbbo;
629 struct validate_op op;
630 struct nouveau_fence* fence = 0;
631 int i, ret = 0, do_reloc = 0;
632
633 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
634 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
635
636 if (unlikely(req->handle == 0))
637 goto out_next;
638
639 if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
640 req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
641 NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
642 NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
643 NOUVEAU_GEM_MAX_BUFFERS);
644 NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
645 NOUVEAU_GEM_MAX_RELOCS);
646 return -EINVAL; 608 return -EINVAL;
647 } 609 }
648 610
611 push = u_memcpya(req->push, req->nr_push, sizeof(*push));
612 if (IS_ERR(push))
613 return PTR_ERR(push);
614
649 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); 615 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
650 if (IS_ERR(bo)) 616 if (IS_ERR(bo)) {
617 kfree(push);
651 return PTR_ERR(bo); 618 return PTR_ERR(bo);
619 }
652 620
653 mutex_lock(&dev->struct_mutex); 621 mutex_lock(&dev->struct_mutex);
654 622
@@ -660,122 +628,84 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
660 goto out; 628 goto out;
661 } 629 }
662 630
663 /* Validate DMA push buffer */
664 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
665 if (!gem) {
666 NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
667 ret = -EINVAL;
668 goto out;
669 }
670 pbbo = nouveau_gem_object(gem);
671
672 if ((req->offset & 3) || req->nr_dwords < 2 ||
673 (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
674 (unsigned long)req->nr_dwords >
675 ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
676 NV_ERROR(dev, "pb call misaligned or out of bounds: "
677 "%d + %d * 4 > %ld\n",
678 req->offset, req->nr_dwords, pbbo->bo.mem.size);
679 ret = -EINVAL;
680 drm_gem_object_unreference(gem);
681 goto out;
682 }
683
684 ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
685 chan->fence.sequence);
686 if (ret) {
687 NV_ERROR(dev, "resv pb: %d\n", ret);
688 drm_gem_object_unreference(gem);
689 goto out;
690 }
691
692 nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
693 ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
694 if (ret) {
695 NV_ERROR(dev, "validate pb: %d\n", ret);
696 ttm_bo_unreserve(&pbbo->bo);
697 drm_gem_object_unreference(gem);
698 goto out;
699 }
700
701 list_add_tail(&pbbo->entry, &op.both_list);
702
703 /* If presumed return address doesn't match, we need to map the
704 * push buffer and fix it..
705 */
706 if (!PUSHBUF_CAL) {
707 uint32_t retaddy;
708
709 if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
710 ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
711 if (ret) {
712 NV_ERROR(dev, "jmp_space: %d\n", ret);
713 goto out;
714 }
715 }
716
717 retaddy = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
718 retaddy |= 0x20000000;
719 if (retaddy != req->suffix0) {
720 req->suffix0 = retaddy;
721 do_reloc = 1;
722 }
723 }
724
725 /* Apply any relocations that are required */ 631 /* Apply any relocations that are required */
726 if (do_reloc) { 632 if (do_reloc) {
727 void *pbvirt; 633 ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
728 bool is_iomem;
729 ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
730 &pbbo->kmap);
731 if (ret) { 634 if (ret) {
732 NV_ERROR(dev, "kmap pb: %d\n", ret); 635 NV_ERROR(dev, "reloc apply: %d\n", ret);
733 goto out; 636 goto out;
734 } 637 }
638 }
735 639
736 pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem); 640 if (chan->dma.ib_max) {
737 ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo, 641 ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
738 req->nr_relocs,
739 req->relocs,
740 req->nr_dwords,
741 req->offset / 4,
742 pbvirt, is_iomem);
743
744 if (!PUSHBUF_CAL) {
745 nouveau_bo_wr32(pbbo,
746 req->offset / 4 + req->nr_dwords - 2,
747 req->suffix0);
748 }
749
750 ttm_bo_kunmap(&pbbo->kmap);
751 if (ret) { 642 if (ret) {
752 NV_ERROR(dev, "reloc apply: %d\n", ret); 643 NV_INFO(dev, "nv50cal_space: %d\n", ret);
753 goto out; 644 goto out;
754 } 645 }
755 }
756 646
757 if (PUSHBUF_CAL) { 647 for (i = 0; i < req->nr_push; i++) {
758 ret = RING_SPACE(chan, 2); 648 struct nouveau_bo *nvbo = (void *)(unsigned long)
649 bo[push[i].bo_index].user_priv;
650
651 nv50_dma_push(chan, nvbo, push[i].offset,
652 push[i].length);
653 }
654 } else
655 if (dev_priv->card_type >= NV_20) {
656 ret = RING_SPACE(chan, req->nr_push * 2);
759 if (ret) { 657 if (ret) {
760 NV_ERROR(dev, "cal_space: %d\n", ret); 658 NV_ERROR(dev, "cal_space: %d\n", ret);
761 goto out; 659 goto out;
762 } 660 }
763 OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) + 661
764 req->offset) | 2); 662 for (i = 0; i < req->nr_push; i++) {
765 OUT_RING(chan, 0); 663 struct nouveau_bo *nvbo = (void *)(unsigned long)
664 bo[push[i].bo_index].user_priv;
665 struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
666
667 OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
668 push[i].offset) | 2);
669 OUT_RING(chan, 0);
670 }
766 } else { 671 } else {
767 ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS); 672 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
768 if (ret) { 673 if (ret) {
769 NV_ERROR(dev, "jmp_space: %d\n", ret); 674 NV_ERROR(dev, "jmp_space: %d\n", ret);
770 goto out; 675 goto out;
771 } 676 }
772 OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
773 req->offset) | 0x20000000);
774 OUT_RING(chan, 0);
775 677
776 /* Space the jumps apart with NOPs. */ 678 for (i = 0; i < req->nr_push; i++) {
777 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) 679 struct nouveau_bo *nvbo = (void *)(unsigned long)
680 bo[push[i].bo_index].user_priv;
681 struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
682 uint32_t cmd;
683
684 cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
685 cmd |= 0x20000000;
686 if (unlikely(cmd != req->suffix0)) {
687 if (!nvbo->kmap.virtual) {
688 ret = ttm_bo_kmap(&nvbo->bo, 0,
689 nvbo->bo.mem.
690 num_pages,
691 &nvbo->kmap);
692 if (ret) {
693 WIND_RING(chan);
694 goto out;
695 }
696 nvbo->validate_mapped = true;
697 }
698
699 nouveau_bo_wr32(nvbo, (push[i].offset +
700 push[i].length - 8) / 4, cmd);
701 }
702
703 OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
704 push[i].offset) | 0x20000000);
778 OUT_RING(chan, 0); 705 OUT_RING(chan, 0);
706 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
707 OUT_RING(chan, 0);
708 }
779 } 709 }
780 710
781 ret = nouveau_fence_new(chan, &fence, true); 711 ret = nouveau_fence_new(chan, &fence, true);
@@ -790,9 +720,14 @@ out:
 	nouveau_fence_unref((void**)&fence);
 	mutex_unlock(&dev->struct_mutex);
 	kfree(bo);
+	kfree(push);
 
 out_next:
-	if (PUSHBUF_CAL) {
+	if (chan->dma.ib_max) {
+		req->suffix0 = 0x00000000;
+		req->suffix1 = 0x00000000;
+	} else
+	if (dev_priv->card_type >= NV_20) {
 		req->suffix0 = 0x00020000;
 		req->suffix1 = 0x00000000;
 	} else {
@@ -804,19 +739,6 @@ out_next:
804 return ret; 739 return ret;
805} 740}
806 741
807int
808nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
809 struct drm_file *file_priv)
810{
811 struct drm_nouveau_private *dev_priv = dev->dev_private;
812 struct drm_nouveau_gem_pushbuf_call *req = data;
813
814 req->vram_available = dev_priv->fb_aper_free;
815 req->gart_available = dev_priv->gart_info.aper_free;
816
817 return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
818}
819
820static inline uint32_t 742static inline uint32_t
821domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain) 743domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
822{ 744{
@@ -831,74 +753,6 @@ domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
831} 753}
832 754
833int 755int
834nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
835 struct drm_file *file_priv)
836{
837 struct drm_nouveau_gem_pin *req = data;
838 struct drm_gem_object *gem;
839 struct nouveau_bo *nvbo;
840 int ret = 0;
841
842 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
843
844 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
845 NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
846 return -EINVAL;
847 }
848
849 if (!DRM_SUSER(DRM_CURPROC))
850 return -EPERM;
851
852 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
853 if (!gem)
854 return -EINVAL;
855 nvbo = nouveau_gem_object(gem);
856
857 ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
858 if (ret)
859 goto out;
860
861 req->offset = nvbo->bo.offset;
862 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
863 req->domain = NOUVEAU_GEM_DOMAIN_GART;
864 else
865 req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
866
867out:
868 mutex_lock(&dev->struct_mutex);
869 drm_gem_object_unreference(gem);
870 mutex_unlock(&dev->struct_mutex);
871
872 return ret;
873}
874
875int
876nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
877 struct drm_file *file_priv)
878{
879 struct drm_nouveau_gem_pin *req = data;
880 struct drm_gem_object *gem;
881 int ret;
882
883 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
884
885 if (drm_core_check_feature(dev, DRIVER_MODESET))
886 return -EINVAL;
887
888 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
889 if (!gem)
890 return -EINVAL;
891
892 ret = nouveau_bo_unpin(nouveau_gem_object(gem));
893
894 mutex_lock(&dev->struct_mutex);
895 drm_gem_object_unreference(gem);
896 mutex_unlock(&dev->struct_mutex);
897
898 return ret;
899}
900
901int
902nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, 756nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
903 struct drm_file *file_priv) 757 struct drm_file *file_priv)
904{ 758{
@@ -935,9 +789,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 	}
 
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
@@ -965,9 +817,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
 	ret = 0;
 
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
@@ -986,9 +836,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
 		return -EINVAL;
 
 	ret = nouveau_gem_info(gem, req);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index dc46792a5c96..7855b35effc3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -160,7 +160,7 @@ static void
 setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chip_version = dev_priv->vbios->chip_version;
+	int chip_version = dev_priv->vbios.chip_version;
 	uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
 	int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
 	uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
@@ -216,7 +216,7 @@ setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
 		       struct nouveau_pll_vals *pv)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chip_version = dev_priv->vbios->chip_version;
+	int chip_version = dev_priv->vbios.chip_version;
 	bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
 	uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
 	uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
@@ -374,7 +374,7 @@ nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
 		  struct nouveau_pll_vals *pv)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int cv = dev_priv->vbios->chip_version;
+	int cv = dev_priv->vbios.chip_version;
 
 	if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
 	    cv >= 0x40) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 70e994d28122..88583e7bf651 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -254,16 +254,16 @@ struct nouveau_i2c_chan *
 nouveau_i2c_find(struct drm_device *dev, int index)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->VBIOS;
+	struct nvbios *bios = &dev_priv->vbios;
 
-	if (index > DCB_MAX_NUM_I2C_ENTRIES)
+	if (index >= DCB_MAX_NUM_I2C_ENTRIES)
 		return NULL;
 
-	if (!bios->bdcb.dcb.i2c[index].chan) {
-		if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index))
+	if (!bios->dcb.i2c[index].chan) {
+		if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index))
 			return NULL;
 	}
 
-	return bios->bdcb.dcb.i2c[index].chan;
+	return bios->dcb.i2c[index].chan;
 }
269 269
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 447f9f69d6b1..95220ddebb45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -691,11 +691,14 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
691 struct drm_device *dev = (struct drm_device *)arg; 691 struct drm_device *dev = (struct drm_device *)arg;
692 struct drm_nouveau_private *dev_priv = dev->dev_private; 692 struct drm_nouveau_private *dev_priv = dev->dev_private;
693 uint32_t status, fbdev_flags = 0; 693 uint32_t status, fbdev_flags = 0;
694 unsigned long flags;
694 695
695 status = nv_rd32(dev, NV03_PMC_INTR_0); 696 status = nv_rd32(dev, NV03_PMC_INTR_0);
696 if (!status) 697 if (!status)
697 return IRQ_NONE; 698 return IRQ_NONE;
698 699
700 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
701
699 if (dev_priv->fbdev_info) { 702 if (dev_priv->fbdev_info) {
700 fbdev_flags = dev_priv->fbdev_info->flags; 703 fbdev_flags = dev_priv->fbdev_info->flags;
701 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; 704 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
@@ -733,5 +736,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
733 if (dev_priv->fbdev_info) 736 if (dev_priv->fbdev_info)
734 dev_priv->fbdev_info->flags = fbdev_flags; 737 dev_priv->fbdev_info->flags = fbdev_flags;
735 738
739 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
740
736 return IRQ_HANDLED; 741 return IRQ_HANDLED;
737} 742}
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index d99dc087f9b1..9537f3e30115 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -61,11 +61,8 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
 
 	chan->notifier_bo = ntfy;
 out_err:
-	if (ret) {
-		mutex_lock(&dev->struct_mutex);
-		drm_gem_object_unreference(ntfy->gem);
-		mutex_unlock(&dev->struct_mutex);
-	}
+	if (ret)
+		drm_gem_object_unreference_unlocked(ntfy->gem);
 
 	return ret;
 }
@@ -81,8 +78,8 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
 	nouveau_bo_unmap(chan->notifier_bo);
 	mutex_lock(&dev->struct_mutex);
 	nouveau_bo_unpin(chan->notifier_bo);
-	drm_gem_object_unreference(chan->notifier_bo->gem);
 	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
 	nouveau_mem_takedown(&chan->notifier_heap);
 }
88 85
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a4851af5b05e..eb8f084d5f53 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -29,6 +29,7 @@
29#include "drm_sarea.h" 29#include "drm_sarea.h"
30#include "drm_crtc_helper.h" 30#include "drm_crtc_helper.h"
31#include <linux/vgaarb.h> 31#include <linux/vgaarb.h>
32#include <linux/vga_switcheroo.h>
32 33
33#include "nouveau_drv.h" 34#include "nouveau_drv.h"
34#include "nouveau_drm.h" 35#include "nouveau_drm.h"
@@ -371,6 +372,30 @@ out_err:
371 return ret; 372 return ret;
372} 373}
373 374
375static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
376 enum vga_switcheroo_state state)
377{
378 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
379 if (state == VGA_SWITCHEROO_ON) {
380 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
381 nouveau_pci_resume(pdev);
382 } else {
383 printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
384 nouveau_pci_suspend(pdev, pmm);
385 }
386}
387
388static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
389{
390 struct drm_device *dev = pci_get_drvdata(pdev);
391 bool can_switch;
392
393 spin_lock(&dev->count_lock);
394 can_switch = (dev->open_count == 0);
395 spin_unlock(&dev->count_lock);
396 return can_switch;
397}
398
374int 399int
375nouveau_card_init(struct drm_device *dev) 400nouveau_card_init(struct drm_device *dev)
376{ 401{
@@ -384,6 +409,8 @@ nouveau_card_init(struct drm_device *dev)
384 return 0; 409 return 0;
385 410
386 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); 411 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
412 vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
413 nouveau_switcheroo_can_switch);
387 414
388 /* Initialise internal driver API hooks */ 415 /* Initialise internal driver API hooks */
389 ret = nouveau_init_engine_ptrs(dev); 416 ret = nouveau_init_engine_ptrs(dev);
@@ -391,6 +418,7 @@ nouveau_card_init(struct drm_device *dev)
391 goto out; 418 goto out;
392 engine = &dev_priv->engine; 419 engine = &dev_priv->engine;
393 dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; 420 dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
421 spin_lock_init(&dev_priv->context_switch_lock);
394 422
395 /* Parse BIOS tables / Run init tables if card not POSTed */ 423 /* Parse BIOS tables / Run init tables if card not POSTed */
396 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 424 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -617,11 +645,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
617 NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n", 645 NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
618 dev->pci_vendor, dev->pci_device, dev->pdev->class); 646 dev->pci_vendor, dev->pci_device, dev->pdev->class);
619 647
620 dev_priv->acpi_dsm = nouveau_dsm_probe(dev);
621
622 if (dev_priv->acpi_dsm)
623 nouveau_hybrid_setup(dev);
624
625 dev_priv->wq = create_workqueue("nouveau"); 648 dev_priv->wq = create_workqueue("nouveau");
626 if (!dev_priv->wq) 649 if (!dev_priv->wq)
627 return -EINVAL; 650 return -EINVAL;
@@ -776,13 +799,6 @@ int nouveau_unload(struct drm_device *dev)
776 return 0; 799 return 0;
777} 800}
778 801
779int
780nouveau_ioctl_card_init(struct drm_device *dev, void *data,
781 struct drm_file *file_priv)
782{
783 return nouveau_card_init(dev);
784}
785
786int nouveau_ioctl_getparam(struct drm_device *dev, void *data, 802int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
787 struct drm_file *file_priv) 803 struct drm_file *file_priv)
788{ 804{
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index d2f143ed97c1..a1d1ebb073d9 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -926,9 +926,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
926 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); 926 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
927 nv_crtc->cursor.show(nv_crtc, true); 927 nv_crtc->cursor.show(nv_crtc, true);
928out: 928out:
929 mutex_lock(&dev->struct_mutex); 929 drm_gem_object_unreference_unlocked(gem);
930 drm_gem_object_unreference(gem);
931 mutex_unlock(&dev->struct_mutex);
932 return ret; 930 return ret;
933} 931}
934 932
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 1d73b15d70da..1cb19e3acb55 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -230,13 +230,13 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
230 if (dcb->type == OUTPUT_TV) { 230 if (dcb->type == OUTPUT_TV) {
231 testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0); 231 testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
232 232
233 if (dev_priv->vbios->tvdactestval) 233 if (dev_priv->vbios.tvdactestval)
234 testval = dev_priv->vbios->tvdactestval; 234 testval = dev_priv->vbios.tvdactestval;
235 } else { 235 } else {
236 testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */ 236 testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
237 237
238 if (dev_priv->vbios->dactestval) 238 if (dev_priv->vbios.dactestval)
239 testval = dev_priv->vbios->dactestval; 239 testval = dev_priv->vbios.dactestval;
240 } 240 }
241 241
242 saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); 242 saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 483f875bdb6a..41634d4752fe 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -269,10 +269,10 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
269 regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1; 269 regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
270 if (!nv_gf4_disp_arch(dev) || 270 if (!nv_gf4_disp_arch(dev) ||
271 (output_mode->hsync_start - output_mode->hdisplay) >= 271 (output_mode->hsync_start - output_mode->hdisplay) >=
272 dev_priv->vbios->digital_min_front_porch) 272 dev_priv->vbios.digital_min_front_porch)
273 regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay; 273 regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
274 else 274 else
275 regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1; 275 regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios.digital_min_front_porch - 1;
276 regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1; 276 regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
277 regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1; 277 regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
278 regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew; 278 regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index ef77215fa5b9..c7898b4f6dfb 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -93,10 +93,9 @@ int
93nv04_display_create(struct drm_device *dev) 93nv04_display_create(struct drm_device *dev)
94{ 94{
95 struct drm_nouveau_private *dev_priv = dev->dev_private; 95 struct drm_nouveau_private *dev_priv = dev->dev_private;
96 struct parsed_dcb *dcb = dev_priv->vbios->dcb; 96 struct dcb_table *dcb = &dev_priv->vbios.dcb;
97 struct drm_encoder *encoder; 97 struct drm_encoder *encoder;
98 struct drm_crtc *crtc; 98 struct drm_crtc *crtc;
99 uint16_t connector[16] = { 0 };
100 int i, ret; 99 int i, ret;
101 100
102 NV_DEBUG_KMS(dev, "\n"); 101 NV_DEBUG_KMS(dev, "\n");
@@ -154,52 +153,10 @@ nv04_display_create(struct drm_device *dev)
154 153
155 if (ret) 154 if (ret)
156 continue; 155 continue;
157
158 connector[dcbent->connector] |= (1 << dcbent->type);
159 } 156 }
160 157
161 for (i = 0; i < dcb->entries; i++) { 158 for (i = 0; i < dcb->connector.entries; i++)
162 struct dcb_entry *dcbent = &dcb->entry[i]; 159 nouveau_connector_create(dev, &dcb->connector.entry[i]);
163 uint16_t encoders;
164 int type;
165
166 encoders = connector[dcbent->connector];
167 if (!(encoders & (1 << dcbent->type)))
168 continue;
169 connector[dcbent->connector] = 0;
170
171 switch (dcbent->type) {
172 case OUTPUT_ANALOG:
173 if (!MULTIPLE_ENCODERS(encoders))
174 type = DRM_MODE_CONNECTOR_VGA;
175 else
176 type = DRM_MODE_CONNECTOR_DVII;
177 break;
178 case OUTPUT_TMDS:
179 if (!MULTIPLE_ENCODERS(encoders))
180 type = DRM_MODE_CONNECTOR_DVID;
181 else
182 type = DRM_MODE_CONNECTOR_DVII;
183 break;
184 case OUTPUT_LVDS:
185 type = DRM_MODE_CONNECTOR_LVDS;
186#if 0
187 /* don't create i2c adapter when lvds ddc not allowed */
188 if (dcbent->lvdsconf.use_straps_for_mode ||
189 dev_priv->vbios->fp_no_ddc)
190 i2c_index = 0xf;
191#endif
192 break;
193 case OUTPUT_TV:
194 type = DRM_MODE_CONNECTOR_TV;
195 break;
196 default:
197 type = DRM_MODE_CONNECTOR_Unknown;
198 continue;
199 }
200
201 nouveau_connector_create(dev, dcbent->connector, type);
202 }
203 160
204 /* Save previous state */ 161 /* Save previous state */
205 NVLockVgaCrtcs(dev, false); 162 NVLockVgaCrtcs(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index fd01caabd5c3..3da90c2c4e63 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -118,7 +118,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
118 return; 118 return;
119 } 119 }
120 120
121 width = (image->width + 31) & ~31; 121 width = ALIGN(image->width, 32);
122 dsize = (width * image->height) >> 5; 122 dsize = (width * image->height) >> 5;
123 123
124 if (info->fix.visual == FB_VISUAL_TRUECOLOR || 124 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index f31347b8c9b0..66fe55983b6e 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -117,6 +117,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
117{ 117{
118 struct drm_device *dev = chan->dev; 118 struct drm_device *dev = chan->dev;
119 struct drm_nouveau_private *dev_priv = dev->dev_private; 119 struct drm_nouveau_private *dev_priv = dev->dev_private;
120 unsigned long flags;
120 int ret; 121 int ret;
121 122
122 ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0, 123 ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
@@ -127,6 +128,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
127 if (ret) 128 if (ret)
128 return ret; 129 return ret;
129 130
131 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
132
130 /* Setup initial state */ 133 /* Setup initial state */
131 dev_priv->engine.instmem.prepare_access(dev, true); 134 dev_priv->engine.instmem.prepare_access(dev, true);
132 RAMFC_WR(DMA_PUT, chan->pushbuf_base); 135 RAMFC_WR(DMA_PUT, chan->pushbuf_base);
@@ -144,6 +147,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
144 /* enable the fifo dma operation */ 147 /* enable the fifo dma operation */
145 nv_wr32(dev, NV04_PFIFO_MODE, 148 nv_wr32(dev, NV04_PFIFO_MODE,
146 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); 149 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
150
151 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
147 return 0; 152 return 0;
148} 153}
149 154
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 9c63099e9c42..c4e3404337d4 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -262,7 +262,7 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
262 nv_encoder->or = ffs(entry->or) - 1; 262 nv_encoder->or = ffs(entry->or) - 1;
263 263
264 /* Run the slave-specific initialization */ 264 /* Run the slave-specific initialization */
265 adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter; 265 adap = &dev_priv->vbios.dcb.i2c[i2c_index].chan->adapter;
266 266
267 was_locked = NVLockVgaCrtcs(dev, false); 267 was_locked = NVLockVgaCrtcs(dev, false);
268 268
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 21ac6e49b6ee..74c880374fb9 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -45,8 +45,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
45 45
46#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) 46#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
47 testval = RGB_TEST_DATA(0x82, 0xeb, 0x82); 47 testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
48 if (dev_priv->vbios->tvdactestval) 48 if (dev_priv->vbios.tvdactestval)
49 testval = dev_priv->vbios->tvdactestval; 49 testval = dev_priv->vbios.tvdactestval;
50 50
51 dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); 51 dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
52 head = (dacclk & 0x100) >> 8; 52 head = (dacclk & 0x100) >> 8;
@@ -367,7 +367,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
367 !enc->crtc && 367 !enc->crtc &&
368 nv04_dfp_get_bound_head(dev, dcb) == head) { 368 nv04_dfp_get_bound_head(dev, dcb) == head) {
369 nv04_dfp_bind_head(dev, dcb, head ^ 1, 369 nv04_dfp_bind_head(dev, dcb, head ^ 1,
370 dev_priv->VBIOS.fp.dual_link); 370 dev_priv->vbios.fp.dual_link);
371 } 371 }
372 } 372 }
373 373
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index b4f19ccb8b41..6b2ef4a9fce1 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -37,6 +37,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
37 struct drm_device *dev = chan->dev; 37 struct drm_device *dev = chan->dev;
38 struct drm_nouveau_private *dev_priv = dev->dev_private; 38 struct drm_nouveau_private *dev_priv = dev->dev_private;
39 uint32_t fc = NV40_RAMFC(chan->id); 39 uint32_t fc = NV40_RAMFC(chan->id);
40 unsigned long flags;
40 int ret; 41 int ret;
41 42
42 ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, 43 ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
@@ -45,6 +46,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
45 if (ret) 46 if (ret)
46 return ret; 47 return ret;
47 48
49 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
50
48 dev_priv->engine.instmem.prepare_access(dev, true); 51 dev_priv->engine.instmem.prepare_access(dev, true);
49 nv_wi32(dev, fc + 0, chan->pushbuf_base); 52 nv_wi32(dev, fc + 0, chan->pushbuf_base);
50 nv_wi32(dev, fc + 4, chan->pushbuf_base); 53 nv_wi32(dev, fc + 4, chan->pushbuf_base);
@@ -63,6 +66,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
63 /* enable the fifo dma operation */ 66 /* enable the fifo dma operation */
64 nv_wr32(dev, NV04_PFIFO_MODE, 67 nv_wr32(dev, NV04_PFIFO_MODE,
65 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); 68 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
69
70 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
66 return 0; 71 return 0;
67} 72}
68 73
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index d1a651e3400c..cfabeb974a56 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -358,9 +358,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
358 nv_crtc->cursor.show(nv_crtc, true); 358 nv_crtc->cursor.show(nv_crtc, true);
359 359
360out: 360out:
361 mutex_lock(&dev->struct_mutex); 361 drm_gem_object_unreference_unlocked(gem);
362 drm_gem_object_unreference(gem);
363 mutex_unlock(&dev->struct_mutex);
364 return ret; 362 return ret;
365} 363}
366 364
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index f08f042a8e10..1fd9537beff6 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -79,8 +79,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
79 } 79 }
80 80
81 /* Use bios provided value if possible. */ 81 /* Use bios provided value if possible. */
82 if (dev_priv->vbios->dactestval) { 82 if (dev_priv->vbios.dactestval) {
83 load_pattern = dev_priv->vbios->dactestval; 83 load_pattern = dev_priv->vbios.dactestval;
84 NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n", 84 NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
85 load_pattern); 85 load_pattern);
86 } else { 86 } else {
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 90f0bf59fbcd..61a89f2dc553 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -370,9 +370,7 @@ nv50_display_init(struct drm_device *dev)
370 struct nouveau_connector *conn = nouveau_connector(connector); 370 struct nouveau_connector *conn = nouveau_connector(connector);
371 struct dcb_gpio_entry *gpio; 371 struct dcb_gpio_entry *gpio;
372 372
373 if (connector->connector_type != DRM_MODE_CONNECTOR_DVII && 373 if (conn->dcb->gpio_tag == 0xff)
374 connector->connector_type != DRM_MODE_CONNECTOR_DVID &&
375 connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
376 continue; 374 continue;
377 375
378 gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag); 376 gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
@@ -465,8 +463,7 @@ static int nv50_display_disable(struct drm_device *dev)
465int nv50_display_create(struct drm_device *dev) 463int nv50_display_create(struct drm_device *dev)
466{ 464{
467 struct drm_nouveau_private *dev_priv = dev->dev_private; 465 struct drm_nouveau_private *dev_priv = dev->dev_private;
468 struct parsed_dcb *dcb = dev_priv->vbios->dcb; 466 struct dcb_table *dcb = &dev_priv->vbios.dcb;
469 uint32_t connector[16] = {};
470 int ret, i; 467 int ret, i;
471 468
472 NV_DEBUG_KMS(dev, "\n"); 469 NV_DEBUG_KMS(dev, "\n");
@@ -522,44 +519,13 @@ int nv50_display_create(struct drm_device *dev)
522 NV_WARN(dev, "DCB encoder %d unknown\n", entry->type); 519 NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
523 continue; 520 continue;
524 } 521 }
525
526 connector[entry->connector] |= (1 << entry->type);
527 } 522 }
528 523
529 /* It appears that DCB 3.0+ VBIOS has a connector table, however, 524 for (i = 0 ; i < dcb->connector.entries; i++) {
530 * I'm not 100% certain how to decode it correctly yet so just 525 if (i != 0 && dcb->connector.entry[i].index ==
531 * look at what encoders are present on each connector index and 526 dcb->connector.entry[i - 1].index)
532 * attempt to derive the connector type from that.
533 */
534 for (i = 0 ; i < dcb->entries; i++) {
535 struct dcb_entry *entry = &dcb->entry[i];
536 uint16_t encoders;
537 int type;
538
539 encoders = connector[entry->connector];
540 if (!(encoders & (1 << entry->type)))
541 continue; 527 continue;
542 connector[entry->connector] = 0; 528 nouveau_connector_create(dev, &dcb->connector.entry[i]);
543
544 if (encoders & (1 << OUTPUT_DP)) {
545 type = DRM_MODE_CONNECTOR_DisplayPort;
546 } else if (encoders & (1 << OUTPUT_TMDS)) {
547 if (encoders & (1 << OUTPUT_ANALOG))
548 type = DRM_MODE_CONNECTOR_DVII;
549 else
550 type = DRM_MODE_CONNECTOR_DVID;
551 } else if (encoders & (1 << OUTPUT_ANALOG)) {
552 type = DRM_MODE_CONNECTOR_VGA;
553 } else if (encoders & (1 << OUTPUT_LVDS)) {
554 type = DRM_MODE_CONNECTOR_LVDS;
555 } else {
556 type = DRM_MODE_CONNECTOR_Unknown;
557 }
558
559 if (type == DRM_MODE_CONNECTOR_Unknown)
560 continue;
561
562 nouveau_connector_create(dev, entry->connector, type);
563 } 529 }
564 530
565 ret = nv50_display_init(dev); 531 ret = nv50_display_init(dev);
@@ -667,8 +633,8 @@ nv50_display_irq_head(struct drm_device *dev, int *phead,
667 return -1; 633 return -1;
668 } 634 }
669 635
670 for (i = 0; i < dev_priv->vbios->dcb->entries; i++) { 636 for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
671 struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i]; 637 struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i];
672 638
673 if (dcbent->type != type) 639 if (dcbent->type != type)
674 continue; 640 continue;
@@ -692,7 +658,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
692 struct drm_nouveau_private *dev_priv = dev->dev_private; 658 struct drm_nouveau_private *dev_priv = dev->dev_private;
693 struct nouveau_connector *nv_connector = NULL; 659 struct nouveau_connector *nv_connector = NULL;
694 struct drm_encoder *encoder; 660 struct drm_encoder *encoder;
695 struct nvbios *bios = &dev_priv->VBIOS; 661 struct nvbios *bios = &dev_priv->vbios;
696 uint32_t mc, script = 0, or; 662 uint32_t mc, script = 0, or;
697 663
698 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 664 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -710,7 +676,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
710 switch (dcbent->type) { 676 switch (dcbent->type) {
711 case OUTPUT_LVDS: 677 case OUTPUT_LVDS:
712 script = (mc >> 8) & 0xf; 678 script = (mc >> 8) & 0xf;
713 if (bios->pub.fp_no_ddc) { 679 if (bios->fp_no_ddc) {
714 if (bios->fp.dual_link) 680 if (bios->fp.dual_link)
715 script |= 0x0100; 681 script |= 0x0100;
716 if (bios->fp.if_is_24bit) 682 if (bios->fp.if_is_24bit)
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 0f57cdf7ccb2..993c7126fbde 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -109,7 +109,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
109 return; 109 return;
110 } 110 }
111 111
112 width = (image->width + 31) & ~31; 112 width = ALIGN(image->width, 32);
113 dwords = (width * image->height) >> 5; 113 dwords = (width * image->height) >> 5;
114 114
115 BEGIN_RING(chan, NvSub2D, 0x0814, 2); 115 BEGIN_RING(chan, NvSub2D, 0x0814, 2);
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 204a79ff10f4..e20c0e2474f3 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -243,6 +243,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
243 struct drm_device *dev = chan->dev; 243 struct drm_device *dev = chan->dev;
244 struct drm_nouveau_private *dev_priv = dev->dev_private; 244 struct drm_nouveau_private *dev_priv = dev->dev_private;
245 struct nouveau_gpuobj *ramfc = NULL; 245 struct nouveau_gpuobj *ramfc = NULL;
246 unsigned long flags;
246 int ret; 247 int ret;
247 248
248 NV_DEBUG(dev, "ch%d\n", chan->id); 249 NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -278,19 +279,21 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
278 return ret; 279 return ret;
279 } 280 }
280 281
282 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
283
281 dev_priv->engine.instmem.prepare_access(dev, true); 284 dev_priv->engine.instmem.prepare_access(dev, true);
282 285
283 nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
284 nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base);
285 nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4); 286 nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
286 nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4)); 287 nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
287 nv_wo32(dev, ramfc, 0x3c/4, 0x00086078);
288 nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff); 288 nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
289 nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff); 289 nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
290 nv_wo32(dev, ramfc, 0x40/4, 0x00000000); 290 nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
291 nv_wo32(dev, ramfc, 0x7c/4, 0x30000001); 291 nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
292 nv_wo32(dev, ramfc, 0x78/4, 0x00000000); 292 nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
293 nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff); 293 nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078);
294 nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base +
295 chan->dma.ib_base * 4);
296 nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);
294 297
295 if (!IS_G80) { 298 if (!IS_G80) {
296 nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id); 299 nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
@@ -306,10 +309,12 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
306 ret = nv50_fifo_channel_enable(dev, chan->id, false); 309 ret = nv50_fifo_channel_enable(dev, chan->id, false);
307 if (ret) { 310 if (ret) {
308 NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret); 311 NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
312 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
309 nouveau_gpuobj_ref_del(dev, &chan->ramfc); 313 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
310 return ret; 314 return ret;
311 } 315 }
312 316
317 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
313 return 0; 318 return 0;
314} 319}
315 320
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 6d504801b514..857a09671a39 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -28,30 +28,7 @@
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30 30
31MODULE_FIRMWARE("nouveau/nv50.ctxprog"); 31#include "nouveau_grctx.h"
32MODULE_FIRMWARE("nouveau/nv50.ctxvals");
33MODULE_FIRMWARE("nouveau/nv84.ctxprog");
34MODULE_FIRMWARE("nouveau/nv84.ctxvals");
35MODULE_FIRMWARE("nouveau/nv86.ctxprog");
36MODULE_FIRMWARE("nouveau/nv86.ctxvals");
37MODULE_FIRMWARE("nouveau/nv92.ctxprog");
38MODULE_FIRMWARE("nouveau/nv92.ctxvals");
39MODULE_FIRMWARE("nouveau/nv94.ctxprog");
40MODULE_FIRMWARE("nouveau/nv94.ctxvals");
41MODULE_FIRMWARE("nouveau/nv96.ctxprog");
42MODULE_FIRMWARE("nouveau/nv96.ctxvals");
43MODULE_FIRMWARE("nouveau/nv98.ctxprog");
44MODULE_FIRMWARE("nouveau/nv98.ctxvals");
45MODULE_FIRMWARE("nouveau/nva0.ctxprog");
46MODULE_FIRMWARE("nouveau/nva0.ctxvals");
47MODULE_FIRMWARE("nouveau/nva5.ctxprog");
48MODULE_FIRMWARE("nouveau/nva5.ctxvals");
49MODULE_FIRMWARE("nouveau/nva8.ctxprog");
50MODULE_FIRMWARE("nouveau/nva8.ctxvals");
51MODULE_FIRMWARE("nouveau/nvaa.ctxprog");
52MODULE_FIRMWARE("nouveau/nvaa.ctxvals");
53MODULE_FIRMWARE("nouveau/nvac.ctxprog");
54MODULE_FIRMWARE("nouveau/nvac.ctxvals");
55 32
56#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) 33#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
57 34
@@ -111,9 +88,34 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
111 88
112 NV_DEBUG(dev, "\n"); 89 NV_DEBUG(dev, "\n");
113 90
114 nouveau_grctx_prog_load(dev); 91 if (nouveau_ctxfw) {
115 if (!dev_priv->engine.graph.ctxprog) 92 nouveau_grctx_prog_load(dev);
116 dev_priv->engine.graph.accel_blocked = true; 93 dev_priv->engine.graph.grctx_size = 0x70000;
94 }
95 if (!dev_priv->engine.graph.ctxprog) {
96 struct nouveau_grctx ctx = {};
97 uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL);
98 int i;
99 if (!cp) {
100 NV_ERROR(dev, "Couldn't alloc ctxprog! Disabling acceleration.\n");
101 dev_priv->engine.graph.accel_blocked = true;
102 return 0;
103 }
104 ctx.dev = dev;
105 ctx.mode = NOUVEAU_GRCTX_PROG;
106 ctx.data = cp;
107 ctx.ctxprog_max = 512;
108 if (!nv50_grctx_init(&ctx)) {
109 dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
110
111 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
112 for (i = 0; i < ctx.ctxprog_len; i++)
113 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
114 } else {
115 dev_priv->engine.graph.accel_blocked = true;
116 }
117 kfree(cp);
118 }
117 119
118 nv_wr32(dev, 0x400320, 4); 120 nv_wr32(dev, 0x400320, 4);
119 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0); 121 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
@@ -193,13 +195,13 @@ nv50_graph_create_context(struct nouveau_channel *chan)
193 struct drm_nouveau_private *dev_priv = dev->dev_private; 195 struct drm_nouveau_private *dev_priv = dev->dev_private;
194 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; 196 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
195 struct nouveau_gpuobj *ctx; 197 struct nouveau_gpuobj *ctx;
196 uint32_t grctx_size = 0x70000; 198 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
197 int hdr, ret; 199 int hdr, ret;
198 200
199 NV_DEBUG(dev, "ch%d\n", chan->id); 201 NV_DEBUG(dev, "ch%d\n", chan->id);
200 202
201 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000, 203 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
202 NVOBJ_FLAG_ZERO_ALLOC | 204 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
203 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); 205 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
204 if (ret) 206 if (ret)
205 return ret; 207 return ret;
@@ -209,7 +211,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
209 dev_priv->engine.instmem.prepare_access(dev, true); 211 dev_priv->engine.instmem.prepare_access(dev, true);
210 nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002); 212 nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
211 nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + 213 nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
212 grctx_size - 1); 214 pgraph->grctx_size - 1);
213 nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance); 215 nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
214 nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0); 216 nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
215 nv_wo32(dev, ramin, (hdr + 0x10)/4, 0); 217 nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
@@ -217,7 +219,15 @@ nv50_graph_create_context(struct nouveau_channel *chan)
217 dev_priv->engine.instmem.finish_access(dev); 219 dev_priv->engine.instmem.finish_access(dev);
218 220
219 dev_priv->engine.instmem.prepare_access(dev, true); 221 dev_priv->engine.instmem.prepare_access(dev, true);
220 nouveau_grctx_vals_load(dev, ctx); 222 if (!pgraph->ctxprog) {
223 struct nouveau_grctx ctx = {};
224 ctx.dev = chan->dev;
225 ctx.mode = NOUVEAU_GRCTX_VALS;
226 ctx.data = chan->ramin_grctx->gpuobj;
227 nv50_grctx_init(&ctx);
228 } else {
229 nouveau_grctx_vals_load(dev, ctx);
230 }
221 nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); 231 nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
222 if ((dev_priv->chipset & 0xf0) == 0xa0) 232 if ((dev_priv->chipset & 0xf0) == 0xa0)
223 nv_wo32(dev, ctx, 0x00004/4, 0x00000000); 233 nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
new file mode 100644
index 000000000000..d105fcd42ca0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -0,0 +1,2367 @@
1/*
2 * Copyright 2009 Marcin Kościelnicki
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#define CP_FLAG_CLEAR 0
24#define CP_FLAG_SET 1
25#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
26#define CP_FLAG_SWAP_DIRECTION_LOAD 0
27#define CP_FLAG_SWAP_DIRECTION_SAVE 1
28#define CP_FLAG_UNK01 ((0 * 32) + 1)
29#define CP_FLAG_UNK01_CLEAR 0
30#define CP_FLAG_UNK01_SET 1
31#define CP_FLAG_UNK03 ((0 * 32) + 3)
32#define CP_FLAG_UNK03_CLEAR 0
33#define CP_FLAG_UNK03_SET 1
34#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
35#define CP_FLAG_USER_SAVE_NOT_PENDING 0
36#define CP_FLAG_USER_SAVE_PENDING 1
37#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
38#define CP_FLAG_USER_LOAD_NOT_PENDING 0
39#define CP_FLAG_USER_LOAD_PENDING 1
40#define CP_FLAG_UNK0B ((0 * 32) + 0xb)
41#define CP_FLAG_UNK0B_CLEAR 0
42#define CP_FLAG_UNK0B_SET 1
43#define CP_FLAG_UNK1D ((0 * 32) + 0x1d)
44#define CP_FLAG_UNK1D_CLEAR 0
45#define CP_FLAG_UNK1D_SET 1
46#define CP_FLAG_UNK20 ((1 * 32) + 0)
47#define CP_FLAG_UNK20_CLEAR 0
48#define CP_FLAG_UNK20_SET 1
49#define CP_FLAG_STATUS ((2 * 32) + 0)
50#define CP_FLAG_STATUS_BUSY 0
51#define CP_FLAG_STATUS_IDLE 1
52#define CP_FLAG_AUTO_SAVE ((2 * 32) + 4)
53#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
54#define CP_FLAG_AUTO_SAVE_PENDING 1
55#define CP_FLAG_AUTO_LOAD ((2 * 32) + 5)
56#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
57#define CP_FLAG_AUTO_LOAD_PENDING 1
58#define CP_FLAG_XFER ((2 * 32) + 11)
59#define CP_FLAG_XFER_IDLE 0
60#define CP_FLAG_XFER_BUSY 1
61#define CP_FLAG_NEWCTX ((2 * 32) + 12)
62#define CP_FLAG_NEWCTX_BUSY 0
63#define CP_FLAG_NEWCTX_DONE 1
64#define CP_FLAG_ALWAYS ((2 * 32) + 13)
65#define CP_FLAG_ALWAYS_FALSE 0
66#define CP_FLAG_ALWAYS_TRUE 1
67
68#define CP_CTX 0x00100000
69#define CP_CTX_COUNT 0x000f0000
70#define CP_CTX_COUNT_SHIFT 16
71#define CP_CTX_REG 0x00003fff
72#define CP_LOAD_SR 0x00200000
73#define CP_LOAD_SR_VALUE 0x000fffff
74#define CP_BRA 0x00400000
75#define CP_BRA_IP 0x0001ff00
76#define CP_BRA_IP_SHIFT 8
77#define CP_BRA_IF_CLEAR 0x00000080
78#define CP_BRA_FLAG 0x0000007f
79#define CP_WAIT 0x00500000
80#define CP_WAIT_SET 0x00000080
81#define CP_WAIT_FLAG 0x0000007f
82#define CP_SET 0x00700000
83#define CP_SET_1 0x00000080
84#define CP_SET_FLAG 0x0000007f
85#define CP_NEWCTX 0x00600004
86#define CP_NEXT_TO_SWAP 0x00600005
87#define CP_SET_CONTEXT_POINTER 0x00600006
88#define CP_SET_XFER_POINTER 0x00600007
89#define CP_ENABLE 0x00600009
90#define CP_END 0x0060000c
91#define CP_NEXT_TO_CURRENT 0x0060000d
92#define CP_DISABLE1 0x0090ffff
93#define CP_DISABLE2 0x0091ffff
94#define CP_XFER_1 0x008000ff
95#define CP_XFER_2 0x008800ff
96#define CP_SEEK_1 0x00c000ff
97#define CP_SEEK_2 0x00c800ff
98
99#include "drmP.h"
100#include "nouveau_drv.h"
101#include "nouveau_grctx.h"
102
103/*
104 * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
105 * the GPU itself that does context-switching, but it needs a special
106 * microcode to do it. And it's the driver's task to supply this microcode,
107 * further known as ctxprog, as well as the initial context values, known
108 * as ctxvals.
109 *
110 * Without ctxprog, you cannot switch contexts. Not even in software, since
111 * the majority of context [xfer strands] isn't accessible directly. You're
112 * stuck with a single channel, and you also suffer all the problems resulting
113 * from missing ctxvals, since you cannot load them.
114 *
115 * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to
116 * run 2d operations, but trying to utilise 3d or CUDA will just lock you up,
117 * since you don't have... some sort of needed setup.
118 *
119 * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since
120 * it's too much hassle to handle no-ctxprog as a special case.
121 */
122
123/*
124 * How ctxprogs work.
125 *
 126 * The ctxprog is written in its own kind of microcode, with a very small and
127 * crappy set of available commands. You upload it to a small [512 insns]
128 * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to
 129 * switch channels, or when the driver explicitly requests it. Stuff visible
130 * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands,
131 * the per-channel context save area in VRAM [known as ctxvals or grctx],
132 * 4 flags registers, a scratch register, two grctx pointers, plus many
133 * random poorly-understood details.
134 *
135 * When ctxprog runs, it's supposed to check what operations are asked of it,
136 * save old context if requested, optionally reset PGRAPH and switch to the
137 * new channel, and load the new context. Context consists of three major
138 * parts: subset of MMIO registers and two "xfer areas".
139 */
140
141/* TODO:
142 * - document unimplemented bits compared to nvidia
143 * - NVAx: make a TP subroutine, use it.
144 * - use 0x4008fc instead of 0x1540?
145 */
146
147enum cp_label {
148 cp_check_load = 1,
149 cp_setup_auto_load,
150 cp_setup_load,
151 cp_setup_save,
152 cp_swap_state,
153 cp_prepare_exit,
154 cp_exit,
155};
156
157static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx);
158static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx);
159static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
160
161/* Main function: construct the ctxprog skeleton, call the other functions. */
162
163int
164nv50_grctx_init(struct nouveau_grctx *ctx)
165{
166 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
167
168 switch (dev_priv->chipset) {
169 case 0x50:
170 case 0x84:
171 case 0x86:
172 case 0x92:
173 case 0x94:
174 case 0x96:
175 case 0x98:
176 case 0xa0:
177 case 0xa5:
178 case 0xa8:
179 case 0xaa:
180 case 0xac:
181 break;
182 default:
183 NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
184 "your NV%x card.\n", dev_priv->chipset);
185 NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
186 "the devs.\n");
187 return -ENOSYS;
188 }
189 /* decide whether we're loading/unloading the context */
190 cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
191 cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
192
193 cp_name(ctx, cp_check_load);
194 cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
195 cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
196 cp_bra (ctx, ALWAYS, TRUE, cp_exit);
197
198 /* setup for context load */
199 cp_name(ctx, cp_setup_auto_load);
200 cp_out (ctx, CP_DISABLE1);
201 cp_out (ctx, CP_DISABLE2);
202 cp_out (ctx, CP_ENABLE);
203 cp_out (ctx, CP_NEXT_TO_SWAP);
204 cp_set (ctx, UNK01, SET);
205 cp_name(ctx, cp_setup_load);
206 cp_out (ctx, CP_NEWCTX);
207 cp_wait(ctx, NEWCTX, BUSY);
208 cp_set (ctx, UNK1D, CLEAR);
209 cp_set (ctx, SWAP_DIRECTION, LOAD);
210 cp_bra (ctx, UNK0B, SET, cp_prepare_exit);
211 cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
212
213 /* setup for context save */
214 cp_name(ctx, cp_setup_save);
215 cp_set (ctx, UNK1D, SET);
216 cp_wait(ctx, STATUS, BUSY);
217 cp_set (ctx, UNK01, SET);
218 cp_set (ctx, SWAP_DIRECTION, SAVE);
219
220 /* general PGRAPH state */
221 cp_name(ctx, cp_swap_state);
222 cp_set (ctx, UNK03, SET);
223 cp_pos (ctx, 0x00004/4);
224 cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */
225 cp_pos (ctx, 0x00100/4);
226 nv50_graph_construct_mmio(ctx);
227 nv50_graph_construct_xfer1(ctx);
228 nv50_graph_construct_xfer2(ctx);
229
230 cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
231
232 cp_set (ctx, UNK20, SET);
233 cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */
234 cp_lsr (ctx, ctx->ctxvals_base);
235 cp_out (ctx, CP_SET_XFER_POINTER);
236 cp_lsr (ctx, 4);
237 cp_out (ctx, CP_SEEK_1);
238 cp_out (ctx, CP_XFER_1);
239 cp_wait(ctx, XFER, BUSY);
240
241 /* pre-exit state updates */
242 cp_name(ctx, cp_prepare_exit);
243 cp_set (ctx, UNK01, CLEAR);
244 cp_set (ctx, UNK03, CLEAR);
245 cp_set (ctx, UNK1D, CLEAR);
246
247 cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
248 cp_out (ctx, CP_NEXT_TO_CURRENT);
249
250 cp_name(ctx, cp_exit);
251 cp_set (ctx, USER_SAVE, NOT_PENDING);
252 cp_set (ctx, USER_LOAD, NOT_PENDING);
253 cp_out (ctx, CP_END);
254 ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */
255
256 return 0;
257}
258
259/*
260 * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
261 * registers to save/restore and the default values for them.
262 */
263
264static void
265nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
266{
267 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
268 int i, j;
269 int offset, base;
270 uint32_t units = nv_rd32 (ctx->dev, 0x1540);
271
272 /* 0800 */
273 cp_ctx(ctx, 0x400808, 7);
274 gr_def(ctx, 0x400814, 0x00000030);
275 cp_ctx(ctx, 0x400834, 0x32);
276 if (dev_priv->chipset == 0x50) {
277 gr_def(ctx, 0x400834, 0xff400040);
278 gr_def(ctx, 0x400838, 0xfff00080);
279 gr_def(ctx, 0x40083c, 0xfff70090);
280 gr_def(ctx, 0x400840, 0xffe806a8);
281 }
282 gr_def(ctx, 0x400844, 0x00000002);
283 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
284 gr_def(ctx, 0x400894, 0x00001000);
285 gr_def(ctx, 0x4008e8, 0x00000003);
286 gr_def(ctx, 0x4008ec, 0x00001000);
287 if (dev_priv->chipset == 0x50)
288 cp_ctx(ctx, 0x400908, 0xb);
289 else if (dev_priv->chipset < 0xa0)
290 cp_ctx(ctx, 0x400908, 0xc);
291 else
292 cp_ctx(ctx, 0x400908, 0xe);
293
294 if (dev_priv->chipset >= 0xa0)
295 cp_ctx(ctx, 0x400b00, 0x1);
296 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
297 cp_ctx(ctx, 0x400b10, 0x1);
298 gr_def(ctx, 0x400b10, 0x0001629d);
299 cp_ctx(ctx, 0x400b20, 0x1);
300 gr_def(ctx, 0x400b20, 0x0001629d);
301 }
302
303 /* 0C00 */
304 cp_ctx(ctx, 0x400c08, 0x2);
305 gr_def(ctx, 0x400c08, 0x0000fe0c);
306
307 /* 1000 */
308 if (dev_priv->chipset < 0xa0) {
309 cp_ctx(ctx, 0x401008, 0x4);
310 gr_def(ctx, 0x401014, 0x00001000);
311 } else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) {
312 cp_ctx(ctx, 0x401008, 0x5);
313 gr_def(ctx, 0x401018, 0x00001000);
314 } else {
315 cp_ctx(ctx, 0x401008, 0x5);
316 gr_def(ctx, 0x401018, 0x00004000);
317 }
318
319 /* 1400 */
320 cp_ctx(ctx, 0x401400, 0x8);
321 cp_ctx(ctx, 0x401424, 0x3);
322 if (dev_priv->chipset == 0x50)
323 gr_def(ctx, 0x40142c, 0x0001fd87);
324 else
325 gr_def(ctx, 0x40142c, 0x00000187);
326 cp_ctx(ctx, 0x401540, 0x5);
327 gr_def(ctx, 0x401550, 0x00001018);
328
329 /* 1800 */
330 cp_ctx(ctx, 0x401814, 0x1);
331 gr_def(ctx, 0x401814, 0x000000ff);
332 if (dev_priv->chipset == 0x50) {
333 cp_ctx(ctx, 0x40181c, 0xe);
334 gr_def(ctx, 0x401850, 0x00000004);
335 } else if (dev_priv->chipset < 0xa0) {
336 cp_ctx(ctx, 0x40181c, 0xf);
337 gr_def(ctx, 0x401854, 0x00000004);
338 } else {
339 cp_ctx(ctx, 0x40181c, 0x13);
340 gr_def(ctx, 0x401864, 0x00000004);
341 }
342
343 /* 1C00 */
344 cp_ctx(ctx, 0x401c00, 0x1);
345 switch (dev_priv->chipset) {
346 case 0x50:
347 gr_def(ctx, 0x401c00, 0x0001005f);
348 break;
349 case 0x84:
350 case 0x86:
351 case 0x94:
352 gr_def(ctx, 0x401c00, 0x044d00df);
353 break;
354 case 0x92:
355 case 0x96:
356 case 0x98:
357 case 0xa0:
358 case 0xaa:
359 case 0xac:
360 gr_def(ctx, 0x401c00, 0x042500df);
361 break;
362 case 0xa5:
363 case 0xa8:
364 gr_def(ctx, 0x401c00, 0x142500df);
365 break;
366 }
367
368 /* 2400 */
369 cp_ctx(ctx, 0x402400, 0x1);
370 if (dev_priv->chipset == 0x50)
371 cp_ctx(ctx, 0x402408, 0x1);
372 else
373 cp_ctx(ctx, 0x402408, 0x2);
374 gr_def(ctx, 0x402408, 0x00000600);
375
376 /* 2800 */
377 cp_ctx(ctx, 0x402800, 0x1);
378 if (dev_priv->chipset == 0x50)
379 gr_def(ctx, 0x402800, 0x00000006);
380
381 /* 2C00 */
382 cp_ctx(ctx, 0x402c08, 0x6);
383 if (dev_priv->chipset != 0x50)
384 gr_def(ctx, 0x402c14, 0x01000000);
385 gr_def(ctx, 0x402c18, 0x000000ff);
386 if (dev_priv->chipset == 0x50)
387 cp_ctx(ctx, 0x402ca0, 0x1);
388 else
389 cp_ctx(ctx, 0x402ca0, 0x2);
390 if (dev_priv->chipset < 0xa0)
391 gr_def(ctx, 0x402ca0, 0x00000400);
392 else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
393 gr_def(ctx, 0x402ca0, 0x00000800);
394 else
395 gr_def(ctx, 0x402ca0, 0x00000400);
396 cp_ctx(ctx, 0x402cac, 0x4);
397
398 /* 3000 */
399 cp_ctx(ctx, 0x403004, 0x1);
400 gr_def(ctx, 0x403004, 0x00000001);
401
402 /* 3404 */
403 if (dev_priv->chipset >= 0xa0) {
404 cp_ctx(ctx, 0x403404, 0x1);
405 gr_def(ctx, 0x403404, 0x00000001);
406 }
407
408 /* 5000 */
409 cp_ctx(ctx, 0x405000, 0x1);
410 switch (dev_priv->chipset) {
411 case 0x50:
412 gr_def(ctx, 0x405000, 0x00300080);
413 break;
414 case 0x84:
415 case 0xa0:
416 case 0xa5:
417 case 0xa8:
418 case 0xaa:
419 case 0xac:
420 gr_def(ctx, 0x405000, 0x000e0080);
421 break;
422 case 0x86:
423 case 0x92:
424 case 0x94:
425 case 0x96:
426 case 0x98:
427 gr_def(ctx, 0x405000, 0x00000080);
428 break;
429 }
430 cp_ctx(ctx, 0x405014, 0x1);
431 gr_def(ctx, 0x405014, 0x00000004);
432 cp_ctx(ctx, 0x40501c, 0x1);
433 cp_ctx(ctx, 0x405024, 0x1);
434 cp_ctx(ctx, 0x40502c, 0x1);
435
436 /* 5400 or maybe 4800 */
437 if (dev_priv->chipset == 0x50) {
438 offset = 0x405400;
439 cp_ctx(ctx, 0x405400, 0xea);
440 } else if (dev_priv->chipset < 0x94) {
441 offset = 0x405400;
442 cp_ctx(ctx, 0x405400, 0xcb);
443 } else if (dev_priv->chipset < 0xa0) {
444 offset = 0x405400;
445 cp_ctx(ctx, 0x405400, 0xcc);
446 } else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
447 offset = 0x404800;
448 cp_ctx(ctx, 0x404800, 0xda);
449 } else {
450 offset = 0x405400;
451 cp_ctx(ctx, 0x405400, 0xd4);
452 }
453 gr_def(ctx, offset + 0x0c, 0x00000002);
454 gr_def(ctx, offset + 0x10, 0x00000001);
455 if (dev_priv->chipset >= 0x94)
456 offset += 4;
457 gr_def(ctx, offset + 0x1c, 0x00000001);
458 gr_def(ctx, offset + 0x20, 0x00000100);
459 gr_def(ctx, offset + 0x38, 0x00000002);
460 gr_def(ctx, offset + 0x3c, 0x00000001);
461 gr_def(ctx, offset + 0x40, 0x00000001);
462 gr_def(ctx, offset + 0x50, 0x00000001);
463 gr_def(ctx, offset + 0x54, 0x003fffff);
464 gr_def(ctx, offset + 0x58, 0x00001fff);
465 gr_def(ctx, offset + 0x60, 0x00000001);
466 gr_def(ctx, offset + 0x64, 0x00000001);
467 gr_def(ctx, offset + 0x6c, 0x00000001);
468 gr_def(ctx, offset + 0x70, 0x00000001);
469 gr_def(ctx, offset + 0x74, 0x00000001);
470 gr_def(ctx, offset + 0x78, 0x00000004);
471 gr_def(ctx, offset + 0x7c, 0x00000001);
472 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
473 offset += 4;
474 gr_def(ctx, offset + 0x80, 0x00000001);
475 gr_def(ctx, offset + 0x84, 0x00000001);
476 gr_def(ctx, offset + 0x88, 0x00000007);
477 gr_def(ctx, offset + 0x8c, 0x00000001);
478 gr_def(ctx, offset + 0x90, 0x00000007);
479 gr_def(ctx, offset + 0x94, 0x00000001);
480 gr_def(ctx, offset + 0x98, 0x00000001);
481 gr_def(ctx, offset + 0x9c, 0x00000001);
482 if (dev_priv->chipset == 0x50) {
483 gr_def(ctx, offset + 0xb0, 0x00000001);
484 gr_def(ctx, offset + 0xb4, 0x00000001);
485 gr_def(ctx, offset + 0xbc, 0x00000001);
486 gr_def(ctx, offset + 0xc0, 0x0000000a);
487 gr_def(ctx, offset + 0xd0, 0x00000040);
488 gr_def(ctx, offset + 0xd8, 0x00000002);
489 gr_def(ctx, offset + 0xdc, 0x00000100);
490 gr_def(ctx, offset + 0xe0, 0x00000001);
491 gr_def(ctx, offset + 0xe4, 0x00000100);
492 gr_def(ctx, offset + 0x100, 0x00000001);
493 gr_def(ctx, offset + 0x124, 0x00000004);
494 gr_def(ctx, offset + 0x13c, 0x00000001);
495 gr_def(ctx, offset + 0x140, 0x00000100);
496 gr_def(ctx, offset + 0x148, 0x00000001);
497 gr_def(ctx, offset + 0x154, 0x00000100);
498 gr_def(ctx, offset + 0x158, 0x00000001);
499 gr_def(ctx, offset + 0x15c, 0x00000100);
500 gr_def(ctx, offset + 0x164, 0x00000001);
501 gr_def(ctx, offset + 0x170, 0x00000100);
502 gr_def(ctx, offset + 0x174, 0x00000001);
503 gr_def(ctx, offset + 0x17c, 0x00000001);
504 gr_def(ctx, offset + 0x188, 0x00000002);
505 gr_def(ctx, offset + 0x190, 0x00000001);
506 gr_def(ctx, offset + 0x198, 0x00000001);
507 gr_def(ctx, offset + 0x1ac, 0x00000003);
508 offset += 0xd0;
509 } else {
510 gr_def(ctx, offset + 0xb0, 0x00000001);
511 gr_def(ctx, offset + 0xb4, 0x00000100);
512 gr_def(ctx, offset + 0xbc, 0x00000001);
513 gr_def(ctx, offset + 0xc8, 0x00000100);
514 gr_def(ctx, offset + 0xcc, 0x00000001);
515 gr_def(ctx, offset + 0xd0, 0x00000100);
516 gr_def(ctx, offset + 0xd8, 0x00000001);
517 gr_def(ctx, offset + 0xe4, 0x00000100);
518 }
519 gr_def(ctx, offset + 0xf8, 0x00000004);
520 gr_def(ctx, offset + 0xfc, 0x00000070);
521 gr_def(ctx, offset + 0x100, 0x00000080);
522 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
523 offset += 4;
524 gr_def(ctx, offset + 0x114, 0x0000000c);
525 if (dev_priv->chipset == 0x50)
526 offset -= 4;
527 gr_def(ctx, offset + 0x11c, 0x00000008);
528 gr_def(ctx, offset + 0x120, 0x00000014);
529 if (dev_priv->chipset == 0x50) {
530 gr_def(ctx, offset + 0x124, 0x00000026);
531 offset -= 0x18;
532 } else {
533 gr_def(ctx, offset + 0x128, 0x00000029);
534 gr_def(ctx, offset + 0x12c, 0x00000027);
535 gr_def(ctx, offset + 0x130, 0x00000026);
536 gr_def(ctx, offset + 0x134, 0x00000008);
537 gr_def(ctx, offset + 0x138, 0x00000004);
538 gr_def(ctx, offset + 0x13c, 0x00000027);
539 }
540 gr_def(ctx, offset + 0x148, 0x00000001);
541 gr_def(ctx, offset + 0x14c, 0x00000002);
542 gr_def(ctx, offset + 0x150, 0x00000003);
543 gr_def(ctx, offset + 0x154, 0x00000004);
544 gr_def(ctx, offset + 0x158, 0x00000005);
545 gr_def(ctx, offset + 0x15c, 0x00000006);
546 gr_def(ctx, offset + 0x160, 0x00000007);
547 gr_def(ctx, offset + 0x164, 0x00000001);
548 gr_def(ctx, offset + 0x1a8, 0x000000cf);
549 if (dev_priv->chipset == 0x50)
550 offset -= 4;
551 gr_def(ctx, offset + 0x1d8, 0x00000080);
552 gr_def(ctx, offset + 0x1dc, 0x00000004);
553 gr_def(ctx, offset + 0x1e0, 0x00000004);
554 if (dev_priv->chipset == 0x50)
555 offset -= 4;
556 else
557 gr_def(ctx, offset + 0x1e4, 0x00000003);
558 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
559 gr_def(ctx, offset + 0x1ec, 0x00000003);
560 offset += 8;
561 }
562 gr_def(ctx, offset + 0x1e8, 0x00000001);
563 if (dev_priv->chipset == 0x50)
564 offset -= 4;
565 gr_def(ctx, offset + 0x1f4, 0x00000012);
566 gr_def(ctx, offset + 0x1f8, 0x00000010);
567 gr_def(ctx, offset + 0x1fc, 0x0000000c);
568 gr_def(ctx, offset + 0x200, 0x00000001);
569 gr_def(ctx, offset + 0x210, 0x00000004);
570 gr_def(ctx, offset + 0x214, 0x00000002);
571 gr_def(ctx, offset + 0x218, 0x00000004);
572 if (dev_priv->chipset >= 0xa0)
573 offset += 4;
574 gr_def(ctx, offset + 0x224, 0x003fffff);
575 gr_def(ctx, offset + 0x228, 0x00001fff);
576 if (dev_priv->chipset == 0x50)
577 offset -= 0x20;
578 else if (dev_priv->chipset >= 0xa0) {
579 gr_def(ctx, offset + 0x250, 0x00000001);
580 gr_def(ctx, offset + 0x254, 0x00000001);
581 gr_def(ctx, offset + 0x258, 0x00000002);
582 offset += 0x10;
583 }
584 gr_def(ctx, offset + 0x250, 0x00000004);
585 gr_def(ctx, offset + 0x254, 0x00000014);
586 gr_def(ctx, offset + 0x258, 0x00000001);
587 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
588 offset += 4;
589 gr_def(ctx, offset + 0x264, 0x00000002);
590 if (dev_priv->chipset >= 0xa0)
591 offset += 8;
592 gr_def(ctx, offset + 0x270, 0x00000001);
593 gr_def(ctx, offset + 0x278, 0x00000002);
594 gr_def(ctx, offset + 0x27c, 0x00001000);
595 if (dev_priv->chipset == 0x50)
596 offset -= 0xc;
597 else {
598 gr_def(ctx, offset + 0x280, 0x00000e00);
599 gr_def(ctx, offset + 0x284, 0x00001000);
600 gr_def(ctx, offset + 0x288, 0x00001e00);
601 }
602 gr_def(ctx, offset + 0x290, 0x00000001);
603 gr_def(ctx, offset + 0x294, 0x00000001);
604 gr_def(ctx, offset + 0x298, 0x00000001);
605 gr_def(ctx, offset + 0x29c, 0x00000001);
606 gr_def(ctx, offset + 0x2a0, 0x00000001);
607 gr_def(ctx, offset + 0x2b0, 0x00000200);
608 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
609 gr_def(ctx, offset + 0x2b4, 0x00000200);
610 offset += 4;
611 }
612 if (dev_priv->chipset < 0xa0) {
613 gr_def(ctx, offset + 0x2b8, 0x00000001);
614 gr_def(ctx, offset + 0x2bc, 0x00000070);
615 gr_def(ctx, offset + 0x2c0, 0x00000080);
616 gr_def(ctx, offset + 0x2cc, 0x00000001);
617 gr_def(ctx, offset + 0x2d0, 0x00000070);
618 gr_def(ctx, offset + 0x2d4, 0x00000080);
619 } else {
620 gr_def(ctx, offset + 0x2b8, 0x00000001);
621 gr_def(ctx, offset + 0x2bc, 0x000000f0);
622 gr_def(ctx, offset + 0x2c0, 0x000000ff);
623 gr_def(ctx, offset + 0x2cc, 0x00000001);
624 gr_def(ctx, offset + 0x2d0, 0x000000f0);
625 gr_def(ctx, offset + 0x2d4, 0x000000ff);
626 gr_def(ctx, offset + 0x2dc, 0x00000009);
627 offset += 4;
628 }
629 gr_def(ctx, offset + 0x2e4, 0x00000001);
630 gr_def(ctx, offset + 0x2e8, 0x000000cf);
631 gr_def(ctx, offset + 0x2f0, 0x00000001);
632 gr_def(ctx, offset + 0x300, 0x000000cf);
633 gr_def(ctx, offset + 0x308, 0x00000002);
634 gr_def(ctx, offset + 0x310, 0x00000001);
635 gr_def(ctx, offset + 0x318, 0x00000001);
636 gr_def(ctx, offset + 0x320, 0x000000cf);
637 gr_def(ctx, offset + 0x324, 0x000000cf);
638 gr_def(ctx, offset + 0x328, 0x00000001);
639
640 /* 6000? */
641 if (dev_priv->chipset == 0x50)
642 cp_ctx(ctx, 0x4063e0, 0x1);
643
644 /* 6800 */
645 if (dev_priv->chipset < 0x90) {
646 cp_ctx(ctx, 0x406814, 0x2b);
647 gr_def(ctx, 0x406818, 0x00000f80);
648 gr_def(ctx, 0x406860, 0x007f0080);
649 gr_def(ctx, 0x40689c, 0x007f0080);
650 } else {
651 cp_ctx(ctx, 0x406814, 0x4);
652 if (dev_priv->chipset == 0x98)
653 gr_def(ctx, 0x406818, 0x00000f80);
654 else
655 gr_def(ctx, 0x406818, 0x00001f80);
656 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
657 gr_def(ctx, 0x40681c, 0x00000030);
658 cp_ctx(ctx, 0x406830, 0x3);
659 }
660
661 /* 7000: per-ROP group state */
662 for (i = 0; i < 8; i++) {
663 if (units & (1<<(i+16))) {
664 cp_ctx(ctx, 0x407000 + (i<<8), 3);
665 if (dev_priv->chipset == 0x50)
666 gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
667 else if (dev_priv->chipset != 0xa5)
668 gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
669 else
670 gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
671 gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
672
673 if (dev_priv->chipset == 0x50) {
674 cp_ctx(ctx, 0x407010 + (i<<8), 1);
675 } else if (dev_priv->chipset < 0xa0) {
676 cp_ctx(ctx, 0x407010 + (i<<8), 2);
677 gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
678 gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
679 } else {
680 cp_ctx(ctx, 0x407010 + (i<<8), 3);
681 gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
682 if (dev_priv->chipset != 0xa5)
683 gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
684 else
685 gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
686 }
687
688 cp_ctx(ctx, 0x407080 + (i<<8), 4);
689 if (dev_priv->chipset != 0xa5)
690 gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
691 else
692 gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
693 if (dev_priv->chipset == 0x50)
694 gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
695 else
696 gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
697 gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
698
699 if (dev_priv->chipset < 0xa0)
700 cp_ctx(ctx, 0x407094 + (i<<8), 1);
701 else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
702 cp_ctx(ctx, 0x407094 + (i<<8), 3);
703 else {
704 cp_ctx(ctx, 0x407094 + (i<<8), 4);
705 gr_def(ctx, 0x4070a0 + (i<<8), 1);
706 }
707 }
708 }
709
710 cp_ctx(ctx, 0x407c00, 0x3);
711 if (dev_priv->chipset < 0x90)
712 gr_def(ctx, 0x407c00, 0x00010040);
713 else if (dev_priv->chipset < 0xa0)
714 gr_def(ctx, 0x407c00, 0x00390040);
715 else
716 gr_def(ctx, 0x407c00, 0x003d0040);
717 gr_def(ctx, 0x407c08, 0x00000022);
718 if (dev_priv->chipset >= 0xa0) {
719 cp_ctx(ctx, 0x407c10, 0x3);
720 cp_ctx(ctx, 0x407c20, 0x1);
721 cp_ctx(ctx, 0x407c2c, 0x1);
722 }
723
724 if (dev_priv->chipset < 0xa0) {
725 cp_ctx(ctx, 0x407d00, 0x9);
726 } else {
727 cp_ctx(ctx, 0x407d00, 0x15);
728 }
729 if (dev_priv->chipset == 0x98)
730 gr_def(ctx, 0x407d08, 0x00380040);
731 else {
732 if (dev_priv->chipset < 0x90)
733 gr_def(ctx, 0x407d08, 0x00010040);
734 else if (dev_priv->chipset < 0xa0)
735 gr_def(ctx, 0x407d08, 0x00390040);
736 else
737 gr_def(ctx, 0x407d08, 0x003d0040);
738 gr_def(ctx, 0x407d0c, 0x00000022);
739 }
740
741 /* 8000+: per-TP state */
742 for (i = 0; i < 10; i++) {
743 if (units & (1<<i)) {
744 if (dev_priv->chipset < 0xa0)
745 base = 0x408000 + (i<<12);
746 else
747 base = 0x408000 + (i<<11);
748 if (dev_priv->chipset < 0xa0)
749 offset = base + 0xc00;
750 else
751 offset = base + 0x80;
752 cp_ctx(ctx, offset + 0x00, 1);
753 gr_def(ctx, offset + 0x00, 0x0000ff0a);
754 cp_ctx(ctx, offset + 0x08, 1);
755
756 /* per-MP state */
757 for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) {
758 if (!(units & (1 << (j+24)))) continue;
759 if (dev_priv->chipset < 0xa0)
760 offset = base + 0x200 + (j<<7);
761 else
762 offset = base + 0x100 + (j<<7);
763 cp_ctx(ctx, offset, 0x20);
764 gr_def(ctx, offset + 0x00, 0x01800000);
765 gr_def(ctx, offset + 0x04, 0x00160000);
766 gr_def(ctx, offset + 0x08, 0x01800000);
767 gr_def(ctx, offset + 0x18, 0x0003ffff);
768 switch (dev_priv->chipset) {
769 case 0x50:
770 gr_def(ctx, offset + 0x1c, 0x00080000);
771 break;
772 case 0x84:
773 gr_def(ctx, offset + 0x1c, 0x00880000);
774 break;
775 case 0x86:
776 gr_def(ctx, offset + 0x1c, 0x008c0000);
777 break;
778 case 0x92:
779 case 0x96:
780 case 0x98:
781 gr_def(ctx, offset + 0x1c, 0x118c0000);
782 break;
783 case 0x94:
784 gr_def(ctx, offset + 0x1c, 0x10880000);
785 break;
786 case 0xa0:
787 case 0xa5:
788 gr_def(ctx, offset + 0x1c, 0x310c0000);
789 break;
790 case 0xa8:
791 case 0xaa:
792 case 0xac:
793 gr_def(ctx, offset + 0x1c, 0x300c0000);
794 break;
795 }
796 gr_def(ctx, offset + 0x40, 0x00010401);
797 if (dev_priv->chipset == 0x50)
798 gr_def(ctx, offset + 0x48, 0x00000040);
799 else
800 gr_def(ctx, offset + 0x48, 0x00000078);
801 gr_def(ctx, offset + 0x50, 0x000000bf);
802 gr_def(ctx, offset + 0x58, 0x00001210);
803 if (dev_priv->chipset == 0x50)
804 gr_def(ctx, offset + 0x5c, 0x00000080);
805 else
806 gr_def(ctx, offset + 0x5c, 0x08000080);
807 if (dev_priv->chipset >= 0xa0)
808 gr_def(ctx, offset + 0x68, 0x0000003e);
809 }
810
811 if (dev_priv->chipset < 0xa0)
812 cp_ctx(ctx, base + 0x300, 0x4);
813 else
814 cp_ctx(ctx, base + 0x300, 0x5);
815 if (dev_priv->chipset == 0x50)
816 gr_def(ctx, base + 0x304, 0x00007070);
817 else if (dev_priv->chipset < 0xa0)
818 gr_def(ctx, base + 0x304, 0x00027070);
819 else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
820 gr_def(ctx, base + 0x304, 0x01127070);
821 else
822 gr_def(ctx, base + 0x304, 0x05127070);
823
824 if (dev_priv->chipset < 0xa0)
825 cp_ctx(ctx, base + 0x318, 1);
826 else
827 cp_ctx(ctx, base + 0x320, 1);
828 if (dev_priv->chipset == 0x50)
829 gr_def(ctx, base + 0x318, 0x0003ffff);
830 else if (dev_priv->chipset < 0xa0)
831 gr_def(ctx, base + 0x318, 0x03ffffff);
832 else
833 gr_def(ctx, base + 0x320, 0x07ffffff);
834
835 if (dev_priv->chipset < 0xa0)
836 cp_ctx(ctx, base + 0x324, 5);
837 else
838 cp_ctx(ctx, base + 0x328, 4);
839
840 if (dev_priv->chipset < 0xa0) {
841 cp_ctx(ctx, base + 0x340, 9);
842 offset = base + 0x340;
843 } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
844 cp_ctx(ctx, base + 0x33c, 0xb);
845 offset = base + 0x344;
846 } else {
847 cp_ctx(ctx, base + 0x33c, 0xd);
848 offset = base + 0x344;
849 }
850 gr_def(ctx, offset + 0x0, 0x00120407);
851 gr_def(ctx, offset + 0x4, 0x05091507);
852 if (dev_priv->chipset == 0x84)
853 gr_def(ctx, offset + 0x8, 0x05100202);
854 else
855 gr_def(ctx, offset + 0x8, 0x05010202);
856 gr_def(ctx, offset + 0xc, 0x00030201);
857
858 cp_ctx(ctx, base + 0x400, 2);
859 gr_def(ctx, base + 0x404, 0x00000040);
860 cp_ctx(ctx, base + 0x40c, 2);
861 gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
862 gr_def(ctx, base + 0x410, 0x00141210);
863
864 if (dev_priv->chipset < 0xa0)
865 offset = base + 0x800;
866 else
867 offset = base + 0x500;
868 cp_ctx(ctx, offset, 6);
869 gr_def(ctx, offset + 0x0, 0x000001f0);
870 gr_def(ctx, offset + 0x4, 0x00000001);
871 gr_def(ctx, offset + 0x8, 0x00000003);
872 if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa)
873 gr_def(ctx, offset + 0xc, 0x00008000);
874 gr_def(ctx, offset + 0x14, 0x00039e00);
875 cp_ctx(ctx, offset + 0x1c, 2);
876 if (dev_priv->chipset == 0x50)
877 gr_def(ctx, offset + 0x1c, 0x00000040);
878 else
879 gr_def(ctx, offset + 0x1c, 0x00000100);
880 gr_def(ctx, offset + 0x20, 0x00003800);
881
882 if (dev_priv->chipset >= 0xa0) {
883 cp_ctx(ctx, base + 0x54c, 2);
884 if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
885 gr_def(ctx, base + 0x54c, 0x003fe006);
886 else
887 gr_def(ctx, base + 0x54c, 0x003fe007);
888 gr_def(ctx, base + 0x550, 0x003fe000);
889 }
890
891 if (dev_priv->chipset < 0xa0)
892 offset = base + 0xa00;
893 else
894 offset = base + 0x680;
895 cp_ctx(ctx, offset, 1);
896 gr_def(ctx, offset, 0x00404040);
897
898 if (dev_priv->chipset < 0xa0)
899 offset = base + 0xe00;
900 else
901 offset = base + 0x700;
902 cp_ctx(ctx, offset, 2);
903 if (dev_priv->chipset < 0xa0)
904 gr_def(ctx, offset, 0x0077f005);
905 else if (dev_priv->chipset == 0xa5)
906 gr_def(ctx, offset, 0x6cf7f007);
907 else if (dev_priv->chipset == 0xa8)
908 gr_def(ctx, offset, 0x6cfff007);
909 else if (dev_priv->chipset == 0xac)
910 gr_def(ctx, offset, 0x0cfff007);
911 else
912 gr_def(ctx, offset, 0x0cf7f007);
913 if (dev_priv->chipset == 0x50)
914 gr_def(ctx, offset + 0x4, 0x00007fff);
915 else if (dev_priv->chipset < 0xa0)
916 gr_def(ctx, offset + 0x4, 0x003f7fff);
917 else
918 gr_def(ctx, offset + 0x4, 0x02bf7fff);
919 cp_ctx(ctx, offset + 0x2c, 1);
920 if (dev_priv->chipset == 0x50) {
921 cp_ctx(ctx, offset + 0x50, 9);
922 gr_def(ctx, offset + 0x54, 0x000003ff);
923 gr_def(ctx, offset + 0x58, 0x00000003);
924 gr_def(ctx, offset + 0x5c, 0x00000003);
925 gr_def(ctx, offset + 0x60, 0x000001ff);
926 gr_def(ctx, offset + 0x64, 0x0000001f);
927 gr_def(ctx, offset + 0x68, 0x0000000f);
928 gr_def(ctx, offset + 0x6c, 0x0000000f);
929 } else if (dev_priv->chipset < 0xa0) {
930 cp_ctx(ctx, offset + 0x50, 1);
931 cp_ctx(ctx, offset + 0x70, 1);
932 } else {
933 cp_ctx(ctx, offset + 0x50, 1);
934 cp_ctx(ctx, offset + 0x60, 5);
935 }
936 }
937 }
938}
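
/*
 * Illustration, not in the original source: in the per-TP loop above each
 * TP's register block starts at 0x408000 plus a per-TP stride of 0x1000
 * bytes (i << 12) on pre-NVA0 and 0x800 bytes (i << 11) on NVA0+; the
 * per-MP sub-blocks inside it are spaced 0x80 bytes apart (j << 7).
 */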
939
940/*
941 * xfer areas. These are a pain.
942 *
943 * There are 2 xfer areas: the first one is big and contains all sorts of
944 * stuff, the second is small and contains some per-TP context.
945 *
946 * Each area is split into 8 "strands". The areas, when saved to grctx,
947 * are made of 8-word blocks. Each block contains a single word from
948 * each strand. The strands are independent of each other, their
949 * addresses are unrelated to each other, and data in them is closely
950 * packed together. The strand layout varies a bit between cards: here
951 * and there, a single word is thrown out in the middle and the whole
952 * strand is offset by a bit from the corresponding one on another chipset.
953 * For this reason, the addresses of stuff in strands are almost useless.
954 * Knowing the sequence of stuff and the size of the gaps between them is much more
955 * useful, and that's how we build the strands in our generator.
956 *
957 * NVA0 takes this mess to a whole new level by cutting the old strands
958 * into a few dozen pieces [known as genes], rearranging them randomly,
959 * and putting them back together to make new strands. Hopefully these
960 * genes correspond more or less directly to the same PGRAPH subunits
961 * as in the 400040 register.
962 *
963 * The most common value in the default context is 0, and when the genes
964 * are separated by 0's, gene boundaries are quite speculative...
965 * some of them can be clearly deduced, others can be guessed, and yet
966 * others won't be resolved without figuring out the real meaning of a
967 * given ctxval. For the same reason, the ending point of each strand
968 * is unknown, except for strand 0: it is the longest strand and
969 * its end corresponds to the end of the whole xfer.
970 *
971 * An unsolved mystery is the seek instruction: it takes an argument
972 * in bits 8-18, and that argument is clearly the place in strands to
973 * seek to... but the offsets don't seem to correspond to offsets as
974 * seen in grctx. Perhaps there's another, real, non-randomly-changing
975 * addressing scheme in strands, and the xfer insn just happens to skip over
976 * the unused bits? NV10-NV30 PIPE comes to mind...
977 *
978 * As far as I know, there's no way to access the xfer areas directly
979 * without the help of ctxprog.
980 */
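
/*
 * Illustrative sketch, not part of the original file: with the 8-word-block
 * interleave described above, word `idx' of strand `strand' lands at the
 * 32-bit word offset computed below, relative to the aligned base of the
 * xfer area.  The helper name is made up purely for illustration.
 */
static inline int
strand_word_offset(int strand, int idx)
{
	/* block `idx' carries exactly one word from each of the 8 strands */
	return idx * 8 + strand;
}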
981
982static inline void
983xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
984 int i;
985 if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
986 for (i = 0; i < num; i++)
987 nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val);
988 ctx->ctxvals_pos += num << 3;
989}
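
/*
 * Aside, not in the original source: xf_emit() above fills the next `num'
 * words of the current strand with the default value `val'.  Values are
 * only written when generating ctxvals (NOUVEAU_GRCTX_VALS) and only when
 * non-zero, presumably because the backing object starts out zeroed; in
 * all modes ctxvals_pos advances by num * 8, since consecutive words of
 * one strand sit 8 words apart in the interleaved grctx image.
 */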
990
991/* Gene declarations... */
992
993static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
994static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx);
995static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx);
996static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx);
997static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx);
998static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx);
999static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx);
1000static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx);
1001static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx);
1002static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx);
1003static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx);
1004static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
1005static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
1006
1007static void
1008nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
1009{
1010 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1011 int i;
1012 int offset;
1013 int size = 0;
1014 uint32_t units = nv_rd32 (ctx->dev, 0x1540);
1015
1016 offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
1017 ctx->ctxvals_base = offset;
1018
1019 if (dev_priv->chipset < 0xa0) {
1020 /* Strand 0 */
1021 ctx->ctxvals_pos = offset;
1022 switch (dev_priv->chipset) {
1023 case 0x50:
1024 xf_emit(ctx, 0x99, 0);
1025 break;
1026 case 0x84:
1027 case 0x86:
1028 xf_emit(ctx, 0x384, 0);
1029 break;
1030 case 0x92:
1031 case 0x94:
1032 case 0x96:
1033 case 0x98:
1034 xf_emit(ctx, 0x380, 0);
1035 break;
1036 }
1037 nv50_graph_construct_gene_m2mf (ctx);
1038 switch (dev_priv->chipset) {
1039 case 0x50:
1040 case 0x84:
1041 case 0x86:
1042 case 0x98:
1043 xf_emit(ctx, 0x4c4, 0);
1044 break;
1045 case 0x92:
1046 case 0x94:
1047 case 0x96:
1048 xf_emit(ctx, 0x984, 0);
1049 break;
1050 }
1051 nv50_graph_construct_gene_unk5(ctx);
1052 if (dev_priv->chipset == 0x50)
1053 xf_emit(ctx, 0xa, 0);
1054 else
1055 xf_emit(ctx, 0xb, 0);
1056 nv50_graph_construct_gene_unk4(ctx);
1057 nv50_graph_construct_gene_unk3(ctx);
1058 if ((ctx->ctxvals_pos-offset)/8 > size)
1059 size = (ctx->ctxvals_pos-offset)/8;
1060
1061 /* Strand 1 */
1062 ctx->ctxvals_pos = offset + 0x1;
1063 nv50_graph_construct_gene_unk6(ctx);
1064 nv50_graph_construct_gene_unk7(ctx);
1065 nv50_graph_construct_gene_unk8(ctx);
1066 switch (dev_priv->chipset) {
1067 case 0x50:
1068 case 0x92:
1069 xf_emit(ctx, 0xfb, 0);
1070 break;
1071 case 0x84:
1072 xf_emit(ctx, 0xd3, 0);
1073 break;
1074 case 0x94:
1075 case 0x96:
1076 xf_emit(ctx, 0xab, 0);
1077 break;
1078 case 0x86:
1079 case 0x98:
1080 xf_emit(ctx, 0x6b, 0);
1081 break;
1082 }
1083 xf_emit(ctx, 2, 0x4e3bfdf);
1084 xf_emit(ctx, 4, 0);
1085 xf_emit(ctx, 1, 0x0fac6881);
1086 xf_emit(ctx, 0xb, 0);
1087 xf_emit(ctx, 2, 0x4e3bfdf);
1088 if ((ctx->ctxvals_pos-offset)/8 > size)
1089 size = (ctx->ctxvals_pos-offset)/8;
1090
1091 /* Strand 2 */
1092 ctx->ctxvals_pos = offset + 0x2;
1093 switch (dev_priv->chipset) {
1094 case 0x50:
1095 case 0x92:
1096 xf_emit(ctx, 0xa80, 0);
1097 break;
1098 case 0x84:
1099 xf_emit(ctx, 0xa7e, 0);
1100 break;
1101 case 0x94:
1102 case 0x96:
1103 xf_emit(ctx, 0xa7c, 0);
1104 break;
1105 case 0x86:
1106 case 0x98:
1107 xf_emit(ctx, 0xa7a, 0);
1108 break;
1109 }
1110 xf_emit(ctx, 1, 0x3fffff);
1111 xf_emit(ctx, 2, 0);
1112 xf_emit(ctx, 1, 0x1fff);
1113 xf_emit(ctx, 0xe, 0);
1114 nv50_graph_construct_gene_unk9(ctx);
1115 nv50_graph_construct_gene_unk2(ctx);
1116 nv50_graph_construct_gene_unk1(ctx);
1117 nv50_graph_construct_gene_unk10(ctx);
1118 if ((ctx->ctxvals_pos-offset)/8 > size)
1119 size = (ctx->ctxvals_pos-offset)/8;
1120
1121 /* Strand 3: per-ROP group state */
1122 ctx->ctxvals_pos = offset + 3;
1123 for (i = 0; i < 6; i++)
1124 if (units & (1 << (i + 16)))
1125 nv50_graph_construct_gene_ropc(ctx);
1126 if ((ctx->ctxvals_pos-offset)/8 > size)
1127 size = (ctx->ctxvals_pos-offset)/8;
1128
1129 /* Strands 4-7: per-TP state */
1130 for (i = 0; i < 4; i++) {
1131 ctx->ctxvals_pos = offset + 4 + i;
1132 if (units & (1 << (2 * i)))
1133 nv50_graph_construct_xfer_tp(ctx);
1134 if (units & (1 << (2 * i + 1)))
1135 nv50_graph_construct_xfer_tp(ctx);
1136 if ((ctx->ctxvals_pos-offset)/8 > size)
1137 size = (ctx->ctxvals_pos-offset)/8;
1138 }
1139 } else {
1140 /* Strand 0 */
1141 ctx->ctxvals_pos = offset;
1142 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1143 xf_emit(ctx, 0x385, 0);
1144 else
1145 xf_emit(ctx, 0x384, 0);
1146 nv50_graph_construct_gene_m2mf(ctx);
1147 xf_emit(ctx, 0x950, 0);
1148 nv50_graph_construct_gene_unk10(ctx);
1149 xf_emit(ctx, 1, 0x0fac6881);
1150 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1151 xf_emit(ctx, 1, 1);
1152 xf_emit(ctx, 3, 0);
1153 }
1154 nv50_graph_construct_gene_unk8(ctx);
1155 if (dev_priv->chipset == 0xa0)
1156 xf_emit(ctx, 0x189, 0);
1157 else if (dev_priv->chipset < 0xa8)
1158 xf_emit(ctx, 0x99, 0);
1159 else if (dev_priv->chipset == 0xaa)
1160 xf_emit(ctx, 0x65, 0);
1161 else
1162 xf_emit(ctx, 0x6d, 0);
1163 nv50_graph_construct_gene_unk9(ctx);
1164 if ((ctx->ctxvals_pos-offset)/8 > size)
1165 size = (ctx->ctxvals_pos-offset)/8;
1166
1167 /* Strand 1 */
1168 ctx->ctxvals_pos = offset + 1;
1169 nv50_graph_construct_gene_unk1(ctx);
1170 if ((ctx->ctxvals_pos-offset)/8 > size)
1171 size = (ctx->ctxvals_pos-offset)/8;
1172
1173 /* Strand 2 */
1174 ctx->ctxvals_pos = offset + 2;
1175 if (dev_priv->chipset == 0xa0) {
1176 nv50_graph_construct_gene_unk2(ctx);
1177 }
1178 xf_emit(ctx, 0x36, 0);
1179 nv50_graph_construct_gene_unk5(ctx);
1180 if ((ctx->ctxvals_pos-offset)/8 > size)
1181 size = (ctx->ctxvals_pos-offset)/8;
1182
1183 /* Strand 3 */
1184 ctx->ctxvals_pos = offset + 3;
1185 xf_emit(ctx, 1, 0);
1186 xf_emit(ctx, 1, 1);
1187 nv50_graph_construct_gene_unk6(ctx);
1188 if ((ctx->ctxvals_pos-offset)/8 > size)
1189 size = (ctx->ctxvals_pos-offset)/8;
1190
1191 /* Strand 4 */
1192 ctx->ctxvals_pos = offset + 4;
1193 if (dev_priv->chipset == 0xa0)
1194 xf_emit(ctx, 0xa80, 0);
1195 else
1196 xf_emit(ctx, 0xa7a, 0);
1197 xf_emit(ctx, 1, 0x3fffff);
1198 xf_emit(ctx, 2, 0);
1199 xf_emit(ctx, 1, 0x1fff);
1200 if ((ctx->ctxvals_pos-offset)/8 > size)
1201 size = (ctx->ctxvals_pos-offset)/8;
1202
1203 /* Strand 5 */
1204 ctx->ctxvals_pos = offset + 5;
1205 xf_emit(ctx, 1, 0);
1206 xf_emit(ctx, 1, 0x0fac6881);
1207 xf_emit(ctx, 0xb, 0);
1208 xf_emit(ctx, 2, 0x4e3bfdf);
1209 xf_emit(ctx, 3, 0);
1210 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1211 xf_emit(ctx, 1, 0x11);
1212 xf_emit(ctx, 1, 0);
1213 xf_emit(ctx, 2, 0x4e3bfdf);
1214 xf_emit(ctx, 2, 0);
1215 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1216 xf_emit(ctx, 1, 0x11);
1217 xf_emit(ctx, 1, 0);
1218 for (i = 0; i < 8; i++)
1219 if (units & (1<<(i+16)))
1220 nv50_graph_construct_gene_ropc(ctx);
1221 if ((ctx->ctxvals_pos-offset)/8 > size)
1222 size = (ctx->ctxvals_pos-offset)/8;
1223
1224 /* Strand 6 */
1225 ctx->ctxvals_pos = offset + 6;
1226 nv50_graph_construct_gene_unk3(ctx);
1227 xf_emit(ctx, 0xb, 0);
1228 nv50_graph_construct_gene_unk4(ctx);
1229 nv50_graph_construct_gene_unk7(ctx);
1230 if (units & (1 << 0))
1231 nv50_graph_construct_xfer_tp(ctx);
1232 if (units & (1 << 1))
1233 nv50_graph_construct_xfer_tp(ctx);
1234 if (units & (1 << 2))
1235 nv50_graph_construct_xfer_tp(ctx);
1236 if (units & (1 << 3))
1237 nv50_graph_construct_xfer_tp(ctx);
1238 if ((ctx->ctxvals_pos-offset)/8 > size)
1239 size = (ctx->ctxvals_pos-offset)/8;
1240
1241 /* Strand 7 */
1242 ctx->ctxvals_pos = offset + 7;
1243 if (dev_priv->chipset == 0xa0) {
1244 if (units & (1 << 4))
1245 nv50_graph_construct_xfer_tp(ctx);
1246 if (units & (1 << 5))
1247 nv50_graph_construct_xfer_tp(ctx);
1248 if (units & (1 << 6))
1249 nv50_graph_construct_xfer_tp(ctx);
1250 if (units & (1 << 7))
1251 nv50_graph_construct_xfer_tp(ctx);
1252 if (units & (1 << 8))
1253 nv50_graph_construct_xfer_tp(ctx);
1254 if (units & (1 << 9))
1255 nv50_graph_construct_xfer_tp(ctx);
1256 } else {
1257 nv50_graph_construct_gene_unk2(ctx);
1258 }
1259 if ((ctx->ctxvals_pos-offset)/8 > size)
1260 size = (ctx->ctxvals_pos-offset)/8;
1261 }
1262
1263 ctx->ctxvals_pos = offset + size * 8;
1264 ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
1265 cp_lsr (ctx, offset);
1266 cp_out (ctx, CP_SET_XFER_POINTER);
1267 cp_lsr (ctx, size);
1268 cp_out (ctx, CP_SEEK_1);
1269 cp_out (ctx, CP_XFER_1);
1270 cp_wait(ctx, XFER, BUSY);
1271}
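
/*
 * Aside, not in the original source: `size' above ends up as the length of
 * the longest strand measured in 8-word blocks, since (ctxvals_pos - offset)
 * is a plain word count and each strand contributes one word per block.
 * That is why the final position is offset + size * 8, and why `size' is
 * the value loaded with cp_lsr() before the CP_SEEK_1 / CP_XFER_1 commands.
 */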
1272
1273/*
1274 * non-trivial de-magicked parts of ctx init go here
1275 */
1276
1277static void
1278nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
1279{
1280 /* m2mf state */
1281 xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
1282 xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */
1283 xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */
1284 xf_emit (ctx, 1, 0); /* OFFSET_IN */
1285 xf_emit (ctx, 1, 0); /* OFFSET_OUT */
1286 xf_emit (ctx, 1, 0); /* PITCH_IN */
1287 xf_emit (ctx, 1, 0); /* PITCH_OUT */
1288 xf_emit (ctx, 1, 0); /* LINE_LENGTH */
1289 xf_emit (ctx, 1, 0); /* LINE_COUNT */
1290 xf_emit (ctx, 1, 0x21); /* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */
1291 xf_emit (ctx, 1, 1); /* LINEAR_IN */
1292 xf_emit (ctx, 1, 0x2); /* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */
1293 xf_emit (ctx, 1, 0x100); /* TILING_PITCH_IN */
1294 xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_IN */
1295 xf_emit (ctx, 1, 1); /* TILING_DEPTH_IN */
1296 xf_emit (ctx, 1, 0); /* TILING_POSITION_IN_Z */
1297 xf_emit (ctx, 1, 0); /* TILING_POSITION_IN */
1298 xf_emit (ctx, 1, 1); /* LINEAR_OUT */
1299 xf_emit (ctx, 1, 0x2); /* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */
1300 xf_emit (ctx, 1, 0x100); /* TILING_PITCH_OUT */
1301 xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_OUT */
1302 xf_emit (ctx, 1, 1); /* TILING_DEPTH_OUT */
1303 xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT_Z */
1304 xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */
1305 xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */
1306 xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */
1307}
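
/*
 * Aside, not in the original source, just decoding the FORMAT default
 * above: with bits 0-4 = INPUT_INC and bits 5-9 = OUTPUT_INC, the emitted
 * 0x21 is simply
 *
 *	(1 << 0) | (1 << 5) == 0x21   ->  INPUT_INC = 1, OUTPUT_INC = 1
 */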
1308
1309static void
1310nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx)
1311{
1312 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1313 /* end of area 2 on pre-NVA0, area 1 on NVAx */
1314 xf_emit(ctx, 2, 4);
1315 xf_emit(ctx, 1, 0);
1316 xf_emit(ctx, 1, 0x80);
1317 xf_emit(ctx, 1, 4);
1318 xf_emit(ctx, 1, 0x80c14);
1319 xf_emit(ctx, 1, 0);
1320 if (dev_priv->chipset == 0x50)
1321 xf_emit(ctx, 1, 0x3ff);
1322 else
1323 xf_emit(ctx, 1, 0x7ff);
1324 switch (dev_priv->chipset) {
1325 case 0x50:
1326 case 0x86:
1327 case 0x98:
1328 case 0xaa:
1329 case 0xac:
1330 xf_emit(ctx, 0x542, 0);
1331 break;
1332 case 0x84:
1333 case 0x92:
1334 case 0x94:
1335 case 0x96:
1336 xf_emit(ctx, 0x942, 0);
1337 break;
1338 case 0xa0:
1339 xf_emit(ctx, 0x2042, 0);
1340 break;
1341 case 0xa5:
1342 case 0xa8:
1343 xf_emit(ctx, 0x842, 0);
1344 break;
1345 }
1346 xf_emit(ctx, 2, 4);
1347 xf_emit(ctx, 1, 0);
1348 xf_emit(ctx, 1, 0x80);
1349 xf_emit(ctx, 1, 4);
1350 xf_emit(ctx, 1, 1);
1351 xf_emit(ctx, 1, 0);
1352 xf_emit(ctx, 1, 0x27);
1353 xf_emit(ctx, 1, 0);
1354 xf_emit(ctx, 1, 0x26);
1355 xf_emit(ctx, 3, 0);
1356}
1357
1358static void
1359nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx)
1360{
1361 /* end of area 2 on pre-NVA0, area 1 on NVAx */
1362 xf_emit(ctx, 0x10, 0x04000000);
1363 xf_emit(ctx, 0x24, 0);
1364 xf_emit(ctx, 2, 0x04e3bfdf);
1365 xf_emit(ctx, 2, 0);
1366 xf_emit(ctx, 1, 0x1fe21);
1367}
1368
1369static void
1370nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx)
1371{
1372 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1373 /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
1374 if (dev_priv->chipset != 0x50) {
1375 xf_emit(ctx, 5, 0);
1376 xf_emit(ctx, 1, 0x80c14);
1377 xf_emit(ctx, 2, 0);
1378 xf_emit(ctx, 1, 0x804);
1379 xf_emit(ctx, 1, 0);
1380 xf_emit(ctx, 2, 4);
1381 xf_emit(ctx, 1, 0x8100c12);
1382 }
1383 xf_emit(ctx, 1, 0);
1384 xf_emit(ctx, 2, 4);
1385 xf_emit(ctx, 1, 0);
1386 xf_emit(ctx, 1, 0x10);
1387 if (dev_priv->chipset == 0x50)
1388 xf_emit(ctx, 3, 0);
1389 else
1390 xf_emit(ctx, 4, 0);
1391 xf_emit(ctx, 1, 0x804);
1392 xf_emit(ctx, 1, 1);
1393 xf_emit(ctx, 1, 0x1a);
1394 if (dev_priv->chipset != 0x50)
1395 xf_emit(ctx, 1, 0x7f);
1396 xf_emit(ctx, 1, 0);
1397 xf_emit(ctx, 1, 1);
1398 xf_emit(ctx, 1, 0x80c14);
1399 xf_emit(ctx, 1, 0);
1400 xf_emit(ctx, 1, 0x8100c12);
1401 xf_emit(ctx, 2, 4);
1402 xf_emit(ctx, 1, 0);
1403 xf_emit(ctx, 1, 0x10);
1404 xf_emit(ctx, 3, 0);
1405 xf_emit(ctx, 1, 1);
1406 xf_emit(ctx, 1, 0x8100c12);
1407 xf_emit(ctx, 6, 0);
1408 if (dev_priv->chipset == 0x50)
1409 xf_emit(ctx, 1, 0x3ff);
1410 else
1411 xf_emit(ctx, 1, 0x7ff);
1412 xf_emit(ctx, 1, 0x80c14);
1413 xf_emit(ctx, 0x38, 0);
1414 xf_emit(ctx, 1, 1);
1415 xf_emit(ctx, 2, 0);
1416 xf_emit(ctx, 1, 0x10);
1417 xf_emit(ctx, 0x38, 0);
1418 xf_emit(ctx, 2, 0x88);
1419 xf_emit(ctx, 2, 0);
1420 xf_emit(ctx, 1, 4);
1421 xf_emit(ctx, 0x16, 0);
1422 xf_emit(ctx, 1, 0x26);
1423 xf_emit(ctx, 2, 0);
1424 xf_emit(ctx, 1, 0x3f800000);
1425 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1426 xf_emit(ctx, 4, 0);
1427 else
1428 xf_emit(ctx, 3, 0);
1429 xf_emit(ctx, 1, 0x1a);
1430 xf_emit(ctx, 1, 0x10);
1431 if (dev_priv->chipset != 0x50)
1432 xf_emit(ctx, 0x28, 0);
1433 else
1434 xf_emit(ctx, 0x25, 0);
1435 xf_emit(ctx, 1, 0x52);
1436 xf_emit(ctx, 1, 0);
1437 xf_emit(ctx, 1, 0x26);
1438 xf_emit(ctx, 1, 0);
1439 xf_emit(ctx, 2, 4);
1440 xf_emit(ctx, 1, 0);
1441 xf_emit(ctx, 1, 0x1a);
1442 xf_emit(ctx, 2, 0);
1443 xf_emit(ctx, 1, 0x00ffff00);
1444 xf_emit(ctx, 1, 0);
1445}
1446
1447static void
1448nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx)
1449{
1450 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1451 /* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */
1452 xf_emit(ctx, 1, 0x3f);
1453 xf_emit(ctx, 0xa, 0);
1454 xf_emit(ctx, 1, 2);
1455 xf_emit(ctx, 2, 0x04000000);
1456 xf_emit(ctx, 8, 0);
1457 xf_emit(ctx, 1, 4);
1458 xf_emit(ctx, 3, 0);
1459 xf_emit(ctx, 1, 4);
1460 if (dev_priv->chipset == 0x50)
1461 xf_emit(ctx, 0x10, 0);
1462 else
1463 xf_emit(ctx, 0x11, 0);
1464 xf_emit(ctx, 1, 1);
1465 xf_emit(ctx, 1, 0x1001);
1466 xf_emit(ctx, 4, 0xffff);
1467 xf_emit(ctx, 0x20, 0);
1468 xf_emit(ctx, 0x10, 0x3f800000);
1469 xf_emit(ctx, 1, 0x10);
1470 if (dev_priv->chipset == 0x50)
1471 xf_emit(ctx, 1, 0);
1472 else
1473 xf_emit(ctx, 2, 0);
1474 xf_emit(ctx, 1, 3);
1475 xf_emit(ctx, 2, 0);
1476}
1477
1478static void
1479nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx)
1480{
1481 /* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */
1482 xf_emit(ctx, 2, 0x04000000);
1483 xf_emit(ctx, 1, 0);
1484 xf_emit(ctx, 1, 0x80);
1485 xf_emit(ctx, 3, 0);
1486 xf_emit(ctx, 1, 0x80);
1487 xf_emit(ctx, 1, 0);
1488}
1489
1490static void
1491nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx)
1492{
1493 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1494 /* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */
1495 xf_emit(ctx, 2, 4);
1496 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1497 xf_emit(ctx, 0x1c4d, 0);
1498 else
1499 xf_emit(ctx, 0x1c4b, 0);
1500 xf_emit(ctx, 2, 4);
1501 xf_emit(ctx, 1, 0x8100c12);
1502 if (dev_priv->chipset != 0x50)
1503 xf_emit(ctx, 1, 3);
1504 xf_emit(ctx, 1, 0);
1505 xf_emit(ctx, 1, 0x8100c12);
1506 xf_emit(ctx, 1, 0);
1507 xf_emit(ctx, 1, 0x80c14);
1508 xf_emit(ctx, 1, 1);
1509 if (dev_priv->chipset >= 0xa0)
1510 xf_emit(ctx, 2, 4);
1511 xf_emit(ctx, 1, 0x80c14);
1512 xf_emit(ctx, 2, 0);
1513 xf_emit(ctx, 1, 0x8100c12);
1514 xf_emit(ctx, 1, 0x27);
1515 xf_emit(ctx, 2, 0);
1516 xf_emit(ctx, 1, 1);
1517 xf_emit(ctx, 0x3c1, 0);
1518 xf_emit(ctx, 1, 1);
1519 xf_emit(ctx, 0x16, 0);
1520 xf_emit(ctx, 1, 0x8100c12);
1521 xf_emit(ctx, 1, 0);
1522}
1523
1524static void
1525nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx)
1526{
1527 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1528 /* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */
1529 xf_emit(ctx, 4, 0);
1530 xf_emit(ctx, 1, 0xf);
1531 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1532 xf_emit(ctx, 8, 0);
1533 else
1534 xf_emit(ctx, 4, 0);
1535 xf_emit(ctx, 1, 0x20);
1536 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1537 xf_emit(ctx, 0x11, 0);
1538 else if (dev_priv->chipset >= 0xa0)
1539 xf_emit(ctx, 0xf, 0);
1540 else
1541 xf_emit(ctx, 0xe, 0);
1542 xf_emit(ctx, 1, 0x1a);
1543 xf_emit(ctx, 0xd, 0);
1544 xf_emit(ctx, 2, 4);
1545 xf_emit(ctx, 1, 0);
1546 xf_emit(ctx, 1, 4);
1547 xf_emit(ctx, 1, 8);
1548 xf_emit(ctx, 1, 0);
1549 if (dev_priv->chipset == 0x50)
1550 xf_emit(ctx, 1, 0x3ff);
1551 else
1552 xf_emit(ctx, 1, 0x7ff);
1553 if (dev_priv->chipset == 0xa8)
1554 xf_emit(ctx, 1, 0x1e00);
1555 xf_emit(ctx, 0xc, 0);
1556 xf_emit(ctx, 1, 0xf);
1557 if (dev_priv->chipset == 0x50)
1558 xf_emit(ctx, 0x125, 0);
1559 else if (dev_priv->chipset < 0xa0)
1560 xf_emit(ctx, 0x126, 0);
1561 else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
1562 xf_emit(ctx, 0x124, 0);
1563 else
1564 xf_emit(ctx, 0x1f7, 0);
1565 xf_emit(ctx, 1, 0xf);
1566 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1567 xf_emit(ctx, 3, 0);
1568 else
1569 xf_emit(ctx, 1, 0);
1570 xf_emit(ctx, 1, 1);
1571 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1572 xf_emit(ctx, 0xa1, 0);
1573 else
1574 xf_emit(ctx, 0x5a, 0);
1575 xf_emit(ctx, 1, 0xf);
1576 if (dev_priv->chipset < 0xa0)
1577 xf_emit(ctx, 0x834, 0);
1578 else if (dev_priv->chipset == 0xa0)
1579 xf_emit(ctx, 0x1873, 0);
1580 else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1581 xf_emit(ctx, 0x8ba, 0);
1582 else
1583 xf_emit(ctx, 0x833, 0);
1584 xf_emit(ctx, 1, 0xf);
1585 xf_emit(ctx, 0xf, 0);
1586}
1587
1588static void
1589nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx)
1590{
1591 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1592 /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */
1593 xf_emit(ctx, 2, 0);
1594 if (dev_priv->chipset == 0x50)
1595 xf_emit(ctx, 2, 1);
1596 else
1597 xf_emit(ctx, 2, 0);
1598 xf_emit(ctx, 1, 0);
1599 xf_emit(ctx, 1, 1);
1600 xf_emit(ctx, 2, 0x100);
1601 xf_emit(ctx, 1, 0x11);
1602 xf_emit(ctx, 1, 0);
1603 xf_emit(ctx, 1, 8);
1604 xf_emit(ctx, 5, 0);
1605 xf_emit(ctx, 1, 1);
1606 xf_emit(ctx, 1, 0);
1607 xf_emit(ctx, 3, 1);
1608 xf_emit(ctx, 1, 0xcf);
1609 xf_emit(ctx, 1, 2);
1610 xf_emit(ctx, 6, 0);
1611 xf_emit(ctx, 1, 1);
1612 xf_emit(ctx, 1, 0);
1613 xf_emit(ctx, 3, 1);
1614 xf_emit(ctx, 4, 0);
1615 xf_emit(ctx, 1, 4);
1616 xf_emit(ctx, 1, 0);
1617 xf_emit(ctx, 1, 1);
1618 xf_emit(ctx, 1, 0x15);
1619 xf_emit(ctx, 3, 0);
1620 xf_emit(ctx, 1, 0x4444480);
1621 xf_emit(ctx, 0x37, 0);
1622}
1623
1624static void
1625nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx)
1626{
1627 /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */
1628 xf_emit(ctx, 4, 0);
1629 xf_emit(ctx, 1, 0x8100c12);
1630 xf_emit(ctx, 4, 0);
1631 xf_emit(ctx, 1, 0x100);
1632 xf_emit(ctx, 2, 0);
1633 xf_emit(ctx, 1, 0x10001);
1634 xf_emit(ctx, 1, 0);
1635 xf_emit(ctx, 1, 0x10001);
1636 xf_emit(ctx, 1, 1);
1637 xf_emit(ctx, 1, 0x10001);
1638 xf_emit(ctx, 1, 1);
1639 xf_emit(ctx, 1, 4);
1640 xf_emit(ctx, 1, 2);
1641}
1642
1643static void
1644nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx)
1645{
1646 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1647 /* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */
1648 xf_emit(ctx, 1, 0x3f800000);
1649 xf_emit(ctx, 6, 0);
1650 xf_emit(ctx, 1, 4);
1651 xf_emit(ctx, 1, 0x1a);
1652 xf_emit(ctx, 2, 0);
1653 xf_emit(ctx, 1, 1);
1654 xf_emit(ctx, 0x12, 0);
1655 xf_emit(ctx, 1, 0x00ffff00);
1656 xf_emit(ctx, 6, 0);
1657 xf_emit(ctx, 1, 0xf);
1658 xf_emit(ctx, 7, 0);
1659 xf_emit(ctx, 1, 0x0fac6881);
1660 xf_emit(ctx, 1, 0x11);
1661 xf_emit(ctx, 0xf, 0);
1662 xf_emit(ctx, 1, 4);
1663 xf_emit(ctx, 2, 0);
1664 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1665 xf_emit(ctx, 1, 3);
1666 else if (dev_priv->chipset >= 0xa0)
1667 xf_emit(ctx, 1, 1);
1668 xf_emit(ctx, 2, 0);
1669 xf_emit(ctx, 1, 2);
1670 xf_emit(ctx, 2, 0x04000000);
1671 xf_emit(ctx, 3, 0);
1672 xf_emit(ctx, 1, 5);
1673 xf_emit(ctx, 1, 0x52);
1674 if (dev_priv->chipset == 0x50) {
1675 xf_emit(ctx, 0x13, 0);
1676 } else {
1677 xf_emit(ctx, 4, 0);
1678 xf_emit(ctx, 1, 1);
1679 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1680 xf_emit(ctx, 0x11, 0);
1681 else
1682 xf_emit(ctx, 0x10, 0);
1683 }
1684 xf_emit(ctx, 0x10, 0x3f800000);
1685 xf_emit(ctx, 1, 0x10);
1686 xf_emit(ctx, 0x26, 0);
1687 xf_emit(ctx, 1, 0x8100c12);
1688 xf_emit(ctx, 1, 5);
1689 xf_emit(ctx, 2, 0);
1690 xf_emit(ctx, 1, 1);
1691 xf_emit(ctx, 1, 0);
1692 xf_emit(ctx, 4, 0xffff);
1693 if (dev_priv->chipset != 0x50)
1694 xf_emit(ctx, 1, 3);
1695 if (dev_priv->chipset < 0xa0)
1696 xf_emit(ctx, 0x1f, 0);
1697 else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1698 xf_emit(ctx, 0xc, 0);
1699 else
1700 xf_emit(ctx, 3, 0);
1701 xf_emit(ctx, 1, 0x00ffff00);
1702 xf_emit(ctx, 1, 0x1a);
1703 if (dev_priv->chipset != 0x50) {
1704 xf_emit(ctx, 1, 0);
1705 xf_emit(ctx, 1, 3);
1706 }
1707 if (dev_priv->chipset < 0xa0)
1708 xf_emit(ctx, 0x26, 0);
1709 else
1710 xf_emit(ctx, 0x3c, 0);
1711 xf_emit(ctx, 1, 0x102);
1712 xf_emit(ctx, 1, 0);
1713 xf_emit(ctx, 4, 4);
1714 if (dev_priv->chipset >= 0xa0)
1715 xf_emit(ctx, 8, 0);
1716 xf_emit(ctx, 2, 4);
1717 xf_emit(ctx, 1, 0);
1718 if (dev_priv->chipset == 0x50)
1719 xf_emit(ctx, 1, 0x3ff);
1720 else
1721 xf_emit(ctx, 1, 0x7ff);
1722 xf_emit(ctx, 1, 0);
1723 xf_emit(ctx, 1, 0x102);
1724 xf_emit(ctx, 9, 0);
1725 xf_emit(ctx, 4, 4);
1726 xf_emit(ctx, 0x2c, 0);
1727}
1728
1729static void
1730nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
1731{
1732 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1733 int magic2;
1734 if (dev_priv->chipset == 0x50) {
1735 magic2 = 0x00003e60;
1736 } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
1737 magic2 = 0x001ffe67;
1738 } else {
1739 magic2 = 0x00087e67;
1740 }
1741 xf_emit(ctx, 8, 0);
1742 xf_emit(ctx, 1, 2);
1743 xf_emit(ctx, 1, 0);
1744 xf_emit(ctx, 1, magic2);
1745 xf_emit(ctx, 4, 0);
1746 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1747 xf_emit(ctx, 1, 1);
1748 xf_emit(ctx, 7, 0);
1749 if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa)
1750 xf_emit(ctx, 1, 0x15);
1751 xf_emit(ctx, 1, 0);
1752 xf_emit(ctx, 1, 1);
1753 xf_emit(ctx, 1, 0x10);
1754 xf_emit(ctx, 2, 0);
1755 xf_emit(ctx, 1, 1);
1756 xf_emit(ctx, 4, 0);
1757 if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
1758 xf_emit(ctx, 1, 4);
1759 xf_emit(ctx, 1, 0x400);
1760 xf_emit(ctx, 1, 0x300);
1761 xf_emit(ctx, 1, 0x1001);
1762 if (dev_priv->chipset != 0xa0) {
1763 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1764 xf_emit(ctx, 1, 0);
1765 else
1766 xf_emit(ctx, 1, 0x15);
1767 }
1768 xf_emit(ctx, 3, 0);
1769 }
1770 xf_emit(ctx, 2, 0);
1771 xf_emit(ctx, 1, 2);
1772 xf_emit(ctx, 8, 0);
1773 xf_emit(ctx, 1, 1);
1774 xf_emit(ctx, 1, 0x10);
1775 xf_emit(ctx, 1, 0);
1776 xf_emit(ctx, 1, 1);
1777 xf_emit(ctx, 0x13, 0);
1778 xf_emit(ctx, 1, 0x10);
1779 xf_emit(ctx, 0x10, 0);
1780 xf_emit(ctx, 0x10, 0x3f800000);
1781 xf_emit(ctx, 0x19, 0);
1782 xf_emit(ctx, 1, 0x10);
1783 xf_emit(ctx, 1, 0);
1784 xf_emit(ctx, 1, 0x3f);
1785 xf_emit(ctx, 6, 0);
1786 xf_emit(ctx, 1, 1);
1787 xf_emit(ctx, 1, 0);
1788 xf_emit(ctx, 1, 1);
1789 xf_emit(ctx, 1, 0);
1790 xf_emit(ctx, 1, 1);
1791 if (dev_priv->chipset >= 0xa0) {
1792 xf_emit(ctx, 2, 0);
1793 xf_emit(ctx, 1, 0x1001);
1794 xf_emit(ctx, 0xb, 0);
1795 } else {
1796 xf_emit(ctx, 0xc, 0);
1797 }
1798 xf_emit(ctx, 1, 0x11);
1799 xf_emit(ctx, 7, 0);
1800 xf_emit(ctx, 1, 0xf);
1801 xf_emit(ctx, 7, 0);
1802 xf_emit(ctx, 1, 0x11);
1803 if (dev_priv->chipset == 0x50)
1804 xf_emit(ctx, 4, 0);
1805 else
1806 xf_emit(ctx, 6, 0);
1807 xf_emit(ctx, 3, 1);
1808 xf_emit(ctx, 1, 2);
1809 xf_emit(ctx, 1, 1);
1810 xf_emit(ctx, 1, 2);
1811 xf_emit(ctx, 1, 1);
1812 xf_emit(ctx, 1, 0);
1813 xf_emit(ctx, 1, magic2);
1814 xf_emit(ctx, 1, 0);
1815 xf_emit(ctx, 1, 0x0fac6881);
1816 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1817 xf_emit(ctx, 1, 0);
1818 xf_emit(ctx, 0x18, 1);
1819 xf_emit(ctx, 8, 2);
1820 xf_emit(ctx, 8, 1);
1821 xf_emit(ctx, 8, 2);
1822 xf_emit(ctx, 8, 1);
1823 xf_emit(ctx, 3, 0);
1824 xf_emit(ctx, 1, 1);
1825 xf_emit(ctx, 5, 0);
1826 xf_emit(ctx, 1, 1);
1827 xf_emit(ctx, 0x16, 0);
1828 } else {
1829 if (dev_priv->chipset >= 0xa0)
1830 xf_emit(ctx, 0x1b, 0);
1831 else
1832 xf_emit(ctx, 0x15, 0);
1833 }
1834 xf_emit(ctx, 1, 1);
1835 xf_emit(ctx, 1, 2);
1836 xf_emit(ctx, 2, 1);
1837 xf_emit(ctx, 1, 2);
1838 xf_emit(ctx, 2, 1);
1839 if (dev_priv->chipset >= 0xa0)
1840 xf_emit(ctx, 4, 0);
1841 else
1842 xf_emit(ctx, 3, 0);
1843 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1844 xf_emit(ctx, 0x10, 1);
1845 xf_emit(ctx, 8, 2);
1846 xf_emit(ctx, 0x10, 1);
1847 xf_emit(ctx, 8, 2);
1848 xf_emit(ctx, 8, 1);
1849 xf_emit(ctx, 3, 0);
1850 }
1851 xf_emit(ctx, 1, 0x11);
1852 xf_emit(ctx, 1, 1);
1853 xf_emit(ctx, 0x5b, 0);
1854}
1855
1856static void
1857nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx)
1858{
1859 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1860 int magic3;
1861 if (dev_priv->chipset == 0x50)
1862 magic3 = 0x1000;
1863 else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8)
1864 magic3 = 0x1e00;
1865 else
1866 magic3 = 0;
1867 xf_emit(ctx, 1, 0);
1868 xf_emit(ctx, 1, 4);
1869 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1870 xf_emit(ctx, 0x24, 0);
1871 else if (dev_priv->chipset >= 0xa0)
1872 xf_emit(ctx, 0x14, 0);
1873 else
1874 xf_emit(ctx, 0x15, 0);
1875 xf_emit(ctx, 2, 4);
1876 if (dev_priv->chipset >= 0xa0)
1877 xf_emit(ctx, 1, 0x03020100);
1878 else
1879 xf_emit(ctx, 1, 0x00608080);
1880 xf_emit(ctx, 4, 0);
1881 xf_emit(ctx, 1, 4);
1882 xf_emit(ctx, 2, 0);
1883 xf_emit(ctx, 2, 4);
1884 xf_emit(ctx, 1, 0x80);
1885 if (magic3)
1886 xf_emit(ctx, 1, magic3);
1887 xf_emit(ctx, 1, 4);
1888 xf_emit(ctx, 0x24, 0);
1889 xf_emit(ctx, 1, 4);
1890 xf_emit(ctx, 1, 0x80);
1891 xf_emit(ctx, 1, 4);
1892 xf_emit(ctx, 1, 0x03020100);
1893 xf_emit(ctx, 1, 3);
1894 if (magic3)
1895 xf_emit(ctx, 1, magic3);
1896 xf_emit(ctx, 1, 4);
1897 xf_emit(ctx, 4, 0);
1898 xf_emit(ctx, 1, 4);
1899 xf_emit(ctx, 1, 3);
1900 xf_emit(ctx, 3, 0);
1901 xf_emit(ctx, 1, 4);
1902 if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
1903 xf_emit(ctx, 0x1024, 0);
1904 else if (dev_priv->chipset < 0xa0)
1905 xf_emit(ctx, 0xa24, 0);
1906 else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
1907 xf_emit(ctx, 0x214, 0);
1908 else
1909 xf_emit(ctx, 0x414, 0);
1910 xf_emit(ctx, 1, 4);
1911 xf_emit(ctx, 1, 3);
1912 xf_emit(ctx, 2, 0);
1913}
1914
1915static void
1916nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx)
1917{
1918 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1919 int magic1, magic2;
1920 if (dev_priv->chipset == 0x50) {
1921 magic1 = 0x3ff;
1922 magic2 = 0x00003e60;
1923 } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
1924 magic1 = 0x7ff;
1925 magic2 = 0x001ffe67;
1926 } else {
1927 magic1 = 0x7ff;
1928 magic2 = 0x00087e67;
1929 }
1930 xf_emit(ctx, 3, 0);
1931 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1932 xf_emit(ctx, 1, 1);
1933 xf_emit(ctx, 0xc, 0);
1934 xf_emit(ctx, 1, 0xf);
1935 xf_emit(ctx, 0xb, 0);
1936 xf_emit(ctx, 1, 4);
1937 xf_emit(ctx, 4, 0xffff);
1938 xf_emit(ctx, 8, 0);
1939 xf_emit(ctx, 1, 1);
1940 xf_emit(ctx, 3, 0);
1941 xf_emit(ctx, 1, 1);
1942 xf_emit(ctx, 5, 0);
1943 xf_emit(ctx, 1, 1);
1944 xf_emit(ctx, 2, 0);
1945 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1946 xf_emit(ctx, 1, 3);
1947 xf_emit(ctx, 1, 0);
1948 } else if (dev_priv->chipset >= 0xa0)
1949 xf_emit(ctx, 1, 1);
1950 xf_emit(ctx, 0xa, 0);
1951 xf_emit(ctx, 2, 1);
1952 xf_emit(ctx, 1, 2);
1953 xf_emit(ctx, 2, 1);
1954 xf_emit(ctx, 1, 2);
1955 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1956 xf_emit(ctx, 1, 0);
1957 xf_emit(ctx, 0x18, 1);
1958 xf_emit(ctx, 8, 2);
1959 xf_emit(ctx, 8, 1);
1960 xf_emit(ctx, 8, 2);
1961 xf_emit(ctx, 8, 1);
1962 xf_emit(ctx, 1, 0);
1963 }
1964 xf_emit(ctx, 1, 1);
1965 xf_emit(ctx, 1, 0);
1966 xf_emit(ctx, 1, 0x11);
1967 xf_emit(ctx, 7, 0);
1968 xf_emit(ctx, 1, 0x0fac6881);
1969 xf_emit(ctx, 2, 0);
1970 xf_emit(ctx, 1, 4);
1971 xf_emit(ctx, 3, 0);
1972 xf_emit(ctx, 1, 0x11);
1973 xf_emit(ctx, 1, 1);
1974 xf_emit(ctx, 1, 0);
1975 xf_emit(ctx, 3, 0xcf);
1976 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1977 xf_emit(ctx, 1, 1);
1978 xf_emit(ctx, 0xa, 0);
1979 xf_emit(ctx, 2, 1);
1980 xf_emit(ctx, 1, 2);
1981 xf_emit(ctx, 2, 1);
1982 xf_emit(ctx, 1, 2);
1983 xf_emit(ctx, 1, 1);
1984 xf_emit(ctx, 1, 0);
1985 xf_emit(ctx, 8, 1);
1986 xf_emit(ctx, 1, 0x11);
1987 xf_emit(ctx, 7, 0);
1988 xf_emit(ctx, 1, 0x0fac6881);
1989 xf_emit(ctx, 1, 0xf);
1990 xf_emit(ctx, 7, 0);
1991 xf_emit(ctx, 1, magic2);
1992 xf_emit(ctx, 2, 0);
1993 xf_emit(ctx, 1, 0x11);
1994 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1995 xf_emit(ctx, 2, 1);
1996 else
1997 xf_emit(ctx, 1, 1);
1998 if (dev_priv->chipset == 0x50)
1999 xf_emit(ctx, 1, 0);
2000 else
2001 xf_emit(ctx, 3, 0);
2002 xf_emit(ctx, 1, 4);
2003 xf_emit(ctx, 5, 0);
2004 xf_emit(ctx, 1, 1);
2005 xf_emit(ctx, 4, 0);
2006 xf_emit(ctx, 1, 0x11);
2007 xf_emit(ctx, 7, 0);
2008 xf_emit(ctx, 1, 0x0fac6881);
2009 xf_emit(ctx, 3, 0);
2010 xf_emit(ctx, 1, 0x11);
2011 xf_emit(ctx, 1, 1);
2012 xf_emit(ctx, 1, 0);
2013 xf_emit(ctx, 1, 1);
2014 xf_emit(ctx, 1, 0);
2015 xf_emit(ctx, 1, 1);
2016 xf_emit(ctx, 1, 0);
2017 xf_emit(ctx, 1, magic1);
2018 xf_emit(ctx, 1, 0);
2019 xf_emit(ctx, 1, 1);
2020 xf_emit(ctx, 1, 0);
2021 xf_emit(ctx, 1, 1);
2022 xf_emit(ctx, 2, 0);
2023 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2024 xf_emit(ctx, 1, 1);
2025 xf_emit(ctx, 0x28, 0);
2026 xf_emit(ctx, 8, 8);
2027 xf_emit(ctx, 1, 0x11);
2028 xf_emit(ctx, 7, 0);
2029 xf_emit(ctx, 1, 0x0fac6881);
2030 xf_emit(ctx, 8, 0x400);
2031 xf_emit(ctx, 8, 0x300);
2032 xf_emit(ctx, 1, 1);
2033 xf_emit(ctx, 1, 0xf);
2034 xf_emit(ctx, 7, 0);
2035 xf_emit(ctx, 1, 0x20);
2036 xf_emit(ctx, 1, 0x11);
2037 xf_emit(ctx, 1, 0x100);
2038 xf_emit(ctx, 1, 0);
2039 xf_emit(ctx, 1, 1);
2040 xf_emit(ctx, 2, 0);
2041 xf_emit(ctx, 1, 0x40);
2042 xf_emit(ctx, 1, 0x100);
2043 xf_emit(ctx, 1, 0);
2044 xf_emit(ctx, 1, 3);
2045 xf_emit(ctx, 4, 0);
2046 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2047 xf_emit(ctx, 1, 1);
2048 xf_emit(ctx, 1, magic2);
2049 xf_emit(ctx, 3, 0);
2050 xf_emit(ctx, 1, 2);
2051 xf_emit(ctx, 1, 0x0fac6881);
2052 xf_emit(ctx, 9, 0);
2053 xf_emit(ctx, 1, 1);
2054 xf_emit(ctx, 4, 0);
2055 xf_emit(ctx, 1, 4);
2056 xf_emit(ctx, 1, 0);
2057 xf_emit(ctx, 1, 1);
2058 xf_emit(ctx, 1, 0x400);
2059 xf_emit(ctx, 1, 0x300);
2060 xf_emit(ctx, 1, 0x1001);
2061 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2062 xf_emit(ctx, 4, 0);
2063 else
2064 xf_emit(ctx, 3, 0);
2065 xf_emit(ctx, 1, 0x11);
2066 xf_emit(ctx, 7, 0);
2067 xf_emit(ctx, 1, 0x0fac6881);
2068 xf_emit(ctx, 1, 0xf);
2069 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
2070 xf_emit(ctx, 0x15, 0);
2071 xf_emit(ctx, 1, 1);
2072 xf_emit(ctx, 3, 0);
2073 } else
2074 xf_emit(ctx, 0x17, 0);
2075 if (dev_priv->chipset >= 0xa0)
2076 xf_emit(ctx, 1, 0x0fac6881);
2077 xf_emit(ctx, 1, magic2);
2078 xf_emit(ctx, 3, 0);
2079 xf_emit(ctx, 1, 0x11);
2080 xf_emit(ctx, 2, 0);
2081 xf_emit(ctx, 1, 4);
2082 xf_emit(ctx, 1, 0);
2083 xf_emit(ctx, 2, 1);
2084 xf_emit(ctx, 3, 0);
2085 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2086 xf_emit(ctx, 2, 1);
2087 else
2088 xf_emit(ctx, 1, 1);
2089 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2090 xf_emit(ctx, 2, 0);
2091 else if (dev_priv->chipset != 0x50)
2092 xf_emit(ctx, 1, 0);
2093}
2094
2095static void
2096nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx)
2097{
2098 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2099 xf_emit(ctx, 3, 0);
2100 xf_emit(ctx, 1, 1);
2101 xf_emit(ctx, 1, 0);
2102 xf_emit(ctx, 1, 1);
2103 if (dev_priv->chipset == 0x50)
2104 xf_emit(ctx, 2, 0);
2105 else
2106 xf_emit(ctx, 3, 0);
2107 xf_emit(ctx, 1, 0x2a712488);
2108 xf_emit(ctx, 1, 0);
2109 xf_emit(ctx, 1, 0x4085c000);
2110 xf_emit(ctx, 1, 0x40);
2111 xf_emit(ctx, 1, 0x100);
2112 xf_emit(ctx, 1, 0x10100);
2113 xf_emit(ctx, 1, 0x02800000);
2114}
2115
2116static void
2117nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx)
2118{
2119 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2120 xf_emit(ctx, 2, 0x04e3bfdf);
2121 xf_emit(ctx, 1, 1);
2122 xf_emit(ctx, 1, 0);
2123 xf_emit(ctx, 1, 0x00ffff00);
2124 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2125 xf_emit(ctx, 2, 1);
2126 else
2127 xf_emit(ctx, 1, 1);
2128 xf_emit(ctx, 2, 0);
2129 xf_emit(ctx, 1, 0x00ffff00);
2130 xf_emit(ctx, 8, 0);
2131 xf_emit(ctx, 1, 1);
2132 xf_emit(ctx, 1, 0);
2133 xf_emit(ctx, 1, 1);
2134 xf_emit(ctx, 1, 0x30201000);
2135 xf_emit(ctx, 1, 0x70605040);
2136 xf_emit(ctx, 1, 0xb8a89888);
2137 xf_emit(ctx, 1, 0xf8e8d8c8);
2138 xf_emit(ctx, 1, 0);
2139 xf_emit(ctx, 1, 0x1a);
2140}
2141
2142static void
2143nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx)
2144{
2145 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2146 xf_emit(ctx, 3, 0);
2147 xf_emit(ctx, 1, 0xfac6881);
2148 xf_emit(ctx, 4, 0);
2149 xf_emit(ctx, 1, 4);
2150 xf_emit(ctx, 1, 0);
2151 xf_emit(ctx, 2, 1);
2152 xf_emit(ctx, 2, 0);
2153 xf_emit(ctx, 1, 1);
2154 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2155 xf_emit(ctx, 0xb, 0);
2156 else
2157 xf_emit(ctx, 0xa, 0);
2158 xf_emit(ctx, 8, 1);
2159 xf_emit(ctx, 1, 0x11);
2160 xf_emit(ctx, 7, 0);
2161 xf_emit(ctx, 1, 0xfac6881);
2162 xf_emit(ctx, 1, 0xf);
2163 xf_emit(ctx, 7, 0);
2164 xf_emit(ctx, 1, 0x11);
2165 xf_emit(ctx, 1, 1);
2166 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
2167 xf_emit(ctx, 6, 0);
2168 xf_emit(ctx, 1, 1);
2169 xf_emit(ctx, 6, 0);
2170 } else {
2171 xf_emit(ctx, 0xb, 0);
2172 }
2173}
2174
2175static void
2176nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
2177{
2178 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2179 if (dev_priv->chipset < 0xa0) {
2180 nv50_graph_construct_xfer_tp_x1(ctx);
2181 nv50_graph_construct_xfer_tp_x2(ctx);
2182 nv50_graph_construct_xfer_tp_x3(ctx);
2183 if (dev_priv->chipset == 0x50)
2184 xf_emit(ctx, 0xf, 0);
2185 else
2186 xf_emit(ctx, 0x12, 0);
2187 nv50_graph_construct_xfer_tp_x4(ctx);
2188 } else {
2189 nv50_graph_construct_xfer_tp_x3(ctx);
2190 if (dev_priv->chipset < 0xaa)
2191 xf_emit(ctx, 0xc, 0);
2192 else
2193 xf_emit(ctx, 0xa, 0);
2194 nv50_graph_construct_xfer_tp_x2(ctx);
2195 nv50_graph_construct_xfer_tp_x5(ctx);
2196 nv50_graph_construct_xfer_tp_x4(ctx);
2197 nv50_graph_construct_xfer_tp_x1(ctx);
2198 }
2199}
2200
2201static void
2202nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx)
2203{
2204 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2205 int i, mpcnt;
2206 if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
2207 mpcnt = 1;
2208 else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8)
2209 mpcnt = 2;
2210 else
2211 mpcnt = 3;
2212 for (i = 0; i < mpcnt; i++) {
2213 xf_emit(ctx, 1, 0);
2214 xf_emit(ctx, 1, 0x80);
2215 xf_emit(ctx, 1, 0x80007004);
2216 xf_emit(ctx, 1, 0x04000400);
2217 if (dev_priv->chipset >= 0xa0)
2218 xf_emit(ctx, 1, 0xc0);
2219 xf_emit(ctx, 1, 0x1000);
2220 xf_emit(ctx, 2, 0);
2221 if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) {
2222 xf_emit(ctx, 1, 0xe00);
2223 xf_emit(ctx, 1, 0x1e00);
2224 }
2225 xf_emit(ctx, 1, 1);
2226 xf_emit(ctx, 2, 0);
2227 if (dev_priv->chipset == 0x50)
2228 xf_emit(ctx, 2, 0x1000);
2229 xf_emit(ctx, 1, 1);
2230 xf_emit(ctx, 1, 0);
2231 xf_emit(ctx, 1, 4);
2232 xf_emit(ctx, 1, 2);
2233 if (dev_priv->chipset >= 0xaa)
2234 xf_emit(ctx, 0xb, 0);
2235 else if (dev_priv->chipset >= 0xa0)
2236 xf_emit(ctx, 0xc, 0);
2237 else
2238 xf_emit(ctx, 0xa, 0);
2239 }
2240 xf_emit(ctx, 1, 0x08100c12);
2241 xf_emit(ctx, 1, 0);
2242 if (dev_priv->chipset >= 0xa0) {
2243 xf_emit(ctx, 1, 0x1fe21);
2244 }
2245 xf_emit(ctx, 5, 0);
2246 xf_emit(ctx, 4, 0xffff);
2247 xf_emit(ctx, 1, 1);
2248 xf_emit(ctx, 2, 0x10001);
2249 xf_emit(ctx, 1, 1);
2250 xf_emit(ctx, 1, 0);
2251 xf_emit(ctx, 1, 0x1fe21);
2252 xf_emit(ctx, 1, 0);
2253 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2254 xf_emit(ctx, 1, 1);
2255 xf_emit(ctx, 4, 0);
2256 xf_emit(ctx, 1, 0x08100c12);
2257 xf_emit(ctx, 1, 4);
2258 xf_emit(ctx, 1, 0);
2259 xf_emit(ctx, 1, 2);
2260 xf_emit(ctx, 1, 0x11);
2261 xf_emit(ctx, 8, 0);
2262 xf_emit(ctx, 1, 0xfac6881);
2263 xf_emit(ctx, 1, 0);
2264 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2265 xf_emit(ctx, 1, 3);
2266 xf_emit(ctx, 3, 0);
2267 xf_emit(ctx, 1, 4);
2268 xf_emit(ctx, 9, 0);
2269 xf_emit(ctx, 1, 2);
2270 xf_emit(ctx, 2, 1);
2271 xf_emit(ctx, 1, 2);
2272 xf_emit(ctx, 3, 1);
2273 xf_emit(ctx, 1, 0);
2274 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
2275 xf_emit(ctx, 8, 2);
2276 xf_emit(ctx, 0x10, 1);
2277 xf_emit(ctx, 8, 2);
2278 xf_emit(ctx, 0x18, 1);
2279 xf_emit(ctx, 3, 0);
2280 }
2281 xf_emit(ctx, 1, 4);
2282 if (dev_priv->chipset == 0x50)
2283 xf_emit(ctx, 0x3a0, 0);
2284 else if (dev_priv->chipset < 0x94)
2285 xf_emit(ctx, 0x3a2, 0);
2286 else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
2287 xf_emit(ctx, 0x39f, 0);
2288 else
2289 xf_emit(ctx, 0x3a3, 0);
2290 xf_emit(ctx, 1, 0x11);
2291 xf_emit(ctx, 1, 0);
2292 xf_emit(ctx, 1, 1);
2293 xf_emit(ctx, 0x2d, 0);
2294}
2295
2296static void
2297nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
2298{
2299 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2300 int i;
2301 uint32_t offset;
2302 uint32_t units = nv_rd32 (ctx->dev, 0x1540);
2303 int size = 0;
2304
2305 offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
2306
2307 if (dev_priv->chipset < 0xa0) {
2308 for (i = 0; i < 8; i++) {
2309 ctx->ctxvals_pos = offset + i;
2310 if (i == 0)
2311 xf_emit(ctx, 1, 0x08100c12);
2312 if (units & (1 << i))
2313 nv50_graph_construct_xfer_tp2(ctx);
2314 if ((ctx->ctxvals_pos-offset)/8 > size)
2315 size = (ctx->ctxvals_pos-offset)/8;
2316 }
2317 } else {
2318 /* Strand 0: TPs 0, 1 */
2319 ctx->ctxvals_pos = offset;
2320 xf_emit(ctx, 1, 0x08100c12);
2321 if (units & (1 << 0))
2322 nv50_graph_construct_xfer_tp2(ctx);
2323 if (units & (1 << 1))
2324 nv50_graph_construct_xfer_tp2(ctx);
2325 if ((ctx->ctxvals_pos-offset)/8 > size)
2326 size = (ctx->ctxvals_pos-offset)/8;
2327
2328 /* Strand 0: TPs 2, 3 */
2329 ctx->ctxvals_pos = offset + 1;
2330 if (units & (1 << 2))
2331 nv50_graph_construct_xfer_tp2(ctx);
2332 if (units & (1 << 3))
2333 nv50_graph_construct_xfer_tp2(ctx);
2334 if ((ctx->ctxvals_pos-offset)/8 > size)
2335 size = (ctx->ctxvals_pos-offset)/8;
2336
2337 /* Strand 0: TPs 4, 5, 6 */
2338 ctx->ctxvals_pos = offset + 2;
2339 if (units & (1 << 4))
2340 nv50_graph_construct_xfer_tp2(ctx);
2341 if (units & (1 << 5))
2342 nv50_graph_construct_xfer_tp2(ctx);
2343 if (units & (1 << 6))
2344 nv50_graph_construct_xfer_tp2(ctx);
2345 if ((ctx->ctxvals_pos-offset)/8 > size)
2346 size = (ctx->ctxvals_pos-offset)/8;
2347
2348 /* Strand 0: TPs 7, 8, 9 */
2349 ctx->ctxvals_pos = offset + 3;
2350 if (units & (1 << 7))
2351 nv50_graph_construct_xfer_tp2(ctx);
2352 if (units & (1 << 8))
2353 nv50_graph_construct_xfer_tp2(ctx);
2354 if (units & (1 << 9))
2355 nv50_graph_construct_xfer_tp2(ctx);
2356 if ((ctx->ctxvals_pos-offset)/8 > size)
2357 size = (ctx->ctxvals_pos-offset)/8;
2358 }
2359 ctx->ctxvals_pos = offset + size * 8;
2360 ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
2361 cp_lsr (ctx, offset);
2362 cp_out (ctx, CP_SET_XFER_POINTER);
2363 cp_lsr (ctx, size);
2364 cp_out (ctx, CP_SEEK_2);
2365 cp_out (ctx, CP_XFER_2);
2366 cp_wait(ctx, XFER, BUSY);
2367}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index f0dc4e36ef05..de1f5b0062c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -390,7 +390,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
390 if (gpuobj->im_backing) 390 if (gpuobj->im_backing)
391 return -EINVAL; 391 return -EINVAL;
392 392
393 *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1); 393 *sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE);
394 if (*sz == 0) 394 if (*sz == 0)
395 return -EINVAL; 395 return -EINVAL;
396 396
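
/*
 * Illustrative sketch, not part of the patch: for a power-of-two alignment
 * the kernel's ALIGN() macro effectively expands to the same round-up the
 * removed open-coded line performed, i.e. roughly
 *
 *	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
 *
 * so ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE) yields the old expression's result.
 */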
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 1cc7b937b1ea..ed38262d9985 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -30,6 +30,9 @@ $(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
30$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable 30$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
31 $(call if_changed,mkregtable) 31 $(call if_changed,mkregtable)
32 32
33$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
34 $(call if_changed,mkregtable)
35
33$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h 36$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
34 37
35$(obj)/r200.o: $(obj)/r200_reg_safe.h 38$(obj)/r200.o: $(obj)/r200_reg_safe.h
@@ -42,6 +45,8 @@ $(obj)/r420.o: $(obj)/r420_reg_safe.h
42 45
43$(obj)/rs600.o: $(obj)/rs600_reg_safe.h 46$(obj)/rs600.o: $(obj)/rs600_reg_safe.h
44 47
48$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
49
45radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ 50radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
46 radeon_irq.o r300_cmdbuf.o r600_cp.o 51 radeon_irq.o r300_cmdbuf.o r600_cp.o
47# add KMS driver 52# add KMS driver
@@ -54,8 +59,10 @@ radeon-y += radeon_device.o radeon_kms.o \
54 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ 59 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
55 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ 60 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
56 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ 61 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
57 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o 62 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
63 evergreen.o
58 64
59radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 65radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
66radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
60 67
61obj-$(CONFIG_DRM_RADEON)+= radeon.o 68obj-$(CONFIG_DRM_RADEON)+= radeon.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 7f152f66f196..d75788feac6c 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -881,8 +881,6 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
881 uint8_t attr = U8((*ptr)++), shift; 881 uint8_t attr = U8((*ptr)++), shift;
882 uint32_t saved, dst; 882 uint32_t saved, dst;
883 int dptr = *ptr; 883 int dptr = *ptr;
884 attr &= 0x38;
885 attr |= atom_def_dst[attr >> 3] << 6;
886 SDEBUG(" dst: "); 884 SDEBUG(" dst: ");
887 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 885 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
888 shift = atom_get_src(ctx, attr, ptr); 886 shift = atom_get_src(ctx, attr, ptr);
@@ -897,8 +895,6 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
897 uint8_t attr = U8((*ptr)++), shift; 895 uint8_t attr = U8((*ptr)++), shift;
898 uint32_t saved, dst; 896 uint32_t saved, dst;
899 int dptr = *ptr; 897 int dptr = *ptr;
900 attr &= 0x38;
901 attr |= atom_def_dst[attr >> 3] << 6;
902 SDEBUG(" dst: "); 898 SDEBUG(" dst: ");
903 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 899 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
904 shift = atom_get_src(ctx, attr, ptr); 900 shift = atom_get_src(ctx, attr, ptr);
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 91ad0d1c1b17..6732b5dd8ff4 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2006-2007 Advanced Micro Devices, Inc. 2 * Copyright 2006-2007 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -20,10 +20,12 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 */ 21 */
22 22
23/****************************************************************************/ 23
24/****************************************************************************/
24/*Portion I: Definitions shared between VBIOS and Driver */ 25/*Portion I: Definitions shared between VBIOS and Driver */
25/****************************************************************************/ 26/****************************************************************************/
26 27
28
27#ifndef _ATOMBIOS_H 29#ifndef _ATOMBIOS_H
28#define _ATOMBIOS_H 30#define _ATOMBIOS_H
29 31
@@ -40,39 +42,46 @@
40#endif 42#endif
41 43
42#ifdef _H2INC 44#ifdef _H2INC
43#ifndef ULONG 45 #ifndef ULONG
44typedef unsigned long ULONG; 46 typedef unsigned long ULONG;
45#endif 47 #endif
46 48
47#ifndef UCHAR 49 #ifndef UCHAR
48typedef unsigned char UCHAR; 50 typedef unsigned char UCHAR;
49#endif 51 #endif
50 52
51#ifndef USHORT 53 #ifndef USHORT
52typedef unsigned short USHORT; 54 typedef unsigned short USHORT;
53#endif 55 #endif
54#endif 56#endif
55 57
56#define ATOM_DAC_A 0 58#define ATOM_DAC_A 0
57#define ATOM_DAC_B 1 59#define ATOM_DAC_B 1
58#define ATOM_EXT_DAC 2 60#define ATOM_EXT_DAC 2
59 61
60#define ATOM_CRTC1 0 62#define ATOM_CRTC1 0
61#define ATOM_CRTC2 1 63#define ATOM_CRTC2 1
64#define ATOM_CRTC3 2
65#define ATOM_CRTC4 3
66#define ATOM_CRTC5 4
67#define ATOM_CRTC6 5
68#define ATOM_CRTC_INVALID 0xFF
62 69
63#define ATOM_DIGA 0 70#define ATOM_DIGA 0
64#define ATOM_DIGB 1 71#define ATOM_DIGB 1
65 72
66#define ATOM_PPLL1 0 73#define ATOM_PPLL1 0
67#define ATOM_PPLL2 1 74#define ATOM_PPLL2 1
75#define ATOM_DCPLL 2
76#define ATOM_PPLL_INVALID 0xFF
68 77
69#define ATOM_SCALER1 0 78#define ATOM_SCALER1 0
70#define ATOM_SCALER2 1 79#define ATOM_SCALER2 1
71 80
72#define ATOM_SCALER_DISABLE 0 81#define ATOM_SCALER_DISABLE 0
73#define ATOM_SCALER_CENTER 1 82#define ATOM_SCALER_CENTER 1
74#define ATOM_SCALER_EXPANSION 2 83#define ATOM_SCALER_EXPANSION 2
75#define ATOM_SCALER_MULTI_EX 3 84#define ATOM_SCALER_MULTI_EX 3
76 85
77#define ATOM_DISABLE 0 86#define ATOM_DISABLE 0
78#define ATOM_ENABLE 1 87#define ATOM_ENABLE 1
@@ -82,6 +91,7 @@ typedef unsigned short USHORT;
82#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5) 91#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
83#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5) 92#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
84#define ATOM_ENCODER_INIT (ATOM_DISABLE+7) 93#define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
94#define ATOM_GET_STATUS (ATOM_DISABLE+8)
85 95
86#define ATOM_BLANKING 1 96#define ATOM_BLANKING 1
87#define ATOM_BLANKING_OFF 0 97#define ATOM_BLANKING_OFF 0
@@ -114,7 +124,7 @@ typedef unsigned short USHORT;
114#define ATOM_DAC2_CV ATOM_DAC1_CV 124#define ATOM_DAC2_CV ATOM_DAC1_CV
115#define ATOM_DAC2_NTSC ATOM_DAC1_NTSC 125#define ATOM_DAC2_NTSC ATOM_DAC1_NTSC
116#define ATOM_DAC2_PAL ATOM_DAC1_PAL 126#define ATOM_DAC2_PAL ATOM_DAC1_PAL
117 127
118#define ATOM_PM_ON 0 128#define ATOM_PM_ON 0
119#define ATOM_PM_STANDBY 1 129#define ATOM_PM_STANDBY 1
120#define ATOM_PM_SUSPEND 2 130#define ATOM_PM_SUSPEND 2
@@ -134,6 +144,7 @@ typedef unsigned short USHORT;
134#define ATOM_PANEL_MISC_TEMPORAL 0x00000040 144#define ATOM_PANEL_MISC_TEMPORAL 0x00000040
135#define ATOM_PANEL_MISC_API_ENABLED 0x00000080 145#define ATOM_PANEL_MISC_API_ENABLED 0x00000080
136 146
147
137#define MEMTYPE_DDR1 "DDR1" 148#define MEMTYPE_DDR1 "DDR1"
138#define MEMTYPE_DDR2 "DDR2" 149#define MEMTYPE_DDR2 "DDR2"
139#define MEMTYPE_DDR3 "DDR3" 150#define MEMTYPE_DDR3 "DDR3"
@@ -145,19 +156,19 @@ typedef unsigned short USHORT;
145 156
146/* Maximum size of that FireGL flag string */ 157/* Maximum size of that FireGL flag string */
147 158
148#define ATOM_FIREGL_FLAG_STRING "FGL" /* Flag used to enable FireGL Support */ 159#define ATOM_FIREGL_FLAG_STRING "FGL" //Flag used to enable FireGL Support
149#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 /* sizeof( ATOM_FIREGL_FLAG_STRING ) */ 160#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 //sizeof( ATOM_FIREGL_FLAG_STRING )
150 161
151#define ATOM_FAKE_DESKTOP_STRING "DSK" /* Flag used to enable mobile ASIC on Desktop */ 162#define ATOM_FAKE_DESKTOP_STRING "DSK" //Flag used to enable mobile ASIC on Desktop
152#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 163#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
153 164
154#define ATOM_M54T_FLAG_STRING "M54T" /* Flag used to enable M54T Support */ 165#define ATOM_M54T_FLAG_STRING "M54T" //Flag used to enable M54T Support
155#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 /* sizeof( ATOM_M54T_FLAG_STRING ) */ 166#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 //sizeof( ATOM_M54T_FLAG_STRING )
156 167
157#define HW_ASSISTED_I2C_STATUS_FAILURE 2 168#define HW_ASSISTED_I2C_STATUS_FAILURE 2
158#define HW_ASSISTED_I2C_STATUS_SUCCESS 1 169#define HW_ASSISTED_I2C_STATUS_SUCCESS 1
159 170
160#pragma pack(1) /* BIOS data must use byte aligment */ 171#pragma pack(1) /* BIOS data must use byte aligment */
161 172
162/* Define offset to location of ROM header. */ 173/* Define offset to location of ROM header. */
163 174
@@ -165,367 +176,410 @@ typedef unsigned short USHORT;
165#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L 176#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L
166 177
167#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94 178#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94
168#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */ 179#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */
169#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f 180#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f
170#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e 181#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e
171 182
172/* Common header for all ROM Data tables. 183/* Common header for all ROM Data tables.
173 Every table pointed _ATOM_MASTER_DATA_TABLE has this common header. 184 Every table pointed _ATOM_MASTER_DATA_TABLE has this common header.
174 And the pointer actually points to this header. */ 185 And the pointer actually points to this header. */
175 186
176typedef struct _ATOM_COMMON_TABLE_HEADER { 187typedef struct _ATOM_COMMON_TABLE_HEADER
177 USHORT usStructureSize; 188{
178 UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */ 189 USHORT usStructureSize;
179 UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */ 190 UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */
180 /*Image can't be updated, while Driver needs to carry the new table! */ 191 UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */
181} ATOM_COMMON_TABLE_HEADER; 192 /*Image can't be updated, while Driver needs to carry the new table! */
182 193}ATOM_COMMON_TABLE_HEADER;
183typedef struct _ATOM_ROM_HEADER { 194
184 ATOM_COMMON_TABLE_HEADER sHeader; 195typedef struct _ATOM_ROM_HEADER
185 UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios, 196{
186 atombios should init it as "ATOM", don't change the position */ 197 ATOM_COMMON_TABLE_HEADER sHeader;
187 USHORT usBiosRuntimeSegmentAddress; 198 UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios,
188 USHORT usProtectedModeInfoOffset; 199 atombios should init it as "ATOM", don't change the position */
189 USHORT usConfigFilenameOffset; 200 USHORT usBiosRuntimeSegmentAddress;
190 USHORT usCRC_BlockOffset; 201 USHORT usProtectedModeInfoOffset;
191 USHORT usBIOS_BootupMessageOffset; 202 USHORT usConfigFilenameOffset;
192 USHORT usInt10Offset; 203 USHORT usCRC_BlockOffset;
193 USHORT usPciBusDevInitCode; 204 USHORT usBIOS_BootupMessageOffset;
194 USHORT usIoBaseAddress; 205 USHORT usInt10Offset;
195 USHORT usSubsystemVendorID; 206 USHORT usPciBusDevInitCode;
196 USHORT usSubsystemID; 207 USHORT usIoBaseAddress;
197 USHORT usPCI_InfoOffset; 208 USHORT usSubsystemVendorID;
198 USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */ 209 USHORT usSubsystemID;
199 USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */ 210 USHORT usPCI_InfoOffset;
200 UCHAR ucExtendedFunctionCode; 211 USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
201 UCHAR ucReserved; 212 USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */
202} ATOM_ROM_HEADER; 213 UCHAR ucExtendedFunctionCode;
214 UCHAR ucReserved;
215}ATOM_ROM_HEADER;
203 216
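A quick orientation sketch, not part of this patch: given a raw VBIOS image, a 16-bit pointer stored near the start of the ROM leads to the ATOM_ROM_HEADER above, whose usMasterCommandTableOffset/usMasterDataTableOffset fields locate everything else. The 0x48 pointer location and a little-endian host are assumptions of this sketch; the real offset defines live earlier in this header.

#include <stdio.h>
#include <string.h>
#include "atombios.h"                      /* the header being patched here */

static const ATOM_ROM_HEADER *atom_rom_header(const UCHAR *bios)
{
        USHORT hdr_off;

        /* 16-bit pointer to the ROM header; 0x48 is an assumption for this sketch */
        memcpy(&hdr_off, bios + 0x48, sizeof(hdr_off));
        return (const ATOM_ROM_HEADER *)(bios + hdr_off);
}

static void atom_dump_master_offsets(const UCHAR *bios)
{
        const ATOM_ROM_HEADER *hdr = atom_rom_header(bios);

        if (memcmp(hdr->uaFirmWareSignature, "ATOM", 4) != 0)
                return;                    /* not an AtomBIOS image */

        printf("command tables at 0x%04x, data tables at 0x%04x\n",
               (unsigned)hdr->usMasterCommandTableOffset,
               (unsigned)hdr->usMasterDataTableOffset);
}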
204/*==============================Command Table Portion==================================== */ 217/*==============================Command Table Portion==================================== */
205 218
206#ifdef UEFI_BUILD 219#ifdef UEFI_BUILD
207#define UTEMP USHORT 220 #define UTEMP USHORT
208#define USHORT void* 221 #define USHORT void*
209#endif 222#endif
210 223
211typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES { 224typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
212 USHORT ASIC_Init; /* Function Table, used by various SW components,latest version 1.1 */ 225 USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1
213 USHORT GetDisplaySurfaceSize; /* Atomic Table, Used by Bios when enabling HW ICON */ 226 USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON
214 USHORT ASIC_RegistersInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ 227 USHORT ASIC_RegistersInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
215 USHORT VRAM_BlockVenderDetection; /* Atomic Table, used only by Bios */ 228 USHORT VRAM_BlockVenderDetection; //Atomic Table, used only by Bios
216 USHORT DIGxEncoderControl; /* Only used by Bios */ 229 USHORT DIGxEncoderControl; //Only used by Bios
217 USHORT MemoryControllerInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ 230 USHORT MemoryControllerInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
218 USHORT EnableCRTCMemReq; /* Function Table,directly used by various SW components,latest version 2.1 */ 231 USHORT EnableCRTCMemReq; //Function Table,directly used by various SW components,latest version 2.1
219 USHORT MemoryParamAdjust; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed */ 232 USHORT MemoryParamAdjust; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed
220 USHORT DVOEncoderControl; /* Function Table,directly used by various SW components,latest version 1.2 */ 233 USHORT DVOEncoderControl; //Function Table,directly used by various SW components,latest version 1.2
221 USHORT GPIOPinControl; /* Atomic Table, only used by Bios */ 234 USHORT GPIOPinControl; //Atomic Table, only used by Bios
222 USHORT SetEngineClock; /*Function Table,directly used by various SW components,latest version 1.1 */ 235 USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1
223 USHORT SetMemoryClock; /* Function Table,directly used by various SW components,latest version 1.1 */ 236 USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1
224 USHORT SetPixelClock; /*Function Table,directly used by various SW components,latest version 1.2 */ 237 USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2
225 USHORT DynamicClockGating; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ 238 USHORT DynamicClockGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
226 USHORT ResetMemoryDLL; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 239 USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
227 USHORT ResetMemoryDevice; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 240 USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
228 USHORT MemoryPLLInit; 241 USHORT MemoryPLLInit;
229 USHORT AdjustDisplayPll; /* only used by Bios */ 242 USHORT AdjustDisplayPll; //only used by Bios
230 USHORT AdjustMemoryController; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 243 USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
231 USHORT EnableASIC_StaticPwrMgt; /* Atomic Table, only used by Bios */ 244 USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
232 USHORT ASIC_StaticPwrMgtStatusChange; /* Obsolete, only used by Bios */ 245 USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios
233 USHORT DAC_LoadDetection; /* Atomic Table, directly used by various SW components,latest version 1.2 */ 246 USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
234 USHORT LVTMAEncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.3 */ 247 USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
235 USHORT LCD1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 248 USHORT LCD1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
236 USHORT DAC1EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 249 USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
237 USHORT DAC2EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 250 USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
238 USHORT DVOOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 251 USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
239 USHORT CV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 252 USHORT CV1OutputControl; //Atomic Table, Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
240 USHORT GetConditionalGoldenSetting; /* only used by Bios */ 253 USHORT GetConditionalGoldenSetting; //only used by Bios
241 USHORT TVEncoderControl; /* Function Table,directly used by various SW components,latest version 1.1 */ 254 USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
242 USHORT TMDSAEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */ 255 USHORT TMDSAEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
243 USHORT LVDSEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */ 256 USHORT LVDSEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
244 USHORT TV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 257 USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
245 USHORT EnableScaler; /* Atomic Table, used only by Bios */ 258 USHORT EnableScaler; //Atomic Table, used only by Bios
246 USHORT BlankCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 259 USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
247 USHORT EnableCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 260 USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
248 USHORT GetPixelClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 261 USHORT GetPixelClock; //Atomic Table, directly used by various SW components,latest version 1.1
249 USHORT EnableVGA_Render; /* Function Table,directly used by various SW components,latest version 1.1 */ 262 USHORT EnableVGA_Render; //Function Table,directly used by various SW components,latest version 1.1
250 USHORT EnableVGA_Access; /* Obsolete , only used by Bios */ 263 USHORT GetSCLKOverMCLKRatio; //Atomic Table, only used by Bios
251 USHORT SetCRTC_Timing; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 264 USHORT SetCRTC_Timing; //Atomic Table, directly used by various SW components,latest version 1.1
252 USHORT SetCRTC_OverScan; /* Atomic Table, used by various SW components,latest version 1.1 */ 265 USHORT SetCRTC_OverScan; //Atomic Table, used by various SW components,latest version 1.1
253 USHORT SetCRTC_Replication; /* Atomic Table, used only by Bios */ 266 USHORT SetCRTC_Replication; //Atomic Table, used only by Bios
254 USHORT SelectCRTC_Source; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 267 USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1
255 USHORT EnableGraphSurfaces; /* Atomic Table, used only by Bios */ 268 USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios
256 USHORT UpdateCRTC_DoubleBufferRegisters; 269 USHORT UpdateCRTC_DoubleBufferRegisters;
257 USHORT LUT_AutoFill; /* Atomic Table, only used by Bios */ 270 USHORT LUT_AutoFill; //Atomic Table, only used by Bios
258 USHORT EnableHW_IconCursor; /* Atomic Table, only used by Bios */ 271 USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios
259 USHORT GetMemoryClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 272 USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1
260 USHORT GetEngineClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 273 USHORT GetEngineClock; //Atomic Table, directly used by various SW components,latest version 1.1
261 USHORT SetCRTC_UsingDTDTiming; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 274 USHORT SetCRTC_UsingDTDTiming; //Atomic Table, directly used by various SW components,latest version 1.1
262 USHORT ExternalEncoderControl; /* Atomic Table, directly used by various SW components,latest version 2.1 */ 275 USHORT ExternalEncoderControl; //Atomic Table, directly used by various SW components,latest version 2.1
263 USHORT LVTMAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 276 USHORT LVTMAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
264 USHORT VRAM_BlockDetectionByStrap; /* Atomic Table, used only by Bios */ 277 USHORT VRAM_BlockDetectionByStrap; //Atomic Table, used only by Bios
265 USHORT MemoryCleanUp; /* Atomic Table, only used by Bios */ 278 USHORT MemoryCleanUp; //Atomic Table, only used by Bios
266 USHORT ProcessI2cChannelTransaction; /* Function Table,only used by Bios */ 279 USHORT ProcessI2cChannelTransaction; //Function Table,only used by Bios
267 USHORT WriteOneByteToHWAssistedI2C; /* Function Table,indirectly used by various SW components */ 280 USHORT WriteOneByteToHWAssistedI2C; //Function Table,indirectly used by various SW components
268 USHORT ReadHWAssistedI2CStatus; /* Atomic Table, indirectly used by various SW components */ 281 USHORT ReadHWAssistedI2CStatus; //Atomic Table, indirectly used by various SW components
269 USHORT SpeedFanControl; /* Function Table,indirectly used by various SW components,called from ASIC_Init */ 282 USHORT SpeedFanControl; //Function Table,indirectly used by various SW components,called from ASIC_Init
270 USHORT PowerConnectorDetection; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 283 USHORT PowerConnectorDetection; //Atomic Table, directly used by various SW components,latest version 1.1
271 USHORT MC_Synchronization; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 284 USHORT MC_Synchronization; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
272 USHORT ComputeMemoryEnginePLL; /* Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock */ 285 USHORT ComputeMemoryEnginePLL; //Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock
273 USHORT MemoryRefreshConversion; /* Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock */ 286 USHORT MemoryRefreshConversion; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock
274 USHORT VRAM_GetCurrentInfoBlock; /* Atomic Table, used only by Bios */ 287 USHORT VRAM_GetCurrentInfoBlock; //Atomic Table, used only by Bios
275 USHORT DynamicMemorySettings; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 288 USHORT DynamicMemorySettings; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
276 USHORT MemoryTraining; /* Atomic Table, used only by Bios */ 289 USHORT MemoryTraining; //Atomic Table, used only by Bios
277 USHORT EnableSpreadSpectrumOnPPLL; /* Atomic Table, directly used by various SW components,latest version 1.2 */ 290 USHORT EnableSpreadSpectrumOnPPLL; //Atomic Table, directly used by various SW components,latest version 1.2
278 USHORT TMDSAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 291 USHORT TMDSAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
279 USHORT SetVoltage; /* Function Table,directly and/or indirectly used by various SW components,latest version 1.1 */ 292 USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
280 USHORT DAC1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 293 USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
281 USHORT DAC2OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 294 USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
282 USHORT SetupHWAssistedI2CStatus; /* Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" */ 295 USHORT SetupHWAssistedI2CStatus; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
283 USHORT ClockSource; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ 296 USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
284 USHORT MemoryDeviceInit; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 297 USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
285 USHORT EnableYUV; /* Atomic Table, indirectly used by various SW components,called from EnableVGARender */ 298 USHORT EnableYUV; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
286 USHORT DIG1EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ 299 USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
287 USHORT DIG2EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ 300 USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
288 USHORT DIG1TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ 301 USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
289 USHORT DIG2TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ 302 USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
290 USHORT ProcessAuxChannelTransaction; /* Function Table,only used by Bios */ 303 USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios
291 USHORT DPEncoderService; /* Function Table,only used by Bios */ 304 USHORT DPEncoderService; //Function Table,only used by Bios
292} ATOM_MASTER_LIST_OF_COMMAND_TABLES; 305}ATOM_MASTER_LIST_OF_COMMAND_TABLES;
293 306
294/* For backward compatible */ 307// For backward compatible
295#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction 308#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
296#define UNIPHYTransmitterControl DIG1TransmitterControl 309#define UNIPHYTransmitterControl DIG1TransmitterControl
297#define LVTMATransmitterControl DIG2TransmitterControl 310#define LVTMATransmitterControl DIG2TransmitterControl
298#define SetCRTC_DPM_State GetConditionalGoldenSetting 311#define SetCRTC_DPM_State GetConditionalGoldenSetting
299#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange 312#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
313#define HPDInterruptService ReadHWAssistedI2CStatus
314#define EnableVGA_Access GetSCLKOverMCLKRatio
300 315
301typedef struct _ATOM_MASTER_COMMAND_TABLE { 316typedef struct _ATOM_MASTER_COMMAND_TABLE
302 ATOM_COMMON_TABLE_HEADER sHeader; 317{
303 ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables; 318 ATOM_COMMON_TABLE_HEADER sHeader;
304} ATOM_MASTER_COMMAND_TABLE; 319 ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
305 320}ATOM_MASTER_COMMAND_TABLE;
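Sketch, not from the patch: each USHORT slot in ATOM_MASTER_LIST_OF_COMMAND_TABLES holds the offset of one command table, so a table's "index" is simply its field position in the list, and a zero slot means that VBIOS does not carry the table. The helper names below are illustrative, not part of this header.

#include <stddef.h>
#include "atombios.h"

static int index_of_set_engine_clock(void)
{
        /* SetEngineClock happens to be the 11th USHORT slot, i.e. index 10 */
        return (int)(offsetof(ATOM_MASTER_LIST_OF_COMMAND_TABLES, SetEngineClock) /
                     sizeof(USHORT));
}

static USHORT command_table_offset(const ATOM_MASTER_COMMAND_TABLE *mct, int index)
{
        const USHORT *slots = (const USHORT *)&mct->ListOfCommandTables;

        return slots[index];               /* 0: table not present in this VBIOS image */
}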
306/****************************************************************************/ 321
307/* Structures used in every command table */ 322/****************************************************************************/
308/****************************************************************************/ 323// Structures used in every command table
309typedef struct _ATOM_TABLE_ATTRIBUTE { 324/****************************************************************************/
325typedef struct _ATOM_TABLE_ATTRIBUTE
326{
310#if ATOM_BIG_ENDIAN 327#if ATOM_BIG_ENDIAN
311 USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */ 328 USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag
312 USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */ 329 USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword),
313 USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */ 330 USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword),
314#else 331#else
315 USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */ 332 USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword),
316 USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */ 333 USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword),
317 USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */ 334 USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag
318#endif 335#endif
319} ATOM_TABLE_ATTRIBUTE; 336}ATOM_TABLE_ATTRIBUTE;
320
321typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS {
322 ATOM_TABLE_ATTRIBUTE sbfAccess;
323 USHORT susAccess;
324} ATOM_TABLE_ATTRIBUTE_ACCESS;
325 337
326/****************************************************************************/ 338typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS
327/* Common header for all command tables. */ 339{
328/* Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. */ 340 ATOM_TABLE_ATTRIBUTE sbfAccess;
329/* And the pointer actually points to this header. */ 341 USHORT susAccess;
330/****************************************************************************/ 342}ATOM_TABLE_ATTRIBUTE_ACCESS;
331typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER { 343
332 ATOM_COMMON_TABLE_HEADER CommonHeader; 344/****************************************************************************/
333 ATOM_TABLE_ATTRIBUTE TableAttribute; 345// Common header for all command tables.
334} ATOM_COMMON_ROM_COMMAND_TABLE_HEADER; 346// Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header.
347// And the pointer actually points to this header.
348/****************************************************************************/
349typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
350{
351 ATOM_COMMON_TABLE_HEADER CommonHeader;
352 ATOM_TABLE_ATTRIBUTE TableAttribute;
353}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
335 354
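Sketch, not from the patch: every command table starts with the common header above, and its attribute word can be read either through the bitfields or as a raw USHORT via the access union.

#include <stdio.h>
#include "atombios.h"

static void atom_dump_table_attribute(const ATOM_COMMON_ROM_COMMAND_TABLE_HEADER *t)
{
        ATOM_TABLE_ATTRIBUTE_ACCESS a;

        a.sbfAccess = t->TableAttribute;
        printf("attrib=0x%04x ps=%u ws=%u updated-by-utility=%u\n",
               (unsigned)a.susAccess,
               (unsigned)t->TableAttribute.PS_SizeInBytes,   /* parameter space, dword granularity */
               (unsigned)t->TableAttribute.WS_SizeInBytes,   /* workspace, dword granularity */
               (unsigned)t->TableAttribute.UpdatedByUtility);
}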
336/****************************************************************************/ 355/****************************************************************************/
337/* Structures used by ComputeMemoryEnginePLLTable */ 356// Structures used by ComputeMemoryEnginePLLTable
338/****************************************************************************/ 357/****************************************************************************/
339#define COMPUTE_MEMORY_PLL_PARAM 1 358#define COMPUTE_MEMORY_PLL_PARAM 1
340#define COMPUTE_ENGINE_PLL_PARAM 2 359#define COMPUTE_ENGINE_PLL_PARAM 2
341 360
342typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS { 361typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
343 ULONG ulClock; /* When returen, it's the re-calculated clock based on given Fb_div Post_Div and ref_div */ 362{
344 UCHAR ucAction; /* 0:reserved //1:Memory //2:Engine */ 363 ULONG ulClock; //When returen, it's the re-calculated clock based on given Fb_div Post_Div and ref_div
345 UCHAR ucReserved; /* may expand to return larger Fbdiv later */ 364 UCHAR ucAction; //0:reserved //1:Memory //2:Engine
346 UCHAR ucFbDiv; /* return value */ 365 UCHAR ucReserved; //may expand to return larger Fbdiv later
347 UCHAR ucPostDiv; /* return value */ 366 UCHAR ucFbDiv; //return value
348} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS; 367 UCHAR ucPostDiv; //return value
349 368}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
350typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 { 369
351 ULONG ulClock; /* When return, [23:0] return real clock */ 370typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2
352 UCHAR ucAction; /* 0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. it return ref_div to be written to register */ 371{
353 USHORT usFbDiv; /* return Feedback value to be written to register */ 372 ULONG ulClock; //When return, [23:0] return real clock
354 UCHAR ucPostDiv; /* return post div to be written to register */ 373 UCHAR ucAction; //0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. it return ref_div to be written to register
355} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2; 374 USHORT usFbDiv; //return Feedback value to be written to register
375 UCHAR ucPostDiv; //return post div to be written to register
376}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
356#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS 377#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
357 378
358#define SET_CLOCK_FREQ_MASK 0x00FFFFFF /* Clock change tables only take bit [23:0] as the requested clock value */ 379
359#define USE_NON_BUS_CLOCK_MASK 0x01000000 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */ 380#define SET_CLOCK_FREQ_MASK 0x00FFFFFF //Clock change tables only take bit [23:0] as the requested clock value
360#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */ 381#define USE_NON_BUS_CLOCK_MASK 0x01000000 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
361#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */ 382#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 //Only applicable to memory clock change, when set, using memory self refresh during clock transition
362#define FIRST_TIME_CHANGE_CLOCK 0x08000000 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */ 383#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
363#define SKIP_SW_PROGRAM_PLL 0x10000000 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */ 384#define FIRST_TIME_CHANGE_CLOCK 0x08000000 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
385#define SKIP_SW_PROGRAM_PLL 0x10000000 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
364#define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK 386#define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK
365 387
366#define b3USE_NON_BUS_CLOCK_MASK 0x01 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */ 388#define b3USE_NON_BUS_CLOCK_MASK 0x01 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
367#define b3USE_MEMORY_SELF_REFRESH 0x02 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */ 389#define b3USE_MEMORY_SELF_REFRESH 0x02 //Only applicable to memory clock change, when set, using memory self refresh during clock transition
368#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */ 390#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
369#define b3FIRST_TIME_CHANGE_CLOCK 0x08 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */ 391#define b3FIRST_TIME_CHANGE_CLOCK 0x08 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
370#define b3SKIP_SW_PROGRAM_PLL 0x10 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */ 392#define b3SKIP_SW_PROGRAM_PLL 0x10 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
371 393
372typedef struct _ATOM_COMPUTE_CLOCK_FREQ { 394typedef struct _ATOM_COMPUTE_CLOCK_FREQ
395{
373#if ATOM_BIG_ENDIAN 396#if ATOM_BIG_ENDIAN
374 ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */ 397 ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
375 ULONG ulClockFreq:24; /* in unit of 10kHz */ 398 ULONG ulClockFreq:24; // in unit of 10kHz
376#else 399#else
377 ULONG ulClockFreq:24; /* in unit of 10kHz */ 400 ULONG ulClockFreq:24; // in unit of 10kHz
378 ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */ 401 ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
379#endif 402#endif
380} ATOM_COMPUTE_CLOCK_FREQ; 403}ATOM_COMPUTE_CLOCK_FREQ;
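Sketch, not from the patch: the packed clock dword used by the Compute/Set clock interfaces carries a 24-bit frequency in 10 kHz units plus an 8-bit flag, so 400 MHz is requested as 40000.

#include "atombios.h"

static ATOM_COMPUTE_CLOCK_FREQ make_engine_clock_request(ULONG clock_10khz)
{
        ATOM_COMPUTE_CLOCK_FREQ req;

        req.ulClockFreq = clock_10khz & SET_CLOCK_FREQ_MASK;   /* 24-bit clock, 10 kHz units */
        req.ulComputeClockFlag = COMPUTE_ENGINE_PLL_PARAM;     /* =2: engine PLL */
        return req;
}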
381
382typedef struct _ATOM_S_MPLL_FB_DIVIDER {
383 USHORT usFbDivFrac;
384 USHORT usFbDiv;
385} ATOM_S_MPLL_FB_DIVIDER;
386 404
387typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 { 405typedef struct _ATOM_S_MPLL_FB_DIVIDER
388 union { 406{
389 ATOM_COMPUTE_CLOCK_FREQ ulClock; /* Input Parameter */ 407 USHORT usFbDivFrac;
390 ATOM_S_MPLL_FB_DIVIDER ulFbDiv; /* Output Parameter */ 408 USHORT usFbDiv;
391 }; 409}ATOM_S_MPLL_FB_DIVIDER;
392 UCHAR ucRefDiv; /* Output Parameter */
393 UCHAR ucPostDiv; /* Output Parameter */
394 UCHAR ucCntlFlag; /* Output Parameter */
395 UCHAR ucReserved;
396} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
397 410
398/* ucCntlFlag */ 411typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
412{
413 union
414 {
415 ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
416 ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
417 };
418 UCHAR ucRefDiv; //Output Parameter
419 UCHAR ucPostDiv; //Output Parameter
420 UCHAR ucCntlFlag; //Output Parameter
421 UCHAR ucReserved;
422}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
423
424// ucCntlFlag
399#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN 1 425#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN 1
400#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE 2 426#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE 2
401#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE 4 427#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE 4
428#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9 8
402 429
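Sketch, not from the patch: the V3 parameter block is an in/out union, so the caller writes the target clock and the table overwrites the same dword with the feedback-divider pair. atom_execute_table() stands in for the real table dispatcher and is only an assumption here.

#include "atombios.h"

int atom_execute_table(int table_index, void *params);        /* assumed dispatcher */

static void compute_engine_pll_v3(int pll_table_index, ULONG clock_10khz,
                                  USHORT *fb_div, UCHAR *ref_div, UCHAR *post_div)
{
        COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 args;

        args.ulClock.ulClockFreq = clock_10khz & SET_CLOCK_FREQ_MASK;   /* input */
        args.ulClock.ulComputeClockFlag = COMPUTE_ENGINE_PLL_PARAM;
        args.ucCntlFlag = 0;
        args.ucReserved = 0;

        atom_execute_table(pll_table_index, &args);

        *fb_div = args.ulFbDiv.usFbDiv;        /* output side of the union */
        *ref_div = args.ucRefDiv;
        *post_div = args.ucPostDiv;
}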
403typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER {
404 ATOM_COMPUTE_CLOCK_FREQ ulClock;
405 ULONG ulReserved[2];
406} DYNAMICE_MEMORY_SETTINGS_PARAMETER;
407
408typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER {
409 ATOM_COMPUTE_CLOCK_FREQ ulClock;
410 ULONG ulMemoryClock;
411 ULONG ulReserved;
412} DYNAMICE_ENGINE_SETTINGS_PARAMETER;
413
414/****************************************************************************/
415/* Structures used by SetEngineClockTable */
416/****************************************************************************/
417typedef struct _SET_ENGINE_CLOCK_PARAMETERS {
418 ULONG ulTargetEngineClock; /* In 10Khz unit */
419} SET_ENGINE_CLOCK_PARAMETERS;
420 430
421typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION { 431// V4 are only used for APU which PLL outside GPU
422 ULONG ulTargetEngineClock; /* In 10Khz unit */ 432typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
423 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved; 433{
424} SET_ENGINE_CLOCK_PS_ALLOCATION; 434#if ATOM_BIG_ENDIAN
435 ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly
436 ULONG ulClock:24; //Input= target clock, output = actual clock
437#else
438 ULONG ulClock:24; //Input= target clock, output = actual clock
439 ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly
440#endif
441}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
425 442
426/****************************************************************************/ 443typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
427/* Structures used by SetMemoryClockTable */ 444{
428/****************************************************************************/ 445 ATOM_COMPUTE_CLOCK_FREQ ulClock;
429typedef struct _SET_MEMORY_CLOCK_PARAMETERS { 446 ULONG ulReserved[2];
430 ULONG ulTargetMemoryClock; /* In 10Khz unit */ 447}DYNAMICE_MEMORY_SETTINGS_PARAMETER;
431} SET_MEMORY_CLOCK_PARAMETERS;
432 448
433typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION { 449typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
434 ULONG ulTargetMemoryClock; /* In 10Khz unit */ 450{
435 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved; 451 ATOM_COMPUTE_CLOCK_FREQ ulClock;
436} SET_MEMORY_CLOCK_PS_ALLOCATION; 452 ULONG ulMemoryClock;
453 ULONG ulReserved;
454}DYNAMICE_ENGINE_SETTINGS_PARAMETER;
455
456/****************************************************************************/
457// Structures used by SetEngineClockTable
458/****************************************************************************/
459typedef struct _SET_ENGINE_CLOCK_PARAMETERS
460{
461 ULONG ulTargetEngineClock; //In 10Khz unit
462}SET_ENGINE_CLOCK_PARAMETERS;
437 463
438/****************************************************************************/ 464typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION
439/* Structures used by ASIC_Init.ctb */ 465{
440/****************************************************************************/ 466 ULONG ulTargetEngineClock; //In 10Khz unit
441typedef struct _ASIC_INIT_PARAMETERS { 467 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
442 ULONG ulDefaultEngineClock; /* In 10Khz unit */ 468}SET_ENGINE_CLOCK_PS_ALLOCATION;
443 ULONG ulDefaultMemoryClock; /* In 10Khz unit */ 469
444} ASIC_INIT_PARAMETERS; 470/****************************************************************************/
471// Structures used by SetMemoryClockTable
472/****************************************************************************/
473typedef struct _SET_MEMORY_CLOCK_PARAMETERS
474{
475 ULONG ulTargetMemoryClock; //In 10Khz unit
476}SET_MEMORY_CLOCK_PARAMETERS;
445 477
446typedef struct _ASIC_INIT_PS_ALLOCATION { 478typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION
447 ASIC_INIT_PARAMETERS sASICInitClocks; 479{
448 SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; /* Caller doesn't need to init this structure */ 480 ULONG ulTargetMemoryClock; //In 10Khz unit
449} ASIC_INIT_PS_ALLOCATION; 481 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
482}SET_MEMORY_CLOCK_PS_ALLOCATION;
483
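Sketch, not from the patch: a SetEngineClock request only needs the target frequency in 10 kHz units; the trailing sReserved block is parameter-space scratch for the table itself. The dispatcher declaration is an assumption.

#include <string.h>
#include "atombios.h"

int atom_execute_table(int table_index, void *params);        /* assumed */

static void set_engine_clock(int set_engine_clock_index, ULONG clock_10khz)
{
        SET_ENGINE_CLOCK_PS_ALLOCATION args;

        memset(&args, 0, sizeof(args));          /* caller need not fill sReserved */
        args.ulTargetEngineClock = clock_10khz;  /* e.g. 60000 for 600 MHz */
        atom_execute_table(set_engine_clock_index, &args);
}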
484/****************************************************************************/
485// Structures used by ASIC_Init.ctb
486/****************************************************************************/
487typedef struct _ASIC_INIT_PARAMETERS
488{
489 ULONG ulDefaultEngineClock; //In 10Khz unit
490 ULONG ulDefaultMemoryClock; //In 10Khz unit
491}ASIC_INIT_PARAMETERS;
450 492
451/****************************************************************************/ 493typedef struct _ASIC_INIT_PS_ALLOCATION
452/* Structure used by DynamicClockGatingTable.ctb */ 494{
453/****************************************************************************/ 495 ASIC_INIT_PARAMETERS sASICInitClocks;
454typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS { 496 SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; //Caller doesn't need to init this structure
455 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 497}ASIC_INIT_PS_ALLOCATION;
456 UCHAR ucPadding[3]; 498
457} DYNAMIC_CLOCK_GATING_PARAMETERS; 499/****************************************************************************/
500// Structure used by DynamicClockGatingTable.ctb
501/****************************************************************************/
502typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS
503{
504 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
505 UCHAR ucPadding[3];
506}DYNAMIC_CLOCK_GATING_PARAMETERS;
458#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS 507#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS
459 508
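Sketch, not from the patch: several control tables take nothing but an ATOM_ENABLE/ATOM_DISABLE byte plus padding; DynamicClockGating is representative, and EnableASIC_StaticPwrMgt below has the same shape. The dispatcher is assumed.

#include "atombios.h"

int atom_execute_table(int table_index, void *params);        /* assumed */

static void dynamic_clock_gating(int table_index, int enable)
{
        DYNAMIC_CLOCK_GATING_PS_ALLOCATION args = { 0 };

        args.ucEnable = enable ? ATOM_ENABLE : ATOM_DISABLE;
        atom_execute_table(table_index, &args);
}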
460/****************************************************************************/ 509/****************************************************************************/
461/* Structure used by EnableASIC_StaticPwrMgtTable.ctb */ 510// Structure used by EnableASIC_StaticPwrMgtTable.ctb
462/****************************************************************************/ 511/****************************************************************************/
463typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS { 512typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
464 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 513{
465 UCHAR ucPadding[3]; 514 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
466} ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS; 515 UCHAR ucPadding[3];
516}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
467#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS 517#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
468 518
469/****************************************************************************/ 519/****************************************************************************/
470/* Structures used by DAC_LoadDetectionTable.ctb */ 520// Structures used by DAC_LoadDetectionTable.ctb
471/****************************************************************************/ 521/****************************************************************************/
472typedef struct _DAC_LOAD_DETECTION_PARAMETERS { 522typedef struct _DAC_LOAD_DETECTION_PARAMETERS
473 USHORT usDeviceID; /* {ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} */ 523{
474 UCHAR ucDacType; /* {ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} */ 524 USHORT usDeviceID; //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT}
475 UCHAR ucMisc; /* Valid only when table revision =1.3 and above */ 525 UCHAR ucDacType; //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC}
476} DAC_LOAD_DETECTION_PARAMETERS; 526 UCHAR ucMisc; //Valid only when table revision =1.3 and above
527}DAC_LOAD_DETECTION_PARAMETERS;
477 528
478/* DAC_LOAD_DETECTION_PARAMETERS.ucMisc */ 529// DAC_LOAD_DETECTION_PARAMETERS.ucMisc
479#define DAC_LOAD_MISC_YPrPb 0x01 530#define DAC_LOAD_MISC_YPrPb 0x01
480 531
481typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION { 532typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION
482 DAC_LOAD_DETECTION_PARAMETERS sDacload; 533{
483 ULONG Reserved[2]; /* Don't set this one, allocation for EXT DAC */ 534 DAC_LOAD_DETECTION_PARAMETERS sDacload;
484} DAC_LOAD_DETECTION_PS_ALLOCATION; 535 ULONG Reserved[2];// Don't set this one, allocation for EXT DAC
485 536}DAC_LOAD_DETECTION_PS_ALLOCATION;
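Sketch, not from the patch: load detection probes one device on one DAC. ATOM_DEVICE_CRT1_SUPPORT and ATOM_DAC_A are taken from elsewhere in this header (they are not shown in this hunk), and the dispatcher is assumed.

#include <string.h>
#include "atombios.h"

int atom_execute_table(int table_index, void *params);        /* assumed */

static void detect_crt1_load(int dac_load_detection_index)
{
        DAC_LOAD_DETECTION_PS_ALLOCATION args;

        memset(&args, 0, sizeof(args));          /* Reserved[] is scratch for the EXT DAC path */
        args.sDacload.usDeviceID = ATOM_DEVICE_CRT1_SUPPORT;   /* defined elsewhere in this header */
        args.sDacload.ucDacType = ATOM_DAC_A;                  /* likewise */
        args.sDacload.ucMisc = 0;                /* DAC_LOAD_MISC_YPrPb only for component out */
        atom_execute_table(dac_load_detection_index, &args);
}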
486/****************************************************************************/ 537
487/* Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb */ 538/****************************************************************************/
488/****************************************************************************/ 539// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb
489typedef struct _DAC_ENCODER_CONTROL_PARAMETERS { 540/****************************************************************************/
490 USHORT usPixelClock; /* in 10KHz; for bios convenient */ 541typedef struct _DAC_ENCODER_CONTROL_PARAMETERS
491 UCHAR ucDacStandard; /* See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) */ 542{
492 UCHAR ucAction; /* 0: turn off encoder */ 543 USHORT usPixelClock; // in 10KHz; for bios convenient
493 /* 1: setup and turn on encoder */ 544 UCHAR ucDacStandard; // See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0)
494 /* 7: ATOM_ENCODER_INIT Initialize DAC */ 545 UCHAR ucAction; // 0: turn off encoder
495} DAC_ENCODER_CONTROL_PARAMETERS; 546 // 1: setup and turn on encoder
547 // 7: ATOM_ENCODER_INIT Initialize DAC
548}DAC_ENCODER_CONTROL_PARAMETERS;
496 549
497#define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS 550#define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS
498 551
499/****************************************************************************/ 552/****************************************************************************/
500/* Structures used by DIG1EncoderControlTable */ 553// Structures used by DIG1EncoderControlTable
501/* DIG2EncoderControlTable */ 554// DIG2EncoderControlTable
502/* ExternalEncoderControlTable */ 555// ExternalEncoderControlTable
503/****************************************************************************/ 556/****************************************************************************/
504typedef struct _DIG_ENCODER_CONTROL_PARAMETERS { 557typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
505 USHORT usPixelClock; /* in 10KHz; for bios convenient */ 558{
506 UCHAR ucConfig; 559 USHORT usPixelClock; // in 10KHz; for bios convenient
507 /* [2] Link Select: */ 560 UCHAR ucConfig;
508 /* =0: PHY linkA if bfLane<3 */ 561 // [2] Link Select:
509 /* =1: PHY linkB if bfLanes<3 */ 562 // =0: PHY linkA if bfLane<3
510 /* =0: PHY linkA+B if bfLanes=3 */ 563 // =1: PHY linkB if bfLanes<3
511 /* [3] Transmitter Sel */ 564 // =0: PHY linkA+B if bfLanes=3
512 /* =0: UNIPHY or PCIEPHY */ 565 // [3] Transmitter Sel
513 /* =1: LVTMA */ 566 // =0: UNIPHY or PCIEPHY
514 UCHAR ucAction; /* =0: turn off encoder */ 567 // =1: LVTMA
515 /* =1: turn on encoder */ 568 UCHAR ucAction; // =0: turn off encoder
516 UCHAR ucEncoderMode; 569 // =1: turn on encoder
517 /* =0: DP encoder */ 570 UCHAR ucEncoderMode;
518 /* =1: LVDS encoder */ 571 // =0: DP encoder
519 /* =2: DVI encoder */ 572 // =1: LVDS encoder
520 /* =3: HDMI encoder */ 573 // =2: DVI encoder
521 /* =4: SDVO encoder */ 574 // =3: HDMI encoder
522 UCHAR ucLaneNum; /* how many lanes to enable */ 575 // =4: SDVO encoder
523 UCHAR ucReserved[2]; 576 UCHAR ucLaneNum; // how many lanes to enable
524} DIG_ENCODER_CONTROL_PARAMETERS; 577 UCHAR ucReserved[2];
578}DIG_ENCODER_CONTROL_PARAMETERS;
525#define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS 579#define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS
526#define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS 580#define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS
527 581
528/* ucConfig */ 582//ucConfig
529#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01 583#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01
530#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00 584#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00
531#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01 585#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01
@@ -539,52 +593,57 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS {
539#define ATOM_ENCODER_CONFIG_LVTMA 0x08 593#define ATOM_ENCODER_CONFIG_LVTMA 0x08
540#define ATOM_ENCODER_CONFIG_TRANSMITTER1 0x00 594#define ATOM_ENCODER_CONFIG_TRANSMITTER1 0x00
541#define ATOM_ENCODER_CONFIG_TRANSMITTER2 0x08 595#define ATOM_ENCODER_CONFIG_TRANSMITTER2 0x08
542#define ATOM_ENCODER_CONFIG_DIGB 0x80 /* VBIOS Internal use, outside SW should set this bit=0 */ 596#define ATOM_ENCODER_CONFIG_DIGB 0x80 // VBIOS Internal use, outside SW should set this bit=0
543/* ucAction */ 597// ucAction
544/* ATOM_ENABLE: Enable Encoder */ 598// ATOM_ENABLE: Enable Encoder
545/* ATOM_DISABLE: Disable Encoder */ 599// ATOM_DISABLE: Disable Encoder
546 600
547/* ucEncoderMode */ 601//ucEncoderMode
548#define ATOM_ENCODER_MODE_DP 0 602#define ATOM_ENCODER_MODE_DP 0
549#define ATOM_ENCODER_MODE_LVDS 1 603#define ATOM_ENCODER_MODE_LVDS 1
550#define ATOM_ENCODER_MODE_DVI 2 604#define ATOM_ENCODER_MODE_DVI 2
551#define ATOM_ENCODER_MODE_HDMI 3 605#define ATOM_ENCODER_MODE_HDMI 3
552#define ATOM_ENCODER_MODE_SDVO 4 606#define ATOM_ENCODER_MODE_SDVO 4
607#define ATOM_ENCODER_MODE_DP_AUDIO 5
553#define ATOM_ENCODER_MODE_TV 13 608#define ATOM_ENCODER_MODE_TV 13
554#define ATOM_ENCODER_MODE_CV 14 609#define ATOM_ENCODER_MODE_CV 14
555#define ATOM_ENCODER_MODE_CRT 15 610#define ATOM_ENCODER_MODE_CRT 15
556 611
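Sketch, not from the patch: enabling DIG1 as a two-lane DisplayPort encoder at the 1.62 GHz link rate, using the ATOM_ENCODER_CONFIG_* and ATOM_ENCODER_MODE_* values above. The dispatcher is assumed.

#include <string.h>
#include "atombios.h"

int atom_execute_table(int table_index, void *params);        /* assumed */

static void enable_dig1_dp(int dig1_encoder_control_index, USHORT pixel_clock_10khz)
{
        DIG_ENCODER_CONTROL_PARAMETERS args;

        memset(&args, 0, sizeof(args));
        args.usPixelClock = pixel_clock_10khz;   /* 10 kHz units */
        args.ucConfig = ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ |
                        ATOM_ENCODER_CONFIG_TRANSMITTER1;
        args.ucAction = ATOM_ENABLE;
        args.ucEncoderMode = ATOM_ENCODER_MODE_DP;
        args.ucLaneNum = 2;
        atom_execute_table(dig1_encoder_control_index, &args);
}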
557typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 { 612typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
613{
558#if ATOM_BIG_ENDIAN 614#if ATOM_BIG_ENDIAN
559 UCHAR ucReserved1:2; 615 UCHAR ucReserved1:2;
560 UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */ 616 UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF
561 UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */ 617 UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F
562 UCHAR ucReserved:1; 618 UCHAR ucReserved:1;
563 UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */ 619 UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
564#else 620#else
565 UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */ 621 UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
566 UCHAR ucReserved:1; 622 UCHAR ucReserved:1;
567 UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */ 623 UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F
568 UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */ 624 UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF
569 UCHAR ucReserved1:2; 625 UCHAR ucReserved1:2;
570#endif 626#endif
571} ATOM_DIG_ENCODER_CONFIG_V2; 627}ATOM_DIG_ENCODER_CONFIG_V2;
572 628
573typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 {
574 USHORT usPixelClock; /* in 10KHz; for bios convenient */
575 ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
576 UCHAR ucAction;
577 UCHAR ucEncoderMode;
578 /* =0: DP encoder */
579 /* =1: LVDS encoder */
580 /* =2: DVI encoder */
581 /* =3: HDMI encoder */
582 /* =4: SDVO encoder */
583 UCHAR ucLaneNum; /* how many lanes to enable */
584 UCHAR ucReserved[2];
585} DIG_ENCODER_CONTROL_PARAMETERS_V2;
586 629
587/* ucConfig */ 630typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
631{
632 USHORT usPixelClock; // in 10KHz; for bios convenient
633 ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
634 UCHAR ucAction;
635 UCHAR ucEncoderMode;
636 // =0: DP encoder
637 // =1: LVDS encoder
638 // =2: DVI encoder
639 // =3: HDMI encoder
640 // =4: SDVO encoder
641 UCHAR ucLaneNum; // how many lanes to enable
642 UCHAR ucStatus; // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE, only used by VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS
643 UCHAR ucReserved;
644}DIG_ENCODER_CONTROL_PARAMETERS_V2;
645
646//ucConfig
588#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK 0x01 647#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK 0x01
589#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ 0x00 648#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ 0x00
590#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ 0x01 649#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ 0x01
@@ -596,58 +655,122 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 {
596#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2 0x08 655#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2 0x08
597#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3 0x10 656#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3 0x10
598 657
599/****************************************************************************/ 658// ucAction:
600/* Structures used by UNIPHYTransmitterControlTable */ 659// ATOM_DISABLE
601/* LVTMATransmitterControlTable */ 660// ATOM_ENABLE
602/* DVOOutputControlTable */ 661#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08
603/****************************************************************************/ 662#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09
604typedef struct _ATOM_DP_VS_MODE { 663#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a
605 UCHAR ucLaneSel; 664#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b
606 UCHAR ucLaneSet; 665#define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c
607} ATOM_DP_VS_MODE; 666#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d
608 667#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e
609typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS { 668#define ATOM_ENCODER_CMD_SETUP 0x0f
610 union { 669
611 USHORT usPixelClock; /* in 10KHz; for bios convenient */ 670// ucStatus
612 USHORT usInitInfo; /* when init uniphy,lower 8bit is used for connector type defined in objectid.h */ 671#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10
613 ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */ 672#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00
673
674// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver
675typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
676{
677#if ATOM_BIG_ENDIAN
678 UCHAR ucReserved1:1;
679 UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
680 UCHAR ucReserved:3;
681 UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
682#else
683 UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
684 UCHAR ucReserved:3;
685 UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
686 UCHAR ucReserved1:1;
687#endif
688}ATOM_DIG_ENCODER_CONFIG_V3;
689
690#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70
691
692
693typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
694{
695 USHORT usPixelClock; // in 10KHz; for bios convenient
696 ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
697 UCHAR ucAction;
698 UCHAR ucEncoderMode;
699 // =0: DP encoder
700 // =1: LVDS encoder
701 // =2: DVI encoder
702 // =3: HDMI encoder
703 // =4: SDVO encoder
704 // =5: DP audio
705 UCHAR ucLaneNum; // how many lanes to enable
706 UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
707 UCHAR ucReserved;
708}DIG_ENCODER_CONTROL_PARAMETERS_V3;
709
710
711// define ucBitPerColor:
712#define PANEL_BPC_UNDEFINE 0x00
713#define PANEL_6BIT_PER_COLOR 0x01
714#define PANEL_8BIT_PER_COLOR 0x02
715#define PANEL_10BIT_PER_COLOR 0x03
716#define PANEL_12BIT_PER_COLOR 0x04
717#define PANEL_16BIT_PER_COLOR 0x05
718
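Sketch, not from the patch: the V3 encoder SETUP action added here also carries the sink bit depth, which per the comment above is only meaningful in DP mode. The ucDigSel value and the dispatcher are assumptions in this sketch.

#include <string.h>
#include "atombios.h"

int atom_execute_table(int table_index, void *params);        /* assumed */

static void setup_dig_encoder_v3(int table_index, USHORT pixel_clock_10khz, UCHAR dig_instance)
{
        DIG_ENCODER_CONTROL_PARAMETERS_V3 args;

        memset(&args, 0, sizeof(args));
        args.usPixelClock = pixel_clock_10khz;
        args.acConfig.ucDigSel = dig_instance;   /* 0..5 selects DIG A..F */
        args.ucAction = ATOM_ENCODER_CMD_SETUP;
        args.ucEncoderMode = ATOM_ENCODER_MODE_DP;
        args.ucLaneNum = 4;
        args.ucBitPerColor = PANEL_8BIT_PER_COLOR;
        atom_execute_table(table_index, &args);
}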
719/****************************************************************************/
720// Structures used by UNIPHYTransmitterControlTable
721// LVTMATransmitterControlTable
722// DVOOutputControlTable
723/****************************************************************************/
724typedef struct _ATOM_DP_VS_MODE
725{
726 UCHAR ucLaneSel;
727 UCHAR ucLaneSet;
728}ATOM_DP_VS_MODE;
729
730typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS
731{
732 union
733 {
734 USHORT usPixelClock; // in 10KHz; for bios convenient
735 USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h
736 ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
614 }; 737 };
615 UCHAR ucConfig; 738 UCHAR ucConfig;
616 /* [0]=0: 4 lane Link, */ 739 // [0]=0: 4 lane Link,
617 /* =1: 8 lane Link ( Dual Links TMDS ) */ 740 // =1: 8 lane Link ( Dual Links TMDS )
618 /* [1]=0: InCoherent mode */ 741 // [1]=0: InCoherent mode
619 /* =1: Coherent Mode */ 742 // =1: Coherent Mode
620 /* [2] Link Select: */ 743 // [2] Link Select:
621 /* =0: PHY linkA if bfLane<3 */ 744 // =0: PHY linkA if bfLane<3
622 /* =1: PHY linkB if bfLanes<3 */ 745 // =1: PHY linkB if bfLanes<3
623 /* =0: PHY linkA+B if bfLanes=3 */ 746 // =0: PHY linkA+B if bfLanes=3
624 /* [5:4]PCIE lane Sel */ 747 // [5:4]PCIE lane Sel
625 /* =0: lane 0~3 or 0~7 */ 748 // =0: lane 0~3 or 0~7
626 /* =1: lane 4~7 */ 749 // =1: lane 4~7
627 /* =2: lane 8~11 or 8~15 */ 750 // =2: lane 8~11 or 8~15
628 /* =3: lane 12~15 */ 751 // =3: lane 12~15
629 UCHAR ucAction; /* =0: turn off encoder */ 752 UCHAR ucAction; // =0: turn off encoder
630 /* =1: turn on encoder */ 753 // =1: turn on encoder
631 UCHAR ucReserved[4]; 754 UCHAR ucReserved[4];
632} DIG_TRANSMITTER_CONTROL_PARAMETERS; 755}DIG_TRANSMITTER_CONTROL_PARAMETERS;
633 756
634#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS 757#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS
635 758
636/* ucInitInfo */ 759//ucInitInfo
637#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff 760#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff
638 761
639/* ucConfig */ 762//ucConfig
640#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK 0x01 763#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK 0x01
641#define ATOM_TRANSMITTER_CONFIG_COHERENT 0x02 764#define ATOM_TRANSMITTER_CONFIG_COHERENT 0x02
642#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK 0x04 765#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK 0x04
643#define ATOM_TRANSMITTER_CONFIG_LINKA 0x00 766#define ATOM_TRANSMITTER_CONFIG_LINKA 0x00
644#define ATOM_TRANSMITTER_CONFIG_LINKB 0x04 767#define ATOM_TRANSMITTER_CONFIG_LINKB 0x04
645#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00 768#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00
646#define ATOM_TRANSMITTER_CONFIG_LINKB_A 0x04 769#define ATOM_TRANSMITTER_CONFIG_LINKB_A 0x04
647 770
648#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */ 771#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
649#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */ 772#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
650#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */ 773#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
651 774
652#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK 0x30 775#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK 0x30
653#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL 0x00 776#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL 0x00
@@ -661,7 +784,7 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
661#define ATOM_TRANSMITTER_CONFIG_LANE_8_15 0x80 784#define ATOM_TRANSMITTER_CONFIG_LANE_8_15 0x80
662#define ATOM_TRANSMITTER_CONFIG_LANE_12_15 0xc0 785#define ATOM_TRANSMITTER_CONFIG_LANE_12_15 0xc0
663 786
664/* ucAction */ 787//ucAction
665#define ATOM_TRANSMITTER_ACTION_DISABLE 0 788#define ATOM_TRANSMITTER_ACTION_DISABLE 0
666#define ATOM_TRANSMITTER_ACTION_ENABLE 1 789#define ATOM_TRANSMITTER_ACTION_ENABLE 1
667#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF 2 790#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF 2
@@ -674,93 +797,168 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
674#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT 9 797#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT 9
675#define ATOM_TRANSMITTER_ACTION_SETUP 10 798#define ATOM_TRANSMITTER_ACTION_SETUP 10
676#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH 11 799#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH 11
800#define ATOM_TRANSMITTER_ACTION_POWER_ON 12
801#define ATOM_TRANSMITTER_ACTION_POWER_OFF 13
677 802
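Sketch, not from the patch: transmitter bring-up is a separate call from the encoder; ucConfig routes the PHY link and source DIG with the ATOM_TRANSMITTER_CONFIG_* bits above. The dispatcher is assumed.

#include <string.h>
#include "atombios.h"

int atom_execute_table(int table_index, void *params);        /* assumed */

static void enable_uniphy_link_a(int transmitter_control_index, USHORT pixel_clock_10khz)
{
        DIG_TRANSMITTER_CONTROL_PARAMETERS args;

        memset(&args, 0, sizeof(args));
        args.usPixelClock = pixel_clock_10khz;   /* 10 kHz units */
        args.ucConfig = ATOM_TRANSMITTER_CONFIG_LINKA |
                        ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER |
                        ATOM_TRANSMITTER_CONFIG_COHERENT;
        args.ucAction = ATOM_TRANSMITTER_ACTION_ENABLE;
        atom_execute_table(transmitter_control_index, &args);
}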
678/* Following are used for DigTransmitterControlTable ver1.2 */ 803// Following are used for DigTransmitterControlTable ver1.2
679typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 { 804typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2
805{
680#if ATOM_BIG_ENDIAN 806#if ATOM_BIG_ENDIAN
681 UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */ 807 UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
682 /* =1 Dig Transmitter 2 ( Uniphy CD ) */ 808 // =1 Dig Transmitter 2 ( Uniphy CD )
683 /* =2 Dig Transmitter 3 ( Uniphy EF ) */ 809 // =2 Dig Transmitter 3 ( Uniphy EF )
684 UCHAR ucReserved:1; 810 UCHAR ucReserved:1;
685 UCHAR fDPConnector:1; /* bit4=0: DP connector =1: None DP connector */ 811 UCHAR fDPConnector:1; //bit4=0: DP connector =1: None DP connector
686 UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */ 812 UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
687 UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */ 813 UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
688 /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */ 814 // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
689 815
690 UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */ 816 UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
691 UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */ 817 UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
692#else 818#else
693 UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */ 819 UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
694 UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */ 820 UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
695 UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */ 821 UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
696 /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */ 822 // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
697 UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */ 823 UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
698 UCHAR fDPConnector:1; /* bit4=0: DP connector =1: None DP connector */ 824 UCHAR fDPConnector:1; //bit4=0: DP connector =1: None DP connector
699 UCHAR ucReserved:1; 825 UCHAR ucReserved:1;
700 UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */ 826 UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
701 /* =1 Dig Transmitter 2 ( Uniphy CD ) */ 827 // =1 Dig Transmitter 2 ( Uniphy CD )
702 /* =2 Dig Transmitter 3 ( Uniphy EF ) */ 828 // =2 Dig Transmitter 3 ( Uniphy EF )
703#endif 829#endif
704} ATOM_DIG_TRANSMITTER_CONFIG_V2; 830}ATOM_DIG_TRANSMITTER_CONFIG_V2;
705 831
706/* ucConfig */ 832//ucConfig
707/* Bit0 */ 833//Bit0
708#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR 0x01 834#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR 0x01
709 835
710/* Bit1 */ 836//Bit1
711#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT 0x02 837#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT 0x02
712 838
713/* Bit2 */ 839//Bit2
714#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK 0x04 840#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK 0x04
715#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00 841#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00
716#define ATOM_TRANSMITTER_CONFIG_V2_LINKB 0x04 842#define ATOM_TRANSMITTER_CONFIG_V2_LINKB 0x04
717 843
718/* Bit3 */ 844// Bit3
719#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK 0x08 845#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK 0x08
720#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */ 846#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
721#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */ 847#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
722 848
723/* Bit4 */ 849// Bit4
724#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR 0x10 850#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR 0x10
725 851
726/* Bit7:6 */ 852// Bit7:6
727#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK 0xC0 853#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK 0xC0
728#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 /* AB */ 854#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 //AB
729#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 /* CD */ 855#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 //CD
730#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 /* EF */ 856#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 //EF
731 857
732typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 { 858typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2
733 union { 859{
734 USHORT usPixelClock; /* in 10KHz; for bios convenient */ 860 union
735 USHORT usInitInfo; /* when init uniphy,lower 8bit is used for connector type defined in objectid.h */ 861 {
736 ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */ 862 USHORT usPixelClock; // in 10KHz; for bios convenient
863 USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h
864 ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
737 }; 865 };
738 ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig; 866 ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
739 UCHAR ucAction; /* define as ATOM_TRANSMITER_ACTION_XXX */ 867 UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX
740 UCHAR ucReserved[4]; 868 UCHAR ucReserved[4];
741} DIG_TRANSMITTER_CONTROL_PARAMETERS_V2; 869}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
742 870
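/*
 * Minimal sketch, not part of the header: filling
 * DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 for a coherent dual-link DVI output
 * on transmitter 2 (Uniphy CD) driven by the DIG2 encoder. Field meanings are
 * taken from the bit comments above; the caller is expected to zero the
 * structure first.
 */
static void example_fill_transmitter_v2(DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 *p)
{
	p->usPixelClock = 26950;                  /* 269.50 MHz in 10 kHz units */
	p->acConfig.fDualLinkConnector = 1;       /* dual-link DVI connector */
	p->acConfig.fCoherentMode = 1;            /* coherent mode for DVI/HDMI */
	p->acConfig.ucLinkSel = 0;                /* master link is A/C/E */
	p->acConfig.ucEncoderSel = 1;             /* data/clk path from DIGB (DIG inst1) */
	p->acConfig.fDPConnector = 1;             /* bit4=1: not a DP connector */
	p->acConfig.ucTransmitterSel = 1;         /* Dig Transmitter 2 (Uniphy CD) */
	p->ucAction = ATOM_TRANSMITTER_ACTION_ENABLE;
}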
743/****************************************************************************/ 871typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
744/* Structures used by DAC1OuputControlTable */ 872{
745/* DAC2OuputControlTable */ 873#if ATOM_BIG_ENDIAN
746/* LVTMAOutputControlTable (Before DEC30) */ 874 UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
747/* TMDSAOutputControlTable (Before DEC30) */ 875 // =1 Dig Transmitter 2 ( Uniphy CD )
748/****************************************************************************/ 876 // =2 Dig Transmitter 3 ( Uniphy EF )
749typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS { 877 UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
750 UCHAR ucAction; /* Possible input:ATOM_ENABLE||ATOMDISABLE */ 878 UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
751 /* When the display is LCD, in addition to above: */ 879 UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
752 /* ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START|| */ 880 // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
753 /* ATOM_LCD_SELFTEST_STOP */ 881 UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
882 UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
883#else
884 UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
885 UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
886 UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
887 // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
888 UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
889 UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
890 UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
891 // =1 Dig Transmitter 2 ( Uniphy CD )
892 // =2 Dig Transmitter 3 ( Uniphy EF )
893#endif
894}ATOM_DIG_TRANSMITTER_CONFIG_V3;
754 895
755 UCHAR aucPadding[3]; /* padding to DWORD aligned */ 896typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
756} DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS; 897{
898 union
899 {
900 USHORT usPixelClock; // in 10KHz; for bios convenient
901 USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h
902 ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
903 };
904 ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig;
905 UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX
906 UCHAR ucLaneNum;
907 UCHAR ucReserved[3];
908}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3;
909
910//ucConfig
911//Bit0
912#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR 0x01
913
914//Bit1
915#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT 0x02
916
917//Bit2
918#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK 0x04
919#define ATOM_TRANSMITTER_CONFIG_V3_LINKA 0x00
920#define ATOM_TRANSMITTER_CONFIG_V3_LINKB 0x04
921
922// Bit3
923#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK 0x08
924#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER 0x00
925#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER 0x08
926
927// Bit5:4
928#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK 0x30
929#define ATOM_TRASMITTER_CONFIG_V3_P1PLL 0x00
930#define ATOM_TRASMITTER_CONFIG_V3_P2PLL 0x10
931#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT 0x20
932
933// Bit7:6
934#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK 0xC0
935#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1 0x00 //AB
936#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD
937#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF
938
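/*
 * Minimal sketch, not part of the header: the same kind of request expressed
 * with the v1.3 layout, where the reference clock source and the lane count
 * become explicit.
 */
static void example_fill_transmitter_v3(DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 *p)
{
	p->usPixelClock = 16200;            /* 162.00 MHz in 10 kHz units */
	p->acConfig.fCoherentMode = 1;
	p->acConfig.ucLinkSel = 0;          /* link A/C/E */
	p->acConfig.ucEncoderSel = 0;       /* data/clk path from DIGA/C/E */
	p->acConfig.ucRefClkSource = 1;     /* PPLL2, per the bit5:4 comment above */
	p->acConfig.ucTransmitterSel = 2;   /* Dig Transmitter 3 (Uniphy EF) */
	p->ucAction = ATOM_TRANSMITTER_ACTION_SETUP;
	p->ucLaneNum = 4;                   /* e.g. four DP lanes */
	p->ucReserved[0] = p->ucReserved[1] = p->ucReserved[2] = 0;
}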
939/****************************************************************************/
940// Structures used by DAC1OuputControlTable
941// DAC2OuputControlTable
942// LVTMAOutputControlTable (Before DEC30)
943// TMDSAOutputControlTable (Before DEC30)
944/****************************************************************************/
945typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
946{
947 UCHAR ucAction; // Possible input:ATOM_ENABLE||ATOMDISABLE
948 // When the display is LCD, in addition to above:
949 // ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START||
950 // ATOM_LCD_SELFTEST_STOP
951
952 UCHAR aucPadding[3]; // padding to DWORD aligned
953}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
757 954
758#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS 955#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
759 956
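/*
 * Sketch only, not part of the header: turning an LCD backlight back on
 * through the output control table. ATOM_LCD_BLON and ATOM_ENABLE/ATOM_DISABLE
 * are assumed to come from the action defines earlier in this header, as the
 * ucAction comment above indicates.
 */
static void example_lcd_backlight_on(DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS *p)
{
	p->ucAction = ATOM_LCD_BLON;   /* non-LCD devices would use ATOM_ENABLE / ATOM_DISABLE */
	p->aucPadding[0] = 0;
	p->aucPadding[1] = 0;
	p->aucPadding[2] = 0;
}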
760#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS 957
958#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
761#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION 959#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
762 960
763#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS 961#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
764#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION 962#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
765 963
766#define CV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS 964#define CV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
@@ -782,397 +980,550 @@ typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS {
782#define DVO_OUTPUT_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PS_ALLOCATION 980#define DVO_OUTPUT_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
783#define DVO_OUTPUT_CONTROL_PARAMETERS_V3 DIG_TRANSMITTER_CONTROL_PARAMETERS 981#define DVO_OUTPUT_CONTROL_PARAMETERS_V3 DIG_TRANSMITTER_CONTROL_PARAMETERS
784 982
785/****************************************************************************/ 983/****************************************************************************/
786/* Structures used by BlankCRTCTable */ 984// Structures used by BlankCRTCTable
787/****************************************************************************/ 985/****************************************************************************/
788typedef struct _BLANK_CRTC_PARAMETERS { 986typedef struct _BLANK_CRTC_PARAMETERS
789 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 987{
790 UCHAR ucBlanking; /* ATOM_BLANKING or ATOM_BLANKINGOFF */ 988 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
791 USHORT usBlackColorRCr; 989 UCHAR ucBlanking; // ATOM_BLANKING or ATOM_BLANKINGOFF
792 USHORT usBlackColorGY; 990 USHORT usBlackColorRCr;
793 USHORT usBlackColorBCb; 991 USHORT usBlackColorGY;
794} BLANK_CRTC_PARAMETERS; 992 USHORT usBlackColorBCb;
993}BLANK_CRTC_PARAMETERS;
795#define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS 994#define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS
796 995
797/****************************************************************************/ 996/****************************************************************************/
798/* Structures used by EnableCRTCTable */ 997// Structures used by EnableCRTCTable
799/* EnableCRTCMemReqTable */ 998// EnableCRTCMemReqTable
800/* UpdateCRTC_DoubleBufferRegistersTable */ 999// UpdateCRTC_DoubleBufferRegistersTable
801/****************************************************************************/ 1000/****************************************************************************/
802typedef struct _ENABLE_CRTC_PARAMETERS { 1001typedef struct _ENABLE_CRTC_PARAMETERS
803 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 1002{
804 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 1003 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
805 UCHAR ucPadding[2]; 1004 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
806} ENABLE_CRTC_PARAMETERS; 1005 UCHAR ucPadding[2];
1006}ENABLE_CRTC_PARAMETERS;
807#define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS 1007#define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS
808 1008
809/****************************************************************************/ 1009/****************************************************************************/
810/* Structures used by SetCRTC_OverScanTable */ 1010// Structures used by SetCRTC_OverScanTable
811/****************************************************************************/ 1011/****************************************************************************/
812typedef struct _SET_CRTC_OVERSCAN_PARAMETERS { 1012typedef struct _SET_CRTC_OVERSCAN_PARAMETERS
813 USHORT usOverscanRight; /* right */ 1013{
814 USHORT usOverscanLeft; /* left */ 1014 USHORT usOverscanRight; // right
815 USHORT usOverscanBottom; /* bottom */ 1015 USHORT usOverscanLeft; // left
816 USHORT usOverscanTop; /* top */ 1016 USHORT usOverscanBottom; // bottom
817 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 1017 USHORT usOverscanTop; // top
818 UCHAR ucPadding[3]; 1018 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
819} SET_CRTC_OVERSCAN_PARAMETERS; 1019 UCHAR ucPadding[3];
1020}SET_CRTC_OVERSCAN_PARAMETERS;
820#define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS 1021#define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS
821 1022
822/****************************************************************************/ 1023/****************************************************************************/
823/* Structures used by SetCRTC_ReplicationTable */ 1024// Structures used by SetCRTC_ReplicationTable
824/****************************************************************************/ 1025/****************************************************************************/
825typedef struct _SET_CRTC_REPLICATION_PARAMETERS { 1026typedef struct _SET_CRTC_REPLICATION_PARAMETERS
826 UCHAR ucH_Replication; /* horizontal replication */ 1027{
827 UCHAR ucV_Replication; /* vertical replication */ 1028 UCHAR ucH_Replication; // horizontal replication
828 UCHAR usCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 1029 UCHAR ucV_Replication; // vertical replication
829 UCHAR ucPadding; 1030 UCHAR usCRTC; // ATOM_CRTC1 or ATOM_CRTC2
830} SET_CRTC_REPLICATION_PARAMETERS; 1031 UCHAR ucPadding;
1032}SET_CRTC_REPLICATION_PARAMETERS;
831#define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS 1033#define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS
832 1034
833/****************************************************************************/ 1035/****************************************************************************/
834/* Structures used by SelectCRTC_SourceTable */ 1036// Structures used by SelectCRTC_SourceTable
835/****************************************************************************/ 1037/****************************************************************************/
836typedef struct _SELECT_CRTC_SOURCE_PARAMETERS { 1038typedef struct _SELECT_CRTC_SOURCE_PARAMETERS
837 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 1039{
838 UCHAR ucDevice; /* ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|.... */ 1040 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
839 UCHAR ucPadding[2]; 1041 UCHAR ucDevice; // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|....
840} SELECT_CRTC_SOURCE_PARAMETERS; 1042 UCHAR ucPadding[2];
1043}SELECT_CRTC_SOURCE_PARAMETERS;
841#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS 1044#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS
842 1045
843typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 { 1046typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2
844 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 1047{
845 UCHAR ucEncoderID; /* DAC1/DAC2/TVOUT/DIG1/DIG2/DVO */ 1048 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
846 UCHAR ucEncodeMode; /* Encoding mode, only valid when using DIG1/DIG2/DVO */ 1049 UCHAR ucEncoderID; // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO
847 UCHAR ucPadding; 1050 UCHAR ucEncodeMode; // Encoding mode, only valid when using DIG1/DIG2/DVO
848} SELECT_CRTC_SOURCE_PARAMETERS_V2; 1051 UCHAR ucPadding;
849 1052}SELECT_CRTC_SOURCE_PARAMETERS_V2;
850/* ucEncoderID */ 1053
851/* #define ASIC_INT_DAC1_ENCODER_ID 0x00 */ 1054//ucEncoderID
852/* #define ASIC_INT_TV_ENCODER_ID 0x02 */ 1055//#define ASIC_INT_DAC1_ENCODER_ID 0x00
853/* #define ASIC_INT_DIG1_ENCODER_ID 0x03 */ 1056//#define ASIC_INT_TV_ENCODER_ID 0x02
854/* #define ASIC_INT_DAC2_ENCODER_ID 0x04 */ 1057//#define ASIC_INT_DIG1_ENCODER_ID 0x03
855/* #define ASIC_EXT_TV_ENCODER_ID 0x06 */ 1058//#define ASIC_INT_DAC2_ENCODER_ID 0x04
856/* #define ASIC_INT_DVO_ENCODER_ID 0x07 */ 1059//#define ASIC_EXT_TV_ENCODER_ID 0x06
857/* #define ASIC_INT_DIG2_ENCODER_ID 0x09 */ 1060//#define ASIC_INT_DVO_ENCODER_ID 0x07
858/* #define ASIC_EXT_DIG_ENCODER_ID 0x05 */ 1061//#define ASIC_INT_DIG2_ENCODER_ID 0x09
859 1062//#define ASIC_EXT_DIG_ENCODER_ID 0x05
860/* ucEncodeMode */ 1063
861/* #define ATOM_ENCODER_MODE_DP 0 */ 1064//ucEncodeMode
862/* #define ATOM_ENCODER_MODE_LVDS 1 */ 1065//#define ATOM_ENCODER_MODE_DP 0
863/* #define ATOM_ENCODER_MODE_DVI 2 */ 1066//#define ATOM_ENCODER_MODE_LVDS 1
864/* #define ATOM_ENCODER_MODE_HDMI 3 */ 1067//#define ATOM_ENCODER_MODE_DVI 2
865/* #define ATOM_ENCODER_MODE_SDVO 4 */ 1068//#define ATOM_ENCODER_MODE_HDMI 3
866/* #define ATOM_ENCODER_MODE_TV 13 */ 1069//#define ATOM_ENCODER_MODE_SDVO 4
867/* #define ATOM_ENCODER_MODE_CV 14 */ 1070//#define ATOM_ENCODER_MODE_TV 13
868/* #define ATOM_ENCODER_MODE_CRT 15 */ 1071//#define ATOM_ENCODER_MODE_CV 14
869 1072//#define ATOM_ENCODER_MODE_CRT 15
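/*
 * Sketch, not part of the header: routing CRTC2 to the DIG1 encoder in HDMI
 * mode. The numeric values are the ones listed in the commented-out
 * ucEncoderID / ucEncodeMode blocks above; the real defines live elsewhere
 * (objectid.h and later in this header), so plain numbers are used here.
 */
static void example_select_crtc_source_v2(SELECT_CRTC_SOURCE_PARAMETERS_V2 *p)
{
	p->ucCRTC = 1;          /* ATOM_CRTC2 is assumed to be 1 */
	p->ucEncoderID = 0x03;  /* ASIC_INT_DIG1_ENCODER_ID per the list above */
	p->ucEncodeMode = 3;    /* ATOM_ENCODER_MODE_HDMI per the list above */
	p->ucPadding = 0;
}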
870/****************************************************************************/ 1073
871/* Structures used by SetPixelClockTable */ 1074/****************************************************************************/
872/* GetPixelClockTable */ 1075// Structures used by SetPixelClockTable
873/****************************************************************************/ 1076// GetPixelClockTable
874/* Major revision=1., Minor revision=1 */ 1077/****************************************************************************/
875typedef struct _PIXEL_CLOCK_PARAMETERS { 1078//Major revision=1., Minor revision=1
876 USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */ 1079typedef struct _PIXEL_CLOCK_PARAMETERS
877 /* 0 means disable PPLL */ 1080{
878 USHORT usRefDiv; /* Reference divider */ 1081 USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
879 USHORT usFbDiv; /* feedback divider */ 1082 // 0 means disable PPLL
880 UCHAR ucPostDiv; /* post divider */ 1083 USHORT usRefDiv; // Reference divider
881 UCHAR ucFracFbDiv; /* fractional feedback divider */ 1084 USHORT usFbDiv; // feedback divider
882 UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */ 1085 UCHAR ucPostDiv; // post divider
883 UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */ 1086 UCHAR ucFracFbDiv; // fractional feedback divider
884 UCHAR ucCRTC; /* Which CRTC uses this Ppll */ 1087 UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2
885 UCHAR ucPadding; 1088 UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER
886} PIXEL_CLOCK_PARAMETERS; 1089 UCHAR ucCRTC; // Which CRTC uses this Ppll
887 1090 UCHAR ucPadding;
888/* Major revision=1., Minor revision=2, add ucMiscIfno */ 1091}PIXEL_CLOCK_PARAMETERS;
889/* ucMiscInfo: */ 1092
1093//Major revision=1., Minor revision=2, add ucMiscIfno
1094//ucMiscInfo:
890#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1 1095#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
891#define MISC_DEVICE_INDEX_MASK 0xF0 1096#define MISC_DEVICE_INDEX_MASK 0xF0
892#define MISC_DEVICE_INDEX_SHIFT 4 1097#define MISC_DEVICE_INDEX_SHIFT 4
893 1098
894typedef struct _PIXEL_CLOCK_PARAMETERS_V2 { 1099typedef struct _PIXEL_CLOCK_PARAMETERS_V2
895 USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */ 1100{
896 /* 0 means disable PPLL */ 1101 USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
897 USHORT usRefDiv; /* Reference divider */ 1102 // 0 means disable PPLL
898 USHORT usFbDiv; /* feedback divider */ 1103 USHORT usRefDiv; // Reference divider
899 UCHAR ucPostDiv; /* post divider */ 1104 USHORT usFbDiv; // feedback divider
900 UCHAR ucFracFbDiv; /* fractional feedback divider */ 1105 UCHAR ucPostDiv; // post divider
901 UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */ 1106 UCHAR ucFracFbDiv; // fractional feedback divider
902 UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */ 1107 UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2
903 UCHAR ucCRTC; /* Which CRTC uses this Ppll */ 1108 UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER
904 UCHAR ucMiscInfo; /* Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog */ 1109 UCHAR ucCRTC; // Which CRTC uses this Ppll
905} PIXEL_CLOCK_PARAMETERS_V2; 1110 UCHAR ucMiscInfo; // Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog
906 1111}PIXEL_CLOCK_PARAMETERS_V2;
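/*
 * Worked sketch of the divider comment above, not part of the header:
 * assuming a 27.00 MHz reference clock (2700 in 10 kHz units, a typical but
 * board-specific value), FB_Div=120, Ref_Div=4 and Post_Div=6 give
 * usPixelClock = 2700*120/(4*6) = 13500, i.e. 135.00 MHz.
 */
static void example_fill_pixel_clock_v2(PIXEL_CLOCK_PARAMETERS_V2 *p)
{
	p->usPixelClock = 13500;
	p->usRefDiv = 4;
	p->usFbDiv = 120;
	p->ucPostDiv = 6;
	p->ucFracFbDiv = 0;
	p->ucPpll = 0;          /* ATOM_PPLL1 is assumed to be 0 */
	p->ucRefDivSrc = 0;
	p->ucCRTC = 0;          /* ATOM_CRTC1 */
	p->ucMiscInfo = MISC_FORCE_REPROG_PIXEL_CLOCK |
			((2 << MISC_DEVICE_INDEX_SHIFT) & MISC_DEVICE_INDEX_MASK);
}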
907/* Major revision=1., Minor revision=3, structure/definition change */ 1112
908/* ucEncoderMode: */ 1113//Major revision=1., Minor revision=3, structure/definition change
909/* ATOM_ENCODER_MODE_DP */ 1114//ucEncoderMode:
910/* ATOM_ENOCDER_MODE_LVDS */ 1115//ATOM_ENCODER_MODE_DP
911/* ATOM_ENOCDER_MODE_DVI */ 1116//ATOM_ENOCDER_MODE_LVDS
912/* ATOM_ENOCDER_MODE_HDMI */ 1117//ATOM_ENOCDER_MODE_DVI
913/* ATOM_ENOCDER_MODE_SDVO */ 1118//ATOM_ENOCDER_MODE_HDMI
914/* ATOM_ENCODER_MODE_TV 13 */ 1119//ATOM_ENOCDER_MODE_SDVO
915/* ATOM_ENCODER_MODE_CV 14 */ 1120//ATOM_ENCODER_MODE_TV 13
916/* ATOM_ENCODER_MODE_CRT 15 */ 1121//ATOM_ENCODER_MODE_CV 14
917 1122//ATOM_ENCODER_MODE_CRT 15
918/* ucDVOConfig */ 1123
919/* #define DVO_ENCODER_CONFIG_RATE_SEL 0x01 */ 1124//ucDVOConfig
920/* #define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 */ 1125//#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
921/* #define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 */ 1126//#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
922/* #define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c */ 1127//#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
923/* #define DVO_ENCODER_CONFIG_LOW12BIT 0x00 */ 1128//#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c
924/* #define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 */ 1129//#define DVO_ENCODER_CONFIG_LOW12BIT 0x00
925/* #define DVO_ENCODER_CONFIG_24BIT 0x08 */ 1130//#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
926 1131//#define DVO_ENCODER_CONFIG_24BIT 0x08
927/* ucMiscInfo: also changed, see below */ 1132
1133//ucMiscInfo: also changed, see below
928#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01 1134#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01
929#define PIXEL_CLOCK_MISC_VGA_MODE 0x02 1135#define PIXEL_CLOCK_MISC_VGA_MODE 0x02
930#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04 1136#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04
931#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00 1137#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00
932#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04 1138#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04
933#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08 1139#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08
1140#define PIXEL_CLOCK_MISC_REF_DIV_SRC 0x10
1141// V1.4 for RoadRunner
1142#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10
1143#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20
934 1144
935typedef struct _PIXEL_CLOCK_PARAMETERS_V3 { 1145typedef struct _PIXEL_CLOCK_PARAMETERS_V3
936 USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */ 1146{
937 /* 0 means disable PPLL. For VGA PPLL,make sure this value is not 0. */ 1147 USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
938 USHORT usRefDiv; /* Reference divider */ 1148 // 0 means disable PPLL. For VGA PPLL,make sure this value is not 0.
939 USHORT usFbDiv; /* feedback divider */ 1149 USHORT usRefDiv; // Reference divider
940 UCHAR ucPostDiv; /* post divider */ 1150 USHORT usFbDiv; // feedback divider
941 UCHAR ucFracFbDiv; /* fractional feedback divider */ 1151 UCHAR ucPostDiv; // post divider
942 UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */ 1152 UCHAR ucFracFbDiv; // fractional feedback divider
943 UCHAR ucTransmitterId; /* graphic encoder id defined in objectId.h */ 1153 UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2
944 union { 1154 UCHAR ucTransmitterId; // graphic encoder id defined in objectId.h
945 UCHAR ucEncoderMode; /* encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ */ 1155 union
946 UCHAR ucDVOConfig; /* when use DVO, need to know SDR/DDR, 12bit or 24bit */ 1156 {
1157 UCHAR ucEncoderMode; // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/
1158 UCHAR ucDVOConfig; // when use DVO, need to know SDR/DDR, 12bit or 24bit
947 }; 1159 };
948 UCHAR ucMiscInfo; /* bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel */ 1160 UCHAR ucMiscInfo; // bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel
949 /* bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source */ 1161 // bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source
950} PIXEL_CLOCK_PARAMETERS_V3; 1162 // bit[4]=0:use XTALIN as the source of reference divider,=1 use the pre-defined clock as the source of reference divider
1163}PIXEL_CLOCK_PARAMETERS_V3;
951 1164
952#define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2 1165#define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2
953#define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST 1166#define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST
954 1167
955/****************************************************************************/ 1168typedef struct _PIXEL_CLOCK_PARAMETERS_V5
956/* Structures used by AdjustDisplayPllTable */ 1169{
957/****************************************************************************/ 1170 UCHAR ucCRTC; // ATOM_CRTC1~6, indicate the CRTC controller to
958typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS { 1171 // drive the pixel clock. not used for DCPLL case.
1172 union{
1173 UCHAR ucReserved;
1174 UCHAR ucFracFbDiv; // [gphan] temporary to prevent build problem. remove it after driver code is changed.
1175 };
1176 USHORT usPixelClock; // target the pixel clock to drive the CRTC timing
1177 // 0 means disable PPLL/DCPLL.
1178 USHORT usFbDiv; // feedback divider integer part.
1179 UCHAR ucPostDiv; // post divider.
1180 UCHAR ucRefDiv; // Reference divider
1181 UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
1182 UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
1183 // indicate which graphic encoder will be used.
1184 UCHAR ucEncoderMode; // Encoder mode:
1185 UCHAR ucMiscInfo; // bit[0]= Force program PPLL
1186 // bit[1]= when VGA timing is used.
1187 // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
1188 // bit[4]= RefClock source for PPLL.
1189 // =0: XTLAIN( default mode )
1190 // =1: other external clock source, which is pre-defined
1191 // by VBIOS depend on the feature required.
1192 // bit[7:5]: reserved.
1193 ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
1194
1195}PIXEL_CLOCK_PARAMETERS_V5;
1196
1197#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL 0x01
1198#define PIXEL_CLOCK_V5_MISC_VGA_MODE 0x02
1199#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK 0x0c
1200#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP 0x00
1201#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP 0x04
1202#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08
1203#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10
1204
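/*
 * Sketch of the v1.5 fractional feedback divider, not part of the header,
 * assuming the same RefClk*FB_Div/(Ref_Div*Post_Div) relation as the v1
 * comment: with a 27.00 MHz reference, FB = 100 + 500000/1000000 = 100.5,
 * Ref_Div=2 and Post_Div=5 give 2700*100.5/(2*5) = 27135, i.e. 271.35 MHz.
 */
static void example_fill_pixel_clock_v5(PIXEL_CLOCK_PARAMETERS_V5 *p,
					UCHAR encoder_object_id)
{
	p->ucCRTC = 0;                   /* ATOM_CRTC1 */
	p->usPixelClock = 27135;         /* 10 kHz units */
	p->usFbDiv = 100;                /* integer part of the feedback divider */
	p->ulFbDivDecFrac = 500000;      /* .500000 fractional part */
	p->ucPostDiv = 5;
	p->ucRefDiv = 2;
	p->ucPpll = 0;                   /* ATOM_PPLL1 is assumed to be 0 */
	p->ucTransmitterID = encoder_object_id;  /* from objectid.h, not shown here */
	p->ucEncoderMode = 3;            /* ATOM_ENCODER_MODE_HDMI as listed earlier */
	p->ucMiscInfo = PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL |
			PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
}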
1205typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
1206{
1207 PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
1208}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2;
1209
1210typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2
1211{
1212 UCHAR ucStatus;
1213 UCHAR ucRefDivSrc; // =1: reference clock source from XTALIN, =0: source from PCIE ref clock
1214 UCHAR ucReserved[2];
1215}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2;
1216
1217typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3
1218{
1219 PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
1220}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;
1221
1222/****************************************************************************/
1223// Structures used by AdjustDisplayPllTable
1224/****************************************************************************/
1225typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
1226{
959 USHORT usPixelClock; 1227 USHORT usPixelClock;
960 UCHAR ucTransmitterID; 1228 UCHAR ucTransmitterID;
961 UCHAR ucEncodeMode; 1229 UCHAR ucEncodeMode;
962 union { 1230 union
963 UCHAR ucDVOConfig; /* if DVO, need passing link rate and output 12bitlow or 24bit */ 1231 {
964 UCHAR ucConfig; /* if none DVO, not defined yet */ 1232 UCHAR ucDVOConfig; //if DVO, need passing link rate and output 12bitlow or 24bit
1233 UCHAR ucConfig; //if none DVO, not defined yet
965 }; 1234 };
966 UCHAR ucReserved[3]; 1235 UCHAR ucReserved[3];
967} ADJUST_DISPLAY_PLL_PARAMETERS; 1236}ADJUST_DISPLAY_PLL_PARAMETERS;
968 1237
969#define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10 1238#define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10
970
971#define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS 1239#define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS
972 1240
973/****************************************************************************/ 1241typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
974/* Structures used by EnableYUVTable */ 1242{
975/****************************************************************************/ 1243 USHORT usPixelClock; // target pixel clock
976typedef struct _ENABLE_YUV_PARAMETERS { 1244 UCHAR ucTransmitterID; // transmitter id defined in objectid.h
977 UCHAR ucEnable; /* ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) */ 1245 UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI
978 UCHAR ucCRTC; /* Which CRTC needs this YUV or RGB format */ 1246 UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
979 UCHAR ucPadding[2]; 1247 UCHAR ucReserved[3];
980} ENABLE_YUV_PARAMETERS; 1248}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
1249
1250// usDispPllConfig v1.2 for RoadRunner
1251#define DISPPLL_CONFIG_DVO_RATE_SEL 0x0001 // need only when ucTransmitterID = DVO
1252#define DISPPLL_CONFIG_DVO_DDR_SPEED 0x0000 // need only when ucTransmitterID = DVO
1253#define DISPPLL_CONFIG_DVO_SDR_SPEED 0x0001 // need only when ucTransmitterID = DVO
1254#define DISPPLL_CONFIG_DVO_OUTPUT_SEL 0x000c // need only when ucTransmitterID = DVO
1255#define DISPPLL_CONFIG_DVO_LOW12BIT 0x0000 // need only when ucTransmitterID = DVO
1256#define DISPPLL_CONFIG_DVO_UPPER12BIT 0x0004 // need only when ucTransmitterID = DVO
1257#define DISPPLL_CONFIG_DVO_24BIT 0x0008 // need only when ucTransmitterID = DVO
1258#define DISPPLL_CONFIG_SS_ENABLE 0x0010 // Only used when ucEncoderMode = DP or LVDS
1259#define DISPPLL_CONFIG_COHERENT_MODE 0x0020 // Only used when ucEncoderMode = TMDS or HDMI
1260#define DISPPLL_CONFIG_DUAL_LINK 0x0040 // Only used when ucEncoderMode = TMDS or LVDS
1261
1262
1263typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3
1264{
1265 ULONG ulDispPllFreq; // return display PPLL freq which is used to generate the pixclock, and related idclk, symclk etc
 1266 UCHAR ucRefDiv; // if non-zero, it is used to calculate the other PPLL parameters fb_divider and post_div ( if they are not given )
 1267 UCHAR ucPostDiv; // if non-zero, it is used to calculate the other PPLL parameter fb_divider
1268 UCHAR ucReserved[2];
1269}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3;
1270
1271typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3
1272{
1273 union
1274 {
1275 ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 sInput;
1276 ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput;
1277 };
1278} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3;
1279
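/*
 * Sketch of the v3 AdjustDisplayPll call flow, not part of the header: the
 * caller fills sInput, the table is executed, and the result comes back
 * through sOutput in the same union. The transmitter id comes from
 * objectid.h, so it is left as a parameter here.
 */
static void example_adjust_display_pll_v3(ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 *args,
					  UCHAR transmitter_id)
{
	args->sInput.usPixelClock = 16200;             /* 162.00 MHz in 10 kHz units */
	args->sInput.ucTransmitterID = transmitter_id; /* defined in objectid.h */
	args->sInput.ucEncodeMode = 0;                 /* 0 = ATOM_ENCODER_MODE_DP as listed earlier */
	args->sInput.ucDispPllConfig = DISPPLL_CONFIG_SS_ENABLE;  /* only for DP or LVDS */
	args->sInput.ucReserved[0] = args->sInput.ucReserved[1] = args->sInput.ucReserved[2] = 0;
	/* ... AdjustDisplayPllTable would be executed here ... */
	/* afterwards args->sOutput.ulDispPllFreq holds the PPLL frequency to program,
	 * and a non-zero ucRefDiv / ucPostDiv fixes those dividers for the caller. */
}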
1280/****************************************************************************/
1281// Structures used by EnableYUVTable
1282/****************************************************************************/
1283typedef struct _ENABLE_YUV_PARAMETERS
1284{
1285 UCHAR ucEnable; // ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB)
1286 UCHAR ucCRTC; // Which CRTC needs this YUV or RGB format
1287 UCHAR ucPadding[2];
1288}ENABLE_YUV_PARAMETERS;
981#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS 1289#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
982 1290
983/****************************************************************************/ 1291/****************************************************************************/
984/* Structures used by GetMemoryClockTable */ 1292// Structures used by GetMemoryClockTable
985/****************************************************************************/ 1293/****************************************************************************/
986typedef struct _GET_MEMORY_CLOCK_PARAMETERS { 1294typedef struct _GET_MEMORY_CLOCK_PARAMETERS
987 ULONG ulReturnMemoryClock; /* current memory speed in 10KHz unit */ 1295{
1296 ULONG ulReturnMemoryClock; // current memory speed in 10KHz unit
988} GET_MEMORY_CLOCK_PARAMETERS; 1297} GET_MEMORY_CLOCK_PARAMETERS;
989#define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS 1298#define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS
990 1299
991/****************************************************************************/ 1300/****************************************************************************/
992/* Structures used by GetEngineClockTable */ 1301// Structures used by GetEngineClockTable
993/****************************************************************************/ 1302/****************************************************************************/
994typedef struct _GET_ENGINE_CLOCK_PARAMETERS { 1303typedef struct _GET_ENGINE_CLOCK_PARAMETERS
995 ULONG ulReturnEngineClock; /* current engine speed in 10KHz unit */ 1304{
1305 ULONG ulReturnEngineClock; // current engine speed in 10KHz unit
996} GET_ENGINE_CLOCK_PARAMETERS; 1306} GET_ENGINE_CLOCK_PARAMETERS;
997#define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS 1307#define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS
998 1308
999/****************************************************************************/ 1309/****************************************************************************/
1000/* Following Structures and constant may be obsolete */ 1310// Following Structures and constant may be obsolete
1001/****************************************************************************/ 1311/****************************************************************************/
1002/* Maximum 8 bytes, the data read in will be placed in the parameter space. */ 1312//Maximum 8 bytes, the data read in will be placed in the parameter space.
1003/* Read operation is successful when the parameter space is non-zero, otherwise the read operation failed */ 1313//Read operation is successful when the parameter space is non-zero, otherwise the read operation failed
1004typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS { 1314typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
1005 USHORT usPrescale; /* Ratio between Engine clock and I2C clock */ 1315{
1006 USHORT usVRAMAddress; /* Address in Frame Buffer where to place raw EDID */ 1316 USHORT usPrescale; //Ratio between Engine clock and I2C clock
1007 USHORT usStatus; /* When used as output: lower byte EDID checksum, high byte hardware status */ 1317 USHORT usVRAMAddress; //Address in Frame Buffer where to place raw EDID
1008 /* When used as input: lower byte as 'bytes to read': currently limited to 128 bytes or 1 byte */ 1318 USHORT usStatus; //When used as output: lower byte EDID checksum, high byte hardware status
1009 UCHAR ucSlaveAddr; /* Read from which slave */ 1319 //When used as input: lower byte as 'bytes to read': currently limited to 128 bytes or 1 byte
1010 UCHAR ucLineNumber; /* Read from which HW assisted line */ 1320 UCHAR ucSlaveAddr; //Read from which slave
1011} READ_EDID_FROM_HW_I2C_DATA_PARAMETERS; 1321 UCHAR ucLineNumber; //Read from which HW assisted line
1322}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
1012#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS 1323#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
1013 1324
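/*
 * Sketch of a ReadEDIDFromHWAssistedI2C request, not part of the header.
 * 0xA0 is the standard DDC EDID slave address; the prescale value and the
 * frame-buffer offset are placeholders chosen for illustration.
 */
static void example_fill_edid_read(READ_EDID_FROM_HW_I2C_DATA_PARAMETERS *p,
				   USHORT prescale, UCHAR i2c_line)
{
	p->usPrescale = prescale;   /* engine clock / I2C clock ratio */
	p->usVRAMAddress = 0;       /* frame buffer offset that receives the raw EDID */
	p->usStatus = 128;          /* on input: number of bytes to read (128 or 1) */
	p->ucSlaveAddr = 0xA0;      /* DDC EDID slave address */
	p->ucLineNumber = i2c_line; /* HW assisted I2C line to use */
}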
1325
1014#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE 0 1326#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE 0
1015#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES 1 1327#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES 1
1016#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK 2 1328#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK 2
1017#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK 3 1329#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK 3
1018#define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK 4 1330#define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK 4
1019 1331
1020typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS { 1332typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
1021 USHORT usPrescale; /* Ratio between Engine clock and I2C clock */ 1333{
1022 USHORT usByteOffset; /* Write to which byte */ 1334 USHORT usPrescale; //Ratio between Engine clock and I2C clock
1023 /* Upper portion of usByteOffset is Format of data */ 1335 USHORT usByteOffset; //Write to which byte
1024 /* 1bytePS+offsetPS */ 1336 //Upper portion of usByteOffset is Format of data
1025 /* 2bytesPS+offsetPS */ 1337 //1bytePS+offsetPS
1026 /* blockID+offsetPS */ 1338 //2bytesPS+offsetPS
1027 /* blockID+offsetID */ 1339 //blockID+offsetPS
1028 /* blockID+counterID+offsetID */ 1340 //blockID+offsetID
1029 UCHAR ucData; /* PS data1 */ 1341 //blockID+counterID+offsetID
1030 UCHAR ucStatus; /* Status byte 1=success, 2=failure, Also is used as PS data2 */ 1342 UCHAR ucData; //PS data1
1031 UCHAR ucSlaveAddr; /* Write to which slave */ 1343 UCHAR ucStatus; //Status byte 1=success, 2=failure, Also is used as PS data2
1032 UCHAR ucLineNumber; /* Write from which HW assisted line */ 1344 UCHAR ucSlaveAddr; //Write to which slave
1033} WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS; 1345 UCHAR ucLineNumber; //Write from which HW assisted line
1346}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
1034 1347
1035#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS 1348#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
1036 1349
1037typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS { 1350typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
1038 USHORT usPrescale; /* Ratio between Engine clock and I2C clock */ 1351{
1039 UCHAR ucSlaveAddr; /* Write to which slave */ 1352 USHORT usPrescale; //Ratio between Engine clock and I2C clock
1040 UCHAR ucLineNumber; /* Write from which HW assisted line */ 1353 UCHAR ucSlaveAddr; //Write to which slave
1041} SET_UP_HW_I2C_DATA_PARAMETERS; 1354 UCHAR ucLineNumber; //Write from which HW assisted line
1355}SET_UP_HW_I2C_DATA_PARAMETERS;
1356
1042 1357
1043/**************************************************************************/ 1358/**************************************************************************/
1044#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS 1359#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
1045 1360
1046/****************************************************************************/ 1361/****************************************************************************/
1047/* Structures used by PowerConnectorDetectionTable */ 1362// Structures used by PowerConnectorDetectionTable
1048/****************************************************************************/ 1363/****************************************************************************/
1049typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS { 1364typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS
1050 UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */ 1365{
1051 UCHAR ucPwrBehaviorId; 1366 UCHAR ucPowerConnectorStatus; //Used for return value 0: detected, 1:not detected
1052 USHORT usPwrBudget; /* how much power currently boot to in unit of watt */ 1367 UCHAR ucPwrBehaviorId;
1053} POWER_CONNECTOR_DETECTION_PARAMETERS; 1368 USHORT usPwrBudget; //how much power currently boot to in unit of watt
1054 1369}POWER_CONNECTOR_DETECTION_PARAMETERS;
1055typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION { 1370
1056 UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */ 1371typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION
1057 UCHAR ucReserved; 1372{
1058 USHORT usPwrBudget; /* how much power currently boot to in unit of watt */ 1373 UCHAR ucPowerConnectorStatus; //Used for return value 0: detected, 1:not detected
1059 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; 1374 UCHAR ucReserved;
1060} POWER_CONNECTOR_DETECTION_PS_ALLOCATION; 1375 USHORT usPwrBudget; //how much power currently boot to in unit of watt
1376 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
1377}POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
1061 1378
1062/****************************LVDS SS Command Table Definitions**********************/ 1379/****************************LVDS SS Command Table Definitions**********************/
1063 1380
1064/****************************************************************************/ 1381/****************************************************************************/
1065/* Structures used by EnableSpreadSpectrumOnPPLLTable */ 1382// Structures used by EnableSpreadSpectrumOnPPLLTable
1066/****************************************************************************/ 1383/****************************************************************************/
1067typedef struct _ENABLE_LVDS_SS_PARAMETERS { 1384typedef struct _ENABLE_LVDS_SS_PARAMETERS
1068 USHORT usSpreadSpectrumPercentage; 1385{
1069 UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ 1386 USHORT usSpreadSpectrumPercentage;
1070 UCHAR ucSpreadSpectrumStepSize_Delay; /* bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY */ 1387 UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
1071 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 1388 UCHAR ucSpreadSpectrumStepSize_Delay; //bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY
1072 UCHAR ucPadding[3]; 1389 UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE
1073} ENABLE_LVDS_SS_PARAMETERS; 1390 UCHAR ucPadding[3];
1074 1391}ENABLE_LVDS_SS_PARAMETERS;
1075/* ucTableFormatRevision=1,ucTableContentRevision=2 */ 1392
1076typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2 { 1393//ucTableFormatRevision=1,ucTableContentRevision=2
1077 USHORT usSpreadSpectrumPercentage; 1394typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2
1078 UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ 1395{
1079 UCHAR ucSpreadSpectrumStep; /* */ 1396 USHORT usSpreadSpectrumPercentage;
1080 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 1397 UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
1081 UCHAR ucSpreadSpectrumDelay; 1398 UCHAR ucSpreadSpectrumStep; //
1082 UCHAR ucSpreadSpectrumRange; 1399 UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE
1083 UCHAR ucPadding; 1400 UCHAR ucSpreadSpectrumDelay;
1084} ENABLE_LVDS_SS_PARAMETERS_V2; 1401 UCHAR ucSpreadSpectrumRange;
1085 1402 UCHAR ucPadding;
1086/* This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS. */ 1403}ENABLE_LVDS_SS_PARAMETERS_V2;
1087typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL { 1404
1088 USHORT usSpreadSpectrumPercentage; 1405//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS.
1089 UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ 1406typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL
1090 UCHAR ucSpreadSpectrumStep; /* */ 1407{
1091 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 1408 USHORT usSpreadSpectrumPercentage;
1092 UCHAR ucSpreadSpectrumDelay; 1409 UCHAR ucSpreadSpectrumType; // Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
1093 UCHAR ucSpreadSpectrumRange; 1410 UCHAR ucSpreadSpectrumStep; //
1094 UCHAR ucPpll; /* ATOM_PPLL1/ATOM_PPLL2 */ 1411 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
1095} ENABLE_SPREAD_SPECTRUM_ON_PPLL; 1412 UCHAR ucSpreadSpectrumDelay;
1413 UCHAR ucSpreadSpectrumRange;
1414 UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2
1415}ENABLE_SPREAD_SPECTRUM_ON_PPLL;
1416
1417typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
1418{
1419 USHORT usSpreadSpectrumPercentage;
1420 UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread.
1421 // Bit[1]: 1-Ext. 0-Int.
1422 // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
1423 // Bits[7:4] reserved
1424 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
1425 USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
1426 USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC
1427}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2;
1428
1429#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD 0x00
1430#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD 0x01
1431#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD 0x02
1432#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK 0x0c
1433#define ATOM_PPLL_SS_TYPE_V2_P1PLL 0x00
1434#define ATOM_PPLL_SS_TYPE_V2_P2PLL 0x04
1435#define ATOM_PPLL_SS_TYPE_V2_DCPLL 0x08
1436#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK 0x00FF
1437#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT 0
1438#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00
1439#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8
1096 1440
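/*
 * Sketch of packing the v2 spread spectrum request from the masks above,
 * not part of the header: centre spread on P1PLL with a given fbdiv / nfrac
 * amount pair.
 */
static void example_fill_ss_on_ppll_v2(ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 *p,
				       USHORT percentage, UCHAR fbdiv, UCHAR nfrac)
{
	p->usSpreadSpectrumPercentage = percentage;
	p->ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD |
				  ATOM_PPLL_SS_TYPE_V2_P1PLL;
	p->ucEnable = ATOM_ENABLE;   /* defined earlier in this header */
	p->usSpreadSpectrumAmount =
		((fbdiv << ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) |
		((nfrac << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK);
	p->usSpreadSpectrumStep = 0; /* SS_STEP_SIZE_DSFRAC, hardware specific */
}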
1097#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL 1441#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
1098 1442
1099/**************************************************************************/ 1443/**************************************************************************/
1100 1444
1101typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION { 1445typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION
1102 PIXEL_CLOCK_PARAMETERS sPCLKInput; 1446{
1103 ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved; /* Caller doesn't need to init this portion */ 1447 PIXEL_CLOCK_PARAMETERS sPCLKInput;
1104} SET_PIXEL_CLOCK_PS_ALLOCATION; 1448 ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;//Caller doesn't need to init this portion
1449}SET_PIXEL_CLOCK_PS_ALLOCATION;
1105 1450
1106#define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION 1451#define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION
1107 1452
1108/****************************************************************************/ 1453/****************************************************************************/
1109/* Structures used by ### */ 1454// Structures used by ###
1110/****************************************************************************/ 1455/****************************************************************************/
1111typedef struct _MEMORY_TRAINING_PARAMETERS { 1456typedef struct _MEMORY_TRAINING_PARAMETERS
1112 ULONG ulTargetMemoryClock; /* In 10Khz unit */ 1457{
1113} MEMORY_TRAINING_PARAMETERS; 1458 ULONG ulTargetMemoryClock; //In 10Khz unit
1459}MEMORY_TRAINING_PARAMETERS;
1114#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS 1460#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
1115 1461
1462
1116/****************************LVDS and other encoder command table definitions **********************/ 1463/****************************LVDS and other encoder command table definitions **********************/
1117 1464
1118/****************************************************************************/
1119/* Structures used by LVDSEncoderControlTable (Before DCE30) */
1120/* LVTMAEncoderControlTable (Before DCE30) */
1121/* TMDSAEncoderControlTable (Before DCE30) */
1122/****************************************************************************/
1123typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS {
1124 USHORT usPixelClock; /* in 10KHz; for bios convenient */
1125 UCHAR ucMisc; /* bit0=0: Enable single link */
1126 /* =1: Enable dual link */
1127 /* Bit1=0: 666RGB */
1128 /* =1: 888RGB */
1129 UCHAR ucAction; /* 0: turn off encoder */
1130 /* 1: setup and turn on encoder */
1131} LVDS_ENCODER_CONTROL_PARAMETERS;
1132 1465
1133#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS 1466/****************************************************************************/
1467// Structures used by LVDSEncoderControlTable (Before DCE30)
1468// LVTMAEncoderControlTable (Before DCE30)
1469// TMDSAEncoderControlTable (Before DCE30)
1470/****************************************************************************/
1471typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS
1472{
1473 USHORT usPixelClock; // in 10KHz; for bios convenient
1474 UCHAR ucMisc; // bit0=0: Enable single link
1475 // =1: Enable dual link
1476 // Bit1=0: 666RGB
1477 // =1: 888RGB
1478 UCHAR ucAction; // 0: turn off encoder
1479 // 1: setup and turn on encoder
1480}LVDS_ENCODER_CONTROL_PARAMETERS;
1134 1481
1482#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS
1483
1135#define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS 1484#define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS
1136#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS 1485#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS
1137 1486
1138#define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS 1487#define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS
1139#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS 1488#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
1140 1489
1141/* ucTableFormatRevision=1,ucTableContentRevision=2 */
1142typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
1143 USHORT usPixelClock; /* in 10KHz; for bios convenient */
1144 UCHAR ucMisc; /* see PANEL_ENCODER_MISC_xx definitions below */
1145 UCHAR ucAction; /* 0: turn off encoder */
1146 /* 1: setup and turn on encoder */
1147 UCHAR ucTruncate; /* bit0=0: Disable truncate */
1148 /* =1: Enable truncate */
1149 /* bit4=0: 666RGB */
1150 /* =1: 888RGB */
1151 UCHAR ucSpatial; /* bit0=0: Disable spatial dithering */
1152 /* =1: Enable spatial dithering */
1153 /* bit4=0: 666RGB */
1154 /* =1: 888RGB */
1155 UCHAR ucTemporal; /* bit0=0: Disable temporal dithering */
1156 /* =1: Enable temporal dithering */
1157 /* bit4=0: 666RGB */
1158 /* =1: 888RGB */
1159 /* bit5=0: Gray level 2 */
1160 /* =1: Gray level 4 */
1161 UCHAR ucFRC; /* bit4=0: 25FRC_SEL pattern E */
1162 /* =1: 25FRC_SEL pattern F */
1163 /* bit6:5=0: 50FRC_SEL pattern A */
1164 /* =1: 50FRC_SEL pattern B */
1165 /* =2: 50FRC_SEL pattern C */
1166 /* =3: 50FRC_SEL pattern D */
1167 /* bit7=0: 75FRC_SEL pattern E */
1168 /* =1: 75FRC_SEL pattern F */
1169} LVDS_ENCODER_CONTROL_PARAMETERS_V2;
1170 1490
1171#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 1491//ucTableFormatRevision=1,ucTableContentRevision=2
1492typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2
1493{
1494 USHORT usPixelClock; // in 10KHz; for bios convenient
 1495 UCHAR ucMisc; // see PANEL_ENCODER_MISC_xx definitions below
1496 UCHAR ucAction; // 0: turn off encoder
1497 // 1: setup and turn on encoder
1498 UCHAR ucTruncate; // bit0=0: Disable truncate
1499 // =1: Enable truncate
1500 // bit4=0: 666RGB
1501 // =1: 888RGB
1502 UCHAR ucSpatial; // bit0=0: Disable spatial dithering
1503 // =1: Enable spatial dithering
1504 // bit4=0: 666RGB
1505 // =1: 888RGB
1506 UCHAR ucTemporal; // bit0=0: Disable temporal dithering
1507 // =1: Enable temporal dithering
1508 // bit4=0: 666RGB
1509 // =1: 888RGB
1510 // bit5=0: Gray level 2
1511 // =1: Gray level 4
1512 UCHAR ucFRC; // bit4=0: 25FRC_SEL pattern E
1513 // =1: 25FRC_SEL pattern F
1514 // bit6:5=0: 50FRC_SEL pattern A
1515 // =1: 50FRC_SEL pattern B
1516 // =2: 50FRC_SEL pattern C
1517 // =3: 50FRC_SEL pattern D
1518 // bit7=0: 75FRC_SEL pattern E
1519 // =1: 75FRC_SEL pattern F
1520}LVDS_ENCODER_CONTROL_PARAMETERS_V2;
1172 1521
1522#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
1523
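/*
 * Sketch of an 888RGB LVDS setup with spatial and temporal dithering,
 * derived from the bit comments above; not part of the header. ucMisc uses
 * the PANEL_ENCODER_MISC_xx values defined further down, so it is left to
 * the caller here.
 */
static void example_fill_lvds_encoder_v2(LVDS_ENCODER_CONTROL_PARAMETERS_V2 *p,
					 UCHAR misc)
{
	p->usPixelClock = 10800;  /* 108.00 MHz in 10 kHz units */
	p->ucMisc = misc;
	p->ucAction = 1;          /* 1: setup and turn on encoder */
	p->ucTruncate = 0x10;     /* bit4=1: 888RGB, truncation disabled */
	p->ucSpatial = 0x11;      /* bit0=1: enable spatial dithering, bit4=1: 888RGB */
	p->ucTemporal = 0x31;     /* bit0=1: enable, bit4=1: 888RGB, bit5=1: gray level 4 */
	p->ucFRC = 0;             /* default FRC pattern selection */
}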
1173#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 1524#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
1174#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2 1525#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
1175 1526
1176#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2 1527#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
1177#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2 1528#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2
1178 1529
@@ -1185,38 +1536,42 @@ typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3    LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3

/****************************************************************************/
// Structures used by ###
/****************************************************************************/
typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS
{
  UCHAR ucEnable;       // Enable or Disable External TMDS encoder
  UCHAR ucMisc;         // Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB}
  UCHAR ucPadding[2];
}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;

typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION
{
  ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS    sXTmdsEncoder;
  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION   sReserved;     // Caller doesn't need to init this portion
}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;

#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2  LVDS_ENCODER_CONTROL_PARAMETERS_V2

typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2
{
  ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION   sReserved;     // Caller doesn't need to init this portion
}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;

typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION
{
  DIG_ENCODER_CONTROL_PARAMETERS             sDigEncoder;
  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION   sReserved;
}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;

/****************************************************************************/
// Structures used by DVOEncoderControlTable
/****************************************************************************/
//ucTableFormatRevision=1,ucTableContentRevision=3

//ucDVOConfig:
#define DVO_ENCODER_CONFIG_RATE_SEL            0x01
#define DVO_ENCODER_CONFIG_DDR_SPEED           0x00
#define DVO_ENCODER_CONFIG_SDR_SPEED           0x01
@@ -1225,21 +1580,22 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION {
#define DVO_ENCODER_CONFIG_UPPER12BIT          0x04
#define DVO_ENCODER_CONFIG_24BIT               0x08

typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
{
  USHORT usPixelClock;
  UCHAR  ucDVOConfig;
  UCHAR  ucAction;              // ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
  UCHAR  ucReseved[4];
}DVO_ENCODER_CONTROL_PARAMETERS_V3;
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3  DVO_ENCODER_CONTROL_PARAMETERS_V3

//ucTableFormatRevision=1
//ucTableContentRevision=3: the structure is not changed, but usMisc adds bit 1 as another input:
// bit1=0: non-coherent mode
//     =1: coherent mode

//==========================================================================================
//Only change is here next time when changing encoder parameter definitions again!
#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST     LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST  LVDS_ENCODER_CONTROL_PARAMETERS_LAST
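// Usage sketch (illustrative only): building ucDVOConfig from the flags above.
// ATOM_ENABLE is defined earlier in this header; treating usPixelClock as a
// 10 kHz value, like the other encoder-control tables here, is an assumption.
static void example_fill_dvo_encoder_control_v3(DVO_ENCODER_CONTROL_PARAMETERS_V3 *p)
{
  p->usPixelClock = 16500;                           // 165 MHz in 10 kHz units (assumed convention)
  p->ucDVOConfig  = DVO_ENCODER_CONFIG_SDR_SPEED |   // rate select: SDR
                    DVO_ENCODER_CONFIG_24BIT;        // 24-bit interface
  p->ucAction     = ATOM_ENABLE;                     // or ATOM_DISABLE / ATOM_HPD_INIT
  p->ucReseved[0] = p->ucReseved[1] = p->ucReseved[2] = p->ucReseved[3] = 0;
}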
@@ -1252,7 +1608,7 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
#define DVO_ENCODER_CONTROL_PARAMETERS_LAST      DVO_ENCODER_CONTROL_PARAMETERS
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST   DVO_ENCODER_CONTROL_PS_ALLOCATION

//==========================================================================================
#define PANEL_ENCODER_MISC_DUAL                0x01
#define PANEL_ENCODER_MISC_COHERENT            0x02
#define PANEL_ENCODER_MISC_TMDS_LINKB          0x04
@@ -1281,159 +1637,159 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
#define PANEL_ENCODER_75FRC_E                  0x00
#define PANEL_ENCODER_75FRC_F                  0x80

/****************************************************************************/
// Structures used by SetVoltageTable
/****************************************************************************/
#define SET_VOLTAGE_TYPE_ASIC_VDDC             1
#define SET_VOLTAGE_TYPE_ASIC_MVDDC            2
#define SET_VOLTAGE_TYPE_ASIC_MVDDQ            3
#define SET_VOLTAGE_TYPE_ASIC_VDDCI            4
#define SET_VOLTAGE_INIT_MODE                  5
#define SET_VOLTAGE_GET_MAX_VOLTAGE            6   //Gets the Max. voltage for the soldered Asic

#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE       0x1
#define SET_ASIC_VOLTAGE_MODE_SOURCE_A         0x2
#define SET_ASIC_VOLTAGE_MODE_SOURCE_B         0x4

#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE      0x0
#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL      0x1
#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK     0x2

typedef struct _SET_VOLTAGE_PARAMETERS
{
  UCHAR    ucVoltageType;       // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
  UCHAR    ucVoltageMode;       // To set all, to set source A or source B or ...
  UCHAR    ucVoltageIndex;      // An index to tell which voltage level
  UCHAR    ucReserved;
}SET_VOLTAGE_PARAMETERS;

typedef struct _SET_VOLTAGE_PARAMETERS_V2
{
  UCHAR    ucVoltageType;       // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
  UCHAR    ucVoltageMode;       // Not used, maybe use for state machine for different power mode
  USHORT   usVoltageLevel;      // real voltage level
}SET_VOLTAGE_PARAMETERS_V2;

typedef struct _SET_VOLTAGE_PS_ALLOCATION
{
  SET_VOLTAGE_PARAMETERS                    sASICSetVoltage;
  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION  sReserved;
}SET_VOLTAGE_PS_ALLOCATION;
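// Usage sketch (illustrative only): preparing a SetVoltageTable request to select
// core (VDDC) voltage level 2 on all regulator sources.  How the allocation is
// handed to the command-table executor is driver-specific and not shown here.
static void example_fill_set_voltage(SET_VOLTAGE_PS_ALLOCATION *alloc)
{
  alloc->sASICSetVoltage.ucVoltageType  = SET_VOLTAGE_TYPE_ASIC_VDDC;
  alloc->sASICSetVoltage.ucVoltageMode  = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
  alloc->sASICSetVoltage.ucVoltageIndex = 2;   // board-specific voltage level index
  alloc->sASICSetVoltage.ucReserved     = 0;
  // alloc->sReserved is consumed by the VBIOS itself and needs no initialization.
}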

/****************************************************************************/
// Structures used by TVEncoderControlTable
/****************************************************************************/
typedef struct _TV_ENCODER_CONTROL_PARAMETERS
{
  USHORT usPixelClock;      // in 10KHz; for bios convenience
  UCHAR  ucTvStandard;      // See definition "ATOM_TV_NTSC ..."
  UCHAR  ucAction;          // 0: turn off encoder
                            // 1: setup and turn on encoder
}TV_ENCODER_CONTROL_PARAMETERS;

typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION
{
  TV_ENCODER_CONTROL_PARAMETERS             sTVEncoder;
  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION  sReserved;     // Don't set this one
}TV_ENCODER_CONTROL_PS_ALLOCATION;

//==============================Data Table Portion====================================

/****************************************************************************/
// Structure used in Data.mtb
/****************************************************************************/
typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
{
  USHORT        UtilityPipeLine;          // Offset for the utility to get parser info, Don't change this position!
  USHORT        MultimediaCapabilityInfo; // Only used by MM Lib, latest version 1.1, not configurable from Bios, need to include the table to build Bios
  USHORT        MultimediaConfigInfo;     // Only used by MM Lib, latest version 2.1, not configurable from Bios, need to include the table to build Bios
  USHORT        StandardVESA_Timing;      // Only used by Bios
  USHORT        FirmwareInfo;             // Shared by various SW components, latest version 1.4
  USHORT        DAC_Info;                 // Will be obsolete from R600
  USHORT        LVDS_Info;                // Shared by various SW components, latest version 1.1
  USHORT        TMDS_Info;                // Will be obsolete from R600
  USHORT        AnalogTV_Info;            // Shared by various SW components, latest version 1.1
  USHORT        SupportedDevicesInfo;     // Will be obsolete from R600
  USHORT        GPIO_I2C_Info;            // Shared by various SW components, latest version 1.2 will be used from R600
  USHORT        VRAM_UsageByFirmware;     // Shared by various SW components, latest version 1.3 will be used from R600
  USHORT        GPIO_Pin_LUT;             // Shared by various SW components, latest version 1.1
  USHORT        VESA_ToInternalModeLUT;   // Only used by Bios
  USHORT        ComponentVideoInfo;       // Shared by various SW components, latest version 2.1 will be used from R600
  USHORT        PowerPlayInfo;            // Shared by various SW components, latest version 2.1, new design from R600
  USHORT        CompassionateData;        // Will be obsolete from R600
  USHORT        SaveRestoreInfo;          // Only used by Bios
  USHORT        PPLL_SS_Info;             // Shared by various SW components, latest version 1.2, used to call SS_Info, change to new name because of int ASIC SS info
  USHORT        OemInfo;                  // Defined and used by external SW, should be obsolete soon
  USHORT        XTMDS_Info;               // Will be obsolete from R600
  USHORT        MclkSS_Info;              // Shared by various SW components, latest version 1.1, only enabled when ext SS chip is used
  USHORT        Object_Header;            // Shared by various SW components, latest version 1.1
  USHORT        IndirectIOAccess;         // Only used by Bios, this table position can't change at all!!
  USHORT        MC_InitParameter;         // Only used by command table
  USHORT        ASIC_VDDC_Info;           // Will be obsolete from R600
  USHORT        ASIC_InternalSS_Info;     // New table name from R600, used to be called "ASIC_MVDDC_Info"
  USHORT        TV_VideoMode;             // Only used by command table
  USHORT        VRAM_Info;                // Only used by command table, latest version 1.3
  USHORT        MemoryTrainingInfo;       // Used for VBIOS and Diag utility for memory training purpose since R600. The new table rev starts from 2.1
  USHORT        IntegratedSystemInfo;     // Shared by various SW components
  USHORT        ASIC_ProfilingInfo;       // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600
  USHORT        VoltageObjectInfo;        // Shared by various SW components, latest version 1.1
  USHORT        PowerSourceInfo;          // Shared by various SW components, latest version 1.1
}ATOM_MASTER_LIST_OF_DATA_TABLES;

typedef struct _ATOM_MASTER_DATA_TABLE
{
  ATOM_COMMON_TABLE_HEADER           sHeader;
  ATOM_MASTER_LIST_OF_DATA_TABLES    ListOfDataTables;
}ATOM_MASTER_DATA_TABLE;
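// Usage sketch (illustrative only): each USHORT above is commonly treated as a byte
// offset from the start of the VBIOS image; a zero entry means the table is absent.
// The bios_image parameter and this helper are assumptions for illustration, not
// part of the ATOM definitions.
static void *example_get_data_table(UCHAR *bios_image, ATOM_MASTER_DATA_TABLE *master)
{
  USHORT offset = master->ListOfDataTables.FirmwareInfo;  // any entry is resolved the same way
  if (offset == 0)
    return 0;                       // table not present in this BIOS
  return bios_image + offset;       // caller then checks the ATOM_COMMON_TABLE_HEADER revision
}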

/****************************************************************************/
// Structure used in MultimediaCapabilityInfoTable
/****************************************************************************/
typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO
{
  ATOM_COMMON_TABLE_HEADER    sHeader;
  ULONG                       ulSignature;      // HW info table signature string "$ATI"
  UCHAR                       ucI2C_Type;       // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc)
  UCHAR                       ucTV_OutInfo;     // Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7)
  UCHAR                       ucVideoPortInfo;  // Provides the video port capabilities
  UCHAR                       ucHostPortInfo;   // Provides host port configuration information
}ATOM_MULTIMEDIA_CAPABILITY_INFO;

/****************************************************************************/
// Structure used in MultimediaConfigInfoTable
/****************************************************************************/
typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
{
  ATOM_COMMON_TABLE_HEADER    sHeader;
  ULONG                       ulSignature;      // MM info table signature string "$MMT"
  UCHAR                       ucTunerInfo;      // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5)
  UCHAR                       ucAudioChipInfo;  // List the audio chip type (3:0) product type (4) and OEM revision (7:5)
  UCHAR                       ucProductID;      // Defines as OEM ID or ATI board ID dependent on product type setting
  UCHAR                       ucMiscInfo1;      // Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7)
  UCHAR                       ucMiscInfo2;      // I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6)
  UCHAR                       ucMiscInfo3;      // Video Decoder Type (3:0) Video In Standard/Crystal (7:4)
  UCHAR                       ucMiscInfo4;      // Video Decoder Host Config (2:0) reserved (7:3)
  UCHAR                       ucVideoInput0Info;// Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
  UCHAR                       ucVideoInput1Info;// Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
  UCHAR                       ucVideoInput2Info;// Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
  UCHAR                       ucVideoInput3Info;// Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
  UCHAR                       ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
}ATOM_MULTIMEDIA_CONFIG_INFO;

/****************************************************************************/
// Structures used in FirmwareInfoTable
/****************************************************************************/

// usBIOSCapability Definition:
// Bit 0 = 0: Bios image is not Posted, =1: Bios image is Posted;
// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported;
// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported;
// Others: Reserved
#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED         0x0001
#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT            0x0002
#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT     0x0004
#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT      0x0008      // (valid from v1.1 ~v1.4):=1: memclk SS enable, =0 memclk SS disable.
#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT      0x0010      // (valid from v1.1 ~v1.4):=1: engclk SS enable, =0 engclk SS disable.
#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU         0x0020
#define ATOM_BIOS_INFO_WMI_SUPPORT                  0x0040
#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM   0x0080
@@ -1441,242 +1797,292 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK        0x1E00
#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE  0x4000
#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT  0x0008      // (valid from v2.1 ): =1: memclk ss enable with external ss chip
#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT  0x0010      // (valid from v2.1 ): =1: engclk ss enable with external ss chip

#ifndef _H2INC

//Please don't add or expand this bitfield structure below, this one will retire soon.!
typedef struct _ATOM_FIRMWARE_CAPABILITY
{
#if ATOM_BIG_ENDIAN
  USHORT Reserved:3;
  USHORT HyperMemory_Size:4;
  USHORT HyperMemory_Support:1;
  USHORT PPMode_Assigned:1;
  USHORT WMI_SUPPORT:1;
  USHORT GPUControlsBL:1;
  USHORT EngineClockSS_Support:1;
  USHORT MemoryClockSS_Support:1;
  USHORT ExtendedDesktopSupport:1;
  USHORT DualCRTC_Support:1;
  USHORT FirmwarePosted:1;
#else
  USHORT FirmwarePosted:1;
  USHORT DualCRTC_Support:1;
  USHORT ExtendedDesktopSupport:1;
  USHORT MemoryClockSS_Support:1;
  USHORT EngineClockSS_Support:1;
  USHORT GPUControlsBL:1;
  USHORT WMI_SUPPORT:1;
  USHORT PPMode_Assigned:1;
  USHORT HyperMemory_Support:1;
  USHORT HyperMemory_Size:4;
  USHORT Reserved:3;
#endif
}ATOM_FIRMWARE_CAPABILITY;

typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
{
  ATOM_FIRMWARE_CAPABILITY sbfAccess;
  USHORT                   susAccess;
}ATOM_FIRMWARE_CAPABILITY_ACCESS;

#else

typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
{
  USHORT                   susAccess;
}ATOM_FIRMWARE_CAPABILITY_ACCESS;

#endif

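// Usage sketch (illustrative only): the same capability word can be tested either
// through the raw USHORT with the ATOM_BIOS_INFO_* masks or, when the bitfield is
// compiled in, through cap.sbfAccess.FirmwarePosted.  The helper name is an assumption.
static int example_bios_is_posted(ATOM_FIRMWARE_CAPABILITY_ACCESS cap)
{
  // The mask form works regardless of the _H2INC / endian configuration above.
  return (cap.susAccess & ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED) != 0;
}
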
typedef struct _ATOM_FIRMWARE_INFO
{
  ATOM_COMMON_TABLE_HEADER        sHeader;
  ULONG                           ulFirmwareRevision;
  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
  UCHAR                           ucASICMaxTemperature;
  UCHAR                           ucPadding[3];               //Don't use them
  ULONG                           aulReservedForBIOS[3];      //Don't use them
  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max. Pclk
  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit, the definitions above can't change!!!
  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
  USHORT                          usReferenceClock;           //In 10Khz unit
  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit
  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
  UCHAR                           ucDesign_ID;                //Indicate what is the board design
  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
}ATOM_FIRMWARE_INFO;
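// Usage sketch (illustrative only): nearly every clock in this table is stored in
// 10 kHz units, so a value of 60000 means 600 MHz.  The helper names are assumptions.
static ULONG example_atom_clock_to_khz(ULONG clock_in_10khz)
{
  return clock_in_10khz * 10;                 // 10 kHz units -> kHz
}

static ULONG example_default_engine_clock_khz(ATOM_FIRMWARE_INFO *info)
{
  return example_atom_clock_to_khz(info->ulDefaultEngineClock);
}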

typedef struct _ATOM_FIRMWARE_INFO_V1_2
{
  ATOM_COMMON_TABLE_HEADER        sHeader;
  ULONG                           ulFirmwareRevision;
  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
  UCHAR                           ucASICMaxTemperature;
  UCHAR                           ucMinAllowedBL_Level;
  UCHAR                           ucPadding[2];               //Don't use them
  ULONG                           aulReservedForBIOS[2];      //Don't use them
  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max. Pclk
  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
  USHORT                          usReferenceClock;           //In 10Khz unit
  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit
  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
  UCHAR                           ucDesign_ID;                //Indicate what is the board design
  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
}ATOM_FIRMWARE_INFO_V1_2;

typedef struct _ATOM_FIRMWARE_INFO_V1_3
{
  ATOM_COMMON_TABLE_HEADER        sHeader;
  ULONG                           ulFirmwareRevision;
  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
  UCHAR                           ucASICMaxTemperature;
  UCHAR                           ucMinAllowedBL_Level;
  UCHAR                           ucPadding[2];               //Don't use them
  ULONG                           aulReservedForBIOS;         //Don't use them
  ULONG                           ul3DAccelerationEngineClock;//In 10Khz unit
  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max. Pclk
  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
  USHORT                          usReferenceClock;           //In 10Khz unit
  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit
  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
  UCHAR                           ucDesign_ID;                //Indicate what is the board design
  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
}ATOM_FIRMWARE_INFO_V1_3;

typedef struct _ATOM_FIRMWARE_INFO_V1_4
{
  ATOM_COMMON_TABLE_HEADER        sHeader;
  ULONG                           ulFirmwareRevision;
  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
  UCHAR                           ucASICMaxTemperature;
  UCHAR                           ucMinAllowedBL_Level;
  USHORT                          usBootUpVDDCVoltage;        //In MV unit
  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
  ULONG                           ul3DAccelerationEngineClock;//In 10Khz unit
  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max. Pclk
  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
  USHORT                          usReferenceClock;           //In 10Khz unit
  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit
  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
  UCHAR                           ucDesign_ID;                //Indicate what is the board design
  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
}ATOM_FIRMWARE_INFO_V1_4;

// the structure below is to be used from Cypress
typedef struct _ATOM_FIRMWARE_INFO_V2_1
{
  ATOM_COMMON_TABLE_HEADER        sHeader;
  ULONG                           ulFirmwareRevision;
  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
  ULONG                           ulReserved1;
  ULONG                           ulReserved2;
  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
  ULONG                           ulBinaryAlteredInfo;        //Was ulASICMaxEngineClock
  ULONG                           ulDefaultDispEngineClkFreq; //In 10Khz unit
  UCHAR                           ucReserved1;                //Was ucASICMaxTemperature;
  UCHAR                           ucMinAllowedBL_Level;
  USHORT                          usBootUpVDDCVoltage;        //In MV unit
  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
  ULONG                           ulReserved4;                //Was ulAsicMaximumVoltage
  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max. Pclk
  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
  USHORT                          usCoreReferenceClock;       //In 10Khz unit
  USHORT                          usMemoryReferenceClock;     //In 10Khz unit
  USHORT                          usUniphyDPModeExtClkFreq;   //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock
  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
  UCHAR                           ucReserved4[3];
}ATOM_FIRMWARE_INFO_V2_1;


#define ATOM_FIRMWARE_INFO_LAST  ATOM_FIRMWARE_INFO_V2_1

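// Usage sketch (illustrative only): from v1.2 on, the minimum pixel-clock PLL output
// is carried as a full ULONG while the USHORT field only mirrors its lower 16 bits,
// so readers should prefer the 32-bit value.  The helper name and the fallback
// behaviour are assumptions.
static ULONG example_min_pixel_clock_pll_output(ATOM_FIRMWARE_INFO_V2_1 *info)
{
  if (info->ulMinPixelClockPLL_Output != 0)
    return info->ulMinPixelClockPLL_Output;     // full value, in 10 kHz units
  return info->usMinPixelClockPLL_Output;       // legacy lower 16 bits only
}
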
/****************************************************************************/
// Structures used in IntegratedSystemInfoTable
/****************************************************************************/
#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN      0x2
#define IGP_CAP_FLAG_AC_CARD               0x4
#define IGP_CAP_FLAG_SDVO_CARD             0x8
#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE     0x10

typedef struct _ATOM_INTEGRATED_SYSTEM_INFO
{
  ATOM_COMMON_TABLE_HEADER   sHeader;
  ULONG                      ulBootUpEngineClock;          //in 10kHz unit
  ULONG                      ulBootUpMemoryClock;          //in 10kHz unit
  ULONG                      ulMaxSystemMemoryClock;       //in 10kHz unit
  ULONG                      ulMinSystemMemoryClock;       //in 10kHz unit
  UCHAR                      ucNumberOfCyclesInPeriodHi;
  UCHAR                      ucLCDTimingSel;               //=0:not valid.!=0 sel this timing descriptor from LCD EDID.
  USHORT                     usReserved1;
  USHORT                     usInterNBVoltageLow;          //An intermediate PWM value to set the voltage
  USHORT                     usInterNBVoltageHigh;         //Another intermediate PWM value to set the voltage
  ULONG                      ulReserved[2];

  USHORT                     usFSBClock;                   //In MHz unit
  USHORT                     usCapabilityFlag;             //Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable
                                                           //Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card
                                                           //Bit[4]==1: P/2 mode, ==0: P/1 mode
  USHORT                     usPCIENBCfgReg7;              //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal
  USHORT                     usK8MemoryClock;              //in MHz unit
  USHORT                     usK8SyncStartDelay;           //in 0.01 us unit
  USHORT                     usK8DataReturnTime;           //in 0.01 us unit
  UCHAR                      ucMaxNBVoltage;
  UCHAR                      ucMinNBVoltage;
  UCHAR                      ucMemoryType;                 //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved
  UCHAR                      ucNumberOfCyclesInPeriod;     //CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod
  UCHAR                      ucStartingPWM_HighTime;       //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime
  UCHAR                      ucHTLinkWidth;                //16 bit vs. 8 bit
  UCHAR                      ucMaxNBVoltageHigh;
  UCHAR                      ucMinNBVoltageHigh;
}ATOM_INTEGRATED_SYSTEM_INFO;

/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO
ulBootUpMemoryClock:    For Intel IGP, it's the UMA system memory clock
                        For AMD IGP, it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
ulMaxSystemMemoryClock: For Intel IGP, it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
                        For AMD IGP, for now this can be 0
ulMinSystemMemoryClock: For Intel IGP, it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
                        For AMD IGP, for now this can be 0

usFSBClock:             For Intel IGP, it's FSB Freq
                        For AMD IGP, it's HT Link Speed

usK8MemoryClock:        For AMD IGP only. For RevF CPU, set it to 200
@@ -1687,98 +2093,113 @@ VC:Voltage Control
ucMaxNBVoltage:             Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage. Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
ucMinNBVoltage:             Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage. Set this one to 0x00 if VC without PWM or no VC at all.

ucNumberOfCyclesInPeriod:   Indicate how many cycles when PWM duty is 100%. Low 8 bits of the value.
ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. High 8 bits of the value. If the PWM has an inverter, set bit [7]==1, otherwise set it 0

ucMaxNBVoltageHigh:         Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage. Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
ucMinNBVoltageHigh:         Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage. Set this one to 0x00 if VC without PWM or no VC at all.


usInterNBVoltageLow:        Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
usInterNBVoltageHigh:       Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
*/
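// Usage sketch (illustrative only): the max/min NB voltage is split across a low and
// a high byte as described above, and ucMemoryType keeps the DDR generation in its
// upper nibble.  The helper names are assumptions, not part of the ATOM definitions.
static USHORT example_max_nb_voltage_pwm(ATOM_INTEGRATED_SYSTEM_INFO *info)
{
  return (USHORT)((info->ucMaxNBVoltageHigh << 8) | info->ucMaxNBVoltage);
}

static UCHAR example_igp_memory_generation(ATOM_INTEGRATED_SYSTEM_INFO *info)
{
  return (UCHAR)((info->ucMemoryType >> 4) & 0x0F);   // 1:DDR1, 2:DDR2, 3:DDR3
}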


/*
The following IGP table is introduced from RS780, which is supposed to be put by SBIOS in FB before IGP VBIOS starts VPOST;
Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need.
The reservation is large enough that we should never need to change table revisions. Whenever needed, a GPU SW component can use the reserved portion for new data entries.

SW components can access the IGP system info structure in the same way as before
*/


typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2
{
  ATOM_COMMON_TABLE_HEADER   sHeader;
  ULONG                      ulBootUpEngineClock;          //in 10kHz unit
  ULONG                      ulReserved1[2];               //must be 0x0 for the reserved
  ULONG                      ulBootUpUMAClock;             //in 10kHz unit
  ULONG                      ulBootUpSidePortClock;        //in 10kHz unit
  ULONG                      ulMinSidePortClock;           //in 10kHz unit
  ULONG                      ulReserved2[6];               //must be 0x0 for the reserved
  ULONG                      ulSystemConfig;               //see explanation below
  ULONG                      ulBootUpReqDisplayVector;
  ULONG                      ulOtherDisplayMisc;
  ULONG                      ulDDISlot1Config;
  ULONG                      ulDDISlot2Config;
  UCHAR                      ucMemoryType;                 //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
  UCHAR                      ucUMAChannelNumber;
  UCHAR                      ucDockingPinBit;
  UCHAR                      ucDockingPinPolarity;
  ULONG                      ulDockingPinCFGInfo;
  ULONG                      ulCPUCapInfo;
  USHORT                     usNumberOfCyclesInPeriod;
  USHORT                     usMaxNBVoltage;
  USHORT                     usMinNBVoltage;
  USHORT                     usBootUpNBVoltage;
  ULONG                      ulHTLinkFreq;                 //in 10Khz
  USHORT                     usMinHTLinkWidth;
  USHORT                     usMaxHTLinkWidth;
  USHORT                     usUMASyncStartDelay;
  USHORT                     usUMADataReturnTime;
  USHORT                     usLinkStatusZeroTime;
  USHORT                     usDACEfuse;                   //for storing bandgap value (for RS880 only)
  ULONG                      ulHighVoltageHTLinkFreq;      // in 10Khz
  ULONG                      ulLowVoltageHTLinkFreq;       // in 10Khz
  USHORT                     usMaxUpStreamHTLinkWidth;
  USHORT                     usMaxDownStreamHTLinkWidth;
  USHORT                     usMinUpStreamHTLinkWidth;
  USHORT                     usMinDownStreamHTLinkWidth;
  USHORT                     usFirmwareVersion;            //0 means FW is not supported. Otherwise it's the FW version loaded by SBIOS and driver should enable FW.
  USHORT                     usFullT0Time;                 // Input to calculate minimum HT link change time required by NB P-State. Unit is 0.01us.
  ULONG                      ulReserved3[96];              //must be 0x0
}ATOM_INTEGRATED_SYSTEM_INFO_V2;

1747/* 2159/*
1748ulBootUpEngineClock: Boot-up Engine Clock in 10Khz; 2160ulBootUpEngineClock: Boot-up Engine Clock in 10Khz;
1749ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present 2161ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
1750ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock 2162ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock
1751 2163
1752ulSystemConfig: 2164ulSystemConfig:
1753Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode; 2165Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
1754Bit[1]=1: system boots up at AMD overdriven state or user customized mode. In this case, the driver will just stick to this boot-up mode; no other PowerPlay state is used. 2166Bit[1]=1: system boots up at AMD overdriven state or user customized mode. In this case, the driver will just stick to this boot-up mode; no other PowerPlay state is used.
1755 =0: system boots up at driver control state. Power state depends on PowerPlay table. 2167 =0: system boots up at driver control state. Power state depends on PowerPlay table.
1756Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used. 2168Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
1757Bit[3]=1: Only one power state(Performance) will be supported. 2169Bit[3]=1: Only one power state(Performance) will be supported.
1758 =0: Multiple power states supported from PowerPlay table. 2170 =0: Multiple power states supported from PowerPlay table.
1759Bit[4]=1: CLMC is supported and enabled on current system. 2171Bit[4]=1: CLMC is supported and enabled on current system.
1760 =0: CLMC is not supported or enabled on current system. SBIOS needs to support HT link/freq change through ATIF interface. 2172 =0: CLMC is not supported or enabled on current system. SBIOS needs to support HT link/freq change through ATIF interface.
1761Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement. 2173Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
1762 =0: CDLW is disabled. If CLMC is enabled, Min HT width will be set equal to Max HT width. If CLMC is disabled, Max HT width will be applied. 2174 =0: CDLW is disabled. If CLMC is enabled, Min HT width will be set equal to Max HT width. If CLMC is disabled, Max HT width will be applied.
1763Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored. 2175Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored.
1764 =0: Voltage settings is determined by powerplay table. 2176 =0: Voltage settings is determined by powerplay table.
1765Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is workaround for CPU(Griffin) performance issue. 2177Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is workaround for CPU(Griffin) performance issue.
1766 =0: Enable CLMC as regular mode, CDLD and CILR will be enabled. 2178 =0: Enable CLMC as regular mode, CDLD and CILR will be enabled.
2179Bit[8]=1: CDLF is supported and enabled on current system.
2180 =0: CDLF is not supported or enabled on current system.
2181Bit[9]=1: DLL Shut Down feature is enabled on current system.
2182 =0: DLL Shut Down feature is not enabled or supported on current system.
1767 2183
1768ulBootUpReqDisplayVector: This dword is a bit vector that indicates which display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions. 2184ulBootUpReqDisplayVector: This dword is a bit vector that indicates which display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
1769 2185
1770ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion; 2186ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion;
1771 [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSuppportedStd definition; 2187 [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSupportedStd definition;
1772 2188
1773ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design). 2189ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
1774 [3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12) 2190 [3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
1775 [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12) 2191 [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 4=1 lane 3:0; bit 5=1 lane 7:4; bit 6=1 lane 11:8; bit 7=1 lane 15:12)
1776 [15:8] - Lane configuration attribute; 2192 When a DDI connector is not "paired" (meaning two mutually exclusive connections, one on chassis and one on docking, of which only one can be connected at a time)
 2193 in both chassis and docking, SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example:
 2194 if one DDI connector is only populated in docking with PCIE lanes 8-11, but there is no paired connection on chassis, SBIOS has to copy bit 6 to bit 2.
2195
2196 [15:8] - Lane configuration attribute;
1777 [23:16]- Connector type, possible value: 2197 [23:16]- Connector type, possible value:
1778 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 2198 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
1779 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D 2199 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
1780 CONNECTOR_OBJECT_ID_HDMI_TYPE_A 2200 CONNECTOR_OBJECT_ID_HDMI_TYPE_A
1781 CONNECTOR_OBJECT_ID_DISPLAYPORT 2201 CONNECTOR_OBJECT_ID_DISPLAYPORT
2202 CONNECTOR_OBJECT_ID_eDP
1782 [31:24]- Reserved 2203 [31:24]- Reserved
1783 2204
1784ulDDISlot2Config: Same as Slot1. 2205ulDDISlot2Config: Same as Slot1.
@@ -1787,29 +2208,31 @@ For IGP, Hypermemory is the only memory type showed in CCC.
1787 2208
1788ucUMAChannelNumber: how many channels for the UMA; 2209ucUMAChannelNumber: how many channels for the UMA;
1789 2210
1790ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin 2211ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
1791ucDockingPinBit: which bit in this register to read the pin status; 2212ucDockingPinBit: which bit in this register to read the pin status;
1792ucDockingPinPolarity:Polarity of the pin when docked; 2213ucDockingPinPolarity:Polarity of the pin when docked;
1793 2214
1794ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0 2215ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
1795 2216
1796usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%. 2217usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
1797usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode. 2218
2219usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
1798usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode. 2220usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
1799 GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0 2221 GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
1800 PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1 2222 PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
1801 GPU SW does not control the voltage: usMaxNBVoltage & usMinNBVoltage=0 and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE is ignored 2223 GPU SW does not control the voltage: usMaxNBVoltage & usMinNBVoltage=0 and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE is ignored
2224
1802usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value. 2225usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value.
1803 2226
1804ulHTLinkFreq: Bootup HT link Frequency in 10Khz. 2227ulHTLinkFreq: Bootup HT link Frequency in 10Khz.
1805usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth. 2228usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
1806 If CDLW enabled, both upstream and downstream width should be the same during bootup.
1807usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
1808 If CDLW enabled, both upstream and downstream width should be the same during bootup. 2229 If CDLW enabled, both upstream and downstream width should be the same during bootup.
2230usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
2231 If CDLW enabled, both upstream and downstream width should be the same during bootup.
1809 2232
1810usUMASyncStartDelay: Memory access latency, required for watermark calculation 2233usUMASyncStartDelay: Memory access latency, required for watermark calculation
1811usUMADataReturnTime: Memory access latency, required for watermark calculation 2234usUMADataReturnTime: Memory access latency, required for watermark calculation
1812usLinkStatusZeroTime: Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in units of 0.01 us 2235usLinkStatusZeroTime: Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in units of 0.01 us
1813for Griffin or Greyhound. SBIOS needs to convert to actual time by: 2236for Griffin or Greyhound. SBIOS needs to convert to actual time by:
1814 if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us) 2237 if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
1815 if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us) 2238 if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
@@ -1817,7 +2240,7 @@ for Griffin or Greyhound. SBIOS needs to convert to actual time by:
1817 if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us) 2240 if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
1818 2241
1819ulHighVoltageHTLinkFreq: HT link frequency for power state with high voltage. If boot up runs in HT1, this must be 0. 2242ulHighVoltageHTLinkFreq: HT link frequency for power state with high voltage. If boot up runs in HT1, this must be 0.
1820 This must be less than or equal to ulHTLinkFreq(bootup frequency). 2243 This must be less than or equal to ulHTLinkFreq(bootup frequency).
1821ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0. 2244ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
1822 This must be less than or equal to ulHighVoltageHTLinkFreq. 2245 This must be less than or equal to ulHighVoltageHTLinkFreq.
1823 2246
@@ -1827,14 +2250,17 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep
1827usMinDownStreamHTLinkWidth: same as above. 2250usMinDownStreamHTLinkWidth: same as above.
1828*/ 2251*/
1829 2252
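// Illustrative only (not from the original header): one way SBIOS-side code could apply
// the T0Ttime conversion for usLinkStatusZeroTime given above, returning the latency in
// 0.01us units. The [5:4]=10b case falls in an elided part of the table and is therefore
// not filled in; the function name is hypothetical.
static USHORT atom_t0t_to_link_status_zero_time(UCHAR t0t_time)
{
    UCHAR scale = (t0t_time >> 4) & 0x3;   // T0Ttime[5:4]
    UCHAR count = t0t_time & 0x0F;         // T0Ttime[3:0]

    switch (scale) {
    case 0x0: return count * 10;                          // count * 0.1us
    case 0x1: return count * 50;                          // count * 0.5us
    case 0x3: return (count <= 0xa) ? count * 2000 : 0;   // count * 20us, 0x0..0xa only
    default:  return 0;                                    // [5:4]=10b: see the full table
    }
}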
2253
1830#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001 2254#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
1831#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002 2255#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
1832#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004 2256#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004
1833#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008 2257#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008
1834#define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010 2258#define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010
1835#define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020 2259#define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020
1836#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040 2260#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040
1837#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080 2261#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080
2262#define SYSTEM_CONFIG_CDLF_ENABLED 0x00000100
2263#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED 0x00000200
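// A minimal illustrative sketch, not part of the original header: how a GPU SW component
// might test ulSystemConfig in ATOM_INTEGRATED_SYSTEM_INFO_V2 using the SYSTEM_CONFIG_xxx
// masks above. Function names are hypothetical; the header's own types and defines are
// assumed to be in scope.
static int atom_igp_uses_pwm_voltage(const ATOM_INTEGRATED_SYSTEM_INFO_V2 *info)
{
    // Bit[2]: PWM method for NB voltage control when set, GPIO method otherwise.
    return (info->ulSystemConfig & SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE) != 0;
}

static int atom_igp_performance_state_only(const ATOM_INTEGRATED_SYSTEM_INFO_V2 *info)
{
    // Bit[3]: only the Performance power state is supported when set.
    return (info->ulSystemConfig & SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY) != 0;
}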
1838 2264
1839#define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF 2265#define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF
1840 2266
@@ -1851,6 +2277,41 @@ usMinDownStreamHTLinkWidth: same as above.
1851 2277
1852#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000 2278#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000
1853 2279
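// Illustrative only (not from the original header): unpacking the per-slot ulDDISlotXConfig
// layout described above. The helper name is hypothetical, and the [3:0]/[7:4]/[15:8] masks
// are written out because only the lane-config and connector-type masks are defined here.
static void atom_decode_ddi_slot(ULONG ulSlotConfig,
                                 UCHAR *chassis_lanes,   // [3:0]  lane bit vector on chassis
                                 UCHAR *docking_lanes,   // [7:4]  lane bit vector on docking
                                 UCHAR *lane_attribute,  // [15:8] lane configuration attribute
                                 UCHAR *connector_type)  // [23:16] CONNECTOR_OBJECT_ID_xxx
{
    *chassis_lanes  = (UCHAR)(ulSlotConfig & 0x0000000F);
    *docking_lanes  = (UCHAR)((ulSlotConfig & 0x000000F0) >> 4);
    *lane_attribute = (UCHAR)((ulSlotConfig & 0x0000FF00) >> 8);
    *connector_type = (UCHAR)((ulSlotConfig & IGP_DDI_SLOT_CONNECTOR_TYPE_MASK) >> 16);
}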
 2280// IntegratedSystemInfoTable's new revision after V2 is V5, because the real revision of V2 is v1.4. This revision is used for RR.
2281typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
2282{
2283 ATOM_COMMON_TABLE_HEADER sHeader;
2284 ULONG ulBootUpEngineClock; //in 10kHz unit
2285 ULONG ulDentistVCOFreq; //Dentist VCO clock in 10kHz unit, the source of GPU SCLK, LCLK, UCLK and VCLK.
2286 ULONG ulLClockFreq; //GPU Lclk freq in 10kHz unit, have relationship with NCLK in NorthBridge
2287 ULONG ulBootUpUMAClock; //in 10kHz unit
2288 ULONG ulReserved1[8]; //must be 0x0 for the reserved
2289 ULONG ulBootUpReqDisplayVector;
2290 ULONG ulOtherDisplayMisc;
2291 ULONG ulReserved2[4]; //must be 0x0 for the reserved
2292 ULONG ulSystemConfig; //TBD
2293 ULONG ulCPUCapInfo; //TBD
2294 USHORT usMaxNBVoltage; //high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
2295 USHORT usMinNBVoltage; //low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
2296 USHORT usBootUpNBVoltage; //boot up NB voltage
2297 UCHAR ucHtcTmpLmt; //bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD
2298 UCHAR ucTjOffset; //bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed, TBD
2299 ULONG ulReserved3[4]; //must be 0x0 for the reserved
2300 ULONG ulDDISlot1Config; //see above ulDDISlot1Config definition
2301 ULONG ulDDISlot2Config;
2302 ULONG ulDDISlot3Config;
2303 ULONG ulDDISlot4Config;
2304 ULONG ulReserved4[4]; //must be 0x0 for the reserved
2305 UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
2306 UCHAR ucUMAChannelNumber;
2307 USHORT usReserved;
2308 ULONG ulReserved5[4]; //must be 0x0 for the reserved
2309 ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];//arrays with values for CSR M3 arbiter for default
2310 ULONG ulCSR_M3_ARB_CNTL_UVD[10]; //arrays with values for CSR M3 arbiter for UVD playback
2311 ULONG ulCSR_M3_ARB_CNTL_FS3D[10];//arrays with values for CSR M3 arbiter for Full Screen 3D applications
2312 ULONG ulReserved6[61]; //must be 0x0
2313}ATOM_INTEGRATED_SYSTEM_INFO_V5;
2314
1854#define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000 2315#define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000
1855#define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001 2316#define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001
1856#define ATOM_TV_INT_ENCODER1_INDEX 0x00000002 2317#define ATOM_TV_INT_ENCODER1_INDEX 0x00000002
@@ -1866,8 +2327,8 @@ usMinDownStreamHTLinkWidth: same as above.
1866#define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C 2327#define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C
1867#define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D 2328#define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D
1868 2329
1869/* define ASIC internal encoder id ( bit vector ) */ 2330// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable
1870#define ASIC_INT_DAC1_ENCODER_ID 0x00 2331#define ASIC_INT_DAC1_ENCODER_ID 0x00
1871#define ASIC_INT_TV_ENCODER_ID 0x02 2332#define ASIC_INT_TV_ENCODER_ID 0x02
1872#define ASIC_INT_DIG1_ENCODER_ID 0x03 2333#define ASIC_INT_DIG1_ENCODER_ID 0x03
1873#define ASIC_INT_DAC2_ENCODER_ID 0x04 2334#define ASIC_INT_DAC2_ENCODER_ID 0x04
@@ -1875,10 +2336,24 @@ usMinDownStreamHTLinkWidth: same as above.
1875#define ASIC_INT_DVO_ENCODER_ID 0x07 2336#define ASIC_INT_DVO_ENCODER_ID 0x07
1876#define ASIC_INT_DIG2_ENCODER_ID 0x09 2337#define ASIC_INT_DIG2_ENCODER_ID 0x09
1877#define ASIC_EXT_DIG_ENCODER_ID 0x05 2338#define ASIC_EXT_DIG_ENCODER_ID 0x05
2339#define ASIC_EXT_DIG2_ENCODER_ID 0x08
2340#define ASIC_INT_DIG3_ENCODER_ID 0x0a
2341#define ASIC_INT_DIG4_ENCODER_ID 0x0b
2342#define ASIC_INT_DIG5_ENCODER_ID 0x0c
2343#define ASIC_INT_DIG6_ENCODER_ID 0x0d
1878 2344
1879/* define Encoder attribute */ 2345//define Encoder attribute
1880#define ATOM_ANALOG_ENCODER 0 2346#define ATOM_ANALOG_ENCODER 0
1881#define ATOM_DIGITAL_ENCODER 1 2347#define ATOM_DIGITAL_ENCODER 1
2348#define ATOM_DP_ENCODER 2
2349
2350#define ATOM_ENCODER_ENUM_MASK 0x70
2351#define ATOM_ENCODER_ENUM_ID1 0x00
2352#define ATOM_ENCODER_ENUM_ID2 0x10
2353#define ATOM_ENCODER_ENUM_ID3 0x20
2354#define ATOM_ENCODER_ENUM_ID4 0x30
2355#define ATOM_ENCODER_ENUM_ID5 0x40
2356#define ATOM_ENCODER_ENUM_ID6 0x50
1882 2357
1883#define ATOM_DEVICE_CRT1_INDEX 0x00000000 2358#define ATOM_DEVICE_CRT1_INDEX 0x00000000
1884#define ATOM_DEVICE_LCD1_INDEX 0x00000001 2359#define ATOM_DEVICE_LCD1_INDEX 0x00000001
@@ -1886,45 +2361,40 @@ usMinDownStreamHTLinkWidth: same as above.
1886#define ATOM_DEVICE_DFP1_INDEX 0x00000003 2361#define ATOM_DEVICE_DFP1_INDEX 0x00000003
1887#define ATOM_DEVICE_CRT2_INDEX 0x00000004 2362#define ATOM_DEVICE_CRT2_INDEX 0x00000004
1888#define ATOM_DEVICE_LCD2_INDEX 0x00000005 2363#define ATOM_DEVICE_LCD2_INDEX 0x00000005
1889#define ATOM_DEVICE_TV2_INDEX 0x00000006 2364#define ATOM_DEVICE_DFP6_INDEX 0x00000006
1890#define ATOM_DEVICE_DFP2_INDEX 0x00000007 2365#define ATOM_DEVICE_DFP2_INDEX 0x00000007
1891#define ATOM_DEVICE_CV_INDEX 0x00000008 2366#define ATOM_DEVICE_CV_INDEX 0x00000008
1892#define ATOM_DEVICE_DFP3_INDEX 0x00000009 2367#define ATOM_DEVICE_DFP3_INDEX 0x00000009
1893#define ATOM_DEVICE_DFP4_INDEX 0x0000000A 2368#define ATOM_DEVICE_DFP4_INDEX 0x0000000A
1894#define ATOM_DEVICE_DFP5_INDEX 0x0000000B 2369#define ATOM_DEVICE_DFP5_INDEX 0x0000000B
2370
1895#define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C 2371#define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C
1896#define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D 2372#define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D
1897#define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E 2373#define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E
1898#define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F 2374#define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F
1899#define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1) 2375#define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1)
1900#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO 2376#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO
1901#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1) 2377#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1 )
1902 2378
1903#define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1) 2379#define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1)
1904 2380
1905#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX) 2381#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX )
1906#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX) 2382#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX )
1907#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX) 2383#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX )
1908#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX) 2384#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX )
1909#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX) 2385#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX )
1910#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX) 2386#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX )
1911#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX) 2387#define ATOM_DEVICE_DFP6_SUPPORT (0x1L << ATOM_DEVICE_DFP6_INDEX )
1912#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX) 2388#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX )
1913#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX) 2389#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX )
1914#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX) 2390#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX )
1915#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX ) 2391#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX )
1916#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX) 2392#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX )
1917 2393
1918#define ATOM_DEVICE_CRT_SUPPORT \ 2394#define ATOM_DEVICE_CRT_SUPPORT (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
1919 (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT) 2395#define ATOM_DEVICE_DFP_SUPPORT (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT)
1920#define ATOM_DEVICE_DFP_SUPPORT \ 2396#define ATOM_DEVICE_TV_SUPPORT (ATOM_DEVICE_TV1_SUPPORT)
1921 (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | \ 2397#define ATOM_DEVICE_LCD_SUPPORT (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
1922 ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | \
1923 ATOM_DEVICE_DFP5_SUPPORT)
1924#define ATOM_DEVICE_TV_SUPPORT \
1925 (ATOM_DEVICE_TV1_SUPPORT | ATOM_DEVICE_TV2_SUPPORT)
1926#define ATOM_DEVICE_LCD_SUPPORT \
1927 (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
1928 2398
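// A tiny illustrative check, not part of the original header: testing one of the
// ATOM_DEVICE_xxx_SUPPORT bits above against a device bit vector such as usDeviceSupport
// or ulBootUpReqDisplayVector. The function name is made up.
static int atom_device_supported(ULONG ulDeviceVector, ULONG ulSupportBit)
{
    // e.g. atom_device_supported(ulBootUpReqDisplayVector, ATOM_DEVICE_LCD1_SUPPORT)
    return (ulDeviceVector & ulSupportBit) != 0;
}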
1929#define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0 2399#define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0
1930#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004 2400#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004
@@ -1942,6 +2412,7 @@ usMinDownStreamHTLinkWidth: same as above.
1942#define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E 2412#define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E
1943#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F 2413#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F
1944 2414
2415
1945#define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F 2416#define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F
1946#define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000 2417#define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000
1947#define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000 2418#define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000
@@ -1958,139 +2429,150 @@ usMinDownStreamHTLinkWidth: same as above.
1958#define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004 2429#define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004
1959#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001 2430#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001
1960#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002 2431#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002
1961#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 /* For IGP RS600 */ 2432#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 //For IGP RS600
1962#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 /* For IGP RS690 */ 2433#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 //For IGP RS690
1963 2434
1964#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080 2435#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080
1965#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007 2436#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007
1966#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000 2437#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000
1967#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001 2438#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001
1968 2439
1969/* usDeviceSupport: */ 2440// usDeviceSupport:
1970/* Bits0 = 0 - no CRT1 support= 1- CRT1 is supported */ 2441// Bits0 = 0 - no CRT1 support= 1- CRT1 is supported
1971/* Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported */ 2442// Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported
1972/* Bit 2 = 0 - no TV1 support= 1- TV1 is supported */ 2443// Bit 2 = 0 - no TV1 support= 1- TV1 is supported
1973/* Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported */ 2444// Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported
1974/* Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported */ 2445// Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported
1975/* Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported */ 2446// Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported
1976/* Bit 6 = 0 - no TV2 support= 1- TV2 is supported */ 2447// Bit 6 = 0 - no DFP6 support= 1- DFP6 is supported
1977/* Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported */ 2448// Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported
1978/* Bit 8 = 0 - no CV support= 1- CV is supported */ 2449// Bit 8 = 0 - no CV support= 1- CV is supported
1979/* Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported */ 2450// Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported
1980/* Byte1 (Supported Device Info) */ 2451// Bit 10 = 0 - no DFP4 support= 1- DFP4 is supported
1981/* Bit 0 = = 0 - no CV support= 1- CV is supported */ 2452// Bit 11 = 0 - no DFP5 support= 1- DFP5 is supported
1982/* */ 2453//
1983/* */ 2454//
1984
1985/* ucI2C_ConfigID */
1986/* [7:0] - I2C LINE Associate ID */
1987/* = 0 - no I2C */
1988/* [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) */
1989/* = 0, [6:0]=SW assisted I2C ID */
1990/* [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use */
1991/* = 2, HW engine for Multimedia use */
1992/* = 3-7 Reserved for future I2C engines */
1993/* [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C */
1994
1995typedef struct _ATOM_I2C_ID_CONFIG {
1996#if ATOM_BIG_ENDIAN
1997 UCHAR bfHW_Capable:1;
1998 UCHAR bfHW_EngineID:3;
1999 UCHAR bfI2C_LineMux:4;
2000#else
2001 UCHAR bfI2C_LineMux:4;
2002 UCHAR bfHW_EngineID:3;
2003 UCHAR bfHW_Capable:1;
2004#endif
2005} ATOM_I2C_ID_CONFIG;
2006
2007typedef union _ATOM_I2C_ID_CONFIG_ACCESS {
2008 ATOM_I2C_ID_CONFIG sbfAccess;
2009 UCHAR ucAccess;
2010} ATOM_I2C_ID_CONFIG_ACCESS;
2011 2455
2012/****************************************************************************/ 2456/****************************************************************************/
2013/* Structure used in GPIO_I2C_InfoTable */ 2457/* Structure used in MclkSS_InfoTable */
2014/****************************************************************************/ 2458/****************************************************************************/
2015typedef struct _ATOM_GPIO_I2C_ASSIGMENT { 2459// ucI2C_ConfigID
2016 USHORT usClkMaskRegisterIndex; 2460// [7:0] - I2C LINE Associate ID
2017 USHORT usClkEnRegisterIndex; 2461// = 0 - no I2C
2018 USHORT usClkY_RegisterIndex; 2462// [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection)
2019 USHORT usClkA_RegisterIndex; 2463// = 0, [6:0]=SW assisted I2C ID
2020 USHORT usDataMaskRegisterIndex; 2464// [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use
2021 USHORT usDataEnRegisterIndex; 2465// = 2, HW engine for Multimedia use
2022 USHORT usDataY_RegisterIndex; 2466// = 3-7 Reserved for future I2C engines
2023 USHORT usDataA_RegisterIndex; 2467// [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C
2024 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; 2468
2025 UCHAR ucClkMaskShift; 2469typedef struct _ATOM_I2C_ID_CONFIG
2026 UCHAR ucClkEnShift; 2470{
2027 UCHAR ucClkY_Shift; 2471#if ATOM_BIG_ENDIAN
2028 UCHAR ucClkA_Shift; 2472 UCHAR bfHW_Capable:1;
2029 UCHAR ucDataMaskShift; 2473 UCHAR bfHW_EngineID:3;
2030 UCHAR ucDataEnShift; 2474 UCHAR bfI2C_LineMux:4;
2031 UCHAR ucDataY_Shift; 2475#else
2032 UCHAR ucDataA_Shift; 2476 UCHAR bfI2C_LineMux:4;
2033 UCHAR ucReserved1; 2477 UCHAR bfHW_EngineID:3;
2034 UCHAR ucReserved2; 2478 UCHAR bfHW_Capable:1;
2035} ATOM_GPIO_I2C_ASSIGMENT; 2479#endif
2036 2480}ATOM_I2C_ID_CONFIG;
2037typedef struct _ATOM_GPIO_I2C_INFO {
2038 ATOM_COMMON_TABLE_HEADER sHeader;
2039 ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
2040} ATOM_GPIO_I2C_INFO;
2041 2481
2042/****************************************************************************/ 2482typedef union _ATOM_I2C_ID_CONFIG_ACCESS
2043/* Common Structure used in other structures */ 2483{
2044/****************************************************************************/ 2484 ATOM_I2C_ID_CONFIG sbfAccess;
2485 UCHAR ucAccess;
2486}ATOM_I2C_ID_CONFIG_ACCESS;
2487
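// Illustrative only (not from the original header): reading one ucI2C_ConfigID byte through
// the ATOM_I2C_ID_CONFIG_ACCESS union above; the header's ATOM_BIG_ENDIAN bitfield ordering
// takes care of endianness. The function name is made up.
static void atom_decode_i2c_id(UCHAR ucConfigId,
                               UCHAR *hw_capable, UCHAR *engine_id, UCHAR *line_mux)
{
    ATOM_I2C_ID_CONFIG_ACCESS id;

    id.ucAccess = ucConfigId;                   // raw byte, e.g. sucI2cId.ucAccess
    *hw_capable = id.sbfAccess.bfHW_Capable;    // [7]   1 = HW assisted I2C
    *engine_id  = id.sbfAccess.bfHW_EngineID;   // [6:4] 1 = non-MM engine, 2 = MM engine
    *line_mux   = id.sbfAccess.bfI2C_LineMux;   // [3:0] HW mux number or SW GPIO ID
}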
2488
2489/****************************************************************************/
2490// Structure used in GPIO_I2C_InfoTable
2491/****************************************************************************/
2492typedef struct _ATOM_GPIO_I2C_ASSIGMENT
2493{
2494 USHORT usClkMaskRegisterIndex;
2495 USHORT usClkEnRegisterIndex;
2496 USHORT usClkY_RegisterIndex;
2497 USHORT usClkA_RegisterIndex;
2498 USHORT usDataMaskRegisterIndex;
2499 USHORT usDataEnRegisterIndex;
2500 USHORT usDataY_RegisterIndex;
2501 USHORT usDataA_RegisterIndex;
2502 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
2503 UCHAR ucClkMaskShift;
2504 UCHAR ucClkEnShift;
2505 UCHAR ucClkY_Shift;
2506 UCHAR ucClkA_Shift;
2507 UCHAR ucDataMaskShift;
2508 UCHAR ucDataEnShift;
2509 UCHAR ucDataY_Shift;
2510 UCHAR ucDataA_Shift;
2511 UCHAR ucReserved1;
2512 UCHAR ucReserved2;
2513}ATOM_GPIO_I2C_ASSIGMENT;
2514
2515typedef struct _ATOM_GPIO_I2C_INFO
2516{
2517 ATOM_COMMON_TABLE_HEADER sHeader;
2518 ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
2519}ATOM_GPIO_I2C_INFO;
2520
2521/****************************************************************************/
2522// Common Structure used in other structures
2523/****************************************************************************/
2045 2524
2046#ifndef _H2INC 2525#ifndef _H2INC
2047 2526
2048/* Please don't add to or expand this bitfield structure below; it will be retired soon! */ 2527//Please don't add to or expand this bitfield structure below; it will be retired soon!
2049typedef struct _ATOM_MODE_MISC_INFO { 2528typedef struct _ATOM_MODE_MISC_INFO
2529{
2050#if ATOM_BIG_ENDIAN 2530#if ATOM_BIG_ENDIAN
2051 USHORT Reserved:6; 2531 USHORT Reserved:6;
2052 USHORT RGB888:1; 2532 USHORT RGB888:1;
2053 USHORT DoubleClock:1; 2533 USHORT DoubleClock:1;
2054 USHORT Interlace:1; 2534 USHORT Interlace:1;
2055 USHORT CompositeSync:1; 2535 USHORT CompositeSync:1;
2056 USHORT V_ReplicationBy2:1; 2536 USHORT V_ReplicationBy2:1;
2057 USHORT H_ReplicationBy2:1; 2537 USHORT H_ReplicationBy2:1;
2058 USHORT VerticalCutOff:1; 2538 USHORT VerticalCutOff:1;
2059 USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */ 2539 USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
2060 USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */ 2540 USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
2061 USHORT HorizontalCutOff:1; 2541 USHORT HorizontalCutOff:1;
2062#else 2542#else
2063 USHORT HorizontalCutOff:1; 2543 USHORT HorizontalCutOff:1;
2064 USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */ 2544 USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
2065 USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */ 2545 USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
2066 USHORT VerticalCutOff:1; 2546 USHORT VerticalCutOff:1;
2067 USHORT H_ReplicationBy2:1; 2547 USHORT H_ReplicationBy2:1;
2068 USHORT V_ReplicationBy2:1; 2548 USHORT V_ReplicationBy2:1;
2069 USHORT CompositeSync:1; 2549 USHORT CompositeSync:1;
2070 USHORT Interlace:1; 2550 USHORT Interlace:1;
2071 USHORT DoubleClock:1; 2551 USHORT DoubleClock:1;
2072 USHORT RGB888:1; 2552 USHORT RGB888:1;
2073 USHORT Reserved:6; 2553 USHORT Reserved:6;
2074#endif 2554#endif
2075} ATOM_MODE_MISC_INFO; 2555}ATOM_MODE_MISC_INFO;
2076 2556
2077typedef union _ATOM_MODE_MISC_INFO_ACCESS { 2557typedef union _ATOM_MODE_MISC_INFO_ACCESS
2078 ATOM_MODE_MISC_INFO sbfAccess; 2558{
2079 USHORT usAccess; 2559 ATOM_MODE_MISC_INFO sbfAccess;
2080} ATOM_MODE_MISC_INFO_ACCESS; 2560 USHORT usAccess;
2081 2561}ATOM_MODE_MISC_INFO_ACCESS;
2562
2082#else 2563#else
2083 2564
2084typedef union _ATOM_MODE_MISC_INFO_ACCESS { 2565typedef union _ATOM_MODE_MISC_INFO_ACCESS
2085 USHORT usAccess; 2566{
2086} ATOM_MODE_MISC_INFO_ACCESS; 2567 USHORT usAccess;
2087 2568}ATOM_MODE_MISC_INFO_ACCESS;
2569
2088#endif 2570#endif
2089 2571
2090/* usModeMiscInfo- */ 2572// usModeMiscInfo-
2091#define ATOM_H_CUTOFF 0x01 2573#define ATOM_H_CUTOFF 0x01
2092#define ATOM_HSYNC_POLARITY 0x02 /* 0=Active High, 1=Active Low */ 2574#define ATOM_HSYNC_POLARITY 0x02 //0=Active High, 1=Active Low
2093#define ATOM_VSYNC_POLARITY 0x04 /* 0=Active High, 1=Active Low */ 2575#define ATOM_VSYNC_POLARITY 0x04 //0=Active High, 1=Active Low
2094#define ATOM_V_CUTOFF 0x08 2576#define ATOM_V_CUTOFF 0x08
2095#define ATOM_H_REPLICATIONBY2 0x10 2577#define ATOM_H_REPLICATIONBY2 0x10
2096#define ATOM_V_REPLICATIONBY2 0x20 2578#define ATOM_V_REPLICATIONBY2 0x20
@@ -2099,10 +2581,10 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS {
2099#define ATOM_DOUBLE_CLOCK_MODE 0x100 2581#define ATOM_DOUBLE_CLOCK_MODE 0x100
2100#define ATOM_RGB888_MODE 0x200 2582#define ATOM_RGB888_MODE 0x200
2101 2583
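// Illustrative only (not from the original header): the usModeMiscInfo flags above are meant
// to be tested through the ATOM_MODE_MISC_INFO_ACCESS union's usAccess view, e.g. on a
// susModeMiscInfo field. Function names are made up.
static int atom_mode_hsync_active_low(ATOM_MODE_MISC_INFO_ACCESS susInfo)
{
    return (susInfo.usAccess & ATOM_HSYNC_POLARITY) != 0;    // 1 = active low
}

static int atom_mode_double_clock(ATOM_MODE_MISC_INFO_ACCESS susInfo)
{
    return (susInfo.usAccess & ATOM_DOUBLE_CLOCK_MODE) != 0;
}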
2102/* usRefreshRate- */ 2584//usRefreshRate-
2103#define ATOM_REFRESH_43 43 2585#define ATOM_REFRESH_43 43
2104#define ATOM_REFRESH_47 47 2586#define ATOM_REFRESH_47 47
2105#define ATOM_REFRESH_56 56 2587#define ATOM_REFRESH_56 56
2106#define ATOM_REFRESH_60 60 2588#define ATOM_REFRESH_60 60
2107#define ATOM_REFRESH_65 65 2589#define ATOM_REFRESH_65 65
2108#define ATOM_REFRESH_70 70 2590#define ATOM_REFRESH_70 70
@@ -2110,192 +2592,233 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS {
2110#define ATOM_REFRESH_75 75 2592#define ATOM_REFRESH_75 75
2111#define ATOM_REFRESH_85 85 2593#define ATOM_REFRESH_85 85
2112 2594
2113/* ATOM_MODE_TIMING data are exactly the same as VESA timing data. */ 2595// ATOM_MODE_TIMING data are exactly the same as VESA timing data.
2114/* Translation from EDID to ATOM_MODE_TIMING, use the following formula. */ 2596// Translation from EDID to ATOM_MODE_TIMING, use the following formula.
2115/* */ 2597//
2116/* VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK */ 2598// VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK
2117/* = EDID_HA + EDID_HBL */ 2599// = EDID_HA + EDID_HBL
2118/* VESA_HDISP = VESA_ACTIVE = EDID_HA */ 2600// VESA_HDISP = VESA_ACTIVE = EDID_HA
2119/* VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH */ 2601// VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH
2120/* = EDID_HA + EDID_HSO */ 2602// = EDID_HA + EDID_HSO
2121/* VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW */ 2603// VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW
2122/* VESA_BORDER = EDID_BORDER */ 2604// VESA_BORDER = EDID_BORDER
2123 2605
2124/****************************************************************************/ 2606/****************************************************************************/
2125/* Structure used in SetCRTC_UsingDTDTimingTable */ 2607// Structure used in SetCRTC_UsingDTDTimingTable
2126/****************************************************************************/ 2608/****************************************************************************/
2127typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS { 2609typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS
2128 USHORT usH_Size; 2610{
2129 USHORT usH_Blanking_Time; 2611 USHORT usH_Size;
2130 USHORT usV_Size; 2612 USHORT usH_Blanking_Time;
2131 USHORT usV_Blanking_Time; 2613 USHORT usV_Size;
2132 USHORT usH_SyncOffset; 2614 USHORT usV_Blanking_Time;
2133 USHORT usH_SyncWidth; 2615 USHORT usH_SyncOffset;
2134 USHORT usV_SyncOffset; 2616 USHORT usH_SyncWidth;
2135 USHORT usV_SyncWidth; 2617 USHORT usV_SyncOffset;
2136 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; 2618 USHORT usV_SyncWidth;
2137 UCHAR ucH_Border; /* From DFP EDID */ 2619 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2138 UCHAR ucV_Border; 2620 UCHAR ucH_Border; // From DFP EDID
2139 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 2621 UCHAR ucV_Border;
2140 UCHAR ucPadding[3]; 2622 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
2141} SET_CRTC_USING_DTD_TIMING_PARAMETERS; 2623 UCHAR ucPadding[3];
2142 2624}SET_CRTC_USING_DTD_TIMING_PARAMETERS;
2143/****************************************************************************/ 2625
2144/* Structure used in SetCRTC_TimingTable */ 2626/****************************************************************************/
2145/****************************************************************************/ 2627// Structure used in SetCRTC_TimingTable
2146typedef struct _SET_CRTC_TIMING_PARAMETERS { 2628/****************************************************************************/
2147 USHORT usH_Total; /* horizontal total */ 2629typedef struct _SET_CRTC_TIMING_PARAMETERS
2148 USHORT usH_Disp; /* horizontal display */ 2630{
2149 USHORT usH_SyncStart; /* horozontal Sync start */ 2631 USHORT usH_Total; // horizontal total
2150 USHORT usH_SyncWidth; /* horizontal Sync width */ 2632 USHORT usH_Disp; // horizontal display
2151 USHORT usV_Total; /* vertical total */ 2633 USHORT usH_SyncStart; // horozontal Sync start
2152 USHORT usV_Disp; /* vertical display */ 2634 USHORT usH_SyncWidth; // horizontal Sync width
2153 USHORT usV_SyncStart; /* vertical Sync start */ 2635 USHORT usV_Total; // vertical total
2154 USHORT usV_SyncWidth; /* vertical Sync width */ 2636 USHORT usV_Disp; // vertical display
2155 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; 2637 USHORT usV_SyncStart; // vertical Sync start
2156 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 2638 USHORT usV_SyncWidth; // vertical Sync width
2157 UCHAR ucOverscanRight; /* right */ 2639 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2158 UCHAR ucOverscanLeft; /* left */ 2640 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
2159 UCHAR ucOverscanBottom; /* bottom */ 2641 UCHAR ucOverscanRight; // right
2160 UCHAR ucOverscanTop; /* top */ 2642 UCHAR ucOverscanLeft; // left
2161 UCHAR ucReserved; 2643 UCHAR ucOverscanBottom; // bottom
2162} SET_CRTC_TIMING_PARAMETERS; 2644 UCHAR ucOverscanTop; // top
2645 UCHAR ucReserved;
2646}SET_CRTC_TIMING_PARAMETERS;
2163#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS 2647#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
2164 2648
2165/****************************************************************************/ 2649/****************************************************************************/
2166/* Structure used in StandardVESA_TimingTable */ 2650// Structure used in StandardVESA_TimingTable
2167/* AnalogTV_InfoTable */ 2651// AnalogTV_InfoTable
2168/* ComponentVideoInfoTable */ 2652// ComponentVideoInfoTable
2169/****************************************************************************/ 2653/****************************************************************************/
2170typedef struct _ATOM_MODE_TIMING { 2654typedef struct _ATOM_MODE_TIMING
2171 USHORT usCRTC_H_Total; 2655{
2172 USHORT usCRTC_H_Disp; 2656 USHORT usCRTC_H_Total;
2173 USHORT usCRTC_H_SyncStart; 2657 USHORT usCRTC_H_Disp;
2174 USHORT usCRTC_H_SyncWidth; 2658 USHORT usCRTC_H_SyncStart;
2175 USHORT usCRTC_V_Total; 2659 USHORT usCRTC_H_SyncWidth;
2176 USHORT usCRTC_V_Disp; 2660 USHORT usCRTC_V_Total;
2177 USHORT usCRTC_V_SyncStart; 2661 USHORT usCRTC_V_Disp;
2178 USHORT usCRTC_V_SyncWidth; 2662 USHORT usCRTC_V_SyncStart;
2179 USHORT usPixelClock; /* in 10Khz unit */ 2663 USHORT usCRTC_V_SyncWidth;
2180 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; 2664 USHORT usPixelClock; //in 10Khz unit
2181 USHORT usCRTC_OverscanRight; 2665 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2182 USHORT usCRTC_OverscanLeft; 2666 USHORT usCRTC_OverscanRight;
2183 USHORT usCRTC_OverscanBottom; 2667 USHORT usCRTC_OverscanLeft;
2184 USHORT usCRTC_OverscanTop; 2668 USHORT usCRTC_OverscanBottom;
2185 USHORT usReserve; 2669 USHORT usCRTC_OverscanTop;
2186 UCHAR ucInternalModeNumber; 2670 USHORT usReserve;
2187 UCHAR ucRefreshRate; 2671 UCHAR ucInternalModeNumber;
2188} ATOM_MODE_TIMING; 2672 UCHAR ucRefreshRate;
2189 2673}ATOM_MODE_TIMING;
2190typedef struct _ATOM_DTD_FORMAT { 2674
2191 USHORT usPixClk; 2675typedef struct _ATOM_DTD_FORMAT
2192 USHORT usHActive; 2676{
2193 USHORT usHBlanking_Time; 2677 USHORT usPixClk;
2194 USHORT usVActive; 2678 USHORT usHActive;
2195 USHORT usVBlanking_Time; 2679 USHORT usHBlanking_Time;
2196 USHORT usHSyncOffset; 2680 USHORT usVActive;
2197 USHORT usHSyncWidth; 2681 USHORT usVBlanking_Time;
2198 USHORT usVSyncOffset; 2682 USHORT usHSyncOffset;
2199 USHORT usVSyncWidth; 2683 USHORT usHSyncWidth;
2200 USHORT usImageHSize; 2684 USHORT usVSyncOffset;
2201 USHORT usImageVSize; 2685 USHORT usVSyncWidth;
2202 UCHAR ucHBorder; 2686 USHORT usImageHSize;
2203 UCHAR ucVBorder; 2687 USHORT usImageVSize;
2204 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; 2688 UCHAR ucHBorder;
2205 UCHAR ucInternalModeNumber; 2689 UCHAR ucVBorder;
2206 UCHAR ucRefreshRate; 2690 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2207} ATOM_DTD_FORMAT; 2691 UCHAR ucInternalModeNumber;
2208 2692 UCHAR ucRefreshRate;
2209/****************************************************************************/ 2693}ATOM_DTD_FORMAT;
2210/* Structure used in LVDS_InfoTable */ 2694
2211/* * Need a document to describe this table */ 2695/****************************************************************************/
2212/****************************************************************************/ 2696// Structure used in LVDS_InfoTable
2697// * Need a document to describe this table
2698/****************************************************************************/
2213#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004 2699#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
2214#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008 2700#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
2215#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010 2701#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
2216#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020 2702#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
2217 2703
2218/* Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. */ 2704//ucTableFormatRevision=1
2219/* Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL */ 2705//ucTableContentRevision=1
2220#define LCDPANEL_CAP_READ_EDID 0x1 2706typedef struct _ATOM_LVDS_INFO
2221 2707{
2222/* ucTableFormatRevision=1 */ 2708 ATOM_COMMON_TABLE_HEADER sHeader;
2223/* ucTableContentRevision=1 */ 2709 ATOM_DTD_FORMAT sLCDTiming;
2224typedef struct _ATOM_LVDS_INFO { 2710 USHORT usModePatchTableOffset;
2225 ATOM_COMMON_TABLE_HEADER sHeader; 2711 USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
2226 ATOM_DTD_FORMAT sLCDTiming; 2712 USHORT usOffDelayInMs;
2227 USHORT usModePatchTableOffset; 2713 UCHAR ucPowerSequenceDigOntoDEin10Ms;
2228 USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */ 2714 UCHAR ucPowerSequenceDEtoBLOnin10Ms;
2229 USHORT usOffDelayInMs; 2715 UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
2230 UCHAR ucPowerSequenceDigOntoDEin10Ms; 2716 // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
2231 UCHAR ucPowerSequenceDEtoBLOnin10Ms; 2717 // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
2232 UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */ 2718 // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
2233 /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */ 2719 UCHAR ucPanelDefaultRefreshRate;
2234 /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */ 2720 UCHAR ucPanelIdentification;
2235 /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */ 2721 UCHAR ucSS_Id;
2236 UCHAR ucPanelDefaultRefreshRate; 2722}ATOM_LVDS_INFO;
2237 UCHAR ucPanelIdentification; 2723
2238 UCHAR ucSS_Id; 2724//ucTableFormatRevision=1
2239} ATOM_LVDS_INFO; 2725//ucTableContentRevision=2
2240 2726typedef struct _ATOM_LVDS_INFO_V12
2241/* ucTableFormatRevision=1 */ 2727{
2242/* ucTableContentRevision=2 */ 2728 ATOM_COMMON_TABLE_HEADER sHeader;
2243typedef struct _ATOM_LVDS_INFO_V12 { 2729 ATOM_DTD_FORMAT sLCDTiming;
2244 ATOM_COMMON_TABLE_HEADER sHeader; 2730 USHORT usExtInfoTableOffset;
2245 ATOM_DTD_FORMAT sLCDTiming; 2731 USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
2246 USHORT usExtInfoTableOffset; 2732 USHORT usOffDelayInMs;
2247 USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */ 2733 UCHAR ucPowerSequenceDigOntoDEin10Ms;
2248 USHORT usOffDelayInMs; 2734 UCHAR ucPowerSequenceDEtoBLOnin10Ms;
2249 UCHAR ucPowerSequenceDigOntoDEin10Ms; 2735 UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
2250 UCHAR ucPowerSequenceDEtoBLOnin10Ms; 2736 // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
2251 UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */ 2737 // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
2252 /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */ 2738 // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
2253 /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */ 2739 UCHAR ucPanelDefaultRefreshRate;
2254 /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */ 2740 UCHAR ucPanelIdentification;
2255 UCHAR ucPanelDefaultRefreshRate; 2741 UCHAR ucSS_Id;
2256 UCHAR ucPanelIdentification; 2742 USHORT usLCDVenderID;
2257 UCHAR ucSS_Id; 2743 USHORT usLCDProductID;
2258 USHORT usLCDVenderID; 2744 UCHAR ucLCDPanel_SpecialHandlingCap;
2259 USHORT usLCDProductID; 2745 UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
2260 UCHAR ucLCDPanel_SpecialHandlingCap; 2746 UCHAR ucReserved[2];
2261 UCHAR ucPanelInfoSize; /* start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable */ 2747}ATOM_LVDS_INFO_V12;
2262 UCHAR ucReserved[2]; 2748
2263} ATOM_LVDS_INFO_V12; 2749//Definitions for ucLCDPanel_SpecialHandlingCap:
2750
2751//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
2752//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
2753#define LCDPANEL_CAP_READ_EDID 0x1
2754
2755//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
 2756//with multiple supported refresh rates @usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
 2757//refresh rate switching by SW). This is only valid from ATOM_LVDS_INFO_V12 onward.
2758#define LCDPANEL_CAP_DRR_SUPPORTED 0x2
2759
 2760//Use this cap bit as a quick reference for whether an embedded panel (LCD1) is LVDS or eDP.
2761#define LCDPANEL_CAP_eDP 0x4
2762
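// Illustrative only (not from the original header): consuming the caps above from
// ATOM_LVDS_INFO_V12.ucLCDPanel_SpecialHandlingCap. Function names are made up.
static int atom_lcd_must_read_edid(const ATOM_LVDS_INFO_V12 *info)
{
    // When set, DAL reads the panel EDID itself instead of trusting sLCDTiming.
    return (info->ucLCDPanel_SpecialHandlingCap & LCDPANEL_CAP_READ_EDID) != 0;
}

static int atom_lcd_is_edp(const ATOM_LVDS_INFO_V12 *info)
{
    return (info->ucLCDPanel_SpecialHandlingCap & LCDPANEL_CAP_eDP) != 0;
}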
2763
2764//Color Bit Depth definition in EDID V1.4 @BYTE 14h
2765//Bit 6 5 4
2766 // 0 0 0 - Color bit depth is undefined
2767 // 0 0 1 - 6 Bits per Primary Color
2768 // 0 1 0 - 8 Bits per Primary Color
2769 // 0 1 1 - 10 Bits per Primary Color
2770 // 1 0 0 - 12 Bits per Primary Color
2771 // 1 0 1 - 14 Bits per Primary Color
2772 // 1 1 0 - 16 Bits per Primary Color
2773 // 1 1 1 - Reserved
2774
2775#define PANEL_COLOR_BIT_DEPTH_MASK 0x70
2776
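// Illustrative only (not from the original header): mapping the EDID 1.4 color depth field
// above (bits 6:4 of byte 14h) to bits per primary color. Returns 0 for the undefined and
// reserved encodings; the helper name is made up.
static UCHAR atom_panel_bits_per_color(UCHAR ucEdidByte14)
{
    UCHAR code = (ucEdidByte14 & PANEL_COLOR_BIT_DEPTH_MASK) >> 4;

    // Codes 001b..110b map to 6/8/10/12/14/16 bits per primary color.
    return (code >= 1 && code <= 6) ? (UCHAR)(4 + 2 * code) : 0;
}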
2777// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled}
2778#define PANEL_RANDOM_DITHER 0x80
2779#define PANEL_RANDOM_DITHER_MASK 0x80
2780
2264 2781
2265#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 2782#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12
2266 2783
2267typedef struct _ATOM_PATCH_RECORD_MODE { 2784typedef struct _ATOM_PATCH_RECORD_MODE
2268 UCHAR ucRecordType; 2785{
2269 USHORT usHDisp; 2786 UCHAR ucRecordType;
2270 USHORT usVDisp; 2787 USHORT usHDisp;
2271} ATOM_PATCH_RECORD_MODE; 2788 USHORT usVDisp;
2789}ATOM_PATCH_RECORD_MODE;
2272 2790
2273typedef struct _ATOM_LCD_RTS_RECORD { 2791typedef struct _ATOM_LCD_RTS_RECORD
2274 UCHAR ucRecordType; 2792{
2275 UCHAR ucRTSValue; 2793 UCHAR ucRecordType;
2276} ATOM_LCD_RTS_RECORD; 2794 UCHAR ucRTSValue;
2795}ATOM_LCD_RTS_RECORD;
2277 2796
2278/* !! If the record below exists, it should always be the first record, for easy use in the command table!!! */ 2797//!! If the record below exists, it should always be the first record, for easy use in the command table!!!
2279typedef struct _ATOM_LCD_MODE_CONTROL_CAP { 2798// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead.
2280 UCHAR ucRecordType; 2799typedef struct _ATOM_LCD_MODE_CONTROL_CAP
2281 USHORT usLCDCap; 2800{
2282} ATOM_LCD_MODE_CONTROL_CAP; 2801 UCHAR ucRecordType;
2802 USHORT usLCDCap;
2803}ATOM_LCD_MODE_CONTROL_CAP;
2283 2804
2284#define LCD_MODE_CAP_BL_OFF 1 2805#define LCD_MODE_CAP_BL_OFF 1
2285#define LCD_MODE_CAP_CRTC_OFF 2 2806#define LCD_MODE_CAP_CRTC_OFF 2
2286#define LCD_MODE_CAP_PANEL_OFF 4 2807#define LCD_MODE_CAP_PANEL_OFF 4
2287 2808
2288typedef struct _ATOM_FAKE_EDID_PATCH_RECORD { 2809typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
2289 UCHAR ucRecordType; 2810{
2290 UCHAR ucFakeEDIDLength; 2811 UCHAR ucRecordType;
2291 UCHAR ucFakeEDIDString[1]; /* This actually has ucFakeEdidLength elements. */ 2812 UCHAR ucFakeEDIDLength;
2813 UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements.
2292} ATOM_FAKE_EDID_PATCH_RECORD; 2814} ATOM_FAKE_EDID_PATCH_RECORD;
2293 2815
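// Illustrative only (not from the original header): because ucFakeEDIDString[] is a
// variable-length tail, a record parser has to compute each record's real size before
// stepping to the next one. offsetof() and size_t come from <stddef.h>.
static size_t atom_fake_edid_record_size(const ATOM_FAKE_EDID_PATCH_RECORD *rec)
{
    // Fixed header up to the string, plus ucFakeEDIDLength bytes of EDID data.
    return offsetof(ATOM_FAKE_EDID_PATCH_RECORD, ucFakeEDIDString)
           + rec->ucFakeEDIDLength;
}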
2294typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD { 2816typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD
2295 UCHAR ucRecordType; 2817{
2296 USHORT usHSize; 2818 UCHAR ucRecordType;
2297 USHORT usVSize; 2819 USHORT usHSize;
2298} ATOM_PANEL_RESOLUTION_PATCH_RECORD; 2820 USHORT usVSize;
2821}ATOM_PANEL_RESOLUTION_PATCH_RECORD;
2299 2822
2300#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1 2823#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1
2301#define LCD_RTS_RECORD_TYPE 2 2824#define LCD_RTS_RECORD_TYPE 2
@@ -2306,21 +2829,25 @@ typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD {
2306 2829
2307/****************************Spread Spectrum Info Table Definitions **********************/ 2830/****************************Spread Spectrum Info Table Definitions **********************/
2308 2831
2309/* ucTableFormatRevision=1 */ 2832//ucTableFormatRevision=1
2310/* ucTableContentRevision=2 */ 2833//ucTableContentRevision=2
2311typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT { 2834typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT
2312 USHORT usSpreadSpectrumPercentage; 2835{
2313 UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ 2836 USHORT usSpreadSpectrumPercentage;
2314 UCHAR ucSS_Step; 2837 UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Bit2=1: PCIE REFCLK SS =0 iternal PPLL SS Others:TBD
2315 UCHAR ucSS_Delay; 2838 UCHAR ucSS_Step;
2316 UCHAR ucSS_Id; 2839 UCHAR ucSS_Delay;
2317 UCHAR ucRecommendedRef_Div; 2840 UCHAR ucSS_Id;
2318 UCHAR ucSS_Range; /* it was reserved for V11 */ 2841 UCHAR ucRecommendedRef_Div;
2319} ATOM_SPREAD_SPECTRUM_ASSIGNMENT; 2842 UCHAR ucSS_Range; //it was reserved for V11
2843}ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
2320 2844
2321#define ATOM_MAX_SS_ENTRY 16 2845#define ATOM_MAX_SS_ENTRY 16
2322#define ATOM_DP_SS_ID1 0x0f1 /* SS modulation freq=30k */ 2846#define ATOM_DP_SS_ID1 0x0f1 // SS ID for internal DP stream at 2.7Ghz. if ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for internal DP stream at 1.62Ghz as well.
2323#define ATOM_DP_SS_ID2 0x0f2 /* SS modulation freq=33k */ 2847#define ATOM_DP_SS_ID2 0x0f2 // SS ID for internal DP stream at 1.62Ghz, if it exists in SS_InfoTable.
2848#define ATOM_LVLINK_2700MHz_SS_ID 0x0f3 // SS ID for LV link translator chip at 2.7Ghz
2849#define ATOM_LVLINK_1620MHz_SS_ID 0x0f4 // SS ID for LV link translator chip at 1.62Ghz
2850
2324 2851
2325#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000 2852#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
2326#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000 2853#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
@@ -2329,29 +2856,30 @@ typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
2329#define ATOM_INTERNAL_SS_MASK 0x00000000 2856#define ATOM_INTERNAL_SS_MASK 0x00000000
2330#define ATOM_EXTERNAL_SS_MASK 0x00000002 2857#define ATOM_EXTERNAL_SS_MASK 0x00000002
2331#define EXEC_SS_STEP_SIZE_SHIFT 2 2858#define EXEC_SS_STEP_SIZE_SHIFT 2
2332#define EXEC_SS_DELAY_SHIFT 4 2859#define EXEC_SS_DELAY_SHIFT 4
2333#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4 2860#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4
2334 2861
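// Illustrative only (not from the original header): the centre/down-spread mask sits in an
// elided hunk, so this sketch only decodes the internal vs. external bit of
// ucSpreadSpectrumType using the mask visible above.
static int atom_ss_is_external(const ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss)
{
    return (ss->ucSpreadSpectrumType & ATOM_EXTERNAL_SS_MASK) != 0;   // Bit1
}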
2335typedef struct _ATOM_SPREAD_SPECTRUM_INFO { 2862typedef struct _ATOM_SPREAD_SPECTRUM_INFO
2336 ATOM_COMMON_TABLE_HEADER sHeader; 2863{
2337 ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY]; 2864 ATOM_COMMON_TABLE_HEADER sHeader;
2338} ATOM_SPREAD_SPECTRUM_INFO; 2865 ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
2339 2866}ATOM_SPREAD_SPECTRUM_INFO;
2340/****************************************************************************/ 2867
2341/* Structure used in AnalogTV_InfoTable (Top level) */ 2868/****************************************************************************/
2342/****************************************************************************/ 2869// Structure used in AnalogTV_InfoTable (Top level)
2343/* ucTVBootUpDefaultStd definiton: */ 2870/****************************************************************************/
2344 2871//ucTVBootUpDefaultStd definiton:
2345/* ATOM_TV_NTSC 1 */ 2872
2346/* ATOM_TV_NTSCJ 2 */ 2873//ATOM_TV_NTSC 1
2347/* ATOM_TV_PAL 3 */ 2874//ATOM_TV_NTSCJ 2
2348/* ATOM_TV_PALM 4 */ 2875//ATOM_TV_PAL 3
2349/* ATOM_TV_PALCN 5 */ 2876//ATOM_TV_PALM 4
2350/* ATOM_TV_PALN 6 */ 2877//ATOM_TV_PALCN 5
2351/* ATOM_TV_PAL60 7 */ 2878//ATOM_TV_PALN 6
2352/* ATOM_TV_SECAM 8 */ 2879//ATOM_TV_PAL60 7
2353 2880//ATOM_TV_SECAM 8
2354/* ucTVSuppportedStd definition: */ 2881
2882//ucTVSupportedStd definition:
2355#define NTSC_SUPPORT 0x1 2883#define NTSC_SUPPORT 0x1
2356#define NTSCJ_SUPPORT 0x2 2884#define NTSCJ_SUPPORT 0x2
2357 2885
@@ -2364,46 +2892,58 @@ typedef struct _ATOM_SPREAD_SPECTRUM_INFO {
2364 2892
2365#define MAX_SUPPORTED_TV_TIMING 2 2893#define MAX_SUPPORTED_TV_TIMING 2
2366 2894
2367typedef struct _ATOM_ANALOG_TV_INFO { 2895typedef struct _ATOM_ANALOG_TV_INFO
2368 ATOM_COMMON_TABLE_HEADER sHeader; 2896{
2369 UCHAR ucTV_SupportedStandard; 2897 ATOM_COMMON_TABLE_HEADER sHeader;
2370 UCHAR ucTV_BootUpDefaultStandard; 2898 UCHAR ucTV_SupportedStandard;
2371 UCHAR ucExt_TV_ASIC_ID; 2899 UCHAR ucTV_BootUpDefaultStandard;
2372 UCHAR ucExt_TV_ASIC_SlaveAddr; 2900 UCHAR ucExt_TV_ASIC_ID;
2373 /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; */ 2901 UCHAR ucExt_TV_ASIC_SlaveAddr;
2374 ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING]; 2902 /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];*/
2375} ATOM_ANALOG_TV_INFO; 2903 ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
2904}ATOM_ANALOG_TV_INFO;
2376 2905
2377#define MAX_SUPPORTED_TV_TIMING_V1_2 3 2906#define MAX_SUPPORTED_TV_TIMING_V1_2 3
2378 2907
2379typedef struct _ATOM_ANALOG_TV_INFO_V1_2 { 2908typedef struct _ATOM_ANALOG_TV_INFO_V1_2
2380 ATOM_COMMON_TABLE_HEADER sHeader; 2909{
2381 UCHAR ucTV_SupportedStandard; 2910 ATOM_COMMON_TABLE_HEADER sHeader;
2382 UCHAR ucTV_BootUpDefaultStandard; 2911 UCHAR ucTV_SupportedStandard;
2383 UCHAR ucExt_TV_ASIC_ID; 2912 UCHAR ucTV_BootUpDefaultStandard;
2384 UCHAR ucExt_TV_ASIC_SlaveAddr; 2913 UCHAR ucExt_TV_ASIC_ID;
2385 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; 2914 UCHAR ucExt_TV_ASIC_SlaveAddr;
2386} ATOM_ANALOG_TV_INFO_V1_2; 2915 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];
2916}ATOM_ANALOG_TV_INFO_V1_2;
2917
2918typedef struct _ATOM_DPCD_INFO
2919{
2920 UCHAR ucRevisionNumber; //10h : Revision 1.0; 11h : Revision 1.1
2921 UCHAR ucMaxLinkRate; //06h : 1.62Gbps per lane; 0Ah = 2.7Gbps per lane
2922 UCHAR ucMaxLane; //Bits 4:0 = MAX_LANE_COUNT (1/2/4). Bit 7 = ENHANCED_FRAME_CAP
2923 UCHAR ucMaxDownSpread; //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec)
2924}ATOM_DPCD_INFO;
2925
2926#define ATOM_DPCD_MAX_LANE_MASK 0x1F
2387 2927
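
ATOM_DPCD_INFO mirrors the first receiver-capability bytes of the DisplayPort DPCD, with the encodings spelled out in the comments above (06h = 1.62Gbps per lane, 0Ah = 2.7Gbps per lane, lane count in bits 4:0, ENHANCED_FRAME_CAP in bit 7). The sketch below decodes those fields; struct dp_caps is a made-up holder for the example, and the 27000 kHz-per-unit conversion follows the DP convention implied by the two listed rate codes.

/* Sketch: decode an ATOM_DPCD_INFO snapshot using the encodings above. */
struct dp_caps {
        unsigned int max_lanes;        /* 1, 2 or 4 (bits 4:0 of ucMaxLane) */
        unsigned int link_clock_khz;   /* 162000 for 1.62Gbps, 270000 for 2.7Gbps */
        int enhanced_framing;          /* ENHANCED_FRAME_CAP, bit 7 of ucMaxLane */
};

static void atom_decode_dpcd(const ATOM_DPCD_INFO *dpcd, struct dp_caps *caps)
{
        caps->max_lanes = dpcd->ucMaxLane & ATOM_DPCD_MAX_LANE_MASK;
        caps->enhanced_framing = (dpcd->ucMaxLane & 0x80) != 0;
        /* DPCD link-rate codes are multiples of 0.27Gbps (27 MHz link clock):
         * 0x06 -> 162000 kHz, 0x0A -> 270000 kHz. */
        caps->link_clock_khz = dpcd->ucMaxLinkRate * 27000;
}
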
2388/**************************************************************************/ 2928/**************************************************************************/
2389/* VRAM usage and their definitions */ 2929// VRAM usage and their definitions
2390 2930
2391/* One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. */ 2931// One chunk of VRAM used by Bios are for HWICON surfaces,EDID data.
2392/* Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. */ 2932// Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below.
2393/* All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned! */ 2933// All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned!
2394/* To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR */ 2934// To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR
2395/* To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX */ 2935// To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX
2396 2936
2397#ifndef VESA_MEMORY_IN_64K_BLOCK 2937#ifndef VESA_MEMORY_IN_64K_BLOCK
2398#define VESA_MEMORY_IN_64K_BLOCK 0x100 /* 256*64K=16Mb (Max. VESA memory is 16Mb!) */ 2938#define VESA_MEMORY_IN_64K_BLOCK 0x100 //256*64K=16Mb (Max. VESA memory is 16Mb!)
2399#endif 2939#endif
2400 2940
2401#define ATOM_EDID_RAW_DATASIZE 256 /* In Bytes */ 2941#define ATOM_EDID_RAW_DATASIZE 256 //In Bytes
2402#define ATOM_HWICON_SURFACE_SIZE 4096 /* In Bytes */ 2942#define ATOM_HWICON_SURFACE_SIZE 4096 //In Bytes
2403#define ATOM_HWICON_INFOTABLE_SIZE 32 2943#define ATOM_HWICON_INFOTABLE_SIZE 32
2404#define MAX_DTD_MODE_IN_VRAM 6 2944#define MAX_DTD_MODE_IN_VRAM 6
2405#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) /* 28= (SIZEOF ATOM_DTD_FORMAT) */ 2945#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT)
2406#define ATOM_STD_MODE_SUPPORT_TBL_SIZE (32*8) /* 32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) */ 2946#define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
2407#define DFP_ENCODER_TYPE_OFFSET 0x80 2947#define DFP_ENCODER_TYPE_OFFSET 0x80
2408#define DP_ENCODER_LANE_NUM_OFFSET 0x84 2948#define DP_ENCODER_LANE_NUM_OFFSET 0x84
2409#define DP_ENCODER_LINK_RATE_OFFSET 0x88 2949#define DP_ENCODER_LINK_RATE_OFFSET 0x88
@@ -2417,7 +2957,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
2417 2957
2418#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2958#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2419#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2959#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2420#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2960#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2421 2961
2422#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2962#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2423 2963
@@ -2431,13 +2971,13 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
2431 2971
2432#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2972#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2433#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2973#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2434#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2974#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2435 2975
2436#define ATOM_TV2_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2976#define ATOM_DFP6_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2437#define ATOM_TV2_DTD_MODE_TBL_ADDR (ATOM_TV2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2977#define ATOM_DFP6_DTD_MODE_TBL_ADDR (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2438#define ATOM_TV2_STD_MODE_TBL_ADDR (ATOM_TV2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2978#define ATOM_DFP6_STD_MODE_TBL_ADDR (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2439 2979
2440#define ATOM_DFP2_EDID_ADDR (ATOM_TV2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2980#define ATOM_DFP2_EDID_ADDR (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2441#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2981#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2442#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2982#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2443 2983
@@ -2457,533 +2997,850 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
2457#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2997#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2458#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2998#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2459 2999
2460#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE) 3000#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2461 3001
2462#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 256) 3002#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256)
2463#define ATOM_STACK_STORAGE_END (ATOM_STACK_STORAGE_START + 512) 3003#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512
2464 3004
2465/* The size below is in Kb! */ 3005//The size below is in Kb!
2466#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) 3006#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
2467 3007
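
Per the comment at the top of this block, a driver turns any ATOM_x_ADDR offset above into a physical address by adding it to mmFB_START (4K aligned) plus ATOMBIOS_VRAM_USAGE_START_ADDR, and ATOM_VRAM_RESERVE_SIZE comes out in KB. A small sketch of that arithmetic; the two base values are passed as parameters since they are defined outside this block, and the reserve-size helper assumes the full header is included.

/* Sketch of the address math described above.  fb_start and usage_start
 * stand in for mmFB_START and ATOMBIOS_VRAM_USAGE_START_ADDR, which are
 * defined elsewhere; only the ATOM_x_ADDR offsets come from this block. */
static unsigned long long atom_vram_table_phys(unsigned long long fb_start,
                                               unsigned long usage_start,
                                               unsigned long atom_x_addr)
{
        /* e.g. atom_x_addr == ATOM_LCD1_EDID_ADDR for the LCD1 EDID block */
        return fb_start + usage_start + atom_x_addr;
}

static unsigned long atom_vram_reserve_bytes(void)
{
        /* ATOM_VRAM_RESERVE_SIZE is expressed in KB, per the comment above */
        return (unsigned long)ATOM_VRAM_RESERVE_SIZE << 10;
}
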
2468#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L 3008#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
2469#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 3009#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
2470#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 3010#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
2471#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0 3011#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0
2472 3012
2473/***********************************************************************************/ 3013/***********************************************************************************/
2474/* Structure used in VRAM_UsageByFirmwareTable */ 3014// Structure used in VRAM_UsageByFirmwareTable
2475/* Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm */ 3015// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm
2476/* at running time. */ 3016// at running time.
2477/* note2: From RV770, the memory is more than 32bit addressable, so we will change */ 3017// note2: From RV770, the memory is more than 32bit addressable, so we will change
2478/* ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains */ 3018// ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains
2479/* exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware */ 3019// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
2480/* (in offset to start of memory address) is KB aligned instead of byte aligned. */ 3020// (in offset to start of memory address) is KB aligned instead of byte aligned.
2481/***********************************************************************************/ 3021/***********************************************************************************/
3022// Note3:
3023/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA or non VGA adapter,
3024for CAIL, The size of FB access area is known, only thing missing is the Offset of FB Access area, so we can have:
3025
3026If (ulStartAddrUsedByFirmware!=0)
3027FBAccessAreaOffset= ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB;
3028Reserved area has been claimed by VBIOS including this FB access area; CAIL doesn't need to reserve any extra area for this purpose
3029else //Non VGA case
3030 if (FB_Size<=2Gb)
3031 FBAccessAreaOffset= FB_Size - usFBUsedbyDrvInKB;
3032 else
3033 FBAccessAreaOffset= Aper_Size - usFBUsedbyDrvInKB
3034
3035CAIL needs to claim an reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in non VGA case.*/
3036
2482#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1 3037#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
2483 3038
2484typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO { 3039typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
2485 ULONG ulStartAddrUsedByFirmware; 3040{
2486 USHORT usFirmwareUseInKb; 3041 ULONG ulStartAddrUsedByFirmware;
2487 USHORT usReserved; 3042 USHORT usFirmwareUseInKb;
2488} ATOM_FIRMWARE_VRAM_RESERVE_INFO; 3043 USHORT usReserved;
3044}ATOM_FIRMWARE_VRAM_RESERVE_INFO;
2489 3045
2490typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE { 3046typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE
2491 ATOM_COMMON_TABLE_HEADER sHeader; 3047{
2492 ATOM_FIRMWARE_VRAM_RESERVE_INFO 3048 ATOM_COMMON_TABLE_HEADER sHeader;
2493 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO]; 3049 ATOM_FIRMWARE_VRAM_RESERVE_INFO asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
2494} ATOM_VRAM_USAGE_BY_FIRMWARE; 3050}ATOM_VRAM_USAGE_BY_FIRMWARE;
2495 3051
2496/****************************************************************************/ 3052// change version to 1.5, when allowing the driver to allocate the vram area for command table access.
2497/* Structure used in GPIO_Pin_LUTTable */ 3053typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5
2498/****************************************************************************/ 3054{
2499typedef struct _ATOM_GPIO_PIN_ASSIGNMENT { 3055 ULONG ulStartAddrUsedByFirmware;
2500 USHORT usGpioPin_AIndex; 3056 USHORT usFirmwareUseInKb;
2501 UCHAR ucGpioPinBitShift; 3057 USHORT usFBUsedByDrvInKb;
2502 UCHAR ucGPIO_ID; 3058}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5;
2503} ATOM_GPIO_PIN_ASSIGNMENT;
2504 3059
2505typedef struct _ATOM_GPIO_PIN_LUT { 3060typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5
2506 ATOM_COMMON_TABLE_HEADER sHeader; 3061{
2507 ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1]; 3062 ATOM_COMMON_TABLE_HEADER sHeader;
2508} ATOM_GPIO_PIN_LUT; 3063 ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
3064}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5;
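
Note3 above sketches, in pseudocode, how CAIL would locate its FB access area once usReserved is repurposed as usFBUsedByDrvInKb, which is exactly what the V1.5 structures just above do. The C rendering below follows that pseudocode; treating every quantity as KB (including ulStartAddrUsedByFirmware) is an assumption made so the subtraction is dimensionally consistent, and fb_size_kb/aper_size_kb are caller-supplied, not part of this table.

/* C rendering of the Note3 pseudocode.  All quantities are assumed to be
 * in KB; fb_size_kb and aper_size_kb come from the caller. */
static unsigned long atom_fb_access_area_offset_kb(
        const ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5 *tbl,
        unsigned long fb_size_kb, unsigned long aper_size_kb)
{
        const ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 *info =
                &tbl->asFirmwareVramReserveInfo[0];

        if (info->ulStartAddrUsedByFirmware != 0) {
                /* VGA case: VBIOS already reserved the area, FB access included */
                return info->ulStartAddrUsedByFirmware - info->usFBUsedByDrvInKb;
        }

        /* Non-VGA case: CAIL claims the area itself */
        if (fb_size_kb <= 2UL * 1024 * 1024)            /* 2GB, expressed in KB */
                return fb_size_kb - info->usFBUsedByDrvInKb;
        return aper_size_kb - info->usFBUsedByDrvInKb;
}
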
3065
3066/****************************************************************************/
3067// Structure used in GPIO_Pin_LUTTable
3068/****************************************************************************/
3069typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
3070{
3071 USHORT usGpioPin_AIndex;
3072 UCHAR ucGpioPinBitShift;
3073 UCHAR ucGPIO_ID;
3074}ATOM_GPIO_PIN_ASSIGNMENT;
2509 3075
2510/****************************************************************************/ 3076typedef struct _ATOM_GPIO_PIN_LUT
2511/* Structure used in ComponentVideoInfoTable */ 3077{
2512/****************************************************************************/ 3078 ATOM_COMMON_TABLE_HEADER sHeader;
3079 ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
3080}ATOM_GPIO_PIN_LUT;
3081
3082/****************************************************************************/
3083// Structure used in ComponentVideoInfoTable
3084/****************************************************************************/
2513#define GPIO_PIN_ACTIVE_HIGH 0x1 3085#define GPIO_PIN_ACTIVE_HIGH 0x1
2514 3086
2515#define MAX_SUPPORTED_CV_STANDARDS 5 3087#define MAX_SUPPORTED_CV_STANDARDS 5
2516 3088
2517/* definitions for ATOM_D_INFO.ucSettings */ 3089// definitions for ATOM_D_INFO.ucSettings
2518#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F /* [4:0] */ 3090#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F // [4:0]
2519#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 /* [6:5] = must be zeroed out */ 3091#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 // [6:5] = must be zeroed out
2520#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 /* [7] */ 3092#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 // [7]
2521 3093
2522typedef struct _ATOM_GPIO_INFO { 3094typedef struct _ATOM_GPIO_INFO
2523 USHORT usAOffset; 3095{
2524 UCHAR ucSettings; 3096 USHORT usAOffset;
2525 UCHAR ucReserved; 3097 UCHAR ucSettings;
2526} ATOM_GPIO_INFO; 3098 UCHAR ucReserved;
3099}ATOM_GPIO_INFO;
2527 3100
2528/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) */ 3101// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector)
2529#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2 3102#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2
2530 3103
2531/* definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i */ 3104// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i
2532#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 /* [7]; */ 3105#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 //[7];
2533#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F /* [6:0] */ 3106#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F //[6:0]
2534 3107
2535/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode */ 3108// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode
2536/* Line 3 out put 5V. */ 3109//Line 3 out put 5V.
2537#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 /* represent gpio 3 state for 16:9 */ 3110#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 //represent gpio 3 state for 16:9
2538#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 /* represent gpio 4 state for 16:9 */ 3111#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 //represent gpio 4 state for 16:9
2539#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0 3112#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0
2540 3113
2541/* Line 3 out put 2.2V */ 3114//Line 3 out put 2.2V
2542#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 /* represent gpio 3 state for 4:3 Letter box */ 3115#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 //represent gpio 3 state for 4:3 Letter box
2543#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 /* represent gpio 4 state for 4:3 Letter box */ 3116#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 //represent gpio 4 state for 4:3 Letter box
2544#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2 3117#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
2545 3118
2546/* Line 3 out put 0V */ 3119//Line 3 out put 0V
2547#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 /* represent gpio 3 state for 4:3 */ 3120#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 //represent gpio 3 state for 4:3
2548#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 /* represent gpio 4 state for 4:3 */ 3121#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 //represent gpio 4 state for 4:3
2549#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4 3122#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4
2550 3123
2551#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F /* bit [5:0] */ 3124#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F // bit [5:0]
2552 3125
2553#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 /* bit 7 */ 3126#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 //bit 7
2554 3127
2555/* GPIO bit index in gpio setting per mode value, also represent the block no. in gpio blocks. */ 3128//GPIO bit index in gpio setting per mode value, also represent the block no. in gpio blocks.
2556#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 /* bit 3 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */ 3129#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode.
2557#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 /* bit 4 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */ 3130#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode.
2558 3131
2559typedef struct _ATOM_COMPONENT_VIDEO_INFO { 3132
2560 ATOM_COMMON_TABLE_HEADER sHeader; 3133typedef struct _ATOM_COMPONENT_VIDEO_INFO
2561 USHORT usMask_PinRegisterIndex; 3134{
2562 USHORT usEN_PinRegisterIndex; 3135 ATOM_COMMON_TABLE_HEADER sHeader;
2563 USHORT usY_PinRegisterIndex; 3136 USHORT usMask_PinRegisterIndex;
2564 USHORT usA_PinRegisterIndex; 3137 USHORT usEN_PinRegisterIndex;
2565 UCHAR ucBitShift; 3138 USHORT usY_PinRegisterIndex;
2566 UCHAR ucPinActiveState; /* ucPinActiveState: Bit0=1 active high, =0 active low */ 3139 USHORT usA_PinRegisterIndex;
2567 ATOM_DTD_FORMAT sReserved; /* must be zeroed out */ 3140 UCHAR ucBitShift;
2568 UCHAR ucMiscInfo; 3141 UCHAR ucPinActiveState; //ucPinActiveState: Bit0=1 active high, =0 active low
2569 UCHAR uc480i; 3142 ATOM_DTD_FORMAT sReserved; // must be zeroed out
2570 UCHAR uc480p; 3143 UCHAR ucMiscInfo;
2571 UCHAR uc720p; 3144 UCHAR uc480i;
2572 UCHAR uc1080i; 3145 UCHAR uc480p;
2573 UCHAR ucLetterBoxMode; 3146 UCHAR uc720p;
2574 UCHAR ucReserved[3]; 3147 UCHAR uc1080i;
2575 UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zere, NTSC type connector */ 3148 UCHAR ucLetterBoxMode;
2576 ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; 3149 UCHAR ucReserved[3];
2577 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; 3150 UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
2578} ATOM_COMPONENT_VIDEO_INFO; 3151 ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
2579 3152 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
2580/* ucTableFormatRevision=2 */ 3153}ATOM_COMPONENT_VIDEO_INFO;
2581/* ucTableContentRevision=1 */ 3154
2582typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 { 3155//ucTableFormatRevision=2
2583 ATOM_COMMON_TABLE_HEADER sHeader; 3156//ucTableContentRevision=1
2584 UCHAR ucMiscInfo; 3157typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21
2585 UCHAR uc480i; 3158{
2586 UCHAR uc480p; 3159 ATOM_COMMON_TABLE_HEADER sHeader;
2587 UCHAR uc720p; 3160 UCHAR ucMiscInfo;
2588 UCHAR uc1080i; 3161 UCHAR uc480i;
2589 UCHAR ucReserved; 3162 UCHAR uc480p;
2590 UCHAR ucLetterBoxMode; 3163 UCHAR uc720p;
2591 UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zere, NTSC type connector */ 3164 UCHAR uc1080i;
2592 ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; 3165 UCHAR ucReserved;
2593 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; 3166 UCHAR ucLetterBoxMode;
2594} ATOM_COMPONENT_VIDEO_INFO_V21; 3167 UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
3168 ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
3169 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
3170}ATOM_COMPONENT_VIDEO_INFO_V21;
2595 3171
2596#define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21 3172#define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21
2597 3173
2598/****************************************************************************/ 3174/****************************************************************************/
2599/* Structure used in object_InfoTable */ 3175// Structure used in object_InfoTable
2600/****************************************************************************/ 3176/****************************************************************************/
2601typedef struct _ATOM_OBJECT_HEADER { 3177typedef struct _ATOM_OBJECT_HEADER
2602 ATOM_COMMON_TABLE_HEADER sHeader; 3178{
2603 USHORT usDeviceSupport; 3179 ATOM_COMMON_TABLE_HEADER sHeader;
2604 USHORT usConnectorObjectTableOffset; 3180 USHORT usDeviceSupport;
2605 USHORT usRouterObjectTableOffset; 3181 USHORT usConnectorObjectTableOffset;
2606 USHORT usEncoderObjectTableOffset; 3182 USHORT usRouterObjectTableOffset;
2607 USHORT usProtectionObjectTableOffset; /* only available when Protection block is independent. */ 3183 USHORT usEncoderObjectTableOffset;
2608 USHORT usDisplayPathTableOffset; 3184 USHORT usProtectionObjectTableOffset; //only available when Protection block is independent.
2609} ATOM_OBJECT_HEADER; 3185 USHORT usDisplayPathTableOffset;
2610 3186}ATOM_OBJECT_HEADER;
2611typedef struct _ATOM_DISPLAY_OBJECT_PATH { 3187
2612 USHORT usDeviceTag; /* supported device */ 3188typedef struct _ATOM_OBJECT_HEADER_V3
2613 USHORT usSize; /* the size of ATOM_DISPLAY_OBJECT_PATH */ 3189{
2614 USHORT usConnObjectId; /* Connector Object ID */ 3190 ATOM_COMMON_TABLE_HEADER sHeader;
2615 USHORT usGPUObjectId; /* GPU ID */ 3191 USHORT usDeviceSupport;
2616 USHORT usGraphicObjIds[1]; /* 1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. */ 3192 USHORT usConnectorObjectTableOffset;
2617} ATOM_DISPLAY_OBJECT_PATH; 3193 USHORT usRouterObjectTableOffset;
2618 3194 USHORT usEncoderObjectTableOffset;
2619typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE { 3195 USHORT usProtectionObjectTableOffset; //only available when Protection block is independent.
2620 UCHAR ucNumOfDispPath; 3196 USHORT usDisplayPathTableOffset;
2621 UCHAR ucVersion; 3197 USHORT usMiscObjectTableOffset;
2622 UCHAR ucPadding[2]; 3198}ATOM_OBJECT_HEADER_V3;
2623 ATOM_DISPLAY_OBJECT_PATH asDispPath[1]; 3199
2624} ATOM_DISPLAY_OBJECT_PATH_TABLE; 3200typedef struct _ATOM_DISPLAY_OBJECT_PATH
2625 3201{
2626typedef struct _ATOM_OBJECT /* each object has this structure */ 3202 USHORT usDeviceTag; //supported device
2627{ 3203 USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
2628 USHORT usObjectID; 3204 USHORT usConnObjectId; //Connector Object ID
2629 USHORT usSrcDstTableOffset; 3205 USHORT usGPUObjectId; //GPU ID
2630 USHORT usRecordOffset; /* this pointing to a bunch of records defined below */ 3206 USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector.
2631 USHORT usReserved; 3207}ATOM_DISPLAY_OBJECT_PATH;
2632} ATOM_OBJECT; 3208
2633 3209typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
2634typedef struct _ATOM_OBJECT_TABLE /* Above 4 object table offset pointing to a bunch of objects all have this structure */ 3210{
2635{ 3211 UCHAR ucNumOfDispPath;
2636 UCHAR ucNumberOfObjects; 3212 UCHAR ucVersion;
2637 UCHAR ucPadding[3]; 3213 UCHAR ucPadding[2];
2638 ATOM_OBJECT asObjects[1]; 3214 ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
2639} ATOM_OBJECT_TABLE; 3215}ATOM_DISPLAY_OBJECT_PATH_TABLE;
2640 3216
2641typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT /* usSrcDstTableOffset pointing to this structure */ 3217
2642{ 3218typedef struct _ATOM_OBJECT //each object has this structure
2643 UCHAR ucNumberOfSrc; 3219{
2644 USHORT usSrcObjectID[1]; 3220 USHORT usObjectID;
2645 UCHAR ucNumberOfDst; 3221 USHORT usSrcDstTableOffset;
2646 USHORT usDstObjectID[1]; 3222 USHORT usRecordOffset; //this pointing to a bunch of records defined below
2647} ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT; 3223 USHORT usReserved;
2648 3224}ATOM_OBJECT;
2649/* Related definitions, all records are differnt but they have a commond header */ 3225
2650typedef struct _ATOM_COMMON_RECORD_HEADER { 3226typedef struct _ATOM_OBJECT_TABLE //Above 4 object table offset pointing to a bunch of objects all have this structure
2651 UCHAR ucRecordType; /* An emun to indicate the record type */ 3227{
2652 UCHAR ucRecordSize; /* The size of the whole record in byte */ 3228 UCHAR ucNumberOfObjects;
2653} ATOM_COMMON_RECORD_HEADER; 3229 UCHAR ucPadding[3];
2654 3230 ATOM_OBJECT asObjects[1];
2655#define ATOM_I2C_RECORD_TYPE 1 3231}ATOM_OBJECT_TABLE;
3232
3233typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset pointing to this structure
3234{
3235 UCHAR ucNumberOfSrc;
3236 USHORT usSrcObjectID[1];
3237 UCHAR ucNumberOfDst;
3238 USHORT usDstObjectID[1];
3239}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
3240
3241
3242//Two definitions below are for OPM on MXM module designs
3243
3244#define EXT_HPDPIN_LUTINDEX_0 0
3245#define EXT_HPDPIN_LUTINDEX_1 1
3246#define EXT_HPDPIN_LUTINDEX_2 2
3247#define EXT_HPDPIN_LUTINDEX_3 3
3248#define EXT_HPDPIN_LUTINDEX_4 4
3249#define EXT_HPDPIN_LUTINDEX_5 5
3250#define EXT_HPDPIN_LUTINDEX_6 6
3251#define EXT_HPDPIN_LUTINDEX_7 7
3252#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES (EXT_HPDPIN_LUTINDEX_7+1)
3253
3254#define EXT_AUXDDC_LUTINDEX_0 0
3255#define EXT_AUXDDC_LUTINDEX_1 1
3256#define EXT_AUXDDC_LUTINDEX_2 2
3257#define EXT_AUXDDC_LUTINDEX_3 3
3258#define EXT_AUXDDC_LUTINDEX_4 4
3259#define EXT_AUXDDC_LUTINDEX_5 5
3260#define EXT_AUXDDC_LUTINDEX_6 6
3261#define EXT_AUXDDC_LUTINDEX_7 7
3262#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1)
3263
3264typedef struct _EXT_DISPLAY_PATH
3265{
3266 USHORT usDeviceTag; //A bit vector to show what devices are supported
3267 USHORT usDeviceACPIEnum; //16bit device ACPI id.
3268 USHORT usDeviceConnector; //A physical connector for displays to plug in, using object connector definitions
3269 UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT
3270 UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT
3271 USHORT usExtEncoderObjId; //external encoder object id
3272 USHORT usReserved[3];
3273}EXT_DISPLAY_PATH;
3274
3275#define NUMBER_OF_UCHAR_FOR_GUID 16
3276#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
3277
3278typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
3279{
3280 ATOM_COMMON_TABLE_HEADER sHeader;
3281 UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string
3282 EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
3283 UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
3284 UCHAR Reserved [7]; // for potential expansion
3285}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
3286
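
ucChecksum above is described as a value that makes the byte sum of the whole structure equal 0x0. A sketch of the corresponding validity check; taking the table length from sHeader.usStructureSize is an assumption about ATOM_COMMON_TABLE_HEADER, which is declared earlier in this header.

/* Sketch: byte-sum check for ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO.  Assumes
 * sHeader.usStructureSize holds the table length in bytes.  Returns non-zero
 * when the bytes wrap to 0x00, i.e. ucChecksum was filled in correctly. */
static int atom_ext_disp_conn_info_valid(
        const ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *info)
{
        const unsigned char *p = (const unsigned char *)info;
        unsigned char sum = 0;
        unsigned int i;

        for (i = 0; i < info->sHeader.usStructureSize; i++)
                sum += p[i];

        return sum == 0;
}
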
3287//Related definitions, all records are different but they have a common header
3288typedef struct _ATOM_COMMON_RECORD_HEADER
3289{
3290 UCHAR ucRecordType; //An enum to indicate the record type
3291 UCHAR ucRecordSize; //The size of the whole record in byte
3292}ATOM_COMMON_RECORD_HEADER;
3293
3294
3295#define ATOM_I2C_RECORD_TYPE 1
2656#define ATOM_HPD_INT_RECORD_TYPE 2 3296#define ATOM_HPD_INT_RECORD_TYPE 2
2657#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE 3 3297#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE 3
2658#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE 4 3298#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE 4
2659#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */ 3299#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
2660#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */ 3300#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
2661#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE 7 3301#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE 7
2662#define ATOM_JTAG_RECORD_TYPE 8 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */ 3302#define ATOM_JTAG_RECORD_TYPE 8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
2663#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE 9 3303#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE 9
2664#define ATOM_ENCODER_DVO_CF_RECORD_TYPE 10 3304#define ATOM_ENCODER_DVO_CF_RECORD_TYPE 10
2665#define ATOM_CONNECTOR_CF_RECORD_TYPE 11 3305#define ATOM_CONNECTOR_CF_RECORD_TYPE 11
2666#define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE 12 3306#define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE 12
2667#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE 13 3307#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE 13
2668#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14 3308#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14
2669#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15 3309#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
2670 3310#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE 16 //This is for the case when connectors are not known to object table
2671/* Must be updated when new record type is added,equal to that record definition! */ 3311#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table
2672#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_CF_RECORD_TYPE 3312#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another obj described by the record
2673 3313#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
2674typedef struct _ATOM_I2C_RECORD { 3314
2675 ATOM_COMMON_RECORD_HEADER sheader; 3315
2676 ATOM_I2C_ID_CONFIG sucI2cId; 3316//Must be updated when new record type is added,equal to that record definition!
2677 UCHAR ucI2CAddr; /* The slave address, it's 0 when the record is attached to connector for DDC */ 3317#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE
2678} ATOM_I2C_RECORD; 3318
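
Every record attached to an ATOM_OBJECT starts with ATOM_COMMON_RECORD_HEADER, and ucRecordSize is the whole record's length in bytes, so a parser can hop from record to record looking for the type it needs. The traversal below is a sketch: stopping on a zero or out-of-range record type, or at a caller-supplied end pointer, is an assumption this header does not spell out.

/* Sketch: walk a variable-length record list.  'records' points at the data
 * reached through ATOM_OBJECT.usRecordOffset, 'end' bounds the table. */
static ATOM_COMMON_RECORD_HEADER *
atom_find_object_record(UCHAR *records, UCHAR *end, UCHAR wanted_type)
{
        UCHAR *p = records;

        while (p + sizeof(ATOM_COMMON_RECORD_HEADER) <= end) {
                ATOM_COMMON_RECORD_HEADER *rec = (ATOM_COMMON_RECORD_HEADER *)p;

                if (rec->ucRecordType == 0 || rec->ucRecordSize == 0 ||
                    rec->ucRecordType > ATOM_MAX_OBJECT_RECORD_NUMBER)
                        break;                  /* assumed end-of-list markers */
                if (rec->ucRecordType == wanted_type)
                        return rec;
                p += rec->ucRecordSize;         /* advance to the next record */
        }
        return NULL;
}

A caller looking for a connector's DDC line, for instance, would ask for ATOM_I2C_RECORD_TYPE and cast the result to ATOM_I2C_RECORD (defined just below).
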
2679 3319typedef struct _ATOM_I2C_RECORD
2680typedef struct _ATOM_HPD_INT_RECORD { 3320{
2681 ATOM_COMMON_RECORD_HEADER sheader; 3321 ATOM_COMMON_RECORD_HEADER sheader;
2682 UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */ 3322 ATOM_I2C_ID_CONFIG sucI2cId;
2683 UCHAR ucPlugged_PinState; 3323 UCHAR ucI2CAddr; //The slave address, it's 0 when the record is attached to connector for DDC
2684} ATOM_HPD_INT_RECORD; 3324}ATOM_I2C_RECORD;
2685 3325
2686typedef struct _ATOM_OUTPUT_PROTECTION_RECORD { 3326typedef struct _ATOM_HPD_INT_RECORD
2687 ATOM_COMMON_RECORD_HEADER sheader; 3327{
2688 UCHAR ucProtectionFlag; 3328 ATOM_COMMON_RECORD_HEADER sheader;
2689 UCHAR ucReserved; 3329 UCHAR ucHPDIntGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info
2690} ATOM_OUTPUT_PROTECTION_RECORD; 3330 UCHAR ucPlugged_PinState;
2691 3331}ATOM_HPD_INT_RECORD;
2692typedef struct _ATOM_CONNECTOR_DEVICE_TAG { 3332
2693 ULONG ulACPIDeviceEnum; /* Reserved for now */ 3333
2694 USHORT usDeviceID; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT" */ 3334typedef struct _ATOM_OUTPUT_PROTECTION_RECORD
2695 USHORT usPadding; 3335{
2696} ATOM_CONNECTOR_DEVICE_TAG; 3336 ATOM_COMMON_RECORD_HEADER sheader;
2697 3337 UCHAR ucProtectionFlag;
2698typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD { 3338 UCHAR ucReserved;
2699 ATOM_COMMON_RECORD_HEADER sheader; 3339}ATOM_OUTPUT_PROTECTION_RECORD;
2700 UCHAR ucNumberOfDevice; 3340
2701 UCHAR ucReserved; 3341typedef struct _ATOM_CONNECTOR_DEVICE_TAG
2702 ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation */ 3342{
2703} ATOM_CONNECTOR_DEVICE_TAG_RECORD; 3343 ULONG ulACPIDeviceEnum; //Reserved for now
2704 3344 USHORT usDeviceID; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT"
2705typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD { 3345 USHORT usPadding;
2706 ATOM_COMMON_RECORD_HEADER sheader; 3346}ATOM_CONNECTOR_DEVICE_TAG;
2707 UCHAR ucConfigGPIOID; 3347
2708 UCHAR ucConfigGPIOState; /* Set to 1 when it's active high to enable external flow in */ 3348typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD
2709 UCHAR ucFlowinGPIPID; 3349{
2710 UCHAR ucExtInGPIPID; 3350 ATOM_COMMON_RECORD_HEADER sheader;
2711} ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD; 3351 UCHAR ucNumberOfDevice;
2712 3352 UCHAR ucReserved;
2713typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD { 3353 ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
2714 ATOM_COMMON_RECORD_HEADER sheader; 3354}ATOM_CONNECTOR_DEVICE_TAG_RECORD;
2715 UCHAR ucCTL1GPIO_ID; 3355
2716 UCHAR ucCTL1GPIOState; /* Set to 1 when it's active high */ 3356
2717 UCHAR ucCTL2GPIO_ID; 3357typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD
2718 UCHAR ucCTL2GPIOState; /* Set to 1 when it's active high */ 3358{
2719 UCHAR ucCTL3GPIO_ID; 3359 ATOM_COMMON_RECORD_HEADER sheader;
2720 UCHAR ucCTL3GPIOState; /* Set to 1 when it's active high */ 3360 UCHAR ucConfigGPIOID;
2721 UCHAR ucCTLFPGA_IN_ID; 3361 UCHAR ucConfigGPIOState; //Set to 1 when it's active high to enable external flow in
2722 UCHAR ucPadding[3]; 3362 UCHAR ucFlowinGPIPID;
2723} ATOM_ENCODER_FPGA_CONTROL_RECORD; 3363 UCHAR ucExtInGPIPID;
2724 3364}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
2725typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD { 3365
2726 ATOM_COMMON_RECORD_HEADER sheader; 3366typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD
2727 UCHAR ucGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */ 3367{
2728 UCHAR ucTVActiveState; /* Indicating when the pin==0 or 1 when TV is connected */ 3368 ATOM_COMMON_RECORD_HEADER sheader;
2729} ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD; 3369 UCHAR ucCTL1GPIO_ID;
2730 3370 UCHAR ucCTL1GPIOState; //Set to 1 when it's active high
2731typedef struct _ATOM_JTAG_RECORD { 3371 UCHAR ucCTL2GPIO_ID;
2732 ATOM_COMMON_RECORD_HEADER sheader; 3372 UCHAR ucCTL2GPIOState; //Set to 1 when it's active high
2733 UCHAR ucTMSGPIO_ID; 3373 UCHAR ucCTL3GPIO_ID;
2734 UCHAR ucTMSGPIOState; /* Set to 1 when it's active high */ 3374 UCHAR ucCTL3GPIOState; //Set to 1 when it's active high
2735 UCHAR ucTCKGPIO_ID; 3375 UCHAR ucCTLFPGA_IN_ID;
2736 UCHAR ucTCKGPIOState; /* Set to 1 when it's active high */ 3376 UCHAR ucPadding[3];
2737 UCHAR ucTDOGPIO_ID; 3377}ATOM_ENCODER_FPGA_CONTROL_RECORD;
2738 UCHAR ucTDOGPIOState; /* Set to 1 when it's active high */ 3378
2739 UCHAR ucTDIGPIO_ID; 3379typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD
2740 UCHAR ucTDIGPIOState; /* Set to 1 when it's active high */ 3380{
2741 UCHAR ucPadding[2]; 3381 ATOM_COMMON_RECORD_HEADER sheader;
2742} ATOM_JTAG_RECORD; 3382 UCHAR ucGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info
2743 3383 UCHAR ucTVActiveState; //Indicating when the pin==0 or 1 when TV is connected
2744/* The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually */ 3384}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
2745typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR { 3385
2746 UCHAR ucGPIOID; /* GPIO_ID, find the corresponding ID in GPIO_LUT table */ 3386typedef struct _ATOM_JTAG_RECORD
2747 UCHAR ucGPIO_PinState; /* Pin state showing how to set-up the pin */ 3387{
2748} ATOM_GPIO_PIN_CONTROL_PAIR; 3388 ATOM_COMMON_RECORD_HEADER sheader;
2749 3389 UCHAR ucTMSGPIO_ID;
2750typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD { 3390 UCHAR ucTMSGPIOState; //Set to 1 when it's active high
2751 ATOM_COMMON_RECORD_HEADER sheader; 3391 UCHAR ucTCKGPIO_ID;
2752 UCHAR ucFlags; /* Future expnadibility */ 3392 UCHAR ucTCKGPIOState; //Set to 1 when it's active high
2753 UCHAR ucNumberOfPins; /* Number of GPIO pins used to control the object */ 3393 UCHAR ucTDOGPIO_ID;
2754 ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; /* the real gpio pin pair determined by number of pins ucNumberOfPins */ 3394 UCHAR ucTDOGPIOState; //Set to 1 when it's active high
2755} ATOM_OBJECT_GPIO_CNTL_RECORD; 3395 UCHAR ucTDIGPIO_ID;
2756 3396 UCHAR ucTDIGPIOState; //Set to 1 when it's active high
2757/* Definitions for GPIO pin state */ 3397 UCHAR ucPadding[2];
3398}ATOM_JTAG_RECORD;
3399
3400
3401//The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually
3402typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR
3403{
3404 UCHAR ucGPIOID; // GPIO_ID, find the corresponding ID in GPIO_LUT table
3405 UCHAR ucGPIO_PinState; // Pin state showing how to set-up the pin
3406}ATOM_GPIO_PIN_CONTROL_PAIR;
3407
3408typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
3409{
3410 ATOM_COMMON_RECORD_HEADER sheader;
3411 UCHAR ucFlags; // Future expandability
3412 UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object
3413 ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real gpio pin pair determined by number of pins ucNumberOfPins
3414}ATOM_OBJECT_GPIO_CNTL_RECORD;
3415
3416//Definitions for GPIO pin state
2758#define GPIO_PIN_TYPE_INPUT 0x00 3417#define GPIO_PIN_TYPE_INPUT 0x00
2759#define GPIO_PIN_TYPE_OUTPUT 0x10 3418#define GPIO_PIN_TYPE_OUTPUT 0x10
2760#define GPIO_PIN_TYPE_HW_CONTROL 0x20 3419#define GPIO_PIN_TYPE_HW_CONTROL 0x20
2761 3420
2762/* For GPIO_PIN_TYPE_OUTPUT the following is defined */ 3421//For GPIO_PIN_TYPE_OUTPUT the following is defined
2763#define GPIO_PIN_OUTPUT_STATE_MASK 0x01 3422#define GPIO_PIN_OUTPUT_STATE_MASK 0x01
2764#define GPIO_PIN_OUTPUT_STATE_SHIFT 0 3423#define GPIO_PIN_OUTPUT_STATE_SHIFT 0
2765#define GPIO_PIN_STATE_ACTIVE_LOW 0x0 3424#define GPIO_PIN_STATE_ACTIVE_LOW 0x0
2766#define GPIO_PIN_STATE_ACTIVE_HIGH 0x1 3425#define GPIO_PIN_STATE_ACTIVE_HIGH 0x1
2767 3426
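
An ATOM_OBJECT_GPIO_CNTL_RECORD carries ucNumberOfPins (GPIO id, pin state) pairs, and the pin-state byte is decoded against the GPIO_PIN_TYPE_* and GPIO_PIN_OUTPUT_STATE/ACTIVE definitions above. The sketch below applies such a record through a caller-supplied callback; the callback is hypothetical, and treating the upper nibble of ucGPIO_PinState as the pin type is an assumption consistent with the 0x00/0x10/0x20 values.

/* Sketch: hand each pin of an ATOM_OBJECT_GPIO_CNTL_RECORD to a hypothetical
 * set_gpio callback, decoding the state byte with the masks defined above. */
typedef void (*atom_set_gpio_fn)(UCHAR gpio_id, int is_output, int active_high);

static void atom_apply_gpio_cntl_record(const ATOM_OBJECT_GPIO_CNTL_RECORD *rec,
                                        atom_set_gpio_fn set_gpio)
{
        int i;

        for (i = 0; i < rec->ucNumberOfPins; i++) {
                const ATOM_GPIO_PIN_CONTROL_PAIR *pair = &rec->asGpio[i];
                UCHAR state = pair->ucGPIO_PinState;
                int is_output = (state & 0xF0) == GPIO_PIN_TYPE_OUTPUT;
                int active_high = (state & GPIO_PIN_OUTPUT_STATE_MASK) ==
                                  GPIO_PIN_STATE_ACTIVE_HIGH;

                set_gpio(pair->ucGPIOID, is_output, active_high);
        }
}
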
2768typedef struct _ATOM_ENCODER_DVO_CF_RECORD { 3427// Indexes to GPIO array in GLSync record
2769 ATOM_COMMON_RECORD_HEADER sheader; 3428#define ATOM_GPIO_INDEX_GLSYNC_REFCLK 0
2770 ULONG ulStrengthControl; /* DVOA strength control for CF */ 3429#define ATOM_GPIO_INDEX_GLSYNC_HSYNC 1
2771 UCHAR ucPadding[2]; 3430#define ATOM_GPIO_INDEX_GLSYNC_VSYNC 2
2772} ATOM_ENCODER_DVO_CF_RECORD; 3431#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ 3
3432#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT 4
3433#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
3434#define ATOM_GPIO_INDEX_GLSYNC_V_RESET 6
3435#define ATOM_GPIO_INDEX_GLSYNC_MAX 7
3436
3437typedef struct _ATOM_ENCODER_DVO_CF_RECORD
3438{
3439 ATOM_COMMON_RECORD_HEADER sheader;
3440 ULONG ulStrengthControl; // DVOA strength control for CF
3441 UCHAR ucPadding[2];
3442}ATOM_ENCODER_DVO_CF_RECORD;
2773 3443
2774/* value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle */ 3444// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
2775#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1 3445#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
2776#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2 3446#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2
2777 3447
2778typedef struct _ATOM_CONNECTOR_CF_RECORD { 3448typedef struct _ATOM_CONNECTOR_CF_RECORD
2779 ATOM_COMMON_RECORD_HEADER sheader; 3449{
2780 USHORT usMaxPixClk; 3450 ATOM_COMMON_RECORD_HEADER sheader;
2781 UCHAR ucFlowCntlGpioId; 3451 USHORT usMaxPixClk;
2782 UCHAR ucSwapCntlGpioId; 3452 UCHAR ucFlowCntlGpioId;
2783 UCHAR ucConnectedDvoBundle; 3453 UCHAR ucSwapCntlGpioId;
2784 UCHAR ucPadding; 3454 UCHAR ucConnectedDvoBundle;
2785} ATOM_CONNECTOR_CF_RECORD; 3455 UCHAR ucPadding;
2786 3456}ATOM_CONNECTOR_CF_RECORD;
2787typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD { 3457
2788 ATOM_COMMON_RECORD_HEADER sheader; 3458typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD
2789 ATOM_DTD_FORMAT asTiming; 3459{
2790} ATOM_CONNECTOR_HARDCODE_DTD_RECORD; 3460 ATOM_COMMON_RECORD_HEADER sheader;
2791 3461 ATOM_DTD_FORMAT asTiming;
2792typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD { 3462}ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
2793 ATOM_COMMON_RECORD_HEADER sheader; /* ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE */ 3463
2794 UCHAR ucSubConnectorType; /* CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A */ 3464typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD
2795 UCHAR ucReserved; 3465{
2796} ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD; 3466 ATOM_COMMON_RECORD_HEADER sheader; //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE
2797 3467 UCHAR ucSubConnectorType; //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A
2798typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD { 3468 UCHAR ucReserved;
2799 ATOM_COMMON_RECORD_HEADER sheader; 3469}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
2800 UCHAR ucMuxType; /* decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state */ 3470
2801 UCHAR ucMuxControlPin; 3471
2802 UCHAR ucMuxState[2]; /* for alligment purpose */ 3472typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD
2803} ATOM_ROUTER_DDC_PATH_SELECT_RECORD; 3473{
2804 3474 ATOM_COMMON_RECORD_HEADER sheader;
2805typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD { 3475 UCHAR ucMuxType; //decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state
2806 ATOM_COMMON_RECORD_HEADER sheader; 3476 UCHAR ucMuxControlPin;
2807 UCHAR ucMuxType; 3477 UCHAR ucMuxState[2]; //for alligment purpose
2808 UCHAR ucMuxControlPin; 3477 UCHAR ucMuxState[2]; //for alignment purpose
2809 UCHAR ucMuxState[2]; /* for alligment purpose */ 3479
2810} ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD; 3480typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD
2811 3481{
2812/* define ucMuxType */ 3482 ATOM_COMMON_RECORD_HEADER sheader;
3483 UCHAR ucMuxType;
3484 UCHAR ucMuxControlPin;
3485 UCHAR ucMuxState[2]; //for alignment purpose
3486}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
3487
3488// define ucMuxType
2813#define ATOM_ROUTER_MUX_PIN_STATE_MASK 0x0f 3489#define ATOM_ROUTER_MUX_PIN_STATE_MASK 0x0f
2814#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT 0x01 3490#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT 0x01
2815 3491
2816/****************************************************************************/ 3492typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
2817/* ASIC voltage data table */ 3493{
2818/****************************************************************************/ 3494 ATOM_COMMON_RECORD_HEADER sheader;
2819typedef struct _ATOM_VOLTAGE_INFO_HEADER { 3495 UCHAR ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES]; //An fixed size array which maps external pins to internal GPIO_PIN_INFO table
2820 USHORT usVDDCBaseLevel; /* In number of 50mv unit */ 3496}ATOM_CONNECTOR_HPDPIN_LUT_RECORD;
2821 USHORT usReserved; /* For possible extension table offset */ 3497
2822 UCHAR ucNumOfVoltageEntries; 3498typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
2823 UCHAR ucBytesPerVoltageEntry; 3499{
2824 UCHAR ucVoltageStep; /* Indicating in how many mv increament is one step, 0.5mv unit */ 3500 ATOM_COMMON_RECORD_HEADER sheader;
2825 UCHAR ucDefaultVoltageEntry; 3501 ATOM_I2C_ID_CONFIG ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES]; //An fixed size array which maps external pins to internal DDC ID
2826 UCHAR ucVoltageControlI2cLine; 3502}ATOM_CONNECTOR_AUXDDC_LUT_RECORD;
2827 UCHAR ucVoltageControlAddress; 3503
2828 UCHAR ucVoltageControlOffset; 3504typedef struct _ATOM_OBJECT_LINK_RECORD
2829} ATOM_VOLTAGE_INFO_HEADER; 3505{
2830 3506 ATOM_COMMON_RECORD_HEADER sheader;
2831typedef struct _ATOM_VOLTAGE_INFO { 3507 USHORT usObjectID; //could be connector, encorder or other object in object.h
2832 ATOM_COMMON_TABLE_HEADER sHeader; 3508}ATOM_OBJECT_LINK_RECORD;
2833 ATOM_VOLTAGE_INFO_HEADER viHeader; 3509
2834 UCHAR ucVoltageEntries[64]; /* 64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry */ 3510typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
2835} ATOM_VOLTAGE_INFO; 3511{
2836 3512 ATOM_COMMON_RECORD_HEADER sheader;
2837typedef struct _ATOM_VOLTAGE_FORMULA { 3513 USHORT usReserved;
2838 USHORT usVoltageBaseLevel; /* In number of 1mv unit */ 3514}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
2839 USHORT usVoltageStep; /* Indicating in how many mv increament is one step, 1mv unit */ 3515
2840 UCHAR ucNumOfVoltageEntries; /* Number of Voltage Entry, which indicate max Voltage */ 3516/****************************************************************************/
2841 UCHAR ucFlag; /* bit0=0 :step is 1mv =1 0.5mv */ 3517// ASIC voltage data table
2842 UCHAR ucBaseVID; /* if there is no lookup table, VID= BaseVID + ( Vol - BaseLevle ) /VoltageStep */ 3518/****************************************************************************/
2843 UCHAR ucReserved; 3519typedef struct _ATOM_VOLTAGE_INFO_HEADER
2844 UCHAR ucVIDAdjustEntries[32]; /* 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries */ 3520{
2845} ATOM_VOLTAGE_FORMULA; 3521 USHORT usVDDCBaseLevel; //In number of 50mv unit
2846 3522 USHORT usReserved; //For possible extension table offset
2847typedef struct _ATOM_VOLTAGE_CONTROL { 3523 UCHAR ucNumOfVoltageEntries;
2848 UCHAR ucVoltageControlId; /* Indicate it is controlled by I2C or GPIO or HW state machine */ 3524 UCHAR ucBytesPerVoltageEntry;
2849 UCHAR ucVoltageControlI2cLine; 3525 UCHAR ucVoltageStep; //Indicating in how many mv increament is one step, 0.5mv unit
2850 UCHAR ucVoltageControlAddress; 3526 UCHAR ucDefaultVoltageEntry;
2851 UCHAR ucVoltageControlOffset; 3527 UCHAR ucVoltageControlI2cLine;
2852 USHORT usGpioPin_AIndex; /* GPIO_PAD register index */ 3528 UCHAR ucVoltageControlAddress;
2853 UCHAR ucGpioPinBitShift[9]; /* at most 8 pin support 255 VIDs, termintate with 0xff */ 3529 UCHAR ucVoltageControlOffset;
2854 UCHAR ucReserved; 3530}ATOM_VOLTAGE_INFO_HEADER;
2855} ATOM_VOLTAGE_CONTROL; 3531
2856 3532typedef struct _ATOM_VOLTAGE_INFO
2857/* Define ucVoltageControlId */ 3533{
3534 ATOM_COMMON_TABLE_HEADER sHeader;
3535 ATOM_VOLTAGE_INFO_HEADER viHeader;
3536 UCHAR ucVoltageEntries[64]; //64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry
3537}ATOM_VOLTAGE_INFO;
3538
3539
3540typedef struct _ATOM_VOLTAGE_FORMULA
3541{
3542 USHORT usVoltageBaseLevel; // In number of 1mv unit
3543 USHORT usVoltageStep; // Indicating in how many mv increament is one step, 1mv unit
3544 UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage
3545 UCHAR ucFlag; // bit0=0 :step is 1mv =1 0.5mv
3546 UCHAR ucBaseVID; // if there is no lookup table, VID= BaseVID + ( Vol - BaseLevle ) /VoltageStep
3547 UCHAR ucReserved;
3548 UCHAR ucVIDAdjustEntries[32]; // 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries
3549}ATOM_VOLTAGE_FORMULA;
3550
3551typedef struct _VOLTAGE_LUT_ENTRY
3552{
3553 USHORT usVoltageCode; // The Voltage ID, either GPIO or I2C code
3554 USHORT usVoltageValue; // The corresponding Voltage Value, in mV
3555}VOLTAGE_LUT_ENTRY;
3556
3557typedef struct _ATOM_VOLTAGE_FORMULA_V2
3558{
3559 UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage
3560 UCHAR ucReserved[3];
3561 VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32];// 32 is for allocation, the actual number of entries is in ucNumOfVoltageEntries
3562}ATOM_VOLTAGE_FORMULA_V2;
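
The comments on ATOM_VOLTAGE_FORMULA give the conversion used when no lookup table exists: VID = BaseVID + (Vol - BaseLevel) / VoltageStep, with ucFlag bit0 selecting a 1mv or 0.5mv step. The sketch below does that arithmetic in 0.5mv units so both flag settings stay integral; it is an illustration of the formula, not driver code, and the V2 variant above would instead search its VOLTAGE_LUT_ENTRY table for the closest usVoltageValue.

/* Sketch of the comment's formula: VID = BaseVID + (Vol - BaseLevel)/Step.
 * Everything is scaled to 0.5mv internally so the ucFlag bit0 case (1mv vs
 * 0.5mv step) needs no floating point. */
static UCHAR atom_voltage_mv_to_vid(const ATOM_VOLTAGE_FORMULA *f, USHORT vol_mv)
{
        unsigned int step_half_mv = (f->ucFlag & 1) ? f->usVoltageStep
                                                    : f->usVoltageStep * 2;
        unsigned int delta_half_mv;

        if (step_half_mv == 0 || vol_mv <= f->usVoltageBaseLevel)
                return f->ucBaseVID;

        delta_half_mv = (unsigned int)(vol_mv - f->usVoltageBaseLevel) * 2;
        return (UCHAR)(f->ucBaseVID + delta_half_mv / step_half_mv);
}
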
3563
3564typedef struct _ATOM_VOLTAGE_CONTROL
3565{
3566 UCHAR ucVoltageControlId; //Indicate it is controlled by I2C or GPIO or HW state machine
3567 UCHAR ucVoltageControlI2cLine;
3568 UCHAR ucVoltageControlAddress;
3569 UCHAR ucVoltageControlOffset;
3570 USHORT usGpioPin_AIndex; //GPIO_PAD register index
3571 UCHAR ucGpioPinBitShift[9]; //at most 8 pins support 255 VIDs, terminate with 0xff
3572 UCHAR ucReserved;
3573}ATOM_VOLTAGE_CONTROL;
3574
3575// Define ucVoltageControlId
2858#define VOLTAGE_CONTROLLED_BY_HW 0x00 3576#define VOLTAGE_CONTROLLED_BY_HW 0x00
2859#define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F 3577#define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F
2860#define VOLTAGE_CONTROLLED_BY_GPIO 0x80 3578#define VOLTAGE_CONTROLLED_BY_GPIO 0x80
2861#define VOLTAGE_CONTROL_ID_LM64 0x01 /* I2C control, used for R5xx Core Voltage */ 3579#define VOLTAGE_CONTROL_ID_LM64 0x01 //I2C control, used for R5xx Core Voltage
2862#define VOLTAGE_CONTROL_ID_DAC 0x02 /* I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI */ 3580#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
2863#define VOLTAGE_CONTROL_ID_VT116xM 0x03 /* I2C control, used for R6xx Core Voltage */ 3581#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage
2864#define VOLTAGE_CONTROL_ID_DS4402 0x04 3582#define VOLTAGE_CONTROL_ID_DS4402 0x04
2865 3583
2866typedef struct _ATOM_VOLTAGE_OBJECT { 3584typedef struct _ATOM_VOLTAGE_OBJECT
2867 UCHAR ucVoltageType; /* Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI */ 3585{
2868 UCHAR ucSize; /* Size of Object */ 3586 UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
2869 ATOM_VOLTAGE_CONTROL asControl; /* describ how to control */ 3587 UCHAR ucSize; //Size of Object
2870 ATOM_VOLTAGE_FORMULA asFormula; /* Indicate How to convert real Voltage to VID */ 3588 ATOM_VOLTAGE_CONTROL asControl; //describe how to control
2871} ATOM_VOLTAGE_OBJECT; 3589 ATOM_VOLTAGE_FORMULA asFormula; //Indicate How to convert real Voltage to VID
2872 3590}ATOM_VOLTAGE_OBJECT;
2873typedef struct _ATOM_VOLTAGE_OBJECT_INFO { 3591
2874 ATOM_COMMON_TABLE_HEADER sHeader; 3592typedef struct _ATOM_VOLTAGE_OBJECT_V2
2875 ATOM_VOLTAGE_OBJECT asVoltageObj[3]; /* Info for Voltage control */ 3593{
2876} ATOM_VOLTAGE_OBJECT_INFO; 3594 UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
2877 3595 UCHAR ucSize; //Size of Object
2878typedef struct _ATOM_LEAKID_VOLTAGE { 3596 ATOM_VOLTAGE_CONTROL asControl; //describe how to control
2879 UCHAR ucLeakageId; 3597 ATOM_VOLTAGE_FORMULA_V2 asFormula; //Indicate How to convert real Voltage to VID
2880 UCHAR ucReserved; 3598}ATOM_VOLTAGE_OBJECT_V2;
2881 USHORT usVoltage; 3599
2882} ATOM_LEAKID_VOLTAGE; 3600typedef struct _ATOM_VOLTAGE_OBJECT_INFO
2883 3601{
2884typedef struct _ATOM_ASIC_PROFILE_VOLTAGE { 3602 ATOM_COMMON_TABLE_HEADER sHeader;
2885 UCHAR ucProfileId; 3603 ATOM_VOLTAGE_OBJECT asVoltageObj[3]; //Info for Voltage control
2886 UCHAR ucReserved; 3604}ATOM_VOLTAGE_OBJECT_INFO;
2887 USHORT usSize; 3605
2888 USHORT usEfuseSpareStartAddr; 3606typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V2
2889 USHORT usFuseIndex[8]; /* from LSB to MSB, Max 8bit,end of 0xffff if less than 8 efuse id, */ 3607{
2890 ATOM_LEAKID_VOLTAGE asLeakVol[2]; /* Leakid and relatd voltage */ 3608 ATOM_COMMON_TABLE_HEADER sHeader;
2891} ATOM_ASIC_PROFILE_VOLTAGE; 3609 ATOM_VOLTAGE_OBJECT_V2 asVoltageObj[3]; //Info for Voltage control
2892 3610}ATOM_VOLTAGE_OBJECT_INFO_V2;
2893/* ucProfileId */ 3611
2894#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1 3612typedef struct _ATOM_LEAKID_VOLTAGE
3613{
3614 UCHAR ucLeakageId;
3615 UCHAR ucReserved;
3616 USHORT usVoltage;
3617}ATOM_LEAKID_VOLTAGE;
3618
3619typedef struct _ATOM_ASIC_PROFILE_VOLTAGE
3620{
3621 UCHAR ucProfileId;
3622 UCHAR ucReserved;
3623 USHORT usSize;
3624 USHORT usEfuseSpareStartAddr;
3625 USHORT usFuseIndex[8]; //from LSB to MSB, Max 8bit,end of 0xffff if less than 8 efuse id,
2890 ATOM_LEAKID_VOLTAGE asLeakVol[2]; /* Leakid and related voltage */ 3626 ATOM_LEAKID_VOLTAGE asLeakVol[2]; //Leakid and related voltage
3627}ATOM_ASIC_PROFILE_VOLTAGE;
3628
3629//ucProfileId
3630#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1
2895#define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE 1 3631#define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE 1
2896#define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE 2 3632#define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE 2
2897 3633
2898typedef struct _ATOM_ASIC_PROFILING_INFO { 3634typedef struct _ATOM_ASIC_PROFILING_INFO
2899 ATOM_COMMON_TABLE_HEADER asHeader; 3635{
2900 ATOM_ASIC_PROFILE_VOLTAGE asVoltage; 3636 ATOM_COMMON_TABLE_HEADER asHeader;
2901} ATOM_ASIC_PROFILING_INFO; 3637 ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
2902 3638}ATOM_ASIC_PROFILING_INFO;
2903typedef struct _ATOM_POWER_SOURCE_OBJECT { 3639
2904 UCHAR ucPwrSrcId; /* Power source */ 3640typedef struct _ATOM_POWER_SOURCE_OBJECT
2905 UCHAR ucPwrSensorType; /* GPIO, I2C or none */ 3641{
2906 UCHAR ucPwrSensId; /* if GPIO detect, it is GPIO id, if I2C detect, it is I2C id */ 3642 UCHAR ucPwrSrcId; // Power source
2907 UCHAR ucPwrSensSlaveAddr; /* Slave address if I2C detect */ 3643 UCHAR ucPwrSensorType; // GPIO, I2C or none
2908 UCHAR ucPwrSensRegIndex; /* I2C register Index if I2C detect */ 3644 UCHAR ucPwrSensId; // if GPIO detect, it is GPIO id, if I2C detect, it is I2C id
2909 UCHAR ucPwrSensRegBitMask; /* detect which bit is used if I2C detect */ 3645 UCHAR ucPwrSensSlaveAddr; // Slave address if I2C detect
2910 UCHAR ucPwrSensActiveState; /* high active or low active */ 3646 UCHAR ucPwrSensRegIndex; // I2C register Index if I2C detect
2911 UCHAR ucReserve[3]; /* reserve */ 3647 UCHAR ucPwrSensRegBitMask; // detect which bit is used if I2C detect
2912 USHORT usSensPwr; /* in unit of watt */ 3648 UCHAR ucPwrSensActiveState; // high active or low active
2913} ATOM_POWER_SOURCE_OBJECT; 3649 UCHAR ucReserve[3]; // reserve
2914 3650 USHORT usSensPwr; // in unit of watt
2915typedef struct _ATOM_POWER_SOURCE_INFO { 3651}ATOM_POWER_SOURCE_OBJECT;
2916 ATOM_COMMON_TABLE_HEADER asHeader; 3652
2917 UCHAR asPwrbehave[16]; 3653typedef struct _ATOM_POWER_SOURCE_INFO
2918 ATOM_POWER_SOURCE_OBJECT asPwrObj[1]; 3654{
2919} ATOM_POWER_SOURCE_INFO; 3655 ATOM_COMMON_TABLE_HEADER asHeader;
2920 3656 UCHAR asPwrbehave[16];
2921/* Define ucPwrSrcId */ 3657 ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
3658}ATOM_POWER_SOURCE_INFO;
3659
3660
3661//Define ucPwrSrcId
2922#define POWERSOURCE_PCIE_ID1 0x00 3662#define POWERSOURCE_PCIE_ID1 0x00
2923#define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01 3663#define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01
2924#define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02 3664#define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02
2925#define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04 3665#define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04
2926#define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08 3666#define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08
2927 3667
2928/* define ucPwrSensorId */ 3668//define ucPwrSensorId
2929#define POWER_SENSOR_ALWAYS 0x00 3669#define POWER_SENSOR_ALWAYS 0x00
2930#define POWER_SENSOR_GPIO 0x01 3670#define POWER_SENSOR_GPIO 0x01
2931#define POWER_SENSOR_I2C 0x02 3671#define POWER_SENSOR_I2C 0x02
2932 3672
3673typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
3674{
3675 ATOM_COMMON_TABLE_HEADER sHeader;
3676 ULONG ulBootUpEngineClock;
3677 ULONG ulDentistVCOFreq;
3678 ULONG ulBootUpUMAClock;
3679 ULONG ulReserved1[8];
3680 ULONG ulBootUpReqDisplayVector;
3681 ULONG ulOtherDisplayMisc;
3682 ULONG ulGPUCapInfo;
3683 ULONG ulReserved2[3];
3684 ULONG ulSystemConfig;
3685 ULONG ulCPUCapInfo;
3686 USHORT usMaxNBVoltage;
3687 USHORT usMinNBVoltage;
3688 USHORT usBootUpNBVoltage;
3689 USHORT usExtDispConnInfoOffset;
3690 UCHAR ucHtcTmpLmt;
3691 UCHAR ucTjOffset;
3692 UCHAR ucMemoryType;
3693 UCHAR ucUMAChannelNumber;
3694 ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];
3695 ULONG ulCSR_M3_ARB_CNTL_UVD[10];
3696 ULONG ulCSR_M3_ARB_CNTL_FS3D[10];
3697 ULONG ulReserved3[42];
3698 ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
3699}ATOM_INTEGRATED_SYSTEM_INFO_V6;
3700
3701/**********************************************************************************************************************
3702// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
3703//ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit.
3704//ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
3705//ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
3706//ulReserved1[8] Reserved by now, must be 0x0.
3707//ulBootUpReqDisplayVector VBIOS boot up display IDs
3708// ATOM_DEVICE_CRT1_SUPPORT 0x0001
3709// ATOM_DEVICE_CRT2_SUPPORT 0x0010
3710// ATOM_DEVICE_DFP1_SUPPORT 0x0008
3711// ATOM_DEVICE_DFP6_SUPPORT 0x0040
3712// ATOM_DEVICE_DFP2_SUPPORT 0x0080
3713// ATOM_DEVICE_DFP3_SUPPORT 0x0200
3714// ATOM_DEVICE_DFP4_SUPPORT 0x0400
3715// ATOM_DEVICE_DFP5_SUPPORT 0x0800
3716// ATOM_DEVICE_LCD1_SUPPORT 0x0002
3717//ulOtherDisplayMisc Other display related flags, not defined yet.
3718//ulGPUCapInfo TBD
3719//ulReserved2[3] must be 0x0 for the reserved.
3720//ulSystemConfig TBD
3721//ulCPUCapInfo TBD
3722//usMaxNBVoltage High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
3723//usMinNBVoltage Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
3724//usBootUpNBVoltage Boot up NB voltage in unit of mv.
3725//ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register.
3726//ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed.
3727//ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
3728//ucUMAChannelNumber System memory channel numbers.
3729//usExtDispConnectionInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table.
3730//ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for CSR M3 arbiter for default
3731//ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback.
3732//ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
3733**********************************************************************************************************************/
3734
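The description block above translates directly into driver-side parsing: the clocks are stored in 10 kHz units and the external display connection info is reachable through its table-relative offset. A minimal sketch, assuming the table has already been located and mapped by the caller and that this header is included; the helper name is illustrative:

#include "atombios.h"   /* assumed include; provides the typedefs used below */

/* Illustrative parse of ATOM_INTEGRATED_SYSTEM_INFO_V6. */
static void parse_integrated_sysinfo_v6(const ATOM_INTEGRATED_SYSTEM_INFO_V6 *info)
{
	ULONG eng_clk_khz = info->ulBootUpEngineClock * 10;  /* table stores 10 kHz units */
	ULONG uma_clk_khz = info->ulBootUpUMAClock * 10;     /* table stores 10 kHz units */

	/* usExtDispConnInfoOffset is relative to the start of this table. */
	const ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *ext_disp =
		(const ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *)
		((const UCHAR *)info + info->usExtDispConnInfoOffset);

	(void)eng_clk_khz; (void)uma_clk_khz; (void)ext_disp;
}
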
2933/**************************************************************************/ 3735/**************************************************************************/
2934/* This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design */ 3736// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
2935/* Memory SS Info Table */ 3737//Memory SS Info Table
2936/* Define Memory Clock SS chip ID */ 3738//Define Memory Clock SS chip ID
2937#define ICS91719 1 3739#define ICS91719 1
2938#define ICS91720 2 3740#define ICS91720 2
2939 3741
2940/* Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol */ 3742//Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol
2941typedef struct _ATOM_I2C_DATA_RECORD { 3743typedef struct _ATOM_I2C_DATA_RECORD
2942 UCHAR ucNunberOfBytes; /* Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop" */ 3744{
2943 UCHAR ucI2CData[1]; /* I2C data in bytes, should be less than 16 bytes usually */ 3745 UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop"
2944} ATOM_I2C_DATA_RECORD; 3746 UCHAR ucI2CData[1]; //I2C data in bytes, should be less than 16 bytes usually
2945 3747}ATOM_I2C_DATA_RECORD;
2946/* Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information */ 3748
2947typedef struct _ATOM_I2C_DEVICE_SETUP_INFO { 3749
2948 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* I2C line and HW/SW assisted cap. */ 3750//Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information
2949 UCHAR ucSSChipID; /* SS chip being used */ 3751typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
2950 UCHAR ucSSChipSlaveAddr; /* Slave Address to set up this SS chip */ 3752{
2951 UCHAR ucNumOfI2CDataRecords; /* number of data block */ 3753 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //I2C line and HW/SW assisted cap.
2952 ATOM_I2C_DATA_RECORD asI2CData[1]; 3754 UCHAR ucSSChipID; //SS chip being used
2953} ATOM_I2C_DEVICE_SETUP_INFO; 3755 UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip
2954 3756 UCHAR ucNumOfI2CDataRecords; //number of data block
2955/* ========================================================================================== */ 3757 ATOM_I2C_DATA_RECORD asI2CData[1];
2956typedef struct _ATOM_ASIC_MVDD_INFO { 3758}ATOM_I2C_DEVICE_SETUP_INFO;
2957 ATOM_COMMON_TABLE_HEADER sHeader; 3759
2958 ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1]; 3760//==========================================================================================
2959} ATOM_ASIC_MVDD_INFO; 3761typedef struct _ATOM_ASIC_MVDD_INFO
2960 3762{
2961/* ========================================================================================== */ 3763 ATOM_COMMON_TABLE_HEADER sHeader;
3764 ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
3765}ATOM_ASIC_MVDD_INFO;
3766
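Each ATOM_I2C_DEVICE_SETUP_INFO entry ends in ucNumOfI2CDataRecords variable-length ATOM_I2C_DATA_RECORDs (one count byte followed by that many I2C data bytes), so the records have to be walked byte-wise. A sketch under that packed-layout assumption; the helper name is illustrative, and how the caller finds the ATOM_ASIC_MVDD_INFO table is outside this header:

#include "atombios.h"

/* Illustrative walk over the variable-length I2C data records of one entry. */
static void walk_i2c_setup(ATOM_I2C_DEVICE_SETUP_INFO *setup)
{
	ATOM_I2C_DATA_RECORD *rec = setup->asI2CData;
	UCHAR n;

	for (n = 0; n < setup->ucNumOfI2CDataRecords; n++) {
		/* rec->ucI2CData[0 .. ucNunberOfBytes-1] is one write block for the
		 * chip at setup->ucSSChipSlaveAddr on the line named by sucI2cId.   */
		rec = (ATOM_I2C_DATA_RECORD *)((UCHAR *)rec +
			sizeof(rec->ucNunberOfBytes) + rec->ucNunberOfBytes);
	}
}
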
3767//==========================================================================================
2962#define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO 3768#define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO
2963 3769
2964/* ========================================================================================== */ 3770//==========================================================================================
2965/**************************************************************************/ 3771/**************************************************************************/
2966 3772
2967typedef struct _ATOM_ASIC_SS_ASSIGNMENT { 3773typedef struct _ATOM_ASIC_SS_ASSIGNMENT
2968 ULONG ulTargetClockRange; /* Clock Out frequence (VCO ), in unit of 10Khz */ 3774{
2969 USHORT usSpreadSpectrumPercentage; /* in unit of 0.01% */ 3775 ULONG ulTargetClockRange; //Clock Out frequence (VCO ), in unit of 10Khz
2970 USHORT usSpreadRateInKhz; /* in unit of kHz, modulation freq */ 3776 USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
2971 UCHAR ucClockIndication; /* Indicate which clock source needs SS */ 3777 USHORT usSpreadRateInKhz; //in unit of kHz, modulation freq
2972 UCHAR ucSpreadSpectrumMode; /* Bit1=0 Down Spread,=1 Center Spread. */ 3778 UCHAR ucClockIndication; //Indicate which clock source needs SS
2973 UCHAR ucReserved[2]; 3779 UCHAR ucSpreadSpectrumMode; //Bit1=0 Down Spread,=1 Center Spread.
2974} ATOM_ASIC_SS_ASSIGNMENT; 3780 UCHAR ucReserved[2];
2975 3781}ATOM_ASIC_SS_ASSIGNMENT;
2976/* Define ucSpreadSpectrumType */ 3782
3783//Define ucClockIndication, SW uses the IDs below to search if the SS is requried/enabled on a clock branch/signal type.
3784//SS is not required or enabled if a match is not found.
2977#define ASIC_INTERNAL_MEMORY_SS 1 3785#define ASIC_INTERNAL_MEMORY_SS 1
2978#define ASIC_INTERNAL_ENGINE_SS 2 3786#define ASIC_INTERNAL_ENGINE_SS 2
2979#define ASIC_INTERNAL_UVD_SS 3 3787#define ASIC_INTERNAL_UVD_SS 3
3788#define ASIC_INTERNAL_SS_ON_TMDS 4
3789#define ASIC_INTERNAL_SS_ON_HDMI 5
3790#define ASIC_INTERNAL_SS_ON_LVDS 6
3791#define ASIC_INTERNAL_SS_ON_DP 7
3792#define ASIC_INTERNAL_SS_ON_DCPLL 8
3793
3794typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
3795{
3796 ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequence (VCO ), in unit of 10Khz
3797 //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 )
3798 USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
3799 USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq
3800 UCHAR ucClockIndication; //Indicate which clock source needs SS
3801 UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
3802 UCHAR ucReserved[2];
3803}ATOM_ASIC_SS_ASSIGNMENT_V2;
3804
3805//ucSpreadSpectrumMode
3806//#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
3807//#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
3808//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
3809//#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001
3810//#define ATOM_INTERNAL_SS_MASK 0x00000000
3811//#define ATOM_EXTERNAL_SS_MASK 0x00000002
3812
3813typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
3814{
3815 ATOM_COMMON_TABLE_HEADER sHeader;
3816 ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
3817}ATOM_ASIC_INTERNAL_SS_INFO;
-
-typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
-	ATOM_COMMON_TABLE_HEADER sHeader;
-	ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
-} ATOM_ASIC_INTERNAL_SS_INFO;
-
-/* ==============================Scratch Pad Definition Portion=============================== */
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1]; //this is point only.
+}ATOM_ASIC_INTERNAL_SS_INFO_V2;
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
3826{
3827 ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequence (VCO ), in unit of 10Khz
3828 //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 )
3829 USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
3830 USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq
3831 UCHAR ucClockIndication; //Indicate which clock source needs SS
3832 UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
3833 UCHAR ucReserved[2];
3834}ATOM_ASIC_SS_ASSIGNMENT_V3;
3835
3836typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
3837{
3838 ATOM_COMMON_TABLE_HEADER sHeader;
3839 ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1]; //this is pointer only.
3840}ATOM_ASIC_INTERNAL_SS_INFO_V3;
3841
3842
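The ucClockIndication IDs above are what software matches against when deciding whether spread spectrum applies to a given clock; per the comment, no match means SS is neither required nor enabled. A lookup sketch over the V2 table, assuming the entry count is derived from the common table header's usStructureSize; the function name is illustrative:

#include <stddef.h>
#include "atombios.h"   /* assumed include; provides the SS table typedefs */

/* Illustrative: return the first V2 assignment matching clock source 'id'
 * (e.g. ASIC_INTERNAL_SS_ON_DP); NULL means SS is not required/enabled.   */
static ATOM_ASIC_SS_ASSIGNMENT_V2 *
find_ss_assignment_v2(ATOM_ASIC_INTERNAL_SS_INFO_V2 *tbl, UCHAR id, ULONG clock)
{
	int i, num;

	/* asSpreadSpectrum[] is declared with one element but really sized by
	 * the table, so derive the count from the common header.             */
	num = (tbl->sHeader.usStructureSize - sizeof(ATOM_COMMON_TABLE_HEADER)) /
	      sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);

	for (i = 0; i < num; i++) {
		if (tbl->asSpreadSpectrum[i].ucClockIndication == id &&
		    clock <= tbl->asSpreadSpectrum[i].ulTargetClockRange)
			return &tbl->asSpreadSpectrum[i];
	}
	return NULL;
}
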
3843//==============================Scratch Pad Definition Portion===============================
2987#define ATOM_DEVICE_CONNECT_INFO_DEF 0 3844#define ATOM_DEVICE_CONNECT_INFO_DEF 0
2988#define ATOM_ROM_LOCATION_DEF 1 3845#define ATOM_ROM_LOCATION_DEF 1
2989#define ATOM_TV_STANDARD_DEF 2 3846#define ATOM_TV_STANDARD_DEF 2
@@ -2995,7 +3852,8 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
2995#define ATOM_I2C_CHANNEL_STATUS_DEF 8 3852#define ATOM_I2C_CHANNEL_STATUS_DEF 8
2996#define ATOM_I2C_CHANNEL_STATUS1_DEF 9 3853#define ATOM_I2C_CHANNEL_STATUS1_DEF 9
2997 3854
2998/* BIOS_0_SCRATCH Definition */ 3855
3856// BIOS_0_SCRATCH Definition
2999#define ATOM_S0_CRT1_MONO 0x00000001L 3857#define ATOM_S0_CRT1_MONO 0x00000001L
3000#define ATOM_S0_CRT1_COLOR 0x00000002L 3858#define ATOM_S0_CRT1_COLOR 0x00000002L
3001#define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR) 3859#define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
@@ -3008,6 +3866,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3008#define ATOM_S0_CV_DIN_A 0x00000020L 3866#define ATOM_S0_CV_DIN_A 0x00000020L
3009#define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A) 3867#define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
3010 3868
3869
3011#define ATOM_S0_CRT2_MONO 0x00000100L 3870#define ATOM_S0_CRT2_MONO 0x00000100L
3012#define ATOM_S0_CRT2_COLOR 0x00000200L 3871#define ATOM_S0_CRT2_COLOR 0x00000200L
3013#define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR) 3872#define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
@@ -3025,28 +3884,27 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3025#define ATOM_S0_DFP2 0x00020000L 3884#define ATOM_S0_DFP2 0x00020000L
3026#define ATOM_S0_LCD1 0x00040000L 3885#define ATOM_S0_LCD1 0x00040000L
3027#define ATOM_S0_LCD2 0x00080000L 3886#define ATOM_S0_LCD2 0x00080000L
3028#define ATOM_S0_TV2 0x00100000L 3887#define ATOM_S0_DFP6 0x00100000L
3029#define ATOM_S0_DFP3 0x00200000L 3888#define ATOM_S0_DFP3 0x00200000L
3030#define ATOM_S0_DFP4 0x00400000L 3889#define ATOM_S0_DFP4 0x00400000L
3031#define ATOM_S0_DFP5 0x00800000L 3890#define ATOM_S0_DFP5 0x00800000L
3032 3891
-#define ATOM_S0_DFP_MASK \
-	(ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5)
+#define ATOM_S0_DFP_MASK ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6

-#define ATOM_S0_FAD_REGISTER_BUG	0x02000000L /* If set, indicates we are running a PCIE asic with */
-					    /* the FAD/HDP reg access bug. Bit is read by DAL */
+#define ATOM_S0_FAD_REGISTER_BUG        0x02000000L // If set, indicates we are running a PCIE asic with
+                                                    // the FAD/HDP reg access bug. Bit is read by DAL, this is obsolete from RV5xx

3039#define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L 3897#define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L
3040#define ATOM_S0_THERMAL_STATE_SHIFT 26 3898#define ATOM_S0_THERMAL_STATE_SHIFT 26
3041 3899
3042#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L 3900#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
3043#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29 3901#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
3044 3902
3045#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1 3903#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
3046#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2 3904#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
3047#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3 3905#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
3048 3906
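These masks and shifts are applied to the raw BIOS_0_SCRATCH value. A small decode sketch (illustrative helper; note that ATOM_S0_DFP_MASK above is defined without surrounding parentheses, so its expansion should be parenthesized before masking):

#include "atombios.h"

/* Illustrative decode of a raw BIOS_0_SCRATCH value read from the ASIC. */
static void decode_bios_0_scratch(ULONG s0)
{
	ULONG thermal = (s0 & ATOM_S0_THERMAL_STATE_MASK) >> ATOM_S0_THERMAL_STATE_SHIFT;
	ULONG power   = (s0 & ATOM_S0_SYSTEM_POWER_STATE_MASK) >> ATOM_S0_SYSTEM_POWER_STATE_SHIFT;
	int on_ac     = (power == ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC);
	/* ATOM_S0_DFP_MASK expands to an unparenthesized OR chain, hence the
	 * extra parentheses around it here.                                  */
	int any_dfp   = (s0 & (ATOM_S0_DFP_MASK)) != 0;

	(void)thermal; (void)on_ac; (void)any_dfp;
}
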
3049/* Byte aligned definition for BIOS usage */ 3907//Byte aligned defintion for BIOS usage
3050#define ATOM_S0_CRT1_MONOb0 0x01 3908#define ATOM_S0_CRT1_MONOb0 0x01
3051#define ATOM_S0_CRT1_COLORb0 0x02 3909#define ATOM_S0_CRT1_COLORb0 0x02
3052#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0) 3910#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
@@ -3076,8 +3934,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3076#define ATOM_S0_DFP2b2 0x02 3934#define ATOM_S0_DFP2b2 0x02
3077#define ATOM_S0_LCD1b2 0x04 3935#define ATOM_S0_LCD1b2 0x04
3078#define ATOM_S0_LCD2b2 0x08 3936#define ATOM_S0_LCD2b2 0x08
3079#define ATOM_S0_TV2b2 0x10 3937#define ATOM_S0_DFP6b2 0x10
3080#define ATOM_S0_DFP3b2 0x20 3938#define ATOM_S0_DFP3b2 0x20
3939#define ATOM_S0_DFP4b2 0x40
3940#define ATOM_S0_DFP5b2 0x80
3941
3081 3942
3082#define ATOM_S0_THERMAL_STATE_MASKb3 0x1C 3943#define ATOM_S0_THERMAL_STATE_MASKb3 0x1C
3083#define ATOM_S0_THERMAL_STATE_SHIFTb3 2 3944#define ATOM_S0_THERMAL_STATE_SHIFTb3 2
@@ -3085,43 +3946,20 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3085#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0 3946#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
3086#define ATOM_S0_LCD1_SHIFT 18 3947#define ATOM_S0_LCD1_SHIFT 18
3087 3948
3088/* BIOS_1_SCRATCH Definition */ 3949// BIOS_1_SCRATCH Definition
3089#define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL 3950#define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL
3090#define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L 3951#define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L
3091 3952
3092/* BIOS_2_SCRATCH Definition */ 3953// BIOS_2_SCRATCH Definition
3093#define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL 3954#define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL
3094#define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L 3955#define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L
3095#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8 3956#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8
3096 3957
3097#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
3098#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L
3099#define ATOM_S2_TV1_DPMS_STATE 0x00040000L
3100#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L
3101#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L
3102#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L
3103#define ATOM_S2_TV2_DPMS_STATE 0x00400000L
3104#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L
3105#define ATOM_S2_CV_DPMS_STATE 0x01000000L
3106#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L
3107#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L
3108#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L
3109
3110#define ATOM_S2_DFP_DPM_STATE \
3111 (ATOM_S2_DFP1_DPMS_STATE | ATOM_S2_DFP2_DPMS_STATE | \
3112 ATOM_S2_DFP3_DPMS_STATE | ATOM_S2_DFP4_DPMS_STATE | \
3113 ATOM_S2_DFP5_DPMS_STATE)
3114
3115#define ATOM_S2_DEVICE_DPMS_STATE \
3116 (ATOM_S2_CRT1_DPMS_STATE + ATOM_S2_LCD1_DPMS_STATE + \
3117 ATOM_S2_TV1_DPMS_STATE + ATOM_S2_DFP_DPMS_STATE + \
3118 ATOM_S2_CRT2_DPMS_STATE + ATOM_S2_LCD2_DPMS_STATE + \
3119 ATOM_S2_TV2_DPMS_STATE + ATOM_S2_CV_DPMS_STATE)
3120
3121#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L 3958#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L
3122#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26 3959#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
3123#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L 3960#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L
3124 3961
3962#define ATOM_S2_DEVICE_DPMS_STATE 0x00010000L
3125#define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L 3963#define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L
3126 3964
3127#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0 3965#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0
@@ -3131,21 +3969,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3131#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30 3969#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
3132#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L 3970#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L
3133 3971
3134/* Byte aligned definition for BIOS usage */ 3972
3973//Byte aligned defintion for BIOS usage
3135#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F 3974#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F
3136#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF 3975#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
3137#define ATOM_S2_CRT1_DPMS_STATEb2 0x01 3976#define ATOM_S2_DEVICE_DPMS_STATEb2 0x01
3138#define ATOM_S2_LCD1_DPMS_STATEb2 0x02
3139#define ATOM_S2_TV1_DPMS_STATEb2 0x04
3140#define ATOM_S2_DFP1_DPMS_STATEb2 0x08
3141#define ATOM_S2_CRT2_DPMS_STATEb2 0x10
3142#define ATOM_S2_LCD2_DPMS_STATEb2 0x20
3143#define ATOM_S2_TV2_DPMS_STATEb2 0x40
3144#define ATOM_S2_DFP2_DPMS_STATEb2 0x80
3145#define ATOM_S2_CV_DPMS_STATEb3 0x01
3146#define ATOM_S2_DFP3_DPMS_STATEb3 0x02
3147#define ATOM_S2_DFP4_DPMS_STATEb3 0x04
3148#define ATOM_S2_DFP5_DPMS_STATEb3 0x08
3149 3977
3150#define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF 3978#define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF
3151#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C 3979#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C
@@ -3153,21 +3981,22 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3153#define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20 3981#define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
3154#define ATOM_S2_ROTATION_STATE_MASKb3 0xC0 3982#define ATOM_S2_ROTATION_STATE_MASKb3 0xC0
3155 3983
3156/* BIOS_3_SCRATCH Definition */ 3984
3985// BIOS_3_SCRATCH Definition
3157#define ATOM_S3_CRT1_ACTIVE 0x00000001L 3986#define ATOM_S3_CRT1_ACTIVE 0x00000001L
3158#define ATOM_S3_LCD1_ACTIVE 0x00000002L 3987#define ATOM_S3_LCD1_ACTIVE 0x00000002L
3159#define ATOM_S3_TV1_ACTIVE 0x00000004L 3988#define ATOM_S3_TV1_ACTIVE 0x00000004L
3160#define ATOM_S3_DFP1_ACTIVE 0x00000008L 3989#define ATOM_S3_DFP1_ACTIVE 0x00000008L
3161#define ATOM_S3_CRT2_ACTIVE 0x00000010L 3990#define ATOM_S3_CRT2_ACTIVE 0x00000010L
3162#define ATOM_S3_LCD2_ACTIVE 0x00000020L 3991#define ATOM_S3_LCD2_ACTIVE 0x00000020L
3163#define ATOM_S3_TV2_ACTIVE 0x00000040L 3992#define ATOM_S3_DFP6_ACTIVE 0x00000040L
3164#define ATOM_S3_DFP2_ACTIVE 0x00000080L 3993#define ATOM_S3_DFP2_ACTIVE 0x00000080L
3165#define ATOM_S3_CV_ACTIVE 0x00000100L 3994#define ATOM_S3_CV_ACTIVE 0x00000100L
3166#define ATOM_S3_DFP3_ACTIVE 0x00000200L 3995#define ATOM_S3_DFP3_ACTIVE 0x00000200L
3167#define ATOM_S3_DFP4_ACTIVE 0x00000400L 3996#define ATOM_S3_DFP4_ACTIVE 0x00000400L
3168#define ATOM_S3_DFP5_ACTIVE 0x00000800L 3997#define ATOM_S3_DFP5_ACTIVE 0x00000800L
3169 3998
3170#define ATOM_S3_DEVICE_ACTIVE_MASK 0x000003FFL 3999#define ATOM_S3_DEVICE_ACTIVE_MASK 0x00000FFFL
3171 4000
3172#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L 4001#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L
3173#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L 4002#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
@@ -3178,7 +4007,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3178#define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L 4007#define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L
3179#define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L 4008#define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L
3180#define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L 4009#define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L
3181#define ATOM_S3_TV2_CRTC_ACTIVE 0x00400000L 4010#define ATOM_S3_DFP6_CRTC_ACTIVE 0x00400000L
3182#define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L 4011#define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L
3183#define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L 4012#define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L
3184#define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L 4013#define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L
@@ -3187,17 +4016,18 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3187 4016
3188#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L 4017#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
3189#define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L 4018#define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L
4019//Below two definitions are not supported in pplib, but in the old powerplay in DAL
3190#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L 4020#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L
3191#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L 4021#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L
3192 4022
3193/* Byte aligned definition for BIOS usage */ 4023//Byte aligned defintion for BIOS usage
3194#define ATOM_S3_CRT1_ACTIVEb0 0x01 4024#define ATOM_S3_CRT1_ACTIVEb0 0x01
3195#define ATOM_S3_LCD1_ACTIVEb0 0x02 4025#define ATOM_S3_LCD1_ACTIVEb0 0x02
3196#define ATOM_S3_TV1_ACTIVEb0 0x04 4026#define ATOM_S3_TV1_ACTIVEb0 0x04
3197#define ATOM_S3_DFP1_ACTIVEb0 0x08 4027#define ATOM_S3_DFP1_ACTIVEb0 0x08
3198#define ATOM_S3_CRT2_ACTIVEb0 0x10 4028#define ATOM_S3_CRT2_ACTIVEb0 0x10
3199#define ATOM_S3_LCD2_ACTIVEb0 0x20 4029#define ATOM_S3_LCD2_ACTIVEb0 0x20
3200#define ATOM_S3_TV2_ACTIVEb0 0x40 4030#define ATOM_S3_DFP6_ACTIVEb0 0x40
3201#define ATOM_S3_DFP2_ACTIVEb0 0x80 4031#define ATOM_S3_DFP2_ACTIVEb0 0x80
3202#define ATOM_S3_CV_ACTIVEb1 0x01 4032#define ATOM_S3_CV_ACTIVEb1 0x01
3203#define ATOM_S3_DFP3_ACTIVEb1 0x02 4033#define ATOM_S3_DFP3_ACTIVEb1 0x02
@@ -3212,7 +4042,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3212#define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08 4042#define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08
3213#define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10 4043#define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10
3214#define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20 4044#define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20
3215#define ATOM_S3_TV2_CRTC_ACTIVEb2 0x40 4045#define ATOM_S3_DFP6_CRTC_ACTIVEb2 0x40
3216#define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80 4046#define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80
3217#define ATOM_S3_CV_CRTC_ACTIVEb3 0x01 4047#define ATOM_S3_CV_CRTC_ACTIVEb3 0x01
3218#define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02 4048#define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02
@@ -3221,35 +4051,31 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3221 4051
3222#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF 4052#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF
3223 4053
3224#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20 4054// BIOS_4_SCRATCH Definition
3225#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
3226#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
3227
3228/* BIOS_4_SCRATCH Definition */
3229#define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL 4055#define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL
3230#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L 4056#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L
3231#define ATOM_S4_LCD1_REFRESH_SHIFT 8 4057#define ATOM_S4_LCD1_REFRESH_SHIFT 8
3232 4058
3233/* Byte aligned definition for BIOS usage */ 4059//Byte aligned defintion for BIOS usage
3234#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF 4060#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF
3235#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0 4061#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0
3236#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0 4062#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0
3237 4063
3238/* BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! */ 4064// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!!
3239#define ATOM_S5_DOS_REQ_CRT1b0 0x01 4065#define ATOM_S5_DOS_REQ_CRT1b0 0x01
3240#define ATOM_S5_DOS_REQ_LCD1b0 0x02 4066#define ATOM_S5_DOS_REQ_LCD1b0 0x02
3241#define ATOM_S5_DOS_REQ_TV1b0 0x04 4067#define ATOM_S5_DOS_REQ_TV1b0 0x04
3242#define ATOM_S5_DOS_REQ_DFP1b0 0x08 4068#define ATOM_S5_DOS_REQ_DFP1b0 0x08
3243#define ATOM_S5_DOS_REQ_CRT2b0 0x10 4069#define ATOM_S5_DOS_REQ_CRT2b0 0x10
3244#define ATOM_S5_DOS_REQ_LCD2b0 0x20 4070#define ATOM_S5_DOS_REQ_LCD2b0 0x20
3245#define ATOM_S5_DOS_REQ_TV2b0 0x40 4071#define ATOM_S5_DOS_REQ_DFP6b0 0x40
3246#define ATOM_S5_DOS_REQ_DFP2b0 0x80 4072#define ATOM_S5_DOS_REQ_DFP2b0 0x80
3247#define ATOM_S5_DOS_REQ_CVb1 0x01 4073#define ATOM_S5_DOS_REQ_CVb1 0x01
3248#define ATOM_S5_DOS_REQ_DFP3b1 0x02 4074#define ATOM_S5_DOS_REQ_DFP3b1 0x02
3249#define ATOM_S5_DOS_REQ_DFP4b1 0x04 4075#define ATOM_S5_DOS_REQ_DFP4b1 0x04
3250#define ATOM_S5_DOS_REQ_DFP5b1 0x08 4076#define ATOM_S5_DOS_REQ_DFP5b1 0x08
3251 4077
3252#define ATOM_S5_DOS_REQ_DEVICEw0 0x03FF 4078#define ATOM_S5_DOS_REQ_DEVICEw0 0x0FFF
3253 4079
3254#define ATOM_S5_DOS_REQ_CRT1 0x0001 4080#define ATOM_S5_DOS_REQ_CRT1 0x0001
3255#define ATOM_S5_DOS_REQ_LCD1 0x0002 4081#define ATOM_S5_DOS_REQ_LCD1 0x0002
@@ -3257,22 +4083,21 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3257#define ATOM_S5_DOS_REQ_DFP1 0x0008 4083#define ATOM_S5_DOS_REQ_DFP1 0x0008
3258#define ATOM_S5_DOS_REQ_CRT2 0x0010 4084#define ATOM_S5_DOS_REQ_CRT2 0x0010
3259#define ATOM_S5_DOS_REQ_LCD2 0x0020 4085#define ATOM_S5_DOS_REQ_LCD2 0x0020
3260#define ATOM_S5_DOS_REQ_TV2 0x0040 4086#define ATOM_S5_DOS_REQ_DFP6 0x0040
3261#define ATOM_S5_DOS_REQ_DFP2 0x0080 4087#define ATOM_S5_DOS_REQ_DFP2 0x0080
3262#define ATOM_S5_DOS_REQ_CV 0x0100 4088#define ATOM_S5_DOS_REQ_CV 0x0100
3263#define ATOM_S5_DOS_REQ_DFP3 0x0200 4089#define ATOM_S5_DOS_REQ_DFP3 0x0200
3264#define ATOM_S5_DOS_REQ_DFP4 0x0400 4090#define ATOM_S5_DOS_REQ_DFP4 0x0400
3265#define ATOM_S5_DOS_REQ_DFP5 0x0800 4091#define ATOM_S5_DOS_REQ_DFP5 0x0800
3266 4092
3267#define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0 4093#define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0
3268#define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0 4094#define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0
3269#define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0 4095#define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0
3270#define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1 4096#define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1
-#define ATOM_S5_DOS_FORCE_DEVICEw1 \
-	(ATOM_S5_DOS_FORCE_CRT1b2 + ATOM_S5_DOS_FORCE_TV1b2 + \
-	 ATOM_S5_DOS_FORCE_CRT2b2 + (ATOM_S5_DOS_FORCE_CVb3 << 8))
+#define ATOM_S5_DOS_FORCE_DEVICEw1         (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\
+                                            (ATOM_S5_DOS_FORCE_CVb3<<8))
3274 4099
3275/* BIOS_6_SCRATCH Definition */ 4100// BIOS_6_SCRATCH Definition
3276#define ATOM_S6_DEVICE_CHANGE 0x00000001L 4101#define ATOM_S6_DEVICE_CHANGE 0x00000001L
3277#define ATOM_S6_SCALER_CHANGE 0x00000002L 4102#define ATOM_S6_SCALER_CHANGE 0x00000002L
3278#define ATOM_S6_LID_CHANGE 0x00000004L 4103#define ATOM_S6_LID_CHANGE 0x00000004L
@@ -3285,11 +4110,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3285#define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L 4110#define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L
3286#define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L 4111#define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L
3287#define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L 4112#define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L
3288#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L /* Normal expansion Request bit for LCD */ 4113#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L //Normal expansion Request bit for LCD
3289#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L /* Aspect ratio expansion Request bit for LCD */ 4114#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L //Aspect ratio expansion Request bit for LCD
3290 4115
3291#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L /* This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion */ 4116#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion
3292#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L /* This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion */ 4117#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L //This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion
3293 4118
3294#define ATOM_S6_ACC_REQ_CRT1 0x00010000L 4119#define ATOM_S6_ACC_REQ_CRT1 0x00010000L
3295#define ATOM_S6_ACC_REQ_LCD1 0x00020000L 4120#define ATOM_S6_ACC_REQ_LCD1 0x00020000L
@@ -3297,7 +4122,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3297#define ATOM_S6_ACC_REQ_DFP1 0x00080000L 4122#define ATOM_S6_ACC_REQ_DFP1 0x00080000L
3298#define ATOM_S6_ACC_REQ_CRT2 0x00100000L 4123#define ATOM_S6_ACC_REQ_CRT2 0x00100000L
3299#define ATOM_S6_ACC_REQ_LCD2 0x00200000L 4124#define ATOM_S6_ACC_REQ_LCD2 0x00200000L
3300#define ATOM_S6_ACC_REQ_TV2 0x00400000L 4125#define ATOM_S6_ACC_REQ_DFP6 0x00400000L
3301#define ATOM_S6_ACC_REQ_DFP2 0x00800000L 4126#define ATOM_S6_ACC_REQ_DFP2 0x00800000L
3302#define ATOM_S6_ACC_REQ_CV 0x01000000L 4127#define ATOM_S6_ACC_REQ_CV 0x01000000L
3303#define ATOM_S6_ACC_REQ_DFP3 0x02000000L 4128#define ATOM_S6_ACC_REQ_DFP3 0x02000000L
@@ -3310,7 +4135,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3310#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L 4135#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L
3311#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L 4136#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L
3312 4137
3313/* Byte aligned definition for BIOS usage */ 4138//Byte aligned defintion for BIOS usage
3314#define ATOM_S6_DEVICE_CHANGEb0 0x01 4139#define ATOM_S6_DEVICE_CHANGEb0 0x01
3315#define ATOM_S6_SCALER_CHANGEb0 0x02 4140#define ATOM_S6_SCALER_CHANGEb0 0x02
3316#define ATOM_S6_LID_CHANGEb0 0x04 4141#define ATOM_S6_LID_CHANGEb0 0x04
@@ -3320,11 +4145,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3320#define ATOM_S6_LID_STATEb0 0x40 4145#define ATOM_S6_LID_STATEb0 0x40
3321#define ATOM_S6_DOCK_STATEb0 0x80 4146#define ATOM_S6_DOCK_STATEb0 0x80
3322#define ATOM_S6_CRITICAL_STATEb1 0x01 4147#define ATOM_S6_CRITICAL_STATEb1 0x01
3323#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02 4148#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02
3324#define ATOM_S6_THERMAL_STATE_CHANGEb1 0x04 4149#define ATOM_S6_THERMAL_STATE_CHANGEb1 0x04
3325#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08 4150#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
3326#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10 4151#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10
3327#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20 4152#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
3328 4153
3329#define ATOM_S6_ACC_REQ_CRT1b2 0x01 4154#define ATOM_S6_ACC_REQ_CRT1b2 0x01
3330#define ATOM_S6_ACC_REQ_LCD1b2 0x02 4155#define ATOM_S6_ACC_REQ_LCD1b2 0x02
@@ -3332,12 +4157,12 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3332#define ATOM_S6_ACC_REQ_DFP1b2 0x08 4157#define ATOM_S6_ACC_REQ_DFP1b2 0x08
3333#define ATOM_S6_ACC_REQ_CRT2b2 0x10 4158#define ATOM_S6_ACC_REQ_CRT2b2 0x10
3334#define ATOM_S6_ACC_REQ_LCD2b2 0x20 4159#define ATOM_S6_ACC_REQ_LCD2b2 0x20
3335#define ATOM_S6_ACC_REQ_TV2b2 0x40 4160#define ATOM_S6_ACC_REQ_DFP6b2 0x40
3336#define ATOM_S6_ACC_REQ_DFP2b2 0x80 4161#define ATOM_S6_ACC_REQ_DFP2b2 0x80
3337#define ATOM_S6_ACC_REQ_CVb3 0x01 4162#define ATOM_S6_ACC_REQ_CVb3 0x01
3338#define ATOM_S6_ACC_REQ_DFP3b3 0x02 4163#define ATOM_S6_ACC_REQ_DFP3b3 0x02
3339#define ATOM_S6_ACC_REQ_DFP4b3 0x04 4164#define ATOM_S6_ACC_REQ_DFP4b3 0x04
3340#define ATOM_S6_ACC_REQ_DFP5b3 0x08 4165#define ATOM_S6_ACC_REQ_DFP5b3 0x08
3341 4166
3342#define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0 4167#define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0
3343#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10 4168#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
@@ -3366,7 +4191,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3366#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30 4191#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30
3367#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31 4192#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31
3368 4193
3369/* BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! */ 4194// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!!
3370#define ATOM_S7_DOS_MODE_TYPEb0 0x03 4195#define ATOM_S7_DOS_MODE_TYPEb0 0x03
3371#define ATOM_S7_DOS_MODE_VGAb0 0x00 4196#define ATOM_S7_DOS_MODE_VGAb0 0x00
3372#define ATOM_S7_DOS_MODE_VESAb0 0x01 4197#define ATOM_S7_DOS_MODE_VESAb0 0x01
@@ -3378,220 +4203,194 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3378 4203
3379#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8 4204#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
3380 4205
3381/* BIOS_8_SCRATCH Definition */ 4206// BIOS_8_SCRATCH Definition
3382#define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF 4207#define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF
3383#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000 4208#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000
3384 4209
3385#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0 4210#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0
3386#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16 4211#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16
3387 4212
3388/* BIOS_9_SCRATCH Definition */ 4213// BIOS_9_SCRATCH Definition
3389#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 4214#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
3390#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF 4215#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF
3391#endif 4216#endif
3392#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK 4217#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
3393#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000 4218#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000
3394#endif 4219#endif
3395#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 4220#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
3396#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0 4221#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
3397#endif 4222#endif
3398#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 4223#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
3399#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16 4224#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16
3400#endif 4225#endif
3401 4226
4227
3402#define ATOM_FLAG_SET 0x20 4228#define ATOM_FLAG_SET 0x20
3403#define ATOM_FLAG_CLEAR 0 4229#define ATOM_FLAG_CLEAR 0
3404#define CLEAR_ATOM_S6_ACC_MODE \ 4230#define CLEAR_ATOM_S6_ACC_MODE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
3405 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ 4231#define SET_ATOM_S6_DEVICE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
3406 ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR) 4232#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
3407#define SET_ATOM_S6_DEVICE_CHANGE \ 4233#define SET_ATOM_S6_SCALER_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
3408 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ 4234#define SET_ATOM_S6_LID_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
3409 ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
3410#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE \
3411 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3412 ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
3413#define SET_ATOM_S6_SCALER_CHANGE \
3414 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3415 ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
3416#define SET_ATOM_S6_LID_CHANGE \
3417 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3418 ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
3419
3420#define SET_ATOM_S6_LID_STATE \
3421 ((ATOM_ACC_CHANGE_INFO_DEF << 8) |\
3422 ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
3423#define CLEAR_ATOM_S6_LID_STATE \
3424 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3425 ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
3426
3427#define SET_ATOM_S6_DOCK_CHANGE \
3428 ((ATOM_ACC_CHANGE_INFO_DEF << 8)| \
3429 ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
3430#define SET_ATOM_S6_DOCK_STATE \
3431 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3432 ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
3433#define CLEAR_ATOM_S6_DOCK_STATE \
3434 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3435 ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
3436
3437#define SET_ATOM_S6_THERMAL_STATE_CHANGE \
3438 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3439 ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
3440#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE \
3441 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3442 ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
3443#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS \
3444 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3445 ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
3446
3447#define SET_ATOM_S6_CRITICAL_STATE \
3448 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3449 ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
3450#define CLEAR_ATOM_S6_CRITICAL_STATE \
3451 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3452 ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
3453
3454#define SET_ATOM_S6_REQ_SCALER \
3455 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3456 ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
3457#define CLEAR_ATOM_S6_REQ_SCALER \
3458 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3459 ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
3460
3461#define SET_ATOM_S6_REQ_SCALER_ARATIO \
3462 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3463 ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
3464#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO \
3465 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3466 ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
3467
3468#define SET_ATOM_S6_I2C_STATE_CHANGE \
3469 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3470 ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
3471
3472#define SET_ATOM_S6_DISPLAY_STATE_CHANGE \
3473 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3474 ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
3475
3476#define SET_ATOM_S6_DEVICE_RECONFIG \
3477 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3478 ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
3479#define CLEAR_ATOM_S0_LCD1 \
3480 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 ) | \
3481 ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
3482#define SET_ATOM_S7_DOS_8BIT_DAC_EN \
3483 ((ATOM_DOS_MODE_INFO_DEF << 8) | \
3484 ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
3485#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN \
3486 ((ATOM_DOS_MODE_INFO_DEF << 8) | \
3487 ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
3488 4235
3489/****************************************************************************/ 4236#define SET_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
3490/* Portion II: Definitinos only used in Driver */ 4237#define CLEAR_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
4238
4239#define SET_ATOM_S6_DOCK_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
4240#define SET_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
4241#define CLEAR_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
4242
4243#define SET_ATOM_S6_THERMAL_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
4244#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
4245#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
4246
4247#define SET_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
4248#define CLEAR_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
4249
4250#define SET_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
4251#define CLEAR_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
4252
4253#define SET_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
4254#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
4255
4256#define SET_ATOM_S6_I2C_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
4257
4258#define SET_ATOM_S6_DISPLAY_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
4259
4260#define SET_ATOM_S6_DEVICE_RECONFIG ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
4261#define CLEAR_ATOM_S0_LCD1 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )| ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
4262#define SET_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
4263#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
4264
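Each of these SET_/CLEAR_ tokens packs, per the macro text above, a scratch-pad definition index in the upper byte, a bit position in the low five bits, and ATOM_FLAG_SET or ATOM_FLAG_CLEAR at bit 5. A decode sketch; the helper name is illustrative and the interpretation is read off the definitions themselves:

#include "atombios.h"

/* Illustrative decode of one SET_/CLEAR_ scratch token, following the macro
 * text above: (definition index << 8) | bit shift | ATOM_FLAG_SET/CLEAR.    */
static void decode_scratch_token(USHORT token)
{
	UCHAR def_index = token >> 8;          /* e.g. ATOM_ACC_CHANGE_INFO_DEF */
	UCHAR do_set    = (token & ATOM_FLAG_SET) != 0;
	UCHAR bit_shift = token & 0x1F;        /* bit position within the 32-bit scratch reg */

	(void)def_index; (void)do_set; (void)bit_shift;
}
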
4265/****************************************************************************/
4266//Portion II: Definitinos only used in Driver
3491/****************************************************************************/ 4267/****************************************************************************/
3492 4268
-/* Macros used by driver */
+// Macros used by driver
+#ifdef __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast<char*>(&(static_cast<ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*>(0))->FieldName)-static_cast<char*>(0))/sizeof(USHORT))

-#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char *)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES *)0)->FieldName)-(char *)0)/sizeof(USHORT))
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET)  (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
+#else // not __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))

 #define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
 #define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET)  ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
+#endif // __cplusplus

 #define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
 #define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
3502 4284
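GetIndexIntoMasterTable turns a field name of the master command/data table list into a USHORT index, and the revision macros extract the 6-bit format and content revisions from a table header. A usage sketch; SetEngineClock is one of the ATOM_MASTER_LIST_OF_COMMAND_TABLES entries defined earlier in this header, and handing the index to the AtomBIOS interpreter is left to the surrounding driver:

#include "atombios.h"

/* Illustrative use of the master-table and revision macros. */
static void example_master_table_use(ATOM_COMMON_TABLE_HEADER *hdr)
{
	int index  = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
	UCHAR frev = GET_COMMAND_TABLE_COMMANDSET_REVISION(hdr);
	UCHAR crev = GET_COMMAND_TABLE_PARAMETER_REVISION(hdr);

	/* 'index' would be passed to the AtomBIOS interpreter together with a
	 * parameter block whose layout is chosen according to frev/crev.      */
	(void)index; (void)frev; (void)crev;
}
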
3503/****************************************************************************/ 4285/****************************************************************************/
3504/* Portion III: Definitinos only used in VBIOS */ 4286//Portion III: Definitinos only used in VBIOS
3505/****************************************************************************/ 4287/****************************************************************************/
3506#define ATOM_DAC_SRC 0x80 4288#define ATOM_DAC_SRC 0x80
3507#define ATOM_SRC_DAC1 0 4289#define ATOM_SRC_DAC1 0
3508#define ATOM_SRC_DAC2 0x80 4290#define ATOM_SRC_DAC2 0x80
3509 4291
-#ifdef UEFI_BUILD
-#define USHORT UTEMP
-#endif
-
-typedef struct _MEMORY_PLLINIT_PARAMETERS {
-	ULONG ulTargetMemoryClock;	/* In 10Khz unit */
-	UCHAR ucAction;			/* not define yet */
-	UCHAR ucFbDiv_Hi;		/* Fbdiv Hi byte */
-	UCHAR ucFbDiv;			/* FB value */
-	UCHAR ucPostDiv;		/* Post div */
-} MEMORY_PLLINIT_PARAMETERS;
+typedef struct _MEMORY_PLLINIT_PARAMETERS
+{
+  ULONG ulTargetMemoryClock;    //In 10Khz unit
+  UCHAR ucAction;               //not define yet
+  UCHAR ucFbDiv_Hi;             //Fbdiv Hi byte
+  UCHAR ucFbDiv;                //FB value
+  UCHAR ucPostDiv;              //Post div
+}MEMORY_PLLINIT_PARAMETERS;
3521 4300
3522#define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS 4301#define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS
3523 4302
3524#define GPIO_PIN_WRITE 0x01 4303
4304#define GPIO_PIN_WRITE 0x01
3525#define GPIO_PIN_READ 0x00 4305#define GPIO_PIN_READ 0x00
3526 4306
3527typedef struct _GPIO_PIN_CONTROL_PARAMETERS { 4307typedef struct _GPIO_PIN_CONTROL_PARAMETERS
3528 UCHAR ucGPIO_ID; /* return value, read from GPIO pins */ 4308{
3529 UCHAR ucGPIOBitShift; /* define which bit in uGPIOBitVal need to be update */ 4309 UCHAR ucGPIO_ID; //return value, read from GPIO pins
3530 UCHAR ucGPIOBitVal; /* Set/Reset corresponding bit defined in ucGPIOBitMask */ 4310 UCHAR ucGPIOBitShift; //define which bit in uGPIOBitVal need to be update
3531 UCHAR ucAction; /* =GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write */ 4311 UCHAR ucGPIOBitVal; //Set/Reset corresponding bit defined in ucGPIOBitMask
3532} GPIO_PIN_CONTROL_PARAMETERS; 4312 UCHAR ucAction; //=GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write
3533 4313}GPIO_PIN_CONTROL_PARAMETERS;
3534typedef struct _ENABLE_SCALER_PARAMETERS { 4314
3535 UCHAR ucScaler; /* ATOM_SCALER1, ATOM_SCALER2 */ 4315typedef struct _ENABLE_SCALER_PARAMETERS
3536 UCHAR ucEnable; /* ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION */ 4316{
3537 UCHAR ucTVStandard; /* */ 4317 UCHAR ucScaler; // ATOM_SCALER1, ATOM_SCALER2
3538 UCHAR ucPadding[1]; 4318 UCHAR ucEnable; // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION
3539} ENABLE_SCALER_PARAMETERS; 4319 UCHAR ucTVStandard; //
3540#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS 4320 UCHAR ucPadding[1];
3541 4321}ENABLE_SCALER_PARAMETERS;
3542/* ucEnable: */ 4322#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
4323
4324//ucEnable:
3543#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0 4325#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0
3544#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1 4326#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1
3545#define SCALER_ENABLE_2TAP_ALPHA_MODE 2 4327#define SCALER_ENABLE_2TAP_ALPHA_MODE 2
3546#define SCALER_ENABLE_MULTITAP_MODE 3 4328#define SCALER_ENABLE_MULTITAP_MODE 3
3547 4329
3548typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS { 4330typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS
3549 ULONG usHWIconHorzVertPosn; /* Hardware Icon Vertical position */ 4331{
3550 UCHAR ucHWIconVertOffset; /* Hardware Icon Vertical offset */ 4332 ULONG usHWIconHorzVertPosn; // Hardware Icon Vertical position
3551 UCHAR ucHWIconHorzOffset; /* Hardware Icon Horizontal offset */ 4333 UCHAR ucHWIconVertOffset; // Hardware Icon Vertical offset
3552 UCHAR ucSelection; /* ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 */ 4334 UCHAR ucHWIconHorzOffset; // Hardware Icon Horizontal offset
3553 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 4335 UCHAR ucSelection; // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2
3554} ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS; 4336 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
3555 4337}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
3556typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION { 4338
3557 ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon; 4339typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION
3558 ENABLE_CRTC_PARAMETERS sReserved; 4340{
3559} ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION; 4341 ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
3560 4342 ENABLE_CRTC_PARAMETERS sReserved;
3561typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS { 4343}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
3562 USHORT usHight; /* Image Hight */ 4344
3563 USHORT usWidth; /* Image Width */ 4345typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS
3564 UCHAR ucSurface; /* Surface 1 or 2 */ 4346{
3565 UCHAR ucPadding[3]; 4347 USHORT usHight; // Image Hight
3566} ENABLE_GRAPH_SURFACE_PARAMETERS; 4348 USHORT usWidth; // Image Width
3567 4349 UCHAR ucSurface; // Surface 1 or 2
3568typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 { 4350 UCHAR ucPadding[3];
3569 USHORT usHight; /* Image Hight */ 4351}ENABLE_GRAPH_SURFACE_PARAMETERS;
3570 USHORT usWidth; /* Image Width */ 4352
3571 UCHAR ucSurface; /* Surface 1 or 2 */ 4353typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2
3572 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 4354{
3573 UCHAR ucPadding[2]; 4355 USHORT usHight; // Image Hight
3574} ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2; 4356 USHORT usWidth; // Image Width
3575 4357 UCHAR ucSurface; // Surface 1 or 2
3576typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION { 4358 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
3577 ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface; 4359 UCHAR ucPadding[2];
3578 ENABLE_YUV_PS_ALLOCATION sReserved; /* Don't set this one */ 4360}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
3579} ENABLE_GRAPH_SURFACE_PS_ALLOCATION; 4361
3580 4362typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
3581typedef struct _MEMORY_CLEAN_UP_PARAMETERS { 4363{
3582 USHORT usMemoryStart; /* in 8Kb boundry, offset from memory base address */ 4364 USHORT usHight; // Image Hight
3583 USHORT usMemorySize; /* 8Kb blocks aligned */ 4365 USHORT usWidth; // Image Width
3584} MEMORY_CLEAN_UP_PARAMETERS; 4366 UCHAR ucSurface; // Surface 1 or 2
4367 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
4368 USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0.
4369}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
4370
4371typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
4372{
4373 ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
4374 ENABLE_YUV_PS_ALLOCATION sReserved; // Don't set this one
4375}ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
4376
4377typedef struct _MEMORY_CLEAN_UP_PARAMETERS
4378{
4379 USHORT usMemoryStart; //in 8Kb boundry, offset from memory base address
4380 USHORT usMemorySize; //8Kb blocks aligned
4381}MEMORY_CLEAN_UP_PARAMETERS;
3585#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS 4382#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
3586 4383
3587typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS { 4384typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
3588 USHORT usX_Size; /* When use as input parameter, usX_Size indicates which CRTC */ 4385{
3589 USHORT usY_Size; 4386 USHORT usX_Size; //When use as input parameter, usX_Size indicates which CRTC
3590} GET_DISPLAY_SURFACE_SIZE_PARAMETERS; 4387 USHORT usY_Size;
4388}GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
3591 4389
3592typedef struct _INDIRECT_IO_ACCESS { 4390typedef struct _INDIRECT_IO_ACCESS
3593 ATOM_COMMON_TABLE_HEADER sHeader; 4391{
3594 UCHAR IOAccessSequence[256]; 4392 ATOM_COMMON_TABLE_HEADER sHeader;
4393 UCHAR IOAccessSequence[256];
3595} INDIRECT_IO_ACCESS; 4394} INDIRECT_IO_ACCESS;
3596 4395
3597#define INDIRECT_READ 0x00 4396#define INDIRECT_READ 0x00
@@ -3615,93 +4414,108 @@ typedef struct _INDIRECT_IO_ACCESS {
3615#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ 4414#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ
3616#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE 4415#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE
3617 4416
3618typedef struct _ATOM_OEM_INFO { 4417typedef struct _ATOM_OEM_INFO
3619 ATOM_COMMON_TABLE_HEADER sHeader; 4418{
3620 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; 4419 ATOM_COMMON_TABLE_HEADER sHeader;
3621} ATOM_OEM_INFO; 4420 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
3622 4421}ATOM_OEM_INFO;
3623typedef struct _ATOM_TV_MODE { 4422
3624 UCHAR ucVMode_Num; /* Video mode number */ 4423typedef struct _ATOM_TV_MODE
3625 UCHAR ucTV_Mode_Num; /* Internal TV mode number */ 4424{
3626} ATOM_TV_MODE; 4425 UCHAR ucVMode_Num; //Video mode number
3627 4426 UCHAR ucTV_Mode_Num; //Internal TV mode number
3628typedef struct _ATOM_BIOS_INT_TVSTD_MODE { 4427}ATOM_TV_MODE;
3629 ATOM_COMMON_TABLE_HEADER sHeader; 4428
3630 USHORT usTV_Mode_LUT_Offset; /* Pointer to standard to internal number conversion table */ 4429typedef struct _ATOM_BIOS_INT_TVSTD_MODE
3631 USHORT usTV_FIFO_Offset; /* Pointer to FIFO entry table */ 4430{
3632 USHORT usNTSC_Tbl_Offset; /* Pointer to SDTV_Mode_NTSC table */ 4431 ATOM_COMMON_TABLE_HEADER sHeader;
3633 USHORT usPAL_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */ 4432 USHORT usTV_Mode_LUT_Offset; // Pointer to standard to internal number conversion table
3634 USHORT usCV_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */ 4433 USHORT usTV_FIFO_Offset; // Pointer to FIFO entry table
3635} ATOM_BIOS_INT_TVSTD_MODE; 4434 USHORT usNTSC_Tbl_Offset; // Pointer to SDTV_Mode_NTSC table
3636 4435 USHORT usPAL_Tbl_Offset; // Pointer to SDTV_Mode_PAL table
3637typedef struct _ATOM_TV_MODE_SCALER_PTR { 4436 USHORT usCV_Tbl_Offset; // Pointer to SDTV_Mode_PAL table
3638 USHORT ucFilter0_Offset; /* Pointer to filter format 0 coefficients */ 4437}ATOM_BIOS_INT_TVSTD_MODE;
3639 USHORT usFilter1_Offset; /* Pointer to filter format 0 coefficients */ 4438
3640 UCHAR ucTV_Mode_Num; 4439
3641} ATOM_TV_MODE_SCALER_PTR; 4440typedef struct _ATOM_TV_MODE_SCALER_PTR
3642 4441{
3643typedef struct _ATOM_STANDARD_VESA_TIMING { 4442 USHORT ucFilter0_Offset; //Pointer to filter format 0 coefficients
3644 ATOM_COMMON_TABLE_HEADER sHeader; 4443 USHORT usFilter1_Offset; //Pointer to filter format 0 coefficients
3645 ATOM_DTD_FORMAT aModeTimings[16]; /* 16 is not the real array number, just for initial allocation */ 4444 UCHAR ucTV_Mode_Num;
3646} ATOM_STANDARD_VESA_TIMING; 4445}ATOM_TV_MODE_SCALER_PTR;
3647 4446
3648typedef struct _ATOM_STD_FORMAT { 4447typedef struct _ATOM_STANDARD_VESA_TIMING
3649 USHORT usSTD_HDisp; 4448{
3650 USHORT usSTD_VDisp; 4449 ATOM_COMMON_TABLE_HEADER sHeader;
3651 USHORT usSTD_RefreshRate; 4450 ATOM_DTD_FORMAT aModeTimings[16]; // 16 is not the real array number, just for initial allocation
3652 USHORT usReserved; 4451}ATOM_STANDARD_VESA_TIMING;
3653} ATOM_STD_FORMAT; 4452
3654 4453
3655typedef struct _ATOM_VESA_TO_EXTENDED_MODE { 4454typedef struct _ATOM_STD_FORMAT
3656 USHORT usVESA_ModeNumber; 4455{
3657 USHORT usExtendedModeNumber; 4456 USHORT usSTD_HDisp;
3658} ATOM_VESA_TO_EXTENDED_MODE; 4457 USHORT usSTD_VDisp;
3659 4458 USHORT usSTD_RefreshRate;
3660typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT { 4459 USHORT usReserved;
3661 ATOM_COMMON_TABLE_HEADER sHeader; 4460}ATOM_STD_FORMAT;
3662 ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76]; 4461
3663} ATOM_VESA_TO_INTENAL_MODE_LUT; 4462typedef struct _ATOM_VESA_TO_EXTENDED_MODE
4463{
4464 USHORT usVESA_ModeNumber;
4465 USHORT usExtendedModeNumber;
4466}ATOM_VESA_TO_EXTENDED_MODE;
4467
4468typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT
4469{
4470 ATOM_COMMON_TABLE_HEADER sHeader;
4471 ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
4472}ATOM_VESA_TO_INTENAL_MODE_LUT;
3664 4473
3665/*************** ATOM Memory Related Data Structure ***********************/ 4474/*************** ATOM Memory Related Data Structure ***********************/
3666typedef struct _ATOM_MEMORY_VENDOR_BLOCK { 4475typedef struct _ATOM_MEMORY_VENDOR_BLOCK{
3667 UCHAR ucMemoryType; 4476 UCHAR ucMemoryType;
3668 UCHAR ucMemoryVendor; 4477 UCHAR ucMemoryVendor;
3669 UCHAR ucAdjMCId; 4478 UCHAR ucAdjMCId;
3670 UCHAR ucDynClkId; 4479 UCHAR ucDynClkId;
3671 ULONG ulDllResetClkRange; 4480 ULONG ulDllResetClkRange;
3672} ATOM_MEMORY_VENDOR_BLOCK; 4481}ATOM_MEMORY_VENDOR_BLOCK;
3673 4482
3674typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG { 4483
4484typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG{
3675#if ATOM_BIG_ENDIAN 4485#if ATOM_BIG_ENDIAN
3676 ULONG ucMemBlkId:8; 4486 ULONG ucMemBlkId:8;
3677 ULONG ulMemClockRange:24; 4487 ULONG ulMemClockRange:24;
3678#else 4488#else
3679 ULONG ulMemClockRange:24; 4489 ULONG ulMemClockRange:24;
3680 ULONG ucMemBlkId:8; 4490 ULONG ucMemBlkId:8;
3681#endif 4491#endif
3682} ATOM_MEMORY_SETTING_ID_CONFIG; 4492}ATOM_MEMORY_SETTING_ID_CONFIG;
3683 4493
-typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS {
-	ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
-	ULONG ulAccess;
-} ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
-
-typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK {
-	ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
-	ULONG aulMemData[1];
-} ATOM_MEMORY_SETTING_DATA_BLOCK;
-
-typedef struct _ATOM_INIT_REG_INDEX_FORMAT {
-	USHORT usRegIndex;		/* MC register index */
-	UCHAR ucPreRegDataLength;	/* offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf */
-} ATOM_INIT_REG_INDEX_FORMAT;
-
-typedef struct _ATOM_INIT_REG_BLOCK {
-	USHORT usRegIndexTblSize;	/* size of asRegIndexBuf */
-	USHORT usRegDataBlkSize;	/* size of ATOM_MEMORY_SETTING_DATA_BLOCK */
-	ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1];
-	ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
-} ATOM_INIT_REG_BLOCK;
+typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
+{
+  ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
+  ULONG                         ulAccess;
+}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
+
+
+typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{
+  ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS  ulMemoryID;
+  ULONG                                 aulMemData[1];
+}ATOM_MEMORY_SETTING_DATA_BLOCK;
+
+
+typedef struct _ATOM_INIT_REG_INDEX_FORMAT{
+  USHORT usRegIndex;                                     // MC register index
+  UCHAR  ucPreRegDataLength;                             // offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf
+}ATOM_INIT_REG_INDEX_FORMAT;
+
+
+typedef struct _ATOM_INIT_REG_BLOCK{
+  USHORT                         usRegIndexTblSize;      //size of asRegIndexBuf
+  USHORT                         usRegDataBlkSize;       //size of ATOM_MEMORY_SETTING_DATA_BLOCK
+  ATOM_INIT_REG_INDEX_FORMAT     asRegIndexBuf[1];
+  ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
+}ATOM_INIT_REG_BLOCK;
3705 4519
3706#define END_OF_REG_INDEX_BLOCK 0x0ffff 4520#define END_OF_REG_INDEX_BLOCK 0x0ffff
3707#define END_OF_REG_DATA_BLOCK 0x00000000 4521#define END_OF_REG_DATA_BLOCK 0x00000000
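// Example (editor sketch, not part of the VBIOS header): one way a driver
// could walk an ATOM_INIT_REG_BLOCK.  The index table is a run of
// ATOM_INIT_REG_INDEX_FORMAT entries terminated by END_OF_REG_INDEX_BLOCK,
// and each data block starts with an ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
// word whose 24-bit clock-range / 8-bit block-id fields are reached through
// the union.  The helper names below are made up for illustration.
static inline void example_walk_init_reg_block(const ATOM_INIT_REG_BLOCK *blk)
{
   USHORT num_index = blk->usRegIndexTblSize / sizeof(ATOM_INIT_REG_INDEX_FORMAT);
   USHORT i;

   for (i = 0; i < num_index; i++) {
      const ATOM_INIT_REG_INDEX_FORMAT *idx = &blk->asRegIndexBuf[i];

      if (idx->usRegIndex == END_OF_REG_INDEX_BLOCK)
         break;                        // terminator reached
      // idx->usRegIndex is the MC register, idx->ucPreRegDataLength the
      // offset of its value inside each data block.
   }
}

static inline ULONG example_mem_clock_range(const ATOM_MEMORY_SETTING_DATA_BLOCK *data)
{
   // The union lets the same 32-bit word be read either raw or as its
   // packed fields; here the clock-range field is extracted.
   return data->ulMemoryID.slAccess.ulMemClockRange;
}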
@@ -3716,16 +4530,19 @@
#define INDEX_ACCESS_RANGE_END      (INDEX_ACCESS_RANGE_BEGIN + 1)
#define VALUE_INDEX_ACCESS_SINGLE   (INDEX_ACCESS_RANGE_END + 1)


typedef struct _ATOM_MC_INIT_PARAM_TABLE
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT usAdjustARB_SEQDataOffset;
  USHORT usMCInitMemTypeTblOffset;
  USHORT usMCInitCommonTblOffset;
  USHORT usMCInitPowerDownTblOffset;
  ULONG  ulARB_SEQDataBuf[32];
  ATOM_INIT_REG_BLOCK asMCInitMemType;
  ATOM_INIT_REG_BLOCK asMCInitCommon;
}ATOM_MC_INIT_PARAM_TABLE;


#define _4Mx16  0x2
#define _4Mx32  0x3
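// Example (editor sketch, not part of the header): resolving one of the
// usMCInit*TblOffset fields above.  This assumes the offsets are byte
// offsets measured from the start of ATOM_MC_INIT_PARAM_TABLE, which is the
// usual convention for ATOM sub-tables; the function name is made up.
static inline ATOM_INIT_REG_BLOCK *
example_mc_init_common_block(ATOM_MC_INIT_PARAM_TABLE *tbl)
{
   if (tbl->usMCInitCommonTblOffset == 0)
      return 0;                                    // sub-table not present
   return (ATOM_INIT_REG_BLOCK *)((UCHAR *)tbl + tbl->usMCInitCommonTblOffset);
}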
@@ -3751,221 +4568,272 @@

#define QIMONDA   INFINEON
#define PROMOS    MOSEL
#define KRETON    INFINEON

/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////

#define UCODE_ROM_START_ADDRESS   0x1c000
#define UCODE_SIGNATURE           0x4375434d // 'MCuC' - MC uCode

//uCode block header for reference

typedef struct _MCuCodeHeader
{
  ULONG  ulSignature;
  UCHAR  ucRevision;
  UCHAR  ucChecksum;
  UCHAR  ucReserved1;
  UCHAR  ucReserved2;
  USHORT usParametersLength;
  USHORT usUCodeLength;
  USHORT usReserved1;
  USHORT usReserved2;
} MCuCodeHeader;

//////////////////////////////////////////////////////////////////////////////////

#define ATOM_MAX_NUMBER_OF_VRAM_MODULE  16

#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK  0xF
typedef struct _ATOM_VRAM_MODULE_V1
{
  ULONG  ulReserved;
  USHORT usEMRSValue;
  USHORT usMRSValue;
  USHORT usReserved;
  UCHAR  ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
  UCHAR  ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved;
  UCHAR  ucMemoryVenderID;  // Predefined,never change across designs or memory type/vender
  UCHAR  ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
  UCHAR  ucRow;             // Number of Row,in power of 2;
  UCHAR  ucColumn;          // Number of Column,in power of 2;
  UCHAR  ucBank;            // Nunber of Bank;
  UCHAR  ucRank;            // Number of Rank, in power of 2
  UCHAR  ucChannelNum;      // Number of channel;
  UCHAR  ucChannelConfig;   // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
  UCHAR  ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
  UCHAR  ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
  UCHAR  ucReserved[2];
}ATOM_VRAM_MODULE_V1;


typedef struct _ATOM_VRAM_MODULE_V2
{
  ULONG  ulReserved;
  ULONG  ulFlags;           // To enable/disable functionalities based on memory type
  ULONG  ulEngineClock;     // Override of default engine clock for particular memory type
  ULONG  ulMemoryClock;     // Override of default memory clock for particular memory type
  USHORT usEMRS2Value;      // EMRS2 Value is used for GDDR2 and GDDR4 memory type
  USHORT usEMRS3Value;      // EMRS3 Value is used for GDDR2 and GDDR4 memory type
  USHORT usEMRSValue;
  USHORT usMRSValue;
  USHORT usReserved;
  UCHAR  ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
  UCHAR  ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
  UCHAR  ucMemoryVenderID;  // Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed
  UCHAR  ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
  UCHAR  ucRow;             // Number of Row,in power of 2;
  UCHAR  ucColumn;          // Number of Column,in power of 2;
  UCHAR  ucBank;            // Nunber of Bank;
  UCHAR  ucRank;            // Number of Rank, in power of 2
  UCHAR  ucChannelNum;      // Number of channel;
  UCHAR  ucChannelConfig;   // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
  UCHAR  ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
  UCHAR  ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
  UCHAR  ucRefreshRateFactor;
  UCHAR  ucReserved[3];
}ATOM_VRAM_MODULE_V2;


typedef struct _ATOM_MEMORY_TIMING_FORMAT
{
  ULONG  ulClkRange;        // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
  union{
    USHORT usMRS;           // mode register
    USHORT usDDR3_MR0;
  };
  union{
    USHORT usEMRS;          // extended mode register
    USHORT usDDR3_MR1;
  };
  UCHAR  ucCL;              // CAS latency
  UCHAR  ucWL;              // WRITE Latency
  UCHAR  uctRAS;            // tRAS
  UCHAR  uctRC;             // tRC
  UCHAR  uctRFC;            // tRFC
  UCHAR  uctRCDR;           // tRCDR
  UCHAR  uctRCDW;           // tRCDW
  UCHAR  uctRP;             // tRP
  UCHAR  uctRRD;            // tRRD
  UCHAR  uctWR;             // tWR
  UCHAR  uctWTR;            // tWTR
  UCHAR  uctPDIX;           // tPDIX
  UCHAR  uctFAW;            // tFAW
  UCHAR  uctAOND;           // tAOND
  union
  {
    struct {
      UCHAR ucflag;         // flag to control memory timing calculation. bit0= control EMRS2 Infineon
      UCHAR ucReserved;
    };
    USHORT usDDR3_MR2;
  };
}ATOM_MEMORY_TIMING_FORMAT;


typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1
{
  ULONG  ulClkRange;        // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
  USHORT usMRS;             // mode register
  USHORT usEMRS;            // extended mode register
  UCHAR  ucCL;              // CAS latency
  UCHAR  ucWL;              // WRITE Latency
  UCHAR  uctRAS;            // tRAS
  UCHAR  uctRC;             // tRC
  UCHAR  uctRFC;            // tRFC
  UCHAR  uctRCDR;           // tRCDR
  UCHAR  uctRCDW;           // tRCDW
  UCHAR  uctRP;             // tRP
  UCHAR  uctRRD;            // tRRD
  UCHAR  uctWR;             // tWR
  UCHAR  uctWTR;            // tWTR
  UCHAR  uctPDIX;           // tPDIX
  UCHAR  uctFAW;            // tFAW
  UCHAR  uctAOND;           // tAOND
  UCHAR  ucflag;            // flag to control memory timing calculation. bit0= control EMRS2 Infineon
////////////////////////////////////GDDR parameters///////////////////////////////////
  UCHAR  uctCCDL;           //
  UCHAR  uctCRCRL;          //
  UCHAR  uctCRCWL;          //
  UCHAR  uctCKE;            //
  UCHAR  uctCKRSE;          //
  UCHAR  uctCKRSX;          //
  UCHAR  uctFAW32;          //
  UCHAR  ucMR5lo;           //
  UCHAR  ucMR5hi;           //
  UCHAR  ucTerminator;
}ATOM_MEMORY_TIMING_FORMAT_V1;

typedef struct _ATOM_MEMORY_TIMING_FORMAT_V2
{
  ULONG  ulClkRange;        // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
  USHORT usMRS;             // mode register
  USHORT usEMRS;            // extended mode register
  UCHAR  ucCL;              // CAS latency
  UCHAR  ucWL;              // WRITE Latency
  UCHAR  uctRAS;            // tRAS
  UCHAR  uctRC;             // tRC
  UCHAR  uctRFC;            // tRFC
  UCHAR  uctRCDR;           // tRCDR
  UCHAR  uctRCDW;           // tRCDW
  UCHAR  uctRP;             // tRP
  UCHAR  uctRRD;            // tRRD
  UCHAR  uctWR;             // tWR
  UCHAR  uctWTR;            // tWTR
  UCHAR  uctPDIX;           // tPDIX
  UCHAR  uctFAW;            // tFAW
  UCHAR  uctAOND;           // tAOND
  UCHAR  ucflag;            // flag to control memory timing calculation. bit0= control EMRS2 Infineon
////////////////////////////////////GDDR parameters///////////////////////////////////
  UCHAR  uctCCDL;           //
  UCHAR  uctCRCRL;          //
  UCHAR  uctCRCWL;          //
  UCHAR  uctCKE;            //
  UCHAR  uctCKRSE;          //
  UCHAR  uctCKRSX;          //
  UCHAR  uctFAW32;          //
  UCHAR  ucMR4lo;           //
  UCHAR  ucMR4hi;           //
  UCHAR  ucMR5lo;           //
  UCHAR  ucMR5hi;           //
  UCHAR  ucTerminator;
  UCHAR  ucReserved;
}ATOM_MEMORY_TIMING_FORMAT_V2;

typedef struct _ATOM_MEMORY_FORMAT
{
  ULONG  ulDllDisClock;     // memory DLL will be disable when target memory clock is below this clock
  union{
    USHORT usEMRS2Value;    // EMRS2 Value is used for GDDR2 and GDDR4 memory type
    USHORT usDDR3_Reserved; // Not used for DDR3 memory
  };
  union{
    USHORT usEMRS3Value;    // EMRS3 Value is used for GDDR2 and GDDR4 memory type
    USHORT usDDR3_MR3;      // Used for DDR3 memory
  };
  UCHAR  ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
  UCHAR  ucMemoryVenderID;  // Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed
  UCHAR  ucRow;             // Number of Row,in power of 2;
  UCHAR  ucColumn;          // Number of Column,in power of 2;
  UCHAR  ucBank;            // Nunber of Bank;
  UCHAR  ucRank;            // Number of Rank, in power of 2
  UCHAR  ucBurstSize;       // burst size, 0= burst size=4  1= burst size=8
  UCHAR  ucDllDisBit;       // position of DLL Enable/Disable bit in EMRS ( Extended Mode Register )
  UCHAR  ucRefreshRateFactor; // memory refresh rate in unit of ms
  UCHAR  ucDensity;         // _8Mx32, _16Mx32, _16Mx16, _32Mx16
  UCHAR  ucPreamble;        //[7:4] Write Preamble, [3:0] Read Preamble
  UCHAR  ucMemAttrib;       // Memory Device Addribute, like RDBI/WDBI etc
  ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; //Memory Timing block sort from lower clock to higher clock
}ATOM_MEMORY_FORMAT;


typedef struct _ATOM_VRAM_MODULE_V3
{
  ULONG  ulChannelMapCfg;   // board dependent paramenter:Channel combination
  USHORT usSize;            // size of ATOM_VRAM_MODULE_V3
  USHORT usDefaultMVDDQ;    // board dependent parameter:Default Memory Core Voltage
  USHORT usDefaultMVDDC;    // board dependent parameter:Default Memory IO Voltage
  UCHAR  ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
  UCHAR  ucChannelNum;      // board dependent parameter:Number of channel;
  UCHAR  ucChannelSize;     // board dependent parameter:32bit or 64bit
  UCHAR  ucVREFI;           // board dependnt parameter: EXT or INT +160mv to -140mv
  UCHAR  ucNPL_RT;          // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
  UCHAR  ucFlag;            // To enable/disable functionalities based on memory type
  ATOM_MEMORY_FORMAT asMemory; // describ all of video memory parameters from memory spec
}ATOM_VRAM_MODULE_V3;


//ATOM_VRAM_MODULE_V3.ucNPL_RT
#define NPL_RT_MASK       0x0f
#define BATTERY_ODT_MASK  0xc0

#define ATOM_VRAM_MODULE  ATOM_VRAM_MODULE_V3

typedef struct _ATOM_VRAM_MODULE_V4
{
  ULONG  ulChannelMapCfg;   // board dependent parameter: Channel combination
  USHORT usModuleSize;      // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
  USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
  USHORT usReserved;
  UCHAR  ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
  UCHAR  ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
  UCHAR  ucChannelNum;      // Number of channels present in this module config
  UCHAR  ucChannelWidth;    // 0 - 32 bits; 1 - 64 bits
  UCHAR  ucDensity;         // _8Mx32, _16Mx32, _16Mx16, _32Mx16
  UCHAR  ucFlag;            // To enable/disable functionalities based on memory type
  UCHAR  ucMisc;            // bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8
  UCHAR  ucVREFI;           // board dependent parameter
  UCHAR  ucNPL_RT;          // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
  UCHAR  ucPreamble;        // [7:4] Write Preamble, [3:0] Read Preamble
  UCHAR  ucMemorySize;      // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
  UCHAR  ucReserved[3];

//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level
  union{
    USHORT usEMRS2Value;    // EMRS2 Value is used for GDDR2 and GDDR4 memory type
    USHORT usDDR3_Reserved;
  };
  union{
    USHORT usEMRS3Value;    // EMRS3 Value is used for GDDR2 and GDDR4 memory type
    USHORT usDDR3_MR3;      // Used for DDR3 memory
  };
  UCHAR  ucMemoryVenderID;  // Predefined, If not predefined, vendor detection table gets executed
  UCHAR  ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
  UCHAR  ucReserved2[2];
  ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; //Memory Timing block sort from lower clock to higher clock
}ATOM_VRAM_MODULE_V4;

#define VRAM_MODULE_V4_MISC_RANK_MASK   0x3
#define VRAM_MODULE_V4_MISC_DUAL_RANK   0x1
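// Example (editor sketch, not part of the header): decoding a few of the
// packed fields above.  Bit positions follow the comments on
// ATOM_VRAM_MODULE_V4 and the VRAM_MODULE_V4_MISC_* masks; the helper names
// are made up.
static inline UCHAR example_v4_memory_type(const ATOM_VRAM_MODULE_V4 *vram)
{
   return (vram->ucMemoryType & 0xF0) >> 4;        // 0x1:DDR1 ... 0x5:(G)DDR5
}

static inline int example_v4_is_dual_rank(const ATOM_VRAM_MODULE_V4 *vram)
{
   return (vram->ucMisc & VRAM_MODULE_V4_MISC_RANK_MASK) == VRAM_MODULE_V4_MISC_DUAL_RANK;
}

// Pick the timing set whose ulClkRange covers the requested memory clock.
// asMemTiming[] is sorted from lower clock to higher clock, so the first
// entry whose limit is not below the target is the one to use.
static inline const ATOM_MEMORY_TIMING_FORMAT *
example_v4_timing_for_clock(const ATOM_VRAM_MODULE_V4 *vram, ULONG mclk_10khz)
{
   int i;

   for (i = 0; i < 5; i++) {
      if (mclk_10khz <= vram->asMemTiming[i].ulClkRange)
         return &vram->asMemTiming[i];
   }
   return &vram->asMemTiming[4];                   // above all ranges: use the last set
}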
@@ -3973,96 +4841,139 @@
#define VRAM_MODULE_V4_MISC_BL8         0x4
#define VRAM_MODULE_V4_MISC_DUAL_CS     0x10

typedef struct _ATOM_VRAM_MODULE_V5
{
  ULONG  ulChannelMapCfg;   // board dependent parameter: Channel combination
  USHORT usModuleSize;      // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
  USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
  USHORT usReserved;
  UCHAR  ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
  UCHAR  ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
  UCHAR  ucChannelNum;      // Number of channels present in this module config
  UCHAR  ucChannelWidth;    // 0 - 32 bits; 1 - 64 bits
  UCHAR  ucDensity;         // _8Mx32, _16Mx32, _16Mx16, _32Mx16
  UCHAR  ucFlag;            // To enable/disable functionalities based on memory type
  UCHAR  ucMisc;            // bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8
  UCHAR  ucVREFI;           // board dependent parameter
  UCHAR  ucNPL_RT;          // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
  UCHAR  ucPreamble;        // [7:4] Write Preamble, [3:0] Read Preamble
  UCHAR  ucMemorySize;      // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
  UCHAR  ucReserved[3];

//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level
  USHORT usEMRS2Value;      // EMRS2 Value is used for GDDR2 and GDDR4 memory type
  USHORT usEMRS3Value;      // EMRS3 Value is used for GDDR2 and GDDR4 memory type
  UCHAR  ucMemoryVenderID;  // Predefined, If not predefined, vendor detection table gets executed
  UCHAR  ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
  UCHAR  ucFIFODepth;       // FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth
  UCHAR  ucCDR_Bandwidth;   // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
  ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5]; //Memory Timing block sort from lower clock to higher clock
}ATOM_VRAM_MODULE_V5;

typedef struct _ATOM_VRAM_MODULE_V6
{
  ULONG  ulChannelMapCfg;   // board dependent parameter: Channel combination
  USHORT usModuleSize;      // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
  USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
  USHORT usReserved;
  UCHAR  ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
  UCHAR  ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
  UCHAR  ucChannelNum;      // Number of channels present in this module config
  UCHAR  ucChannelWidth;    // 0 - 32 bits; 1 - 64 bits
  UCHAR  ucDensity;         // _8Mx32, _16Mx32, _16Mx16, _32Mx16
  UCHAR  ucFlag;            // To enable/disable functionalities based on memory type
  UCHAR  ucMisc;            // bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8
  UCHAR  ucVREFI;           // board dependent parameter
  UCHAR  ucNPL_RT;          // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
  UCHAR  ucPreamble;        // [7:4] Write Preamble, [3:0] Read Preamble
  UCHAR  ucMemorySize;      // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
  UCHAR  ucReserved[3];

//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level
  USHORT usEMRS2Value;      // EMRS2 Value is used for GDDR2 and GDDR4 memory type
  USHORT usEMRS3Value;      // EMRS3 Value is used for GDDR2 and GDDR4 memory type
  UCHAR  ucMemoryVenderID;  // Predefined, If not predefined, vendor detection table gets executed
  UCHAR  ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
  UCHAR  ucFIFODepth;       // FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth
  UCHAR  ucCDR_Bandwidth;   // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
  ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5]; //Memory Timing block sort from lower clock to higher clock
}ATOM_VRAM_MODULE_V6;



typedef struct _ATOM_VRAM_INFO_V2
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  UCHAR                    ucNumOfVRAMModule;
  ATOM_VRAM_MODULE         aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
}ATOM_VRAM_INFO_V2;

typedef struct _ATOM_VRAM_INFO_V3
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT                   usMemAdjustTblOffset;   // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
  USHORT                   usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
  USHORT                   usRerseved;
  UCHAR                    aVID_PinsShift[9];      // 8 bit strap maximum+terminator
  UCHAR                    ucNumOfVRAMModule;
  ATOM_VRAM_MODULE         aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
  ATOM_INIT_REG_BLOCK      asMemPatch;             // for allocation
                                                   // ATOM_INIT_REG_BLOCK aMemAdjust;
}ATOM_VRAM_INFO_V3;

#define ATOM_VRAM_INFO_LAST  ATOM_VRAM_INFO_V3

typedef struct _ATOM_VRAM_INFO_V4
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT                   usMemAdjustTblOffset;   // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
  USHORT                   usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
  USHORT                   usRerseved;
  UCHAR                    ucMemDQ7_0ByteRemap;    // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
  ULONG                    ulMemDQ7_0BitRemap;     // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21]
  UCHAR                    ucReservde[4];
  UCHAR                    ucNumOfVRAMModule;
  ATOM_VRAM_MODULE_V4      aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
  ATOM_INIT_REG_BLOCK      asMemPatch;             // for allocation
                                                   // ATOM_INIT_REG_BLOCK aMemAdjust;
}ATOM_VRAM_INFO_V4;

typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  UCHAR                    aVID_PinsShift[9];      //8 bit strap maximum+terminator
}ATOM_VRAM_GPIO_DETECTION_INFO;


typedef struct _ATOM_MEMORY_TRAINING_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  UCHAR                    ucTrainingLoop;
  UCHAR                    ucReserved[3];
  ATOM_INIT_REG_BLOCK      asMemTrainingSetting;
}ATOM_MEMORY_TRAINING_INFO;


typedef struct SW_I2C_CNTL_DATA_PARAMETERS
{
  UCHAR ucControl;
  UCHAR ucData;
  UCHAR ucSatus;
  UCHAR ucTemp;
} SW_I2C_CNTL_DATA_PARAMETERS;

#define SW_I2C_CNTL_DATA_PS_ALLOCATION  SW_I2C_CNTL_DATA_PARAMETERS

typedef struct _SW_I2C_IO_DATA_PARAMETERS
{
  USHORT GPIO_Info;
  UCHAR  ucAct;
  UCHAR  ucData;
} SW_I2C_IO_DATA_PARAMETERS;

#define SW_I2C_IO_DATA_PS_ALLOCATION  SW_I2C_IO_DATA_PARAMETERS
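// Example (editor sketch, not part of the header): VRAM module entries in
// ATOM_VRAM_INFO_V4 are variable length, so usModuleSize (not
// sizeof(ATOM_VRAM_MODULE_V4)) is what steps from one entry to the next, and
// ucNumOfVRAMModule gives the real count.  The function name is made up.
static inline const ATOM_VRAM_MODULE_V4 *
example_find_vram_module(const ATOM_VRAM_INFO_V4 *info, UCHAR ext_memory_id)
{
   const UCHAR *p = (const UCHAR *)&info->aVramInfo[0];
   UCHAR i;

   for (i = 0; i < info->ucNumOfVRAMModule; i++) {
      const ATOM_VRAM_MODULE_V4 *vram = (const ATOM_VRAM_MODULE_V4 *)p;

      if (vram->ucExtMemoryID == ext_memory_id)
         return vram;
      p += vram->usModuleSize;         // jump to the next (variable-sized) entry
   }
   return 0;                           // no module matches this strap/ID
}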
@@ -4087,127 +4998,136 @@
#define SW_I2C_CNTL_CLOSE     5
#define SW_I2C_CNTL_WRITE1BIT 6

//==============================VESA definition Portion===============================
#define VESA_OEM_PRODUCT_REV             '01.00'
#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB   //refer to VBE spec p.32, no TTY support
#define VESA_MODE_WIN_ATTRIBUTE          7
#define VESA_WIN_SIZE                    64

typedef struct _PTR_32_BIT_STRUCTURE
{
  USHORT Offset16;
  USHORT Segment16;
} PTR_32_BIT_STRUCTURE;

typedef union _PTR_32_BIT_UNION
{
  PTR_32_BIT_STRUCTURE SegmentOffset;
  ULONG                Ptr32_Bit;
} PTR_32_BIT_UNION;

typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE
{
  UCHAR            VbeSignature[4];
  USHORT           VbeVersion;
  PTR_32_BIT_UNION OemStringPtr;
  UCHAR            Capabilities[4];
  PTR_32_BIT_UNION VideoModePtr;
  USHORT           TotalMemory;
} VBE_1_2_INFO_BLOCK_UPDATABLE;


typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE
{
  VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock;
  USHORT                       OemSoftRev;
  PTR_32_BIT_UNION             OemVendorNamePtr;
  PTR_32_BIT_UNION             OemProductNamePtr;
  PTR_32_BIT_UNION             OemProductRevPtr;
} VBE_2_0_INFO_BLOCK_UPDATABLE;

typedef union _VBE_VERSION_UNION
{
  VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock;
  VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock;
} VBE_VERSION_UNION;

typedef struct _VBE_INFO_BLOCK
{
  VBE_VERSION_UNION UpdatableVBE_Info;
  UCHAR             Reserved[222];
  UCHAR             OemData[256];
} VBE_INFO_BLOCK;

typedef struct _VBE_FP_INFO
{
  USHORT HSize;
  USHORT VSize;
  USHORT FPType;
  UCHAR  RedBPP;
  UCHAR  GreenBPP;
  UCHAR  BlueBPP;
  UCHAR  ReservedBPP;
  ULONG  RsvdOffScrnMemSize;
  ULONG  RsvdOffScrnMEmPtr;
  UCHAR  Reserved[14];
} VBE_FP_INFO;

typedef struct _VESA_MODE_INFO_BLOCK
{
// Mandatory information for all VBE revisions
  USHORT ModeAttributes;         // dw ? ; mode attributes
  UCHAR  WinAAttributes;         // db ? ; window A attributes
  UCHAR  WinBAttributes;         // db ? ; window B attributes
  USHORT WinGranularity;         // dw ? ; window granularity
  USHORT WinSize;                // dw ? ; window size
  USHORT WinASegment;            // dw ? ; window A start segment
  USHORT WinBSegment;            // dw ? ; window B start segment
  ULONG  WinFuncPtr;             // dd ? ; real mode pointer to window function
  USHORT BytesPerScanLine;       // dw ? ; bytes per scan line

//; Mandatory information for VBE 1.2 and above
  USHORT XResolution;            // dw ? ; horizontal resolution in pixels or characters
  USHORT YResolution;            // dw ? ; vertical resolution in pixels or characters
  UCHAR  XCharSize;              // db ? ; character cell width in pixels
  UCHAR  YCharSize;              // db ? ; character cell height in pixels
  UCHAR  NumberOfPlanes;         // db ? ; number of memory planes
  UCHAR  BitsPerPixel;           // db ? ; bits per pixel
  UCHAR  NumberOfBanks;          // db ? ; number of banks
  UCHAR  MemoryModel;            // db ? ; memory model type
  UCHAR  BankSize;               // db ? ; bank size in KB
  UCHAR  NumberOfImagePages;     // db ? ; number of images
  UCHAR  ReservedForPageFunction;//db 1 ; reserved for page function

//; Direct Color fields(required for direct/6 and YUV/7 memory models)
  UCHAR  RedMaskSize;            // db ? ; size of direct color red mask in bits
  UCHAR  RedFieldPosition;       // db ? ; bit position of lsb of red mask
  UCHAR  GreenMaskSize;          // db ? ; size of direct color green mask in bits
  UCHAR  GreenFieldPosition;     // db ? ; bit position of lsb of green mask
  UCHAR  BlueMaskSize;           // db ? ; size of direct color blue mask in bits
  UCHAR  BlueFieldPosition;      // db ? ; bit position of lsb of blue mask
  UCHAR  RsvdMaskSize;           // db ? ; size of direct color reserved mask in bits
  UCHAR  RsvdFieldPosition;      // db ? ; bit position of lsb of reserved mask
  UCHAR  DirectColorModeInfo;    // db ? ; direct color mode attributes

//; Mandatory information for VBE 2.0 and above
  ULONG  PhysBasePtr;            // dd ? ; physical address for flat memory frame buffer
  ULONG  Reserved_1;             // dd 0 ; reserved - always set to 0
  USHORT Reserved_2;             // dw 0 ; reserved - always set to 0

//; Mandatory information for VBE 3.0 and above
  USHORT LinBytesPerScanLine;    // dw ? ; bytes per scan line for linear modes
  UCHAR  BnkNumberOfImagePages;  // db ? ; number of images for banked modes
  UCHAR  LinNumberOfImagPages;   // db ? ; number of images for linear modes
  UCHAR  LinRedMaskSize;         // db ? ; size of direct color red mask(linear modes)
  UCHAR  LinRedFieldPosition;    // db ? ; bit position of lsb of red mask(linear modes)
  UCHAR  LinGreenMaskSize;       // db ? ; size of direct color green mask(linear modes)
  UCHAR  LinGreenFieldPosition;  // db ? ; bit position of lsb of green mask(linear modes)
  UCHAR  LinBlueMaskSize;        // db ? ; size of direct color blue mask(linear modes)
  UCHAR  LinBlueFieldPosition;   // db ? ; bit position of lsb of blue mask(linear modes)
  UCHAR  LinRsvdMaskSize;        // db ? ; size of direct color reserved mask(linear modes)
  UCHAR  LinRsvdFieldPosition;   // db ? ; bit position of lsb of reserved mask(linear modes)
  ULONG  MaxPixelClock;          // dd ? ; maximum pixel clock(in Hz) for graphics mode
  UCHAR  Reserved;               // db 190 dup (0)
} VESA_MODE_INFO_BLOCK;

// BIOS function CALLS
#define ATOM_BIOS_EXTENDED_FUNCTION_CODE        0xA0  // ATI Extended Function code
#define ATOM_BIOS_FUNCTION_COP_MODE             0x00
#define ATOM_BIOS_FUNCTION_SHORT_QUERY1         0x04
#define ATOM_BIOS_FUNCTION_SHORT_QUERY2         0x05
#define ATOM_BIOS_FUNCTION_SHORT_QUERY3         0x06
#define ATOM_BIOS_FUNCTION_GET_DDC              0x0B
#define ATOM_BIOS_FUNCTION_ASIC_DSTATE          0x0E
#define ATOM_BIOS_FUNCTION_DEBUG_PLAY           0x0F
#define ATOM_BIOS_FUNCTION_STV_STD              0x16
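// Example (editor sketch, not from the VBE structures above): the PTR_32_BIT
// fields hold real-mode segment:offset pointers, so a flat (linear) address
// is segment * 16 + offset.  The union also allows reading the same value as
// a single 32-bit word.  The helper name is made up.
static inline ULONG example_segoff_to_linear(PTR_32_BIT_UNION ptr)
{
   return ((ULONG)ptr.SegmentOffset.Segment16 << 4) + ptr.SegmentOffset.Offset16;
}

// e.g. locating the OEM string advertised in a VBE_1_2_INFO_BLOCK_UPDATABLE:
//   ULONG oem_addr = example_segoff_to_linear(info->OemStringPtr);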
@@ -4217,100 +5137,135 @@ typedef struct _VESA_MODE_INFO_BLOCK {
4217#define ATOM_BIOS_FUNCTION_PANEL_CONTROL 0x82 5137#define ATOM_BIOS_FUNCTION_PANEL_CONTROL 0x82
4218#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET 0x83 5138#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET 0x83
4219#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH 0x84 5139#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH 0x84
4220#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A 5140#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A
4221#define ATOM_BIOS_FUNCTION_SET_CMOS 0x8B 5141#define ATOM_BIOS_FUNCTION_SET_CMOS 0x8B
4222#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 /* Sub function 80 */ 5142#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 // Sub function 80
4223#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 /* Sub function 80 */ 5143#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 // Sub function 80
4224 5144
4225#define ATOM_BIOS_FUNCTION_DISPLAY_INFO 0x8D 5145#define ATOM_BIOS_FUNCTION_DISPLAY_INFO 0x8D
4226#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF 0x8E 5146#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF 0x8E
4227#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F 5147#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F
4228#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 /* Sub function 03 */ 5148#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 // Sub function 03
4229#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 /* Sub function 7 */ 5149#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 // Sub function 7
4230#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 /* Notify caller the current thermal state */ 5150#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 // Notify caller the current thermal state
4231#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 /* Notify caller the current critical state */ 5151#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 // Notify caller the current critical state
4232#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 /* Sub function 85 */ 5152#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 // Sub function 85
4233#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900 /* Sub function 89 */ 5153#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900// Sub function 89
4234#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 /* Notify caller that ADC is supported */ 5154#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 // Notify caller that ADC is supported
4235 5155
4236#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 /* Set DPMS */ 5156
4237#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 /* BL: Sub function 01 */ 5157#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 // Set DPMS
4238#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 /* BL: Sub function 02 */ 5158#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 // BL: Sub function 01
4239#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 /* BH Parameter for DPMS ON. */ 5159#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 // BL: Sub function 02
4240#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 /* BH Parameter for DPMS STANDBY */ 5160#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 // BH Parameter for DPMS ON.
4241#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 /* BH Parameter for DPMS SUSPEND */ 5161#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 // BH Parameter for DPMS STANDBY
4242#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 /* BH Parameter for DPMS OFF */ 5162#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 // BH Parameter for DPMS SUSPEND
4243#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 /* BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) */ 5163#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 // BH Parameter for DPMS OFF
5164#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED)
4244 5165
4245#define ATOM_BIOS_RETURN_CODE_MASK 0x0000FF00L 5166#define ATOM_BIOS_RETURN_CODE_MASK 0x0000FF00L
4246#define ATOM_BIOS_REG_HIGH_MASK 0x0000FF00L 5167#define ATOM_BIOS_REG_HIGH_MASK 0x0000FF00L
4247#define ATOM_BIOS_REG_LOW_MASK 0x000000FFL 5168#define ATOM_BIOS_REG_LOW_MASK 0x000000FFL
4248 5169
4249/* structure used for VBIOS only */ 5170// structure used for VBIOS only
4250 5171
4251/* DispOutInfoTable */ 5172//DispOutInfoTable
4252typedef struct _ASIC_TRANSMITTER_INFO { 5173typedef struct _ASIC_TRANSMITTER_INFO
5174{
4253 USHORT usTransmitterObjId; 5175 USHORT usTransmitterObjId;
4254 USHORT usSupportDevice; 5176 USHORT usSupportDevice;
4255 UCHAR ucTransmitterCmdTblId; 5177 UCHAR ucTransmitterCmdTblId;
4256 UCHAR ucConfig; 5178 UCHAR ucConfig;
4257 UCHAR ucEncoderID; /* available 1st encoder ( default ) */ 5179 UCHAR ucEncoderID; //available 1st encoder ( default )
4258 UCHAR ucOptionEncoderID; /* available 2nd encoder ( optional ) */ 5180 UCHAR ucOptionEncoderID; //available 2nd encoder ( optional )
4259 UCHAR uc2ndEncoderID; 5181 UCHAR uc2ndEncoderID;
4260 UCHAR ucReserved; 5182 UCHAR ucReserved;
4261} ASIC_TRANSMITTER_INFO; 5183}ASIC_TRANSMITTER_INFO;
4262 5184
4263typedef struct _ASIC_ENCODER_INFO { 5185typedef struct _ASIC_ENCODER_INFO
5186{
4264 UCHAR ucEncoderID; 5187 UCHAR ucEncoderID;
4265 UCHAR ucEncoderConfig; 5188 UCHAR ucEncoderConfig;
4266 USHORT usEncoderCmdTblId; 5189 USHORT usEncoderCmdTblId;
4267} ASIC_ENCODER_INFO; 5190}ASIC_ENCODER_INFO;
5191
typedef struct _ATOM_DISP_OUT_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT                   ptrTransmitterInfo;
  USHORT                   ptrEncoderInfo;
  ASIC_TRANSMITTER_INFO    asTransmitterInfo[1];
  ASIC_ENCODER_INFO        asEncoderInfo[1];
}ATOM_DISP_OUT_INFO;

typedef struct _ATOM_DISP_OUT_INFO_V2
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT                   ptrTransmitterInfo;
  USHORT                   ptrEncoderInfo;
  USHORT                   ptrMainCallParserFar;   // direct address of main parser call in VBIOS binary.
  ASIC_TRANSMITTER_INFO    asTransmitterInfo[1];
  ASIC_ENCODER_INFO        asEncoderInfo[1];
}ATOM_DISP_OUT_INFO_V2;
4276 5210
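The DispOutInfo table is self-describing: ptrTransmitterInfo and ptrEncoderInfo look like byte offsets from the start of the table, and the [1]-sized arrays are the usual C89 flexible-array idiom. A minimal sketch of walking the transmitter entries, under those assumptions; the mirrored struct layouts and the count derivation are illustrative, not taken from the radeon driver:

#include <stdint.h>

typedef uint8_t  UCHAR;
typedef uint16_t USHORT;

/* Trimmed-down mirrors of the structures above; real layouts come from atombios.h. */
typedef struct { USHORT usStructureSize; UCHAR ucTableFormatRevision; UCHAR ucTableContentRevision; } ATOM_COMMON_TABLE_HEADER;
typedef struct { USHORT usTransmitterObjId; USHORT usSupportDevice; UCHAR ucTransmitterCmdTblId;
                 UCHAR ucConfig; UCHAR ucEncoderID; UCHAR ucOptionEncoderID;
                 UCHAR uc2ndEncoderID; UCHAR ucReserved; } ASIC_TRANSMITTER_INFO;

/* Walk the transmitter entries of a DispOutInfo table located at 'table' in the VBIOS image. */
static void walk_disp_out_info(const uint8_t *table)
{
        USHORT ptr_tx, ptr_enc;
        int i, count;
        const ASIC_TRANSMITTER_INFO *tx;

        /* Assumption: both pointers are offsets relative to the table start. */
        ptr_tx  = *(const USHORT *)(table + sizeof(ATOM_COMMON_TABLE_HEADER));
        ptr_enc = *(const USHORT *)(table + sizeof(ATOM_COMMON_TABLE_HEADER) + 2);

        /* Assumption: the transmitter array runs from ptrTransmitterInfo up to ptrEncoderInfo. */
        count = (ptr_enc - ptr_tx) / (int)sizeof(ASIC_TRANSMITTER_INFO);
        tx = (const ASIC_TRANSMITTER_INFO *)(table + ptr_tx);

        for (i = 0; i < count; i++)
                (void)tx[i].usTransmitterObjId;  /* match against the object being programmed */
}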
4277/* DispDevicePriorityInfo */ 5211// DispDevicePriorityInfo
4278typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO { 5212typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
4279 ATOM_COMMON_TABLE_HEADER sHeader; 5213{
5214 ATOM_COMMON_TABLE_HEADER sHeader;
4280 USHORT asDevicePriority[16]; 5215 USHORT asDevicePriority[16];
4281} ATOM_DISPLAY_DEVICE_PRIORITY_INFO; 5216}ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
4282 5217
//ProcessAuxChannelTransactionTable
typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
{
  USHORT  lpAuxRequest;
  USHORT  lpDataOut;
  UCHAR   ucChannelID;
  union
  {
    UCHAR ucReplyStatus;
    UCHAR ucDelay;
  };
  UCHAR   ucDataOutLen;
  UCHAR   ucReserved;
}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;

//ProcessAuxChannelTransactionTable
typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2
{
  USHORT  lpAuxRequest;
  USHORT  lpDataOut;
  UCHAR   ucChannelID;
  union
  {
    UCHAR ucReplyStatus;
    UCHAR ucDelay;
  };
  UCHAR   ucDataOutLen;
  UCHAR   ucHPD_ID;                                       //=0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6
}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2;
4295 5247
4296#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS 5248#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
4297 5249
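Both revisions of the AUX transaction block follow the same calling pattern: the caller supplies the request/reply buffer locations and the channel, then reads ucReplyStatus and ucDataOutLen back after the command table runs. A hedged sketch of filling the V2 block for a native AUX read, assuming atombios.h is in scope; the buffer offsets and the commented-out execute call are placeholders, not the actual radeon helpers:

#include <string.h>

/* Hypothetical offsets in the ATOM scratch area used to pass the encoded AUX
 * request and receive the reply; the real driver chooses these itself. */
#define AUX_REQ_OFFSET   0x100
#define AUX_REPLY_OFFSET 0x180

static int aux_read_v2_sketch(PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 *args,
                              unsigned char channel, unsigned char hpd_id)
{
        memset(args, 0, sizeof(*args));
        args->lpAuxRequest = AUX_REQ_OFFSET;    /* where the AUX request bytes were copied */
        args->lpDataOut    = AUX_REPLY_OFFSET;  /* where the BIOS writes the reply payload */
        args->ucChannelID  = channel;
        args->ucDelay      = 0;                 /* union with ucReplyStatus: delay in, status out */
        args->ucHPD_ID     = hpd_id;            /* 0..5 selects HPD1..HPD6, per the comment above */

        /* execute_atom_table(index, args);        placeholder for the command-table call */

        return args->ucReplyStatus ? -1 : args->ucDataOutLen;
}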
4298/* GetSinkType */ 5250//GetSinkType
4299 5251
4300typedef struct _DP_ENCODER_SERVICE_PARAMETERS { 5252typedef struct _DP_ENCODER_SERVICE_PARAMETERS
5253{
4301 USHORT ucLinkClock; 5254 USHORT ucLinkClock;
4302 union { 5255 union
4303 UCHAR ucConfig; /* for DP training command */ 5256 {
4304 UCHAR ucI2cId; /* use for GET_SINK_TYPE command */ 5257 UCHAR ucConfig; // for DP training command
5258 UCHAR ucI2cId; // use for GET_SINK_TYPE command
4305 }; 5259 };
4306 UCHAR ucAction; 5260 UCHAR ucAction;
4307 UCHAR ucStatus; 5261 UCHAR ucStatus;
4308 UCHAR ucLaneNum; 5262 UCHAR ucLaneNum;
4309 UCHAR ucReserved[2]; 5263 UCHAR ucReserved[2];
4310} DP_ENCODER_SERVICE_PARAMETERS; 5264}DP_ENCODER_SERVICE_PARAMETERS;
4311 5265
4312/* ucAction */ 5266// ucAction
4313#define ATOM_DP_ACTION_GET_SINK_TYPE 0x01 5267#define ATOM_DP_ACTION_GET_SINK_TYPE 0x01
/* obsolete */
4314#define ATOM_DP_ACTION_TRAINING_START 0x02 5269#define ATOM_DP_ACTION_TRAINING_START 0x02
4315#define ATOM_DP_ACTION_TRAINING_COMPLETE 0x03 5270#define ATOM_DP_ACTION_TRAINING_COMPLETE 0x03
4316#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL 0x04 5271#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL 0x04
@@ -4318,7 +5273,7 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
4318#define ATOM_DP_ACTION_GET_VSWING_PREEMP 0x06 5273#define ATOM_DP_ACTION_GET_VSWING_PREEMP 0x06
4319#define ATOM_DP_ACTION_BLANKING 0x07 5274#define ATOM_DP_ACTION_BLANKING 0x07
4320 5275
4321/* ucConfig */ 5276// ucConfig
4322#define ATOM_DP_CONFIG_ENCODER_SEL_MASK 0x03 5277#define ATOM_DP_CONFIG_ENCODER_SEL_MASK 0x03
4323#define ATOM_DP_CONFIG_DIG1_ENCODER 0x00 5278#define ATOM_DP_CONFIG_DIG1_ENCODER 0x00
4324#define ATOM_DP_CONFIG_DIG2_ENCODER 0x01 5279#define ATOM_DP_CONFIG_DIG2_ENCODER 0x01
@@ -4326,14 +5281,14 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
4326#define ATOM_DP_CONFIG_LINK_SEL_MASK 0x04 5281#define ATOM_DP_CONFIG_LINK_SEL_MASK 0x04
4327#define ATOM_DP_CONFIG_LINK_A 0x00 5282#define ATOM_DP_CONFIG_LINK_A 0x00
4328#define ATOM_DP_CONFIG_LINK_B 0x04 5283#define ATOM_DP_CONFIG_LINK_B 0x04
/* /obsolete */
4330#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS 5285#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
4331 5286
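DP_ENCODER_SERVICE_PARAMETERS doubles as a query block: with ucAction set to ATOM_DP_ACTION_GET_SINK_TYPE, the BIOS reports the downstream sink through ucStatus. A minimal, hedged sketch of that call pattern, assuming atombios.h is in scope; the commented-out execute call and the source of the i2c id are stand-ins, and the exact meaning of the returned status is taken on trust from the field names:

#include <string.h>

static unsigned char dp_get_sink_type_sketch(DP_ENCODER_SERVICE_PARAMETERS *args,
                                             unsigned char i2c_id)
{
        memset(args, 0, sizeof(*args));          /* ucLinkClock stays 0 for a sink-type query */
        args->ucAction = ATOM_DP_ACTION_GET_SINK_TYPE;
        args->ucI2cId  = i2c_id;                 /* union with ucConfig; only the i2c id matters here */

        /* execute_atom_table(index_of_DPEncoderService, args);   placeholder */

        return args->ucStatus;                   /* sink type as filled in by the BIOS */
}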
4332/* DP_TRAINING_TABLE */ 5287// DP_TRAINING_TABLE
4333#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR 5288#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
4334#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 ) 5289#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 )
4335#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16) 5290#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16 )
4336#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24) 5291#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24 )
4337#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 32) 5292#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 32)
4338#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 40) 5293#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 40)
4339#define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 48) 5294#define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 48)
@@ -4341,183 +5296,241 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
4341#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 64) 5296#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 64)
4342#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 72) 5297#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 72)
4343#define DP_I2C_AUX_DDC_READ_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 76) 5298#define DP_I2C_AUX_DDC_READ_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 76)
4344#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80) 5299#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80)
5300#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 84)
4345 5301
4346typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS { 5302typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
4347 UCHAR ucI2CSpeed; 5303{
4348 union { 5304 UCHAR ucI2CSpeed;
4349 UCHAR ucRegIndex; 5305 union
4350 UCHAR ucStatus; 5306 {
5307 UCHAR ucRegIndex;
5308 UCHAR ucStatus;
4351 }; 5309 };
4352 USHORT lpI2CDataOut; 5310 USHORT lpI2CDataOut;
4353 UCHAR ucFlag; 5311 UCHAR ucFlag;
4354 UCHAR ucTransBytes; 5312 UCHAR ucTransBytes;
4355 UCHAR ucSlaveAddr; 5313 UCHAR ucSlaveAddr;
4356 UCHAR ucLineNumber; 5314 UCHAR ucLineNumber;
4357} PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS; 5315}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
4358 5316
4359#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS 5317#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
4360 5318
4361/* ucFlag */ 5319//ucFlag
4362#define HW_I2C_WRITE 1 5320#define HW_I2C_WRITE 1
4363#define HW_I2C_READ 0 5321#define HW_I2C_READ 0
5322#define I2C_2BYTE_ADDR 0x02
4364 5323
5324typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
5325{
5326 UCHAR ucHWBlkInst; // HW block instance, 0, 1, 2, ...
5327 UCHAR ucReserved[3];
5328}SET_HWBLOCK_INSTANCE_PARAMETER_V2;
5329
5330#define HWBLKINST_INSTANCE_MASK 0x07
5331#define HWBLKINST_HWBLK_MASK 0xF0
5332#define HWBLKINST_HWBLK_SHIFT 0x04
5333
5334//ucHWBlock
5335#define SELECT_DISP_ENGINE 0
5336#define SELECT_DISP_PLL 1
5337#define SELECT_DCIO_UNIPHY_LINK0 2
5338#define SELECT_DCIO_UNIPHY_LINK1 3
5339#define SELECT_DCIO_IMPCAL 4
5340#define SELECT_DCIO_DIG 6
5341#define SELECT_CRTC_PIXEL_RATE 7
5342
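Judging by the masks that follow the structure, ucHWBlkInst packs both the block selector and its instance into one byte: bits [2:0] carry the instance and bits [7:4] one of the SELECT_* codes. A small helper expressing that encoding; this is an illustration of the bit layout, not a function from the driver, and it repeats the mask values defined above:

/* Pack a SELECT_* block code and an instance number into ucHWBlkInst. */
static unsigned char hwblk_inst_encode(unsigned char block, unsigned char instance)
{
        return (unsigned char)(((block << HWBLKINST_HWBLK_SHIFT) & HWBLKINST_HWBLK_MASK) |
                               (instance & HWBLKINST_INSTANCE_MASK));
}

/* Example: second display PLL -> hwblk_inst_encode(SELECT_DISP_PLL, 1) */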
/****************************************************************************/
//Portion VI: Definitions for vbios MC scratch registers that driver used
/****************************************************************************/

#define MC_MISC0__MEMORY_TYPE_MASK    0xF0000000
#define MC_MISC0__MEMORY_TYPE__GDDR1  0x10000000
#define MC_MISC0__MEMORY_TYPE__DDR2   0x20000000
#define MC_MISC0__MEMORY_TYPE__GDDR3  0x30000000
#define MC_MISC0__MEMORY_TYPE__GDDR4  0x40000000
#define MC_MISC0__MEMORY_TYPE__GDDR5  0x50000000
#define MC_MISC0__MEMORY_TYPE__DDR3   0xB0000000
5354
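The memory type sits in the top nibble of the MC_MISC0 scratch value, so decoding it is a mask-and-compare against the defines above. A short sketch; how the register value is obtained is left to the caller:

static const char *mc_misc0_memtype_str(unsigned int mc_misc0)
{
        switch (mc_misc0 & MC_MISC0__MEMORY_TYPE_MASK) {
        case MC_MISC0__MEMORY_TYPE__GDDR1: return "GDDR1";
        case MC_MISC0__MEMORY_TYPE__DDR2:  return "DDR2";
        case MC_MISC0__MEMORY_TYPE__GDDR3: return "GDDR3";
        case MC_MISC0__MEMORY_TYPE__GDDR4: return "GDDR4";
        case MC_MISC0__MEMORY_TYPE__GDDR5: return "GDDR5";
        case MC_MISC0__MEMORY_TYPE__DDR3:  return "DDR3";
        default:                           return "unknown";
        }
}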
/****************************************************************************/
//Portion VI: Definitions being obsolete
/****************************************************************************/
4368 5358
4369/* ========================================================================================== */ 5359//==========================================================================================
4370/* Remove the definitions below when driver is ready! */ 5360//Remove the definitions below when driver is ready!
typedef struct _ATOM_DAC_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT                   usMaxFrequency;      // in 10kHz unit
  USHORT                   usReserved;
}ATOM_DAC_INFO;

typedef struct _COMPASSIONATE_DATA
{
  ATOM_COMMON_TABLE_HEADER sHeader;

  //============================== DAC1 portion
  UCHAR   ucDAC1_BG_Adjustment;
  UCHAR   ucDAC1_DAC_Adjustment;
  USHORT  usDAC1_FORCE_Data;
  //============================== DAC2 portion
  UCHAR   ucDAC2_CRT2_BG_Adjustment;
  UCHAR   ucDAC2_CRT2_DAC_Adjustment;
  USHORT  usDAC2_CRT2_FORCE_Data;
  USHORT  usDAC2_CRT2_MUX_RegisterIndex;
  UCHAR   ucDAC2_CRT2_MUX_RegisterInfo;     //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
  UCHAR   ucDAC2_NTSC_BG_Adjustment;
  UCHAR   ucDAC2_NTSC_DAC_Adjustment;
  USHORT  usDAC2_TV1_FORCE_Data;
  USHORT  usDAC2_TV1_MUX_RegisterIndex;
  UCHAR   ucDAC2_TV1_MUX_RegisterInfo;      //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
  UCHAR   ucDAC2_CV_BG_Adjustment;
  UCHAR   ucDAC2_CV_DAC_Adjustment;
  USHORT  usDAC2_CV_FORCE_Data;
  USHORT  usDAC2_CV_MUX_RegisterIndex;
  UCHAR   ucDAC2_CV_MUX_RegisterInfo;       //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
  UCHAR   ucDAC2_PAL_BG_Adjustment;
  UCHAR   ucDAC2_PAL_DAC_Adjustment;
  USHORT  usDAC2_TV2_FORCE_Data;
}COMPASSIONATE_DATA;
4404 5397
4405/****************************Supported Device Info Table Definitions**********************/ 5398/****************************Supported Device Info Table Definitions**********************/
4406/* ucConnectInfo: */ 5399// ucConnectInfo:
4407/* [7:4] - connector type */ 5400// [7:4] - connector type
4408/* = 1 - VGA connector */ 5401// = 1 - VGA connector
4409/* = 2 - DVI-I */ 5402// = 2 - DVI-I
4410/* = 3 - DVI-D */ 5403// = 3 - DVI-D
4411/* = 4 - DVI-A */ 5404// = 4 - DVI-A
4412/* = 5 - SVIDEO */ 5405// = 5 - SVIDEO
4413/* = 6 - COMPOSITE */ 5406// = 6 - COMPOSITE
4414/* = 7 - LVDS */ 5407// = 7 - LVDS
4415/* = 8 - DIGITAL LINK */ 5408// = 8 - DIGITAL LINK
4416/* = 9 - SCART */ 5409// = 9 - SCART
4417/* = 0xA - HDMI_type A */ 5410// = 0xA - HDMI_type A
4418/* = 0xB - HDMI_type B */ 5411// = 0xB - HDMI_type B
4419/* = 0xE - Special case1 (DVI+DIN) */ 5412// = 0xE - Special case1 (DVI+DIN)
4420/* Others=TBD */ 5413// Others=TBD
4421/* [3:0] - DAC Associated */ 5414// [3:0] - DAC Associated
4422/* = 0 - no DAC */ 5415// = 0 - no DAC
4423/* = 1 - DACA */ 5416// = 1 - DACA
4424/* = 2 - DACB */ 5417// = 2 - DACB
4425/* = 3 - External DAC */ 5418// = 3 - External DAC
4426/* Others=TBD */ 5419// Others=TBD
4427/* */ 5420//
4428 5421
4429typedef struct _ATOM_CONNECTOR_INFO { 5422typedef struct _ATOM_CONNECTOR_INFO
5423{
4430#if ATOM_BIG_ENDIAN 5424#if ATOM_BIG_ENDIAN
4431 UCHAR bfConnectorType:4; 5425 UCHAR bfConnectorType:4;
4432 UCHAR bfAssociatedDAC:4; 5426 UCHAR bfAssociatedDAC:4;
4433#else 5427#else
4434 UCHAR bfAssociatedDAC:4; 5428 UCHAR bfAssociatedDAC:4;
4435 UCHAR bfConnectorType:4; 5429 UCHAR bfConnectorType:4;
4436#endif 5430#endif
4437} ATOM_CONNECTOR_INFO; 5431}ATOM_CONNECTOR_INFO;
5432
5433typedef union _ATOM_CONNECTOR_INFO_ACCESS
5434{
5435 ATOM_CONNECTOR_INFO sbfAccess;
5436 UCHAR ucAccess;
5437}ATOM_CONNECTOR_INFO_ACCESS;
4438 5438
4439typedef union _ATOM_CONNECTOR_INFO_ACCESS { 5439typedef struct _ATOM_CONNECTOR_INFO_I2C
4440 ATOM_CONNECTOR_INFO sbfAccess; 5440{
4441 UCHAR ucAccess; 5441 ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
4442} ATOM_CONNECTOR_INFO_ACCESS; 5442 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
5443}ATOM_CONNECTOR_INFO_I2C;
4443 5444
4444typedef struct _ATOM_CONNECTOR_INFO_I2C {
4445 ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
4446 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
4447} ATOM_CONNECTOR_INFO_I2C;
4448 5445
4449typedef struct _ATOM_SUPPORTED_DEVICES_INFO { 5446typedef struct _ATOM_SUPPORTED_DEVICES_INFO
4450 ATOM_COMMON_TABLE_HEADER sHeader; 5447{
4451 USHORT usDeviceSupport; 5448 ATOM_COMMON_TABLE_HEADER sHeader;
4452 ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO]; 5449 USHORT usDeviceSupport;
4453} ATOM_SUPPORTED_DEVICES_INFO; 5450 ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
5451}ATOM_SUPPORTED_DEVICES_INFO;
4454 5452
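The sucConnectorInfo byte is exactly the ucConnectInfo layout described in the comment block above: the high nibble is the connector type and the low nibble the associated DAC, and reading it through the ucAccess member of the union sidesteps bitfield ordering. A small decode helper; the output values follow that comment table and nothing else:

static void decode_conn_info(unsigned char uc_access,
                             unsigned char *connector_type, unsigned char *dac)
{
        *connector_type = (uc_access >> 4) & 0x0F;   /* [7:4] connector type: 1=VGA, 2=DVI-I, ... */
        *dac            = uc_access & 0x0F;          /* [3:0] DAC: 0=none, 1=DACA, 2=DACB, 3=external */
}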
4455#define NO_INT_SRC_MAPPED 0xFF 5453#define NO_INT_SRC_MAPPED 0xFF
4456 5454
4457typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP { 5455typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP
4458 UCHAR ucIntSrcBitmap; 5456{
4459} ATOM_CONNECTOR_INC_SRC_BITMAP; 5457 UCHAR ucIntSrcBitmap;
4460 5458}ATOM_CONNECTOR_INC_SRC_BITMAP;
4461typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2 { 5459
4462 ATOM_COMMON_TABLE_HEADER sHeader; 5460typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2
4463 USHORT usDeviceSupport; 5461{
4464 ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; 5462 ATOM_COMMON_TABLE_HEADER sHeader;
4465 ATOM_CONNECTOR_INC_SRC_BITMAP 5463 USHORT usDeviceSupport;
4466 asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; 5464 ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
4467} ATOM_SUPPORTED_DEVICES_INFO_2; 5465 ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
4468 5466}ATOM_SUPPORTED_DEVICES_INFO_2;
4469typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 { 5467
4470 ATOM_COMMON_TABLE_HEADER sHeader; 5468typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1
4471 USHORT usDeviceSupport; 5469{
4472 ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE]; 5470 ATOM_COMMON_TABLE_HEADER sHeader;
4473 ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE]; 5471 USHORT usDeviceSupport;
4474} ATOM_SUPPORTED_DEVICES_INFO_2d1; 5472 ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
5473 ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
5474}ATOM_SUPPORTED_DEVICES_INFO_2d1;
4475 5475
4476#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1 5476#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1
4477 5477
4478typedef struct _ATOM_MISC_CONTROL_INFO { 5478
4479 USHORT usFrequency; 5479
4480 UCHAR ucPLL_ChargePump; /* PLL charge-pump gain control */ 5480typedef struct _ATOM_MISC_CONTROL_INFO
4481 UCHAR ucPLL_DutyCycle; /* PLL duty cycle control */ 5481{
4482 UCHAR ucPLL_VCO_Gain; /* PLL VCO gain control */ 5482 USHORT usFrequency;
4483 UCHAR ucPLL_VoltageSwing; /* PLL driver voltage swing control */ 5483 UCHAR ucPLL_ChargePump; // PLL charge-pump gain control
4484} ATOM_MISC_CONTROL_INFO; 5484 UCHAR ucPLL_DutyCycle; // PLL duty cycle control
5485 UCHAR ucPLL_VCO_Gain; // PLL VCO gain control
5486 UCHAR ucPLL_VoltageSwing; // PLL driver voltage swing control
5487}ATOM_MISC_CONTROL_INFO;
5488
4485 5489
4486#define ATOM_MAX_MISC_INFO 4 5490#define ATOM_MAX_MISC_INFO 4
4487 5491
4488typedef struct _ATOM_TMDS_INFO { 5492typedef struct _ATOM_TMDS_INFO
4489 ATOM_COMMON_TABLE_HEADER sHeader; 5493{
4490 USHORT usMaxFrequency; /* in 10Khz */ 5494 ATOM_COMMON_TABLE_HEADER sHeader;
4491 ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO]; 5495 USHORT usMaxFrequency; // in 10Khz
4492} ATOM_TMDS_INFO; 5496 ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO];
5497}ATOM_TMDS_INFO;
5498
5499
5500typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE
5501{
5502 UCHAR ucTVStandard; //Same as TV standards defined above,
5503 UCHAR ucPadding[1];
5504}ATOM_ENCODER_ANALOG_ATTRIBUTE;
4493 5505
4494typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE { 5506typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE
4495 UCHAR ucTVStandard; /* Same as TV standards defined above, */ 5507{
4496 UCHAR ucPadding[1]; 5508 UCHAR ucAttribute; //Same as other digital encoder attributes defined above
4497} ATOM_ENCODER_ANALOG_ATTRIBUTE; 5509 UCHAR ucPadding[1];
5510}ATOM_ENCODER_DIGITAL_ATTRIBUTE;
4498 5511
4499typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE { 5512typedef union _ATOM_ENCODER_ATTRIBUTE
4500 UCHAR ucAttribute; /* Same as other digital encoder attributes defined above */ 5513{
4501 UCHAR ucPadding[1]; 5514 ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
4502} ATOM_ENCODER_DIGITAL_ATTRIBUTE; 5515 ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
5516}ATOM_ENCODER_ATTRIBUTE;
4503 5517
4504typedef union _ATOM_ENCODER_ATTRIBUTE {
4505 ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
4506 ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
4507} ATOM_ENCODER_ATTRIBUTE;
4508 5518
4509typedef struct _DVO_ENCODER_CONTROL_PARAMETERS { 5519typedef struct _DVO_ENCODER_CONTROL_PARAMETERS
4510 USHORT usPixelClock; 5520{
4511 USHORT usEncoderID; 5521 USHORT usPixelClock;
4512 UCHAR ucDeviceType; /* Use ATOM_DEVICE_xxx1_Index to indicate device type only. */ 5522 USHORT usEncoderID;
4513 UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */ 5523 UCHAR ucDeviceType; //Use ATOM_DEVICE_xxx1_Index to indicate device type only.
4514 ATOM_ENCODER_ATTRIBUTE usDevAttr; 5524 UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
4515} DVO_ENCODER_CONTROL_PARAMETERS; 5525 ATOM_ENCODER_ATTRIBUTE usDevAttr;
5526}DVO_ENCODER_CONTROL_PARAMETERS;
5527
5528typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION
5529{
5530 DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder;
5531 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
5532}DVO_ENCODER_CONTROL_PS_ALLOCATION;
4516 5533
4517typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION {
4518 DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder;
4519 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
4520} DVO_ENCODER_CONTROL_PS_ALLOCATION;
4521 5534
4522#define ATOM_XTMDS_ASIC_SI164_ID 1 5535#define ATOM_XTMDS_ASIC_SI164_ID 1
4523#define ATOM_XTMDS_ASIC_SI178_ID 2 5536#define ATOM_XTMDS_ASIC_SI178_ID 2
@@ -4526,27 +5539,30 @@ typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION {
4526#define ATOM_XTMDS_SUPPORTED_DUALLINK 0x00000002 5539#define ATOM_XTMDS_SUPPORTED_DUALLINK 0x00000002
4527#define ATOM_XTMDS_MVPU_FPGA 0x00000004 5540#define ATOM_XTMDS_MVPU_FPGA 0x00000004
4528 5541
4529typedef struct _ATOM_XTMDS_INFO { 5542
4530 ATOM_COMMON_TABLE_HEADER sHeader; 5543typedef struct _ATOM_XTMDS_INFO
4531 USHORT usSingleLinkMaxFrequency; 5544{
4532 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* Point the ID on which I2C is used to control external chip */ 5545 ATOM_COMMON_TABLE_HEADER sHeader;
4533 UCHAR ucXtransimitterID; 5546 USHORT usSingleLinkMaxFrequency;
4534 UCHAR ucSupportedLink; /* Bit field, bit0=1, single link supported;bit1=1,dual link supported */ 5547 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //Point the ID on which I2C is used to control external chip
4535 UCHAR ucSequnceAlterID; /* Even with the same external TMDS asic, it's possible that the program seqence alters */ 5548 UCHAR ucXtransimitterID;
4536 /* due to design. This ID is used to alert driver that the sequence is not "standard"! */ 5549 UCHAR ucSupportedLink; // Bit field, bit0=1, single link supported;bit1=1,dual link supported
4537 UCHAR ucMasterAddress; /* Address to control Master xTMDS Chip */ 5550 UCHAR ucSequnceAlterID; // Even with the same external TMDS asic, it's possible that the program seqence alters
4538 UCHAR ucSlaveAddress; /* Address to control Slave xTMDS Chip */ 5551 // due to design. This ID is used to alert driver that the sequence is not "standard"!
4539} ATOM_XTMDS_INFO; 5552 UCHAR ucMasterAddress; // Address to control Master xTMDS Chip
4540 5553 UCHAR ucSlaveAddress; // Address to control Slave xTMDS Chip
4541typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS { 5554}ATOM_XTMDS_INFO;
4542 UCHAR ucEnable; /* ATOM_ENABLE=On or ATOM_DISABLE=Off */ 5555
4543 UCHAR ucDevice; /* ATOM_DEVICE_DFP1_INDEX.... */ 5556typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS
4544 UCHAR ucPadding[2]; 5557{
4545} DFP_DPMS_STATUS_CHANGE_PARAMETERS; 5558 UCHAR ucEnable; // ATOM_ENABLE=On or ATOM_DISABLE=Off
5559 UCHAR ucDevice; // ATOM_DEVICE_DFP1_INDEX....
5560 UCHAR ucPadding[2];
5561}DFP_DPMS_STATUS_CHANGE_PARAMETERS;
4546 5562
4547/****************************Legacy Power Play Table Definitions **********************/ 5563/****************************Legacy Power Play Table Definitions **********************/
4548 5564
4549/* Definitions for ulPowerPlayMiscInfo */ 5565//Definitions for ulPowerPlayMiscInfo
4550#define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L 5566#define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L
4551#define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L 5567#define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L
4552#define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L 5568#define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L
@@ -4558,8 +5574,8 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
4558 5574
4559#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L 5575#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L
4560#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L 5576#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L
4561#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L /* When this bit set, ucVoltageDropIndex is not an index for GPIO pin, but a voltage ID that SW needs program */ 5577#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L //When this bit set, ucVoltageDropIndex is not an index for GPIO pin, but a voltage ID that SW needs program
4562 5578
4563#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L 5579#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L
4564#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L 5580#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L
4565#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L 5581#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L
@@ -4569,22 +5585,22 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
4569#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L 5585#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L
4570 5586
4571#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L 5587#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L
4572#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L 5588#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L
4573#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L 5589#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L
4574#define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L 5590#define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L
4575#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L 5591#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L
4576 5592
4577#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L /* 0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved */ 5593#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved
4578#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20 5594#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20
4579 5595
4580#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L 5596#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L
4581#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L 5597#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L
4582#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L 5598#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L
4583#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L /* When set, Dynamic */ 5599#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L //When set, Dynamic
4584#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L /* When set, Dynamic */ 5600#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L //When set, Dynamic
4585#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L /* When set, This mode is for acceleated 3D mode */ 5601#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L //When set, This mode is for acceleated 3D mode
4586 5602
4587#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L /* 1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) */ 5603#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks)
4588#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28 5604#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28
4589#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L 5605#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L
4590 5606
@@ -4594,55 +5610,59 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
4594#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L 5610#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L
4595#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L 5611#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L
4596#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L 5612#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L
4597#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L /* If this bit is set in multi-pp mode, then driver will pack up one with the minior power consumption. */ 5613#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L //If this bit is set in multi-pp mode, then driver will pack up one with the minior power consumption.
4598 /* If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback */ 5614 //If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback
4599#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L 5615#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L
4600#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L 5616#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L
4601#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L 5617#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L
4602 5618
//ucTableFormatRevision=1
//ucTableContentRevision=1
typedef struct _ATOM_POWERMODE_INFO
{
  ULONG   ulMiscInfo;                    //The power level should be arranged in ascending order
  ULONG   ulReserved1;                   // must set to 0
  ULONG   ulReserved2;                   // must set to 0
  USHORT  usEngineClock;
  USHORT  usMemoryClock;
  UCHAR   ucVoltageDropIndex;            // index to GPIO table
  UCHAR   ucSelectedPanel_RefreshRate;   // panel refresh rate
  UCHAR   ucMinTemperature;
  UCHAR   ucMaxTemperature;
  UCHAR   ucNumPciELanes;                // number of PCIE lanes
}ATOM_POWERMODE_INFO;

//ucTableFormatRevision=2
//ucTableContentRevision=1
typedef struct _ATOM_POWERMODE_INFO_V2
{
  ULONG   ulMiscInfo;                    //The power level should be arranged in ascending order
  ULONG   ulMiscInfo2;
  ULONG   ulEngineClock;
  ULONG   ulMemoryClock;
  UCHAR   ucVoltageDropIndex;            // index to GPIO table
  UCHAR   ucSelectedPanel_RefreshRate;   // panel refresh rate
  UCHAR   ucMinTemperature;
  UCHAR   ucMaxTemperature;
  UCHAR   ucNumPciELanes;                // number of PCIE lanes
}ATOM_POWERMODE_INFO_V2;

//ucTableFormatRevision=2
//ucTableContentRevision=2
typedef struct _ATOM_POWERMODE_INFO_V3
{
  ULONG   ulMiscInfo;                    //The power level should be arranged in ascending order
  ULONG   ulMiscInfo2;
  ULONG   ulEngineClock;
  ULONG   ulMemoryClock;
  UCHAR   ucVoltageDropIndex;            // index to Core (VDDC) voltage table
  UCHAR   ucSelectedPanel_RefreshRate;   // panel refresh rate
  UCHAR   ucMinTemperature;
  UCHAR   ucMaxTemperature;
  UCHAR   ucNumPciELanes;                // number of PCIE lanes
  UCHAR   ucVDDCI_VoltageDropIndex;      // index to VDDCI voltage table
}ATOM_POWERMODE_INFO_V3;
5665
4646 5666
4647#define ATOM_MAX_NUMBEROF_POWER_BLOCK 8 5667#define ATOM_MAX_NUMBEROF_POWER_BLOCK 8
4648 5668
@@ -4655,40 +5675,44 @@ typedef struct _ATOM_POWERMODE_INFO_V3 {
4655#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04 5675#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04
4656#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05 5676#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05
4657#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06 5677#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06
4658#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 /* Andigilog */ 5678#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 // Andigilog
4659 5679
4660typedef struct _ATOM_POWERPLAY_INFO { 5680
4661 ATOM_COMMON_TABLE_HEADER sHeader; 5681typedef struct _ATOM_POWERPLAY_INFO
4662 UCHAR ucOverdriveThermalController; 5682{
4663 UCHAR ucOverdriveI2cLine; 5683 ATOM_COMMON_TABLE_HEADER sHeader;
4664 UCHAR ucOverdriveIntBitmap; 5684 UCHAR ucOverdriveThermalController;
4665 UCHAR ucOverdriveControllerAddress; 5685 UCHAR ucOverdriveI2cLine;
4666 UCHAR ucSizeOfPowerModeEntry; 5686 UCHAR ucOverdriveIntBitmap;
4667 UCHAR ucNumOfPowerModeEntries; 5687 UCHAR ucOverdriveControllerAddress;
4668 ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; 5688 UCHAR ucSizeOfPowerModeEntry;
4669} ATOM_POWERPLAY_INFO; 5689 UCHAR ucNumOfPowerModeEntries;
4670 5690 ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
4671typedef struct _ATOM_POWERPLAY_INFO_V2 { 5691}ATOM_POWERPLAY_INFO;
4672 ATOM_COMMON_TABLE_HEADER sHeader; 5692
4673 UCHAR ucOverdriveThermalController; 5693typedef struct _ATOM_POWERPLAY_INFO_V2
4674 UCHAR ucOverdriveI2cLine; 5694{
4675 UCHAR ucOverdriveIntBitmap; 5695 ATOM_COMMON_TABLE_HEADER sHeader;
4676 UCHAR ucOverdriveControllerAddress; 5696 UCHAR ucOverdriveThermalController;
4677 UCHAR ucSizeOfPowerModeEntry; 5697 UCHAR ucOverdriveI2cLine;
4678 UCHAR ucNumOfPowerModeEntries; 5698 UCHAR ucOverdriveIntBitmap;
4679 ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; 5699 UCHAR ucOverdriveControllerAddress;
4680} ATOM_POWERPLAY_INFO_V2; 5700 UCHAR ucSizeOfPowerModeEntry;
4681 5701 UCHAR ucNumOfPowerModeEntries;
4682typedef struct _ATOM_POWERPLAY_INFO_V3 { 5702 ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
4683 ATOM_COMMON_TABLE_HEADER sHeader; 5703}ATOM_POWERPLAY_INFO_V2;
4684 UCHAR ucOverdriveThermalController; 5704
4685 UCHAR ucOverdriveI2cLine; 5705typedef struct _ATOM_POWERPLAY_INFO_V3
4686 UCHAR ucOverdriveIntBitmap; 5706{
4687 UCHAR ucOverdriveControllerAddress; 5707 ATOM_COMMON_TABLE_HEADER sHeader;
4688 UCHAR ucSizeOfPowerModeEntry; 5708 UCHAR ucOverdriveThermalController;
4689 UCHAR ucNumOfPowerModeEntries; 5709 UCHAR ucOverdriveI2cLine;
4690 ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; 5710 UCHAR ucOverdriveIntBitmap;
4691} ATOM_POWERPLAY_INFO_V3; 5711 UCHAR ucOverdriveControllerAddress;
5712 UCHAR ucSizeOfPowerModeEntry;
5713 UCHAR ucNumOfPowerModeEntries;
5714 ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
5715}ATOM_POWERPLAY_INFO_V3;
4692 5716
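All three legacy powerplay tables share the same walk: ucNumOfPowerModeEntries bounded by ATOM_MAX_NUMBEROF_POWER_BLOCK, with ucSizeOfPowerModeEntry presumably giving the per-entry stride so the larger V2/V3 power-mode layouts can be stepped over byte-wise. A hedged sketch of that iteration, assuming atombios.h is in scope and that the table pointer was already located in the BIOS data tables:

/* Walk the power-mode entries of a legacy ATOM_POWERPLAY_INFO table.
 * Byte-stepping by ucSizeOfPowerModeEntry keeps the walk valid even when the
 * entries are V2/V3 sized; this is a sketch, not the radeon_pm code. */
static void walk_powerplay_modes(const ATOM_POWERPLAY_INFO *info)
{
        const unsigned char *entry = (const unsigned char *)info->asPowerPlayInfo;
        unsigned int i, n = info->ucNumOfPowerModeEntries;

        if (n > ATOM_MAX_NUMBEROF_POWER_BLOCK)
                n = ATOM_MAX_NUMBEROF_POWER_BLOCK;

        for (i = 0; i < n; i++) {
                const ATOM_POWERMODE_INFO *mode = (const ATOM_POWERMODE_INFO *)entry;
                /* mode->usEngineClock / usMemoryClock: assumed to be 10 kHz units,
                 * as the frequency fields elsewhere in this header are. */
                (void)mode;
                entry += info->ucSizeOfPowerModeEntry;
        }
}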
4693/* New PPlib */ 5717/* New PPlib */
4694/**************************************************************************/ 5718/**************************************************************************/
@@ -4873,40 +5897,42 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
4873 UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16} 5897 UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
4874 UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement. 5898 UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
4875 USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200). 5899 USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
4876 ULONG ulFlags; 5900 ULONG ulFlags;
4877} ATOM_PPLIB_RS780_CLOCK_INFO; 5901} ATOM_PPLIB_RS780_CLOCK_INFO;
4878 5902
4879#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0 5903#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
4880#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1 5904#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
4881#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2 5905#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
4882#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3 5906#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
4883 5907
4884#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is. 5908#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
4885#define ATOM_PPLIB_RS780_SPMCLK_LOW 1 5909#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
4886#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2 5910#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
4887 5911
4888#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0 5912#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
4889#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 5913#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
4890#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 5914#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
4891 5915
4892/**************************************************************************/ 5916/**************************************************************************/
4893 5917
// Following definitions are for compatibility issues in different SW components.
4895#define ATOM_MASTER_DATA_TABLE_REVISION 0x01 5920#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
4896#define Object_Info Object_Header 5921#define Object_Info Object_Header
4897#define AdjustARB_SEQ MC_InitParameter 5922#define AdjustARB_SEQ MC_InitParameter
4898#define VRAM_GPIO_DetectionInfo VoltageObjectInfo 5923#define VRAM_GPIO_DetectionInfo VoltageObjectInfo
4899#define ASIC_VDDCI_Info ASIC_ProfilingInfo 5924#define ASIC_VDDCI_Info ASIC_ProfilingInfo
4900#define ASIC_MVDDQ_Info MemoryTrainingInfo 5925#define ASIC_MVDDQ_Info MemoryTrainingInfo
4901#define SS_Info PPLL_SS_Info 5926#define SS_Info PPLL_SS_Info
4902#define ASIC_MVDDC_Info ASIC_InternalSS_Info 5927#define ASIC_MVDDC_Info ASIC_InternalSS_Info
4903#define DispDevicePriorityInfo SaveRestoreInfo 5928#define DispDevicePriorityInfo SaveRestoreInfo
4904#define DispOutInfo TV_VideoMode 5929#define DispOutInfo TV_VideoMode
4905 5930
5931
4906#define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE 5932#define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE
4907#define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE 5933#define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE
4908 5934
4909/* New device naming, remove them when both DAL/VBIOS is ready */ 5935//New device naming, remove them when both DAL/VBIOS is ready
4910#define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS 5936#define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS
4911#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS 5937#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
4912 5938
@@ -4921,7 +5947,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
4921 5947
4922#define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX 5948#define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX
4923#define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX 5949#define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX
4924 5950
4925#define ATOM_DEVICE_DFP2I_INDEX 0x00000009 5951#define ATOM_DEVICE_DFP2I_INDEX 0x00000009
4926#define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX) 5952#define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX)
4927 5953
@@ -4939,7 +5965,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
4939 5965
4940#define ATOM_S3_DFP2I_ACTIVEb1 0x02 5966#define ATOM_S3_DFP2I_ACTIVEb1 0x02
4941 5967
4942#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE 5968#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE
4943#define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE 5969#define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE
4944 5970
4945#define ATOM_S3_DFP2I_ACTIVE 0x00000200L 5971#define ATOM_S3_DFP2I_ACTIVE 0x00000200L
@@ -4958,14 +5984,14 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
4958#define ATOM_S6_ACC_REQ_DFP2Ib3 0x02 5984#define ATOM_S6_ACC_REQ_DFP2Ib3 0x02
4959#define ATOM_S6_ACC_REQ_DFP2I 0x02000000L 5985#define ATOM_S6_ACC_REQ_DFP2I 0x02000000L
4960 5986
4961#define TMDS1XEncoderControl DVOEncoderControl 5987#define TMDS1XEncoderControl DVOEncoderControl
4962#define DFP1XOutputControl DVOOutputControl 5988#define DFP1XOutputControl DVOOutputControl
4963 5989
4964#define ExternalDFPOutputControl DFP1XOutputControl 5990#define ExternalDFPOutputControl DFP1XOutputControl
4965#define EnableExternalTMDS_Encoder TMDS1XEncoderControl 5991#define EnableExternalTMDS_Encoder TMDS1XEncoderControl
4966 5992
4967#define DFP1IOutputControl TMDSAOutputControl 5993#define DFP1IOutputControl TMDSAOutputControl
4968#define DFP2IOutputControl LVTMAOutputControl 5994#define DFP2IOutputControl LVTMAOutputControl
4969 5995
4970#define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS 5996#define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS
4971#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION 5997#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
@@ -4974,7 +6000,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
4974#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION 6000#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
4975 6001
4976#define ucDac1Standard ucDacStandard 6002#define ucDac1Standard ucDacStandard
4977#define ucDac2Standard ucDacStandard 6003#define ucDac2Standard ucDacStandard
4978 6004
4979#define TMDS1EncoderControl TMDSAEncoderControl 6005#define TMDS1EncoderControl TMDSAEncoderControl
4980#define TMDS2EncoderControl LVTMAEncoderControl 6006#define TMDS2EncoderControl LVTMAEncoderControl
@@ -4984,12 +6010,56 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
4984#define CRT1OutputControl DAC1OutputControl 6010#define CRT1OutputControl DAC1OutputControl
4985#define CRT2OutputControl DAC2OutputControl 6011#define CRT2OutputControl DAC2OutputControl
4986 6012
4987/* These two lines will be removed for sure in a few days, will follow up with Michael V. */ 6013//These two lines will be removed for sure in a few days, will follow up with Michael V.
4988#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL 6014#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL
4989#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL 6015#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL
6016
6017//#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
6018//#define ATOM_S2_LCD1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
6019//#define ATOM_S2_TV1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
6020//#define ATOM_S2_DFP1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
6021//#define ATOM_S2_CRT2_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
6022
6023#define ATOM_S6_ACC_REQ_TV2 0x00400000L
6024#define ATOM_DEVICE_TV2_INDEX 0x00000006
6025#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX)
6026#define ATOM_S0_TV2 0x00100000L
6027#define ATOM_S3_TV2_ACTIVE ATOM_S3_DFP6_ACTIVE
6028#define ATOM_S3_TV2_CRTC_ACTIVE ATOM_S3_DFP6_CRTC_ACTIVE
6029
6030//
6031#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
6032#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L
6033#define ATOM_S2_TV1_DPMS_STATE 0x00040000L
6034#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L
6035#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L
6036#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L
6037#define ATOM_S2_TV2_DPMS_STATE 0x00400000L
6038#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L
6039#define ATOM_S2_CV_DPMS_STATE 0x01000000L
6040#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L
6041#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L
6042#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L
6043
6044#define ATOM_S2_CRT1_DPMS_STATEb2 0x01
6045#define ATOM_S2_LCD1_DPMS_STATEb2 0x02
6046#define ATOM_S2_TV1_DPMS_STATEb2 0x04
6047#define ATOM_S2_DFP1_DPMS_STATEb2 0x08
6048#define ATOM_S2_CRT2_DPMS_STATEb2 0x10
6049#define ATOM_S2_LCD2_DPMS_STATEb2 0x20
6050#define ATOM_S2_TV2_DPMS_STATEb2 0x40
6051#define ATOM_S2_DFP2_DPMS_STATEb2 0x80
6052#define ATOM_S2_CV_DPMS_STATEb3 0x01
6053#define ATOM_S2_DFP3_DPMS_STATEb3 0x02
6054#define ATOM_S2_DFP4_DPMS_STATEb3 0x04
6055#define ATOM_S2_DFP5_DPMS_STATEb3 0x08
6056
6057#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20
6058#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
6059#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
4990 6060
4991/*********************************************************************************/ 6061/*********************************************************************************/
4992 6062
#pragma pack()                                              // BIOS data must use byte alignment
4994 6064
4995#endif /* _ATOMBIOS_H */ 6065#endif /* _ATOMBIOS_H */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index af464e351fbd..dd9fdf560611 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -245,21 +245,25 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
-		atombios_enable_crtc(crtc, 1);
+		atombios_enable_crtc(crtc, ATOM_ENABLE);
 		if (ASIC_IS_DCE3(rdev))
-			atombios_enable_crtc_memreq(crtc, 1);
-		atombios_blank_crtc(crtc, 0);
-		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
+		atombios_blank_crtc(crtc, ATOM_DISABLE);
+		/* XXX re-enable when interrupt support is added */
+		if (!ASIC_IS_DCE4(rdev))
+			drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
 		radeon_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
-		atombios_blank_crtc(crtc, 1);
+		/* XXX re-enable when interrupt support is added */
+		if (!ASIC_IS_DCE4(rdev))
+			drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+		atombios_blank_crtc(crtc, ATOM_ENABLE);
 		if (ASIC_IS_DCE3(rdev))
-			atombios_enable_crtc_memreq(crtc, 0);
-		atombios_enable_crtc(crtc, 0);
+			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
+		atombios_enable_crtc(crtc, ATOM_DISABLE);
 		break;
 	}
 }
@@ -349,6 +353,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
349 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 353 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
350} 354}
351 355
356union atom_enable_ss {
357 ENABLE_LVDS_SS_PARAMETERS legacy;
358 ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
359};
360
352static void atombios_set_ss(struct drm_crtc *crtc, int enable) 361static void atombios_set_ss(struct drm_crtc *crtc, int enable)
353{ 362{
354 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 363 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -358,11 +367,14 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
358 struct radeon_encoder *radeon_encoder = NULL; 367 struct radeon_encoder *radeon_encoder = NULL;
359 struct radeon_encoder_atom_dig *dig = NULL; 368 struct radeon_encoder_atom_dig *dig = NULL;
360 int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); 369 int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
361 ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION args; 370 union atom_enable_ss args;
362 ENABLE_LVDS_SS_PARAMETERS legacy_args;
363 uint16_t percentage = 0; 371 uint16_t percentage = 0;
364 uint8_t type = 0, step = 0, delay = 0, range = 0; 372 uint8_t type = 0, step = 0, delay = 0, range = 0;
365 373
374 /* XXX add ss support for DCE4 */
375 if (ASIC_IS_DCE4(rdev))
376 return;
377
366 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 378 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
367 if (encoder->crtc == crtc) { 379 if (encoder->crtc == crtc) {
368 radeon_encoder = to_radeon_encoder(encoder); 380 radeon_encoder = to_radeon_encoder(encoder);
@@ -386,29 +398,28 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
386 if (!radeon_encoder) 398 if (!radeon_encoder)
387 return; 399 return;
388 400
401 memset(&args, 0, sizeof(args));
389 if (ASIC_IS_AVIVO(rdev)) { 402 if (ASIC_IS_AVIVO(rdev)) {
390 memset(&args, 0, sizeof(args)); 403 args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
391 args.usSpreadSpectrumPercentage = cpu_to_le16(percentage); 404 args.v1.ucSpreadSpectrumType = type;
392 args.ucSpreadSpectrumType = type; 405 args.v1.ucSpreadSpectrumStep = step;
393 args.ucSpreadSpectrumStep = step; 406 args.v1.ucSpreadSpectrumDelay = delay;
394 args.ucSpreadSpectrumDelay = delay; 407 args.v1.ucSpreadSpectrumRange = range;
395 args.ucSpreadSpectrumRange = range; 408 args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
396 args.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 409 args.v1.ucEnable = enable;
397 args.ucEnable = enable;
398 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
399 } else { 410 } else {
400 memset(&legacy_args, 0, sizeof(legacy_args)); 411 args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
401 legacy_args.usSpreadSpectrumPercentage = cpu_to_le16(percentage); 412 args.legacy.ucSpreadSpectrumType = type;
402 legacy_args.ucSpreadSpectrumType = type; 413 args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
403 legacy_args.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; 414 args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
404 legacy_args.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; 415 args.legacy.ucEnable = enable;
405 legacy_args.ucEnable = enable;
406 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&legacy_args);
407 } 416 }
417 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
408} 418}
409 419
410union adjust_pixel_clock { 420union adjust_pixel_clock {
411 ADJUST_DISPLAY_PLL_PS_ALLOCATION v1; 421 ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
422 ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
412}; 423};
413 424
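The atombios_set_ss change above replaces two separately typed argument blocks with a single union, so the function can memset once, fill whichever revision the parser reports, and issue one atom_execute_table call at the end; union adjust_pixel_clock directly above and union set_pixel_clock further down follow the same idiom. A stripped-down sketch of the pattern, with made-up V1/V2 parameter structs standing in for the ATOM ones and a function pointer standing in for atom_execute_table:

#include <string.h>
#include <stdint.h>

/* Hypothetical stand-ins for two revisions of an ATOM parameter block. */
struct params_v1 { uint16_t clock; uint8_t enable; };
struct params_v2 { uint16_t clock; uint8_t enable; uint8_t extra_cfg; };

union versioned_args {
        struct params_v1 v1;
        struct params_v2 v2;
};

/* 'crev' would come from atom_parse_cmd_header(); 'exec' stands in for atom_execute_table(). */
static void run_versioned(uint8_t crev, uint16_t clock, void (*exec)(void *))
{
        union versioned_args args;

        memset(&args, 0, sizeof(args));   /* one memset covers every revision */
        switch (crev) {
        case 1:
                args.v1.clock = clock;
                args.v1.enable = 1;
                break;
        case 2:
                args.v2.clock = clock;
                args.v2.enable = 1;
                args.v2.extra_cfg = 0;
                break;
        }
        exec(&args);                      /* single execute call at the end, as in the diff */
}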
414static u32 atombios_adjust_pll(struct drm_crtc *crtc, 425static u32 atombios_adjust_pll(struct drm_crtc *crtc,
@@ -420,10 +431,24 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
420 struct drm_encoder *encoder = NULL; 431 struct drm_encoder *encoder = NULL;
421 struct radeon_encoder *radeon_encoder = NULL; 432 struct radeon_encoder *radeon_encoder = NULL;
422 u32 adjusted_clock = mode->clock; 433 u32 adjusted_clock = mode->clock;
434 int encoder_mode = 0;
423 435
424 /* reset the pll flags */ 436 /* reset the pll flags */
425 pll->flags = 0; 437 pll->flags = 0;
426 438
439 /* select the PLL algo */
440 if (ASIC_IS_AVIVO(rdev)) {
441 if (radeon_new_pll == 0)
442 pll->algo = PLL_ALGO_LEGACY;
443 else
444 pll->algo = PLL_ALGO_NEW;
445 } else {
446 if (radeon_new_pll == 1)
447 pll->algo = PLL_ALGO_NEW;
448 else
449 pll->algo = PLL_ALGO_LEGACY;
450 }
451
427 if (ASIC_IS_AVIVO(rdev)) { 452 if (ASIC_IS_AVIVO(rdev)) {
428 if ((rdev->family == CHIP_RS600) || 453 if ((rdev->family == CHIP_RS600) ||
429 (rdev->family == CHIP_RS690) || 454 (rdev->family == CHIP_RS690) ||
@@ -448,10 +473,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
448 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 473 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
449 if (encoder->crtc == crtc) { 474 if (encoder->crtc == crtc) {
450 radeon_encoder = to_radeon_encoder(encoder); 475 radeon_encoder = to_radeon_encoder(encoder);
476 encoder_mode = atombios_get_encoder_mode(encoder);
451 if (ASIC_IS_AVIVO(rdev)) { 477 if (ASIC_IS_AVIVO(rdev)) {
452 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 478 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
453 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) 479 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
454 adjusted_clock = mode->clock * 2; 480 adjusted_clock = mode->clock * 2;
481 /* LVDS PLL quirks */
482 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
483 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
484 pll->algo = dig->pll_algo;
485 }
455 } else { 486 } else {
456 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 487 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
457 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; 488 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -468,14 +499,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
468 */ 499 */
469 if (ASIC_IS_DCE3(rdev)) { 500 if (ASIC_IS_DCE3(rdev)) {
470 union adjust_pixel_clock args; 501 union adjust_pixel_clock args;
471 struct radeon_encoder_atom_dig *dig;
472 u8 frev, crev; 502 u8 frev, crev;
473 int index; 503 int index;
474 504
475 if (!radeon_encoder->enc_priv)
476 return adjusted_clock;
477 dig = radeon_encoder->enc_priv;
478
479 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); 505 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
480 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 506 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
481 &crev); 507 &crev);
@@ -489,12 +515,51 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
489 case 2: 515 case 2:
490 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); 516 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
491 args.v1.ucTransmitterID = radeon_encoder->encoder_id; 517 args.v1.ucTransmitterID = radeon_encoder->encoder_id;
492 args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder); 518 args.v1.ucEncodeMode = encoder_mode;
493 519
494 atom_execute_table(rdev->mode_info.atom_context, 520 atom_execute_table(rdev->mode_info.atom_context,
495 index, (uint32_t *)&args); 521 index, (uint32_t *)&args);
496 adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; 522 adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
497 break; 523 break;
524 case 3:
525 args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
526 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
527 args.v3.sInput.ucEncodeMode = encoder_mode;
528 args.v3.sInput.ucDispPllConfig = 0;
529 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
530 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
531
532 if (encoder_mode == ATOM_ENCODER_MODE_DP)
533 args.v3.sInput.ucDispPllConfig |=
534 DISPPLL_CONFIG_COHERENT_MODE;
535 else {
536 if (dig->coherent_mode)
537 args.v3.sInput.ucDispPllConfig |=
538 DISPPLL_CONFIG_COHERENT_MODE;
539 if (mode->clock > 165000)
540 args.v3.sInput.ucDispPllConfig |=
541 DISPPLL_CONFIG_DUAL_LINK;
542 }
543 } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
544 /* may want to enable SS on DP/eDP eventually */
545 args.v3.sInput.ucDispPllConfig |=
546 DISPPLL_CONFIG_SS_ENABLE;
547 if (mode->clock > 165000)
548 args.v3.sInput.ucDispPllConfig |=
549 DISPPLL_CONFIG_DUAL_LINK;
550 }
551 atom_execute_table(rdev->mode_info.atom_context,
552 index, (uint32_t *)&args);
553 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
554 if (args.v3.sOutput.ucRefDiv) {
555 pll->flags |= RADEON_PLL_USE_REF_DIV;
556 pll->reference_div = args.v3.sOutput.ucRefDiv;
557 }
558 if (args.v3.sOutput.ucPostDiv) {
559 pll->flags |= RADEON_PLL_USE_POST_DIV;
560 pll->post_div = args.v3.sOutput.ucPostDiv;
561 }
562 break;
498 default: 563 default:
499 DRM_ERROR("Unknown table version %d %d\n", frev, crev); 564 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
500 return adjusted_clock; 565 return adjusted_clock;
@@ -513,9 +578,47 @@ union set_pixel_clock {
513 PIXEL_CLOCK_PARAMETERS v1; 578 PIXEL_CLOCK_PARAMETERS v1;
514 PIXEL_CLOCK_PARAMETERS_V2 v2; 579 PIXEL_CLOCK_PARAMETERS_V2 v2;
515 PIXEL_CLOCK_PARAMETERS_V3 v3; 580 PIXEL_CLOCK_PARAMETERS_V3 v3;
581 PIXEL_CLOCK_PARAMETERS_V5 v5;
516}; 582};
517 583
518void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) 584static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
585{
586 struct drm_device *dev = crtc->dev;
587 struct radeon_device *rdev = dev->dev_private;
588 u8 frev, crev;
589 int index;
590 union set_pixel_clock args;
591
592 memset(&args, 0, sizeof(args));
593
594 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
595 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
596 &crev);
597
598 switch (frev) {
599 case 1:
600 switch (crev) {
601 case 5:
602 /* if the default dcpll clock is specified,
603 * SetPixelClock provides the dividers
604 */
605 args.v5.ucCRTC = ATOM_CRTC_INVALID;
606 args.v5.usPixelClock = rdev->clock.default_dispclk;
607 args.v5.ucPpll = ATOM_DCPLL;
608 break;
609 default:
610 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
611 return;
612 }
613 break;
614 default:
615 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
616 return;
617 }
618 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
619}
620
621static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
519{ 622{
520 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 623 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
521 struct drm_device *dev = crtc->dev; 624 struct drm_device *dev = crtc->dev;
@@ -529,12 +632,14 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
529 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; 632 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
530 struct radeon_pll *pll; 633 struct radeon_pll *pll;
531 u32 adjusted_clock; 634 u32 adjusted_clock;
635 int encoder_mode = 0;
532 636
533 memset(&args, 0, sizeof(args)); 637 memset(&args, 0, sizeof(args));
534 638
535 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 639 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
536 if (encoder->crtc == crtc) { 640 if (encoder->crtc == crtc) {
537 radeon_encoder = to_radeon_encoder(encoder); 641 radeon_encoder = to_radeon_encoder(encoder);
642 encoder_mode = atombios_get_encoder_mode(encoder);
538 break; 643 break;
539 } 644 }
540 } 645 }
@@ -542,26 +647,24 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
542 if (!radeon_encoder) 647 if (!radeon_encoder)
543 return; 648 return;
544 649
545 if (radeon_crtc->crtc_id == 0) 650 switch (radeon_crtc->pll_id) {
651 case ATOM_PPLL1:
546 pll = &rdev->clock.p1pll; 652 pll = &rdev->clock.p1pll;
547 else 653 break;
654 case ATOM_PPLL2:
548 pll = &rdev->clock.p2pll; 655 pll = &rdev->clock.p2pll;
656 break;
657 case ATOM_DCPLL:
658 case ATOM_PPLL_INVALID:
659 pll = &rdev->clock.dcpll;
660 break;
661 }
549 662
550 /* adjust pixel clock as needed */ 663 /* adjust pixel clock as needed */
551 adjusted_clock = atombios_adjust_pll(crtc, mode, pll); 664 adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
552 665
553 if (ASIC_IS_AVIVO(rdev)) { 666 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
554 if (radeon_new_pll) 667 &ref_div, &post_div);
555 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
556 &fb_div, &frac_fb_div,
557 &ref_div, &post_div);
558 else
559 radeon_compute_pll(pll, adjusted_clock, &pll_clock,
560 &fb_div, &frac_fb_div,
561 &ref_div, &post_div);
562 } else
563 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
564 &ref_div, &post_div);
565 668
566 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 669 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
567 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 670 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -576,8 +679,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
576 args.v1.usFbDiv = cpu_to_le16(fb_div); 679 args.v1.usFbDiv = cpu_to_le16(fb_div);
577 args.v1.ucFracFbDiv = frac_fb_div; 680 args.v1.ucFracFbDiv = frac_fb_div;
578 args.v1.ucPostDiv = post_div; 681 args.v1.ucPostDiv = post_div;
579 args.v1.ucPpll = 682 args.v1.ucPpll = radeon_crtc->pll_id;
580 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
581 args.v1.ucCRTC = radeon_crtc->crtc_id; 683 args.v1.ucCRTC = radeon_crtc->crtc_id;
582 args.v1.ucRefDivSrc = 1; 684 args.v1.ucRefDivSrc = 1;
583 break; 685 break;
@@ -587,8 +689,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
587 args.v2.usFbDiv = cpu_to_le16(fb_div); 689 args.v2.usFbDiv = cpu_to_le16(fb_div);
588 args.v2.ucFracFbDiv = frac_fb_div; 690 args.v2.ucFracFbDiv = frac_fb_div;
589 args.v2.ucPostDiv = post_div; 691 args.v2.ucPostDiv = post_div;
590 args.v2.ucPpll = 692 args.v2.ucPpll = radeon_crtc->pll_id;
591 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
592 args.v2.ucCRTC = radeon_crtc->crtc_id; 693 args.v2.ucCRTC = radeon_crtc->crtc_id;
593 args.v2.ucRefDivSrc = 1; 694 args.v2.ucRefDivSrc = 1;
594 break; 695 break;
@@ -598,12 +699,22 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
598 args.v3.usFbDiv = cpu_to_le16(fb_div); 699 args.v3.usFbDiv = cpu_to_le16(fb_div);
599 args.v3.ucFracFbDiv = frac_fb_div; 700 args.v3.ucFracFbDiv = frac_fb_div;
600 args.v3.ucPostDiv = post_div; 701 args.v3.ucPostDiv = post_div;
601 args.v3.ucPpll = 702 args.v3.ucPpll = radeon_crtc->pll_id;
602 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 703 args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2);
603 args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
604 args.v3.ucTransmitterId = radeon_encoder->encoder_id; 704 args.v3.ucTransmitterId = radeon_encoder->encoder_id;
605 args.v3.ucEncoderMode = 705 args.v3.ucEncoderMode = encoder_mode;
606 atombios_get_encoder_mode(encoder); 706 break;
707 case 5:
708 args.v5.ucCRTC = radeon_crtc->crtc_id;
709 args.v5.usPixelClock = cpu_to_le16(mode->clock / 10);
710 args.v5.ucRefDiv = ref_div;
711 args.v5.usFbDiv = cpu_to_le16(fb_div);
712 args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
713 args.v5.ucPostDiv = post_div;
714 args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
715 args.v5.ucTransmitterID = radeon_encoder->encoder_id;
716 args.v5.ucEncoderMode = encoder_mode;
717 args.v5.ucPpll = radeon_crtc->pll_id;
607 break; 718 break;
608 default: 719 default:
609 DRM_ERROR("Unknown table version %d %d\n", frev, crev); 720 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
@@ -618,6 +729,140 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
618 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 729 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
619} 730}
620 731
732static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
733 struct drm_framebuffer *old_fb)
734{
735 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
736 struct drm_device *dev = crtc->dev;
737 struct radeon_device *rdev = dev->dev_private;
738 struct radeon_framebuffer *radeon_fb;
739 struct drm_gem_object *obj;
740 struct radeon_bo *rbo;
741 uint64_t fb_location;
742 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
743 int r;
744
745 /* no fb bound */
746 if (!crtc->fb) {
747 DRM_DEBUG("No FB bound\n");
748 return 0;
749 }
750
751 radeon_fb = to_radeon_framebuffer(crtc->fb);
752
753 /* Pin framebuffer & get tiling information */
754 obj = radeon_fb->obj;
755 rbo = obj->driver_private;
756 r = radeon_bo_reserve(rbo, false);
757 if (unlikely(r != 0))
758 return r;
759 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
760 if (unlikely(r != 0)) {
761 radeon_bo_unreserve(rbo);
762 return -EINVAL;
763 }
764 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
765 radeon_bo_unreserve(rbo);
766
767 switch (crtc->fb->bits_per_pixel) {
768 case 8:
769 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
770 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
771 break;
772 case 15:
773 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
774 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
775 break;
776 case 16:
777 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
778 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
779 break;
780 case 24:
781 case 32:
782 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
783 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
784 break;
785 default:
786 DRM_ERROR("Unsupported screen depth %d\n",
787 crtc->fb->bits_per_pixel);
788 return -EINVAL;
789 }
790
791 switch (radeon_crtc->crtc_id) {
792 case 0:
793 WREG32(AVIVO_D1VGA_CONTROL, 0);
794 break;
795 case 1:
796 WREG32(AVIVO_D2VGA_CONTROL, 0);
797 break;
798 case 2:
799 WREG32(EVERGREEN_D3VGA_CONTROL, 0);
800 break;
801 case 3:
802 WREG32(EVERGREEN_D4VGA_CONTROL, 0);
803 break;
804 case 4:
805 WREG32(EVERGREEN_D5VGA_CONTROL, 0);
806 break;
807 case 5:
808 WREG32(EVERGREEN_D6VGA_CONTROL, 0);
809 break;
810 default:
811 break;
812 }
813
814 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
815 upper_32_bits(fb_location));
816 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
817 upper_32_bits(fb_location));
818 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
819 (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
820 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
821 (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
822 WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
823
824 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
825 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
826 WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
827 WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
828 WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
829 WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
830
831 fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
832 WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
833 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
834
835 WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
836 crtc->mode.vdisplay);
837 x &= ~3;
838 y &= ~1;
839 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
840 (x << 16) | y);
841 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
842 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
843
844 if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
845 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
846 EVERGREEN_INTERLEAVE_EN);
847 else
848 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
849
850 if (old_fb && old_fb != crtc->fb) {
851 radeon_fb = to_radeon_framebuffer(old_fb);
852 rbo = radeon_fb->obj->driver_private;
853 r = radeon_bo_reserve(rbo, false);
854 if (unlikely(r != 0))
855 return r;
856 radeon_bo_unpin(rbo);
857 radeon_bo_unreserve(rbo);
858 }
859
860 /* Bytes per pixel may have changed */
861 radeon_bandwidth_update(rdev);
862
863 return 0;
864}
865
621static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, 866static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
622 struct drm_framebuffer *old_fb) 867 struct drm_framebuffer *old_fb)
623{ 868{
@@ -755,7 +1000,9 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
755 struct drm_device *dev = crtc->dev; 1000 struct drm_device *dev = crtc->dev;
756 struct radeon_device *rdev = dev->dev_private; 1001 struct radeon_device *rdev = dev->dev_private;
757 1002
758 if (ASIC_IS_AVIVO(rdev)) 1003 if (ASIC_IS_DCE4(rdev))
1004 return evergreen_crtc_set_base(crtc, x, y, old_fb);
1005 else if (ASIC_IS_AVIVO(rdev))
759 return avivo_crtc_set_base(crtc, x, y, old_fb); 1006 return avivo_crtc_set_base(crtc, x, y, old_fb);
760 else 1007 else
761 return radeon_crtc_set_base(crtc, x, y, old_fb); 1008 return radeon_crtc_set_base(crtc, x, y, old_fb);
@@ -785,6 +1032,46 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
785 } 1032 }
786} 1033}
787 1034
1035static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1036{
1037 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1038 struct drm_device *dev = crtc->dev;
1039 struct radeon_device *rdev = dev->dev_private;
1040 struct drm_encoder *test_encoder;
1041 struct drm_crtc *test_crtc;
1042 uint32_t pll_in_use = 0;
1043
1044 if (ASIC_IS_DCE4(rdev)) {
1045 /* if crtc is driving DP and we have an ext clock, use that */
1046 list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
1047 if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
1048 if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
1049 if (rdev->clock.dp_extclk)
1050 return ATOM_PPLL_INVALID;
1051 }
1052 }
1053 }
1054
1055 /* otherwise, pick one of the plls */
1056 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
1057 struct radeon_crtc *radeon_test_crtc;
1058
1059 if (crtc == test_crtc)
1060 continue;
1061
1062 radeon_test_crtc = to_radeon_crtc(test_crtc);
1063 if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
1064 (radeon_test_crtc->pll_id <= ATOM_PPLL2))
1065 pll_in_use |= (1 << radeon_test_crtc->pll_id);
1066 }
1067 if (!(pll_in_use & 1))
1068 return ATOM_PPLL1;
1069 return ATOM_PPLL2;
1070 } else
1071 return radeon_crtc->crtc_id;
1072
1073}
1074
788int atombios_crtc_mode_set(struct drm_crtc *crtc, 1075int atombios_crtc_mode_set(struct drm_crtc *crtc,
789 struct drm_display_mode *mode, 1076 struct drm_display_mode *mode,
790 struct drm_display_mode *adjusted_mode, 1077 struct drm_display_mode *adjusted_mode,
@@ -796,19 +1083,27 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
796 1083
797 /* TODO color tiling */ 1084 /* TODO color tiling */
798 1085
1086 /* pick pll */
1087 radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
1088
799 atombios_set_ss(crtc, 0); 1089 atombios_set_ss(crtc, 0);
1090 /* always set DCPLL */
1091 if (ASIC_IS_DCE4(rdev))
1092 atombios_crtc_set_dcpll(crtc);
800 atombios_crtc_set_pll(crtc, adjusted_mode); 1093 atombios_crtc_set_pll(crtc, adjusted_mode);
801 atombios_set_ss(crtc, 1); 1094 atombios_set_ss(crtc, 1);
802 atombios_crtc_set_timing(crtc, adjusted_mode);
803 1095
804 if (ASIC_IS_AVIVO(rdev)) 1096 if (ASIC_IS_DCE4(rdev))
805 atombios_crtc_set_base(crtc, x, y, old_fb); 1097 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
1098 else if (ASIC_IS_AVIVO(rdev))
1099 atombios_crtc_set_timing(crtc, adjusted_mode);
806 else { 1100 else {
1101 atombios_crtc_set_timing(crtc, adjusted_mode);
807 if (radeon_crtc->crtc_id == 0) 1102 if (radeon_crtc->crtc_id == 0)
808 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 1103 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
809 atombios_crtc_set_base(crtc, x, y, old_fb);
810 radeon_legacy_atom_fixup(crtc); 1104 radeon_legacy_atom_fixup(crtc);
811 } 1105 }
1106 atombios_crtc_set_base(crtc, x, y, old_fb);
812 atombios_overscan_setup(crtc, mode, adjusted_mode); 1107 atombios_overscan_setup(crtc, mode, adjusted_mode);
813 atombios_scaler_setup(crtc); 1108 atombios_scaler_setup(crtc);
814 return 0; 1109 return 0;
@@ -825,14 +1120,14 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
825 1120
826static void atombios_crtc_prepare(struct drm_crtc *crtc) 1121static void atombios_crtc_prepare(struct drm_crtc *crtc)
827{ 1122{
828 atombios_lock_crtc(crtc, 1); 1123 atombios_lock_crtc(crtc, ATOM_ENABLE);
829 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1124 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
830} 1125}
831 1126
832static void atombios_crtc_commit(struct drm_crtc *crtc) 1127static void atombios_crtc_commit(struct drm_crtc *crtc)
833{ 1128{
834 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 1129 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
835 atombios_lock_crtc(crtc, 0); 1130 atombios_lock_crtc(crtc, ATOM_DISABLE);
836} 1131}
837 1132
838static const struct drm_crtc_helper_funcs atombios_helper_funcs = { 1133static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
@@ -848,8 +1143,37 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
848void radeon_atombios_init_crtc(struct drm_device *dev, 1143void radeon_atombios_init_crtc(struct drm_device *dev,
849 struct radeon_crtc *radeon_crtc) 1144 struct radeon_crtc *radeon_crtc)
850{ 1145{
851 if (radeon_crtc->crtc_id == 1) 1146 struct radeon_device *rdev = dev->dev_private;
852 radeon_crtc->crtc_offset = 1147
853 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; 1148 if (ASIC_IS_DCE4(rdev)) {
1149 switch (radeon_crtc->crtc_id) {
1150 case 0:
1151 default:
1152 radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
1153 break;
1154 case 1:
1155 radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
1156 break;
1157 case 2:
1158 radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
1159 break;
1160 case 3:
1161 radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
1162 break;
1163 case 4:
1164 radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
1165 break;
1166 case 5:
1167 radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
1168 break;
1169 }
1170 } else {
1171 if (radeon_crtc->crtc_id == 1)
1172 radeon_crtc->crtc_offset =
1173 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
1174 else
1175 radeon_crtc->crtc_offset = 0;
1176 }
1177 radeon_crtc->pll_id = -1;
854 drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); 1178 drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
855} 1179}
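
The DCE4 path above adds explicit PLL assignment: radeon_atom_pick_pll() builds a bitmask of PPLLs already claimed by other CRTCs and hands out the first free one (or ATOM_PPLL_INVALID when a DP output can run from the external clock). The fragment below is only a minimal standalone sketch of that first-free-bit idea, not driver code; MAX_PPLLS, pick_free_pll and the in_use mask are illustrative names, while the real code walks dev->mode_config.crtc_list and stores the result in radeon_crtc->pll_id.

/* Illustrative sketch of first-free-PLL selection via a usage bitmask. */
enum { MAX_PPLLS = 2 };

static int pick_free_pll(unsigned int in_use)
{
	int id;

	for (id = 0; id < MAX_PPLLS; id++)
		if (!(in_use & (1u << id)))
			return id;	/* first PLL not claimed by another CRTC */
	return MAX_PPLLS - 1;		/* all busy: share the last one */
}

The bitmask keeps the selection independent of CRTC index, which matters once six CRTCs compete for two PPLLs plus the DCPLL.
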
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 99915a682d59..8a133bda00a2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -321,6 +321,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
321 train_set[lane] = v | p; 321 train_set[lane] = v | p;
322} 322}
323 323
324union aux_channel_transaction {
325 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
326 PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
327};
324 328
325/* radeon aux chan functions */ 329/* radeon aux chan functions */
326bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, 330bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
@@ -329,7 +333,7 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
329{ 333{
330 struct drm_device *dev = chan->dev; 334 struct drm_device *dev = chan->dev;
331 struct radeon_device *rdev = dev->dev_private; 335 struct radeon_device *rdev = dev->dev_private;
332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; 336 union aux_channel_transaction args;
333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); 337 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
334 unsigned char *base; 338 unsigned char *base;
335 int retry_count = 0; 339 int retry_count = 0;
@@ -341,31 +345,33 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
341retry: 345retry:
342 memcpy(base, req_bytes, num_bytes); 346 memcpy(base, req_bytes, num_bytes);
343 347
344 args.lpAuxRequest = 0; 348 args.v1.lpAuxRequest = 0;
345 args.lpDataOut = 16; 349 args.v1.lpDataOut = 16;
346 args.ucDataOutLen = 0; 350 args.v1.ucDataOutLen = 0;
347 args.ucChannelID = chan->rec.i2c_id; 351 args.v1.ucChannelID = chan->rec.i2c_id;
348 args.ucDelay = delay / 10; 352 args.v1.ucDelay = delay / 10;
353 if (ASIC_IS_DCE4(rdev))
354 args.v2.ucHPD_ID = chan->rec.hpd_id;
349 355
350 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 356 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
351 357
352 if (args.ucReplyStatus && !args.ucDataOutLen) { 358 if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) {
353 if (args.ucReplyStatus == 0x20 && retry_count++ < 10) 359 if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10)
354 goto retry; 360 goto retry;
355 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", 361 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
356 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], 362 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
357 chan->rec.i2c_id, args.ucReplyStatus, retry_count); 363 chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count);
358 return false; 364 return false;
359 } 365 }
360 366
361 if (args.ucDataOutLen && read_byte && read_buf_len) { 367 if (args.v1.ucDataOutLen && read_byte && read_buf_len) {
362 if (read_buf_len < args.ucDataOutLen) { 368 if (read_buf_len < args.v1.ucDataOutLen) {
363 DRM_ERROR("Buffer to small for return answer %d %d\n", 369 DRM_ERROR("Buffer to small for return answer %d %d\n",
364 read_buf_len, args.ucDataOutLen); 370 read_buf_len, args.v1.ucDataOutLen);
365 return false; 371 return false;
366 } 372 }
367 { 373 {
368 int len = min(read_buf_len, args.ucDataOutLen); 374 int len = min(read_buf_len, args.v1.ucDataOutLen);
369 memcpy(read_byte, base + 16, len); 375 memcpy(read_byte, base + 16, len);
370 } 376 }
371 } 377 }
@@ -626,12 +632,19 @@ void dp_link_train(struct drm_encoder *encoder,
626 dp_set_link_bw_lanes(radeon_connector, link_configuration); 632 dp_set_link_bw_lanes(radeon_connector, link_configuration);
627 /* disable downspread on the sink */ 633 /* disable downspread on the sink */
628 dp_set_downspread(radeon_connector, 0); 634 dp_set_downspread(radeon_connector, 0);
629 /* start training on the source */ 635 if (ASIC_IS_DCE4(rdev)) {
630 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START, 636 /* start training on the source */
631 dig_connector->dp_clock, enc_id, 0); 637 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
632 /* set training pattern 1 on the source */ 638 /* set training pattern 1 on the source */
633 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, 639 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1);
634 dig_connector->dp_clock, enc_id, 0); 640 } else {
641 /* start training on the source */
642 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
643 dig_connector->dp_clock, enc_id, 0);
644 /* set training pattern 1 on the source */
645 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
646 dig_connector->dp_clock, enc_id, 0);
647 }
635 648
636 /* set initial vs/emph */ 649 /* set initial vs/emph */
637 memset(train_set, 0, 4); 650 memset(train_set, 0, 4);
@@ -691,8 +704,11 @@ void dp_link_train(struct drm_encoder *encoder,
691 /* set training pattern 2 on the sink */ 704 /* set training pattern 2 on the sink */
692 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2); 705 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
693 /* set training pattern 2 on the source */ 706 /* set training pattern 2 on the source */
694 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, 707 if (ASIC_IS_DCE4(rdev))
695 dig_connector->dp_clock, enc_id, 1); 708 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2);
709 else
710 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
711 dig_connector->dp_clock, enc_id, 1);
696 712
697 /* channel equalization loop */ 713 /* channel equalization loop */
698 tries = 0; 714 tries = 0;
@@ -729,7 +745,11 @@ void dp_link_train(struct drm_encoder *encoder,
729 >> DP_TRAIN_PRE_EMPHASIS_SHIFT); 745 >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
730 746
731 /* disable the training pattern on the sink */ 747 /* disable the training pattern on the sink */
732 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); 748 if (ASIC_IS_DCE4(rdev))
749 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
750 else
751 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
752 dig_connector->dp_clock, enc_id, 0);
733 753
734 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, 754 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
735 dig_connector->dp_clock, enc_id, 0); 755 dig_connector->dp_clock, enc_id, 0);
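
A recurring pattern in this series is wrapping the different AtomBIOS parameter-struct revisions in a union (union aux_channel_transaction above, union set_pixel_clock earlier) so one stack buffer can be filled for whichever table revision the BIOS reports, with DCE4 setting the extra field. A rough standalone sketch of the idea follows; the struct and field names here are invented, while the real code uses the PROCESS_AUX_CHANNEL_TRANSACTION_* structs from the AtomBIOS headers.

/* Hypothetical illustration of the versioned-parameter-union pattern. */
#include <string.h>

struct params_v1 { unsigned short clock; unsigned char id; };
struct params_v2 { unsigned short clock; unsigned char id; unsigned char hpd; };

union table_params {
	struct params_v1 v1;
	struct params_v2 v2;
};

static void fill_params(union table_params *args, int is_new_asic)
{
	memset(args, 0, sizeof(*args));
	args->v1.clock = 27000;		/* fields shared by both layouts */
	args->v1.id = 0;
	if (is_new_asic)
		args->v2.hpd = 1;	/* extra field only in the v2 layout */
}
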
diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h
index d4e6e6e4a938..3c391e7e9fd4 100644
--- a/drivers/gpu/drm/radeon/avivod.h
+++ b/drivers/gpu/drm/radeon/avivod.h
@@ -30,11 +30,13 @@
30 30
31#define D1CRTC_CONTROL 0x6080 31#define D1CRTC_CONTROL 0x6080
32#define CRTC_EN (1 << 0) 32#define CRTC_EN (1 << 0)
33#define D1CRTC_STATUS 0x609c
33#define D1CRTC_UPDATE_LOCK 0x60E8 34#define D1CRTC_UPDATE_LOCK 0x60E8
34#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 35#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
35#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118 36#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
36 37
37#define D2CRTC_CONTROL 0x6880 38#define D2CRTC_CONTROL 0x6880
39#define D2CRTC_STATUS 0x689c
38#define D2CRTC_UPDATE_LOCK 0x68E8 40#define D2CRTC_UPDATE_LOCK 0x68E8
39#define D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910 41#define D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
40#define D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918 42#define D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
new file mode 100644
index 000000000000..bd2e7aa85c1d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -0,0 +1,767 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include "drmP.h"
27#include "radeon.h"
28#include "radeon_drm.h"
29#include "rv770d.h"
30#include "atom.h"
31#include "avivod.h"
32#include "evergreen_reg.h"
33
34static void evergreen_gpu_init(struct radeon_device *rdev);
35void evergreen_fini(struct radeon_device *rdev);
36
37bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
38{
39 bool connected = false;
40 /* XXX */
41 return connected;
42}
43
44void evergreen_hpd_set_polarity(struct radeon_device *rdev,
45 enum radeon_hpd_id hpd)
46{
47 /* XXX */
48}
49
50void evergreen_hpd_init(struct radeon_device *rdev)
51{
52 /* XXX */
53}
54
55
56void evergreen_bandwidth_update(struct radeon_device *rdev)
57{
58 /* XXX */
59}
60
61void evergreen_hpd_fini(struct radeon_device *rdev)
62{
63 /* XXX */
64}
65
66static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
67{
68 unsigned i;
69 u32 tmp;
70
71 for (i = 0; i < rdev->usec_timeout; i++) {
72 /* read MC_STATUS */
73 tmp = RREG32(SRBM_STATUS) & 0x1F00;
74 if (!tmp)
75 return 0;
76 udelay(1);
77 }
78 return -1;
79}
80
81/*
82 * GART
83 */
84int evergreen_pcie_gart_enable(struct radeon_device *rdev)
85{
86 u32 tmp;
87 int r, i;
88
89 if (rdev->gart.table.vram.robj == NULL) {
90 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
91 return -EINVAL;
92 }
93 r = radeon_gart_table_vram_pin(rdev);
94 if (r)
95 return r;
96 radeon_gart_restore(rdev);
97 /* Setup L2 cache */
98 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
99 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
100 EFFECTIVE_L2_QUEUE_SIZE(7));
101 WREG32(VM_L2_CNTL2, 0);
102 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
103 /* Setup TLB control */
104 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
105 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
106 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
107 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
108 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
109 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
110 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
111 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
112 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
113 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
114 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
115 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
116 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
117 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
118 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
119 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
120 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
121 (u32)(rdev->dummy_page.addr >> 12));
122 for (i = 1; i < 7; i++)
123 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
124
125 r600_pcie_gart_tlb_flush(rdev);
126 rdev->gart.ready = true;
127 return 0;
128}
129
130void evergreen_pcie_gart_disable(struct radeon_device *rdev)
131{
132 u32 tmp;
133 int i, r;
134
135 /* Disable all tables */
136 for (i = 0; i < 7; i++)
137 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
138
139 /* Setup L2 cache */
140 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
141 EFFECTIVE_L2_QUEUE_SIZE(7));
142 WREG32(VM_L2_CNTL2, 0);
143 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
144 /* Setup TLB control */
145 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
146 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
147 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
148 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
149 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
150 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
151 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
152 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
153 if (rdev->gart.table.vram.robj) {
154 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
155 if (likely(r == 0)) {
156 radeon_bo_kunmap(rdev->gart.table.vram.robj);
157 radeon_bo_unpin(rdev->gart.table.vram.robj);
158 radeon_bo_unreserve(rdev->gart.table.vram.robj);
159 }
160 }
161}
162
163void evergreen_pcie_gart_fini(struct radeon_device *rdev)
164{
165 evergreen_pcie_gart_disable(rdev);
166 radeon_gart_table_vram_free(rdev);
167 radeon_gart_fini(rdev);
168}
169
170
171void evergreen_agp_enable(struct radeon_device *rdev)
172{
173 u32 tmp;
174 int i;
175
176 /* Setup L2 cache */
177 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
178 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
179 EFFECTIVE_L2_QUEUE_SIZE(7));
180 WREG32(VM_L2_CNTL2, 0);
181 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
182 /* Setup TLB control */
183 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
184 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
185 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
186 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
187 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
188 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
189 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
190 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
191 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
192 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
193 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
194 for (i = 0; i < 7; i++)
195 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
196}
197
198static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
199{
200 save->vga_control[0] = RREG32(D1VGA_CONTROL);
201 save->vga_control[1] = RREG32(D2VGA_CONTROL);
202 save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
203 save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
204 save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
205 save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
206 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
207 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
208 save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
209 save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
210 save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
211 save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
212 save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
213 save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
214
215 /* Stop all video */
216 WREG32(VGA_RENDER_CONTROL, 0);
217 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
218 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
219 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
220 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
221 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
222 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
223 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
224 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
225 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
226 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
227 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
228 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
229 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
230 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
231 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
232 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
233 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
234 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
235
236 WREG32(D1VGA_CONTROL, 0);
237 WREG32(D2VGA_CONTROL, 0);
238 WREG32(EVERGREEN_D3VGA_CONTROL, 0);
239 WREG32(EVERGREEN_D4VGA_CONTROL, 0);
240 WREG32(EVERGREEN_D5VGA_CONTROL, 0);
241 WREG32(EVERGREEN_D6VGA_CONTROL, 0);
242}
243
244static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
245{
246 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
247 upper_32_bits(rdev->mc.vram_start));
248 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
249 upper_32_bits(rdev->mc.vram_start));
250 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
251 (u32)rdev->mc.vram_start);
252 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
253 (u32)rdev->mc.vram_start);
254
255 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
256 upper_32_bits(rdev->mc.vram_start));
257 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
258 upper_32_bits(rdev->mc.vram_start));
259 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
260 (u32)rdev->mc.vram_start);
261 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
262 (u32)rdev->mc.vram_start);
263
264 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
265 upper_32_bits(rdev->mc.vram_start));
266 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
267 upper_32_bits(rdev->mc.vram_start));
268 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
269 (u32)rdev->mc.vram_start);
270 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
271 (u32)rdev->mc.vram_start);
272
273 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
274 upper_32_bits(rdev->mc.vram_start));
275 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
276 upper_32_bits(rdev->mc.vram_start));
277 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
278 (u32)rdev->mc.vram_start);
279 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
280 (u32)rdev->mc.vram_start);
281
282 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
283 upper_32_bits(rdev->mc.vram_start));
284 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
285 upper_32_bits(rdev->mc.vram_start));
286 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
287 (u32)rdev->mc.vram_start);
288 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
289 (u32)rdev->mc.vram_start);
290
291 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
292 upper_32_bits(rdev->mc.vram_start));
293 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
294 upper_32_bits(rdev->mc.vram_start));
295 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
296 (u32)rdev->mc.vram_start);
297 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
298 (u32)rdev->mc.vram_start);
299
300 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
301 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
302 /* Unlock host access */
303 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
304 mdelay(1);
305 /* Restore video state */
306 WREG32(D1VGA_CONTROL, save->vga_control[0]);
307 WREG32(D2VGA_CONTROL, save->vga_control[1]);
308 WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
309 WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
310 WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
311 WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
312 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
313 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
314 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
315 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
316 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
317 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
318 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
319 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
320 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
321 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
322 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
323 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
324 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
325 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
326 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
327 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
328 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
329 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
330 WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
331}
332
333static void evergreen_mc_program(struct radeon_device *rdev)
334{
335 struct evergreen_mc_save save;
336 u32 tmp;
337 int i, j;
338
339 /* Initialize HDP */
340 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
341 WREG32((0x2c14 + j), 0x00000000);
342 WREG32((0x2c18 + j), 0x00000000);
343 WREG32((0x2c1c + j), 0x00000000);
344 WREG32((0x2c20 + j), 0x00000000);
345 WREG32((0x2c24 + j), 0x00000000);
346 }
347 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
348
349 evergreen_mc_stop(rdev, &save);
350 if (evergreen_mc_wait_for_idle(rdev)) {
351 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
352 }
353 /* Lockout access through VGA aperture*/
354 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
355 /* Update configuration */
356 if (rdev->flags & RADEON_IS_AGP) {
357 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
358 /* VRAM before AGP */
359 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
360 rdev->mc.vram_start >> 12);
361 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
362 rdev->mc.gtt_end >> 12);
363 } else {
364 /* VRAM after AGP */
365 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
366 rdev->mc.gtt_start >> 12);
367 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
368 rdev->mc.vram_end >> 12);
369 }
370 } else {
371 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
372 rdev->mc.vram_start >> 12);
373 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
374 rdev->mc.vram_end >> 12);
375 }
376 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
377 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
378 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
379 WREG32(MC_VM_FB_LOCATION, tmp);
380 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
381 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
382 WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
383 if (rdev->flags & RADEON_IS_AGP) {
384 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
385 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
386 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
387 } else {
388 WREG32(MC_VM_AGP_BASE, 0);
389 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
390 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
391 }
392 if (evergreen_mc_wait_for_idle(rdev)) {
393 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
394 }
395 evergreen_mc_resume(rdev, &save);
396 /* we need to own VRAM, so turn off the VGA renderer here
397 * to stop it overwriting our objects */
398 rv515_vga_render_disable(rdev);
399}
400
401#if 0
402/*
403 * CP.
404 */
405static void evergreen_cp_stop(struct radeon_device *rdev)
406{
407 /* XXX */
408}
409
410
411static int evergreen_cp_load_microcode(struct radeon_device *rdev)
412{
413 /* XXX */
414
415 return 0;
416}
417
418
419/*
420 * Core functions
421 */
422static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
423 u32 num_backends,
424 u32 backend_disable_mask)
425{
426 u32 backend_map = 0;
427
428 return backend_map;
429}
430#endif
431
432static void evergreen_gpu_init(struct radeon_device *rdev)
433{
434 /* XXX */
435}
436
437int evergreen_mc_init(struct radeon_device *rdev)
438{
439 fixed20_12 a;
440 u32 tmp;
441 int chansize, numchan;
442
443 /* Get VRAM information */
444 rdev->mc.vram_is_ddr = true;
445 tmp = RREG32(MC_ARB_RAMCFG);
446 if (tmp & CHANSIZE_OVERRIDE) {
447 chansize = 16;
448 } else if (tmp & CHANSIZE_MASK) {
449 chansize = 64;
450 } else {
451 chansize = 32;
452 }
453 tmp = RREG32(MC_SHARED_CHMAP);
454 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
455 case 0:
456 default:
457 numchan = 1;
458 break;
459 case 1:
460 numchan = 2;
461 break;
462 case 2:
463 numchan = 4;
464 break;
465 case 3:
466 numchan = 8;
467 break;
468 }
469 rdev->mc.vram_width = numchan * chansize;
470 /* Could aper size report 0 ? */
471 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
472 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
473 /* Setup GPU memory space */
474 /* size in MB on evergreen */
475 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
476 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
477 rdev->mc.visible_vram_size = rdev->mc.aper_size;
478 /* FIXME remove this once we support unmappable VRAM */
479 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
480 rdev->mc.mc_vram_size = rdev->mc.aper_size;
481 rdev->mc.real_vram_size = rdev->mc.aper_size;
482 }
483 r600_vram_gtt_location(rdev, &rdev->mc);
484 /* FIXME: we should enforce default clock in case GPU is not in
485 * default setup
486 */
487 a.full = rfixed_const(100);
488 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
489 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
490 return 0;
491}
492
493int evergreen_gpu_reset(struct radeon_device *rdev)
494{
495 /* FIXME: implement for evergreen */
496 return 0;
497}
498
499static int evergreen_startup(struct radeon_device *rdev)
500{
501#if 0
502 int r;
503
504 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
505 r = r600_init_microcode(rdev);
506 if (r) {
507 DRM_ERROR("Failed to load firmware!\n");
508 return r;
509 }
510 }
511#endif
512 evergreen_mc_program(rdev);
513#if 0
514 if (rdev->flags & RADEON_IS_AGP) {
515 evergreem_agp_enable(rdev);
516 } else {
517 r = evergreen_pcie_gart_enable(rdev);
518 if (r)
519 return r;
520 }
521#endif
522 evergreen_gpu_init(rdev);
523#if 0
524 if (!rdev->r600_blit.shader_obj) {
525 r = r600_blit_init(rdev);
526 if (r) {
527 DRM_ERROR("radeon: failed blitter (%d).\n", r);
528 return r;
529 }
530 }
531
532 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
533 if (unlikely(r != 0))
534 return r;
535 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
536 &rdev->r600_blit.shader_gpu_addr);
537 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
538 if (r) {
539 DRM_ERROR("failed to pin blit object %d\n", r);
540 return r;
541 }
542
543 /* Enable IRQ */
544 r = r600_irq_init(rdev);
545 if (r) {
546 DRM_ERROR("radeon: IH init failed (%d).\n", r);
547 radeon_irq_kms_fini(rdev);
548 return r;
549 }
550 r600_irq_set(rdev);
551
552 r = radeon_ring_init(rdev, rdev->cp.ring_size);
553 if (r)
554 return r;
555 r = evergreen_cp_load_microcode(rdev);
556 if (r)
557 return r;
558 r = r600_cp_resume(rdev);
559 if (r)
560 return r;
561 /* write back buffers are not vital so don't worry about failure */
562 r600_wb_enable(rdev);
563#endif
564 return 0;
565}
566
567int evergreen_resume(struct radeon_device *rdev)
568{
569 int r;
570
571 /* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
572 * posting will perform the necessary tasks to bring the GPU back
573 * into good shape.
574 */
575 /* post card */
576 atom_asic_init(rdev->mode_info.atom_context);
577 /* Initialize clocks */
578 r = radeon_clocks_init(rdev);
579 if (r) {
580 return r;
581 }
582
583 r = evergreen_startup(rdev);
584 if (r) {
585 DRM_ERROR("r600 startup failed on resume\n");
586 return r;
587 }
588#if 0
589 r = r600_ib_test(rdev);
590 if (r) {
591 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
592 return r;
593 }
594#endif
595 return r;
596
597}
598
599int evergreen_suspend(struct radeon_device *rdev)
600{
601#if 0
602 int r;
603
604 /* FIXME: we should wait for ring to be empty */
605 r700_cp_stop(rdev);
606 rdev->cp.ready = false;
607 r600_wb_disable(rdev);
608 evergreen_pcie_gart_disable(rdev);
609 /* unpin shaders bo */
610 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
611 if (likely(r == 0)) {
612 radeon_bo_unpin(rdev->r600_blit.shader_obj);
613 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
614 }
615#endif
616 return 0;
617}
618
619static bool evergreen_card_posted(struct radeon_device *rdev)
620{
621 u32 reg;
622
623 /* first check CRTCs */
624 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
625 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
626 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
627 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
628 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
629 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
630 if (reg & EVERGREEN_CRTC_MASTER_EN)
631 return true;
632
633 /* then check MEM_SIZE, in case the crtcs are off */
634 if (RREG32(CONFIG_MEMSIZE))
635 return true;
636
637 return false;
638}
639
640/* Plan is to move initialization into this function and use
641 * helper functions so that radeon_device_init pretty much
642 * does nothing more than call asic specific functions. This
643 * should also allow removing a bunch of callback functions
644 * like vram_info.
645 */
646int evergreen_init(struct radeon_device *rdev)
647{
648 int r;
649
650 r = radeon_dummy_page_init(rdev);
651 if (r)
652 return r;
653 /* This doesn't do much */
654 r = radeon_gem_init(rdev);
655 if (r)
656 return r;
657 /* Read BIOS */
658 if (!radeon_get_bios(rdev)) {
659 if (ASIC_IS_AVIVO(rdev))
660 return -EINVAL;
661 }
662 /* Must be an ATOMBIOS */
663 if (!rdev->is_atom_bios) {
664 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
665 return -EINVAL;
666 }
667 r = radeon_atombios_init(rdev);
668 if (r)
669 return r;
670 /* Post card if necessary */
671 if (!evergreen_card_posted(rdev)) {
672 if (!rdev->bios) {
673 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
674 return -EINVAL;
675 }
676 DRM_INFO("GPU not posted. posting now...\n");
677 atom_asic_init(rdev->mode_info.atom_context);
678 }
679 /* Initialize scratch registers */
680 r600_scratch_init(rdev);
681 /* Initialize surface registers */
682 radeon_surface_init(rdev);
683 /* Initialize clocks */
684 radeon_get_clock_info(rdev->ddev);
685 r = radeon_clocks_init(rdev);
686 if (r)
687 return r;
688 /* Initialize power management */
689 radeon_pm_init(rdev);
690 /* Fence driver */
691 r = radeon_fence_driver_init(rdev);
692 if (r)
693 return r;
694 /* initialize AGP */
695 if (rdev->flags & RADEON_IS_AGP) {
696 r = radeon_agp_init(rdev);
697 if (r)
698 radeon_agp_disable(rdev);
699 }
700 /* initialize memory controller */
701 r = evergreen_mc_init(rdev);
702 if (r)
703 return r;
704 /* Memory manager */
705 r = radeon_bo_init(rdev);
706 if (r)
707 return r;
708#if 0
709 r = radeon_irq_kms_init(rdev);
710 if (r)
711 return r;
712
713 rdev->cp.ring_obj = NULL;
714 r600_ring_init(rdev, 1024 * 1024);
715
716 rdev->ih.ring_obj = NULL;
717 r600_ih_ring_init(rdev, 64 * 1024);
718
719 r = r600_pcie_gart_init(rdev);
720 if (r)
721 return r;
722#endif
723 rdev->accel_working = false;
724 r = evergreen_startup(rdev);
725 if (r) {
726 evergreen_suspend(rdev);
727 /*r600_wb_fini(rdev);*/
728 /*radeon_ring_fini(rdev);*/
729 /*evergreen_pcie_gart_fini(rdev);*/
730 rdev->accel_working = false;
731 }
732 if (rdev->accel_working) {
733 r = radeon_ib_pool_init(rdev);
734 if (r) {
735 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
736 rdev->accel_working = false;
737 }
738 r = r600_ib_test(rdev);
739 if (r) {
740 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
741 rdev->accel_working = false;
742 }
743 }
744 return 0;
745}
746
747void evergreen_fini(struct radeon_device *rdev)
748{
749 evergreen_suspend(rdev);
750#if 0
751 r600_blit_fini(rdev);
752 r600_irq_fini(rdev);
753 radeon_irq_kms_fini(rdev);
754 radeon_ring_fini(rdev);
755 r600_wb_fini(rdev);
756 evergreen_pcie_gart_fini(rdev);
757#endif
758 radeon_gem_fini(rdev);
759 radeon_fence_driver_fini(rdev);
760 radeon_clocks_fini(rdev);
761 radeon_agp_fini(rdev);
762 radeon_bo_fini(rdev);
763 radeon_atombios_fini(rdev);
764 kfree(rdev->bios);
765 rdev->bios = NULL;
766 radeon_dummy_page_fini(rdev);
767}
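
evergreen_mc_wait_for_idle() above follows the usual poll-with-timeout idiom: read a status register, mask the busy bits, and give up after the timeout expires (the driver bounds the loop by rdev->usec_timeout). Below is a generic sketch of the same idiom only; read_status, delay_us, BUSY_MASK and TIMEOUT_US are placeholders rather than the Evergreen register accessor and bit definitions.

/* Generic busy-bit polling sketch with placeholder accessors. */
#define BUSY_MASK   0x1F00
#define TIMEOUT_US  100000

static int wait_for_idle(unsigned int (*read_status)(void),
			 void (*delay_us)(unsigned int))
{
	unsigned int i;

	for (i = 0; i < TIMEOUT_US; i++) {
		if (!(read_status() & BUSY_MASK))
			return 0;	/* engine reports idle */
		delay_us(1);
	}
	return -1;			/* timed out; the caller warns and carries on */
}
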
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
new file mode 100644
index 000000000000..f7c7c9643433
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -0,0 +1,176 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef __EVERGREEN_REG_H__
25#define __EVERGREEN_REG_H__
26
27/* evergreen */
28#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310
29#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324
30#define EVERGREEN_D3VGA_CONTROL 0x3e0
31#define EVERGREEN_D4VGA_CONTROL 0x3e4
32#define EVERGREEN_D5VGA_CONTROL 0x3e8
33#define EVERGREEN_D6VGA_CONTROL 0x3ec
34
35#define EVERGREEN_P1PLL_SS_CNTL 0x414
36#define EVERGREEN_P2PLL_SS_CNTL 0x454
37# define EVERGREEN_PxPLL_SS_EN (1 << 12)
38/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
39#define EVERGREEN_GRPH_ENABLE 0x6800
40#define EVERGREEN_GRPH_CONTROL 0x6804
41# define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
42# define EVERGREEN_GRPH_DEPTH_8BPP 0
43# define EVERGREEN_GRPH_DEPTH_16BPP 1
44# define EVERGREEN_GRPH_DEPTH_32BPP 2
45# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
46/* 8 BPP */
47# define EVERGREEN_GRPH_FORMAT_INDEXED 0
48/* 16 BPP */
49# define EVERGREEN_GRPH_FORMAT_ARGB1555 0
50# define EVERGREEN_GRPH_FORMAT_ARGB565 1
51# define EVERGREEN_GRPH_FORMAT_ARGB4444 2
52# define EVERGREEN_GRPH_FORMAT_AI88 3
53# define EVERGREEN_GRPH_FORMAT_MONO16 4
54# define EVERGREEN_GRPH_FORMAT_BGRA5551 5
55/* 32 BPP */
56# define EVERGREEN_GRPH_FORMAT_ARGB8888 0
57# define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
58# define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
59# define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
60# define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
61# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
62# define EVERGREEN_GRPH_FORMAT_RGB111110 6
63# define EVERGREEN_GRPH_FORMAT_BGR101111 7
64#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
65# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
66# define EVERGREEN_GRPH_ENDIAN_NONE 0
67# define EVERGREEN_GRPH_ENDIAN_8IN16 1
68# define EVERGREEN_GRPH_ENDIAN_8IN32 2
69# define EVERGREEN_GRPH_ENDIAN_8IN64 3
70# define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
71# define EVERGREEN_GRPH_RED_SEL_R 0
72# define EVERGREEN_GRPH_RED_SEL_G 1
73# define EVERGREEN_GRPH_RED_SEL_B 2
74# define EVERGREEN_GRPH_RED_SEL_A 3
75# define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
76# define EVERGREEN_GRPH_GREEN_SEL_G 0
77# define EVERGREEN_GRPH_GREEN_SEL_B 1
78# define EVERGREEN_GRPH_GREEN_SEL_A 2
79# define EVERGREEN_GRPH_GREEN_SEL_R 3
80# define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
81# define EVERGREEN_GRPH_BLUE_SEL_B 0
82# define EVERGREEN_GRPH_BLUE_SEL_A 1
83# define EVERGREEN_GRPH_BLUE_SEL_R 2
84# define EVERGREEN_GRPH_BLUE_SEL_G 3
85# define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
86# define EVERGREEN_GRPH_ALPHA_SEL_A 0
87# define EVERGREEN_GRPH_ALPHA_SEL_R 1
88# define EVERGREEN_GRPH_ALPHA_SEL_G 2
89# define EVERGREEN_GRPH_ALPHA_SEL_B 3
90#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x6810
91#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x6814
92# define EVERGREEN_GRPH_DFQ_ENABLE (1 << 0)
93# define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
94#define EVERGREEN_GRPH_PITCH 0x6818
95#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x681c
96#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x6820
97#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x6824
98#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x6828
99#define EVERGREEN_GRPH_X_START 0x682c
100#define EVERGREEN_GRPH_Y_START 0x6830
101#define EVERGREEN_GRPH_X_END 0x6834
102#define EVERGREEN_GRPH_Y_END 0x6838
103
104/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
105#define EVERGREEN_CUR_CONTROL 0x6998
106# define EVERGREEN_CURSOR_EN (1 << 0)
107# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
108# define EVERGREEN_CURSOR_MONO 0
109# define EVERGREEN_CURSOR_24_1 1
110# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
111# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
112# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
113# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
114# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
115# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
116# define EVERGREEN_CURSOR_URGENT_1_8 1
117# define EVERGREEN_CURSOR_URGENT_1_4 2
118# define EVERGREEN_CURSOR_URGENT_3_8 3
119# define EVERGREEN_CURSOR_URGENT_1_2 4
120#define EVERGREEN_CUR_SURFACE_ADDRESS 0x699c
121# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
122#define EVERGREEN_CUR_SIZE 0x69a0
123#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x69a4
124#define EVERGREEN_CUR_POSITION 0x69a8
125#define EVERGREEN_CUR_HOT_SPOT 0x69ac
126#define EVERGREEN_CUR_COLOR1 0x69b0
127#define EVERGREEN_CUR_COLOR2 0x69b4
128#define EVERGREEN_CUR_UPDATE 0x69b8
129# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
130# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
131# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
132# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
133
134/* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */
135#define EVERGREEN_DC_LUT_RW_MODE 0x69e0
136#define EVERGREEN_DC_LUT_RW_INDEX 0x69e4
137#define EVERGREEN_DC_LUT_SEQ_COLOR 0x69e8
138#define EVERGREEN_DC_LUT_PWL_DATA 0x69ec
139#define EVERGREEN_DC_LUT_30_COLOR 0x69f0
140#define EVERGREEN_DC_LUT_VGA_ACCESS_ENABLE 0x69f4
141#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x69f8
142#define EVERGREEN_DC_LUT_AUTOFILL 0x69fc
143#define EVERGREEN_DC_LUT_CONTROL 0x6a00
144#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x6a04
145#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x6a08
146#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x6a0c
147#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x6a10
148#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x6a14
149#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x6a18
150
151#define EVERGREEN_DATA_FORMAT 0x6b00
152# define EVERGREEN_INTERLEAVE_EN (1 << 0)
153#define EVERGREEN_DESKTOP_HEIGHT 0x6b04
154
155#define EVERGREEN_VIEWPORT_START 0x6d70
156#define EVERGREEN_VIEWPORT_SIZE 0x6d74
157
158/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
159#define EVERGREEN_CRTC0_REGISTER_OFFSET (0x6df0 - 0x6df0)
160#define EVERGREEN_CRTC1_REGISTER_OFFSET (0x79f0 - 0x6df0)
161#define EVERGREEN_CRTC2_REGISTER_OFFSET (0x105f0 - 0x6df0)
162#define EVERGREEN_CRTC3_REGISTER_OFFSET (0x111f0 - 0x6df0)
163#define EVERGREEN_CRTC4_REGISTER_OFFSET (0x11df0 - 0x6df0)
164#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0)
165
166/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
167#define EVERGREEN_CRTC_CONTROL 0x6e70
168# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
169#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
170
171#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
172#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
173#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8
174#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc
175
176#endif
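
These crossbar and offset macros compose in a straightforward way: the *_CROSSBAR(x) fields remap which memory component feeds each scanout channel, and adding EVERGREEN_CRTCn_REGISTER_OFFSET retargets the same GRPH/CUR/LUT block at another display controller. A standalone sketch; the 0x6800 GRPH_CONTROL base is assumed from earlier in this header, and the WREG32-style output is only illustrative:

#include <stdio.h>

#define EVERGREEN_GRPH_CONTROL            0x6800  /* assumed: defined earlier in this header */
#define EVERGREEN_GRPH_RED_CROSSBAR(x)    (((x) & 0x3) << 4)
#define EVERGREEN_GRPH_RED_SEL_B          2
#define EVERGREEN_GRPH_GREEN_CROSSBAR(x)  (((x) & 0x3) << 6)
#define EVERGREEN_GRPH_GREEN_SEL_G        0
#define EVERGREEN_GRPH_BLUE_CROSSBAR(x)   (((x) & 0x3) << 8)
#define EVERGREEN_GRPH_BLUE_SEL_R         2
#define EVERGREEN_CRTC1_REGISTER_OFFSET   (0x79f0 - 0x6df0)

int main(void)
{
    /* GRPH_CONTROL of the second display controller. */
    unsigned reg = EVERGREEN_GRPH_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET;
    /* Swap red and blue for a BGR surface, leave green where it is. */
    unsigned val = EVERGREEN_GRPH_RED_CROSSBAR(EVERGREEN_GRPH_RED_SEL_B) |
                   EVERGREEN_GRPH_GREEN_CROSSBAR(EVERGREEN_GRPH_GREEN_SEL_G) |
                   EVERGREEN_GRPH_BLUE_CROSSBAR(EVERGREEN_GRPH_BLUE_SEL_R);

    printf("would program WREG32(0x%05x, 0x%08x)\n", reg, val);
    return 0;
}
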
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c0d4650cdb79..91eb762eb3f9 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -197,13 +197,13 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
197{ 197{
198 uint32_t tmp; 198 uint32_t tmp;
199 199
200 radeon_gart_restore(rdev);
200 /* discard memory request outside of configured range */ 201 /* discard memory request outside of configured range */
201 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; 202 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
202 WREG32(RADEON_AIC_CNTL, tmp); 203 WREG32(RADEON_AIC_CNTL, tmp);
203 /* set address range for PCI address translate */ 204 /* set address range for PCI address translate */
204 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location); 205 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
205 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 206 WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
206 WREG32(RADEON_AIC_HI_ADDR, tmp);
207 /* set PCI GART page-table base address */ 207 /* set PCI GART page-table base address */
208 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); 208 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
209 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; 209 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -312,9 +312,11 @@ int r100_irq_process(struct radeon_device *rdev)
312 /* Vertical blank interrupts */ 312 /* Vertical blank interrupts */
313 if (status & RADEON_CRTC_VBLANK_STAT) { 313 if (status & RADEON_CRTC_VBLANK_STAT) {
314 drm_handle_vblank(rdev->ddev, 0); 314 drm_handle_vblank(rdev->ddev, 0);
315 wake_up(&rdev->irq.vblank_queue);
315 } 316 }
316 if (status & RADEON_CRTC2_VBLANK_STAT) { 317 if (status & RADEON_CRTC2_VBLANK_STAT) {
317 drm_handle_vblank(rdev->ddev, 1); 318 drm_handle_vblank(rdev->ddev, 1);
319 wake_up(&rdev->irq.vblank_queue);
318 } 320 }
319 if (status & RADEON_FP_DETECT_STAT) { 321 if (status & RADEON_FP_DETECT_STAT) {
320 queue_hotplug = true; 322 queue_hotplug = true;
@@ -366,8 +368,8 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
366 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); 368 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
367 radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); 369 radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
368 /* Wait until IDLE & CLEAN */ 370 /* Wait until IDLE & CLEAN */
369 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 371 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
370 radeon_ring_write(rdev, (1 << 16) | (1 << 17)); 372 radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
371 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 373 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
372 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl | 374 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
373 RADEON_HDP_READ_BUFFER_INVALIDATE); 375 RADEON_HDP_READ_BUFFER_INVALIDATE);
@@ -1701,7 +1703,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev)
1701 } 1703 }
1702 for (i = 0; i < rdev->usec_timeout; i++) { 1704 for (i = 0; i < rdev->usec_timeout; i++) {
1703 tmp = RREG32(RADEON_RBBM_STATUS); 1705 tmp = RREG32(RADEON_RBBM_STATUS);
1704 if (!(tmp & (1 << 31))) { 1706 if (!(tmp & RADEON_RBBM_ACTIVE)) {
1705 return 0; 1707 return 0;
1706 } 1708 }
1707 DRM_UDELAY(1); 1709 DRM_UDELAY(1);
@@ -1716,8 +1718,8 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
1716 1718
1717 for (i = 0; i < rdev->usec_timeout; i++) { 1719 for (i = 0; i < rdev->usec_timeout; i++) {
1718 /* read MC_STATUS */ 1720 /* read MC_STATUS */
1719 tmp = RREG32(0x0150); 1721 tmp = RREG32(RADEON_MC_STATUS);
1720 if (tmp & (1 << 2)) { 1722 if (tmp & RADEON_MC_IDLE) {
1721 return 0; 1723 return 0;
1722 } 1724 }
1723 DRM_UDELAY(1); 1725 DRM_UDELAY(1);
@@ -1790,7 +1792,7 @@ int r100_gpu_reset(struct radeon_device *rdev)
1790 } 1792 }
1791 /* Check if GPU is idle */ 1793 /* Check if GPU is idle */
1792 status = RREG32(RADEON_RBBM_STATUS); 1794 status = RREG32(RADEON_RBBM_STATUS);
1793 if (status & (1 << 31)) { 1795 if (status & RADEON_RBBM_ACTIVE) {
1794 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); 1796 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
1795 return -1; 1797 return -1;
1796 } 1798 }
@@ -1800,6 +1802,9 @@ int r100_gpu_reset(struct radeon_device *rdev)
1800 1802
1801void r100_set_common_regs(struct radeon_device *rdev) 1803void r100_set_common_regs(struct radeon_device *rdev)
1802{ 1804{
1805 struct drm_device *dev = rdev->ddev;
1806 bool force_dac2 = false;
1807
1803 /* set these so they don't interfere with anything */ 1808 /* set these so they don't interfere with anything */
1804 WREG32(RADEON_OV0_SCALE_CNTL, 0); 1809 WREG32(RADEON_OV0_SCALE_CNTL, 0);
1805 WREG32(RADEON_SUBPIC_CNTL, 0); 1810 WREG32(RADEON_SUBPIC_CNTL, 0);
@@ -1808,6 +1813,68 @@ void r100_set_common_regs(struct radeon_device *rdev)
1808 WREG32(RADEON_DVI_I2C_CNTL_1, 0); 1813 WREG32(RADEON_DVI_I2C_CNTL_1, 0);
1809 WREG32(RADEON_CAP0_TRIG_CNTL, 0); 1814 WREG32(RADEON_CAP0_TRIG_CNTL, 0);
1810 WREG32(RADEON_CAP1_TRIG_CNTL, 0); 1815 WREG32(RADEON_CAP1_TRIG_CNTL, 0);
1816
1817 /* always set up dac2 on rn50 and some rv100 as lots
1818 * of servers seem to wire it up to a VGA port but
1819 * don't report it in the bios connector
1820 * table.
1821 */
1822 switch (dev->pdev->device) {
1823 /* RN50 */
1824 case 0x515e:
1825 case 0x5969:
1826 force_dac2 = true;
1827 break;
 1828 /* RV100 */
1829 case 0x5159:
1830 case 0x515a:
1831 /* DELL triple head servers */
1832 if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
1833 ((dev->pdev->subsystem_device == 0x016c) ||
1834 (dev->pdev->subsystem_device == 0x016d) ||
1835 (dev->pdev->subsystem_device == 0x016e) ||
1836 (dev->pdev->subsystem_device == 0x016f) ||
1837 (dev->pdev->subsystem_device == 0x0170) ||
1838 (dev->pdev->subsystem_device == 0x017d) ||
1839 (dev->pdev->subsystem_device == 0x017e) ||
1840 (dev->pdev->subsystem_device == 0x0183) ||
1841 (dev->pdev->subsystem_device == 0x018a) ||
1842 (dev->pdev->subsystem_device == 0x019a)))
1843 force_dac2 = true;
1844 break;
1845 }
1846
1847 if (force_dac2) {
1848 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
1849 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
1850 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
1851
1852 /* For CRT on DAC2, don't turn it on if BIOS didn't
 1853 enable it, even if it's detected.
1854 */
1855
1856 /* force it to crtc0 */
1857 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
1858 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
1859 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
1860
1861 /* set up the TV DAC */
1862 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
1863 RADEON_TV_DAC_STD_MASK |
1864 RADEON_TV_DAC_RDACPD |
1865 RADEON_TV_DAC_GDACPD |
1866 RADEON_TV_DAC_BDACPD |
1867 RADEON_TV_DAC_BGADJ_MASK |
1868 RADEON_TV_DAC_DACADJ_MASK);
1869 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
1870 RADEON_TV_DAC_NHOLD |
1871 RADEON_TV_DAC_STD_PS2 |
1872 (0x58 << 16));
1873
1874 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
1875 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
1876 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
1877 }
1811} 1878}
1812 1879
1813/* 1880/*
@@ -1889,17 +1956,20 @@ static u32 r100_get_accessible_vram(struct radeon_device *rdev)
1889void r100_vram_init_sizes(struct radeon_device *rdev) 1956void r100_vram_init_sizes(struct radeon_device *rdev)
1890{ 1957{
1891 u64 config_aper_size; 1958 u64 config_aper_size;
1892 u32 accessible;
1893 1959
1960 /* work out accessible VRAM */
1961 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1962 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1963 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
1964 /* FIXME we don't use the second aperture yet when we could use it */
1965 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
1966 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1894 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 1967 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
1895
1896 if (rdev->flags & RADEON_IS_IGP) { 1968 if (rdev->flags & RADEON_IS_IGP) {
1897 uint32_t tom; 1969 uint32_t tom;
1898 /* read NB_TOM to get the amount of ram stolen for the GPU */ 1970 /* read NB_TOM to get the amount of ram stolen for the GPU */
1899 tom = RREG32(RADEON_NB_TOM); 1971 tom = RREG32(RADEON_NB_TOM);
1900 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); 1972 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
1901 /* for IGPs we need to keep VRAM where it was put by the BIOS */
1902 rdev->mc.vram_location = (tom & 0xffff) << 16;
1903 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 1973 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
1904 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 1974 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1905 } else { 1975 } else {
@@ -1911,30 +1981,19 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
1911 rdev->mc.real_vram_size = 8192 * 1024; 1981 rdev->mc.real_vram_size = 8192 * 1024;
1912 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 1982 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
1913 } 1983 }
1914 /* let driver place VRAM */ 1984 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
1915 rdev->mc.vram_location = 0xFFFFFFFFUL; 1985 * Novell bug 204882 + along with lots of ubuntu ones
1916 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - 1986 */
1917 * Novell bug 204882 + along with lots of ubuntu ones */
1918 if (config_aper_size > rdev->mc.real_vram_size) 1987 if (config_aper_size > rdev->mc.real_vram_size)
1919 rdev->mc.mc_vram_size = config_aper_size; 1988 rdev->mc.mc_vram_size = config_aper_size;
1920 else 1989 else
1921 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 1990 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1922 } 1991 }
1923 1992 /* FIXME remove this once we support unmappable VRAM */
1924 /* work out accessible VRAM */ 1993 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
1925 accessible = r100_get_accessible_vram(rdev);
1926
1927 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1928 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1929
1930 if (accessible > rdev->mc.aper_size)
1931 accessible = rdev->mc.aper_size;
1932
1933 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
1934 rdev->mc.mc_vram_size = rdev->mc.aper_size; 1994 rdev->mc.mc_vram_size = rdev->mc.aper_size;
1935
1936 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
1937 rdev->mc.real_vram_size = rdev->mc.aper_size; 1995 rdev->mc.real_vram_size = rdev->mc.aper_size;
1996 }
1938} 1997}
1939 1998
1940void r100_vga_set_state(struct radeon_device *rdev, bool state) 1999void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@ -1951,11 +2010,18 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
1951 WREG32(RADEON_CONFIG_CNTL, temp); 2010 WREG32(RADEON_CONFIG_CNTL, temp);
1952} 2011}
1953 2012
1954void r100_vram_info(struct radeon_device *rdev) 2013void r100_mc_init(struct radeon_device *rdev)
1955{ 2014{
1956 r100_vram_get_type(rdev); 2015 u64 base;
1957 2016
2017 r100_vram_get_type(rdev);
1958 r100_vram_init_sizes(rdev); 2018 r100_vram_init_sizes(rdev);
2019 base = rdev->mc.aper_base;
2020 if (rdev->flags & RADEON_IS_IGP)
2021 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2022 radeon_vram_location(rdev, &rdev->mc, base);
2023 if (!(rdev->flags & RADEON_IS_AGP))
2024 radeon_gtt_location(rdev, &rdev->mc);
1959} 2025}
1960 2026
1961 2027
@@ -3226,10 +3292,9 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3226void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save) 3292void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3227{ 3293{
3228 /* Update base address for crtc */ 3294 /* Update base address for crtc */
3229 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location); 3295 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3230 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3296 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3231 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, 3297 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3232 rdev->mc.vram_location);
3233 } 3298 }
3234 /* Restore CRTC registers */ 3299 /* Restore CRTC registers */
3235 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT); 3300 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
@@ -3390,32 +3455,6 @@ void r100_fini(struct radeon_device *rdev)
3390 rdev->bios = NULL; 3455 rdev->bios = NULL;
3391} 3456}
3392 3457
3393int r100_mc_init(struct radeon_device *rdev)
3394{
3395 int r;
3396 u32 tmp;
3397
3398 /* Setup GPU memory space */
3399 rdev->mc.vram_location = 0xFFFFFFFFUL;
3400 rdev->mc.gtt_location = 0xFFFFFFFFUL;
3401 if (rdev->flags & RADEON_IS_IGP) {
3402 tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
3403 rdev->mc.vram_location = tmp << 16;
3404 }
3405 if (rdev->flags & RADEON_IS_AGP) {
3406 r = radeon_agp_init(rdev);
3407 if (r) {
3408 radeon_agp_disable(rdev);
3409 } else {
3410 rdev->mc.gtt_location = rdev->mc.agp_base;
3411 }
3412 }
3413 r = radeon_mc_setup(rdev);
3414 if (r)
3415 return r;
3416 return 0;
3417}
3418
3419int r100_init(struct radeon_device *rdev) 3458int r100_init(struct radeon_device *rdev)
3420{ 3459{
3421 int r; 3460 int r;
@@ -3458,12 +3497,15 @@ int r100_init(struct radeon_device *rdev)
3458 radeon_get_clock_info(rdev->ddev); 3497 radeon_get_clock_info(rdev->ddev);
3459 /* Initialize power management */ 3498 /* Initialize power management */
3460 radeon_pm_init(rdev); 3499 radeon_pm_init(rdev);
3461 /* Get vram informations */ 3500 /* initialize AGP */
3462 r100_vram_info(rdev); 3501 if (rdev->flags & RADEON_IS_AGP) {
3463 /* Initialize memory controller (also test AGP) */ 3502 r = radeon_agp_init(rdev);
3464 r = r100_mc_init(rdev); 3503 if (r) {
3465 if (r) 3504 radeon_agp_disable(rdev);
3466 return r; 3505 }
3506 }
3507 /* initialize VRAM */
3508 r100_mc_init(rdev);
3467 /* Fence driver */ 3509 /* Fence driver */
3468 r = radeon_fence_driver_init(rdev); 3510 r = radeon_fence_driver_init(rdev);
3469 if (r) 3511 if (r)
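
The reworked r100_mc_init() above no longer lets the driver place VRAM itself: on IGP chips the base comes straight from NB_TOM, and r100_vram_init_sizes() derives the stolen-memory size from the same register ((end - start + 1) in 64K units). A standalone sketch of that decoding, with a made-up register value:

#include <stdio.h>

int main(void)
{
    unsigned tom = 0x3fff3800;   /* hypothetical RADEON_NB_TOM readback */
    unsigned long long base = (unsigned long long)(tom & 0xffff) << 16;
    unsigned long long size =
        (unsigned long long)((tom >> 16) - (tom & 0xffff) + 1) << 16;

    printf("stolen VRAM: base 0x%llx, size %llu MiB\n", base, size >> 20);
    return 0;
}
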
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index ff1e0cd608bf..1146c9909c2c 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -31,6 +31,7 @@
31#include "radeon_reg.h" 31#include "radeon_reg.h"
32#include "radeon.h" 32#include "radeon.h"
33 33
34#include "r100d.h"
34#include "r200_reg_safe.h" 35#include "r200_reg_safe.h"
35 36
36#include "r100_track.h" 37#include "r100_track.h"
@@ -79,6 +80,51 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
79 return vtx_size; 80 return vtx_size;
80} 81}
81 82
83int r200_copy_dma(struct radeon_device *rdev,
84 uint64_t src_offset,
85 uint64_t dst_offset,
86 unsigned num_pages,
87 struct radeon_fence *fence)
88{
89 uint32_t size;
90 uint32_t cur_size;
91 int i, num_loops;
92 int r = 0;
93
94 /* radeon pitch is /64 */
95 size = num_pages << PAGE_SHIFT;
96 num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
97 r = radeon_ring_lock(rdev, num_loops * 4 + 64);
98 if (r) {
99 DRM_ERROR("radeon: moving bo (%d).\n", r);
100 return r;
101 }
102 /* Must wait for 2D idle & clean before DMA or hangs might happen */
103 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
104 radeon_ring_write(rdev, (1 << 16));
105 for (i = 0; i < num_loops; i++) {
106 cur_size = size;
107 if (cur_size > 0x1FFFFF) {
108 cur_size = 0x1FFFFF;
109 }
110 size -= cur_size;
111 radeon_ring_write(rdev, PACKET0(0x720, 2));
112 radeon_ring_write(rdev, src_offset);
113 radeon_ring_write(rdev, dst_offset);
114 radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
115 src_offset += cur_size;
116 dst_offset += cur_size;
117 }
118 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
119 radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
120 if (fence) {
121 r = radeon_fence_emit(rdev, fence);
122 }
123 radeon_ring_unlock_commit(rdev);
124 return r;
125}
126
127
82static int r200_get_vtx_size_1(uint32_t vtx_fmt_1) 128static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
83{ 129{
84 int vtx_size, i, tex_size; 130 int vtx_size, i, tex_size;
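
r200_copy_dma(), added above with essentially the body that the r300.c diff below drops as r300_copy_dma(), splits a move into GUI-DMA packets of at most 0x1FFFFF bytes and waits for 2D idle before and DMA idle after the burst. A standalone sketch of just the chunking arithmetic; the 4K PAGE_SHIFT and the transfer size are made up for illustration:

#include <stdio.h>

#define PAGE_SHIFT     12        /* 4K pages assumed for the sketch */
#define MAX_DMA_CHUNK  0x1FFFFF  /* per-packet limit used by r200_copy_dma() above */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned num_pages = 4096;               /* a 16 MiB move, value made up */
    unsigned size = num_pages << PAGE_SHIFT;
    int num_loops = DIV_ROUND_UP(size, MAX_DMA_CHUNK);
    unsigned cur;
    int i;

    for (i = 0; i < num_loops; i++) {
        cur = size > MAX_DMA_CHUNK ? MAX_DMA_CHUNK : size;
        size -= cur;
        printf("GUI-DMA packet %d: 0x%06x bytes\n", i, cur);
    }
    return 0;
}
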
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 43b55a030b4d..4cef90cd74e5 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -117,18 +117,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
117 r = radeon_gart_table_vram_pin(rdev); 117 r = radeon_gart_table_vram_pin(rdev);
118 if (r) 118 if (r)
119 return r; 119 return r;
120 radeon_gart_restore(rdev);
120 /* discard memory request outside of configured range */ 121 /* discard memory request outside of configured range */
121 tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; 122 tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
122 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); 123 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
123 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location); 124 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
124 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE; 125 tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
125 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp); 126 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
126 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); 127 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
127 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); 128 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
128 table_addr = rdev->gart.table_addr; 129 table_addr = rdev->gart.table_addr;
129 WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr); 130 WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
130 /* FIXME: setup default page */ 131 /* FIXME: setup default page */
131 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location); 132 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
132 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); 133 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
133 /* Clear error */ 134 /* Clear error */
134 WREG32_PCIE(0x18, 0); 135 WREG32_PCIE(0x18, 0);
@@ -174,18 +175,20 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
 174 /* Whoever calls radeon_fence_emit should call ring_lock and ask 175 /* Whoever calls radeon_fence_emit should call ring_lock and ask
 175 * for enough space (today callers are ib schedule and buffer move) */ 176 * for enough space (today callers are ib schedule and buffer move) */
176 /* Write SC register so SC & US assert idle */ 177 /* Write SC register so SC & US assert idle */
177 radeon_ring_write(rdev, PACKET0(0x43E0, 0)); 178 radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
178 radeon_ring_write(rdev, 0); 179 radeon_ring_write(rdev, 0);
179 radeon_ring_write(rdev, PACKET0(0x43E4, 0)); 180 radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
180 radeon_ring_write(rdev, 0); 181 radeon_ring_write(rdev, 0);
181 /* Flush 3D cache */ 182 /* Flush 3D cache */
182 radeon_ring_write(rdev, PACKET0(0x4E4C, 0)); 183 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
183 radeon_ring_write(rdev, (2 << 0)); 184 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
184 radeon_ring_write(rdev, PACKET0(0x4F18, 0)); 185 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
185 radeon_ring_write(rdev, (1 << 0)); 186 radeon_ring_write(rdev, R300_ZC_FLUSH);
186 /* Wait until IDLE & CLEAN */ 187 /* Wait until IDLE & CLEAN */
187 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 188 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
188 radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9)); 189 radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
190 RADEON_WAIT_2D_IDLECLEAN |
191 RADEON_WAIT_DMA_GUI_IDLE));
189 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 192 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
190 radeon_ring_write(rdev, rdev->config.r300.hdp_cntl | 193 radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
191 RADEON_HDP_READ_BUFFER_INVALIDATE); 194 RADEON_HDP_READ_BUFFER_INVALIDATE);
@@ -198,50 +201,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
198 radeon_ring_write(rdev, RADEON_SW_INT_FIRE); 201 radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
199} 202}
200 203
201int r300_copy_dma(struct radeon_device *rdev,
202 uint64_t src_offset,
203 uint64_t dst_offset,
204 unsigned num_pages,
205 struct radeon_fence *fence)
206{
207 uint32_t size;
208 uint32_t cur_size;
209 int i, num_loops;
210 int r = 0;
211
212 /* radeon pitch is /64 */
213 size = num_pages << PAGE_SHIFT;
214 num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
215 r = radeon_ring_lock(rdev, num_loops * 4 + 64);
216 if (r) {
217 DRM_ERROR("radeon: moving bo (%d).\n", r);
218 return r;
219 }
220 /* Must wait for 2D idle & clean before DMA or hangs might happen */
221 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
222 radeon_ring_write(rdev, (1 << 16));
223 for (i = 0; i < num_loops; i++) {
224 cur_size = size;
225 if (cur_size > 0x1FFFFF) {
226 cur_size = 0x1FFFFF;
227 }
228 size -= cur_size;
229 radeon_ring_write(rdev, PACKET0(0x720, 2));
230 radeon_ring_write(rdev, src_offset);
231 radeon_ring_write(rdev, dst_offset);
232 radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
233 src_offset += cur_size;
234 dst_offset += cur_size;
235 }
236 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
237 radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
238 if (fence) {
239 r = radeon_fence_emit(rdev, fence);
240 }
241 radeon_ring_unlock_commit(rdev);
242 return r;
243}
244
245void r300_ring_start(struct radeon_device *rdev) 204void r300_ring_start(struct radeon_device *rdev)
246{ 205{
247 unsigned gb_tile_config; 206 unsigned gb_tile_config;
@@ -281,8 +240,8 @@ void r300_ring_start(struct radeon_device *rdev)
281 radeon_ring_write(rdev, 240 radeon_ring_write(rdev,
282 RADEON_WAIT_2D_IDLECLEAN | 241 RADEON_WAIT_2D_IDLECLEAN |
283 RADEON_WAIT_3D_IDLECLEAN); 242 RADEON_WAIT_3D_IDLECLEAN);
284 radeon_ring_write(rdev, PACKET0(0x170C, 0)); 243 radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
285 radeon_ring_write(rdev, 1 << 31); 244 radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
286 radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0)); 245 radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
287 radeon_ring_write(rdev, 0); 246 radeon_ring_write(rdev, 0);
288 radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0)); 247 radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
@@ -349,8 +308,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
349 308
350 for (i = 0; i < rdev->usec_timeout; i++) { 309 for (i = 0; i < rdev->usec_timeout; i++) {
351 /* read MC_STATUS */ 310 /* read MC_STATUS */
352 tmp = RREG32(0x0150); 311 tmp = RREG32(RADEON_MC_STATUS);
353 if (tmp & (1 << 4)) { 312 if (tmp & R300_MC_IDLE) {
354 return 0; 313 return 0;
355 } 314 }
356 DRM_UDELAY(1); 315 DRM_UDELAY(1);
@@ -395,8 +354,8 @@ void r300_gpu_init(struct radeon_device *rdev)
395 "programming pipes. Bad things might happen.\n"); 354 "programming pipes. Bad things might happen.\n");
396 } 355 }
397 356
398 tmp = RREG32(0x170C); 357 tmp = RREG32(R300_DST_PIPE_CONFIG);
399 WREG32(0x170C, tmp | (1 << 31)); 358 WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
400 359
401 WREG32(R300_RB2D_DSTCACHE_MODE, 360 WREG32(R300_RB2D_DSTCACHE_MODE,
402 R300_DC_AUTOFLUSH_ENABLE | 361 R300_DC_AUTOFLUSH_ENABLE |
@@ -437,8 +396,8 @@ int r300_ga_reset(struct radeon_device *rdev)
437 /* GA still busy soft reset it */ 396 /* GA still busy soft reset it */
438 WREG32(0x429C, 0x200); 397 WREG32(0x429C, 0x200);
439 WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); 398 WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
440 WREG32(0x43E0, 0); 399 WREG32(R300_RE_SCISSORS_TL, 0);
441 WREG32(0x43E4, 0); 400 WREG32(R300_RE_SCISSORS_BR, 0);
442 WREG32(0x24AC, 0); 401 WREG32(0x24AC, 0);
443 } 402 }
444 /* Wait to prevent race in RBBM_STATUS */ 403 /* Wait to prevent race in RBBM_STATUS */
@@ -488,7 +447,7 @@ int r300_gpu_reset(struct radeon_device *rdev)
488 } 447 }
489 /* Check if GPU is idle */ 448 /* Check if GPU is idle */
490 status = RREG32(RADEON_RBBM_STATUS); 449 status = RREG32(RADEON_RBBM_STATUS);
491 if (status & (1 << 31)) { 450 if (status & RADEON_RBBM_ACTIVE) {
492 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); 451 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
493 return -1; 452 return -1;
494 } 453 }
@@ -500,13 +459,13 @@ int r300_gpu_reset(struct radeon_device *rdev)
500/* 459/*
501 * r300,r350,rv350,rv380 VRAM info 460 * r300,r350,rv350,rv380 VRAM info
502 */ 461 */
503void r300_vram_info(struct radeon_device *rdev) 462void r300_mc_init(struct radeon_device *rdev)
504{ 463{
505 uint32_t tmp; 464 u64 base;
465 u32 tmp;
506 466
507 /* DDR for all card after R300 & IGP */ 467 /* DDR for all card after R300 & IGP */
508 rdev->mc.vram_is_ddr = true; 468 rdev->mc.vram_is_ddr = true;
509
510 tmp = RREG32(RADEON_MEM_CNTL); 469 tmp = RREG32(RADEON_MEM_CNTL);
511 tmp &= R300_MEM_NUM_CHANNELS_MASK; 470 tmp &= R300_MEM_NUM_CHANNELS_MASK;
512 switch (tmp) { 471 switch (tmp) {
@@ -515,8 +474,13 @@ void r300_vram_info(struct radeon_device *rdev)
515 case 2: rdev->mc.vram_width = 256; break; 474 case 2: rdev->mc.vram_width = 256; break;
516 default: rdev->mc.vram_width = 128; break; 475 default: rdev->mc.vram_width = 128; break;
517 } 476 }
518
519 r100_vram_init_sizes(rdev); 477 r100_vram_init_sizes(rdev);
478 base = rdev->mc.aper_base;
479 if (rdev->flags & RADEON_IS_IGP)
480 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
481 radeon_vram_location(rdev, &rdev->mc, base);
482 if (!(rdev->flags & RADEON_IS_AGP))
483 radeon_gtt_location(rdev, &rdev->mc);
520} 484}
521 485
522void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) 486void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
@@ -578,6 +542,40 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
578 542
579} 543}
580 544
545int rv370_get_pcie_lanes(struct radeon_device *rdev)
546{
547 u32 link_width_cntl;
548
549 if (rdev->flags & RADEON_IS_IGP)
550 return 0;
551
552 if (!(rdev->flags & RADEON_IS_PCIE))
553 return 0;
554
555 /* FIXME wait for idle */
556
557 if (rdev->family < CHIP_R600)
558 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
559 else
560 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
561
562 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
563 case RADEON_PCIE_LC_LINK_WIDTH_X0:
564 return 0;
565 case RADEON_PCIE_LC_LINK_WIDTH_X1:
566 return 1;
567 case RADEON_PCIE_LC_LINK_WIDTH_X2:
568 return 2;
569 case RADEON_PCIE_LC_LINK_WIDTH_X4:
570 return 4;
571 case RADEON_PCIE_LC_LINK_WIDTH_X8:
572 return 8;
573 case RADEON_PCIE_LC_LINK_WIDTH_X16:
574 default:
575 return 16;
576 }
577}
578
581#if defined(CONFIG_DEBUG_FS) 579#if defined(CONFIG_DEBUG_FS)
582static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) 580static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
583{ 581{
@@ -707,6 +705,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
707 tile_flags |= R300_TXO_MACRO_TILE; 705 tile_flags |= R300_TXO_MACRO_TILE;
708 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 706 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
709 tile_flags |= R300_TXO_MICRO_TILE; 707 tile_flags |= R300_TXO_MICRO_TILE;
708 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
709 tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
710 710
711 tmp = idx_value + ((u32)reloc->lobj.gpu_offset); 711 tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
712 tmp |= tile_flags; 712 tmp |= tile_flags;
@@ -757,6 +757,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
757 tile_flags |= R300_COLOR_TILE_ENABLE; 757 tile_flags |= R300_COLOR_TILE_ENABLE;
758 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 758 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
759 tile_flags |= R300_COLOR_MICROTILE_ENABLE; 759 tile_flags |= R300_COLOR_MICROTILE_ENABLE;
760 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
761 tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
760 762
761 tmp = idx_value & ~(0x7 << 16); 763 tmp = idx_value & ~(0x7 << 16);
762 tmp |= tile_flags; 764 tmp |= tile_flags;
@@ -828,7 +830,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
828 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 830 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
829 tile_flags |= R300_DEPTHMACROTILE_ENABLE; 831 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
830 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 832 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
831 tile_flags |= R300_DEPTHMICROTILE_TILED;; 833 tile_flags |= R300_DEPTHMICROTILE_TILED;
834 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
835 tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
832 836
833 tmp = idx_value & ~(0x7 << 16); 837 tmp = idx_value & ~(0x7 << 16);
834 tmp |= tile_flags; 838 tmp |= tile_flags;
@@ -1387,12 +1391,15 @@ int r300_init(struct radeon_device *rdev)
1387 radeon_get_clock_info(rdev->ddev); 1391 radeon_get_clock_info(rdev->ddev);
1388 /* Initialize power management */ 1392 /* Initialize power management */
1389 radeon_pm_init(rdev); 1393 radeon_pm_init(rdev);
1390 /* Get vram informations */ 1394 /* initialize AGP */
1391 r300_vram_info(rdev); 1395 if (rdev->flags & RADEON_IS_AGP) {
1392 /* Initialize memory controller (also test AGP) */ 1396 r = radeon_agp_init(rdev);
1393 r = r420_mc_init(rdev); 1397 if (r) {
1394 if (r) 1398 radeon_agp_disable(rdev);
1395 return r; 1399 }
1400 }
1401 /* initialize memory controller */
1402 r300_mc_init(rdev);
1396 /* Fence driver */ 1403 /* Fence driver */
1397 r = radeon_fence_driver_init(rdev); 1404 r = radeon_fence_driver_init(rdev);
1398 if (r) 1405 if (r)
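
The new else-if branches in r300_packet0_check() above select R300_TXO_MICRO_TILE_SQUARE (and the matching COLOR/DEPTH variants) when the square micro-tiling flag is set but plain micro-tiling is not. A standalone sketch of that selection; the register bit values come from the r300_reg.h hunk at the end of this patch, while the TILING_* request values below are placeholders:

#include <stdio.h>

/* Register bits from the r300_reg.h hunk at the end of this patch. */
#define R300_TXO_MACRO_TILE        (1 << 2)
#define R300_TXO_MICRO_TILE        (1 << 3)
#define R300_TXO_MICRO_TILE_SQUARE (2 << 3)

/* The RADEON_TILING_* request flags stay opaque here; these bit values are
 * made up for the sketch and only their presence or absence matters. */
#define TILING_MACRO        0x1
#define TILING_MICRO        0x2
#define TILING_MICRO_SQUARE 0x4

static unsigned txo_tile_flags(unsigned tiling_flags)
{
    unsigned tile_flags = 0;

    if (tiling_flags & TILING_MACRO)
        tile_flags |= R300_TXO_MACRO_TILE;
    /* MICRO and MICRO_SQUARE encode values 1 and 2 of what looks like the
     * same two-bit field starting at bit 3, hence the if/else. */
    if (tiling_flags & TILING_MICRO)
        tile_flags |= R300_TXO_MICRO_TILE;
    else if (tiling_flags & TILING_MICRO_SQUARE)
        tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

    return tile_flags;
}

int main(void)
{
    printf("macro + square micro tiling -> TXO bits 0x%02x\n",
           txo_tile_flags(TILING_MACRO | TILING_MICRO_SQUARE));
    return 0;
}
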
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 34bffa0e4b73..ea46d558e8f3 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -33,6 +33,7 @@
33 33
34#include "drmP.h" 34#include "drmP.h"
35#include "drm.h" 35#include "drm.h"
36#include "drm_buffer.h"
36#include "radeon_drm.h" 37#include "radeon_drm.h"
37#include "radeon_drv.h" 38#include "radeon_drv.h"
38#include "r300_reg.h" 39#include "r300_reg.h"
@@ -299,46 +300,42 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
299 int reg; 300 int reg;
300 int sz; 301 int sz;
301 int i; 302 int i;
302 int values[64]; 303 u32 *value;
303 RING_LOCALS; 304 RING_LOCALS;
304 305
305 sz = header.packet0.count; 306 sz = header.packet0.count;
306 reg = (header.packet0.reghi << 8) | header.packet0.reglo; 307 reg = (header.packet0.reghi << 8) | header.packet0.reglo;
307 308
308 if ((sz > 64) || (sz < 0)) { 309 if ((sz > 64) || (sz < 0)) {
309 DRM_ERROR 310 DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
310 ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", 311 reg, sz);
311 reg, sz);
312 return -EINVAL; 312 return -EINVAL;
313 } 313 }
314
314 for (i = 0; i < sz; i++) { 315 for (i = 0; i < sz; i++) {
315 values[i] = ((int *)cmdbuf->buf)[i];
316 switch (r300_reg_flags[(reg >> 2) + i]) { 316 switch (r300_reg_flags[(reg >> 2) + i]) {
317 case MARK_SAFE: 317 case MARK_SAFE:
318 break; 318 break;
319 case MARK_CHECK_OFFSET: 319 case MARK_CHECK_OFFSET:
320 if (!radeon_check_offset(dev_priv, (u32) values[i])) { 320 value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
321 DRM_ERROR 321 if (!radeon_check_offset(dev_priv, *value)) {
322 ("Offset failed range check (reg=%04x sz=%d)\n", 322 DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n",
323 reg, sz); 323 reg, sz);
324 return -EINVAL; 324 return -EINVAL;
325 } 325 }
326 break; 326 break;
327 default: 327 default:
328 DRM_ERROR("Register %04x failed check as flag=%02x\n", 328 DRM_ERROR("Register %04x failed check as flag=%02x\n",
329 reg + i * 4, r300_reg_flags[(reg >> 2) + i]); 329 reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
330 return -EINVAL; 330 return -EINVAL;
331 } 331 }
332 } 332 }
333 333
334 BEGIN_RING(1 + sz); 334 BEGIN_RING(1 + sz);
335 OUT_RING(CP_PACKET0(reg, sz - 1)); 335 OUT_RING(CP_PACKET0(reg, sz - 1));
336 OUT_RING_TABLE(values, sz); 336 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
337 ADVANCE_RING(); 337 ADVANCE_RING();
338 338
339 cmdbuf->buf += sz * 4;
340 cmdbuf->bufsz -= sz * 4;
341
342 return 0; 339 return 0;
343} 340}
344 341
@@ -362,7 +359,7 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
362 if (!sz) 359 if (!sz)
363 return 0; 360 return 0;
364 361
365 if (sz * 4 > cmdbuf->bufsz) 362 if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
366 return -EINVAL; 363 return -EINVAL;
367 364
368 if (reg + sz * 4 >= 0x10000) { 365 if (reg + sz * 4 >= 0x10000) {
@@ -380,12 +377,9 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
380 377
381 BEGIN_RING(1 + sz); 378 BEGIN_RING(1 + sz);
382 OUT_RING(CP_PACKET0(reg, sz - 1)); 379 OUT_RING(CP_PACKET0(reg, sz - 1));
383 OUT_RING_TABLE((int *)cmdbuf->buf, sz); 380 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
384 ADVANCE_RING(); 381 ADVANCE_RING();
385 382
386 cmdbuf->buf += sz * 4;
387 cmdbuf->bufsz -= sz * 4;
388
389 return 0; 383 return 0;
390} 384}
391 385
@@ -407,7 +401,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
407 401
408 if (!sz) 402 if (!sz)
409 return 0; 403 return 0;
410 if (sz * 16 > cmdbuf->bufsz) 404 if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer))
411 return -EINVAL; 405 return -EINVAL;
412 406
413 /* VAP is very sensitive so we purge cache before we program it 407 /* VAP is very sensitive so we purge cache before we program it
@@ -426,7 +420,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
426 BEGIN_RING(3 + sz * 4); 420 BEGIN_RING(3 + sz * 4);
427 OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr); 421 OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
428 OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1)); 422 OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
429 OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4); 423 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4);
430 ADVANCE_RING(); 424 ADVANCE_RING();
431 425
432 BEGIN_RING(2); 426 BEGIN_RING(2);
@@ -434,9 +428,6 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
434 OUT_RING(0); 428 OUT_RING(0);
435 ADVANCE_RING(); 429 ADVANCE_RING();
436 430
437 cmdbuf->buf += sz * 16;
438 cmdbuf->bufsz -= sz * 16;
439
440 return 0; 431 return 0;
441} 432}
442 433
@@ -449,14 +440,14 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
449{ 440{
450 RING_LOCALS; 441 RING_LOCALS;
451 442
452 if (8 * 4 > cmdbuf->bufsz) 443 if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
453 return -EINVAL; 444 return -EINVAL;
454 445
455 BEGIN_RING(10); 446 BEGIN_RING(10);
456 OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8)); 447 OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
457 OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING | 448 OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
458 (1 << R300_PRIM_NUM_VERTICES_SHIFT)); 449 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
459 OUT_RING_TABLE((int *)cmdbuf->buf, 8); 450 OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8);
460 ADVANCE_RING(); 451 ADVANCE_RING();
461 452
462 BEGIN_RING(4); 453 BEGIN_RING(4);
@@ -468,9 +459,6 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
468 /* set flush flag */ 459 /* set flush flag */
469 dev_priv->track_flush |= RADEON_FLUSH_EMITED; 460 dev_priv->track_flush |= RADEON_FLUSH_EMITED;
470 461
471 cmdbuf->buf += 8 * 4;
472 cmdbuf->bufsz -= 8 * 4;
473
474 return 0; 462 return 0;
475} 463}
476 464
@@ -480,28 +468,29 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
480{ 468{
481 int count, i, k; 469 int count, i, k;
482#define MAX_ARRAY_PACKET 64 470#define MAX_ARRAY_PACKET 64
483 u32 payload[MAX_ARRAY_PACKET]; 471 u32 *data;
484 u32 narrays; 472 u32 narrays;
485 RING_LOCALS; 473 RING_LOCALS;
486 474
487 count = (header >> 16) & 0x3fff; 475 count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16;
488 476
489 if ((count + 1) > MAX_ARRAY_PACKET) { 477 if ((count + 1) > MAX_ARRAY_PACKET) {
490 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", 478 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
491 count); 479 count);
492 return -EINVAL; 480 return -EINVAL;
493 } 481 }
494 memset(payload, 0, MAX_ARRAY_PACKET * 4);
495 memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
496
497 /* carefully check packet contents */ 482 /* carefully check packet contents */
498 483
499 narrays = payload[0]; 484 /* We have already read the header so advance the buffer. */
485 drm_buffer_advance(cmdbuf->buffer, 4);
486
487 narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
500 k = 0; 488 k = 0;
501 i = 1; 489 i = 1;
502 while ((k < narrays) && (i < (count + 1))) { 490 while ((k < narrays) && (i < (count + 1))) {
503 i++; /* skip attribute field */ 491 i++; /* skip attribute field */
504 if (!radeon_check_offset(dev_priv, payload[i])) { 492 data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
493 if (!radeon_check_offset(dev_priv, *data)) {
505 DRM_ERROR 494 DRM_ERROR
506 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", 495 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
507 k, i); 496 k, i);
@@ -512,7 +501,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
512 if (k == narrays) 501 if (k == narrays)
513 break; 502 break;
514 /* have one more to process, they come in pairs */ 503 /* have one more to process, they come in pairs */
515 if (!radeon_check_offset(dev_priv, payload[i])) { 504 data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
505 if (!radeon_check_offset(dev_priv, *data)) {
516 DRM_ERROR 506 DRM_ERROR
517 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", 507 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
518 k, i); 508 k, i);
@@ -533,30 +523,30 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
533 523
534 BEGIN_RING(count + 2); 524 BEGIN_RING(count + 2);
535 OUT_RING(header); 525 OUT_RING(header);
536 OUT_RING_TABLE(payload, count + 1); 526 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1);
537 ADVANCE_RING(); 527 ADVANCE_RING();
538 528
539 cmdbuf->buf += (count + 2) * 4;
540 cmdbuf->bufsz -= (count + 2) * 4;
541
542 return 0; 529 return 0;
543} 530}
544 531
545static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, 532static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
546 drm_radeon_kcmd_buffer_t *cmdbuf) 533 drm_radeon_kcmd_buffer_t *cmdbuf)
547{ 534{
548 u32 *cmd = (u32 *) cmdbuf->buf; 535 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
549 int count, ret; 536 int count, ret;
550 RING_LOCALS; 537 RING_LOCALS;
551 538
552 count=(cmd[0]>>16) & 0x3fff;
553 539
554 if (cmd[0] & 0x8000) { 540 count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
555 u32 offset;
556 541
557 if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL 542 if (*cmd & 0x8000) {
543 u32 offset;
544 u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
545 if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
558 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 546 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
559 offset = cmd[2] << 10; 547
548 u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
549 offset = *cmd2 << 10;
560 ret = !radeon_check_offset(dev_priv, offset); 550 ret = !radeon_check_offset(dev_priv, offset);
561 if (ret) { 551 if (ret) {
562 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); 552 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
@@ -564,9 +554,10 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
564 } 554 }
565 } 555 }
566 556
567 if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && 557 if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
568 (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 558 (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
569 offset = cmd[3] << 10; 559 u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
560 offset = *cmd3 << 10;
570 ret = !radeon_check_offset(dev_priv, offset); 561 ret = !radeon_check_offset(dev_priv, offset);
571 if (ret) { 562 if (ret) {
572 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); 563 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
@@ -577,28 +568,25 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
577 } 568 }
578 569
579 BEGIN_RING(count+2); 570 BEGIN_RING(count+2);
580 OUT_RING(cmd[0]); 571 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
581 OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
582 ADVANCE_RING(); 572 ADVANCE_RING();
583 573
584 cmdbuf->buf += (count+2)*4;
585 cmdbuf->bufsz -= (count+2)*4;
586
587 return 0; 574 return 0;
588} 575}
589 576
590static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv, 577static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
591 drm_radeon_kcmd_buffer_t *cmdbuf) 578 drm_radeon_kcmd_buffer_t *cmdbuf)
592{ 579{
593 u32 *cmd; 580 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
581 u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
594 int count; 582 int count;
595 int expected_count; 583 int expected_count;
596 RING_LOCALS; 584 RING_LOCALS;
597 585
598 cmd = (u32 *) cmdbuf->buf; 586 count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
599 count = (cmd[0]>>16) & 0x3fff; 587
600 expected_count = cmd[1] >> 16; 588 expected_count = *cmd1 >> 16;
601 if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit)) 589 if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
602 expected_count = (expected_count+1)/2; 590 expected_count = (expected_count+1)/2;
603 591
604 if (count && count != expected_count) { 592 if (count && count != expected_count) {
@@ -608,55 +596,53 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
608 } 596 }
609 597
610 BEGIN_RING(count+2); 598 BEGIN_RING(count+2);
611 OUT_RING(cmd[0]); 599 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
612 OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
613 ADVANCE_RING(); 600 ADVANCE_RING();
614 601
615 cmdbuf->buf += (count+2)*4;
616 cmdbuf->bufsz -= (count+2)*4;
617
618 if (!count) { 602 if (!count) {
619 drm_r300_cmd_header_t header; 603 drm_r300_cmd_header_t stack_header, *header;
604 u32 *cmd1, *cmd2, *cmd3;
620 605
621 if (cmdbuf->bufsz < 4*4 + sizeof(header)) { 606 if (drm_buffer_unprocessed(cmdbuf->buffer)
607 < 4*4 + sizeof(stack_header)) {
622 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n"); 608 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
623 return -EINVAL; 609 return -EINVAL;
624 } 610 }
625 611
626 header.u = *(unsigned int *)cmdbuf->buf; 612 header = drm_buffer_read_object(cmdbuf->buffer,
613 sizeof(stack_header), &stack_header);
627 614
628 cmdbuf->buf += sizeof(header); 615 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
629 cmdbuf->bufsz -= sizeof(header); 616 cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
630 cmd = (u32 *) cmdbuf->buf; 617 cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
618 cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
631 619
632 if (header.header.cmd_type != R300_CMD_PACKET3 || 620 if (header->header.cmd_type != R300_CMD_PACKET3 ||
633 header.packet3.packet != R300_CMD_PACKET3_RAW || 621 header->packet3.packet != R300_CMD_PACKET3_RAW ||
634 cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) { 622 *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
635 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n"); 623 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
636 return -EINVAL; 624 return -EINVAL;
637 } 625 }
638 626
639 if ((cmd[1] & 0x8000ffff) != 0x80000810) { 627 if ((*cmd1 & 0x8000ffff) != 0x80000810) {
640 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); 628 DRM_ERROR("Invalid indx_buffer reg address %08X\n",
629 *cmd1);
641 return -EINVAL; 630 return -EINVAL;
642 } 631 }
643 if (!radeon_check_offset(dev_priv, cmd[2])) { 632 if (!radeon_check_offset(dev_priv, *cmd2)) {
644 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); 633 DRM_ERROR("Invalid indx_buffer offset is %08X\n",
634 *cmd2);
645 return -EINVAL; 635 return -EINVAL;
646 } 636 }
647 if (cmd[3] != expected_count) { 637 if (*cmd3 != expected_count) {
648 DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n", 638 DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
649 cmd[3], expected_count); 639 *cmd3, expected_count);
650 return -EINVAL; 640 return -EINVAL;
651 } 641 }
652 642
653 BEGIN_RING(4); 643 BEGIN_RING(4);
654 OUT_RING(cmd[0]); 644 OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4);
655 OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
656 ADVANCE_RING(); 645 ADVANCE_RING();
657
658 cmdbuf->buf += 4*4;
659 cmdbuf->bufsz -= 4*4;
660 } 646 }
661 647
662 return 0; 648 return 0;
@@ -665,39 +651,39 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
665static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, 651static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
666 drm_radeon_kcmd_buffer_t *cmdbuf) 652 drm_radeon_kcmd_buffer_t *cmdbuf)
667{ 653{
668 u32 header; 654 u32 *header;
669 int count; 655 int count;
670 RING_LOCALS; 656 RING_LOCALS;
671 657
672 if (4 > cmdbuf->bufsz) 658 if (4 > drm_buffer_unprocessed(cmdbuf->buffer))
673 return -EINVAL; 659 return -EINVAL;
674 660
675 /* Fixme !! This simply emits a packet without much checking. 661 /* Fixme !! This simply emits a packet without much checking.
676 We need to be smarter. */ 662 We need to be smarter. */
677 663
678 /* obtain first word - actual packet3 header */ 664 /* obtain first word - actual packet3 header */
679 header = *(u32 *) cmdbuf->buf; 665 header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
680 666
681 /* Is it packet 3 ? */ 667 /* Is it packet 3 ? */
682 if ((header >> 30) != 0x3) { 668 if ((*header >> 30) != 0x3) {
683 DRM_ERROR("Not a packet3 header (0x%08x)\n", header); 669 DRM_ERROR("Not a packet3 header (0x%08x)\n", *header);
684 return -EINVAL; 670 return -EINVAL;
685 } 671 }
686 672
687 count = (header >> 16) & 0x3fff; 673 count = (*header >> 16) & 0x3fff;
688 674
689 /* Check again now that we know how much data to expect */ 675 /* Check again now that we know how much data to expect */
690 if ((count + 2) * 4 > cmdbuf->bufsz) { 676 if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) {
691 DRM_ERROR 677 DRM_ERROR
692 ("Expected packet3 of length %d but have only %d bytes left\n", 678 ("Expected packet3 of length %d but have only %d bytes left\n",
693 (count + 2) * 4, cmdbuf->bufsz); 679 (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer));
694 return -EINVAL; 680 return -EINVAL;
695 } 681 }
696 682
697 /* Is it a packet type we know about ? */ 683 /* Is it a packet type we know about ? */
698 switch (header & 0xff00) { 684 switch (*header & 0xff00) {
699 case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */ 685 case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
700 return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header); 686 return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header);
701 687
702 case RADEON_CNTL_BITBLT_MULTI: 688 case RADEON_CNTL_BITBLT_MULTI:
703 return r300_emit_bitblt_multi(dev_priv, cmdbuf); 689 return r300_emit_bitblt_multi(dev_priv, cmdbuf);
@@ -723,18 +709,14 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
723 /* these packets are safe */ 709 /* these packets are safe */
724 break; 710 break;
725 default: 711 default:
726 DRM_ERROR("Unknown packet3 header (0x%08x)\n", header); 712 DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header);
727 return -EINVAL; 713 return -EINVAL;
728 } 714 }
729 715
730 BEGIN_RING(count + 2); 716 BEGIN_RING(count + 2);
731 OUT_RING(header); 717 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
732 OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
733 ADVANCE_RING(); 718 ADVANCE_RING();
734 719
735 cmdbuf->buf += (count + 2) * 4;
736 cmdbuf->bufsz -= (count + 2) * 4;
737
738 return 0; 720 return 0;
739} 721}
740 722
@@ -748,8 +730,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
748{ 730{
749 int n; 731 int n;
750 int ret; 732 int ret;
751 char *orig_buf = cmdbuf->buf; 733 int orig_iter = cmdbuf->buffer->iterator;
752 int orig_bufsz = cmdbuf->bufsz;
753 734
754 /* This is a do-while-loop so that we run the interior at least once, 735 /* This is a do-while-loop so that we run the interior at least once,
755 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale. 736 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
@@ -761,8 +742,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
761 if (ret) 742 if (ret)
762 return ret; 743 return ret;
763 744
764 cmdbuf->buf = orig_buf; 745 cmdbuf->buffer->iterator = orig_iter;
765 cmdbuf->bufsz = orig_bufsz;
766 } 746 }
767 747
768 switch (header.packet3.packet) { 748 switch (header.packet3.packet) {
@@ -785,9 +765,9 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
785 break; 765 break;
786 766
787 default: 767 default:
788 DRM_ERROR("bad packet3 type %i at %p\n", 768 DRM_ERROR("bad packet3 type %i at byte %d\n",
789 header.packet3.packet, 769 header.packet3.packet,
790 cmdbuf->buf - sizeof(header)); 770 cmdbuf->buffer->iterator - (int)sizeof(header));
791 return -EINVAL; 771 return -EINVAL;
792 } 772 }
793 773
@@ -923,12 +903,13 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
923 drm_r300_cmd_header_t header) 903 drm_r300_cmd_header_t header)
924{ 904{
925 u32 *ref_age_base; 905 u32 *ref_age_base;
926 u32 i, buf_idx, h_pending; 906 u32 i, *buf_idx, h_pending;
927 u64 ptr_addr; 907 u64 *ptr_addr;
908 u64 stack_ptr_addr;
928 RING_LOCALS; 909 RING_LOCALS;
929 910
930 if (cmdbuf->bufsz < 911 if (drm_buffer_unprocessed(cmdbuf->buffer) <
931 (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) { 912 (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) {
932 return -EINVAL; 913 return -EINVAL;
933 } 914 }
934 915
@@ -938,36 +919,35 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
938 919
939 dev_priv->scratch_ages[header.scratch.reg]++; 920 dev_priv->scratch_ages[header.scratch.reg]++;
940 921
941 ptr_addr = get_unaligned((u64 *)cmdbuf->buf); 922 ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
942 ref_age_base = (u32 *)(unsigned long)ptr_addr; 923 sizeof(stack_ptr_addr), &stack_ptr_addr);
943 924 ref_age_base = (u32 *)(unsigned long)*ptr_addr;
944 cmdbuf->buf += sizeof(u64);
945 cmdbuf->bufsz -= sizeof(u64);
946 925
947 for (i=0; i < header.scratch.n_bufs; i++) { 926 for (i=0; i < header.scratch.n_bufs; i++) {
948 buf_idx = *(u32 *)cmdbuf->buf; 927 buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
949 buf_idx *= 2; /* 8 bytes per buf */ 928 *buf_idx *= 2; /* 8 bytes per buf */
950 929
951 if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { 930 if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
931 &dev_priv->scratch_ages[header.scratch.reg],
932 sizeof(u32)))
952 return -EINVAL; 933 return -EINVAL;
953 }
954 934
955 if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { 935 if (DRM_COPY_FROM_USER(&h_pending,
936 ref_age_base + *buf_idx + 1,
937 sizeof(u32)))
956 return -EINVAL; 938 return -EINVAL;
957 }
958 939
959 if (h_pending == 0) { 940 if (h_pending == 0)
960 return -EINVAL; 941 return -EINVAL;
961 }
962 942
963 h_pending--; 943 h_pending--;
964 944
965 if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { 945 if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
946 &h_pending,
947 sizeof(u32)))
966 return -EINVAL; 948 return -EINVAL;
967 }
968 949
969 cmdbuf->buf += sizeof(buf_idx); 950 drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx));
970 cmdbuf->bufsz -= sizeof(buf_idx);
971 } 951 }
972 952
973 BEGIN_RING(2); 953 BEGIN_RING(2);
@@ -1009,19 +989,16 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
1009 DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type); 989 DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
1010 if (!sz) 990 if (!sz)
1011 return 0; 991 return 0;
1012 if (sz * stride * 4 > cmdbuf->bufsz) 992 if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
1013 return -EINVAL; 993 return -EINVAL;
1014 994
1015 BEGIN_RING(3 + sz * stride); 995 BEGIN_RING(3 + sz * stride);
1016 OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr); 996 OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
1017 OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1)); 997 OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
1018 OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride); 998 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride);
1019 999
1020 ADVANCE_RING(); 1000 ADVANCE_RING();
1021 1001
1022 cmdbuf->buf += sz * stride * 4;
1023 cmdbuf->bufsz -= sz * stride * 4;
1024
1025 return 0; 1002 return 0;
1026} 1003}
1027 1004
@@ -1053,19 +1030,18 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1053 goto cleanup; 1030 goto cleanup;
1054 } 1031 }
1055 1032
1056 while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) { 1033 while (drm_buffer_unprocessed(cmdbuf->buffer)
1034 >= sizeof(drm_r300_cmd_header_t)) {
1057 int idx; 1035 int idx;
1058 drm_r300_cmd_header_t header; 1036 drm_r300_cmd_header_t *header, stack_header;
1059
1060 header.u = *(unsigned int *)cmdbuf->buf;
1061 1037
1062 cmdbuf->buf += sizeof(header); 1038 header = drm_buffer_read_object(cmdbuf->buffer,
1063 cmdbuf->bufsz -= sizeof(header); 1039 sizeof(stack_header), &stack_header);
1064 1040
1065 switch (header.header.cmd_type) { 1041 switch (header->header.cmd_type) {
1066 case R300_CMD_PACKET0: 1042 case R300_CMD_PACKET0:
1067 DRM_DEBUG("R300_CMD_PACKET0\n"); 1043 DRM_DEBUG("R300_CMD_PACKET0\n");
1068 ret = r300_emit_packet0(dev_priv, cmdbuf, header); 1044 ret = r300_emit_packet0(dev_priv, cmdbuf, *header);
1069 if (ret) { 1045 if (ret) {
1070 DRM_ERROR("r300_emit_packet0 failed\n"); 1046 DRM_ERROR("r300_emit_packet0 failed\n");
1071 goto cleanup; 1047 goto cleanup;
@@ -1074,7 +1050,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1074 1050
1075 case R300_CMD_VPU: 1051 case R300_CMD_VPU:
1076 DRM_DEBUG("R300_CMD_VPU\n"); 1052 DRM_DEBUG("R300_CMD_VPU\n");
1077 ret = r300_emit_vpu(dev_priv, cmdbuf, header); 1053 ret = r300_emit_vpu(dev_priv, cmdbuf, *header);
1078 if (ret) { 1054 if (ret) {
1079 DRM_ERROR("r300_emit_vpu failed\n"); 1055 DRM_ERROR("r300_emit_vpu failed\n");
1080 goto cleanup; 1056 goto cleanup;
@@ -1083,7 +1059,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1083 1059
1084 case R300_CMD_PACKET3: 1060 case R300_CMD_PACKET3:
1085 DRM_DEBUG("R300_CMD_PACKET3\n"); 1061 DRM_DEBUG("R300_CMD_PACKET3\n");
1086 ret = r300_emit_packet3(dev_priv, cmdbuf, header); 1062 ret = r300_emit_packet3(dev_priv, cmdbuf, *header);
1087 if (ret) { 1063 if (ret) {
1088 DRM_ERROR("r300_emit_packet3 failed\n"); 1064 DRM_ERROR("r300_emit_packet3 failed\n");
1089 goto cleanup; 1065 goto cleanup;
@@ -1117,8 +1093,8 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1117 int i; 1093 int i;
1118 RING_LOCALS; 1094 RING_LOCALS;
1119 1095
1120 BEGIN_RING(header.delay.count); 1096 BEGIN_RING(header->delay.count);
1121 for (i = 0; i < header.delay.count; i++) 1097 for (i = 0; i < header->delay.count; i++)
1122 OUT_RING(RADEON_CP_PACKET2); 1098 OUT_RING(RADEON_CP_PACKET2);
1123 ADVANCE_RING(); 1099 ADVANCE_RING();
1124 } 1100 }
@@ -1126,7 +1102,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1126 1102
1127 case R300_CMD_DMA_DISCARD: 1103 case R300_CMD_DMA_DISCARD:
1128 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); 1104 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
1129 idx = header.dma.buf_idx; 1105 idx = header->dma.buf_idx;
1130 if (idx < 0 || idx >= dma->buf_count) { 1106 if (idx < 0 || idx >= dma->buf_count) {
1131 DRM_ERROR("buffer index %d (of %d max)\n", 1107 DRM_ERROR("buffer index %d (of %d max)\n",
1132 idx, dma->buf_count - 1); 1108 idx, dma->buf_count - 1);
@@ -1149,12 +1125,12 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1149 1125
1150 case R300_CMD_WAIT: 1126 case R300_CMD_WAIT:
1151 DRM_DEBUG("R300_CMD_WAIT\n"); 1127 DRM_DEBUG("R300_CMD_WAIT\n");
1152 r300_cmd_wait(dev_priv, header); 1128 r300_cmd_wait(dev_priv, *header);
1153 break; 1129 break;
1154 1130
1155 case R300_CMD_SCRATCH: 1131 case R300_CMD_SCRATCH:
1156 DRM_DEBUG("R300_CMD_SCRATCH\n"); 1132 DRM_DEBUG("R300_CMD_SCRATCH\n");
1157 ret = r300_scratch(dev_priv, cmdbuf, header); 1133 ret = r300_scratch(dev_priv, cmdbuf, *header);
1158 if (ret) { 1134 if (ret) {
1159 DRM_ERROR("r300_scratch failed\n"); 1135 DRM_ERROR("r300_scratch failed\n");
1160 goto cleanup; 1136 goto cleanup;
@@ -1168,16 +1144,16 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1168 goto cleanup; 1144 goto cleanup;
1169 } 1145 }
1170 DRM_DEBUG("R300_CMD_R500FP\n"); 1146 DRM_DEBUG("R300_CMD_R500FP\n");
1171 ret = r300_emit_r500fp(dev_priv, cmdbuf, header); 1147 ret = r300_emit_r500fp(dev_priv, cmdbuf, *header);
1172 if (ret) { 1148 if (ret) {
1173 DRM_ERROR("r300_emit_r500fp failed\n"); 1149 DRM_ERROR("r300_emit_r500fp failed\n");
1174 goto cleanup; 1150 goto cleanup;
1175 } 1151 }
1176 break; 1152 break;
1177 default: 1153 default:
1178 DRM_ERROR("bad cmd_type %i at %p\n", 1154 DRM_ERROR("bad cmd_type %i at byte %d\n",
1179 header.header.cmd_type, 1155 header->header.cmd_type,
1180 cmdbuf->buf - sizeof(header)); 1156 cmdbuf->buffer->iterator - (int)sizeof(*header));
1181 ret = -EINVAL; 1157 ret = -EINVAL;
1182 goto cleanup; 1158 goto cleanup;
1183 } 1159 }
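
The r300 hunks above drop the manual cmdbuf->buf / cmdbuf->bufsz pointer arithmetic in favour of the drm_buffer helpers introduced by drivers/gpu/drm/drm_buffer.c in this series. Below is a minimal sketch of the resulting consumption pattern; walk_headers() and handle_header() are hypothetical names, the include paths are assumed, and only the drm_buffer_* calls are copied from the converted parser. The note about the iterator advancing is my reading of the new helper, not something stated in the hunks.

	#include "drm_buffer.h"		/* helpers added by this series (path assumed) */
	#include "radeon_drm.h"		/* drm_r300_cmd_header_t */

	/* Walk a command stream header by header using the new iterator. */
	static int walk_headers(struct drm_buffer *buffer)
	{
		drm_r300_cmd_header_t *header, stack_header;

		while (drm_buffer_unprocessed(buffer)
		       >= sizeof(drm_r300_cmd_header_t)) {
			/* Returns a pointer into the buffer (or a copy in
			 * stack_header when the object straddles a page) and,
			 * as I read the helper, advances the internal iterator,
			 * so no manual buf/bufsz bookkeeping is needed. */
			header = drm_buffer_read_object(buffer,
							sizeof(stack_header),
							&stack_header);
			if (handle_header(header))	/* hypothetical dispatcher */
				return -EINVAL;
		}
		return 0;
	}

Per-packet payloads are then consumed with drm_buffer_advance() or streamed straight to the ring with OUT_RING_DRM_BUFFER(), as the hunks above show, instead of adjusting cmdbuf->buf and cmdbuf->bufsz by hand.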
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 1735a2b69580..1a0d5362cd79 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -952,6 +952,7 @@
952# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0) 952# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
953# define R300_TXO_MACRO_TILE (1 << 2) 953# define R300_TXO_MACRO_TILE (1 << 2)
954# define R300_TXO_MICRO_TILE (1 << 3) 954# define R300_TXO_MICRO_TILE (1 << 3)
955# define R300_TXO_MICRO_TILE_SQUARE (2 << 3)
955# define R300_TXO_OFFSET_MASK 0xffffffe0 956# define R300_TXO_OFFSET_MASK 0xffffffe0
956# define R300_TXO_OFFSET_SHIFT 5 957# define R300_TXO_OFFSET_SHIFT 5
957 /* END: Guess from R200 */ 958 /* END: Guess from R200 */
@@ -1360,6 +1361,7 @@
1360# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */ 1361# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
1361# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */ 1362# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
1362# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */ 1363# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
1364# define R300_COLOR_MICROTILE_SQUARE_ENABLE (2 << 17)
1363# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */ 1365# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
1364# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */ 1366# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
1365# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */ 1367# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index d9373246c97f..c7593b8f58ee 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -40,28 +40,6 @@ static void r420_set_reg_safe(struct radeon_device *rdev)
40 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm); 40 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
41} 41}
42 42
43int r420_mc_init(struct radeon_device *rdev)
44{
45 int r;
46
47 /* Setup GPU memory space */
48 rdev->mc.vram_location = 0xFFFFFFFFUL;
49 rdev->mc.gtt_location = 0xFFFFFFFFUL;
50 if (rdev->flags & RADEON_IS_AGP) {
51 r = radeon_agp_init(rdev);
52 if (r) {
53 radeon_agp_disable(rdev);
54 } else {
55 rdev->mc.gtt_location = rdev->mc.agp_base;
56 }
57 }
58 r = radeon_mc_setup(rdev);
59 if (r) {
60 return r;
61 }
62 return 0;
63}
64
65void r420_pipes_init(struct radeon_device *rdev) 43void r420_pipes_init(struct radeon_device *rdev)
66{ 44{
67 unsigned tmp; 45 unsigned tmp;
@@ -69,7 +47,8 @@ void r420_pipes_init(struct radeon_device *rdev)
69 unsigned num_pipes; 47 unsigned num_pipes;
70 48
71 /* GA_ENHANCE workaround TCL deadlock issue */ 49 /* GA_ENHANCE workaround TCL deadlock issue */
72 WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)); 50 WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
51 (1 << 2) | (1 << 3));
73 /* add idle wait as per freedesktop.org bug 24041 */ 52 /* add idle wait as per freedesktop.org bug 24041 */
74 if (r100_gui_wait_for_idle(rdev)) { 53 if (r100_gui_wait_for_idle(rdev)) {
75 printk(KERN_WARNING "Failed to wait GUI idle while " 54 printk(KERN_WARNING "Failed to wait GUI idle while "
@@ -97,17 +76,17 @@ void r420_pipes_init(struct radeon_device *rdev)
97 tmp = (7 << 1); 76 tmp = (7 << 1);
98 break; 77 break;
99 } 78 }
100 WREG32(0x42C8, (1 << num_pipes) - 1); 79 WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
101 /* Sub pixel 1/12 so we can have 4K rendering according to doc */ 80 /* Sub pixel 1/12 so we can have 4K rendering according to doc */
102 tmp |= (1 << 4) | (1 << 0); 81 tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
103 WREG32(0x4018, tmp); 82 WREG32(R300_GB_TILE_CONFIG, tmp);
104 if (r100_gui_wait_for_idle(rdev)) { 83 if (r100_gui_wait_for_idle(rdev)) {
105 printk(KERN_WARNING "Failed to wait GUI idle while " 84 printk(KERN_WARNING "Failed to wait GUI idle while "
106 "programming pipes. Bad things might happen.\n"); 85 "programming pipes. Bad things might happen.\n");
107 } 86 }
108 87
109 tmp = RREG32(0x170C); 88 tmp = RREG32(R300_DST_PIPE_CONFIG);
110 WREG32(0x170C, tmp | (1 << 31)); 89 WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
111 90
112 WREG32(R300_RB2D_DSTCACHE_MODE, 91 WREG32(R300_RB2D_DSTCACHE_MODE,
113 RREG32(R300_RB2D_DSTCACHE_MODE) | 92 RREG32(R300_RB2D_DSTCACHE_MODE) |
@@ -348,13 +327,15 @@ int r420_init(struct radeon_device *rdev)
348 radeon_get_clock_info(rdev->ddev); 327 radeon_get_clock_info(rdev->ddev);
349 /* Initialize power management */ 328 /* Initialize power management */
350 radeon_pm_init(rdev); 329 radeon_pm_init(rdev);
351 /* Get vram informations */ 330 /* initialize AGP */
352 r300_vram_info(rdev); 331 if (rdev->flags & RADEON_IS_AGP) {
353 /* Initialize memory controller (also test AGP) */ 332 r = radeon_agp_init(rdev);
354 r = r420_mc_init(rdev); 333 if (r) {
355 if (r) { 334 radeon_agp_disable(rdev);
356 return r; 335 }
357 } 336 }
337 /* initialize memory controller */
338 r300_mc_init(rdev);
358 r420_debugfs(rdev); 339 r420_debugfs(rdev);
359 /* Fence driver */ 340 /* Fence driver */
360 r = radeon_fence_driver_init(rdev); 341 r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 74ad89bdf2b5..0cf2ad2a5585 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -717,54 +717,62 @@
717#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988 717#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
718 718
719#define AVIVO_DC_GPIO_HPD_A 0x7e94 719#define AVIVO_DC_GPIO_HPD_A 0x7e94
720
721#define AVIVO_GPIO_0 0x7e30
722#define AVIVO_GPIO_1 0x7e40
723#define AVIVO_GPIO_2 0x7e50
724#define AVIVO_GPIO_3 0x7e60
725
726#define AVIVO_DC_GPIO_HPD_Y 0x7e9c 720#define AVIVO_DC_GPIO_HPD_Y 0x7e9c
727 721
728#define AVIVO_I2C_STATUS 0x7d30 722#define AVIVO_DC_I2C_STATUS1 0x7d30
729# define AVIVO_I2C_STATUS_DONE (1 << 0) 723# define AVIVO_DC_I2C_DONE (1 << 0)
730# define AVIVO_I2C_STATUS_NACK (1 << 1) 724# define AVIVO_DC_I2C_NACK (1 << 1)
731# define AVIVO_I2C_STATUS_HALT (1 << 2) 725# define AVIVO_DC_I2C_HALT (1 << 2)
732# define AVIVO_I2C_STATUS_GO (1 << 3) 726# define AVIVO_DC_I2C_GO (1 << 3)
733# define AVIVO_I2C_STATUS_MASK 0x7 727#define AVIVO_DC_I2C_RESET 0x7d34
734/* If radeon_mm_i2c is to be believed, this is HALT, NACK, and maybe 728# define AVIVO_DC_I2C_SOFT_RESET (1 << 0)
735 * DONE? */ 729# define AVIVO_DC_I2C_ABORT (1 << 8)
736# define AVIVO_I2C_STATUS_CMD_RESET 0x7 730#define AVIVO_DC_I2C_CONTROL1 0x7d38
737# define AVIVO_I2C_STATUS_CMD_WAIT (1 << 3) 731# define AVIVO_DC_I2C_START (1 << 0)
738#define AVIVO_I2C_STOP 0x7d34 732# define AVIVO_DC_I2C_STOP (1 << 1)
739#define AVIVO_I2C_START_CNTL 0x7d38 733# define AVIVO_DC_I2C_RECEIVE (1 << 2)
740# define AVIVO_I2C_START (1 << 8) 734# define AVIVO_DC_I2C_EN (1 << 8)
741# define AVIVO_I2C_CONNECTOR0 (0 << 16) 735# define AVIVO_DC_I2C_PIN_SELECT(x) ((x) << 16)
742# define AVIVO_I2C_CONNECTOR1 (1 << 16) 736# define AVIVO_SEL_DDC1 0
743#define R520_I2C_START (1<<0) 737# define AVIVO_SEL_DDC2 1
744#define R520_I2C_STOP (1<<1) 738# define AVIVO_SEL_DDC3 2
745#define R520_I2C_RX (1<<2) 739#define AVIVO_DC_I2C_CONTROL2 0x7d3c
746#define R520_I2C_EN (1<<8) 740# define AVIVO_DC_I2C_ADDR_COUNT(x) ((x) << 0)
747#define R520_I2C_DDC1 (0<<16) 741# define AVIVO_DC_I2C_DATA_COUNT(x) ((x) << 8)
748#define R520_I2C_DDC2 (1<<16) 742#define AVIVO_DC_I2C_CONTROL3 0x7d40
749#define R520_I2C_DDC3 (2<<16) 743# define AVIVO_DC_I2C_DATA_DRIVE_EN (1 << 0)
750#define R520_I2C_DDC_MASK (3<<16) 744# define AVIVO_DC_I2C_DATA_DRIVE_SEL (1 << 1)
751#define AVIVO_I2C_CONTROL2 0x7d3c 745# define AVIVO_DC_I2C_CLK_DRIVE_EN (1 << 7)
752# define AVIVO_I2C_7D3C_SIZE_SHIFT 8 746# define AVIVO_DC_I2C_RD_INTRA_BYTE_DELAY(x) ((x) << 8)
753# define AVIVO_I2C_7D3C_SIZE_MASK (0xf << 8) 747# define AVIVO_DC_I2C_WR_INTRA_BYTE_DELAY(x) ((x) << 16)
754#define AVIVO_I2C_CONTROL3 0x7d40 748# define AVIVO_DC_I2C_TIME_LIMIT(x) ((x) << 24)
755/* Reading is done 4 bytes at a time: read the bottom 8 bits from 749#define AVIVO_DC_I2C_DATA 0x7d44
756 * 7d44, four times in a row. 750#define AVIVO_DC_I2C_INTERRUPT_CONTROL 0x7d48
757 * Writing is a little more complex. First write DATA with 751# define AVIVO_DC_I2C_INTERRUPT_STATUS (1 << 0)
758 * 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic 752# define AVIVO_DC_I2C_INTERRUPT_AK (1 << 8)
759 * magic number, zz is, I think, the slave address, and yy is the byte 753# define AVIVO_DC_I2C_INTERRUPT_ENABLE (1 << 16)
760 * you want to write. */ 754#define AVIVO_DC_I2C_ARBITRATION 0x7d50
761#define AVIVO_I2C_DATA 0x7d44 755# define AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C (1 << 0)
762#define R520_I2C_ADDR_COUNT_MASK (0x7) 756# define AVIVO_DC_I2C_SW_CAN_USE_I2C (1 << 1)
763#define R520_I2C_DATA_COUNT_SHIFT (8) 757# define AVIVO_DC_I2C_SW_DONE_USING_I2C (1 << 8)
764#define R520_I2C_DATA_COUNT_MASK (0xF00) 758# define AVIVO_DC_I2C_HW_NEEDS_I2C (1 << 9)
765#define AVIVO_I2C_CNTL 0x7d50 759# define AVIVO_DC_I2C_ABORT_HDCP_I2C (1 << 16)
766# define AVIVO_I2C_EN (1 << 0) 760# define AVIVO_DC_I2C_HW_USING_I2C (1 << 17)
767# define AVIVO_I2C_RESET (1 << 8) 761
762#define AVIVO_DC_GPIO_DDC1_MASK 0x7e40
763#define AVIVO_DC_GPIO_DDC1_A 0x7e44
764#define AVIVO_DC_GPIO_DDC1_EN 0x7e48
765#define AVIVO_DC_GPIO_DDC1_Y 0x7e4c
766
767#define AVIVO_DC_GPIO_DDC2_MASK 0x7e50
768#define AVIVO_DC_GPIO_DDC2_A 0x7e54
769#define AVIVO_DC_GPIO_DDC2_EN 0x7e58
770#define AVIVO_DC_GPIO_DDC2_Y 0x7e5c
771
772#define AVIVO_DC_GPIO_DDC3_MASK 0x7e60
773#define AVIVO_DC_GPIO_DDC3_A 0x7e64
774#define AVIVO_DC_GPIO_DDC3_EN 0x7e68
775#define AVIVO_DC_GPIO_DDC3_Y 0x7e6c
768 776
769#define AVIVO_DISP_INTERRUPT_STATUS 0x7edc 777#define AVIVO_DISP_INTERRUPT_STATUS 0x7edc
770# define AVIVO_D1_VBLANK_INTERRUPT (1 << 4) 778# define AVIVO_D1_VBLANK_INTERRUPT (1 << 4)
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index ddf5731eba0d..2b8a5dd13516 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -119,13 +119,15 @@ static void r520_vram_get_type(struct radeon_device *rdev)
119 rdev->mc.vram_width *= 2; 119 rdev->mc.vram_width *= 2;
120} 120}
121 121
122void r520_vram_info(struct radeon_device *rdev) 122void r520_mc_init(struct radeon_device *rdev)
123{ 123{
124 fixed20_12 a; 124 fixed20_12 a;
125 125
126 r520_vram_get_type(rdev); 126 r520_vram_get_type(rdev);
127
128 r100_vram_init_sizes(rdev); 127 r100_vram_init_sizes(rdev);
128 radeon_vram_location(rdev, &rdev->mc, 0);
129 if (!(rdev->flags & RADEON_IS_AGP))
130 radeon_gtt_location(rdev, &rdev->mc);
129 /* FIXME: we should enforce default clock in case GPU is not in 131 /* FIXME: we should enforce default clock in case GPU is not in
130 * default setup 132 * default setup
131 */ 133 */
@@ -267,12 +269,15 @@ int r520_init(struct radeon_device *rdev)
267 radeon_get_clock_info(rdev->ddev); 269 radeon_get_clock_info(rdev->ddev);
268 /* Initialize power management */ 270 /* Initialize power management */
269 radeon_pm_init(rdev); 271 radeon_pm_init(rdev);
270 /* Get vram informations */ 272 /* initialize AGP */
271 r520_vram_info(rdev); 273 if (rdev->flags & RADEON_IS_AGP) {
272 /* Initialize memory controller (also test AGP) */ 274 r = radeon_agp_init(rdev);
273 r = r420_mc_init(rdev); 275 if (r) {
274 if (r) 276 radeon_agp_disable(rdev);
275 return r; 277 }
278 }
279 /* initialize memory controller */
280 r520_mc_init(rdev);
276 rv515_debugfs(rdev); 281 rv515_debugfs(rdev);
277 /* Fence driver */ 282 /* Fence driver */
278 r = radeon_fence_driver_init(rdev); 283 r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2ffcf5a03551..c52290197292 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -353,23 +353,14 @@ void r600_hpd_fini(struct radeon_device *rdev)
353/* 353/*
354 * R600 PCIE GART 354 * R600 PCIE GART
355 */ 355 */
356int r600_gart_clear_page(struct radeon_device *rdev, int i)
357{
358 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
359 u64 pte;
360
361 if (i < 0 || i > rdev->gart.num_gpu_pages)
362 return -EINVAL;
363 pte = 0;
364 writeq(pte, ((void __iomem *)ptr) + (i * 8));
365 return 0;
366}
367
368void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) 356void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
369{ 357{
370 unsigned i; 358 unsigned i;
371 u32 tmp; 359 u32 tmp;
372 360
361 /* flush hdp cache so updates hit vram */
362 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
363
373 WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12); 364 WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
374 WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12); 365 WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
375 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); 366 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
@@ -416,6 +407,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
416 r = radeon_gart_table_vram_pin(rdev); 407 r = radeon_gart_table_vram_pin(rdev);
417 if (r) 408 if (r)
418 return r; 409 return r;
410 radeon_gart_restore(rdev);
419 411
420 /* Setup L2 cache */ 412 /* Setup L2 cache */
421 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 413 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -619,6 +611,68 @@ static void r600_mc_program(struct radeon_device *rdev)
619 rv515_vga_render_disable(rdev); 611 rv515_vga_render_disable(rdev);
620} 612}
621 613
614/**
615 * r600_vram_gtt_location - try to find VRAM & GTT location
 616 * @rdev: radeon device structure holding all necessary information
 617 * @mc: memory controller structure holding memory information
 618 *
 619 * The function tries to place VRAM at the same address as it has in the
 620 * CPU (PCI) address space, as some GPUs seem to have issues when it is
 621 * reprogrammed to a different address.
 622 *
 623 * If there is not enough space to fit the non-visible VRAM after the
 624 * aperture, then we limit the VRAM size to the aperture.
 625 *
 626 * If we are using AGP, then place VRAM adjacent to the AGP aperture, as we
 627 * need them to be contiguous from the GPU point of view so that we can
 628 * program the GPU to catch accesses outside them (weird GPU policy, see ??).
 629 *
 630 * This function never fails; the worst case is limiting VRAM or GTT.
 631 *
 632 * Note: GTT start, end and size should be initialized before calling this
 633 * function on an AGP platform.
634 */
635void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
636{
637 u64 size_bf, size_af;
638
639 if (mc->mc_vram_size > 0xE0000000) {
640 /* leave room for at least 512M GTT */
641 dev_warn(rdev->dev, "limiting VRAM\n");
642 mc->real_vram_size = 0xE0000000;
643 mc->mc_vram_size = 0xE0000000;
644 }
645 if (rdev->flags & RADEON_IS_AGP) {
646 size_bf = mc->gtt_start;
647 size_af = 0xFFFFFFFF - mc->gtt_end + 1;
648 if (size_bf > size_af) {
649 if (mc->mc_vram_size > size_bf) {
650 dev_warn(rdev->dev, "limiting VRAM\n");
651 mc->real_vram_size = size_bf;
652 mc->mc_vram_size = size_bf;
653 }
654 mc->vram_start = mc->gtt_start - mc->mc_vram_size;
655 } else {
656 if (mc->mc_vram_size > size_af) {
657 dev_warn(rdev->dev, "limiting VRAM\n");
658 mc->real_vram_size = size_af;
659 mc->mc_vram_size = size_af;
660 }
661 mc->vram_start = mc->gtt_end;
662 }
663 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
664 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
665 mc->mc_vram_size >> 20, mc->vram_start,
666 mc->vram_end, mc->real_vram_size >> 20);
667 } else {
668 u64 base = 0;
669 if (rdev->flags & RADEON_IS_IGP)
670 base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
671 radeon_vram_location(rdev, &rdev->mc, base);
672 radeon_gtt_location(rdev, mc);
673 }
674}
675
622int r600_mc_init(struct radeon_device *rdev) 676int r600_mc_init(struct radeon_device *rdev)
623{ 677{
624 fixed20_12 a; 678 fixed20_12 a;
@@ -658,75 +712,21 @@ int r600_mc_init(struct radeon_device *rdev)
658 /* Setup GPU memory space */ 712 /* Setup GPU memory space */
659 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 713 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
660 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 714 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
661 715 rdev->mc.visible_vram_size = rdev->mc.aper_size;
662 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) 716 /* FIXME remove this once we support unmappable VRAM */
717 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
663 rdev->mc.mc_vram_size = rdev->mc.aper_size; 718 rdev->mc.mc_vram_size = rdev->mc.aper_size;
664
665 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
666 rdev->mc.real_vram_size = rdev->mc.aper_size; 719 rdev->mc.real_vram_size = rdev->mc.aper_size;
667
668 if (rdev->flags & RADEON_IS_AGP) {
669 /* gtt_size is setup by radeon_agp_init */
670 rdev->mc.gtt_location = rdev->mc.agp_base;
671 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
672 /* Try to put vram before or after AGP because we
673 * we want SYSTEM_APERTURE to cover both VRAM and
674 * AGP so that GPU can catch out of VRAM/AGP access
675 */
676 if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
677 /* Enough place before */
678 rdev->mc.vram_location = rdev->mc.gtt_location -
679 rdev->mc.mc_vram_size;
680 } else if (tmp > rdev->mc.mc_vram_size) {
681 /* Enough place after */
682 rdev->mc.vram_location = rdev->mc.gtt_location +
683 rdev->mc.gtt_size;
684 } else {
685 /* Try to setup VRAM then AGP might not
686 * not work on some card
687 */
688 rdev->mc.vram_location = 0x00000000UL;
689 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
690 }
691 } else {
692 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
693 rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
694 0xFFFF) << 24;
695 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
696 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
697 /* Enough place after vram */
698 rdev->mc.gtt_location = tmp;
699 } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
700 /* Enough place before vram */
701 rdev->mc.gtt_location = 0;
702 } else {
703 /* Not enough place after or before shrink
704 * gart size
705 */
706 if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
707 rdev->mc.gtt_location = 0;
708 rdev->mc.gtt_size = rdev->mc.vram_location;
709 } else {
710 rdev->mc.gtt_location = tmp;
711 rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
712 }
713 }
714 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
715 } 720 }
716 rdev->mc.vram_start = rdev->mc.vram_location; 721 r600_vram_gtt_location(rdev, &rdev->mc);
717 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
718 rdev->mc.gtt_start = rdev->mc.gtt_location;
719 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
720 /* FIXME: we should enforce default clock in case GPU is not in 722 /* FIXME: we should enforce default clock in case GPU is not in
721 * default setup 723 * default setup
722 */ 724 */
723 a.full = rfixed_const(100); 725 a.full = rfixed_const(100);
724 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); 726 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
725 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 727 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
726
727 if (rdev->flags & RADEON_IS_IGP) 728 if (rdev->flags & RADEON_IS_IGP)
728 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 729 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
729
730 return 0; 730 return 0;
731} 731}
732 732
@@ -981,6 +981,9 @@ void r600_gpu_init(struct radeon_device *rdev)
981{ 981{
982 u32 tiling_config; 982 u32 tiling_config;
983 u32 ramcfg; 983 u32 ramcfg;
984 u32 backend_map;
985 u32 cc_rb_backend_disable;
986 u32 cc_gc_shader_pipe_config;
984 u32 tmp; 987 u32 tmp;
985 int i, j; 988 int i, j;
986 u32 sq_config; 989 u32 sq_config;
@@ -1090,8 +1093,11 @@ void r600_gpu_init(struct radeon_device *rdev)
1090 default: 1093 default:
1091 break; 1094 break;
1092 } 1095 }
1096 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1097 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1093 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 1098 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1094 tiling_config |= GROUP_SIZE(0); 1099 tiling_config |= GROUP_SIZE(0);
1100 rdev->config.r600.tiling_group_size = 256;
1095 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; 1101 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1096 if (tmp > 3) { 1102 if (tmp > 3) {
1097 tiling_config |= ROW_TILING(3); 1103 tiling_config |= ROW_TILING(3);
@@ -1101,24 +1107,33 @@ void r600_gpu_init(struct radeon_device *rdev)
1101 tiling_config |= SAMPLE_SPLIT(tmp); 1107 tiling_config |= SAMPLE_SPLIT(tmp);
1102 } 1108 }
1103 tiling_config |= BANK_SWAPS(1); 1109 tiling_config |= BANK_SWAPS(1);
1104 tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes, 1110
1105 rdev->config.r600.max_backends, 1111 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1106 (0xff << rdev->config.r600.max_backends) & 0xff); 1112 cc_rb_backend_disable |=
1107 tiling_config |= BACKEND_MAP(tmp); 1113 BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
1114
1115 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
1116 cc_gc_shader_pipe_config |=
1117 INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
1118 cc_gc_shader_pipe_config |=
1119 INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
1120
1121 backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
1122 (R6XX_MAX_BACKENDS -
1123 r600_count_pipe_bits((cc_rb_backend_disable &
1124 R6XX_MAX_BACKENDS_MASK) >> 16)),
1125 (cc_rb_backend_disable >> 16));
1126
1127 tiling_config |= BACKEND_MAP(backend_map);
1108 WREG32(GB_TILING_CONFIG, tiling_config); 1128 WREG32(GB_TILING_CONFIG, tiling_config);
1109 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 1129 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1110 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); 1130 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1111 1131
1112 tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
1113 WREG32(CC_RB_BACKEND_DISABLE, tmp);
1114
1115 /* Setup pipes */ 1132 /* Setup pipes */
1116 tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK); 1133 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1117 tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK); 1134 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1118 WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
1119 WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);
1120 1135
1121 tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK); 1136 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1122 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 1137 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1123 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); 1138 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1124 1139
@@ -1783,12 +1798,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
1783 struct radeon_fence *fence) 1798 struct radeon_fence *fence)
1784{ 1799{
1785 /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */ 1800 /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */
1801
1802 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
1803 radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
1804 /* wait for 3D idle clean */
1805 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1806 radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
1807 radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
1786 /* Emit fence sequence & fire IRQ */ 1808 /* Emit fence sequence & fire IRQ */
1787 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1809 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1788 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 1810 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
1789 radeon_ring_write(rdev, fence->seq); 1811 radeon_ring_write(rdev, fence->seq);
1790 radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
1791 radeon_ring_write(rdev, 1);
1792 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ 1812 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
1793 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); 1813 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
1794 radeon_ring_write(rdev, RB_INT_STAT); 1814 radeon_ring_write(rdev, RB_INT_STAT);
@@ -2745,6 +2765,7 @@ restart_ih:
2745 case 0: /* D1 vblank */ 2765 case 0: /* D1 vblank */
2746 if (disp_int & LB_D1_VBLANK_INTERRUPT) { 2766 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
2747 drm_handle_vblank(rdev->ddev, 0); 2767 drm_handle_vblank(rdev->ddev, 0);
2768 wake_up(&rdev->irq.vblank_queue);
2748 disp_int &= ~LB_D1_VBLANK_INTERRUPT; 2769 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2749 DRM_DEBUG("IH: D1 vblank\n"); 2770 DRM_DEBUG("IH: D1 vblank\n");
2750 } 2771 }
@@ -2765,6 +2786,7 @@ restart_ih:
2765 case 0: /* D2 vblank */ 2786 case 0: /* D2 vblank */
2766 if (disp_int & LB_D2_VBLANK_INTERRUPT) { 2787 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
2767 drm_handle_vblank(rdev->ddev, 1); 2788 drm_handle_vblank(rdev->ddev, 1);
2789 wake_up(&rdev->irq.vblank_queue);
2768 disp_int &= ~LB_D2_VBLANK_INTERRUPT; 2790 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
2769 DRM_DEBUG("IH: D2 vblank\n"); 2791 DRM_DEBUG("IH: D2 vblank\n");
2770 } 2792 }
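
The new r600_vram_gtt_location() above decides, for the AGP case, whether VRAM goes before or after the GTT aperture in the 32-bit GPU address space, shrinking VRAM when it does not fit. Here is a standalone illustration of that decision; it uses plain C with made-up names and mirrors the hunk literally (including using gtt_end directly as the start of the "after" placement), so it is a sketch of the logic rather than the kernel function itself.

	#include <stdint.h>
	#include <stdio.h>

	/* Put VRAM in whichever gap around the GTT aperture is larger,
	 * limiting VRAM if it does not fit (the "limiting VRAM" warning). */
	static void place_vram(uint64_t gtt_start, uint64_t gtt_end, uint64_t vram_size)
	{
		uint64_t size_bf = gtt_start;                   /* space before GTT */
		uint64_t size_af = 0xFFFFFFFFull - gtt_end + 1; /* space after GTT */
		uint64_t vram_start;

		if (size_bf > size_af) {
			if (vram_size > size_bf)
				vram_size = size_bf;
			vram_start = gtt_start - vram_size;
		} else {
			if (vram_size > size_af)
				vram_size = size_af;
			vram_start = gtt_end;
		}
		printf("VRAM %lluM at 0x%08llX - 0x%08llX\n",
		       (unsigned long long)(vram_size >> 20),
		       (unsigned long long)vram_start,
		       (unsigned long long)(vram_start + vram_size - 1));
	}

	int main(void)
	{
		/* example: 512M GTT aperture starting at 256M, 1G of VRAM */
		place_vram(0x10000000ull, 0x2FFFFFFFull, 0x40000000ull);
		return 0;
	}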
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 0dcb6904c4ff..db928016d034 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
35 */ 35 */
36static int r600_audio_chipset_supported(struct radeon_device *rdev) 36static int r600_audio_chipset_supported(struct radeon_device *rdev)
37{ 37{
38 return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710) 38 return rdev->family >= CHIP_R600
39 || rdev->family == CHIP_RS600 39 || rdev->family == CHIP_RS600
40 || rdev->family == CHIP_RS690 40 || rdev->family == CHIP_RS690
41 || rdev->family == CHIP_RS740; 41 || rdev->family == CHIP_RS740;
@@ -147,15 +147,23 @@ static void r600_audio_update_hdmi(unsigned long param)
147} 147}
148 148
149/* 149/*
150 * turn on/off audio engine
151 */
152static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
153{
154 DRM_INFO("%s audio support", enable ? "Enabling" : "Disabling");
155 WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
156}
157
158/*
150 * initialize the audio vars and register the update timer 159 * initialize the audio vars and register the update timer
151 */ 160 */
152int r600_audio_init(struct radeon_device *rdev) 161int r600_audio_init(struct radeon_device *rdev)
153{ 162{
154 if (!r600_audio_chipset_supported(rdev)) 163 if (!radeon_audio || !r600_audio_chipset_supported(rdev))
155 return 0; 164 return 0;
156 165
157 DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling"); 166 r600_audio_engine_enable(rdev, true);
158 WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
159 167
160 rdev->audio_channels = -1; 168 rdev->audio_channels = -1;
161 rdev->audio_rate = -1; 169 rdev->audio_rate = -1;
@@ -258,9 +266,10 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
258 */ 266 */
259void r600_audio_fini(struct radeon_device *rdev) 267void r600_audio_fini(struct radeon_device *rdev)
260{ 268{
261 if (!r600_audio_chipset_supported(rdev)) 269 if (!radeon_audio || !r600_audio_chipset_supported(rdev))
262 return; 270 return;
263 271
264 del_timer(&rdev->audio_timer); 272 del_timer(&rdev->audio_timer);
265 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); 273
274 r600_audio_engine_enable(rdev, false);
266} 275}
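
The audio hunks fold the enable/disable write into a single r600_audio_engine_enable() helper that performs a masked update of R600_AUDIO_ENABLE. The snippet below is a self-contained sketch of that masked read-modify-write; reg_read()/reg_write() are placeholders, the register offset is assumed for the example, and the keep-bits-in-mask semantics reflects my understanding of the driver's WREG32_P-style helper rather than anything spelled out in the hunks.

	#include <stdint.h>

	/* Placeholder MMIO accessors standing in for RREG32/WREG32. */
	extern uint32_t reg_read(uint32_t reg);
	extern void reg_write(uint32_t reg, uint32_t val);

	#define R600_AUDIO_ENABLE_REG	0x7300u	/* offset assumed for the example */

	/* Masked write: keep the register bits selected by 'mask', replace
	 * the rest with 'val' (the WREG32_P pattern, as I understand it). */
	void reg_write_masked(uint32_t reg, uint32_t val, uint32_t mask)
	{
		uint32_t tmp = reg_read(reg);

		tmp &= mask;
		tmp |= val & ~mask;
		reg_write(reg, tmp);
	}

	/* Mirrors r600_audio_engine_enable(): toggle bits 31 and 24 of the
	 * audio enable register while leaving every other bit untouched. */
	void audio_engine_enable(int enable)
	{
		reg_write_masked(R600_AUDIO_ENABLE_REG,
				 enable ? 0x81000000u : 0x0u, ~0x81000000u);
	}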
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 5ea432347589..f4fb88ece2bb 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -49,7 +49,7 @@ set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64
49 RING_LOCALS; 49 RING_LOCALS;
50 DRM_DEBUG("\n"); 50 DRM_DEBUG("\n");
51 51
52 h = (h + 7) & ~7; 52 h = ALIGN(h, 8);
53 if (h < 8) 53 if (h < 8)
54 h = 8; 54 h = 8;
55 55
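
The blit hunks replace open-coded rounding such as (h + 7) & ~7 with ALIGN(h, 8) (and (x + 0xf) & ~0xf with ALIGN(x, 0x10) in the KMS variant). A quick self-check of that equivalence is below; ALIGN is re-stated locally so the snippet compiles on its own, and like the kernel macro it only holds for power-of-two alignments.

	#include <assert.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* power-of-two 'a' only */

	int main(void)
	{
		unsigned int h;

		for (h = 0; h < 1024; h++) {
			assert(ALIGN(h, 8)    == ((h + 7)   & ~7u));
			assert(ALIGN(h, 0x10) == ((h + 0xf) & ~0xfu));
		}
		return 0;
	}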
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 446b765ac72a..f6c6c77db7e0 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -25,7 +25,7 @@ set_render_target(struct radeon_device *rdev, int format,
25 u32 cb_color_info; 25 u32 cb_color_info;
26 int pitch, slice; 26 int pitch, slice;
27 27
28 h = (h + 7) & ~7; 28 h = ALIGN(h, 8);
29 if (h < 8) 29 if (h < 8)
30 h = 8; 30 h = 8;
31 31
@@ -396,15 +396,13 @@ set_default_state(struct radeon_device *rdev)
396 NUM_ES_STACK_ENTRIES(num_es_stack_entries)); 396 NUM_ES_STACK_ENTRIES(num_es_stack_entries));
397 397
398 /* emit an IB pointing at default state */ 398 /* emit an IB pointing at default state */
399 dwords = (rdev->r600_blit.state_len + 0xf) & ~0xf; 399 dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
400 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; 400 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
401 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 401 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
402 radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); 402 radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
403 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); 403 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
404 radeon_ring_write(rdev, dwords); 404 radeon_ring_write(rdev, dwords);
405 405
406 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
407 radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
408 /* SQ config */ 406 /* SQ config */
409 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6)); 407 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
410 radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 408 radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -578,9 +576,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
578 ring_size = num_loops * dwords_per_loop; 576 ring_size = num_loops * dwords_per_loop;
579 /* set default + shaders */ 577 /* set default + shaders */
580 ring_size += 40; /* shaders + def state */ 578 ring_size += 40; /* shaders + def state */
581 ring_size += 7; /* fence emit for VB IB */ 579 ring_size += 10; /* fence emit for VB IB */
582 ring_size += 5; /* done copy */ 580 ring_size += 5; /* done copy */
583 ring_size += 7; /* fence emit for done copy */ 581 ring_size += 10; /* fence emit for done copy */
584 r = radeon_ring_lock(rdev, ring_size); 582 r = radeon_ring_lock(rdev, ring_size);
585 if (r) 583 if (r)
586 return r; 584 return r;
@@ -594,13 +592,6 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
594{ 592{
595 int r; 593 int r;
596 594
597 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
598 radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
599 /* wait for 3D idle clean */
600 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
601 radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
602 radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
603
604 if (rdev->r600_blit.vb_ib) 595 if (rdev->r600_blit.vb_ib)
605 r600_vb_ib_put(rdev); 596 r600_vb_ib_put(rdev);
606 597
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index d745e815c2e8..a112c59f9d82 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -9,11 +9,6 @@ const u32 r6xx_default_state[] =
9 0xc0012800, 9 0xc0012800,
10 0x80000000, 10 0x80000000,
11 0x80000000, 11 0x80000000,
12 0xc0004600,
13 0x00000016,
14 0xc0016800,
15 0x00000010,
16 0x00028000,
17 0xc0016800, 12 0xc0016800,
18 0x00000010, 13 0x00000010,
19 0x00008000, 14 0x00008000,
@@ -531,11 +526,6 @@ const u32 r7xx_default_state[] =
531 0xc0012800, 526 0xc0012800,
532 0x80000000, 527 0x80000000,
533 0x80000000, 528 0x80000000,
534 0xc0004600,
535 0x00000016,
536 0xc0016800,
537 0x00000010,
538 0x00028000,
539 0xc0016800, 529 0xc0016800,
540 0x00000010, 530 0x00000010,
541 0x00008000, 531 0x00008000,
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 75bcf35a0931..40416c068d9f 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -734,8 +734,8 @@ static void r600_gfx_init(struct drm_device *dev,
734 u32 hdp_host_path_cntl; 734 u32 hdp_host_path_cntl;
735 u32 backend_map; 735 u32 backend_map;
736 u32 gb_tiling_config = 0; 736 u32 gb_tiling_config = 0;
737 u32 cc_rb_backend_disable = 0; 737 u32 cc_rb_backend_disable;
738 u32 cc_gc_shader_pipe_config = 0; 738 u32 cc_gc_shader_pipe_config;
739 u32 ramcfg; 739 u32 ramcfg;
740 740
741 /* setup chip specs */ 741 /* setup chip specs */
@@ -857,29 +857,44 @@ static void r600_gfx_init(struct drm_device *dev,
857 857
858 gb_tiling_config |= R600_BANK_SWAPS(1); 858 gb_tiling_config |= R600_BANK_SWAPS(1);
859 859
860 backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, 860 cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
861 dev_priv->r600_max_backends, 861 cc_rb_backend_disable |=
862 (0xff << dev_priv->r600_max_backends) & 0xff); 862 R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
863 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
864 863
865 cc_gc_shader_pipe_config = 864 cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
865 cc_gc_shader_pipe_config |=
866 R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK); 866 R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK);
867 cc_gc_shader_pipe_config |= 867 cc_gc_shader_pipe_config |=
868 R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK); 868 R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK);
869 869
870 cc_rb_backend_disable = 870 backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
871 R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK); 871 (R6XX_MAX_BACKENDS -
872 r600_count_pipe_bits((cc_rb_backend_disable &
873 R6XX_MAX_BACKENDS_MASK) >> 16)),
874 (cc_rb_backend_disable >> 16));
875 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
872 876
873 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config); 877 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
874 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 878 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
875 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 879 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
880 if (gb_tiling_config & 0xc0) {
881 dev_priv->r600_group_size = 512;
882 } else {
883 dev_priv->r600_group_size = 256;
884 }
885 dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
886 if (gb_tiling_config & 0x30) {
887 dev_priv->r600_nbanks = 8;
888 } else {
889 dev_priv->r600_nbanks = 4;
890 }
876 891
877 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 892 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
878 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 893 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
879 RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 894 RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
880 895
881 num_qd_pipes = 896 num_qd_pipes =
882 R6XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK); 897 R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
883 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK); 898 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
884 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK); 899 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
885 900
@@ -1151,7 +1166,8 @@ static void r600_gfx_init(struct drm_device *dev,
1151 1166
1152} 1167}
1153 1168
1154static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, 1169static u32 r700_get_tile_pipe_to_backend_map(drm_radeon_private_t *dev_priv,
1170 u32 num_tile_pipes,
1155 u32 num_backends, 1171 u32 num_backends,
1156 u32 backend_disable_mask) 1172 u32 backend_disable_mask)
1157{ 1173{
@@ -1162,6 +1178,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1162 u32 swizzle_pipe[R7XX_MAX_PIPES]; 1178 u32 swizzle_pipe[R7XX_MAX_PIPES];
1163 u32 cur_backend; 1179 u32 cur_backend;
1164 u32 i; 1180 u32 i;
1181 bool force_no_swizzle;
1165 1182
1166 if (num_tile_pipes > R7XX_MAX_PIPES) 1183 if (num_tile_pipes > R7XX_MAX_PIPES)
1167 num_tile_pipes = R7XX_MAX_PIPES; 1184 num_tile_pipes = R7XX_MAX_PIPES;
@@ -1191,6 +1208,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1191 if (enabled_backends_count != num_backends) 1208 if (enabled_backends_count != num_backends)
1192 num_backends = enabled_backends_count; 1209 num_backends = enabled_backends_count;
1193 1210
1211 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1212 case CHIP_RV770:
1213 case CHIP_RV730:
1214 force_no_swizzle = false;
1215 break;
1216 case CHIP_RV710:
1217 case CHIP_RV740:
1218 default:
1219 force_no_swizzle = true;
1220 break;
1221 }
1222
1194 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES); 1223 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
1195 switch (num_tile_pipes) { 1224 switch (num_tile_pipes) {
1196 case 1: 1225 case 1:
@@ -1201,49 +1230,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1201 swizzle_pipe[1] = 1; 1230 swizzle_pipe[1] = 1;
1202 break; 1231 break;
1203 case 3: 1232 case 3:
1204 swizzle_pipe[0] = 0; 1233 if (force_no_swizzle) {
1205 swizzle_pipe[1] = 2; 1234 swizzle_pipe[0] = 0;
1206 swizzle_pipe[2] = 1; 1235 swizzle_pipe[1] = 1;
1236 swizzle_pipe[2] = 2;
1237 } else {
1238 swizzle_pipe[0] = 0;
1239 swizzle_pipe[1] = 2;
1240 swizzle_pipe[2] = 1;
1241 }
1207 break; 1242 break;
1208 case 4: 1243 case 4:
1209 swizzle_pipe[0] = 0; 1244 if (force_no_swizzle) {
1210 swizzle_pipe[1] = 2; 1245 swizzle_pipe[0] = 0;
1211 swizzle_pipe[2] = 3; 1246 swizzle_pipe[1] = 1;
1212 swizzle_pipe[3] = 1; 1247 swizzle_pipe[2] = 2;
1248 swizzle_pipe[3] = 3;
1249 } else {
1250 swizzle_pipe[0] = 0;
1251 swizzle_pipe[1] = 2;
1252 swizzle_pipe[2] = 3;
1253 swizzle_pipe[3] = 1;
1254 }
1213 break; 1255 break;
1214 case 5: 1256 case 5:
1215 swizzle_pipe[0] = 0; 1257 if (force_no_swizzle) {
1216 swizzle_pipe[1] = 2; 1258 swizzle_pipe[0] = 0;
1217 swizzle_pipe[2] = 4; 1259 swizzle_pipe[1] = 1;
1218 swizzle_pipe[3] = 1; 1260 swizzle_pipe[2] = 2;
1219 swizzle_pipe[4] = 3; 1261 swizzle_pipe[3] = 3;
1262 swizzle_pipe[4] = 4;
1263 } else {
1264 swizzle_pipe[0] = 0;
1265 swizzle_pipe[1] = 2;
1266 swizzle_pipe[2] = 4;
1267 swizzle_pipe[3] = 1;
1268 swizzle_pipe[4] = 3;
1269 }
1220 break; 1270 break;
1221 case 6: 1271 case 6:
1222 swizzle_pipe[0] = 0; 1272 if (force_no_swizzle) {
1223 swizzle_pipe[1] = 2; 1273 swizzle_pipe[0] = 0;
1224 swizzle_pipe[2] = 4; 1274 swizzle_pipe[1] = 1;
1225 swizzle_pipe[3] = 5; 1275 swizzle_pipe[2] = 2;
1226 swizzle_pipe[4] = 3; 1276 swizzle_pipe[3] = 3;
1227 swizzle_pipe[5] = 1; 1277 swizzle_pipe[4] = 4;
1278 swizzle_pipe[5] = 5;
1279 } else {
1280 swizzle_pipe[0] = 0;
1281 swizzle_pipe[1] = 2;
1282 swizzle_pipe[2] = 4;
1283 swizzle_pipe[3] = 5;
1284 swizzle_pipe[4] = 3;
1285 swizzle_pipe[5] = 1;
1286 }
1228 break; 1287 break;
1229 case 7: 1288 case 7:
1230 swizzle_pipe[0] = 0; 1289 if (force_no_swizzle) {
1231 swizzle_pipe[1] = 2; 1290 swizzle_pipe[0] = 0;
1232 swizzle_pipe[2] = 4; 1291 swizzle_pipe[1] = 1;
1233 swizzle_pipe[3] = 6; 1292 swizzle_pipe[2] = 2;
1234 swizzle_pipe[4] = 3; 1293 swizzle_pipe[3] = 3;
1235 swizzle_pipe[5] = 1; 1294 swizzle_pipe[4] = 4;
1236 swizzle_pipe[6] = 5; 1295 swizzle_pipe[5] = 5;
1296 swizzle_pipe[6] = 6;
1297 } else {
1298 swizzle_pipe[0] = 0;
1299 swizzle_pipe[1] = 2;
1300 swizzle_pipe[2] = 4;
1301 swizzle_pipe[3] = 6;
1302 swizzle_pipe[4] = 3;
1303 swizzle_pipe[5] = 1;
1304 swizzle_pipe[6] = 5;
1305 }
1237 break; 1306 break;
1238 case 8: 1307 case 8:
1239 swizzle_pipe[0] = 0; 1308 if (force_no_swizzle) {
1240 swizzle_pipe[1] = 2; 1309 swizzle_pipe[0] = 0;
1241 swizzle_pipe[2] = 4; 1310 swizzle_pipe[1] = 1;
1242 swizzle_pipe[3] = 6; 1311 swizzle_pipe[2] = 2;
1243 swizzle_pipe[4] = 3; 1312 swizzle_pipe[3] = 3;
1244 swizzle_pipe[5] = 1; 1313 swizzle_pipe[4] = 4;
1245 swizzle_pipe[6] = 7; 1314 swizzle_pipe[5] = 5;
1246 swizzle_pipe[7] = 5; 1315 swizzle_pipe[6] = 6;
1316 swizzle_pipe[7] = 7;
1317 } else {
1318 swizzle_pipe[0] = 0;
1319 swizzle_pipe[1] = 2;
1320 swizzle_pipe[2] = 4;
1321 swizzle_pipe[3] = 6;
1322 swizzle_pipe[4] = 3;
1323 swizzle_pipe[5] = 1;
1324 swizzle_pipe[6] = 7;
1325 swizzle_pipe[7] = 5;
1326 }
1247 break; 1327 break;
1248 } 1328 }
1249 1329
@@ -1264,8 +1344,10 @@ static void r700_gfx_init(struct drm_device *dev,
1264 drm_radeon_private_t *dev_priv) 1344 drm_radeon_private_t *dev_priv)
1265{ 1345{
1266 int i, j, num_qd_pipes; 1346 int i, j, num_qd_pipes;
1347 u32 ta_aux_cntl;
1267 u32 sx_debug_1; 1348 u32 sx_debug_1;
1268 u32 smx_dc_ctl0; 1349 u32 smx_dc_ctl0;
1350 u32 db_debug3;
1269 u32 num_gs_verts_per_thread; 1351 u32 num_gs_verts_per_thread;
1270 u32 vgt_gs_per_es; 1352 u32 vgt_gs_per_es;
1271 u32 gs_prim_buffer_depth = 0; 1353 u32 gs_prim_buffer_depth = 0;
@@ -1276,8 +1358,8 @@ static void r700_gfx_init(struct drm_device *dev,
1276 u32 sq_dyn_gpr_size_simd_ab_0; 1358 u32 sq_dyn_gpr_size_simd_ab_0;
1277 u32 backend_map; 1359 u32 backend_map;
1278 u32 gb_tiling_config = 0; 1360 u32 gb_tiling_config = 0;
1279 u32 cc_rb_backend_disable = 0; 1361 u32 cc_rb_backend_disable;
1280 u32 cc_gc_shader_pipe_config = 0; 1362 u32 cc_gc_shader_pipe_config;
1281 u32 mc_arb_ramcfg; 1363 u32 mc_arb_ramcfg;
1282 u32 db_debug4; 1364 u32 db_debug4;
1283 1365
@@ -1428,38 +1510,51 @@ static void r700_gfx_init(struct drm_device *dev,
1428 1510
1429 gb_tiling_config |= R600_BANK_SWAPS(1); 1511 gb_tiling_config |= R600_BANK_SWAPS(1);
1430 1512
1431 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740) 1513 cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1432 backend_map = 0x28; 1514 cc_rb_backend_disable |=
1433 else 1515 R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
1434 backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
1435 dev_priv->r600_max_backends,
1436 (0xff << dev_priv->r600_max_backends) & 0xff);
1437 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
1438 1516
1439 cc_gc_shader_pipe_config = 1517 cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
1518 cc_gc_shader_pipe_config |=
1440 R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK); 1519 R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK);
1441 cc_gc_shader_pipe_config |= 1520 cc_gc_shader_pipe_config |=
1442 R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK); 1521 R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK);
1443 1522
1444 cc_rb_backend_disable = 1523 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
1445 R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK); 1524 backend_map = 0x28;
1525 else
1526 backend_map = r700_get_tile_pipe_to_backend_map(dev_priv,
1527 dev_priv->r600_max_tile_pipes,
1528 (R7XX_MAX_BACKENDS -
1529 r600_count_pipe_bits((cc_rb_backend_disable &
1530 R7XX_MAX_BACKENDS_MASK) >> 16)),
1531 (cc_rb_backend_disable >> 16));
1532 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
1446 1533
1447 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config); 1534 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
1448 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 1535 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
1449 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 1536 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
1537 if (gb_tiling_config & 0xc0) {
1538 dev_priv->r600_group_size = 512;
1539 } else {
1540 dev_priv->r600_group_size = 256;
1541 }
1542 dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
1543 if (gb_tiling_config & 0x30) {
1544 dev_priv->r600_nbanks = 8;
1545 } else {
1546 dev_priv->r600_nbanks = 4;
1547 }
1450 1548
1451 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1549 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1452 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 1550 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1453 RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1454 1551
1455 RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1552 RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1456 RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0); 1553 RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
1457 RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0); 1554 RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
1458 RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
1459 RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
1460 1555
1461 num_qd_pipes = 1556 num_qd_pipes =
1462 R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK); 1557 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
1463 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK); 1558 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
1464 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK); 1559 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
1465 1560
@@ -1469,10 +1564,8 @@ static void r700_gfx_init(struct drm_device *dev,
1469 1564
1470 RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30)); 1565 RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30));
1471 1566
1472 RADEON_WRITE(R600_TA_CNTL_AUX, (R600_DISABLE_CUBE_ANISO | 1567 ta_aux_cntl = RADEON_READ(R600_TA_CNTL_AUX);
1473 R600_SYNC_GRADIENT | 1568 RADEON_WRITE(R600_TA_CNTL_AUX, ta_aux_cntl | R600_DISABLE_CUBE_ANISO);
1474 R600_SYNC_WALKER |
1475 R600_SYNC_ALIGNER));
1476 1569
1477 sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1); 1570 sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1);
1478 sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS; 1571 sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS;
@@ -1483,14 +1576,28 @@ static void r700_gfx_init(struct drm_device *dev,
1483 smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1); 1576 smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1);
1484 RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0); 1577 RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0);
1485 1578
1486 RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) | 1579 if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV740)
1487 R700_GS_FLUSH_CTL(4) | 1580 RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
1488 R700_ACK_FLUSH_CTL(3) | 1581 R700_GS_FLUSH_CTL(4) |
1489 R700_SYNC_FLUSH_CTL)); 1582 R700_ACK_FLUSH_CTL(3) |
1583 R700_SYNC_FLUSH_CTL));
1490 1584
1491 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770) 1585 db_debug3 = RADEON_READ(R700_DB_DEBUG3);
1492 RADEON_WRITE(R700_DB_DEBUG3, R700_DB_CLK_OFF_DELAY(0x1f)); 1586 db_debug3 &= ~R700_DB_CLK_OFF_DELAY(0x1f);
1493 else { 1587 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1588 case CHIP_RV770:
1589 case CHIP_RV740:
1590 db_debug3 |= R700_DB_CLK_OFF_DELAY(0x1f);
1591 break;
1592 case CHIP_RV710:
1593 case CHIP_RV730:
1594 default:
1595 db_debug3 |= R700_DB_CLK_OFF_DELAY(2);
1596 break;
1597 }
1598 RADEON_WRITE(R700_DB_DEBUG3, db_debug3);
1599
1600 if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV770) {
1494 db_debug4 = RADEON_READ(RV700_DB_DEBUG4); 1601 db_debug4 = RADEON_READ(RV700_DB_DEBUG4);
1495 db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER; 1602 db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER;
1496 RADEON_WRITE(RV700_DB_DEBUG4, db_debug4); 1603 RADEON_WRITE(RV700_DB_DEBUG4, db_debug4);
@@ -1519,10 +1626,10 @@ static void r700_gfx_init(struct drm_device *dev,
1519 R600_ALU_UPDATE_FIFO_HIWATER(0x8)); 1626 R600_ALU_UPDATE_FIFO_HIWATER(0x8));
1520 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 1627 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1521 case CHIP_RV770: 1628 case CHIP_RV770:
1522 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
1523 break;
1524 case CHIP_RV730: 1629 case CHIP_RV730:
1525 case CHIP_RV710: 1630 case CHIP_RV710:
1631 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
1632 break;
1526 case CHIP_RV740: 1633 case CHIP_RV740:
1527 default: 1634 default:
1528 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4); 1635 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
@@ -2529,3 +2636,12 @@ out:
2529 mutex_unlock(&dev_priv->cs_mutex); 2636 mutex_unlock(&dev_priv->cs_mutex);
2530 return r; 2637 return r;
2531} 2638}
2639
2640void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size)
2641{
2642 struct drm_radeon_private *dev_priv = dev->dev_private;
2643
2644 *npipes = dev_priv->r600_npipes;
2645 *nbanks = dev_priv->r600_nbanks;
2646 *group_size = dev_priv->r600_group_size;
2647}
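
The gfx_init changes above cache the group size, pipe count and bank count decoded from gb_tiling_config, and the new r600_cs_legacy_get_tiling_conf() hands those values to the CS checker. A compact restatement of that decode is shown here; the bit positions are taken straight from the hunks, while the struct and function names are made up for the example.

	struct tiling_conf {
		unsigned int group_size;
		unsigned int npipes;
		unsigned int nbanks;
	};

	static void decode_tiling_config(unsigned int gb_tiling_config,
					 struct tiling_conf *conf)
	{
		/* group-size bits 0xc0: non-zero selects 512-byte groups */
		conf->group_size = (gb_tiling_config & 0xc0) ? 512 : 256;
		/* pipe-count field, bits [3:1]: number of pipes is 2^field */
		conf->npipes = 1u << ((gb_tiling_config >> 1) & 0x7);
		/* bank field, bits [5:4]: non-zero means 8 banks, else 4 */
		conf->nbanks = (gb_tiling_config & 0x30) ? 8 : 4;
	}

A caller would run this on the value programmed into the tiling config register after gfx init, which is effectively what the legacy helper above exposes so the same buffer-size validation can serve both the KMS and UMS paths.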
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index e4c45ec16507..cd2c63bce501 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,6 +28,7 @@
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon.h" 29#include "radeon.h"
30#include "r600d.h" 30#include "r600d.h"
31#include "r600_reg_safe.h"
31 32
32static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 33static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
33 struct radeon_cs_reloc **cs_reloc); 34 struct radeon_cs_reloc **cs_reloc);
@@ -35,11 +36,313 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
35 struct radeon_cs_reloc **cs_reloc); 36 struct radeon_cs_reloc **cs_reloc);
36typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); 37typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
37static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; 38static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
39extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
40
38 41
39struct r600_cs_track { 42struct r600_cs_track {
 40 u32 cb_color0_base_last; 43 /* configuration we mirror so that we use the same code between kms/ums */
44 u32 group_size;
45 u32 nbanks;
46 u32 npipes;
 47 /* values we track */
48 u32 nsamples;
49 u32 cb_color_base_last[8];
50 struct radeon_bo *cb_color_bo[8];
51 u32 cb_color_bo_offset[8];
52 struct radeon_bo *cb_color_frag_bo[8];
53 struct radeon_bo *cb_color_tile_bo[8];
54 u32 cb_color_info[8];
55 u32 cb_color_size_idx[8];
56 u32 cb_target_mask;
57 u32 cb_shader_mask;
58 u32 cb_color_size[8];
59 u32 vgt_strmout_en;
60 u32 vgt_strmout_buffer_en;
61 u32 db_depth_control;
62 u32 db_depth_info;
63 u32 db_depth_size_idx;
64 u32 db_depth_view;
65 u32 db_depth_size;
66 u32 db_offset;
67 struct radeon_bo *db_bo;
41}; 68};
42 69
70static inline int r600_bpe_from_format(u32 *bpe, u32 format)
71{
72 switch (format) {
73 case V_038004_COLOR_8:
74 case V_038004_COLOR_4_4:
75 case V_038004_COLOR_3_3_2:
76 case V_038004_FMT_1:
77 *bpe = 1;
78 break;
79 case V_038004_COLOR_16:
80 case V_038004_COLOR_16_FLOAT:
81 case V_038004_COLOR_8_8:
82 case V_038004_COLOR_5_6_5:
83 case V_038004_COLOR_6_5_5:
84 case V_038004_COLOR_1_5_5_5:
85 case V_038004_COLOR_4_4_4_4:
86 case V_038004_COLOR_5_5_5_1:
87 *bpe = 2;
88 break;
89 case V_038004_FMT_8_8_8:
90 *bpe = 3;
91 break;
92 case V_038004_COLOR_32:
93 case V_038004_COLOR_32_FLOAT:
94 case V_038004_COLOR_16_16:
95 case V_038004_COLOR_16_16_FLOAT:
96 case V_038004_COLOR_8_24:
97 case V_038004_COLOR_8_24_FLOAT:
98 case V_038004_COLOR_24_8:
99 case V_038004_COLOR_24_8_FLOAT:
100 case V_038004_COLOR_10_11_11:
101 case V_038004_COLOR_10_11_11_FLOAT:
102 case V_038004_COLOR_11_11_10:
103 case V_038004_COLOR_11_11_10_FLOAT:
104 case V_038004_COLOR_2_10_10_10:
105 case V_038004_COLOR_8_8_8_8:
106 case V_038004_COLOR_10_10_10_2:
107 case V_038004_FMT_5_9_9_9_SHAREDEXP:
108 case V_038004_FMT_32_AS_8:
109 case V_038004_FMT_32_AS_8_8:
110 *bpe = 4;
111 break;
112 case V_038004_COLOR_X24_8_32_FLOAT:
113 case V_038004_COLOR_32_32:
114 case V_038004_COLOR_32_32_FLOAT:
115 case V_038004_COLOR_16_16_16_16:
116 case V_038004_COLOR_16_16_16_16_FLOAT:
117 *bpe = 8;
118 break;
119 case V_038004_FMT_16_16_16:
120 case V_038004_FMT_16_16_16_FLOAT:
121 *bpe = 6;
122 break;
123 case V_038004_FMT_32_32_32:
124 case V_038004_FMT_32_32_32_FLOAT:
125 *bpe = 12;
126 break;
127 case V_038004_COLOR_32_32_32_32:
128 case V_038004_COLOR_32_32_32_32_FLOAT:
129 *bpe = 16;
130 break;
131 case V_038004_FMT_GB_GR:
132 case V_038004_FMT_BG_RG:
133 case V_038004_COLOR_INVALID:
134 *bpe = 16;
135 return -EINVAL;
136 }
137 return 0;
138}
139
140static void r600_cs_track_init(struct r600_cs_track *track)
141{
142 int i;
143
144 for (i = 0; i < 8; i++) {
145 track->cb_color_base_last[i] = 0;
146 track->cb_color_size[i] = 0;
147 track->cb_color_size_idx[i] = 0;
148 track->cb_color_info[i] = 0;
149 track->cb_color_bo[i] = NULL;
150 track->cb_color_bo_offset[i] = 0xFFFFFFFF;
151 }
152 track->cb_target_mask = 0xFFFFFFFF;
153 track->cb_shader_mask = 0xFFFFFFFF;
154 track->db_bo = NULL;
155 /* assume the biggest format and that htile is enabled */
156 track->db_depth_info = 7 | (1 << 25);
157 track->db_depth_view = 0xFFFFC000;
158 track->db_depth_size = 0xFFFFFFFF;
159 track->db_depth_size_idx = 0;
160 track->db_depth_control = 0xFFFFFFFF;
161}
162
163static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
164{
165 struct r600_cs_track *track = p->track;
166 u32 bpe = 0, pitch, slice_tile_max, size, tmp, height;
167 volatile u32 *ib = p->ib->ptr;
168
169 if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
 170 		dev_warn(p->dev, "FMASK or CMASK buffers are not supported by this kernel\n");
171 return -EINVAL;
172 }
173 size = radeon_bo_size(track->cb_color_bo[i]);
174 if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
175 dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
176 __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
177 i, track->cb_color_info[i]);
178 return -EINVAL;
179 }
180 pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) << 3;
181 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
182 if (!pitch) {
183 dev_warn(p->dev, "%s:%d cb pitch (%d) for %d invalid (0x%08X)\n",
184 __func__, __LINE__, pitch, i, track->cb_color_size[i]);
185 return -EINVAL;
186 }
187 height = size / (pitch * bpe);
188 if (height > 8192)
189 height = 8192;
190 switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
191 case V_0280A0_ARRAY_LINEAR_GENERAL:
192 case V_0280A0_ARRAY_LINEAR_ALIGNED:
193 if (pitch & 0x3f) {
194 dev_warn(p->dev, "%s:%d cb pitch (%d x %d = %d) invalid\n",
195 __func__, __LINE__, pitch, bpe, pitch * bpe);
196 return -EINVAL;
197 }
198 if ((pitch * bpe) & (track->group_size - 1)) {
199 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
200 __func__, __LINE__, pitch);
201 return -EINVAL;
202 }
203 break;
204 case V_0280A0_ARRAY_1D_TILED_THIN1:
205 if ((pitch * 8 * bpe * track->nsamples) & (track->group_size - 1)) {
206 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
207 __func__, __LINE__, pitch);
208 return -EINVAL;
209 }
210 height &= ~0x7;
211 if (!height)
212 height = 8;
213 break;
214 case V_0280A0_ARRAY_2D_TILED_THIN1:
215 if (pitch & ((8 * track->nbanks) - 1)) {
216 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
217 __func__, __LINE__, pitch);
218 return -EINVAL;
219 }
220 tmp = pitch * 8 * bpe * track->nsamples;
221 tmp = tmp / track->nbanks;
222 if (tmp & (track->group_size - 1)) {
223 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
224 __func__, __LINE__, pitch);
225 return -EINVAL;
226 }
227 height &= ~((16 * track->npipes) - 1);
228 if (!height)
229 height = 16 * track->npipes;
230 break;
231 default:
232 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
233 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
234 track->cb_color_info[i]);
235 return -EINVAL;
236 }
237 /* check offset */
238 tmp = height * pitch;
239 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
 240 		dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]);
241 return -EINVAL;
242 }
243 /* limit max tile */
244 tmp = (height * pitch) >> 6;
245 if (tmp < slice_tile_max)
246 slice_tile_max = tmp;
247 tmp = S_028060_PITCH_TILE_MAX((pitch >> 3) - 1) |
248 S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
249 ib[track->cb_color_size_idx[i]] = tmp;
250 return 0;
251}
252
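As an aside, not part of the patch: the pitch/height arithmetic in r600_cs_track_validate_cb() above is easy to misread from the register names alone. The stand-alone user-space sketch below (illustrative values only; the register value, bpe and buffer size are made up) shows how CB_COLORn_SIZE is decoded: PITCH_TILE_MAX and SLICE_TILE_MAX count 8x8 tiles, so the pixel pitch is (PITCH_TILE_MAX + 1) * 8, and the implied height is what the offset check compares against the buffer object size.

#include <stdio.h>

int main(void)
{
	/* hypothetical CB_COLOR0_SIZE: PITCH_TILE_MAX = 127, SLICE_TILE_MAX = 16383 */
	unsigned cb_color_size = (127u << 0) | (16383u << 10);
	unsigned bpe = 4;                            /* e.g. COLOR_8_8_8_8 */
	unsigned long long bo_size = 4ULL << 20;     /* hypothetical 4 MiB bo */

	unsigned pitch_tile_max = cb_color_size & 0x3FF;
	unsigned slice_tile_max = ((cb_color_size >> 10) & 0xFFFFF) + 1;
	unsigned pitch = (pitch_tile_max + 1) << 3;  /* tiles are 8 pixels wide */
	unsigned height = (unsigned)(bo_size / (pitch * bpe));

	if (height > 8192)
		height = 8192;
	printf("pitch=%u pixels, slice_tile_max=%u, implied height=%u\n",
	       pitch, slice_tile_max, height);
	return 0;
}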
253static int r600_cs_track_check(struct radeon_cs_parser *p)
254{
255 struct r600_cs_track *track = p->track;
256 u32 tmp;
257 int r, i;
258 volatile u32 *ib = p->ib->ptr;
259
 260 	/* on legacy kernels we don't perform the advanced checks */
261 if (p->rdev == NULL)
262 return 0;
 263 	/* we don't support output buffers yet */
264 if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
265 dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
266 return -EINVAL;
267 }
 268 	/* check that we have a cb for each enabled target; we don't check
269 * shader_mask because it seems mesa isn't always setting it :(
270 */
271 tmp = track->cb_target_mask;
272 for (i = 0; i < 8; i++) {
273 if ((tmp >> (i * 4)) & 0xF) {
274 /* at least one component is enabled */
275 if (track->cb_color_bo[i] == NULL) {
276 dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
277 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
278 return -EINVAL;
279 }
280 /* perform rewrite of CB_COLOR[0-7]_SIZE */
281 r = r600_cs_track_validate_cb(p, i);
282 if (r)
283 return r;
284 }
285 }
286 /* Check depth buffer */
287 if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
288 G_028800_Z_ENABLE(track->db_depth_control)) {
289 u32 nviews, bpe, ntiles;
290 if (track->db_bo == NULL) {
291 dev_warn(p->dev, "z/stencil with no depth buffer\n");
292 return -EINVAL;
293 }
294 if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
295 dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
296 return -EINVAL;
297 }
298 switch (G_028010_FORMAT(track->db_depth_info)) {
299 case V_028010_DEPTH_16:
300 bpe = 2;
301 break;
302 case V_028010_DEPTH_X8_24:
303 case V_028010_DEPTH_8_24:
304 case V_028010_DEPTH_X8_24_FLOAT:
305 case V_028010_DEPTH_8_24_FLOAT:
306 case V_028010_DEPTH_32_FLOAT:
307 bpe = 4;
308 break;
309 case V_028010_DEPTH_X24_8_32_FLOAT:
310 bpe = 8;
311 break;
312 default:
313 dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
314 return -EINVAL;
315 }
316 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
317 if (!track->db_depth_size_idx) {
318 dev_warn(p->dev, "z/stencil buffer size not set\n");
319 return -EINVAL;
320 }
 321 			printk_once(KERN_WARNING "You have old & broken userspace, please consider updating mesa\n");
322 tmp = radeon_bo_size(track->db_bo) - track->db_offset;
323 tmp = (tmp / bpe) >> 6;
324 if (!tmp) {
325 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
326 track->db_depth_size, bpe, track->db_offset,
327 radeon_bo_size(track->db_bo));
328 return -EINVAL;
329 }
330 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
331 } else {
332 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
333 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
334 tmp = ntiles * bpe * 64 * nviews;
335 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
336 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n",
337 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
338 radeon_bo_size(track->db_bo));
339 return -EINVAL;
340 }
341 }
342 }
343 return 0;
344}
345
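Another aside, not part of the patch: the depth-buffer bound check in r600_cs_track_check() above works in 8x8 tiles, so the required footprint is ntiles * 64 pixels * bpe bytes per view. A minimal stand-alone sketch, with invented values:

#include <stdio.h>

int main(void)
{
	unsigned ntiles = 16384;                 /* SLICE_TILE_MAX + 1, hypothetical */
	unsigned nviews = 1;                     /* SLICE_MAX + 1 */
	unsigned bpe = 4;                        /* e.g. DEPTH_8_24 */
	unsigned long long db_offset = 0;
	unsigned long long bo_size = 8ULL << 20; /* hypothetical 8 MiB bo */
	unsigned long long needed = (unsigned long long)ntiles * 64 * bpe * nviews;

	if (needed + db_offset > bo_size)
		printf("z/stencil buffer too small: need %llu, have %llu\n", needed, bo_size);
	else
		printf("depth buffer fits: %llu of %llu bytes used\n", needed, bo_size);
	return 0;
}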
43/** 346/**
44 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 347 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
45 * @parser: parser structure holding parsing context. 348 * @parser: parser structure holding parsing context.
@@ -359,6 +662,334 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
359 return 0; 662 return 0;
360} 663}
361 664
665/**
666 * r600_cs_check_reg() - check if register is authorized or not
 667 * @p: parser structure holding parsing context
668 * @reg: register we are testing
669 * @idx: index into the cs buffer
670 *
671 * This function will test against r600_reg_safe_bm and return 0
 672 * if the register is safe. If the register is not flagged as safe, this
 673 * function will test it against a list of registers needing special handling.
674 */
675static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
676{
677 struct r600_cs_track *track = (struct r600_cs_track *)p->track;
678 struct radeon_cs_reloc *reloc;
679 u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
680 u32 m, i, tmp, *ib;
681 int r;
682
683 i = (reg >> 7);
 684 	if (i >= last_reg) {
685 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
686 return -EINVAL;
687 }
688 m = 1 << ((reg >> 2) & 31);
689 if (!(r600_reg_safe_bm[i] & m))
690 return 0;
691 ib = p->ib->ptr;
692 switch (reg) {
 693 	/* force the following regs to 0 in an attempt to disable the output buffer,
 694 	 * which we will need to understand better before we can perform
 695 	 * security checks on it (Jerome)
696 */
697 case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
698 case R_008C44_SQ_ESGS_RING_SIZE:
699 case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
700 case R_008C54_SQ_ESTMP_RING_SIZE:
701 case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
702 case R_008C74_SQ_FBUF_RING_SIZE:
703 case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
704 case R_008C5C_SQ_GSTMP_RING_SIZE:
705 case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
706 case R_008C4C_SQ_GSVS_RING_SIZE:
707 case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
708 case R_008C6C_SQ_PSTMP_RING_SIZE:
709 case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
710 case R_008C7C_SQ_REDUC_RING_SIZE:
711 case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
712 case R_008C64_SQ_VSTMP_RING_SIZE:
713 case R_0288C8_SQ_GS_VERT_ITEMSIZE:
 714 		/* get value to populate the IB, don't remove */
 715 		tmp = radeon_get_ib_value(p, idx);
716 ib[idx] = 0;
717 break;
718 case R_028800_DB_DEPTH_CONTROL:
719 track->db_depth_control = radeon_get_ib_value(p, idx);
720 break;
721 case R_028010_DB_DEPTH_INFO:
722 track->db_depth_info = radeon_get_ib_value(p, idx);
723 break;
724 case R_028004_DB_DEPTH_VIEW:
725 track->db_depth_view = radeon_get_ib_value(p, idx);
726 break;
727 case R_028000_DB_DEPTH_SIZE:
728 track->db_depth_size = radeon_get_ib_value(p, idx);
729 track->db_depth_size_idx = idx;
730 break;
731 case R_028AB0_VGT_STRMOUT_EN:
732 track->vgt_strmout_en = radeon_get_ib_value(p, idx);
733 break;
734 case R_028B20_VGT_STRMOUT_BUFFER_EN:
735 track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
736 break;
737 case R_028238_CB_TARGET_MASK:
738 track->cb_target_mask = radeon_get_ib_value(p, idx);
739 break;
740 case R_02823C_CB_SHADER_MASK:
741 track->cb_shader_mask = radeon_get_ib_value(p, idx);
742 break;
743 case R_028C04_PA_SC_AA_CONFIG:
744 tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
745 track->nsamples = 1 << tmp;
746 break;
747 case R_0280A0_CB_COLOR0_INFO:
748 case R_0280A4_CB_COLOR1_INFO:
749 case R_0280A8_CB_COLOR2_INFO:
750 case R_0280AC_CB_COLOR3_INFO:
751 case R_0280B0_CB_COLOR4_INFO:
752 case R_0280B4_CB_COLOR5_INFO:
753 case R_0280B8_CB_COLOR6_INFO:
754 case R_0280BC_CB_COLOR7_INFO:
755 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
756 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
757 break;
758 case R_028060_CB_COLOR0_SIZE:
759 case R_028064_CB_COLOR1_SIZE:
760 case R_028068_CB_COLOR2_SIZE:
761 case R_02806C_CB_COLOR3_SIZE:
762 case R_028070_CB_COLOR4_SIZE:
763 case R_028074_CB_COLOR5_SIZE:
764 case R_028078_CB_COLOR6_SIZE:
765 case R_02807C_CB_COLOR7_SIZE:
766 tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
767 track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
768 track->cb_color_size_idx[tmp] = idx;
769 break;
 770 	/* These registers were added late; there is userspace
 771 	 * which does provide relocations for them but sets a
 772 	 * 0 offset. In order to avoid breaking old userspace
 773 	 * we detect this and set the address to point to the last
 774 	 * CB_COLOR0_BASE. Note that if userspace doesn't set
 775 	 * CB_COLOR0_BASE before these registers we will report an
 776 	 * error. Old userspace always set CB_COLOR0_BASE
 777 	 * before any of this.
778 */
779 case R_0280E0_CB_COLOR0_FRAG:
780 case R_0280E4_CB_COLOR1_FRAG:
781 case R_0280E8_CB_COLOR2_FRAG:
782 case R_0280EC_CB_COLOR3_FRAG:
783 case R_0280F0_CB_COLOR4_FRAG:
784 case R_0280F4_CB_COLOR5_FRAG:
785 case R_0280F8_CB_COLOR6_FRAG:
786 case R_0280FC_CB_COLOR7_FRAG:
787 tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
788 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
789 if (!track->cb_color_base_last[tmp]) {
 790 				dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
791 return -EINVAL;
792 }
793 ib[idx] = track->cb_color_base_last[tmp];
 794 			printk_once(KERN_WARNING "You have old & broken userspace, "
795 "please consider updating mesa & xf86-video-ati\n");
796 track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
797 } else {
798 r = r600_cs_packet_next_reloc(p, &reloc);
799 if (r) {
800 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
801 return -EINVAL;
802 }
803 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
804 track->cb_color_frag_bo[tmp] = reloc->robj;
805 }
806 break;
807 case R_0280C0_CB_COLOR0_TILE:
808 case R_0280C4_CB_COLOR1_TILE:
809 case R_0280C8_CB_COLOR2_TILE:
810 case R_0280CC_CB_COLOR3_TILE:
811 case R_0280D0_CB_COLOR4_TILE:
812 case R_0280D4_CB_COLOR5_TILE:
813 case R_0280D8_CB_COLOR6_TILE:
814 case R_0280DC_CB_COLOR7_TILE:
815 tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
816 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
817 if (!track->cb_color_base_last[tmp]) {
 818 				dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
819 return -EINVAL;
820 }
821 ib[idx] = track->cb_color_base_last[tmp];
 822 			printk_once(KERN_WARNING "You have old & broken userspace, "
823 "please consider updating mesa & xf86-video-ati\n");
824 track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
825 } else {
826 r = r600_cs_packet_next_reloc(p, &reloc);
827 if (r) {
828 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
829 return -EINVAL;
830 }
831 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
832 track->cb_color_tile_bo[tmp] = reloc->robj;
833 }
834 break;
835 case CB_COLOR0_BASE:
836 case CB_COLOR1_BASE:
837 case CB_COLOR2_BASE:
838 case CB_COLOR3_BASE:
839 case CB_COLOR4_BASE:
840 case CB_COLOR5_BASE:
841 case CB_COLOR6_BASE:
842 case CB_COLOR7_BASE:
843 r = r600_cs_packet_next_reloc(p, &reloc);
844 if (r) {
845 dev_warn(p->dev, "bad SET_CONTEXT_REG "
846 "0x%04X\n", reg);
847 return -EINVAL;
848 }
849 tmp = (reg - CB_COLOR0_BASE) / 4;
850 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
851 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
852 track->cb_color_base_last[tmp] = ib[idx];
853 track->cb_color_bo[tmp] = reloc->robj;
854 break;
855 case DB_DEPTH_BASE:
856 r = r600_cs_packet_next_reloc(p, &reloc);
857 if (r) {
858 dev_warn(p->dev, "bad SET_CONTEXT_REG "
859 "0x%04X\n", reg);
860 return -EINVAL;
861 }
862 track->db_offset = radeon_get_ib_value(p, idx);
863 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
864 track->db_bo = reloc->robj;
865 break;
866 case DB_HTILE_DATA_BASE:
867 case SQ_PGM_START_FS:
868 case SQ_PGM_START_ES:
869 case SQ_PGM_START_VS:
870 case SQ_PGM_START_GS:
871 case SQ_PGM_START_PS:
872 r = r600_cs_packet_next_reloc(p, &reloc);
873 if (r) {
874 dev_warn(p->dev, "bad SET_CONTEXT_REG "
875 "0x%04X\n", reg);
876 return -EINVAL;
877 }
878 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
879 break;
880 default:
881 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
882 return -EINVAL;
883 }
884 return 0;
885}
886
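An aside on the r600_reg_safe_bm lookup above, not part of the patch: each 32-bit word of the bitmap covers 128 bytes of register space (32 dword registers), so the word index is reg >> 7 and the bit index is (reg >> 2) & 31. A clear bit means the register is always allowed; a set bit sends it to the switch for special handling. The sketch below uses a dummy table, not the generated r600_reg_safe.h contents:

#include <stdio.h>

static unsigned int dummy_reg_safe_bm[4096];	/* stand-in for the generated table */

static int reg_needs_special_handling(unsigned int reg)
{
	unsigned int i = reg >> 7;
	unsigned int m = 1u << ((reg >> 2) & 31);

	if (i >= sizeof(dummy_reg_safe_bm) / sizeof(dummy_reg_safe_bm[0]))
		return -1;				/* out of range: forbidden */
	return (dummy_reg_safe_bm[i] & m) != 0;		/* 0: always safe, 1: check it */
}

int main(void)
{
	unsigned int reg = 0x028800;			/* DB_DEPTH_CONTROL */

	/* pretend the generated table flags DB_DEPTH_CONTROL for tracking */
	dummy_reg_safe_bm[reg >> 7] |= 1u << ((reg >> 2) & 31);
	printf("reg 0x%08X -> word %u, bit %u, needs handling: %d\n",
	       reg, reg >> 7, (reg >> 2) & 31, reg_needs_special_handling(reg));
	return 0;
}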
887static inline unsigned minify(unsigned size, unsigned levels)
888{
889 size = size >> levels;
890 if (size < 1)
891 size = 1;
892 return size;
893}
894
895static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
896 unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
897 unsigned *l0_size, unsigned *mipmap_size)
898{
899 unsigned offset, i, level, face;
900 unsigned width, height, depth, rowstride, size;
901
902 w0 = minify(w0, 0);
903 h0 = minify(h0, 0);
904 d0 = minify(d0, 0);
905 for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
906 width = minify(w0, i);
907 height = minify(h0, i);
908 depth = minify(d0, i);
909 for(face = 0; face < nfaces; face++) {
910 rowstride = ((width * bpe) + 255) & ~255;
911 size = height * rowstride * depth;
912 offset += size;
913 offset = (offset + 0x1f) & ~0x1f;
914 }
915 }
916 *l0_size = (((w0 * bpe) + 255) & ~255) * h0 * d0;
917 *mipmap_size = offset;
918 if (!blevel)
919 *mipmap_size -= *l0_size;
920 if (!nlevels)
921 *mipmap_size = *l0_size;
922}
923
924/**
 925 * r600_check_texture_resource() - check if a texture resource is valid
926 * @p: parser structure holding parsing context
927 * @idx: index into the cs buffer
928 * @texture: texture's bo structure
929 * @mipmap: mipmap's bo structure
930 *
 931 * This function will check that the resource has valid fields and that
 932 * the texture and mipmap bo objects are big enough to cover this resource.
933 */
934static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
935 struct radeon_bo *texture,
936 struct radeon_bo *mipmap)
937{
938 u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
939 u32 word0, word1, l0_size, mipmap_size;
940
 941 	/* on legacy kernels we don't perform the advanced checks */
942 if (p->rdev == NULL)
943 return 0;
944 word0 = radeon_get_ib_value(p, idx + 0);
945 word1 = radeon_get_ib_value(p, idx + 1);
946 w0 = G_038000_TEX_WIDTH(word0) + 1;
947 h0 = G_038004_TEX_HEIGHT(word1) + 1;
948 d0 = G_038004_TEX_DEPTH(word1);
949 nfaces = 1;
950 switch (G_038000_DIM(word0)) {
951 case V_038000_SQ_TEX_DIM_1D:
952 case V_038000_SQ_TEX_DIM_2D:
953 case V_038000_SQ_TEX_DIM_3D:
954 break;
955 case V_038000_SQ_TEX_DIM_CUBEMAP:
956 nfaces = 6;
957 break;
958 case V_038000_SQ_TEX_DIM_1D_ARRAY:
959 case V_038000_SQ_TEX_DIM_2D_ARRAY:
960 case V_038000_SQ_TEX_DIM_2D_MSAA:
961 case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
962 default:
 963 		dev_warn(p->dev, "this kernel doesn't support texture dim %d\n", G_038000_DIM(word0));
964 return -EINVAL;
965 }
966 if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
967 dev_warn(p->dev, "%s:%d texture invalid format %d\n",
968 __func__, __LINE__, G_038004_DATA_FORMAT(word1));
969 return -EINVAL;
970 }
971 word0 = radeon_get_ib_value(p, idx + 4);
972 word1 = radeon_get_ib_value(p, idx + 5);
973 blevel = G_038010_BASE_LEVEL(word0);
974 nlevels = G_038014_LAST_LEVEL(word1);
975 r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, &l0_size, &mipmap_size);
976 /* using get ib will give us the offset into the texture bo */
977 word0 = radeon_get_ib_value(p, idx + 2);
978 if ((l0_size + word0) > radeon_bo_size(texture)) {
979 dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
980 w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
981 return -EINVAL;
982 }
983 /* using get ib will give us the offset into the mipmap bo */
984 word0 = radeon_get_ib_value(p, idx + 3);
985 if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
986 dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
 987 			w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(mipmap));
988 return -EINVAL;
989 }
990 return 0;
991}
992
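A final aside, not part of the patch: the texture/mipmap bound check above relies on r600_texture_size(), which pads each level's row stride to 256 bytes and aligns each face to 32 bytes. The stand-alone sketch below reproduces that computation for a hypothetical 256x256 RGBA8 texture with a full mip chain (all values are illustrative):

#include <stdio.h>

static unsigned minify(unsigned size, unsigned levels)
{
	size >>= levels;
	return size ? size : 1;
}

int main(void)
{
	unsigned w0 = 256, h0 = 256, d0 = 1, bpe = 4;	/* hypothetical 256x256 RGBA8 */
	unsigned nfaces = 1, blevel = 0, nlevels = 8;	/* BASE_LEVEL / LAST_LEVEL */
	unsigned offset = 0, i, face;
	unsigned l0_size, mipmap_size;

	for (i = 0; i < nlevels; i++) {
		unsigned width = minify(w0, i);
		unsigned height = minify(h0, i);
		unsigned depth = minify(d0, i);

		for (face = 0; face < nfaces; face++) {
			unsigned rowstride = ((width * bpe) + 255) & ~255u;

			offset += height * rowstride * depth;
			offset = (offset + 0x1f) & ~0x1fu;
		}
	}
	l0_size = (((w0 * bpe) + 255) & ~255u) * h0 * d0;
	mipmap_size = blevel ? offset : offset - l0_size;
	printf("level 0 needs %u bytes, mip chain needs %u bytes\n", l0_size, mipmap_size);
	return 0;
}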
362static int r600_packet3_check(struct radeon_cs_parser *p, 993static int r600_packet3_check(struct radeon_cs_parser *p,
363 struct radeon_cs_packet *pkt) 994 struct radeon_cs_packet *pkt)
364{ 995{
@@ -408,12 +1039,22 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
408 } 1039 }
409 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); 1040 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
410 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 1041 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1042 r = r600_cs_track_check(p);
1043 if (r) {
1044 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1045 return r;
1046 }
411 break; 1047 break;
412 case PACKET3_DRAW_INDEX_AUTO: 1048 case PACKET3_DRAW_INDEX_AUTO:
413 if (pkt->count != 1) { 1049 if (pkt->count != 1) {
414 DRM_ERROR("bad DRAW_INDEX_AUTO\n"); 1050 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
415 return -EINVAL; 1051 return -EINVAL;
416 } 1052 }
1053 r = r600_cs_track_check(p);
1054 if (r) {
1055 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1056 return r;
1057 }
417 break; 1058 break;
418 case PACKET3_DRAW_INDEX_IMMD_BE: 1059 case PACKET3_DRAW_INDEX_IMMD_BE:
419 case PACKET3_DRAW_INDEX_IMMD: 1060 case PACKET3_DRAW_INDEX_IMMD:
@@ -421,6 +1062,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
421 DRM_ERROR("bad DRAW_INDEX_IMMD\n"); 1062 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
422 return -EINVAL; 1063 return -EINVAL;
423 } 1064 }
1065 r = r600_cs_track_check(p);
1066 if (r) {
1067 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1068 return r;
1069 }
424 break; 1070 break;
425 case PACKET3_WAIT_REG_MEM: 1071 case PACKET3_WAIT_REG_MEM:
426 if (pkt->count != 5) { 1072 if (pkt->count != 5) {
@@ -493,30 +1139,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
493 } 1139 }
494 for (i = 0; i < pkt->count; i++) { 1140 for (i = 0; i < pkt->count; i++) {
495 reg = start_reg + (4 * i); 1141 reg = start_reg + (4 * i);
496 switch (reg) { 1142 r = r600_cs_check_reg(p, reg, idx+1+i);
497 case SQ_ESGS_RING_BASE: 1143 if (r)
498 case SQ_GSVS_RING_BASE: 1144 return r;
499 case SQ_ESTMP_RING_BASE:
500 case SQ_GSTMP_RING_BASE:
501 case SQ_VSTMP_RING_BASE:
502 case SQ_PSTMP_RING_BASE:
503 case SQ_FBUF_RING_BASE:
504 case SQ_REDUC_RING_BASE:
505 case SX_MEMORY_EXPORT_BASE:
506 r = r600_cs_packet_next_reloc(p, &reloc);
507 if (r) {
508 DRM_ERROR("bad SET_CONFIG_REG "
509 "0x%04X\n", reg);
510 return -EINVAL;
511 }
512 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
513 break;
514 case CP_COHER_BASE:
515 /* use PACKET3_SURFACE_SYNC */
516 return -EINVAL;
517 default:
518 break;
519 }
520 } 1145 }
521 break; 1146 break;
522 case PACKET3_SET_CONTEXT_REG: 1147 case PACKET3_SET_CONTEXT_REG:
@@ -530,106 +1155,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
530 } 1155 }
531 for (i = 0; i < pkt->count; i++) { 1156 for (i = 0; i < pkt->count; i++) {
532 reg = start_reg + (4 * i); 1157 reg = start_reg + (4 * i);
533 switch (reg) { 1158 r = r600_cs_check_reg(p, reg, idx+1+i);
534 /* This register were added late, there is userspace 1159 if (r)
535 * which does provide relocation for those but set 1160 return r;
536 * 0 offset. In order to avoid breaking old userspace
537 * we detect this and set address to point to last
538 * CB_COLOR0_BASE, note that if userspace doesn't set
539 * CB_COLOR0_BASE before this register we will report
540 * error. Old userspace always set CB_COLOR0_BASE
541 * before any of this.
542 */
543 case R_0280E0_CB_COLOR0_FRAG:
544 case R_0280E4_CB_COLOR1_FRAG:
545 case R_0280E8_CB_COLOR2_FRAG:
546 case R_0280EC_CB_COLOR3_FRAG:
547 case R_0280F0_CB_COLOR4_FRAG:
548 case R_0280F4_CB_COLOR5_FRAG:
549 case R_0280F8_CB_COLOR6_FRAG:
550 case R_0280FC_CB_COLOR7_FRAG:
551 case R_0280C0_CB_COLOR0_TILE:
552 case R_0280C4_CB_COLOR1_TILE:
553 case R_0280C8_CB_COLOR2_TILE:
554 case R_0280CC_CB_COLOR3_TILE:
555 case R_0280D0_CB_COLOR4_TILE:
556 case R_0280D4_CB_COLOR5_TILE:
557 case R_0280D8_CB_COLOR6_TILE:
558 case R_0280DC_CB_COLOR7_TILE:
559 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
560 if (!track->cb_color0_base_last) {
561 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
562 return -EINVAL;
563 }
564 ib[idx+1+i] = track->cb_color0_base_last;
565 printk_once(KERN_WARNING "radeon: You have old & broken userspace "
566 "please consider updating mesa & xf86-video-ati\n");
567 } else {
568 r = r600_cs_packet_next_reloc(p, &reloc);
569 if (r) {
570 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
571 return -EINVAL;
572 }
573 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
574 }
575 break;
576 case DB_DEPTH_BASE:
577 case DB_HTILE_DATA_BASE:
578 case CB_COLOR0_BASE:
579 r = r600_cs_packet_next_reloc(p, &reloc);
580 if (r) {
581 DRM_ERROR("bad SET_CONTEXT_REG "
582 "0x%04X\n", reg);
583 return -EINVAL;
584 }
585 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
586 track->cb_color0_base_last = ib[idx+1+i];
587 break;
588 case CB_COLOR1_BASE:
589 case CB_COLOR2_BASE:
590 case CB_COLOR3_BASE:
591 case CB_COLOR4_BASE:
592 case CB_COLOR5_BASE:
593 case CB_COLOR6_BASE:
594 case CB_COLOR7_BASE:
595 case SQ_PGM_START_FS:
596 case SQ_PGM_START_ES:
597 case SQ_PGM_START_VS:
598 case SQ_PGM_START_GS:
599 case SQ_PGM_START_PS:
600 r = r600_cs_packet_next_reloc(p, &reloc);
601 if (r) {
602 DRM_ERROR("bad SET_CONTEXT_REG "
603 "0x%04X\n", reg);
604 return -EINVAL;
605 }
606 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
607 break;
608 case VGT_DMA_BASE:
609 case VGT_DMA_BASE_HI:
610 /* These should be handled by DRAW_INDEX packet 3 */
611 case VGT_STRMOUT_BASE_OFFSET_0:
612 case VGT_STRMOUT_BASE_OFFSET_1:
613 case VGT_STRMOUT_BASE_OFFSET_2:
614 case VGT_STRMOUT_BASE_OFFSET_3:
615 case VGT_STRMOUT_BASE_OFFSET_HI_0:
616 case VGT_STRMOUT_BASE_OFFSET_HI_1:
617 case VGT_STRMOUT_BASE_OFFSET_HI_2:
618 case VGT_STRMOUT_BASE_OFFSET_HI_3:
619 case VGT_STRMOUT_BUFFER_BASE_0:
620 case VGT_STRMOUT_BUFFER_BASE_1:
621 case VGT_STRMOUT_BUFFER_BASE_2:
622 case VGT_STRMOUT_BUFFER_BASE_3:
623 case VGT_STRMOUT_BUFFER_OFFSET_0:
624 case VGT_STRMOUT_BUFFER_OFFSET_1:
625 case VGT_STRMOUT_BUFFER_OFFSET_2:
626 case VGT_STRMOUT_BUFFER_OFFSET_3:
627 /* These should be handled by STRMOUT_BUFFER packet 3 */
628 DRM_ERROR("bad context reg: 0x%08x\n", reg);
629 return -EINVAL;
630 default:
631 break;
632 }
633 } 1161 }
634 break; 1162 break;
635 case PACKET3_SET_RESOURCE: 1163 case PACKET3_SET_RESOURCE:
@@ -646,6 +1174,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
646 return -EINVAL; 1174 return -EINVAL;
647 } 1175 }
648 for (i = 0; i < (pkt->count / 7); i++) { 1176 for (i = 0; i < (pkt->count / 7); i++) {
1177 struct radeon_bo *texture, *mipmap;
1178 u32 size, offset;
1179
649 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) { 1180 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
650 case SQ_TEX_VTX_VALID_TEXTURE: 1181 case SQ_TEX_VTX_VALID_TEXTURE:
651 /* tex base */ 1182 /* tex base */
@@ -655,6 +1186,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
655 return -EINVAL; 1186 return -EINVAL;
656 } 1187 }
657 ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1188 ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1189 texture = reloc->robj;
658 /* tex mip base */ 1190 /* tex mip base */
659 r = r600_cs_packet_next_reloc(p, &reloc); 1191 r = r600_cs_packet_next_reloc(p, &reloc);
660 if (r) { 1192 if (r) {
@@ -662,6 +1194,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
662 return -EINVAL; 1194 return -EINVAL;
663 } 1195 }
664 ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1196 ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1197 mipmap = reloc->robj;
1198 r = r600_check_texture_resource(p, idx+(i*7)+1,
1199 texture, mipmap);
1200 if (r)
1201 return r;
665 break; 1202 break;
666 case SQ_TEX_VTX_VALID_BUFFER: 1203 case SQ_TEX_VTX_VALID_BUFFER:
667 /* vtx base */ 1204 /* vtx base */
@@ -670,6 +1207,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
670 DRM_ERROR("bad SET_RESOURCE\n"); 1207 DRM_ERROR("bad SET_RESOURCE\n");
671 return -EINVAL; 1208 return -EINVAL;
672 } 1209 }
1210 offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
1211 size = radeon_get_ib_value(p, idx+1+(i*7)+1);
1212 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
1213 /* force size to size of the buffer */
1214 dev_warn(p->dev, "vbo resource seems too big for the bo\n");
1215 ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
1216 }
673 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); 1217 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
674 ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 1218 ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
675 break; 1219 break;
@@ -760,11 +1304,28 @@ int r600_cs_parse(struct radeon_cs_parser *p)
760 struct r600_cs_track *track; 1304 struct r600_cs_track *track;
761 int r; 1305 int r;
762 1306
763 track = kzalloc(sizeof(*track), GFP_KERNEL); 1307 if (p->track == NULL) {
764 p->track = track; 1308 /* initialize tracker, we are in kms */
1309 track = kzalloc(sizeof(*track), GFP_KERNEL);
1310 if (track == NULL)
1311 return -ENOMEM;
1312 r600_cs_track_init(track);
1313 if (p->rdev->family < CHIP_RV770) {
1314 track->npipes = p->rdev->config.r600.tiling_npipes;
1315 track->nbanks = p->rdev->config.r600.tiling_nbanks;
1316 track->group_size = p->rdev->config.r600.tiling_group_size;
1317 } else if (p->rdev->family <= CHIP_RV740) {
1318 track->npipes = p->rdev->config.rv770.tiling_npipes;
1319 track->nbanks = p->rdev->config.rv770.tiling_nbanks;
1320 track->group_size = p->rdev->config.rv770.tiling_group_size;
1321 }
1322 p->track = track;
1323 }
765 do { 1324 do {
766 r = r600_cs_packet_parse(p, &pkt, p->idx); 1325 r = r600_cs_packet_parse(p, &pkt, p->idx);
767 if (r) { 1326 if (r) {
1327 kfree(p->track);
1328 p->track = NULL;
768 return r; 1329 return r;
769 } 1330 }
770 p->idx += pkt.count + 2; 1331 p->idx += pkt.count + 2;
@@ -779,9 +1340,13 @@ int r600_cs_parse(struct radeon_cs_parser *p)
779 break; 1340 break;
780 default: 1341 default:
781 DRM_ERROR("Unknown packet type %d !\n", pkt.type); 1342 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
1343 kfree(p->track);
1344 p->track = NULL;
782 return -EINVAL; 1345 return -EINVAL;
783 } 1346 }
784 if (r) { 1347 if (r) {
1348 kfree(p->track);
1349 p->track = NULL;
785 return r; 1350 return r;
786 } 1351 }
787 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 1352 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
@@ -791,6 +1356,8 @@ int r600_cs_parse(struct radeon_cs_parser *p)
791 mdelay(1); 1356 mdelay(1);
792 } 1357 }
793#endif 1358#endif
1359 kfree(p->track);
1360 p->track = NULL;
794 return 0; 1361 return 0;
795} 1362}
796 1363
@@ -833,9 +1400,16 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
833{ 1400{
834 struct radeon_cs_parser parser; 1401 struct radeon_cs_parser parser;
835 struct radeon_cs_chunk *ib_chunk; 1402 struct radeon_cs_chunk *ib_chunk;
836 struct radeon_ib fake_ib; 1403 struct radeon_ib fake_ib;
1404 struct r600_cs_track *track;
837 int r; 1405 int r;
838 1406
1407 /* initialize tracker */
1408 track = kzalloc(sizeof(*track), GFP_KERNEL);
1409 if (track == NULL)
1410 return -ENOMEM;
1411 r600_cs_track_init(track);
1412 r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
839 /* initialize parser */ 1413 /* initialize parser */
840 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 1414 memset(&parser, 0, sizeof(struct radeon_cs_parser));
841 parser.filp = filp; 1415 parser.filp = filp;
@@ -843,6 +1417,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
843 parser.rdev = NULL; 1417 parser.rdev = NULL;
844 parser.family = family; 1418 parser.family = family;
845 parser.ib = &fake_ib; 1419 parser.ib = &fake_ib;
1420 parser.track = track;
846 fake_ib.ptr = ib; 1421 fake_ib.ptr = ib;
847 r = radeon_cs_parser_init(&parser, data); 1422 r = radeon_cs_parser_init(&parser, data);
848 if (r) { 1423 if (r) {
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 30480881aed1..5b2e4d442823 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -883,6 +883,16 @@
883 883
884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
885 885
886#define R_028C04_PA_SC_AA_CONFIG 0x028C04
887#define S_028C04_MSAA_NUM_SAMPLES(x) (((x) & 0x3) << 0)
888#define G_028C04_MSAA_NUM_SAMPLES(x) (((x) >> 0) & 0x3)
889#define C_028C04_MSAA_NUM_SAMPLES 0xFFFFFFFC
890#define S_028C04_AA_MASK_CENTROID_DTMN(x) (((x) & 0x1) << 4)
891#define G_028C04_AA_MASK_CENTROID_DTMN(x) (((x) >> 4) & 0x1)
892#define C_028C04_AA_MASK_CENTROID_DTMN 0xFFFFFFEF
893#define S_028C04_MAX_SAMPLE_DIST(x) (((x) & 0xF) << 13)
894#define G_028C04_MAX_SAMPLE_DIST(x) (((x) >> 13) & 0xF)
895#define C_028C04_MAX_SAMPLE_DIST 0xFFFE1FFF
886#define R_0280E0_CB_COLOR0_FRAG 0x0280E0 896#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
887#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0) 897#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
888#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF) 898#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
@@ -905,6 +915,461 @@
905#define R_0280D4_CB_COLOR5_TILE 0x0280D4 915#define R_0280D4_CB_COLOR5_TILE 0x0280D4
906#define R_0280D8_CB_COLOR6_TILE 0x0280D8 916#define R_0280D8_CB_COLOR6_TILE 0x0280D8
907#define R_0280DC_CB_COLOR7_TILE 0x0280DC 917#define R_0280DC_CB_COLOR7_TILE 0x0280DC
908 918#define R_0280A0_CB_COLOR0_INFO 0x0280A0
919#define S_0280A0_ENDIAN(x) (((x) & 0x3) << 0)
920#define G_0280A0_ENDIAN(x) (((x) >> 0) & 0x3)
921#define C_0280A0_ENDIAN 0xFFFFFFFC
922#define S_0280A0_FORMAT(x) (((x) & 0x3F) << 2)
923#define G_0280A0_FORMAT(x) (((x) >> 2) & 0x3F)
924#define C_0280A0_FORMAT 0xFFFFFF03
925#define V_0280A0_COLOR_INVALID 0x00000000
926#define V_0280A0_COLOR_8 0x00000001
927#define V_0280A0_COLOR_4_4 0x00000002
928#define V_0280A0_COLOR_3_3_2 0x00000003
929#define V_0280A0_COLOR_16 0x00000005
930#define V_0280A0_COLOR_16_FLOAT 0x00000006
931#define V_0280A0_COLOR_8_8 0x00000007
932#define V_0280A0_COLOR_5_6_5 0x00000008
933#define V_0280A0_COLOR_6_5_5 0x00000009
934#define V_0280A0_COLOR_1_5_5_5 0x0000000A
935#define V_0280A0_COLOR_4_4_4_4 0x0000000B
936#define V_0280A0_COLOR_5_5_5_1 0x0000000C
937#define V_0280A0_COLOR_32 0x0000000D
938#define V_0280A0_COLOR_32_FLOAT 0x0000000E
939#define V_0280A0_COLOR_16_16 0x0000000F
940#define V_0280A0_COLOR_16_16_FLOAT 0x00000010
941#define V_0280A0_COLOR_8_24 0x00000011
942#define V_0280A0_COLOR_8_24_FLOAT 0x00000012
943#define V_0280A0_COLOR_24_8 0x00000013
944#define V_0280A0_COLOR_24_8_FLOAT 0x00000014
945#define V_0280A0_COLOR_10_11_11 0x00000015
946#define V_0280A0_COLOR_10_11_11_FLOAT 0x00000016
947#define V_0280A0_COLOR_11_11_10 0x00000017
948#define V_0280A0_COLOR_11_11_10_FLOAT 0x00000018
949#define V_0280A0_COLOR_2_10_10_10 0x00000019
950#define V_0280A0_COLOR_8_8_8_8 0x0000001A
951#define V_0280A0_COLOR_10_10_10_2 0x0000001B
952#define V_0280A0_COLOR_X24_8_32_FLOAT 0x0000001C
953#define V_0280A0_COLOR_32_32 0x0000001D
954#define V_0280A0_COLOR_32_32_FLOAT 0x0000001E
955#define V_0280A0_COLOR_16_16_16_16 0x0000001F
956#define V_0280A0_COLOR_16_16_16_16_FLOAT 0x00000020
957#define V_0280A0_COLOR_32_32_32_32 0x00000022
958#define V_0280A0_COLOR_32_32_32_32_FLOAT 0x00000023
959#define S_0280A0_ARRAY_MODE(x) (((x) & 0xF) << 8)
960#define G_0280A0_ARRAY_MODE(x) (((x) >> 8) & 0xF)
961#define C_0280A0_ARRAY_MODE 0xFFFFF0FF
962#define V_0280A0_ARRAY_LINEAR_GENERAL 0x00000000
963#define V_0280A0_ARRAY_LINEAR_ALIGNED 0x00000001
964#define V_0280A0_ARRAY_1D_TILED_THIN1 0x00000002
965#define V_0280A0_ARRAY_2D_TILED_THIN1 0x00000004
966#define S_0280A0_NUMBER_TYPE(x) (((x) & 0x7) << 12)
967#define G_0280A0_NUMBER_TYPE(x) (((x) >> 12) & 0x7)
968#define C_0280A0_NUMBER_TYPE 0xFFFF8FFF
969#define S_0280A0_READ_SIZE(x) (((x) & 0x1) << 15)
970#define G_0280A0_READ_SIZE(x) (((x) >> 15) & 0x1)
971#define C_0280A0_READ_SIZE 0xFFFF7FFF
972#define S_0280A0_COMP_SWAP(x) (((x) & 0x3) << 16)
973#define G_0280A0_COMP_SWAP(x) (((x) >> 16) & 0x3)
974#define C_0280A0_COMP_SWAP 0xFFFCFFFF
975#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18)
976#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3)
977#define C_0280A0_TILE_MODE 0xFFF3FFFF
978#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20)
979#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1)
980#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF
981#define S_0280A0_CLEAR_COLOR(x) (((x) & 0x1) << 21)
982#define G_0280A0_CLEAR_COLOR(x) (((x) >> 21) & 0x1)
983#define C_0280A0_CLEAR_COLOR 0xFFDFFFFF
984#define S_0280A0_BLEND_BYPASS(x) (((x) & 0x1) << 22)
985#define G_0280A0_BLEND_BYPASS(x) (((x) >> 22) & 0x1)
986#define C_0280A0_BLEND_BYPASS 0xFFBFFFFF
987#define S_0280A0_BLEND_FLOAT32(x) (((x) & 0x1) << 23)
988#define G_0280A0_BLEND_FLOAT32(x) (((x) >> 23) & 0x1)
989#define C_0280A0_BLEND_FLOAT32 0xFF7FFFFF
990#define S_0280A0_SIMPLE_FLOAT(x) (((x) & 0x1) << 24)
991#define G_0280A0_SIMPLE_FLOAT(x) (((x) >> 24) & 0x1)
992#define C_0280A0_SIMPLE_FLOAT 0xFEFFFFFF
993#define S_0280A0_ROUND_MODE(x) (((x) & 0x1) << 25)
994#define G_0280A0_ROUND_MODE(x) (((x) >> 25) & 0x1)
995#define C_0280A0_ROUND_MODE 0xFDFFFFFF
996#define S_0280A0_TILE_COMPACT(x) (((x) & 0x1) << 26)
997#define G_0280A0_TILE_COMPACT(x) (((x) >> 26) & 0x1)
998#define C_0280A0_TILE_COMPACT 0xFBFFFFFF
999#define S_0280A0_SOURCE_FORMAT(x) (((x) & 0x1) << 27)
1000#define G_0280A0_SOURCE_FORMAT(x) (((x) >> 27) & 0x1)
1001#define C_0280A0_SOURCE_FORMAT 0xF7FFFFFF
1002#define R_0280A4_CB_COLOR1_INFO 0x0280A4
1003#define R_0280A8_CB_COLOR2_INFO 0x0280A8
1004#define R_0280AC_CB_COLOR3_INFO 0x0280AC
1005#define R_0280B0_CB_COLOR4_INFO 0x0280B0
1006#define R_0280B4_CB_COLOR5_INFO 0x0280B4
1007#define R_0280B8_CB_COLOR6_INFO 0x0280B8
1008#define R_0280BC_CB_COLOR7_INFO 0x0280BC
1009#define R_028060_CB_COLOR0_SIZE 0x028060
1010#define S_028060_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
1011#define G_028060_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
1012#define C_028060_PITCH_TILE_MAX 0xFFFFFC00
1013#define S_028060_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
1014#define G_028060_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
1015#define C_028060_SLICE_TILE_MAX 0xC00003FF
1016#define R_028064_CB_COLOR1_SIZE 0x028064
1017#define R_028068_CB_COLOR2_SIZE 0x028068
1018#define R_02806C_CB_COLOR3_SIZE 0x02806C
1019#define R_028070_CB_COLOR4_SIZE 0x028070
1020#define R_028074_CB_COLOR5_SIZE 0x028074
1021#define R_028078_CB_COLOR6_SIZE 0x028078
1022#define R_02807C_CB_COLOR7_SIZE 0x02807C
1023#define R_028238_CB_TARGET_MASK 0x028238
1024#define S_028238_TARGET0_ENABLE(x) (((x) & 0xF) << 0)
1025#define G_028238_TARGET0_ENABLE(x) (((x) >> 0) & 0xF)
1026#define C_028238_TARGET0_ENABLE 0xFFFFFFF0
1027#define S_028238_TARGET1_ENABLE(x) (((x) & 0xF) << 4)
1028#define G_028238_TARGET1_ENABLE(x) (((x) >> 4) & 0xF)
1029#define C_028238_TARGET1_ENABLE 0xFFFFFF0F
1030#define S_028238_TARGET2_ENABLE(x) (((x) & 0xF) << 8)
1031#define G_028238_TARGET2_ENABLE(x) (((x) >> 8) & 0xF)
1032#define C_028238_TARGET2_ENABLE 0xFFFFF0FF
1033#define S_028238_TARGET3_ENABLE(x) (((x) & 0xF) << 12)
1034#define G_028238_TARGET3_ENABLE(x) (((x) >> 12) & 0xF)
1035#define C_028238_TARGET3_ENABLE 0xFFFF0FFF
1036#define S_028238_TARGET4_ENABLE(x) (((x) & 0xF) << 16)
1037#define G_028238_TARGET4_ENABLE(x) (((x) >> 16) & 0xF)
1038#define C_028238_TARGET4_ENABLE 0xFFF0FFFF
1039#define S_028238_TARGET5_ENABLE(x) (((x) & 0xF) << 20)
1040#define G_028238_TARGET5_ENABLE(x) (((x) >> 20) & 0xF)
1041#define C_028238_TARGET5_ENABLE 0xFF0FFFFF
1042#define S_028238_TARGET6_ENABLE(x) (((x) & 0xF) << 24)
1043#define G_028238_TARGET6_ENABLE(x) (((x) >> 24) & 0xF)
1044#define C_028238_TARGET6_ENABLE 0xF0FFFFFF
1045#define S_028238_TARGET7_ENABLE(x) (((x) & 0xF) << 28)
1046#define G_028238_TARGET7_ENABLE(x) (((x) >> 28) & 0xF)
1047#define C_028238_TARGET7_ENABLE 0x0FFFFFFF
1048#define R_02823C_CB_SHADER_MASK 0x02823C
1049#define S_02823C_OUTPUT0_ENABLE(x) (((x) & 0xF) << 0)
1050#define G_02823C_OUTPUT0_ENABLE(x) (((x) >> 0) & 0xF)
1051#define C_02823C_OUTPUT0_ENABLE 0xFFFFFFF0
1052#define S_02823C_OUTPUT1_ENABLE(x) (((x) & 0xF) << 4)
1053#define G_02823C_OUTPUT1_ENABLE(x) (((x) >> 4) & 0xF)
1054#define C_02823C_OUTPUT1_ENABLE 0xFFFFFF0F
1055#define S_02823C_OUTPUT2_ENABLE(x) (((x) & 0xF) << 8)
1056#define G_02823C_OUTPUT2_ENABLE(x) (((x) >> 8) & 0xF)
1057#define C_02823C_OUTPUT2_ENABLE 0xFFFFF0FF
1058#define S_02823C_OUTPUT3_ENABLE(x) (((x) & 0xF) << 12)
1059#define G_02823C_OUTPUT3_ENABLE(x) (((x) >> 12) & 0xF)
1060#define C_02823C_OUTPUT3_ENABLE 0xFFFF0FFF
1061#define S_02823C_OUTPUT4_ENABLE(x) (((x) & 0xF) << 16)
1062#define G_02823C_OUTPUT4_ENABLE(x) (((x) >> 16) & 0xF)
1063#define C_02823C_OUTPUT4_ENABLE 0xFFF0FFFF
1064#define S_02823C_OUTPUT5_ENABLE(x) (((x) & 0xF) << 20)
1065#define G_02823C_OUTPUT5_ENABLE(x) (((x) >> 20) & 0xF)
1066#define C_02823C_OUTPUT5_ENABLE 0xFF0FFFFF
1067#define S_02823C_OUTPUT6_ENABLE(x) (((x) & 0xF) << 24)
1068#define G_02823C_OUTPUT6_ENABLE(x) (((x) >> 24) & 0xF)
1069#define C_02823C_OUTPUT6_ENABLE 0xF0FFFFFF
1070#define S_02823C_OUTPUT7_ENABLE(x) (((x) & 0xF) << 28)
1071#define G_02823C_OUTPUT7_ENABLE(x) (((x) >> 28) & 0xF)
1072#define C_02823C_OUTPUT7_ENABLE 0x0FFFFFFF
1073#define R_028AB0_VGT_STRMOUT_EN 0x028AB0
1074#define S_028AB0_STREAMOUT(x) (((x) & 0x1) << 0)
1075#define G_028AB0_STREAMOUT(x) (((x) >> 0) & 0x1)
1076#define C_028AB0_STREAMOUT 0xFFFFFFFE
1077#define R_028B20_VGT_STRMOUT_BUFFER_EN 0x028B20
1078#define S_028B20_BUFFER_0_EN(x) (((x) & 0x1) << 0)
1079#define G_028B20_BUFFER_0_EN(x) (((x) >> 0) & 0x1)
1080#define C_028B20_BUFFER_0_EN 0xFFFFFFFE
1081#define S_028B20_BUFFER_1_EN(x) (((x) & 0x1) << 1)
1082#define G_028B20_BUFFER_1_EN(x) (((x) >> 1) & 0x1)
1083#define C_028B20_BUFFER_1_EN 0xFFFFFFFD
1084#define S_028B20_BUFFER_2_EN(x) (((x) & 0x1) << 2)
1085#define G_028B20_BUFFER_2_EN(x) (((x) >> 2) & 0x1)
1086#define C_028B20_BUFFER_2_EN 0xFFFFFFFB
1087#define S_028B20_BUFFER_3_EN(x) (((x) & 0x1) << 3)
1088#define G_028B20_BUFFER_3_EN(x) (((x) >> 3) & 0x1)
1089#define C_028B20_BUFFER_3_EN 0xFFFFFFF7
1090#define S_028B20_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1091#define G_028B20_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1092#define C_028B20_SIZE 0x00000000
1093#define R_038000_SQ_TEX_RESOURCE_WORD0_0 0x038000
1094#define S_038000_DIM(x) (((x) & 0x7) << 0)
1095#define G_038000_DIM(x) (((x) >> 0) & 0x7)
1096#define C_038000_DIM 0xFFFFFFF8
1097#define V_038000_SQ_TEX_DIM_1D 0x00000000
1098#define V_038000_SQ_TEX_DIM_2D 0x00000001
1099#define V_038000_SQ_TEX_DIM_3D 0x00000002
1100#define V_038000_SQ_TEX_DIM_CUBEMAP 0x00000003
1101#define V_038000_SQ_TEX_DIM_1D_ARRAY 0x00000004
1102#define V_038000_SQ_TEX_DIM_2D_ARRAY 0x00000005
1103#define V_038000_SQ_TEX_DIM_2D_MSAA 0x00000006
1104#define V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007
1105#define S_038000_TILE_MODE(x) (((x) & 0xF) << 3)
1106#define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF)
1107#define C_038000_TILE_MODE 0xFFFFFF87
1108#define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7)
1109#define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1)
1110#define C_038000_TILE_TYPE 0xFFFFFF7F
1111#define S_038000_PITCH(x) (((x) & 0x7FF) << 8)
1112#define G_038000_PITCH(x) (((x) >> 8) & 0x7FF)
1113#define C_038000_PITCH 0xFFF800FF
1114#define S_038000_TEX_WIDTH(x) (((x) & 0x1FFF) << 19)
1115#define G_038000_TEX_WIDTH(x) (((x) >> 19) & 0x1FFF)
1116#define C_038000_TEX_WIDTH 0x0007FFFF
1117#define R_038004_SQ_TEX_RESOURCE_WORD1_0 0x038004
1118#define S_038004_TEX_HEIGHT(x) (((x) & 0x1FFF) << 0)
1119#define G_038004_TEX_HEIGHT(x) (((x) >> 0) & 0x1FFF)
1120#define C_038004_TEX_HEIGHT 0xFFFFE000
1121#define S_038004_TEX_DEPTH(x) (((x) & 0x1FFF) << 13)
1122#define G_038004_TEX_DEPTH(x) (((x) >> 13) & 0x1FFF)
1123#define C_038004_TEX_DEPTH 0xFC001FFF
1124#define S_038004_DATA_FORMAT(x) (((x) & 0x3F) << 26)
1125#define G_038004_DATA_FORMAT(x) (((x) >> 26) & 0x3F)
1126#define C_038004_DATA_FORMAT 0x03FFFFFF
1127#define V_038004_COLOR_INVALID 0x00000000
1128#define V_038004_COLOR_8 0x00000001
1129#define V_038004_COLOR_4_4 0x00000002
1130#define V_038004_COLOR_3_3_2 0x00000003
1131#define V_038004_COLOR_16 0x00000005
1132#define V_038004_COLOR_16_FLOAT 0x00000006
1133#define V_038004_COLOR_8_8 0x00000007
1134#define V_038004_COLOR_5_6_5 0x00000008
1135#define V_038004_COLOR_6_5_5 0x00000009
1136#define V_038004_COLOR_1_5_5_5 0x0000000A
1137#define V_038004_COLOR_4_4_4_4 0x0000000B
1138#define V_038004_COLOR_5_5_5_1 0x0000000C
1139#define V_038004_COLOR_32 0x0000000D
1140#define V_038004_COLOR_32_FLOAT 0x0000000E
1141#define V_038004_COLOR_16_16 0x0000000F
1142#define V_038004_COLOR_16_16_FLOAT 0x00000010
1143#define V_038004_COLOR_8_24 0x00000011
1144#define V_038004_COLOR_8_24_FLOAT 0x00000012
1145#define V_038004_COLOR_24_8 0x00000013
1146#define V_038004_COLOR_24_8_FLOAT 0x00000014
1147#define V_038004_COLOR_10_11_11 0x00000015
1148#define V_038004_COLOR_10_11_11_FLOAT 0x00000016
1149#define V_038004_COLOR_11_11_10 0x00000017
1150#define V_038004_COLOR_11_11_10_FLOAT 0x00000018
1151#define V_038004_COLOR_2_10_10_10 0x00000019
1152#define V_038004_COLOR_8_8_8_8 0x0000001A
1153#define V_038004_COLOR_10_10_10_2 0x0000001B
1154#define V_038004_COLOR_X24_8_32_FLOAT 0x0000001C
1155#define V_038004_COLOR_32_32 0x0000001D
1156#define V_038004_COLOR_32_32_FLOAT 0x0000001E
1157#define V_038004_COLOR_16_16_16_16 0x0000001F
1158#define V_038004_COLOR_16_16_16_16_FLOAT 0x00000020
1159#define V_038004_COLOR_32_32_32_32 0x00000022
1160#define V_038004_COLOR_32_32_32_32_FLOAT 0x00000023
1161#define V_038004_FMT_1 0x00000025
1162#define V_038004_FMT_GB_GR 0x00000027
1163#define V_038004_FMT_BG_RG 0x00000028
1164#define V_038004_FMT_32_AS_8 0x00000029
1165#define V_038004_FMT_32_AS_8_8 0x0000002A
1166#define V_038004_FMT_5_9_9_9_SHAREDEXP 0x0000002B
1167#define V_038004_FMT_8_8_8 0x0000002C
1168#define V_038004_FMT_16_16_16 0x0000002D
1169#define V_038004_FMT_16_16_16_FLOAT 0x0000002E
1170#define V_038004_FMT_32_32_32 0x0000002F
1171#define V_038004_FMT_32_32_32_FLOAT 0x00000030
1172#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
1173#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
1174#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
1175#define C_038010_FORMAT_COMP_X 0xFFFFFFFC
1176#define S_038010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2)
1177#define G_038010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3)
1178#define C_038010_FORMAT_COMP_Y 0xFFFFFFF3
1179#define S_038010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4)
1180#define G_038010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3)
1181#define C_038010_FORMAT_COMP_Z 0xFFFFFFCF
1182#define S_038010_FORMAT_COMP_W(x) (((x) & 0x3) << 6)
1183#define G_038010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3)
1184#define C_038010_FORMAT_COMP_W 0xFFFFFF3F
1185#define S_038010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8)
1186#define G_038010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3)
1187#define C_038010_NUM_FORMAT_ALL 0xFFFFFCFF
1188#define S_038010_SRF_MODE_ALL(x) (((x) & 0x1) << 10)
1189#define G_038010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1)
1190#define C_038010_SRF_MODE_ALL 0xFFFFFBFF
1191#define S_038010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11)
1192#define G_038010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1)
1193#define C_038010_FORCE_DEGAMMA 0xFFFFF7FF
1194#define S_038010_ENDIAN_SWAP(x) (((x) & 0x3) << 12)
1195#define G_038010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3)
1196#define C_038010_ENDIAN_SWAP 0xFFFFCFFF
1197#define S_038010_REQUEST_SIZE(x) (((x) & 0x3) << 14)
1198#define G_038010_REQUEST_SIZE(x) (((x) >> 14) & 0x3)
1199#define C_038010_REQUEST_SIZE 0xFFFF3FFF
1200#define S_038010_DST_SEL_X(x) (((x) & 0x7) << 16)
1201#define G_038010_DST_SEL_X(x) (((x) >> 16) & 0x7)
1202#define C_038010_DST_SEL_X 0xFFF8FFFF
1203#define S_038010_DST_SEL_Y(x) (((x) & 0x7) << 19)
1204#define G_038010_DST_SEL_Y(x) (((x) >> 19) & 0x7)
1205#define C_038010_DST_SEL_Y 0xFFC7FFFF
1206#define S_038010_DST_SEL_Z(x) (((x) & 0x7) << 22)
1207#define G_038010_DST_SEL_Z(x) (((x) >> 22) & 0x7)
1208#define C_038010_DST_SEL_Z 0xFE3FFFFF
1209#define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25)
1210#define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7)
1211#define C_038010_DST_SEL_W 0xF1FFFFFF
1212#define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28)
1213#define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
1214#define C_038010_BASE_LEVEL 0x0FFFFFFF
1215#define R_038014_SQ_TEX_RESOURCE_WORD5_0 0x038014
1216#define S_038014_LAST_LEVEL(x) (((x) & 0xF) << 0)
1217#define G_038014_LAST_LEVEL(x) (((x) >> 0) & 0xF)
1218#define C_038014_LAST_LEVEL 0xFFFFFFF0
1219#define S_038014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4)
1220#define G_038014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF)
1221#define C_038014_BASE_ARRAY 0xFFFE000F
1222#define S_038014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17)
1223#define G_038014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF)
1224#define C_038014_LAST_ARRAY 0xC001FFFF
1225#define R_0288A8_SQ_ESGS_RING_ITEMSIZE 0x0288A8
1226#define S_0288A8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1227#define G_0288A8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1228#define C_0288A8_ITEMSIZE 0xFFFF8000
1229#define R_008C44_SQ_ESGS_RING_SIZE 0x008C44
1230#define S_008C44_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1231#define G_008C44_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1232#define C_008C44_MEM_SIZE 0x00000000
1233#define R_0288B0_SQ_ESTMP_RING_ITEMSIZE 0x0288B0
1234#define S_0288B0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1235#define G_0288B0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1236#define C_0288B0_ITEMSIZE 0xFFFF8000
1237#define R_008C54_SQ_ESTMP_RING_SIZE 0x008C54
1238#define S_008C54_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1239#define G_008C54_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1240#define C_008C54_MEM_SIZE 0x00000000
1241#define R_0288C0_SQ_FBUF_RING_ITEMSIZE 0x0288C0
1242#define S_0288C0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1243#define G_0288C0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1244#define C_0288C0_ITEMSIZE 0xFFFF8000
1245#define R_008C74_SQ_FBUF_RING_SIZE 0x008C74
1246#define S_008C74_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1247#define G_008C74_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1248#define C_008C74_MEM_SIZE 0x00000000
1249#define R_0288B4_SQ_GSTMP_RING_ITEMSIZE 0x0288B4
1250#define S_0288B4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1251#define G_0288B4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1252#define C_0288B4_ITEMSIZE 0xFFFF8000
1253#define R_008C5C_SQ_GSTMP_RING_SIZE 0x008C5C
1254#define S_008C5C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1255#define G_008C5C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1256#define C_008C5C_MEM_SIZE 0x00000000
1257#define R_0288AC_SQ_GSVS_RING_ITEMSIZE 0x0288AC
1258#define S_0288AC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1259#define G_0288AC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1260#define C_0288AC_ITEMSIZE 0xFFFF8000
1261#define R_008C4C_SQ_GSVS_RING_SIZE 0x008C4C
1262#define S_008C4C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1263#define G_008C4C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1264#define C_008C4C_MEM_SIZE 0x00000000
1265#define R_0288BC_SQ_PSTMP_RING_ITEMSIZE 0x0288BC
1266#define S_0288BC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1267#define G_0288BC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1268#define C_0288BC_ITEMSIZE 0xFFFF8000
1269#define R_008C6C_SQ_PSTMP_RING_SIZE 0x008C6C
1270#define S_008C6C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1271#define G_008C6C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1272#define C_008C6C_MEM_SIZE 0x00000000
1273#define R_0288C4_SQ_REDUC_RING_ITEMSIZE 0x0288C4
1274#define S_0288C4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1275#define G_0288C4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1276#define C_0288C4_ITEMSIZE 0xFFFF8000
1277#define R_008C7C_SQ_REDUC_RING_SIZE 0x008C7C
1278#define S_008C7C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1279#define G_008C7C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1280#define C_008C7C_MEM_SIZE 0x00000000
1281#define R_0288B8_SQ_VSTMP_RING_ITEMSIZE 0x0288B8
1282#define S_0288B8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1283#define G_0288B8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1284#define C_0288B8_ITEMSIZE 0xFFFF8000
1285#define R_008C64_SQ_VSTMP_RING_SIZE 0x008C64
1286#define S_008C64_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1287#define G_008C64_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1288#define C_008C64_MEM_SIZE 0x00000000
1289#define R_0288C8_SQ_GS_VERT_ITEMSIZE 0x0288C8
1290#define S_0288C8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1291#define G_0288C8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1292#define C_0288C8_ITEMSIZE 0xFFFF8000
1293#define R_028010_DB_DEPTH_INFO 0x028010
1294#define S_028010_FORMAT(x) (((x) & 0x7) << 0)
1295#define G_028010_FORMAT(x) (((x) >> 0) & 0x7)
1296#define C_028010_FORMAT 0xFFFFFFF8
1297#define V_028010_DEPTH_INVALID 0x00000000
1298#define V_028010_DEPTH_16 0x00000001
1299#define V_028010_DEPTH_X8_24 0x00000002
1300#define V_028010_DEPTH_8_24 0x00000003
1301#define V_028010_DEPTH_X8_24_FLOAT 0x00000004
1302#define V_028010_DEPTH_8_24_FLOAT 0x00000005
1303#define V_028010_DEPTH_32_FLOAT 0x00000006
1304#define V_028010_DEPTH_X24_8_32_FLOAT 0x00000007
1305#define S_028010_READ_SIZE(x) (((x) & 0x1) << 3)
1306#define G_028010_READ_SIZE(x) (((x) >> 3) & 0x1)
1307#define C_028010_READ_SIZE 0xFFFFFFF7
1308#define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15)
1309#define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF)
1310#define C_028010_ARRAY_MODE 0xFFF87FFF
1311#define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25)
1312#define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1)
1313#define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF
1314#define S_028010_TILE_COMPACT(x) (((x) & 0x1) << 26)
1315#define G_028010_TILE_COMPACT(x) (((x) >> 26) & 0x1)
1316#define C_028010_TILE_COMPACT 0xFBFFFFFF
1317#define S_028010_ZRANGE_PRECISION(x) (((x) & 0x1) << 31)
1318#define G_028010_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1)
1319#define C_028010_ZRANGE_PRECISION 0x7FFFFFFF
1320#define R_028000_DB_DEPTH_SIZE 0x028000
1321#define S_028000_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
1322#define G_028000_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
1323#define C_028000_PITCH_TILE_MAX 0xFFFFFC00
1324#define S_028000_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
1325#define G_028000_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
1326#define C_028000_SLICE_TILE_MAX 0xC00003FF
1327#define R_028004_DB_DEPTH_VIEW 0x028004
1328#define S_028004_SLICE_START(x) (((x) & 0x7FF) << 0)
1329#define G_028004_SLICE_START(x) (((x) >> 0) & 0x7FF)
1330#define C_028004_SLICE_START 0xFFFFF800
1331#define S_028004_SLICE_MAX(x) (((x) & 0x7FF) << 13)
1332#define G_028004_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
1333#define C_028004_SLICE_MAX 0xFF001FFF
1334#define R_028800_DB_DEPTH_CONTROL 0x028800
1335#define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0)
1336#define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1)
1337#define C_028800_STENCIL_ENABLE 0xFFFFFFFE
1338#define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1)
1339#define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1)
1340#define C_028800_Z_ENABLE 0xFFFFFFFD
1341#define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2)
1342#define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1)
1343#define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB
1344#define S_028800_ZFUNC(x) (((x) & 0x7) << 4)
1345#define G_028800_ZFUNC(x) (((x) >> 4) & 0x7)
1346#define C_028800_ZFUNC 0xFFFFFF8F
1347#define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7)
1348#define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1)
1349#define C_028800_BACKFACE_ENABLE 0xFFFFFF7F
1350#define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8)
1351#define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7)
1352#define C_028800_STENCILFUNC 0xFFFFF8FF
1353#define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11)
1354#define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7)
1355#define C_028800_STENCILFAIL 0xFFFFC7FF
1356#define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14)
1357#define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7)
1358#define C_028800_STENCILZPASS 0xFFFE3FFF
1359#define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17)
1360#define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7)
1361#define C_028800_STENCILZFAIL 0xFFF1FFFF
1362#define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20)
1363#define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7)
1364#define C_028800_STENCILFUNC_BF 0xFF8FFFFF
1365#define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23)
1366#define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7)
1367#define C_028800_STENCILFAIL_BF 0xFC7FFFFF
1368#define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26)
1369#define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7)
1370#define C_028800_STENCILZPASS_BF 0xE3FFFFFF
1371#define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29)
1372#define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7)
1373#define C_028800_STENCILZFAIL_BF 0x1FFFFFFF
909 1374
910#endif 1375#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c0356bb193e5..829e26e8a4bb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -89,6 +89,7 @@ extern int radeon_testing;
89extern int radeon_connector_table; 89extern int radeon_connector_table;
90extern int radeon_tv; 90extern int radeon_tv;
91extern int radeon_new_pll; 91extern int radeon_new_pll;
92extern int radeon_dynpm;
92extern int radeon_audio; 93extern int radeon_audio;
93 94
94/* 95/*
@@ -118,6 +119,21 @@ struct radeon_device;
118/* 119/*
119 * BIOS. 120 * BIOS.
120 */ 121 */
122#define ATRM_BIOS_PAGE 4096
123
124#if defined(CONFIG_VGA_SWITCHEROO)
125bool radeon_atrm_supported(struct pci_dev *pdev);
126int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
127#else
128static inline bool radeon_atrm_supported(struct pci_dev *pdev)
129{
130 return false;
131}
132
133static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
134 return -EINVAL;
135}
136#endif
121bool radeon_get_bios(struct radeon_device *rdev); 137bool radeon_get_bios(struct radeon_device *rdev);
122 138
123 139
@@ -138,17 +154,23 @@ void radeon_dummy_page_fini(struct radeon_device *rdev);
138struct radeon_clock { 154struct radeon_clock {
139 struct radeon_pll p1pll; 155 struct radeon_pll p1pll;
140 struct radeon_pll p2pll; 156 struct radeon_pll p2pll;
157 struct radeon_pll dcpll;
141 struct radeon_pll spll; 158 struct radeon_pll spll;
142 struct radeon_pll mpll; 159 struct radeon_pll mpll;
143 /* 10 Khz units */ 160 /* 10 Khz units */
144 uint32_t default_mclk; 161 uint32_t default_mclk;
145 uint32_t default_sclk; 162 uint32_t default_sclk;
163 uint32_t default_dispclk;
164 uint32_t dp_extclk;
146}; 165};
147 166
148/* 167/*
149 * Power management 168 * Power management
150 */ 169 */
151int radeon_pm_init(struct radeon_device *rdev); 170int radeon_pm_init(struct radeon_device *rdev);
171void radeon_pm_compute_clocks(struct radeon_device *rdev);
172void radeon_combios_get_power_modes(struct radeon_device *rdev);
173void radeon_atombios_get_power_modes(struct radeon_device *rdev);
152 174
153/* 175/*
154 * Fences. 176 * Fences.
@@ -275,6 +297,7 @@ union radeon_gart_table {
275}; 297};
276 298
277#define RADEON_GPU_PAGE_SIZE 4096 299#define RADEON_GPU_PAGE_SIZE 4096
300#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
278 301
279struct radeon_gart { 302struct radeon_gart {
280 dma_addr_t table_addr; 303 dma_addr_t table_addr;
@@ -309,21 +332,19 @@ struct radeon_mc {
309 /* for some chips with <= 32MB we need to lie 332 /* for some chips with <= 32MB we need to lie
310 * about vram size near mc fb location */ 333 * about vram size near mc fb location */
311 u64 mc_vram_size; 334 u64 mc_vram_size;
312 u64 gtt_location; 335 u64 visible_vram_size;
313 u64 gtt_size; 336 u64 gtt_size;
314 u64 gtt_start; 337 u64 gtt_start;
315 u64 gtt_end; 338 u64 gtt_end;
316 u64 vram_location;
317 u64 vram_start; 339 u64 vram_start;
318 u64 vram_end; 340 u64 vram_end;
319 unsigned vram_width; 341 unsigned vram_width;
320 u64 real_vram_size; 342 u64 real_vram_size;
321 int vram_mtrr; 343 int vram_mtrr;
322 bool vram_is_ddr; 344 bool vram_is_ddr;
323 bool igp_sideport_enabled; 345 bool igp_sideport_enabled;
324}; 346};
325 347
326int radeon_mc_setup(struct radeon_device *rdev);
327bool radeon_combios_sideport_present(struct radeon_device *rdev); 348bool radeon_combios_sideport_present(struct radeon_device *rdev);
328bool radeon_atombios_sideport_present(struct radeon_device *rdev); 349bool radeon_atombios_sideport_present(struct radeon_device *rdev);
329 350
@@ -348,6 +369,7 @@ struct radeon_irq {
348 bool sw_int; 369 bool sw_int;
349 /* FIXME: use a define max crtc rather than hardcode it */ 370 /* FIXME: use a define max crtc rather than hardcode it */
350 bool crtc_vblank_int[2]; 371 bool crtc_vblank_int[2];
372 wait_queue_head_t vblank_queue;
351 /* FIXME: use defines for max hpd/dacs */ 373 /* FIXME: use defines for max hpd/dacs */
352 bool hpd[6]; 374 bool hpd[6];
353 spinlock_t sw_lock; 375 spinlock_t sw_lock;
@@ -379,6 +401,7 @@ struct radeon_ib {
379struct radeon_ib_pool { 401struct radeon_ib_pool {
380 struct mutex mutex; 402 struct mutex mutex;
381 struct radeon_bo *robj; 403 struct radeon_bo *robj;
404 struct list_head bogus_ib;
382 struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; 405 struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
383 bool ready; 406 bool ready;
384 unsigned head_id; 407 unsigned head_id;
@@ -433,6 +456,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
433int radeon_ib_pool_init(struct radeon_device *rdev); 456int radeon_ib_pool_init(struct radeon_device *rdev);
434void radeon_ib_pool_fini(struct radeon_device *rdev); 457void radeon_ib_pool_fini(struct radeon_device *rdev);
435int radeon_ib_test(struct radeon_device *rdev); 458int radeon_ib_test(struct radeon_device *rdev);
459extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
436/* Ring access between begin & end cannot sleep */ 460/* Ring access between begin & end cannot sleep */
437void radeon_ring_free_size(struct radeon_device *rdev); 461void radeon_ring_free_size(struct radeon_device *rdev);
438int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw); 462int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
@@ -570,7 +594,99 @@ struct radeon_wb {
570 * Equation between gpu/memory clock and available bandwidth is hw dependent 594 * Equation between gpu/memory clock and available bandwidth is hw dependent
571 * (type of memory, bus size, efficiency, ...) 595 * (type of memory, bus size, efficiency, ...)
572 */ 596 */
597enum radeon_pm_state {
598 PM_STATE_DISABLED,
599 PM_STATE_MINIMUM,
600 PM_STATE_PAUSED,
601 PM_STATE_ACTIVE
602};
603enum radeon_pm_action {
604 PM_ACTION_NONE,
605 PM_ACTION_MINIMUM,
606 PM_ACTION_DOWNCLOCK,
607 PM_ACTION_UPCLOCK
608};
609
610enum radeon_voltage_type {
611 VOLTAGE_NONE = 0,
612 VOLTAGE_GPIO,
613 VOLTAGE_VDDC,
614 VOLTAGE_SW
615};
616
617enum radeon_pm_state_type {
618 POWER_STATE_TYPE_DEFAULT,
619 POWER_STATE_TYPE_POWERSAVE,
620 POWER_STATE_TYPE_BATTERY,
621 POWER_STATE_TYPE_BALANCED,
622 POWER_STATE_TYPE_PERFORMANCE,
623};
624
625enum radeon_pm_clock_mode_type {
626 POWER_MODE_TYPE_DEFAULT,
627 POWER_MODE_TYPE_LOW,
628 POWER_MODE_TYPE_MID,
629 POWER_MODE_TYPE_HIGH,
630};
631
632struct radeon_voltage {
633 enum radeon_voltage_type type;
634 /* gpio voltage */
635 struct radeon_gpio_rec gpio;
636 u32 delay; /* delay in usec from voltage drop to sclk change */
637 bool active_high; /* voltage drop is active when bit is high */
638 /* VDDC voltage */
639 u8 vddc_id; /* index into vddc voltage table */
640 u8 vddci_id; /* index into vddci voltage table */
641 bool vddci_enabled;
642 /* r6xx+ sw */
643 u32 voltage;
644};
645
646struct radeon_pm_non_clock_info {
647 /* pcie lanes */
648 int pcie_lanes;
649 /* standardized non-clock flags */
650 u32 flags;
651};
652
653struct radeon_pm_clock_info {
654 /* memory clock */
655 u32 mclk;
656 /* engine clock */
657 u32 sclk;
658 /* voltage info */
659 struct radeon_voltage voltage;
660 /* standardized clock flags - not sure we'll need these */
661 u32 flags;
662};
663
664struct radeon_power_state {
665 enum radeon_pm_state_type type;
666 /* XXX: use a define for num clock modes */
667 struct radeon_pm_clock_info clock_info[8];
668 /* number of valid clock modes in this power state */
669 int num_clock_modes;
670 struct radeon_pm_clock_info *default_clock_mode;
671 /* non clock info about this state */
672 struct radeon_pm_non_clock_info non_clock_info;
673 bool voltage_drop_active;
674};
675
676/*
677 * Some modes are overclocked by very low value, accept them
678 */
679#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
680
573struct radeon_pm { 681struct radeon_pm {
682 struct mutex mutex;
683 struct delayed_work idle_work;
684 enum radeon_pm_state state;
685 enum radeon_pm_action planned_action;
686 unsigned long action_timeout;
687 bool downclocked;
688 int active_crtcs;
689 int req_vblank;
574 fixed20_12 max_bandwidth; 690 fixed20_12 max_bandwidth;
575 fixed20_12 igp_sideport_mclk; 691 fixed20_12 igp_sideport_mclk;
576 fixed20_12 igp_system_mclk; 692 fixed20_12 igp_system_mclk;
@@ -582,6 +698,15 @@ struct radeon_pm {
582 fixed20_12 core_bandwidth; 698 fixed20_12 core_bandwidth;
583 fixed20_12 sclk; 699 fixed20_12 sclk;
584 fixed20_12 needed_bandwidth; 700 fixed20_12 needed_bandwidth;
701 /* XXX: use a define for num power modes */
702 struct radeon_power_state power_state[8];
703 /* number of valid power states */
704 int num_power_states;
705 struct radeon_power_state *current_power_state;
706 struct radeon_pm_clock_info *current_clock_mode;
707 struct radeon_power_state *requested_power_state;
708 struct radeon_pm_clock_info *requested_clock_mode;
709 struct radeon_power_state *default_power_state;
585}; 710};
586 711
587 712
@@ -651,6 +776,7 @@ struct radeon_asic {
651 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); 776 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
652 uint32_t (*get_memory_clock)(struct radeon_device *rdev); 777 uint32_t (*get_memory_clock)(struct radeon_device *rdev);
653 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); 778 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
779 int (*get_pcie_lanes)(struct radeon_device *rdev);
654 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); 780 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
655 void (*set_clock_gating)(struct radeon_device *rdev, int enable); 781 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
656 int (*set_surface_reg)(struct radeon_device *rdev, int reg, 782 int (*set_surface_reg)(struct radeon_device *rdev, int reg,
@@ -701,6 +827,9 @@ struct r600_asic {
701 unsigned sx_max_export_pos_size; 827 unsigned sx_max_export_pos_size;
702 unsigned sx_max_export_smx_size; 828 unsigned sx_max_export_smx_size;
703 unsigned sq_num_cf_insts; 829 unsigned sq_num_cf_insts;
830 unsigned tiling_nbanks;
831 unsigned tiling_npipes;
832 unsigned tiling_group_size;
704}; 833};
705 834
706struct rv770_asic { 835struct rv770_asic {
@@ -721,6 +850,9 @@ struct rv770_asic {
721 unsigned sc_prim_fifo_size; 850 unsigned sc_prim_fifo_size;
722 unsigned sc_hiz_tile_fifo_size; 851 unsigned sc_hiz_tile_fifo_size;
723 unsigned sc_earlyz_tile_fifo_fize; 852 unsigned sc_earlyz_tile_fifo_fize;
853 unsigned tiling_nbanks;
854 unsigned tiling_npipes;
855 unsigned tiling_group_size;
724}; 856};
725 857
726union radeon_asic_config { 858union radeon_asic_config {
@@ -830,6 +962,8 @@ struct radeon_device {
830 struct r600_ih ih; /* r6/700 interrupt ring */ 962 struct r600_ih ih; /* r6/700 interrupt ring */
831 struct workqueue_struct *wq; 963 struct workqueue_struct *wq;
832 struct work_struct hotplug_work; 964 struct work_struct hotplug_work;
965 int num_crtc; /* number of crtcs */
966 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
833 967
834 /* audio stuff */ 968 /* audio stuff */
835 struct timer_list audio_timer; 969 struct timer_list audio_timer;
@@ -838,6 +972,8 @@ struct radeon_device {
838 int audio_bits_per_sample; 972 int audio_bits_per_sample;
839 uint8_t audio_status_bits; 973 uint8_t audio_status_bits;
840 uint8_t audio_category_code; 974 uint8_t audio_category_code;
975
976 bool powered_down;
841}; 977};
842 978
843int radeon_device_init(struct radeon_device *rdev, 979int radeon_device_init(struct radeon_device *rdev,
@@ -895,6 +1031,8 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
895#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v)) 1031#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
896#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg)) 1032#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
897#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v)) 1033#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
1034#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
1035#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
898#define WREG32_P(reg, val, mask) \ 1036#define WREG32_P(reg, val, mask) \
899 do { \ 1037 do { \
900 uint32_t tmp_ = RREG32(reg); \ 1038 uint32_t tmp_ = RREG32(reg); \
@@ -956,7 +1094,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
956#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) 1094#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
957#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) 1095#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
958#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) 1096#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
959 1097#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
960 1098
961/* 1099/*
962 * BIOS helpers. 1100 * BIOS helpers.
@@ -1015,6 +1153,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1015#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) 1153#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
1016#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) 1154#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
1017#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e)) 1155#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
1156#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev))
1018#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) 1157#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
1019#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) 1158#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
1020#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) 1159#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
@@ -1029,6 +1168,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1029/* AGP */ 1168/* AGP */
1030extern void radeon_agp_disable(struct radeon_device *rdev); 1169extern void radeon_agp_disable(struct radeon_device *rdev);
1031extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); 1170extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
1171extern void radeon_gart_restore(struct radeon_device *rdev);
1032extern int radeon_modeset_init(struct radeon_device *rdev); 1172extern int radeon_modeset_init(struct radeon_device *rdev);
1033extern void radeon_modeset_fini(struct radeon_device *rdev); 1173extern void radeon_modeset_fini(struct radeon_device *rdev);
1034extern bool radeon_card_posted(struct radeon_device *rdev); 1174extern bool radeon_card_posted(struct radeon_device *rdev);
@@ -1042,6 +1182,10 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl
1042extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); 1182extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
1043extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); 1183extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
1044extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); 1184extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
1185extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
1186extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
1187extern int radeon_resume_kms(struct drm_device *dev);
1188extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1045 1189
1046/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ 1190/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
1047struct r100_mc_save { 1191struct r100_mc_save {
@@ -1096,7 +1240,7 @@ extern void r200_set_safe_registers(struct radeon_device *rdev);
1096/* r300,r350,rv350,rv370,rv380 */ 1240/* r300,r350,rv350,rv370,rv380 */
1097extern void r300_set_reg_safe(struct radeon_device *rdev); 1241extern void r300_set_reg_safe(struct radeon_device *rdev);
1098extern void r300_mc_program(struct radeon_device *rdev); 1242extern void r300_mc_program(struct radeon_device *rdev);
1099extern void r300_vram_info(struct radeon_device *rdev); 1243extern void r300_mc_init(struct radeon_device *rdev);
1100extern void r300_clock_startup(struct radeon_device *rdev); 1244extern void r300_clock_startup(struct radeon_device *rdev);
1101extern int r300_mc_wait_for_idle(struct radeon_device *rdev); 1245extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
1102extern int rv370_pcie_gart_init(struct radeon_device *rdev); 1246extern int rv370_pcie_gart_init(struct radeon_device *rdev);
@@ -1105,7 +1249,6 @@ extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
1105extern void rv370_pcie_gart_disable(struct radeon_device *rdev); 1249extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
1106 1250
1107/* r420,r423,rv410 */ 1251/* r420,r423,rv410 */
1108extern int r420_mc_init(struct radeon_device *rdev);
1109extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); 1252extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
1110extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); 1253extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1111extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); 1254extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
@@ -1147,13 +1290,13 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
1147 struct drm_display_mode *mode2); 1290 struct drm_display_mode *mode2);
1148 1291
1149/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ 1292/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
1293extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
1150extern bool r600_card_posted(struct radeon_device *rdev); 1294extern bool r600_card_posted(struct radeon_device *rdev);
1151extern void r600_cp_stop(struct radeon_device *rdev); 1295extern void r600_cp_stop(struct radeon_device *rdev);
1152extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); 1296extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
1153extern int r600_cp_resume(struct radeon_device *rdev); 1297extern int r600_cp_resume(struct radeon_device *rdev);
1154extern void r600_cp_fini(struct radeon_device *rdev); 1298extern void r600_cp_fini(struct radeon_device *rdev);
1155extern int r600_count_pipe_bits(uint32_t val); 1299extern int r600_count_pipe_bits(uint32_t val);
1156extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
1157extern int r600_mc_wait_for_idle(struct radeon_device *rdev); 1300extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
1158extern int r600_pcie_gart_init(struct radeon_device *rdev); 1301extern int r600_pcie_gart_init(struct radeon_device *rdev);
1159extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); 1302extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
@@ -1189,6 +1332,14 @@ extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
1189 uint8_t status_bits, 1332 uint8_t status_bits,
1190 uint8_t category_code); 1333 uint8_t category_code);
1191 1334
1335/* evergreen */
1336struct evergreen_mc_save {
1337 u32 vga_control[6];
1338 u32 vga_render_control;
1339 u32 vga_hdp_control;
1340 u32 crtc_control[6];
1341};
1342
1192#include "radeon_object.h" 1343#include "radeon_object.h"
1193 1344
1194#endif 1345#endif
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index c0681a5556dc..c4457791dff1 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -237,6 +237,10 @@ int radeon_agp_init(struct radeon_device *rdev)
237 237
238 rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base; 238 rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
239 rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20; 239 rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
240 rdev->mc.gtt_start = rdev->mc.agp_base;
241 rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1;
242 dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
243 rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end);
240 244
241 /* workaround some hw issues */ 245 /* workaround some hw issues */
242 if (rdev->family < CHIP_R200) { 246 if (rdev->family < CHIP_R200) {
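
The AGP hunk above records the aperture as an inclusive window, gtt_end = gtt_start + gtt_size - 1, before printing it. A hypothetical helper (not in the patch) showing what that inclusive upper bound means for address checks:

static bool example_addr_in_gtt(struct radeon_device *rdev, u64 gpu_addr)
{
	/* both bounds are inclusive, hence the <= comparison */
	return gpu_addr >= rdev->mc.gtt_start && gpu_addr <= rdev->mc.gtt_end;
}
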
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 05ee1aeac3fd..d3a157b2bcb7 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -43,7 +43,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock
43void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); 43void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
44 44
45/* 45/*
46 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 46 * r100,rv100,rs100,rv200,rs200
47 */ 47 */
48extern int r100_init(struct radeon_device *rdev); 48extern int r100_init(struct radeon_device *rdev);
49extern void r100_fini(struct radeon_device *rdev); 49extern void r100_fini(struct radeon_device *rdev);
@@ -108,6 +108,52 @@ static struct radeon_asic r100_asic = {
108 .set_engine_clock = &radeon_legacy_set_engine_clock, 108 .set_engine_clock = &radeon_legacy_set_engine_clock,
109 .get_memory_clock = &radeon_legacy_get_memory_clock, 109 .get_memory_clock = &radeon_legacy_get_memory_clock,
110 .set_memory_clock = NULL, 110 .set_memory_clock = NULL,
111 .get_pcie_lanes = NULL,
112 .set_pcie_lanes = NULL,
113 .set_clock_gating = &radeon_legacy_set_clock_gating,
114 .set_surface_reg = r100_set_surface_reg,
115 .clear_surface_reg = r100_clear_surface_reg,
116 .bandwidth_update = &r100_bandwidth_update,
117 .hpd_init = &r100_hpd_init,
118 .hpd_fini = &r100_hpd_fini,
119 .hpd_sense = &r100_hpd_sense,
120 .hpd_set_polarity = &r100_hpd_set_polarity,
121 .ioctl_wait_idle = NULL,
122};
123
124/*
125 * r200,rv250,rs300,rv280
126 */
127extern int r200_copy_dma(struct radeon_device *rdev,
128 uint64_t src_offset,
129 uint64_t dst_offset,
130 unsigned num_pages,
131 struct radeon_fence *fence);
132static struct radeon_asic r200_asic = {
133 .init = &r100_init,
134 .fini = &r100_fini,
135 .suspend = &r100_suspend,
136 .resume = &r100_resume,
137 .vga_set_state = &r100_vga_set_state,
138 .gpu_reset = &r100_gpu_reset,
139 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
140 .gart_set_page = &r100_pci_gart_set_page,
141 .cp_commit = &r100_cp_commit,
142 .ring_start = &r100_ring_start,
143 .ring_test = &r100_ring_test,
144 .ring_ib_execute = &r100_ring_ib_execute,
145 .irq_set = &r100_irq_set,
146 .irq_process = &r100_irq_process,
147 .get_vblank_counter = &r100_get_vblank_counter,
148 .fence_ring_emit = &r100_fence_ring_emit,
149 .cs_parse = &r100_cs_parse,
150 .copy_blit = &r100_copy_blit,
151 .copy_dma = &r200_copy_dma,
152 .copy = &r100_copy_blit,
153 .get_engine_clock = &radeon_legacy_get_engine_clock,
154 .set_engine_clock = &radeon_legacy_set_engine_clock,
155 .get_memory_clock = &radeon_legacy_get_memory_clock,
156 .set_memory_clock = NULL,
111 .set_pcie_lanes = NULL, 157 .set_pcie_lanes = NULL,
112 .set_clock_gating = &radeon_legacy_set_clock_gating, 158 .set_clock_gating = &radeon_legacy_set_clock_gating,
113 .set_surface_reg = r100_set_surface_reg, 159 .set_surface_reg = r100_set_surface_reg,
@@ -138,11 +184,8 @@ extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t
138extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); 184extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
139extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 185extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
140extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); 186extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
141extern int r300_copy_dma(struct radeon_device *rdev, 187extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
142 uint64_t src_offset, 188
143 uint64_t dst_offset,
144 unsigned num_pages,
145 struct radeon_fence *fence);
146static struct radeon_asic r300_asic = { 189static struct radeon_asic r300_asic = {
147 .init = &r300_init, 190 .init = &r300_init,
148 .fini = &r300_fini, 191 .fini = &r300_fini,
@@ -162,7 +205,46 @@ static struct radeon_asic r300_asic = {
162 .fence_ring_emit = &r300_fence_ring_emit, 205 .fence_ring_emit = &r300_fence_ring_emit,
163 .cs_parse = &r300_cs_parse, 206 .cs_parse = &r300_cs_parse,
164 .copy_blit = &r100_copy_blit, 207 .copy_blit = &r100_copy_blit,
165 .copy_dma = &r300_copy_dma, 208 .copy_dma = &r200_copy_dma,
209 .copy = &r100_copy_blit,
210 .get_engine_clock = &radeon_legacy_get_engine_clock,
211 .set_engine_clock = &radeon_legacy_set_engine_clock,
212 .get_memory_clock = &radeon_legacy_get_memory_clock,
213 .set_memory_clock = NULL,
214 .get_pcie_lanes = &rv370_get_pcie_lanes,
215 .set_pcie_lanes = &rv370_set_pcie_lanes,
216 .set_clock_gating = &radeon_legacy_set_clock_gating,
217 .set_surface_reg = r100_set_surface_reg,
218 .clear_surface_reg = r100_clear_surface_reg,
219 .bandwidth_update = &r100_bandwidth_update,
220 .hpd_init = &r100_hpd_init,
221 .hpd_fini = &r100_hpd_fini,
222 .hpd_sense = &r100_hpd_sense,
223 .hpd_set_polarity = &r100_hpd_set_polarity,
224 .ioctl_wait_idle = NULL,
225};
226
227
228static struct radeon_asic r300_asic_pcie = {
229 .init = &r300_init,
230 .fini = &r300_fini,
231 .suspend = &r300_suspend,
232 .resume = &r300_resume,
233 .vga_set_state = &r100_vga_set_state,
234 .gpu_reset = &r300_gpu_reset,
235 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
236 .gart_set_page = &rv370_pcie_gart_set_page,
237 .cp_commit = &r100_cp_commit,
238 .ring_start = &r300_ring_start,
239 .ring_test = &r100_ring_test,
240 .ring_ib_execute = &r100_ring_ib_execute,
241 .irq_set = &r100_irq_set,
242 .irq_process = &r100_irq_process,
243 .get_vblank_counter = &r100_get_vblank_counter,
244 .fence_ring_emit = &r300_fence_ring_emit,
245 .cs_parse = &r300_cs_parse,
246 .copy_blit = &r100_copy_blit,
247 .copy_dma = &r200_copy_dma,
166 .copy = &r100_copy_blit, 248 .copy = &r100_copy_blit,
167 .get_engine_clock = &radeon_legacy_get_engine_clock, 249 .get_engine_clock = &radeon_legacy_get_engine_clock,
168 .set_engine_clock = &radeon_legacy_set_engine_clock, 250 .set_engine_clock = &radeon_legacy_set_engine_clock,
@@ -206,12 +288,13 @@ static struct radeon_asic r420_asic = {
206 .fence_ring_emit = &r300_fence_ring_emit, 288 .fence_ring_emit = &r300_fence_ring_emit,
207 .cs_parse = &r300_cs_parse, 289 .cs_parse = &r300_cs_parse,
208 .copy_blit = &r100_copy_blit, 290 .copy_blit = &r100_copy_blit,
209 .copy_dma = &r300_copy_dma, 291 .copy_dma = &r200_copy_dma,
210 .copy = &r100_copy_blit, 292 .copy = &r100_copy_blit,
211 .get_engine_clock = &radeon_atom_get_engine_clock, 293 .get_engine_clock = &radeon_atom_get_engine_clock,
212 .set_engine_clock = &radeon_atom_set_engine_clock, 294 .set_engine_clock = &radeon_atom_set_engine_clock,
213 .get_memory_clock = &radeon_atom_get_memory_clock, 295 .get_memory_clock = &radeon_atom_get_memory_clock,
214 .set_memory_clock = &radeon_atom_set_memory_clock, 296 .set_memory_clock = &radeon_atom_set_memory_clock,
297 .get_pcie_lanes = &rv370_get_pcie_lanes,
215 .set_pcie_lanes = &rv370_set_pcie_lanes, 298 .set_pcie_lanes = &rv370_set_pcie_lanes,
216 .set_clock_gating = &radeon_atom_set_clock_gating, 299 .set_clock_gating = &radeon_atom_set_clock_gating,
217 .set_surface_reg = r100_set_surface_reg, 300 .set_surface_reg = r100_set_surface_reg,
@@ -255,12 +338,13 @@ static struct radeon_asic rs400_asic = {
255 .fence_ring_emit = &r300_fence_ring_emit, 338 .fence_ring_emit = &r300_fence_ring_emit,
256 .cs_parse = &r300_cs_parse, 339 .cs_parse = &r300_cs_parse,
257 .copy_blit = &r100_copy_blit, 340 .copy_blit = &r100_copy_blit,
258 .copy_dma = &r300_copy_dma, 341 .copy_dma = &r200_copy_dma,
259 .copy = &r100_copy_blit, 342 .copy = &r100_copy_blit,
260 .get_engine_clock = &radeon_legacy_get_engine_clock, 343 .get_engine_clock = &radeon_legacy_get_engine_clock,
261 .set_engine_clock = &radeon_legacy_set_engine_clock, 344 .set_engine_clock = &radeon_legacy_set_engine_clock,
262 .get_memory_clock = &radeon_legacy_get_memory_clock, 345 .get_memory_clock = &radeon_legacy_get_memory_clock,
263 .set_memory_clock = NULL, 346 .set_memory_clock = NULL,
347 .get_pcie_lanes = NULL,
264 .set_pcie_lanes = NULL, 348 .set_pcie_lanes = NULL,
265 .set_clock_gating = &radeon_legacy_set_clock_gating, 349 .set_clock_gating = &radeon_legacy_set_clock_gating,
266 .set_surface_reg = r100_set_surface_reg, 350 .set_surface_reg = r100_set_surface_reg,
@@ -314,14 +398,17 @@ static struct radeon_asic rs600_asic = {
314 .fence_ring_emit = &r300_fence_ring_emit, 398 .fence_ring_emit = &r300_fence_ring_emit,
315 .cs_parse = &r300_cs_parse, 399 .cs_parse = &r300_cs_parse,
316 .copy_blit = &r100_copy_blit, 400 .copy_blit = &r100_copy_blit,
317 .copy_dma = &r300_copy_dma, 401 .copy_dma = &r200_copy_dma,
318 .copy = &r100_copy_blit, 402 .copy = &r100_copy_blit,
319 .get_engine_clock = &radeon_atom_get_engine_clock, 403 .get_engine_clock = &radeon_atom_get_engine_clock,
320 .set_engine_clock = &radeon_atom_set_engine_clock, 404 .set_engine_clock = &radeon_atom_set_engine_clock,
321 .get_memory_clock = &radeon_atom_get_memory_clock, 405 .get_memory_clock = &radeon_atom_get_memory_clock,
322 .set_memory_clock = &radeon_atom_set_memory_clock, 406 .set_memory_clock = &radeon_atom_set_memory_clock,
407 .get_pcie_lanes = NULL,
323 .set_pcie_lanes = NULL, 408 .set_pcie_lanes = NULL,
324 .set_clock_gating = &radeon_atom_set_clock_gating, 409 .set_clock_gating = &radeon_atom_set_clock_gating,
410 .set_surface_reg = r100_set_surface_reg,
411 .clear_surface_reg = r100_clear_surface_reg,
325 .bandwidth_update = &rs600_bandwidth_update, 412 .bandwidth_update = &rs600_bandwidth_update,
326 .hpd_init = &rs600_hpd_init, 413 .hpd_init = &rs600_hpd_init,
327 .hpd_fini = &rs600_hpd_fini, 414 .hpd_fini = &rs600_hpd_fini,
@@ -360,12 +447,13 @@ static struct radeon_asic rs690_asic = {
360 .fence_ring_emit = &r300_fence_ring_emit, 447 .fence_ring_emit = &r300_fence_ring_emit,
361 .cs_parse = &r300_cs_parse, 448 .cs_parse = &r300_cs_parse,
362 .copy_blit = &r100_copy_blit, 449 .copy_blit = &r100_copy_blit,
363 .copy_dma = &r300_copy_dma, 450 .copy_dma = &r200_copy_dma,
364 .copy = &r300_copy_dma, 451 .copy = &r200_copy_dma,
365 .get_engine_clock = &radeon_atom_get_engine_clock, 452 .get_engine_clock = &radeon_atom_get_engine_clock,
366 .set_engine_clock = &radeon_atom_set_engine_clock, 453 .set_engine_clock = &radeon_atom_set_engine_clock,
367 .get_memory_clock = &radeon_atom_get_memory_clock, 454 .get_memory_clock = &radeon_atom_get_memory_clock,
368 .set_memory_clock = &radeon_atom_set_memory_clock, 455 .set_memory_clock = &radeon_atom_set_memory_clock,
456 .get_pcie_lanes = NULL,
369 .set_pcie_lanes = NULL, 457 .set_pcie_lanes = NULL,
370 .set_clock_gating = &radeon_atom_set_clock_gating, 458 .set_clock_gating = &radeon_atom_set_clock_gating,
371 .set_surface_reg = r100_set_surface_reg, 459 .set_surface_reg = r100_set_surface_reg,
@@ -412,12 +500,13 @@ static struct radeon_asic rv515_asic = {
412 .fence_ring_emit = &r300_fence_ring_emit, 500 .fence_ring_emit = &r300_fence_ring_emit,
413 .cs_parse = &r300_cs_parse, 501 .cs_parse = &r300_cs_parse,
414 .copy_blit = &r100_copy_blit, 502 .copy_blit = &r100_copy_blit,
415 .copy_dma = &r300_copy_dma, 503 .copy_dma = &r200_copy_dma,
416 .copy = &r100_copy_blit, 504 .copy = &r100_copy_blit,
417 .get_engine_clock = &radeon_atom_get_engine_clock, 505 .get_engine_clock = &radeon_atom_get_engine_clock,
418 .set_engine_clock = &radeon_atom_set_engine_clock, 506 .set_engine_clock = &radeon_atom_set_engine_clock,
419 .get_memory_clock = &radeon_atom_get_memory_clock, 507 .get_memory_clock = &radeon_atom_get_memory_clock,
420 .set_memory_clock = &radeon_atom_set_memory_clock, 508 .set_memory_clock = &radeon_atom_set_memory_clock,
509 .get_pcie_lanes = &rv370_get_pcie_lanes,
421 .set_pcie_lanes = &rv370_set_pcie_lanes, 510 .set_pcie_lanes = &rv370_set_pcie_lanes,
422 .set_clock_gating = &radeon_atom_set_clock_gating, 511 .set_clock_gating = &radeon_atom_set_clock_gating,
423 .set_surface_reg = r100_set_surface_reg, 512 .set_surface_reg = r100_set_surface_reg,
@@ -455,12 +544,13 @@ static struct radeon_asic r520_asic = {
455 .fence_ring_emit = &r300_fence_ring_emit, 544 .fence_ring_emit = &r300_fence_ring_emit,
456 .cs_parse = &r300_cs_parse, 545 .cs_parse = &r300_cs_parse,
457 .copy_blit = &r100_copy_blit, 546 .copy_blit = &r100_copy_blit,
458 .copy_dma = &r300_copy_dma, 547 .copy_dma = &r200_copy_dma,
459 .copy = &r100_copy_blit, 548 .copy = &r100_copy_blit,
460 .get_engine_clock = &radeon_atom_get_engine_clock, 549 .get_engine_clock = &radeon_atom_get_engine_clock,
461 .set_engine_clock = &radeon_atom_set_engine_clock, 550 .set_engine_clock = &radeon_atom_set_engine_clock,
462 .get_memory_clock = &radeon_atom_get_memory_clock, 551 .get_memory_clock = &radeon_atom_get_memory_clock,
463 .set_memory_clock = &radeon_atom_set_memory_clock, 552 .set_memory_clock = &radeon_atom_set_memory_clock,
553 .get_pcie_lanes = &rv370_get_pcie_lanes,
464 .set_pcie_lanes = &rv370_set_pcie_lanes, 554 .set_pcie_lanes = &rv370_set_pcie_lanes,
465 .set_clock_gating = &radeon_atom_set_clock_gating, 555 .set_clock_gating = &radeon_atom_set_clock_gating,
466 .set_surface_reg = r100_set_surface_reg, 556 .set_surface_reg = r100_set_surface_reg,
@@ -538,8 +628,9 @@ static struct radeon_asic r600_asic = {
538 .set_engine_clock = &radeon_atom_set_engine_clock, 628 .set_engine_clock = &radeon_atom_set_engine_clock,
539 .get_memory_clock = &radeon_atom_get_memory_clock, 629 .get_memory_clock = &radeon_atom_get_memory_clock,
540 .set_memory_clock = &radeon_atom_set_memory_clock, 630 .set_memory_clock = &radeon_atom_set_memory_clock,
631 .get_pcie_lanes = &rv370_get_pcie_lanes,
541 .set_pcie_lanes = NULL, 632 .set_pcie_lanes = NULL,
542 .set_clock_gating = &radeon_atom_set_clock_gating, 633 .set_clock_gating = NULL,
543 .set_surface_reg = r600_set_surface_reg, 634 .set_surface_reg = r600_set_surface_reg,
544 .clear_surface_reg = r600_clear_surface_reg, 635 .clear_surface_reg = r600_clear_surface_reg,
545 .bandwidth_update = &rv515_bandwidth_update, 636 .bandwidth_update = &rv515_bandwidth_update,
@@ -583,6 +674,7 @@ static struct radeon_asic rv770_asic = {
583 .set_engine_clock = &radeon_atom_set_engine_clock, 674 .set_engine_clock = &radeon_atom_set_engine_clock,
584 .get_memory_clock = &radeon_atom_get_memory_clock, 675 .get_memory_clock = &radeon_atom_get_memory_clock,
585 .set_memory_clock = &radeon_atom_set_memory_clock, 676 .set_memory_clock = &radeon_atom_set_memory_clock,
677 .get_pcie_lanes = &rv370_get_pcie_lanes,
586 .set_pcie_lanes = NULL, 678 .set_pcie_lanes = NULL,
587 .set_clock_gating = &radeon_atom_set_clock_gating, 679 .set_clock_gating = &radeon_atom_set_clock_gating,
588 .set_surface_reg = r600_set_surface_reg, 680 .set_surface_reg = r600_set_surface_reg,
@@ -595,4 +687,54 @@ static struct radeon_asic rv770_asic = {
595 .ioctl_wait_idle = r600_ioctl_wait_idle, 687 .ioctl_wait_idle = r600_ioctl_wait_idle,
596}; 688};
597 689
690/*
691 * evergreen
692 */
693int evergreen_init(struct radeon_device *rdev);
694void evergreen_fini(struct radeon_device *rdev);
695int evergreen_suspend(struct radeon_device *rdev);
696int evergreen_resume(struct radeon_device *rdev);
697int evergreen_gpu_reset(struct radeon_device *rdev);
698void evergreen_bandwidth_update(struct radeon_device *rdev);
699void evergreen_hpd_init(struct radeon_device *rdev);
700void evergreen_hpd_fini(struct radeon_device *rdev);
701bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
702void evergreen_hpd_set_polarity(struct radeon_device *rdev,
703 enum radeon_hpd_id hpd);
704
705static struct radeon_asic evergreen_asic = {
706 .init = &evergreen_init,
707 .fini = &evergreen_fini,
708 .suspend = &evergreen_suspend,
709 .resume = &evergreen_resume,
710 .cp_commit = NULL,
711 .gpu_reset = &evergreen_gpu_reset,
712 .vga_set_state = &r600_vga_set_state,
713 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
714 .gart_set_page = &rs600_gart_set_page,
715 .ring_test = NULL,
716 .ring_ib_execute = NULL,
717 .irq_set = NULL,
718 .irq_process = NULL,
719 .get_vblank_counter = NULL,
720 .fence_ring_emit = NULL,
721 .cs_parse = NULL,
722 .copy_blit = NULL,
723 .copy_dma = NULL,
724 .copy = NULL,
725 .get_engine_clock = &radeon_atom_get_engine_clock,
726 .set_engine_clock = &radeon_atom_set_engine_clock,
727 .get_memory_clock = &radeon_atom_get_memory_clock,
728 .set_memory_clock = &radeon_atom_set_memory_clock,
729 .set_pcie_lanes = NULL,
730 .set_clock_gating = NULL,
731 .set_surface_reg = r600_set_surface_reg,
732 .clear_surface_reg = r600_clear_surface_reg,
733 .bandwidth_update = &evergreen_bandwidth_update,
734 .hpd_init = &evergreen_hpd_init,
735 .hpd_fini = &evergreen_hpd_fini,
736 .hpd_sense = &evergreen_hpd_sense,
737 .hpd_set_polarity = &evergreen_hpd_set_polarity,
738};
739
598#endif 740#endif
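
Several entries in the new evergreen_asic table are deliberately left NULL at this stage (command processor, interrupts, fences and copies are not wired up yet), so common code that dispatches through struct radeon_asic has to guard against missing hooks. A minimal sketch of that guard, assuming ring_test() keeps the int (*)(struct radeon_device *) signature used by the other ASICs:

static int example_ring_test(struct radeon_device *rdev)
{
	if (!rdev->asic->ring_test)
		return -EINVAL;	/* hook not implemented for this ASIC yet */
	return rdev->asic->ring_test(rdev);
}
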
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 4d8831548a5f..93783b15c81d 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -159,8 +159,15 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
159 struct radeon_gpio_rec *gpio) 159 struct radeon_gpio_rec *gpio)
160{ 160{
161 struct radeon_hpd hpd; 161 struct radeon_hpd hpd;
162 u32 reg;
163
164 if (ASIC_IS_DCE4(rdev))
165 reg = EVERGREEN_DC_GPIO_HPD_A;
166 else
167 reg = AVIVO_DC_GPIO_HPD_A;
168
162 hpd.gpio = *gpio; 169 hpd.gpio = *gpio;
163 if (gpio->reg == AVIVO_DC_GPIO_HPD_A) { 170 if (gpio->reg == reg) {
164 switch(gpio->mask) { 171 switch(gpio->mask) {
165 case (1 << 0): 172 case (1 << 0):
166 hpd.hpd = RADEON_HPD_1; 173 hpd.hpd = RADEON_HPD_1;
@@ -574,6 +581,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
574 ddc_bus.valid = false; 581 ddc_bus.valid = false;
575 } 582 }
576 583
584 /* needed for aux chan transactions */
585 ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0;
586
577 conn_id = le16_to_cpu(path->usConnObjectId); 587 conn_id = le16_to_cpu(path->usConnObjectId);
578 588
579 if (!radeon_atom_apply_quirks 589 if (!radeon_atom_apply_quirks
@@ -838,6 +848,7 @@ union firmware_info {
838 ATOM_FIRMWARE_INFO_V1_2 info_12; 848 ATOM_FIRMWARE_INFO_V1_2 info_12;
839 ATOM_FIRMWARE_INFO_V1_3 info_13; 849 ATOM_FIRMWARE_INFO_V1_3 info_13;
840 ATOM_FIRMWARE_INFO_V1_4 info_14; 850 ATOM_FIRMWARE_INFO_V1_4 info_14;
851 ATOM_FIRMWARE_INFO_V2_1 info_21;
841}; 852};
842 853
843bool radeon_atom_get_clock_info(struct drm_device *dev) 854bool radeon_atom_get_clock_info(struct drm_device *dev)
@@ -849,6 +860,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
849 uint8_t frev, crev; 860 uint8_t frev, crev;
850 struct radeon_pll *p1pll = &rdev->clock.p1pll; 861 struct radeon_pll *p1pll = &rdev->clock.p1pll;
851 struct radeon_pll *p2pll = &rdev->clock.p2pll; 862 struct radeon_pll *p2pll = &rdev->clock.p2pll;
863 struct radeon_pll *dcpll = &rdev->clock.dcpll;
852 struct radeon_pll *spll = &rdev->clock.spll; 864 struct radeon_pll *spll = &rdev->clock.spll;
853 struct radeon_pll *mpll = &rdev->clock.mpll; 865 struct radeon_pll *mpll = &rdev->clock.mpll;
854 uint16_t data_offset; 866 uint16_t data_offset;
@@ -951,8 +963,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
951 rdev->clock.default_mclk = 963 rdev->clock.default_mclk =
952 le32_to_cpu(firmware_info->info.ulDefaultMemoryClock); 964 le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
953 965
966 if (ASIC_IS_DCE4(rdev)) {
967 rdev->clock.default_dispclk =
968 le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
969 if (rdev->clock.default_dispclk == 0)
970 rdev->clock.default_dispclk = 60000; /* 600 Mhz */
971 rdev->clock.dp_extclk =
972 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
973 }
974 *dcpll = *p1pll;
975
954 return true; 976 return true;
955 } 977 }
978
956 return false; 979 return false;
957} 980}
958 981
@@ -1091,6 +1114,30 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
1091 return ss; 1114 return ss;
1092} 1115}
1093 1116
1117static void radeon_atom_apply_lvds_quirks(struct drm_device *dev,
1118 struct radeon_encoder_atom_dig *lvds)
1119{
1120
1121 /* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */
1122 if ((dev->pdev->device == 0x95c4) &&
1123 (dev->pdev->subsystem_vendor == 0x1179) &&
1124 (dev->pdev->subsystem_device == 0xff50)) {
1125 if ((lvds->native_mode.hdisplay == 1280) &&
1126 (lvds->native_mode.vdisplay == 800))
1127 lvds->pll_algo = PLL_ALGO_LEGACY;
1128 }
1129
1130 /* Dell Studio 15 laptop panel doesn't like new pll divider algo */
1131 if ((dev->pdev->device == 0x95c4) &&
1132 (dev->pdev->subsystem_vendor == 0x1028) &&
1133 (dev->pdev->subsystem_device == 0x029f)) {
1134 if ((lvds->native_mode.hdisplay == 1280) &&
1135 (lvds->native_mode.vdisplay == 800))
1136 lvds->pll_algo = PLL_ALGO_LEGACY;
1137 }
1138
1139}
1140
1094union lvds_info { 1141union lvds_info {
1095 struct _ATOM_LVDS_INFO info; 1142 struct _ATOM_LVDS_INFO info;
1096 struct _ATOM_LVDS_INFO_V12 info_12; 1143 struct _ATOM_LVDS_INFO_V12 info_12;
@@ -1161,6 +1208,21 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1161 1208
1162 lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id); 1209 lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
1163 1210
1211 if (ASIC_IS_AVIVO(rdev)) {
1212 if (radeon_new_pll == 0)
1213 lvds->pll_algo = PLL_ALGO_LEGACY;
1214 else
1215 lvds->pll_algo = PLL_ALGO_NEW;
1216 } else {
1217 if (radeon_new_pll == 1)
1218 lvds->pll_algo = PLL_ALGO_NEW;
1219 else
1220 lvds->pll_algo = PLL_ALGO_LEGACY;
1221 }
1222
1223 /* LVDS quirks */
1224 radeon_atom_apply_lvds_quirks(dev, lvds);
1225
1164 encoder->native_mode = lvds->native_mode; 1226 encoder->native_mode = lvds->native_mode;
1165 } 1227 }
1166 return lvds; 1228 return lvds;
@@ -1385,20 +1447,375 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
1385 return tv_dac; 1447 return tv_dac;
1386} 1448}
1387 1449
1388void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) 1450union power_info {
1451 struct _ATOM_POWERPLAY_INFO info;
1452 struct _ATOM_POWERPLAY_INFO_V2 info_2;
1453 struct _ATOM_POWERPLAY_INFO_V3 info_3;
1454 struct _ATOM_PPLIB_POWERPLAYTABLE info_4;
1455};
1456
1457void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1389{ 1458{
1390 DYNAMIC_CLOCK_GATING_PS_ALLOCATION args; 1459 struct radeon_mode_info *mode_info = &rdev->mode_info;
1391 int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating); 1460 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
1461 u16 data_offset;
1462 u8 frev, crev;
1463 u32 misc, misc2 = 0, sclk, mclk;
1464 union power_info *power_info;
1465 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
1466 struct _ATOM_PPLIB_STATE *power_state;
1467 int num_modes = 0, i, j;
1468 int state_index = 0, mode_index = 0;
1392 1469
1393 args.ucEnable = enable; 1470 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
1394 1471
1395 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1472 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1473
1474 rdev->pm.default_power_state = NULL;
1475
1476 if (power_info) {
1477 if (frev < 4) {
1478 num_modes = power_info->info.ucNumOfPowerModeEntries;
1479 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
1480 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
1481 for (i = 0; i < num_modes; i++) {
1482 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
1483 switch (frev) {
1484 case 1:
1485 rdev->pm.power_state[state_index].num_clock_modes = 1;
1486 rdev->pm.power_state[state_index].clock_info[0].mclk =
1487 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
1488 rdev->pm.power_state[state_index].clock_info[0].sclk =
1489 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
1490 /* skip invalid modes */
1491 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
1492 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
1493 continue;
1494 /* skip overclock modes for now */
1495 if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
1496 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
1497 (rdev->pm.power_state[state_index].clock_info[0].sclk >
1498 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
1499 continue;
1500 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
1501 power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
1502 misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
1503 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
1504 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1505 VOLTAGE_GPIO;
1506 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1507 radeon_lookup_gpio(rdev,
1508 power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
1509 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
1510 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1511 true;
1512 else
1513 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1514 false;
1515 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
1516 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1517 VOLTAGE_VDDC;
1518 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
1519 power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
1520 }
1521 /* order matters! */
1522 if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
1523 rdev->pm.power_state[state_index].type =
1524 POWER_STATE_TYPE_POWERSAVE;
1525 if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
1526 rdev->pm.power_state[state_index].type =
1527 POWER_STATE_TYPE_BATTERY;
1528 if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
1529 rdev->pm.power_state[state_index].type =
1530 POWER_STATE_TYPE_BATTERY;
1531 if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
1532 rdev->pm.power_state[state_index].type =
1533 POWER_STATE_TYPE_BALANCED;
1534 if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
1535 rdev->pm.power_state[state_index].type =
1536 POWER_STATE_TYPE_PERFORMANCE;
1537 if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
1538 rdev->pm.power_state[state_index].type =
1539 POWER_STATE_TYPE_DEFAULT;
1540 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1541 rdev->pm.power_state[state_index].default_clock_mode =
1542 &rdev->pm.power_state[state_index].clock_info[0];
1543 }
1544 state_index++;
1545 break;
1546 case 2:
1547 rdev->pm.power_state[state_index].num_clock_modes = 1;
1548 rdev->pm.power_state[state_index].clock_info[0].mclk =
1549 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
1550 rdev->pm.power_state[state_index].clock_info[0].sclk =
1551 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
1552 /* skip invalid modes */
1553 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
1554 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
1555 continue;
1556 /* skip overclock modes for now */
1557 if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
1558 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
1559 (rdev->pm.power_state[state_index].clock_info[0].sclk >
1560 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
1561 continue;
1562 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
1563 power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
1564 misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
1565 misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
1566 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
1567 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1568 VOLTAGE_GPIO;
1569 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1570 radeon_lookup_gpio(rdev,
1571 power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
1572 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
1573 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1574 true;
1575 else
1576 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1577 false;
1578 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
1579 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1580 VOLTAGE_VDDC;
1581 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
1582 power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
1583 }
1584 /* order matters! */
1585 if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
1586 rdev->pm.power_state[state_index].type =
1587 POWER_STATE_TYPE_POWERSAVE;
1588 if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
1589 rdev->pm.power_state[state_index].type =
1590 POWER_STATE_TYPE_BATTERY;
1591 if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
1592 rdev->pm.power_state[state_index].type =
1593 POWER_STATE_TYPE_BATTERY;
1594 if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
1595 rdev->pm.power_state[state_index].type =
1596 POWER_STATE_TYPE_BALANCED;
1597 if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
1598 rdev->pm.power_state[state_index].type =
1599 POWER_STATE_TYPE_PERFORMANCE;
1600 if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
1601 rdev->pm.power_state[state_index].type =
1602 POWER_STATE_TYPE_BALANCED;
1603 if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
1604 rdev->pm.power_state[state_index].type =
1605 POWER_STATE_TYPE_DEFAULT;
1606 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1607 rdev->pm.power_state[state_index].default_clock_mode =
1608 &rdev->pm.power_state[state_index].clock_info[0];
1609 }
1610 state_index++;
1611 break;
1612 case 3:
1613 rdev->pm.power_state[state_index].num_clock_modes = 1;
1614 rdev->pm.power_state[state_index].clock_info[0].mclk =
1615 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
1616 rdev->pm.power_state[state_index].clock_info[0].sclk =
1617 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
1618 /* skip invalid modes */
1619 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
1620 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
1621 continue;
1622 /* skip overclock modes for now */
1623 if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
1624 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
1625 (rdev->pm.power_state[state_index].clock_info[0].sclk >
1626 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
1627 continue;
1628 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
1629 power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
1630 misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
1631 misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
1632 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
1633 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1634 VOLTAGE_GPIO;
1635 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1636 radeon_lookup_gpio(rdev,
1637 power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
1638 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
1639 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1640 true;
1641 else
1642 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1643 false;
1644 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
1645 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1646 VOLTAGE_VDDC;
1647 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
1648 power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
1649 if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
1650 rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
1651 true;
1652 rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
1653 power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
1654 }
1655 }
1656 /* order matters! */
1657 if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
1658 rdev->pm.power_state[state_index].type =
1659 POWER_STATE_TYPE_POWERSAVE;
1660 if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
1661 rdev->pm.power_state[state_index].type =
1662 POWER_STATE_TYPE_BATTERY;
1663 if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
1664 rdev->pm.power_state[state_index].type =
1665 POWER_STATE_TYPE_BATTERY;
1666 if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
1667 rdev->pm.power_state[state_index].type =
1668 POWER_STATE_TYPE_BALANCED;
1669 if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
1670 rdev->pm.power_state[state_index].type =
1671 POWER_STATE_TYPE_PERFORMANCE;
1672 if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
1673 rdev->pm.power_state[state_index].type =
1674 POWER_STATE_TYPE_BALANCED;
1675 if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
1676 rdev->pm.power_state[state_index].type =
1677 POWER_STATE_TYPE_DEFAULT;
1678 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1679 rdev->pm.power_state[state_index].default_clock_mode =
1680 &rdev->pm.power_state[state_index].clock_info[0];
1681 }
1682 state_index++;
1683 break;
1684 }
1685 }
1686 } else if (frev == 4) {
1687 for (i = 0; i < power_info->info_4.ucNumStates; i++) {
1688 mode_index = 0;
1689 power_state = (struct _ATOM_PPLIB_STATE *)
1690 (mode_info->atom_context->bios +
1691 data_offset +
1692 le16_to_cpu(power_info->info_4.usStateArrayOffset) +
1693 i * power_info->info_4.ucStateEntrySize);
1694 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
1695 (mode_info->atom_context->bios +
1696 data_offset +
1697 le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) +
1698 (power_state->ucNonClockStateIndex *
1699 power_info->info_4.ucNonClockSize));
1700 for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) {
1701 if (rdev->flags & RADEON_IS_IGP) {
1702 struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info =
1703 (struct _ATOM_PPLIB_RS780_CLOCK_INFO *)
1704 (mode_info->atom_context->bios +
1705 data_offset +
1706 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
1707 (power_state->ucClockStateIndices[j] *
1708 power_info->info_4.ucClockInfoSize));
1709 sclk = le16_to_cpu(clock_info->usLowEngineClockLow);
1710 sclk |= clock_info->ucLowEngineClockHigh << 16;
1711 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
1712 /* skip invalid modes */
1713 if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
1714 continue;
1715 /* skip overclock modes for now */
1716 if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
1717 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)
1718 continue;
1719 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
1720 VOLTAGE_SW;
1721 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
1722 clock_info->usVDDC;
1723 mode_index++;
1724 } else {
1725 struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
1726 (struct _ATOM_PPLIB_R600_CLOCK_INFO *)
1727 (mode_info->atom_context->bios +
1728 data_offset +
1729 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
1730 (power_state->ucClockStateIndices[j] *
1731 power_info->info_4.ucClockInfoSize));
1732 sclk = le16_to_cpu(clock_info->usEngineClockLow);
1733 sclk |= clock_info->ucEngineClockHigh << 16;
1734 mclk = le16_to_cpu(clock_info->usMemoryClockLow);
1735 mclk |= clock_info->ucMemoryClockHigh << 16;
1736 rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
1737 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
1738 /* skip invalid modes */
1739 if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
1740 (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
1741 continue;
1742 /* skip overclock modes for now */
1743 if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
1744 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
1745 (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
1746 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
1747 continue;
1748 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
1749 VOLTAGE_SW;
1750 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
1751 clock_info->usVDDC;
1752 mode_index++;
1753 }
1754 }
1755 rdev->pm.power_state[state_index].num_clock_modes = mode_index;
1756 if (mode_index) {
1757 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
1758 misc2 = le16_to_cpu(non_clock_info->usClassification);
1759 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
1760 ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
1761 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
1762 switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
1763 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
1764 rdev->pm.power_state[state_index].type =
1765 POWER_STATE_TYPE_BATTERY;
1766 break;
1767 case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
1768 rdev->pm.power_state[state_index].type =
1769 POWER_STATE_TYPE_BALANCED;
1770 break;
1771 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
1772 rdev->pm.power_state[state_index].type =
1773 POWER_STATE_TYPE_PERFORMANCE;
1774 break;
1775 }
1776 if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
1777 rdev->pm.power_state[state_index].type =
1778 POWER_STATE_TYPE_DEFAULT;
1779 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1780 rdev->pm.power_state[state_index].default_clock_mode =
1781 &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
1782 }
1783 state_index++;
1784 }
1785 }
1786 }
1787 } else {
1788 /* XXX figure out some good default low power mode for cards w/out power tables */
1789 }
1790
1791 if (rdev->pm.default_power_state == NULL) {
1792 /* add the default mode */
1793 rdev->pm.power_state[state_index].type =
1794 POWER_STATE_TYPE_DEFAULT;
1795 rdev->pm.power_state[state_index].num_clock_modes = 1;
1796 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
1797 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
1798 rdev->pm.power_state[state_index].default_clock_mode =
1799 &rdev->pm.power_state[state_index].clock_info[0];
1800 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
1801 if (rdev->asic->get_pcie_lanes)
1802 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
1803 else
1804 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
1805 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1806 state_index++;
1807 }
1808 rdev->pm.num_power_states = state_index;
1809
1810 rdev->pm.current_power_state = rdev->pm.default_power_state;
1811 rdev->pm.current_clock_mode =
1812 rdev->pm.default_power_state->default_clock_mode;
1396} 1813}
1397 1814
1398void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable) 1815void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
1399{ 1816{
1400 ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args; 1817 DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
1401 int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt); 1818 int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
1402 1819
1403 args.ucEnable = enable; 1820 args.ucEnable = enable;
1404 1821
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
new file mode 100644
index 000000000000..3f557c4151e0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -0,0 +1,257 @@
1/*
2 * Copyright (c) 2010 Red Hat Inc.
3 * Author : Dave Airlie <airlied@redhat.com>
4 *
5 * Licensed under GPLv2
6 *
7 * ATPX support for both Intel/ATI
8 */
9#include <linux/vga_switcheroo.h>
10#include <acpi/acpi.h>
11#include <acpi/acpi_bus.h>
12#include <linux/pci.h>
13
14#define ATPX_VERSION 0
15#define ATPX_GPU_PWR 2
16#define ATPX_MUX_SELECT 3
17
18#define ATPX_INTEGRATED 0
19#define ATPX_DISCRETE 1
20
21#define ATPX_MUX_IGD 0
22#define ATPX_MUX_DISCRETE 1
23
24static struct radeon_atpx_priv {
25 bool atpx_detected;
26 /* handle for device - and atpx */
27 acpi_handle dhandle;
28 acpi_handle atpx_handle;
29 acpi_handle atrm_handle;
30} radeon_atpx_priv;
31
32/* retrieve the ROM in 4k blocks */
33static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
34 int offset, int len)
35{
36 acpi_status status;
37 union acpi_object atrm_arg_elements[2], *obj;
38 struct acpi_object_list atrm_arg;
39 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
40
41 atrm_arg.count = 2;
42 atrm_arg.pointer = &atrm_arg_elements[0];
43
44 atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
45 atrm_arg_elements[0].integer.value = offset;
46
47 atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
48 atrm_arg_elements[1].integer.value = len;
49
50 status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
51 if (ACPI_FAILURE(status)) {
52 printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
53 return -ENODEV;
54 }
55
56 obj = (union acpi_object *)buffer.pointer;
57 memcpy(bios+offset, obj->buffer.pointer, len);
58 kfree(buffer.pointer);
59 return len;
60}
61
62bool radeon_atrm_supported(struct pci_dev *pdev)
63{
64 /* get the discrete ROM only via ATRM */
65 if (!radeon_atpx_priv.atpx_detected)
66 return false;
67
68 if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
69 return false;
70 return true;
71}
72
73
74int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
75{
76 return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
77}
78
79static int radeon_atpx_get_version(acpi_handle handle)
80{
81 acpi_status status;
82 union acpi_object atpx_arg_elements[2], *obj;
83 struct acpi_object_list atpx_arg;
84 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
85
86 atpx_arg.count = 2;
87 atpx_arg.pointer = &atpx_arg_elements[0];
88
89 atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
90 atpx_arg_elements[0].integer.value = ATPX_VERSION;
91
92 atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
93 atpx_arg_elements[1].integer.value = ATPX_VERSION;
94
95 status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
96 if (ACPI_FAILURE(status)) {
97 printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
98 return -ENOSYS;
99 }
100 obj = (union acpi_object *)buffer.pointer;
101 if (obj && (obj->type == ACPI_TYPE_BUFFER))
102 printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2));
103 kfree(buffer.pointer);
104 return 0;
105}
106
107static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value)
108{
109 acpi_status status;
110 union acpi_object atpx_arg_elements[2];
111 struct acpi_object_list atpx_arg;
112 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
113 uint8_t buf[4] = {0};
114
115 if (!handle)
116 return -EINVAL;
117
118 atpx_arg.count = 2;
119 atpx_arg.pointer = &atpx_arg_elements[0];
120
121 atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
122 atpx_arg_elements[0].integer.value = cmd_id;
123
124 buf[2] = value & 0xff;
125 buf[3] = (value >> 8) & 0xff;
126
127 atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
128 atpx_arg_elements[1].buffer.length = 4;
129 atpx_arg_elements[1].buffer.pointer = buf;
130
131 status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
132 if (ACPI_FAILURE(status)) {
133 printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
134 return -ENOSYS;
135 }
136 kfree(buffer.pointer);
137
138 return 0;
139}
140
141static int radeon_atpx_set_discrete_state(acpi_handle handle, int state)
142{
143 return radeon_atpx_execute(handle, ATPX_GPU_PWR, state);
144}
145
146static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id)
147{
148 return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id);
149}
150
151
152static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
153{
154 if (id == VGA_SWITCHEROO_IGD)
155 radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 0);
156 else
157 radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 1);
158 return 0;
159}
160
161static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
162 enum vga_switcheroo_state state)
163{
164 /* on w500 ACPI can't change intel gpu state */
165 if (id == VGA_SWITCHEROO_IGD)
166 return 0;
167
168 radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state);
169 return 0;
170}
171
172static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
173{
174 acpi_handle dhandle, atpx_handle, atrm_handle;
175 acpi_status status;
176
177 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
178 if (!dhandle)
179 return false;
180
181 status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
182 if (ACPI_FAILURE(status))
183 return false;
184
185 status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
186 if (ACPI_FAILURE(status))
187 return false;
188
189 radeon_atpx_priv.dhandle = dhandle;
190 radeon_atpx_priv.atpx_handle = atpx_handle;
191 radeon_atpx_priv.atrm_handle = atrm_handle;
192 return true;
193}
194
195static int radeon_atpx_init(void)
196{
197 /* set up the ATPX handle */
198
199 radeon_atpx_get_version(radeon_atpx_priv.atpx_handle);
200 return 0;
201}
202
203static int radeon_atpx_get_client_id(struct pci_dev *pdev)
204{
205 if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
206 return VGA_SWITCHEROO_IGD;
207 else
208 return VGA_SWITCHEROO_DIS;
209}
210
211static struct vga_switcheroo_handler radeon_atpx_handler = {
212 .switchto = radeon_atpx_switchto,
213 .power_state = radeon_atpx_power_state,
214 .init = radeon_atpx_init,
215 .get_client_id = radeon_atpx_get_client_id,
216};
217
218static bool radeon_atpx_detect(void)
219{
220 char acpi_method_name[255] = { 0 };
221 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
222 struct pci_dev *pdev = NULL;
223 bool has_atpx = false;
224 int vga_count = 0;
225
226 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
227 vga_count++;
228
229 has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
230 }
231
232 if (has_atpx && vga_count == 2) {
233 acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer);
234 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
235 acpi_method_name);
236 radeon_atpx_priv.atpx_detected = true;
237 return true;
238 }
239 return false;
240}
241
242void radeon_register_atpx_handler(void)
243{
244 bool r;
245
246 /* detect if we have any ATPX + 2 VGA in the system */
247 r = radeon_atpx_detect();
248 if (!r)
249 return;
250
251 vga_switcheroo_register_handler(&radeon_atpx_handler);
252}
253
254void radeon_unregister_atpx_handler(void)
255{
256 vga_switcheroo_unregister_handler();
257}
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 906921740c60..557240460526 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -30,6 +30,7 @@
30#include "radeon.h" 30#include "radeon.h"
31#include "atom.h" 31#include "atom.h"
32 32
33#include <linux/vga_switcheroo.h>
33/* 34/*
34 * BIOS. 35 * BIOS.
35 */ 36 */
@@ -62,7 +63,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
62 iounmap(bios); 63 iounmap(bios);
63 return false; 64 return false;
64 } 65 }
65 memcpy(rdev->bios, bios, size); 66 memcpy_fromio(rdev->bios, bios, size);
66 iounmap(bios); 67 iounmap(bios);
67 return true; 68 return true;
68} 69}
@@ -93,6 +94,38 @@ static bool radeon_read_bios(struct radeon_device *rdev)
93 return true; 94 return true;
94} 95}
95 96
97/* ATRM is used to get the BIOS on the discrete cards in
98 * dual-gpu systems.
99 */
100static bool radeon_atrm_get_bios(struct radeon_device *rdev)
101{
102 int ret;
103 int size = 64 * 1024;
104 int i;
105
106 if (!radeon_atrm_supported(rdev->pdev))
107 return false;
108
109 rdev->bios = kmalloc(size, GFP_KERNEL);
110 if (!rdev->bios) {
111 DRM_ERROR("Unable to allocate bios\n");
112 return false;
113 }
114
115 for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
116 ret = radeon_atrm_get_bios_chunk(rdev->bios,
117 (i * ATRM_BIOS_PAGE),
118 ATRM_BIOS_PAGE);
119 if (ret <= 0)
120 break;
121 }
122
123 if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
124 kfree(rdev->bios);
125 return false;
126 }
127 return true;
128}
96static bool r700_read_disabled_bios(struct radeon_device *rdev) 129static bool r700_read_disabled_bios(struct radeon_device *rdev)
97{ 130{
98 uint32_t viph_control; 131 uint32_t viph_control;
@@ -388,16 +421,16 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
388 return legacy_read_disabled_bios(rdev); 421 return legacy_read_disabled_bios(rdev);
389} 422}
390 423
424
391bool radeon_get_bios(struct radeon_device *rdev) 425bool radeon_get_bios(struct radeon_device *rdev)
392{ 426{
393 bool r; 427 bool r;
394 uint16_t tmp; 428 uint16_t tmp;
395 429
396 if (rdev->flags & RADEON_IS_IGP) { 430 r = radeon_atrm_get_bios(rdev);
431 if (r == false)
397 r = igp_read_bios_from_vram(rdev); 432 r = igp_read_bios_from_vram(rdev);
398 if (r == false) 433 if (r == false)
399 r = radeon_read_bios(rdev);
400 } else
401 r = radeon_read_bios(rdev); 434 r = radeon_read_bios(rdev);
402 if (r == false) { 435 if (r == false) {
403 r = radeon_read_disabled_bios(rdev); 436 r = radeon_read_disabled_bios(rdev);
@@ -408,6 +441,13 @@ bool radeon_get_bios(struct radeon_device *rdev)
408 return false; 441 return false;
409 } 442 }
410 if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) { 443 if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
444 printk("BIOS signature incorrect %x %x\n", rdev->bios[0], rdev->bios[1]);
445 goto free_bios;
446 }
447
448 tmp = RBIOS16(0x18);
449 if (RBIOS8(tmp + 0x14) != 0x0) {
450 DRM_INFO("Not an x86 BIOS ROM, not using.\n");
411 goto free_bios; 451 goto free_bios;
412 } 452 }
413 453
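
[Editor's note, not part of the commit] radeon_atrm_get_bios() above pulls the ROM through the ACPI ATRM method one page at a time and then checks the 0x55AA PCI ROM signature. The following self-contained C sketch reproduces that read-and-validate pattern in userspace; read_chunk(), the page/ROM sizes and the fake ROM contents are invented for illustration and are not part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ROM_PAGE 4096
#define ROM_SIZE (64 * 1024)

/* invented stand-in for radeon_atrm_get_bios_chunk(): copies len bytes of a
 * fake ROM into dst at offset and returns the number of bytes copied */
static int read_chunk(uint8_t *dst, int offset, int len)
{
	static uint8_t fake_rom[ROM_SIZE] = { 0x55, 0xaa };

	if (offset >= ROM_SIZE)
		return 0;
	if (offset + len > ROM_SIZE)
		len = ROM_SIZE - offset;
	memcpy(dst + offset, fake_rom + offset, len);
	return len;
}

int main(void)
{
	uint8_t *rom = malloc(ROM_SIZE);
	int i, ret;

	if (!rom)
		return 1;
	/* read page by page, stopping on a short or failed read like the driver */
	for (i = 0; i < ROM_SIZE / ROM_PAGE; i++) {
		ret = read_chunk(rom, i * ROM_PAGE, ROM_PAGE);
		if (ret <= 0)
			break;
	}
	/* reject the image unless at least one page arrived and the
	 * 0x55AA PCI ROM signature is present */
	if (i == 0 || rom[0] != 0x55 || rom[1] != 0xaa) {
		fprintf(stderr, "no valid ROM image\n");
		free(rom);
		return 1;
	}
	printf("read %d pages, ROM signature ok\n", i);
	free(rom);
	return 0;
}

The same short-read and "i == 0" check is what lets radeon_get_bios() fall back to the VRAM and PCI ROM paths when ATRM returns nothing usable.
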
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 73c4405bf42f..f64936cc4dd9 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -96,6 +96,7 @@ void radeon_get_clock_info(struct drm_device *dev)
96 struct radeon_device *rdev = dev->dev_private; 96 struct radeon_device *rdev = dev->dev_private;
97 struct radeon_pll *p1pll = &rdev->clock.p1pll; 97 struct radeon_pll *p1pll = &rdev->clock.p1pll;
98 struct radeon_pll *p2pll = &rdev->clock.p2pll; 98 struct radeon_pll *p2pll = &rdev->clock.p2pll;
99 struct radeon_pll *dcpll = &rdev->clock.dcpll;
99 struct radeon_pll *spll = &rdev->clock.spll; 100 struct radeon_pll *spll = &rdev->clock.spll;
100 struct radeon_pll *mpll = &rdev->clock.mpll; 101 struct radeon_pll *mpll = &rdev->clock.mpll;
101 int ret; 102 int ret;
@@ -204,6 +205,17 @@ void radeon_get_clock_info(struct drm_device *dev)
204 p2pll->max_frac_feedback_div = 0; 205 p2pll->max_frac_feedback_div = 0;
205 } 206 }
206 207
208 /* dcpll is DCE4 only */
209 dcpll->min_post_div = 2;
210 dcpll->max_post_div = 0x7f;
211 dcpll->min_frac_feedback_div = 0;
212 dcpll->max_frac_feedback_div = 9;
213 dcpll->min_ref_div = 2;
214 dcpll->max_ref_div = 0x3ff;
215 dcpll->min_feedback_div = 4;
216 dcpll->max_feedback_div = 0xfff;
217 dcpll->best_vco = 0;
218
207 p1pll->min_ref_div = 2; 219 p1pll->min_ref_div = 2;
208 p1pll->max_ref_div = 0x3ff; 220 p1pll->max_ref_div = 0x3ff;
209 p1pll->min_feedback_div = 4; 221 p1pll->min_feedback_div = 4;
@@ -846,8 +858,10 @@ int radeon_static_clocks_init(struct drm_device *dev)
846 /* XXX make sure engine is idle */ 858 /* XXX make sure engine is idle */
847 859
848 if (radeon_dynclks != -1) { 860 if (radeon_dynclks != -1) {
849 if (radeon_dynclks) 861 if (radeon_dynclks) {
850 radeon_set_clock_gating(rdev, 1); 862 if (rdev->asic->set_clock_gating)
863 radeon_set_clock_gating(rdev, 1);
864 }
851 } 865 }
852 radeon_apply_clock_quirks(rdev); 866 radeon_apply_clock_quirks(rdev);
853 return 0; 867 return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 22d476160d52..e9ea38ece375 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -150,6 +150,9 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
150 int rev; 150 int rev;
151 uint16_t offset = 0, check_offset; 151 uint16_t offset = 0, check_offset;
152 152
153 if (!rdev->bios)
154 return 0;
155
153 switch (table) { 156 switch (table) {
154 /* absolute offset tables */ 157 /* absolute offset tables */
155 case COMBIOS_ASIC_INIT_1_TABLE: 158 case COMBIOS_ASIC_INIT_1_TABLE:
@@ -443,6 +446,39 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
443 446
444} 447}
445 448
449bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
450{
451 int edid_info;
452 struct edid *edid;
453 edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
454 if (!edid_info)
455 return false;
456
457 edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
458 GFP_KERNEL);
459 if (edid == NULL)
460 return false;
461
462 memcpy((unsigned char *)edid,
463 (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);
464
465 if (!drm_edid_is_valid(edid)) {
466 kfree(edid);
467 return false;
468 }
469
470 rdev->mode_info.bios_hardcoded_edid = edid;
471 return true;
472}
473
474struct edid *
475radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
476{
477 if (rdev->mode_info.bios_hardcoded_edid)
478 return rdev->mode_info.bios_hardcoded_edid;
479 return NULL;
480}
481
446static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev, 482static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
447 int ddc_line) 483 int ddc_line)
448{ 484{
@@ -486,9 +522,65 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
486 i2c.y_data_reg = ddc_line; 522 i2c.y_data_reg = ddc_line;
487 } 523 }
488 524
489 if (rdev->family < CHIP_R200) 525 switch (rdev->family) {
490 i2c.hw_capable = false; 526 case CHIP_R100:
491 else { 527 case CHIP_RV100:
528 case CHIP_RS100:
529 case CHIP_RV200:
530 case CHIP_RS200:
531 case CHIP_RS300:
532 switch (ddc_line) {
533 case RADEON_GPIO_DVI_DDC:
534 /* in theory this should be hw capable,
535 * but it doesn't seem to work
536 */
537 i2c.hw_capable = false;
538 break;
539 default:
540 i2c.hw_capable = false;
541 break;
542 }
543 break;
544 case CHIP_R200:
545 switch (ddc_line) {
546 case RADEON_GPIO_DVI_DDC:
547 case RADEON_GPIO_MONID:
548 i2c.hw_capable = true;
549 break;
550 default:
551 i2c.hw_capable = false;
552 break;
553 }
554 break;
555 case CHIP_RV250:
556 case CHIP_RV280:
557 switch (ddc_line) {
558 case RADEON_GPIO_VGA_DDC:
559 case RADEON_GPIO_DVI_DDC:
560 case RADEON_GPIO_CRT2_DDC:
561 i2c.hw_capable = true;
562 break;
563 default:
564 i2c.hw_capable = false;
565 break;
566 }
567 break;
568 case CHIP_R300:
569 case CHIP_R350:
570 switch (ddc_line) {
571 case RADEON_GPIO_VGA_DDC:
572 case RADEON_GPIO_DVI_DDC:
573 i2c.hw_capable = true;
574 break;
575 default:
576 i2c.hw_capable = false;
577 break;
578 }
579 break;
580 case CHIP_RV350:
581 case CHIP_RV380:
582 case CHIP_RS400:
583 case CHIP_RS480:
492 switch (ddc_line) { 584 switch (ddc_line) {
493 case RADEON_GPIO_VGA_DDC: 585 case RADEON_GPIO_VGA_DDC:
494 case RADEON_GPIO_DVI_DDC: 586 case RADEON_GPIO_DVI_DDC:
@@ -504,9 +596,14 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
504 i2c.hw_capable = false; 596 i2c.hw_capable = false;
505 break; 597 break;
506 } 598 }
599 break;
600 default:
601 i2c.hw_capable = false;
602 break;
507 } 603 }
508 i2c.mm_i2c = false; 604 i2c.mm_i2c = false;
509 i2c.i2c_id = 0; 605 i2c.i2c_id = 0;
606 i2c.hpd_id = 0;
510 607
511 if (ddc_line) 608 if (ddc_line)
512 i2c.valid = true; 609 i2c.valid = true;
@@ -527,9 +624,6 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
527 int8_t rev; 624 int8_t rev;
528 uint16_t sclk, mclk; 625 uint16_t sclk, mclk;
529 626
530 if (rdev->bios == NULL)
531 return false;
532
533 pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE); 627 pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
534 if (pll_info) { 628 if (pll_info) {
535 rev = RBIOS8(pll_info); 629 rev = RBIOS8(pll_info);
@@ -654,9 +748,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
654 if (!p_dac) 748 if (!p_dac)
655 return NULL; 749 return NULL;
656 750
657 if (rdev->bios == NULL)
658 goto out;
659
660 /* check CRT table */ 751 /* check CRT table */
661 dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); 752 dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
662 if (dac_info) { 753 if (dac_info) {
@@ -673,7 +764,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
673 found = 1; 764 found = 1;
674 } 765 }
675 766
676out:
677 if (!found) /* fallback to defaults */ 767 if (!found) /* fallback to defaults */
678 radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac); 768 radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
679 769
@@ -687,9 +777,6 @@ radeon_combios_get_tv_info(struct radeon_device *rdev)
687 uint16_t tv_info; 777 uint16_t tv_info;
688 enum radeon_tv_std tv_std = TV_STD_NTSC; 778 enum radeon_tv_std tv_std = TV_STD_NTSC;
689 779
690 if (rdev->bios == NULL)
691 return tv_std;
692
693 tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); 780 tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
694 if (tv_info) { 781 if (tv_info) {
695 if (RBIOS8(tv_info + 6) == 'T') { 782 if (RBIOS8(tv_info + 6) == 'T') {
@@ -793,9 +880,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
793 if (!tv_dac) 880 if (!tv_dac)
794 return NULL; 881 return NULL;
795 882
796 if (rdev->bios == NULL)
797 goto out;
798
799 /* first check TV table */ 883 /* first check TV table */
800 dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); 884 dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
801 if (dac_info) { 885 if (dac_info) {
@@ -857,7 +941,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
857 } 941 }
858 } 942 }
859 943
860out:
861 if (!found) /* fallback to defaults */ 944 if (!found) /* fallback to defaults */
862 radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac); 945 radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac);
863 946
@@ -945,11 +1028,6 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
945 int tmp, i; 1028 int tmp, i;
946 struct radeon_encoder_lvds *lvds = NULL; 1029 struct radeon_encoder_lvds *lvds = NULL;
947 1030
948 if (rdev->bios == NULL) {
949 lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
950 goto out;
951 }
952
953 lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE); 1031 lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
954 1032
955 if (lcd_info) { 1033 if (lcd_info) {
@@ -1050,7 +1128,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
1050 DRM_INFO("No panel info found in BIOS\n"); 1128 DRM_INFO("No panel info found in BIOS\n");
1051 lvds = radeon_legacy_get_lvds_info_from_regs(rdev); 1129 lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
1052 } 1130 }
1053out: 1131
1054 if (lvds) 1132 if (lvds)
1055 encoder->native_mode = lvds->native_mode; 1133 encoder->native_mode = lvds->native_mode;
1056 return lvds; 1134 return lvds;
@@ -1102,9 +1180,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
1102 int i, n; 1180 int i, n;
1103 uint8_t ver; 1181 uint8_t ver;
1104 1182
1105 if (rdev->bios == NULL)
1106 return false;
1107
1108 tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); 1183 tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
1109 1184
1110 if (tmds_info) { 1185 if (tmds_info) {
@@ -1184,9 +1259,6 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
1184 enum radeon_combios_ddc gpio; 1259 enum radeon_combios_ddc gpio;
1185 struct radeon_i2c_bus_rec i2c_bus; 1260 struct radeon_i2c_bus_rec i2c_bus;
1186 1261
1187 if (rdev->bios == NULL)
1188 return false;
1189
1190 tmds->i2c_bus = NULL; 1262 tmds->i2c_bus = NULL;
1191 if (rdev->flags & RADEON_IS_IGP) { 1263 if (rdev->flags & RADEON_IS_IGP) {
1192 offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); 1264 offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
@@ -1253,7 +1325,10 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
1253 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); 1325 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
1254 break; 1326 break;
1255 case DDC_LCD: /* MM i2c */ 1327 case DDC_LCD: /* MM i2c */
1256 DRM_ERROR("MM i2c requires hw i2c engine\n"); 1328 i2c_bus.valid = true;
1329 i2c_bus.hw_capable = true;
1330 i2c_bus.mm_i2c = true;
1331 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
1257 break; 1332 break;
1258 default: 1333 default:
1259 DRM_ERROR("Unsupported gpio %d\n", gpio); 1334 DRM_ERROR("Unsupported gpio %d\n", gpio);
@@ -1909,9 +1984,6 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1909 struct radeon_i2c_bus_rec ddc_i2c; 1984 struct radeon_i2c_bus_rec ddc_i2c;
1910 struct radeon_hpd hpd; 1985 struct radeon_hpd hpd;
1911 1986
1912 if (rdev->bios == NULL)
1913 return false;
1914
1915 conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE); 1987 conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
1916 if (conn_info) { 1988 if (conn_info) {
1917 for (i = 0; i < 4; i++) { 1989 for (i = 0; i < 4; i++) {
@@ -2278,6 +2350,115 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2278 return true; 2350 return true;
2279} 2351}
2280 2352
2353void radeon_combios_get_power_modes(struct radeon_device *rdev)
2354{
2355 struct drm_device *dev = rdev->ddev;
2356 u16 offset, misc, misc2 = 0;
2357 u8 rev, blocks, tmp;
2358 int state_index = 0;
2359
2360 rdev->pm.default_power_state = NULL;
2361
2362 if (rdev->flags & RADEON_IS_MOBILITY) {
2363 offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
2364 if (offset) {
2365 rev = RBIOS8(offset);
2366 blocks = RBIOS8(offset + 0x2);
2367 /* power mode 0 tends to be the only valid one */
2368 rdev->pm.power_state[state_index].num_clock_modes = 1;
2369 rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2);
2370 rdev->pm.power_state[state_index].clock_info[0].sclk = RBIOS32(offset + 0x5 + 0x6);
2371 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
2372 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
2373 goto default_mode;
2374 /* skip overclock modes for now */
2375 if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
2376 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
2377 (rdev->pm.power_state[state_index].clock_info[0].sclk >
2378 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
2379 goto default_mode;
2380 rdev->pm.power_state[state_index].type =
2381 POWER_STATE_TYPE_BATTERY;
2382 misc = RBIOS16(offset + 0x5 + 0x0);
2383 if (rev > 4)
2384 misc2 = RBIOS16(offset + 0x5 + 0xe);
2385 if (misc & 0x4) {
2386 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
2387 if (misc & 0x8)
2388 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
2389 true;
2390 else
2391 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
2392 false;
2393 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = true;
2394 if (rev < 6) {
2395 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
2396 RBIOS16(offset + 0x5 + 0xb) * 4;
2397 tmp = RBIOS8(offset + 0x5 + 0xd);
2398 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
2399 } else {
2400 u8 entries = RBIOS8(offset + 0x5 + 0xb);
2401 u16 voltage_table_offset = RBIOS16(offset + 0x5 + 0xc);
2402 if (entries && voltage_table_offset) {
2403 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
2404 RBIOS16(voltage_table_offset) * 4;
2405 tmp = RBIOS8(voltage_table_offset + 0x2);
2406 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
2407 } else
2408 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = false;
2409 }
2410 switch ((misc2 & 0x700) >> 8) {
2411 case 0:
2412 default:
2413 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 0;
2414 break;
2415 case 1:
2416 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 33;
2417 break;
2418 case 2:
2419 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 66;
2420 break;
2421 case 3:
2422 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 99;
2423 break;
2424 case 4:
2425 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 132;
2426 break;
2427 }
2428 } else
2429 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2430 if (rev > 6)
2431 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
2432 RBIOS8(offset + 0x5 + 0x10);
2433 state_index++;
2434 } else {
2435 /* XXX figure out some good default low power mode for mobility cards w/out power tables */
2436 }
2437 } else {
2438 /* XXX figure out some good default low power mode for desktop cards */
2439 }
2440
2441default_mode:
2442 /* add the default mode */
2443 rdev->pm.power_state[state_index].type =
2444 POWER_STATE_TYPE_DEFAULT;
2445 rdev->pm.power_state[state_index].num_clock_modes = 1;
2446 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
2447 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
2448 rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
2449 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2450 if (rdev->asic->get_pcie_lanes)
2451 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
2452 else
2453 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
2454 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
2455 rdev->pm.num_power_states = state_index + 1;
2456
2457 rdev->pm.current_power_state = rdev->pm.default_power_state;
2458 rdev->pm.current_clock_mode =
2459 rdev->pm.default_power_state->default_clock_mode;
2460}
2461
2281void radeon_external_tmds_setup(struct drm_encoder *encoder) 2462void radeon_external_tmds_setup(struct drm_encoder *encoder)
2282{ 2463{
2283 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 2464 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -2289,23 +2470,21 @@ void radeon_external_tmds_setup(struct drm_encoder *encoder)
2289 switch (tmds->dvo_chip) { 2470 switch (tmds->dvo_chip) {
2290 case DVO_SIL164: 2471 case DVO_SIL164:
2291 /* sil 164 */ 2472 /* sil 164 */
2292 radeon_i2c_do_lock(tmds->i2c_bus, 1); 2473 radeon_i2c_put_byte(tmds->i2c_bus,
2293 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2474 tmds->slave_addr,
2294 tmds->slave_addr, 2475 0x08, 0x30);
2295 0x08, 0x30); 2476 radeon_i2c_put_byte(tmds->i2c_bus,
2296 radeon_i2c_sw_put_byte(tmds->i2c_bus,
2297 tmds->slave_addr, 2477 tmds->slave_addr,
2298 0x09, 0x00); 2478 0x09, 0x00);
2299 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2479 radeon_i2c_put_byte(tmds->i2c_bus,
2300 tmds->slave_addr, 2480 tmds->slave_addr,
2301 0x0a, 0x90); 2481 0x0a, 0x90);
2302 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2482 radeon_i2c_put_byte(tmds->i2c_bus,
2303 tmds->slave_addr, 2483 tmds->slave_addr,
2304 0x0c, 0x89); 2484 0x0c, 0x89);
2305 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2485 radeon_i2c_put_byte(tmds->i2c_bus,
2306 tmds->slave_addr, 2486 tmds->slave_addr,
2307 0x08, 0x3b); 2487 0x08, 0x3b);
2308 radeon_i2c_do_lock(tmds->i2c_bus, 0);
2309 break; 2488 break;
2310 case DVO_SIL1178: 2489 case DVO_SIL1178:
2311 /* sil 1178 - untested */ 2490 /* sil 1178 - untested */
@@ -2338,9 +2517,6 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
2338 uint32_t reg, val, and_mask, or_mask; 2517 uint32_t reg, val, and_mask, or_mask;
2339 struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; 2518 struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
2340 2519
2341 if (rdev->bios == NULL)
2342 return false;
2343
2344 if (!tmds) 2520 if (!tmds)
2345 return false; 2521 return false;
2346 2522
@@ -2390,11 +2566,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
2390 index++; 2566 index++;
2391 val = RBIOS8(index); 2567 val = RBIOS8(index);
2392 index++; 2568 index++;
2393 radeon_i2c_do_lock(tmds->i2c_bus, 1); 2569 radeon_i2c_put_byte(tmds->i2c_bus,
2394 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2570 slave_addr,
2395 slave_addr, 2571 reg, val);
2396 reg, val);
2397 radeon_i2c_do_lock(tmds->i2c_bus, 0);
2398 break; 2572 break;
2399 default: 2573 default:
2400 DRM_ERROR("Unknown id %d\n", id >> 13); 2574 DRM_ERROR("Unknown id %d\n", id >> 13);
@@ -2447,11 +2621,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
2447 reg = id & 0x1fff; 2621 reg = id & 0x1fff;
2448 val = RBIOS8(index); 2622 val = RBIOS8(index);
2449 index += 1; 2623 index += 1;
2450 radeon_i2c_do_lock(tmds->i2c_bus, 1); 2624 radeon_i2c_put_byte(tmds->i2c_bus,
2451 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2625 tmds->slave_addr,
2452 tmds->slave_addr, 2626 reg, val);
2453 reg, val);
2454 radeon_i2c_do_lock(tmds->i2c_bus, 0);
2455 break; 2627 break;
2456 default: 2628 default:
2457 DRM_ERROR("Unknown id %d\n", id >> 13); 2629 DRM_ERROR("Unknown id %d\n", id >> 13);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 65f81942f399..ee0083f982d8 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -479,10 +479,8 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
479 ret = connector_status_connected; 479 ret = connector_status_connected;
480 else { 480 else {
481 if (radeon_connector->ddc_bus) { 481 if (radeon_connector->ddc_bus) {
482 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
483 radeon_connector->edid = drm_get_edid(&radeon_connector->base, 482 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
484 &radeon_connector->ddc_bus->adapter); 483 &radeon_connector->ddc_bus->adapter);
485 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
486 if (radeon_connector->edid) 484 if (radeon_connector->edid)
487 ret = connector_status_connected; 485 ret = connector_status_connected;
488 } 486 }
@@ -587,19 +585,14 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
587 if (!encoder) 585 if (!encoder)
588 ret = connector_status_disconnected; 586 ret = connector_status_disconnected;
589 587
590 if (radeon_connector->ddc_bus) { 588 if (radeon_connector->ddc_bus)
591 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
592 dret = radeon_ddc_probe(radeon_connector); 589 dret = radeon_ddc_probe(radeon_connector);
593 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
594 }
595 if (dret) { 590 if (dret) {
596 if (radeon_connector->edid) { 591 if (radeon_connector->edid) {
597 kfree(radeon_connector->edid); 592 kfree(radeon_connector->edid);
598 radeon_connector->edid = NULL; 593 radeon_connector->edid = NULL;
599 } 594 }
600 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
601 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 595 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
602 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
603 596
604 if (!radeon_connector->edid) { 597 if (!radeon_connector->edid) {
605 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", 598 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -744,19 +737,14 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
744 enum drm_connector_status ret = connector_status_disconnected; 737 enum drm_connector_status ret = connector_status_disconnected;
745 bool dret = false; 738 bool dret = false;
746 739
747 if (radeon_connector->ddc_bus) { 740 if (radeon_connector->ddc_bus)
748 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
749 dret = radeon_ddc_probe(radeon_connector); 741 dret = radeon_ddc_probe(radeon_connector);
750 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
751 }
752 if (dret) { 742 if (dret) {
753 if (radeon_connector->edid) { 743 if (radeon_connector->edid) {
754 kfree(radeon_connector->edid); 744 kfree(radeon_connector->edid);
755 radeon_connector->edid = NULL; 745 radeon_connector->edid = NULL;
756 } 746 }
757 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
758 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 747 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
759 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
760 748
761 if (!radeon_connector->edid) { 749 if (!radeon_connector->edid) {
762 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", 750 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -952,7 +940,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
952 if (radeon_connector->edid) 940 if (radeon_connector->edid)
953 kfree(radeon_connector->edid); 941 kfree(radeon_connector->edid);
954 if (radeon_dig_connector->dp_i2c_bus) 942 if (radeon_dig_connector->dp_i2c_bus)
955 radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus); 943 radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus);
956 kfree(radeon_connector->con_priv); 944 kfree(radeon_connector->con_priv);
957 drm_sysfs_connector_remove(connector); 945 drm_sysfs_connector_remove(connector);
958 drm_connector_cleanup(connector); 946 drm_connector_cleanup(connector);
@@ -988,12 +976,10 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
988 ret = connector_status_connected; 976 ret = connector_status_connected;
989 } 977 }
990 } else { 978 } else {
991 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
992 if (radeon_ddc_probe(radeon_connector)) { 979 if (radeon_ddc_probe(radeon_connector)) {
993 radeon_dig_connector->dp_sink_type = sink_type; 980 radeon_dig_connector->dp_sink_type = sink_type;
994 ret = connector_status_connected; 981 ret = connector_status_connected;
995 } 982 }
996 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
997 } 983 }
998 984
999 return ret; 985 return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 06123ba31d31..dc6eba6b96dd 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1644,6 +1644,7 @@ static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_pri
1644 radeon_cp_load_microcode(dev_priv); 1644 radeon_cp_load_microcode(dev_priv);
1645 radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); 1645 radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
1646 1646
1647 dev_priv->have_z_offset = 0;
1647 radeon_do_engine_reset(dev); 1648 radeon_do_engine_reset(dev);
1648 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); 1649 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
1649 1650
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index e9d085021c1f..70ba02ed7723 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -194,11 +194,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
194 } 194 }
195 radeon_bo_list_unreserve(&parser->validated); 195 radeon_bo_list_unreserve(&parser->validated);
196 for (i = 0; i < parser->nrelocs; i++) { 196 for (i = 0; i < parser->nrelocs; i++) {
197 if (parser->relocs[i].gobj) { 197 if (parser->relocs[i].gobj)
198 mutex_lock(&parser->rdev->ddev->struct_mutex); 198 drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
199 drm_gem_object_unreference(parser->relocs[i].gobj);
200 mutex_unlock(&parser->rdev->ddev->struct_mutex);
201 }
202 } 199 }
203 kfree(parser->track); 200 kfree(parser->track);
204 kfree(parser->relocs); 201 kfree(parser->relocs);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 28772a37009c..b7023fff89eb 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -36,7 +36,14 @@ static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
36 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 36 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
37 uint32_t cur_lock; 37 uint32_t cur_lock;
38 38
39 if (ASIC_IS_AVIVO(rdev)) { 39 if (ASIC_IS_DCE4(rdev)) {
40 cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
41 if (lock)
42 cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
43 else
44 cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
45 WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
46 } else if (ASIC_IS_AVIVO(rdev)) {
40 cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset); 47 cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
41 if (lock) 48 if (lock)
42 cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK; 49 cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
@@ -58,7 +65,10 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
58 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 65 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
59 struct radeon_device *rdev = crtc->dev->dev_private; 66 struct radeon_device *rdev = crtc->dev->dev_private;
60 67
61 if (ASIC_IS_AVIVO(rdev)) { 68 if (ASIC_IS_DCE4(rdev)) {
69 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
70 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
71 } else if (ASIC_IS_AVIVO(rdev)) {
62 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); 72 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
63 WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); 73 WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
64 } else { 74 } else {
@@ -81,10 +91,14 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
81 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 91 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
82 struct radeon_device *rdev = crtc->dev->dev_private; 92 struct radeon_device *rdev = crtc->dev->dev_private;
83 93
84 if (ASIC_IS_AVIVO(rdev)) { 94 if (ASIC_IS_DCE4(rdev)) {
95 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
96 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
97 EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
98 } else if (ASIC_IS_AVIVO(rdev)) {
85 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); 99 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
86 WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | 100 WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
87 (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); 101 (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
88 } else { 102 } else {
89 switch (radeon_crtc->crtc_id) { 103 switch (radeon_crtc->crtc_id) {
90 case 0: 104 case 0:
@@ -109,7 +123,10 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
109 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 123 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
110 struct radeon_device *rdev = crtc->dev->dev_private; 124 struct radeon_device *rdev = crtc->dev->dev_private;
111 125
112 if (ASIC_IS_AVIVO(rdev)) { 126 if (ASIC_IS_DCE4(rdev)) {
127 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0);
128 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
129 } else if (ASIC_IS_AVIVO(rdev)) {
113 if (rdev->family >= CHIP_RV770) { 130 if (rdev->family >= CHIP_RV770) {
114 if (radeon_crtc->crtc_id) 131 if (radeon_crtc->crtc_id)
115 WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0); 132 WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
@@ -169,17 +186,13 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
169unpin: 186unpin:
170 if (radeon_crtc->cursor_bo) { 187 if (radeon_crtc->cursor_bo) {
171 radeon_gem_object_unpin(radeon_crtc->cursor_bo); 188 radeon_gem_object_unpin(radeon_crtc->cursor_bo);
172 mutex_lock(&crtc->dev->struct_mutex); 189 drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
173 drm_gem_object_unreference(radeon_crtc->cursor_bo);
174 mutex_unlock(&crtc->dev->struct_mutex);
175 } 190 }
176 191
177 radeon_crtc->cursor_bo = obj; 192 radeon_crtc->cursor_bo = obj;
178 return 0; 193 return 0;
179fail: 194fail:
180 mutex_lock(&crtc->dev->struct_mutex); 195 drm_gem_object_unreference_unlocked(obj);
181 drm_gem_object_unreference(obj);
182 mutex_unlock(&crtc->dev->struct_mutex);
183 196
184 return 0; 197 return 0;
185} 198}
@@ -201,7 +214,20 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
201 yorigin = CURSOR_HEIGHT - 1; 214 yorigin = CURSOR_HEIGHT - 1;
202 215
203 radeon_lock_cursor(crtc, true); 216 radeon_lock_cursor(crtc, true);
204 if (ASIC_IS_AVIVO(rdev)) { 217 if (ASIC_IS_DCE4(rdev)) {
218 /* cursors are offset into the total surface */
219 x += crtc->x;
220 y += crtc->y;
221 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
222
223 /* XXX: check if evergreen has the same issues as avivo chips */
224 WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
225 ((xorigin ? 0 : x) << 16) |
226 (yorigin ? 0 : y));
227 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
228 WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
229 ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1));
230 } else if (ASIC_IS_AVIVO(rdev)) {
205 int w = radeon_crtc->cursor_width; 231 int w = radeon_crtc->cursor_width;
206 int i = 0; 232 int i = 0;
207 struct drm_crtc *crtc_p; 233 struct drm_crtc *crtc_p;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 768b1509fa03..e28e4ed5f720 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -30,6 +30,7 @@
30#include <drm/drm_crtc_helper.h> 30#include <drm/drm_crtc_helper.h>
31#include <drm/radeon_drm.h> 31#include <drm/radeon_drm.h>
32#include <linux/vgaarb.h> 32#include <linux/vgaarb.h>
33#include <linux/vga_switcheroo.h>
33#include "radeon_reg.h" 34#include "radeon_reg.h"
34#include "radeon.h" 35#include "radeon.h"
35#include "radeon_asic.h" 36#include "radeon_asic.h"
@@ -100,80 +101,103 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
100 } 101 }
101} 102}
102 103
103/* 104/**
104 * MC common functions 105 * radeon_vram_location - try to find VRAM location
 106 * @rdev: radeon device structure holding all necessary information
 107 * @mc: memory controller structure holding memory information
108 * @base: base address at which to put VRAM
109 *
 110 * Function will try to place VRAM at the base address provided
 111 * as parameter (which is so far either the PCI aperture address or,
 112 * for IGP, the TOM base address).
113 *
 114 * If there is not enough space to fit the invisible VRAM in the 32-bit
115 * address space then we limit the VRAM size to the aperture.
116 *
117 * If we are using AGP and if the AGP aperture doesn't allow us to have
 118 * room for all the VRAM then we restrict the VRAM to the PCI aperture
119 * size and print a warning.
120 *
 121 * This function never fails; the worst case is limiting VRAM.
122 *
 123 * Note: GTT start, end and size should be initialized before calling this
 124 * function on AGP platforms.
125 *
 126 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
127 * this shouldn't be a problem as we are using the PCI aperture as a reference.
128 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
129 * not IGP.
130 *
 131 * Note: we use mc_vram_size because on some boards we need to program the mc
 132 * to cover the whole aperture even if the VRAM size is smaller than the
 133 * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
134 *
 135 * Note: when limiting vram it's safe to overwrite real_vram_size because
 136 * we are not in the case where real_vram_size is smaller than mc_vram_size
 137 * (i.e. not affected by the bogus hw of Novell bug 204882, along with lots
 138 * of Ubuntu ones)
139 *
 140 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 141 * explicitly check for that though.
142 *
143 * FIXME: when reducing VRAM size align new size on power of 2.
144 */
145void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
146{
147 mc->vram_start = base;
148 if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
149 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
150 mc->real_vram_size = mc->aper_size;
151 mc->mc_vram_size = mc->aper_size;
152 }
153 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
154 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
155 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
156 mc->real_vram_size = mc->aper_size;
157 mc->mc_vram_size = mc->aper_size;
158 }
159 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
160 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
161 mc->mc_vram_size >> 20, mc->vram_start,
162 mc->vram_end, mc->real_vram_size >> 20);
163}
164
165/**
166 * radeon_gtt_location - try to find GTT location
 167 * @rdev: radeon device structure holding all necessary information
 168 * @mc: memory controller structure holding memory information
 169 *
 170 * Function will try to place GTT before or after VRAM.
 171 *
 172 * If GTT size is bigger than the space left then we adjust the GTT size.
 173 * Thus this function never fails.
174 *
175 * FIXME: when reducing GTT size align new size on power of 2.
105 */ 176 */
106int radeon_mc_setup(struct radeon_device *rdev) 177void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
107{ 178{
108 uint32_t tmp; 179 u64 size_af, size_bf;
109 180
110 /* Some chips have an "issue" with the memory controller, the 181 size_af = 0xFFFFFFFF - mc->vram_end;
111 * location must be aligned to the size. We just align it down, 182 size_bf = mc->vram_start;
112 * too bad if we walk over the top of system memory, we don't 183 if (size_bf > size_af) {
113 * use DMA without a remapped anyway. 184 if (mc->gtt_size > size_bf) {
114 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP 185 dev_warn(rdev->dev, "limiting GTT\n");
115 */ 186 mc->gtt_size = size_bf;
116 /* FGLRX seems to setup like this, VRAM a 0, then GART.
117 */
118 /*
119 * Note: from R6xx the address space is 40bits but here we only
120 * use 32bits (still have to see a card which would exhaust 4G
121 * address space).
122 */
123 if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
124 /* vram location was already setup try to put gtt after
125 * if it fits */
126 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
127 tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
128 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
129 rdev->mc.gtt_location = tmp;
130 } else {
131 if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
132 printk(KERN_ERR "[drm] GTT too big to fit "
133 "before or after vram location.\n");
134 return -EINVAL;
135 }
136 rdev->mc.gtt_location = 0;
137 }
138 } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
139 /* gtt location was already setup try to put vram before
140 * if it fits */
141 if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
142 rdev->mc.vram_location = 0;
143 } else {
144 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
145 tmp += (rdev->mc.mc_vram_size - 1);
146 tmp &= ~(rdev->mc.mc_vram_size - 1);
147 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
148 rdev->mc.vram_location = tmp;
149 } else {
150 printk(KERN_ERR "[drm] vram too big to fit "
151 "before or after GTT location.\n");
152 return -EINVAL;
153 }
154 } 187 }
188 mc->gtt_start = mc->vram_start - mc->gtt_size;
155 } else { 189 } else {
156 rdev->mc.vram_location = 0; 190 if (mc->gtt_size > size_af) {
157 tmp = rdev->mc.mc_vram_size; 191 dev_warn(rdev->dev, "limiting GTT\n");
158 tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); 192 mc->gtt_size = size_af;
159 rdev->mc.gtt_location = tmp; 193 }
160 } 194 mc->gtt_start = mc->vram_end + 1;
161 rdev->mc.vram_start = rdev->mc.vram_location; 195 }
162 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; 196 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
163 rdev->mc.gtt_start = rdev->mc.gtt_location; 197 dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
164 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 198 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
165 DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
166 DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
167 (unsigned)rdev->mc.vram_location,
168 (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
169 DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
170 DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
171 (unsigned)rdev->mc.gtt_location,
172 (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
173 return 0;
174} 199}
175 200
176
177/* 201/*
178 * GPU helpers function. 202 * GPU helpers function.
179 */ 203 */
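
[Editor's note, not part of the commit] radeon_gtt_location() in the hunk above places the GTT in whichever gap around VRAM is larger within the 32-bit MC address space, shrinking the GTT when it does not fit. A standalone C sketch of that decision, with made-up VRAM/GTT numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* made-up layout: 512M of VRAM mapped at 3G, 512M of GTT requested */
	uint64_t vram_start = 0xC0000000ULL;
	uint64_t vram_end   = vram_start + (512ULL << 20) - 1;
	uint64_t gtt_size   = 512ULL << 20;
	uint64_t gtt_start, gtt_end;
	uint64_t size_af = 0xFFFFFFFFULL - vram_end;	/* room after VRAM */
	uint64_t size_bf = vram_start;			/* room before VRAM */

	if (size_bf > size_af) {
		if (gtt_size > size_bf)
			gtt_size = size_bf;	/* "limiting GTT", as the driver warns */
		gtt_start = vram_start - gtt_size;
	} else {
		if (gtt_size > size_af)
			gtt_size = size_af;
		gtt_start = vram_end + 1;
	}
	gtt_end = gtt_start + gtt_size - 1;

	printf("GTT: %lluM 0x%08llX - 0x%08llX\n",
	       (unsigned long long)(gtt_size >> 20),
	       (unsigned long long)gtt_start,
	       (unsigned long long)gtt_end);
	return 0;
}

With these numbers size_bf (3072M) beats size_af (512M), so the GTT lands directly below VRAM at 0xA0000000, matching the dev_info() layout print in the hunk above.
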
@@ -182,7 +206,16 @@ bool radeon_card_posted(struct radeon_device *rdev)
182 uint32_t reg; 206 uint32_t reg;
183 207
184 /* first check CRTCs */ 208 /* first check CRTCs */
185 if (ASIC_IS_AVIVO(rdev)) { 209 if (ASIC_IS_DCE4(rdev)) {
210 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
211 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
212 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
213 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
214 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
215 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
216 if (reg & EVERGREEN_CRTC_MASTER_EN)
217 return true;
218 } else if (ASIC_IS_AVIVO(rdev)) {
186 reg = RREG32(AVIVO_D1CRTC_CONTROL) | 219 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
187 RREG32(AVIVO_D2CRTC_CONTROL); 220 RREG32(AVIVO_D2CRTC_CONTROL);
188 if (reg & AVIVO_CRTC_EN) { 221 if (reg & AVIVO_CRTC_EN) {
@@ -229,6 +262,8 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev)
229 262
230int radeon_dummy_page_init(struct radeon_device *rdev) 263int radeon_dummy_page_init(struct radeon_device *rdev)
231{ 264{
265 if (rdev->dummy_page.page)
266 return 0;
232 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); 267 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
233 if (rdev->dummy_page.page == NULL) 268 if (rdev->dummy_page.page == NULL)
234 return -ENOMEM; 269 return -ENOMEM;
@@ -310,7 +345,7 @@ void radeon_register_accessor_init(struct radeon_device *rdev)
310 rdev->mc_rreg = &rs600_mc_rreg; 345 rdev->mc_rreg = &rs600_mc_rreg;
311 rdev->mc_wreg = &rs600_mc_wreg; 346 rdev->mc_wreg = &rs600_mc_wreg;
312 } 347 }
313 if (rdev->family >= CHIP_R600) { 348 if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
314 rdev->pciep_rreg = &r600_pciep_rreg; 349 rdev->pciep_rreg = &r600_pciep_rreg;
315 rdev->pciep_wreg = &r600_pciep_wreg; 350 rdev->pciep_wreg = &r600_pciep_wreg;
316 } 351 }
@@ -329,21 +364,22 @@ int radeon_asic_init(struct radeon_device *rdev)
329 case CHIP_RS100: 364 case CHIP_RS100:
330 case CHIP_RV200: 365 case CHIP_RV200:
331 case CHIP_RS200: 366 case CHIP_RS200:
367 rdev->asic = &r100_asic;
368 break;
332 case CHIP_R200: 369 case CHIP_R200:
333 case CHIP_RV250: 370 case CHIP_RV250:
334 case CHIP_RS300: 371 case CHIP_RS300:
335 case CHIP_RV280: 372 case CHIP_RV280:
336 rdev->asic = &r100_asic; 373 rdev->asic = &r200_asic;
337 break; 374 break;
338 case CHIP_R300: 375 case CHIP_R300:
339 case CHIP_R350: 376 case CHIP_R350:
340 case CHIP_RV350: 377 case CHIP_RV350:
341 case CHIP_RV380: 378 case CHIP_RV380:
342 rdev->asic = &r300_asic; 379 if (rdev->flags & RADEON_IS_PCIE)
343 if (rdev->flags & RADEON_IS_PCIE) { 380 rdev->asic = &r300_asic_pcie;
344 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; 381 else
345 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; 382 rdev->asic = &r300_asic;
346 }
347 break; 383 break;
348 case CHIP_R420: 384 case CHIP_R420:
349 case CHIP_R423: 385 case CHIP_R423:
@@ -387,6 +423,13 @@ int radeon_asic_init(struct radeon_device *rdev)
387 case CHIP_RV740: 423 case CHIP_RV740:
388 rdev->asic = &rv770_asic; 424 rdev->asic = &rv770_asic;
389 break; 425 break;
426 case CHIP_CEDAR:
427 case CHIP_REDWOOD:
428 case CHIP_JUNIPER:
429 case CHIP_CYPRESS:
430 case CHIP_HEMLOCK:
431 rdev->asic = &evergreen_asic;
432 break;
390 default: 433 default:
391 /* FIXME: not supported yet */ 434 /* FIXME: not supported yet */
392 return -EINVAL; 435 return -EINVAL;
@@ -613,6 +656,36 @@ void radeon_check_arguments(struct radeon_device *rdev)
613 } 656 }
614} 657}
615 658
659static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
660{
661 struct drm_device *dev = pci_get_drvdata(pdev);
662 struct radeon_device *rdev = dev->dev_private;
663 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
664 if (state == VGA_SWITCHEROO_ON) {
665 printk(KERN_INFO "radeon: switched on\n");
666 /* don't suspend or resume card normally */
667 rdev->powered_down = false;
668 radeon_resume_kms(dev);
669 } else {
670 printk(KERN_INFO "radeon: switched off\n");
671 radeon_suspend_kms(dev, pmm);
672 /* don't suspend or resume card normally */
673 rdev->powered_down = true;
674 }
675}
676
677static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
678{
679 struct drm_device *dev = pci_get_drvdata(pdev);
680 bool can_switch;
681
682 spin_lock(&dev->count_lock);
683 can_switch = (dev->open_count == 0);
684 spin_unlock(&dev->count_lock);
685 return can_switch;
686}
687
688
616int radeon_device_init(struct radeon_device *rdev, 689int radeon_device_init(struct radeon_device *rdev,
617 struct drm_device *ddev, 690 struct drm_device *ddev,
618 struct pci_dev *pdev, 691 struct pci_dev *pdev,
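The two switcheroo callbacks added above are the client half of the new vga_switcheroo support: radeon_switcheroo_set_state() reuses the existing KMS suspend/resume paths to power the card up or down and flips rdev->powered_down so that the regular PM entry points (patched further down in this file) become no-ops while the other GPU drives the displays, and radeon_switcheroo_can_switch() refuses a switch while any DRM file handle is still open, sampling dev->open_count under dev->count_lock. The matching vga_switcheroo_register_client()/vga_switcheroo_unregister_client() calls appear later in radeon_device_init() and radeon_device_fini().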
@@ -638,11 +711,14 @@ int radeon_device_init(struct radeon_device *rdev,
638 mutex_init(&rdev->cs_mutex); 711 mutex_init(&rdev->cs_mutex);
639 mutex_init(&rdev->ib_pool.mutex); 712 mutex_init(&rdev->ib_pool.mutex);
640 mutex_init(&rdev->cp.mutex); 713 mutex_init(&rdev->cp.mutex);
714 mutex_init(&rdev->dc_hw_i2c_mutex);
641 if (rdev->family >= CHIP_R600) 715 if (rdev->family >= CHIP_R600)
642 spin_lock_init(&rdev->ih.lock); 716 spin_lock_init(&rdev->ih.lock);
643 mutex_init(&rdev->gem.mutex); 717 mutex_init(&rdev->gem.mutex);
718 mutex_init(&rdev->pm.mutex);
644 rwlock_init(&rdev->fence_drv.lock); 719 rwlock_init(&rdev->fence_drv.lock);
645 INIT_LIST_HEAD(&rdev->gem.objects); 720 INIT_LIST_HEAD(&rdev->gem.objects);
721 init_waitqueue_head(&rdev->irq.vblank_queue);
646 722
647 /* setup workqueue */ 723 /* setup workqueue */
648 rdev->wq = create_workqueue("radeon"); 724 rdev->wq = create_workqueue("radeon");
@@ -692,6 +768,9 @@ int radeon_device_init(struct radeon_device *rdev,
692 /* this will fail for cards that aren't VGA class devices, just 768 /* this will fail for cards that aren't VGA class devices, just
693 * ignore it */ 769 * ignore it */
694 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 770 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
771 vga_switcheroo_register_client(rdev->pdev,
772 radeon_switcheroo_set_state,
773 radeon_switcheroo_can_switch);
695 774
696 r = radeon_init(rdev); 775 r = radeon_init(rdev);
697 if (r) 776 if (r)
@@ -723,6 +802,7 @@ void radeon_device_fini(struct radeon_device *rdev)
723 rdev->shutdown = true; 802 rdev->shutdown = true;
724 radeon_fini(rdev); 803 radeon_fini(rdev);
725 destroy_workqueue(rdev->wq); 804 destroy_workqueue(rdev->wq);
805 vga_switcheroo_unregister_client(rdev->pdev);
726 vga_client_register(rdev->pdev, NULL, NULL, NULL); 806 vga_client_register(rdev->pdev, NULL, NULL, NULL);
727 iounmap(rdev->rmmio); 807 iounmap(rdev->rmmio);
728 rdev->rmmio = NULL; 808 rdev->rmmio = NULL;
@@ -746,6 +826,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
746 } 826 }
747 rdev = dev->dev_private; 827 rdev = dev->dev_private;
748 828
829 if (rdev->powered_down)
830 return 0;
749 /* unpin the front buffers */ 831 /* unpin the front buffers */
750 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 832 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
751 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); 833 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
@@ -791,6 +873,9 @@ int radeon_resume_kms(struct drm_device *dev)
791{ 873{
792 struct radeon_device *rdev = dev->dev_private; 874 struct radeon_device *rdev = dev->dev_private;
793 875
876 if (rdev->powered_down)
877 return 0;
878
794 acquire_console_sem(); 879 acquire_console_sem();
795 pci_set_power_state(dev->pdev, PCI_D0); 880 pci_set_power_state(dev->pdev, PCI_D0);
796 pci_restore_state(dev->pdev); 881 pci_restore_state(dev->pdev);
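The rdev->powered_down early returns inserted into radeon_suspend_kms() and radeon_resume_kms() close the loop on the switcheroo support: once vga_switcheroo has cut power to the card, a system-wide suspend/resume cycle must not touch its registers again, so both paths bail out immediately, and only the explicit VGA_SWITCHEROO_ON event (which clears the flag before calling radeon_resume_kms()) brings the device back.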
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7e17a362b54b..ba8d806dcf39 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -68,6 +68,36 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
68 WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); 68 WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
69} 69}
70 70
71static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
72{
73 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
74 struct drm_device *dev = crtc->dev;
75 struct radeon_device *rdev = dev->dev_private;
76 int i;
77
78 DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
79 WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
80
81 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
82 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
83 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
84
85 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
86 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
87 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
88
89 WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id);
90 WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007);
91
92 WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0);
93 for (i = 0; i < 256; i++) {
94 WREG32(EVERGREEN_DC_LUT_30_COLOR,
95 (radeon_crtc->lut_r[i] << 20) |
96 (radeon_crtc->lut_g[i] << 10) |
97 (radeon_crtc->lut_b[i] << 0));
98 }
99}
100
71static void legacy_crtc_load_lut(struct drm_crtc *crtc) 101static void legacy_crtc_load_lut(struct drm_crtc *crtc)
72{ 102{
73 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 103 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
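evergreen_crtc_load_lut() above is the DCE4 (Evergreen) variant of the gamma upload: it clears the black offsets, sets the white offsets to 0xffff, selects the CRTC (EVERGREEN_DC_LUT_RW_MODE is written with the crtc id) and then writes 256 packed entries through EVERGREEN_DC_LUT_30_COLOR. The shifts in the loop place a 10-bit red value in bits 29:20, green in 19:10 and blue in 9:0; a small illustration of that packing, not driver code (u32/u16 as in <linux/types.h>):

	/* Pack one 10-bit-per-channel gamma entry the way the loop above does. */
	static u32 pack_lut_30(u16 r10, u16 g10, u16 b10)
	{
		return ((u32)(r10 & 0x3ff) << 20) |
		       ((u32)(g10 & 0x3ff) << 10) |
		       ((u32)(b10 & 0x3ff) << 0);
	}
	/* full-scale white: pack_lut_30(0x3ff, 0x3ff, 0x3ff) == 0x3fffffff */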
@@ -100,7 +130,9 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
100 if (!crtc->enabled) 130 if (!crtc->enabled)
101 return; 131 return;
102 132
103 if (ASIC_IS_AVIVO(rdev)) 133 if (ASIC_IS_DCE4(rdev))
134 evergreen_crtc_load_lut(crtc);
135 else if (ASIC_IS_AVIVO(rdev))
104 avivo_crtc_load_lut(crtc); 136 avivo_crtc_load_lut(crtc);
105 else 137 else
106 legacy_crtc_load_lut(crtc); 138 legacy_crtc_load_lut(crtc);
@@ -361,6 +393,8 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
361 393
362int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) 394int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
363{ 395{
396 struct drm_device *dev = radeon_connector->base.dev;
397 struct radeon_device *rdev = dev->dev_private;
364 int ret = 0; 398 int ret = 0;
365 399
366 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 400 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
@@ -373,11 +407,11 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
373 if (!radeon_connector->ddc_bus) 407 if (!radeon_connector->ddc_bus)
374 return -1; 408 return -1;
375 if (!radeon_connector->edid) { 409 if (!radeon_connector->edid) {
376 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
377 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 410 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
378 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
379 } 411 }
380 412 /* some servers provide a hardcoded edid in rom for KVMs */
413 if (!radeon_connector->edid)
414 radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
381 if (radeon_connector->edid) { 415 if (radeon_connector->edid) {
382 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); 416 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
383 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); 417 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
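Two related changes meet in radeon_ddc_get_modes(): the radeon_i2c_do_lock() bracketing around drm_get_edid() is dropped (bus locking is handled inside the radeon i2c code elsewhere in this series), and when DDC returns nothing the function now falls back to radeon_combios_get_hardcoded_edid(), covering server boards, such as Sun systems driven through a KVM, that ship a fixed EDID image in the combios ROM. radeon_modeset_init() below primes that path with radeon_combios_check_hardcoded_edid() on non-atombios cards, and radeon_modeset_fini() frees the cached block.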
@@ -395,9 +429,7 @@ static int radeon_ddc_dump(struct drm_connector *connector)
395 429
396 if (!radeon_connector->ddc_bus) 430 if (!radeon_connector->ddc_bus)
397 return -1; 431 return -1;
398 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
399 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); 432 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
400 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
401 if (edid) { 433 if (edid) {
402 kfree(edid); 434 kfree(edid);
403 } 435 }
@@ -414,13 +446,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
414 return n; 446 return n;
415} 447}
416 448
417void radeon_compute_pll(struct radeon_pll *pll, 449static void radeon_compute_pll_legacy(struct radeon_pll *pll,
418 uint64_t freq, 450 uint64_t freq,
419 uint32_t *dot_clock_p, 451 uint32_t *dot_clock_p,
420 uint32_t *fb_div_p, 452 uint32_t *fb_div_p,
421 uint32_t *frac_fb_div_p, 453 uint32_t *frac_fb_div_p,
422 uint32_t *ref_div_p, 454 uint32_t *ref_div_p,
423 uint32_t *post_div_p) 455 uint32_t *post_div_p)
424{ 456{
425 uint32_t min_ref_div = pll->min_ref_div; 457 uint32_t min_ref_div = pll->min_ref_div;
426 uint32_t max_ref_div = pll->max_ref_div; 458 uint32_t max_ref_div = pll->max_ref_div;
@@ -580,95 +612,194 @@ void radeon_compute_pll(struct radeon_pll *pll,
580 *post_div_p = best_post_div; 612 *post_div_p = best_post_div;
581} 613}
582 614
583void radeon_compute_pll_avivo(struct radeon_pll *pll, 615static bool
584 uint64_t freq, 616calc_fb_div(struct radeon_pll *pll,
585 uint32_t *dot_clock_p, 617 uint32_t freq,
586 uint32_t *fb_div_p, 618 uint32_t post_div,
587 uint32_t *frac_fb_div_p, 619 uint32_t ref_div,
588 uint32_t *ref_div_p, 620 uint32_t *fb_div,
589 uint32_t *post_div_p) 621 uint32_t *fb_div_frac)
590{ 622{
591 fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; 623 fixed20_12 feedback_divider, a, b;
592 fixed20_12 pll_out_max, pll_out_min; 624 u32 vco_freq;
593 fixed20_12 pll_in_max, pll_in_min; 625
594 fixed20_12 reference_freq; 626 vco_freq = freq * post_div;
595 fixed20_12 error, ffreq, a, b; 627 /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
596 628 a.full = rfixed_const(pll->reference_freq);
597 pll_out_max.full = rfixed_const(pll->pll_out_max); 629 feedback_divider.full = rfixed_const(vco_freq);
598 pll_out_min.full = rfixed_const(pll->pll_out_min); 630 feedback_divider.full = rfixed_div(feedback_divider, a);
599 pll_in_max.full = rfixed_const(pll->pll_in_max); 631 a.full = rfixed_const(ref_div);
600 pll_in_min.full = rfixed_const(pll->pll_in_min); 632 feedback_divider.full = rfixed_mul(feedback_divider, a);
601 reference_freq.full = rfixed_const(pll->reference_freq); 633
602 do_div(freq, 10); 634 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
635 /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
636 a.full = rfixed_const(10);
637 feedback_divider.full = rfixed_mul(feedback_divider, a);
638 feedback_divider.full += rfixed_const_half(0);
639 feedback_divider.full = rfixed_floor(feedback_divider);
640 feedback_divider.full = rfixed_div(feedback_divider, a);
641
642 /* *fb_div = floor(feedback_divider); */
643 a.full = rfixed_floor(feedback_divider);
644 *fb_div = rfixed_trunc(a);
645 /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
646 a.full = rfixed_const(10);
647 b.full = rfixed_mul(feedback_divider, a);
648
649 feedback_divider.full = rfixed_floor(feedback_divider);
650 feedback_divider.full = rfixed_mul(feedback_divider, a);
651 feedback_divider.full = b.full - feedback_divider.full;
652 *fb_div_frac = rfixed_trunc(feedback_divider);
653 } else {
654 /* *fb_div = floor(feedback_divider + 0.5); */
655 feedback_divider.full += rfixed_const_half(0);
656 feedback_divider.full = rfixed_floor(feedback_divider);
657
658 *fb_div = rfixed_trunc(feedback_divider);
659 *fb_div_frac = 0;
660 }
661
662 if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div))
663 return false;
664 else
665 return true;
666}
667
668static bool
669calc_fb_ref_div(struct radeon_pll *pll,
670 uint32_t freq,
671 uint32_t post_div,
672 uint32_t *fb_div,
673 uint32_t *fb_div_frac,
674 uint32_t *ref_div)
675{
676 fixed20_12 ffreq, max_error, error, pll_out, a;
677 u32 vco;
678
603 ffreq.full = rfixed_const(freq); 679 ffreq.full = rfixed_const(freq);
604 error.full = rfixed_const(100 * 100); 680 /* max_error = ffreq * 0.0025; */
681 a.full = rfixed_const(400);
682 max_error.full = rfixed_div(ffreq, a);
605 683
606 /* max p */ 684 for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
607 p.full = rfixed_div(pll_out_max, ffreq); 685 if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
608 p.full = rfixed_floor(p); 686 vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
687 vco = vco / ((*ref_div) * 10);
609 688
610 /* min m */ 689 if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max))
611 m.full = rfixed_div(reference_freq, pll_in_max); 690 continue;
612 m.full = rfixed_ceil(m);
613 691
614 while (1) { 692 /* pll_out = vco / post_div; */
615 n.full = rfixed_div(ffreq, reference_freq); 693 a.full = rfixed_const(post_div);
616 n.full = rfixed_mul(n, m); 694 pll_out.full = rfixed_const(vco);
617 n.full = rfixed_mul(n, p); 695 pll_out.full = rfixed_div(pll_out, a);
618 696
619 f_vco.full = rfixed_div(n, m); 697 if (pll_out.full >= ffreq.full) {
620 f_vco.full = rfixed_mul(f_vco, reference_freq); 698 error.full = pll_out.full - ffreq.full;
699 if (error.full <= max_error.full)
700 return true;
701 }
702 }
703 }
704 return false;
705}
621 706
622 f_pclk.full = rfixed_div(f_vco, p); 707static void radeon_compute_pll_new(struct radeon_pll *pll,
708 uint64_t freq,
709 uint32_t *dot_clock_p,
710 uint32_t *fb_div_p,
711 uint32_t *frac_fb_div_p,
712 uint32_t *ref_div_p,
713 uint32_t *post_div_p)
714{
715 u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
716 u32 best_freq = 0, vco_frequency;
623 717
624 if (f_pclk.full > ffreq.full) 718 /* freq = freq / 10; */
625 error.full = f_pclk.full - ffreq.full; 719 do_div(freq, 10);
626 else
627 error.full = ffreq.full - f_pclk.full;
628 error.full = rfixed_div(error, f_pclk);
629 a.full = rfixed_const(100 * 100);
630 error.full = rfixed_mul(error, a);
631
632 a.full = rfixed_mul(m, p);
633 a.full = rfixed_div(n, a);
634 best_freq.full = rfixed_mul(reference_freq, a);
635
636 if (rfixed_trunc(error) < 25)
637 break;
638
639 a.full = rfixed_const(1);
640 m.full = m.full + a.full;
641 a.full = rfixed_div(reference_freq, m);
642 if (a.full >= pll_in_min.full)
643 continue;
644 720
645 m.full = rfixed_div(reference_freq, pll_in_max); 721 if (pll->flags & RADEON_PLL_USE_POST_DIV) {
646 m.full = rfixed_ceil(m); 722 post_div = pll->post_div;
647 a.full= rfixed_const(1); 723 if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div))
648 p.full = p.full - a.full; 724 goto done;
649 a.full = rfixed_mul(p, ffreq); 725
650 if (a.full >= pll_out_min.full) 726 vco_frequency = freq * post_div;
651 continue; 727 if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
652 else { 728 goto done;
653 DRM_ERROR("Unable to find pll dividers\n"); 729
654 break; 730 if (pll->flags & RADEON_PLL_USE_REF_DIV) {
731 ref_div = pll->reference_div;
732 if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
733 goto done;
734 if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
735 goto done;
736 }
737 } else {
738 for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) {
739 if (pll->flags & RADEON_PLL_LEGACY) {
740 if ((post_div == 5) ||
741 (post_div == 7) ||
742 (post_div == 9) ||
743 (post_div == 10) ||
744 (post_div == 11))
745 continue;
746 }
747
748 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
749 continue;
750
751 vco_frequency = freq * post_div;
752 if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
753 continue;
754 if (pll->flags & RADEON_PLL_USE_REF_DIV) {
755 ref_div = pll->reference_div;
756 if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
757 goto done;
758 if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
759 break;
760 } else {
761 if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div))
762 break;
763 }
655 } 764 }
656 } 765 }
657 766
658 a.full = rfixed_const(10); 767 best_freq = pll->reference_freq * 10 * fb_div;
659 b.full = rfixed_mul(n, a); 768 best_freq += pll->reference_freq * fb_div_frac;
769 best_freq = best_freq / (ref_div * post_div);
660 770
661 frac_n.full = rfixed_floor(n); 771done:
662 frac_n.full = rfixed_mul(frac_n, a); 772 if (best_freq == 0)
663 frac_n.full = b.full - frac_n.full; 773 DRM_ERROR("Couldn't find valid PLL dividers\n");
664 774
665 *dot_clock_p = rfixed_trunc(best_freq); 775 *dot_clock_p = best_freq / 10;
666 *fb_div_p = rfixed_trunc(n); 776 *fb_div_p = fb_div;
667 *frac_fb_div_p = rfixed_trunc(frac_n); 777 *frac_fb_div_p = fb_div_frac;
668 *ref_div_p = rfixed_trunc(m); 778 *ref_div_p = ref_div;
669 *post_div_p = rfixed_trunc(p); 779 *post_div_p = post_div;
670 780
671 DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p); 781 DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
782}
783
784void radeon_compute_pll(struct radeon_pll *pll,
785 uint64_t freq,
786 uint32_t *dot_clock_p,
787 uint32_t *fb_div_p,
788 uint32_t *frac_fb_div_p,
789 uint32_t *ref_div_p,
790 uint32_t *post_div_p)
791{
792 switch (pll->algo) {
793 case PLL_ALGO_NEW:
794 radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p,
795 frac_fb_div_p, ref_div_p, post_div_p);
796 break;
797 case PLL_ALGO_LEGACY:
798 default:
799 radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p,
800 frac_fb_div_p, ref_div_p, post_div_p);
801 break;
802 }
672} 803}
673 804
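The rewritten AVIVO path is split into helpers: calc_fb_div() converts a target VCO (freq * post_div) and a candidate reference divider into an integer feedback divider, keeping a tenths fraction when RADEON_PLL_USE_FRAC_FB_DIV is set; calc_fb_ref_div() scans reference dividers until the VCO sits inside pll_out_min/max and the output is no more than 0.25% above the request (max_error = ffreq / 400); radeon_compute_pll_new() then walks the post dividers while honouring the USE_POST_DIV, USE_REF_DIV, LEGACY and NO_ODD_POST_DIV flags, and finally reports the dividers and the dot clock they produce. A standalone restatement of the underlying arithmetic in plain integers (illustration only; the driver appears to work in 10 kHz units after the do_div(freq, 10), with reference_freq supplied by the BIOS in the same units):

	/* Sketch of the divider relationship used by the helpers above. */
	static unsigned int pll_output(unsigned int ref_freq,   /* e.g. 2700 */
				       unsigned int fb_div,
				       unsigned int fb_div_frac, /* tenths */
				       unsigned int ref_div,
				       unsigned int post_div)
	{
		/* vco = ref_freq * (fb_div + fb_div_frac / 10) / ref_div */
		unsigned int vco = (ref_freq * (fb_div * 10 + fb_div_frac)) / (ref_div * 10);

		return vco / post_div;
	}
	/* ref_freq 2700 (27 MHz), fb_div 66, ref_div 2, post_div 6:
	 * vco = 89100 (891 MHz), output = 14850, i.e. a 148.5 MHz dot clock. */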
674static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) 805static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -679,11 +810,8 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
679 if (fb->fbdev) 810 if (fb->fbdev)
680 radeonfb_remove(dev, fb); 811 radeonfb_remove(dev, fb);
681 812
682 if (radeon_fb->obj) { 813 if (radeon_fb->obj)
683 mutex_lock(&dev->struct_mutex); 814 drm_gem_object_unreference_unlocked(radeon_fb->obj);
684 drm_gem_object_unreference(radeon_fb->obj);
685 mutex_unlock(&dev->struct_mutex);
686 }
687 drm_framebuffer_cleanup(fb); 815 drm_framebuffer_cleanup(fb);
688 kfree(radeon_fb); 816 kfree(radeon_fb);
689} 817}
@@ -819,7 +947,7 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
819 947
820int radeon_modeset_init(struct radeon_device *rdev) 948int radeon_modeset_init(struct radeon_device *rdev)
821{ 949{
822 int num_crtc = 2, i; 950 int i;
823 int ret; 951 int ret;
824 952
825 drm_mode_config_init(rdev->ddev); 953 drm_mode_config_init(rdev->ddev);
@@ -842,11 +970,23 @@ int radeon_modeset_init(struct radeon_device *rdev)
842 return ret; 970 return ret;
843 } 971 }
844 972
973 /* check combios for a valid hardcoded EDID - Sun servers */
974 if (!rdev->is_atom_bios) {
975 /* check for hardcoded EDID in BIOS */
976 radeon_combios_check_hardcoded_edid(rdev);
977 }
978
845 if (rdev->flags & RADEON_SINGLE_CRTC) 979 if (rdev->flags & RADEON_SINGLE_CRTC)
846 num_crtc = 1; 980 rdev->num_crtc = 1;
981 else {
982 if (ASIC_IS_DCE4(rdev))
983 rdev->num_crtc = 6;
984 else
985 rdev->num_crtc = 2;
986 }
847 987
848 /* allocate crtcs */ 988 /* allocate crtcs */
849 for (i = 0; i < num_crtc; i++) { 989 for (i = 0; i < rdev->num_crtc; i++) {
850 radeon_crtc_init(rdev->ddev, i); 990 radeon_crtc_init(rdev->ddev, i);
851 } 991 }
852 992
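radeon_modeset_init() now records the CRTC count in rdev->num_crtc instead of a local: one for RADEON_SINGLE_CRTC parts, six on DCE4 (Evergreen), otherwise two. The same field is reused later by radeonfb_create() for the fb helper and by radeon_add_atom_encoder() for possible_crtcs, whose masks are simply (1 << num_crtc) - 1 (0x1, 0x3 and 0x3f respectively).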
@@ -863,6 +1003,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
863 1003
864void radeon_modeset_fini(struct radeon_device *rdev) 1004void radeon_modeset_fini(struct radeon_device *rdev)
865{ 1005{
1006 kfree(rdev->mode_info.bios_hardcoded_edid);
1007
866 if (rdev->mode_info.mode_config_initialized) { 1008 if (rdev->mode_info.mode_config_initialized) {
867 radeon_hpd_fini(rdev); 1009 radeon_hpd_fini(rdev);
868 drm_mode_config_cleanup(rdev->ddev); 1010 drm_mode_config_cleanup(rdev->ddev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8ba3de7994d4..6eec0ece6a6c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -40,9 +40,11 @@
40 40
41/* 41/*
42 * KMS wrapper. 42 * KMS wrapper.
43 * - 2.0.0 - initial interface
44 * - 2.1.0 - add square tiling interface
43 */ 45 */
44#define KMS_DRIVER_MAJOR 2 46#define KMS_DRIVER_MAJOR 2
45#define KMS_DRIVER_MINOR 0 47#define KMS_DRIVER_MINOR 1
46#define KMS_DRIVER_PATCHLEVEL 0 48#define KMS_DRIVER_PATCHLEVEL 0
47int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 49int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
48int radeon_driver_unload_kms(struct drm_device *dev); 50int radeon_driver_unload_kms(struct drm_device *dev);
@@ -86,7 +88,8 @@ int radeon_benchmarking = 0;
86int radeon_testing = 0; 88int radeon_testing = 0;
87int radeon_connector_table = 0; 89int radeon_connector_table = 0;
88int radeon_tv = 1; 90int radeon_tv = 1;
89int radeon_new_pll = 1; 91int radeon_new_pll = -1;
92int radeon_dynpm = -1;
90int radeon_audio = 1; 93int radeon_audio = 1;
91 94
92MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 95MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
@@ -122,9 +125,12 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
122MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); 125MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
123module_param_named(tv, radeon_tv, int, 0444); 126module_param_named(tv, radeon_tv, int, 0444);
124 127
125MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips"); 128MODULE_PARM_DESC(new_pll, "Select new PLL code");
126module_param_named(new_pll, radeon_new_pll, int, 0444); 129module_param_named(new_pll, radeon_new_pll, int, 0444);
127 130
131MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");
132module_param_named(dynpm, radeon_dynpm, int, 0444);
133
128MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); 134MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
129module_param_named(audio, radeon_audio, int, 0444); 135module_param_named(audio, radeon_audio, int, 0444);
130 136
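On the module-parameter side, radeon.new_pll changes its default from 1 to -1, presumably so the driver can choose the PLL algorithm per ASIC (see the pll->algo switch added to radeon_compute_pll() above), and a new radeon.dynpm option gates the dynamic power-management work, likewise defaulting to -1 rather than forcing it either way. As with the other options they can be set at load time, e.g. radeon.dynpm=1 on the kernel command line or dynpm=1 as a modprobe option.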
@@ -339,6 +345,7 @@ static int __init radeon_init(void)
339 driver = &kms_driver; 345 driver = &kms_driver;
340 driver->driver_features |= DRIVER_MODESET; 346 driver->driver_features |= DRIVER_MODESET;
341 driver->num_ioctls = radeon_max_kms_ioctl; 347 driver->num_ioctls = radeon_max_kms_ioctl;
348 radeon_register_atpx_handler();
342 } 349 }
343 /* if the vga console setting is enabled still 350 /* if the vga console setting is enabled still
344 * let modprobe override it */ 351 * let modprobe override it */
@@ -348,6 +355,7 @@ static int __init radeon_init(void)
348static void __exit radeon_exit(void) 355static void __exit radeon_exit(void)
349{ 356{
350 drm_exit(driver); 357 drm_exit(driver);
358 radeon_unregister_atpx_handler();
351} 359}
352 360
353module_init(radeon_init); 361module_init(radeon_init);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index c57ad606504d..ec55f2b23c22 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -268,6 +268,8 @@ typedef struct drm_radeon_private {
268 268
269 u32 scratch_ages[5]; 269 u32 scratch_ages[5];
270 270
271 int have_z_offset;
272
271 /* starting from here on, data is preserved across an open */ 273 /* starting from here on, data is preserved across an open */
272 uint32_t flags; /* see radeon_chip_flags */ 274 uint32_t flags; /* see radeon_chip_flags */
273 resource_size_t fb_aper_offset; 275 resource_size_t fb_aper_offset;
@@ -295,6 +297,9 @@ typedef struct drm_radeon_private {
295 int r700_sc_prim_fifo_size; 297 int r700_sc_prim_fifo_size;
296 int r700_sc_hiz_tile_fifo_size; 298 int r700_sc_hiz_tile_fifo_size;
297 int r700_sc_earlyz_tile_fifo_fize; 299 int r700_sc_earlyz_tile_fifo_fize;
300 int r600_group_size;
301 int r600_npipes;
302 int r600_nbanks;
298 303
299 struct mutex cs_mutex; 304 struct mutex cs_mutex;
300 u32 cs_id_scnt; 305 u32 cs_id_scnt;
@@ -310,9 +315,11 @@ typedef struct drm_radeon_buf_priv {
310 u32 age; 315 u32 age;
311} drm_radeon_buf_priv_t; 316} drm_radeon_buf_priv_t;
312 317
318struct drm_buffer;
319
313typedef struct drm_radeon_kcmd_buffer { 320typedef struct drm_radeon_kcmd_buffer {
314 int bufsz; 321 int bufsz;
315 char *buf; 322 struct drm_buffer *buffer;
316 int nbox; 323 int nbox;
317 struct drm_clip_rect __user *boxes; 324 struct drm_clip_rect __user *boxes;
318} drm_radeon_kcmd_buffer_t; 325} drm_radeon_kcmd_buffer_t;
@@ -455,6 +462,15 @@ extern void r600_blit_swap(struct drm_device *dev,
455 int sx, int sy, int dx, int dy, 462 int sx, int sy, int dx, int dy,
456 int w, int h, int src_pitch, int dst_pitch, int cpp); 463 int w, int h, int src_pitch, int dst_pitch, int cpp);
457 464
465/* atpx handler */
466#if defined(CONFIG_VGA_SWITCHEROO)
467void radeon_register_atpx_handler(void);
468void radeon_unregister_atpx_handler(void);
469#else
470static inline void radeon_register_atpx_handler(void) {}
471static inline void radeon_unregister_atpx_handler(void) {}
472#endif
473
458/* Flags for stats.boxes 474/* Flags for stats.boxes
459 */ 475 */
460#define RADEON_BOX_DMA_IDLE 0x1 476#define RADEON_BOX_DMA_IDLE 0x1
@@ -2122,4 +2138,32 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
2122 write &= mask; \ 2138 write &= mask; \
2123} while (0) 2139} while (0)
2124 2140
2141/**
2142 * Copy given number of dwords from drm buffer to the ring buffer.
2143 */
2144#define OUT_RING_DRM_BUFFER(buf, sz) do { \
2145 int _size = (sz) * 4; \
2146 struct drm_buffer *_buf = (buf); \
2147 int _part_size; \
2148 while (_size > 0) { \
2149 _part_size = _size; \
2150 \
2151 if (write + _part_size/4 > mask) \
2152 _part_size = ((mask + 1) - write)*4; \
2153 \
2154 if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE) \
2155 _part_size = PAGE_SIZE - drm_buffer_index(_buf);\
2156 \
2157 \
2158 \
2159 memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)] \
2160 [drm_buffer_index(_buf)], _part_size); \
2161 \
2162 _size -= _part_size; \
2163 write = (write + _part_size/4) & mask; \
2164 drm_buffer_advance(_buf, _part_size); \
2165 } \
2166} while (0)
2167
2168
2125#endif /* __RADEON_DRV_H__ */ 2169#endif /* __RADEON_DRV_H__ */
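OUT_RING_DRM_BUFFER() goes with the drm_radeon_kcmd_buffer change from a flat char *buf to a struct drm_buffer: it streams sz dwords from the (potentially multi-page) buffer into the CP ring, splitting each copy at the ring wrap point (write vs. mask) and at drm_buffer page boundaries, and advances the buffer cursor as it goes. Like the other ring macros it relies on the ring, write and mask locals set up by RING_LOCALS/BEGIN_RING(). A hedged sketch of a caller in the radeon_drv.h context; everything except the macros themselves is illustrative:

	/* Emit 'count' dwords of already-verified command payload from the
	 * kcmd buffer straight into the ring (sketch, not driver code). */
	static void example_emit_payload(drm_radeon_private_t *dev_priv,
					 drm_radeon_kcmd_buffer_t *cmdbuf,
					 int count)
	{
		RING_LOCALS;

		BEGIN_RING(count);
		OUT_RING_DRM_BUFFER(cmdbuf->buffer, count);
		ADVANCE_RING();
	}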
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 3c91724457ca..bc926ea0a530 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -53,7 +53,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
53 /* DVO requires 2x ppll clocks depending on tmds chip */ 53 /* DVO requires 2x ppll clocks depending on tmds chip */
54 if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) 54 if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
55 return index_mask; 55 return index_mask;
56 56
57 count = -1; 57 count = -1;
58 list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) { 58 list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
59 struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder); 59 struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
@@ -228,6 +228,32 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
228 return NULL; 228 return NULL;
229} 229}
230 230
231static struct radeon_connector_atom_dig *
232radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
233{
234 struct drm_device *dev = encoder->dev;
235 struct radeon_device *rdev = dev->dev_private;
236 struct drm_connector *connector;
237 struct radeon_connector *radeon_connector;
238 struct radeon_connector_atom_dig *dig_connector;
239
240 if (!rdev->is_atom_bios)
241 return NULL;
242
243 connector = radeon_get_connector_for_encoder(encoder);
244 if (!connector)
245 return NULL;
246
247 radeon_connector = to_radeon_connector(connector);
248
249 if (!radeon_connector->con_priv)
250 return NULL;
251
252 dig_connector = radeon_connector->con_priv;
253
254 return dig_connector;
255}
256
231static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, 257static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
232 struct drm_display_mode *mode, 258 struct drm_display_mode *mode,
233 struct drm_display_mode *adjusted_mode) 259 struct drm_display_mode *adjusted_mode)
@@ -236,6 +262,9 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
236 struct drm_device *dev = encoder->dev; 262 struct drm_device *dev = encoder->dev;
237 struct radeon_device *rdev = dev->dev_private; 263 struct radeon_device *rdev = dev->dev_private;
238 264
265 /* adjust pm to upcoming mode change */
266 radeon_pm_compute_clocks(rdev);
267
239 /* set the active encoder to connector routing */ 268 /* set the active encoder to connector routing */
240 radeon_encoder_set_active_device(encoder); 269 radeon_encoder_set_active_device(encoder);
241 drm_mode_set_crtcinfo(adjusted_mode, 0); 270 drm_mode_set_crtcinfo(adjusted_mode, 0);
@@ -458,34 +487,20 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
458 struct drm_device *dev = encoder->dev; 487 struct drm_device *dev = encoder->dev;
459 struct radeon_device *rdev = dev->dev_private; 488 struct radeon_device *rdev = dev->dev_private;
460 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 489 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
490 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
491 struct radeon_connector_atom_dig *dig_connector =
492 radeon_get_atom_connector_priv_from_encoder(encoder);
461 union lvds_encoder_control args; 493 union lvds_encoder_control args;
462 int index = 0; 494 int index = 0;
463 int hdmi_detected = 0; 495 int hdmi_detected = 0;
464 uint8_t frev, crev; 496 uint8_t frev, crev;
465 struct radeon_encoder_atom_dig *dig;
466 struct drm_connector *connector;
467 struct radeon_connector *radeon_connector;
468 struct radeon_connector_atom_dig *dig_connector;
469 497
470 connector = radeon_get_connector_for_encoder(encoder); 498 if (!dig || !dig_connector)
471 if (!connector)
472 return; 499 return;
473 500
474 radeon_connector = to_radeon_connector(connector); 501 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
475
476 if (!radeon_encoder->enc_priv)
477 return;
478
479 dig = radeon_encoder->enc_priv;
480
481 if (!radeon_connector->con_priv)
482 return;
483
484 if (drm_detect_hdmi_monitor(radeon_connector->edid))
485 hdmi_detected = 1; 502 hdmi_detected = 1;
486 503
487 dig_connector = radeon_connector->con_priv;
488
489 memset(&args, 0, sizeof(args)); 504 memset(&args, 0, sizeof(args));
490 505
491 switch (radeon_encoder->encoder_id) { 506 switch (radeon_encoder->encoder_id) {
@@ -586,7 +601,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
586{ 601{
587 struct drm_connector *connector; 602 struct drm_connector *connector;
588 struct radeon_connector *radeon_connector; 603 struct radeon_connector *radeon_connector;
589 struct radeon_connector_atom_dig *radeon_dig_connector; 604 struct radeon_connector_atom_dig *dig_connector;
590 605
591 connector = radeon_get_connector_for_encoder(encoder); 606 connector = radeon_get_connector_for_encoder(encoder);
592 if (!connector) 607 if (!connector)
@@ -617,9 +632,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
617 break; 632 break;
618 case DRM_MODE_CONNECTOR_DisplayPort: 633 case DRM_MODE_CONNECTOR_DisplayPort:
619 case DRM_MODE_CONNECTOR_eDP: 634 case DRM_MODE_CONNECTOR_eDP:
620 radeon_dig_connector = radeon_connector->con_priv; 635 dig_connector = radeon_connector->con_priv;
621 if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 636 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
622 (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 637 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
623 return ATOM_ENCODER_MODE_DP; 638 return ATOM_ENCODER_MODE_DP;
624 else if (drm_detect_hdmi_monitor(radeon_connector->edid)) 639 else if (drm_detect_hdmi_monitor(radeon_connector->edid))
625 return ATOM_ENCODER_MODE_HDMI; 640 return ATOM_ENCODER_MODE_HDMI;
@@ -656,6 +671,18 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
656 * - 2 DIG encoder blocks. 671 * - 2 DIG encoder blocks.
657 * DIG1/2 can drive UNIPHY0/1/2 link A or link B 672 * DIG1/2 can drive UNIPHY0/1/2 link A or link B
658 * 673 *
674 * DCE 4.0
675 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
676 * Supports up to 6 digital outputs
677 * - 6 DIG encoder blocks.
678 * - DIG to PHY mapping is hardcoded
679 * DIG1 drives UNIPHY0 link A, A+B
680 * DIG2 drives UNIPHY0 link B
681 * DIG3 drives UNIPHY1 link A, A+B
682 * DIG4 drives UNIPHY1 link B
683 * DIG5 drives UNIPHY2 link A, A+B
684 * DIG6 drives UNIPHY2 link B
685 *
659 * Routing 686 * Routing
660 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) 687 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
661 * Examples: 688 * Examples:
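This comment block is the key to most of the DCE4 changes in this file: unlike DCE3.2, where any DIG encoder can drive any transmitter, the six Evergreen DIG blocks are wired to fixed UNIPHY links, so the encoder index follows directly from the transmitter object and the link in use. That is what the DCE4 branch added to radeon_atom_pick_dig_encoder() further down implements; condensed, the mapping is:

	/* Restatement of the hardcoded DCE4 mapping above (the driver spells
	 * it out as a switch in radeon_atom_pick_dig_encoder()). */
	static int dce4_dig_encoder(int uniphy /* 0..2 */, bool link_b)
	{
		return uniphy * 2 + (link_b ? 1 : 0);	/* DIG1..DIG6 as 0..5 */
	}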
@@ -664,88 +691,78 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
664 * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS 691 * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
665 * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI 692 * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
666 */ 693 */
667static void 694
695union dig_encoder_control {
696 DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
697 DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
698 DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
699};
700
701void
668atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) 702atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
669{ 703{
670 struct drm_device *dev = encoder->dev; 704 struct drm_device *dev = encoder->dev;
671 struct radeon_device *rdev = dev->dev_private; 705 struct radeon_device *rdev = dev->dev_private;
672 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 706 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
673 DIG_ENCODER_CONTROL_PS_ALLOCATION args; 707 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
708 struct radeon_connector_atom_dig *dig_connector =
709 radeon_get_atom_connector_priv_from_encoder(encoder);
710 union dig_encoder_control args;
674 int index = 0, num = 0; 711 int index = 0, num = 0;
675 uint8_t frev, crev; 712 uint8_t frev, crev;
676 struct radeon_encoder_atom_dig *dig;
677 struct drm_connector *connector;
678 struct radeon_connector *radeon_connector;
679 struct radeon_connector_atom_dig *dig_connector;
680 713
681 connector = radeon_get_connector_for_encoder(encoder); 714 if (!dig || !dig_connector)
682 if (!connector)
683 return; 715 return;
684 716
685 radeon_connector = to_radeon_connector(connector);
686
687 if (!radeon_connector->con_priv)
688 return;
689
690 dig_connector = radeon_connector->con_priv;
691
692 if (!radeon_encoder->enc_priv)
693 return;
694
695 dig = radeon_encoder->enc_priv;
696
697 memset(&args, 0, sizeof(args)); 717 memset(&args, 0, sizeof(args));
698 718
699 if (dig->dig_encoder) 719 if (ASIC_IS_DCE4(rdev))
700 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); 720 index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
701 else 721 else {
702 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); 722 if (dig->dig_encoder)
723 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
724 else
725 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
726 }
703 num = dig->dig_encoder + 1; 727 num = dig->dig_encoder + 1;
704 728
705 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 729 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
706 730
707 args.ucAction = action; 731 args.v1.ucAction = action;
708 args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 732 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
733 args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
709 734
710 if (ASIC_IS_DCE32(rdev)) { 735 if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
736 if (dig_connector->dp_clock == 270000)
737 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
738 args.v1.ucLaneNum = dig_connector->dp_lane_count;
739 } else if (radeon_encoder->pixel_clock > 165000)
740 args.v1.ucLaneNum = 8;
741 else
742 args.v1.ucLaneNum = 4;
743
744 if (ASIC_IS_DCE4(rdev)) {
745 args.v3.acConfig.ucDigSel = dig->dig_encoder;
746 args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
747 } else {
711 switch (radeon_encoder->encoder_id) { 748 switch (radeon_encoder->encoder_id) {
712 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 749 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
713 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; 750 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
714 break; 751 break;
715 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 752 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
716 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2; 753 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
754 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
717 break; 755 break;
718 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 756 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
719 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; 757 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
720 break;
721 }
722 } else {
723 switch (radeon_encoder->encoder_id) {
724 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
725 args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1;
726 break;
727 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
728 args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER2;
729 break; 758 break;
730 } 759 }
760 if (dig_connector->linkb)
761 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
762 else
763 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
731 } 764 }
732 765
733 args.ucEncoderMode = atombios_get_encoder_mode(encoder);
734
735 if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
736 if (dig_connector->dp_clock == 270000)
737 args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
738 args.ucLaneNum = dig_connector->dp_lane_count;
739 } else if (radeon_encoder->pixel_clock > 165000)
740 args.ucLaneNum = 8;
741 else
742 args.ucLaneNum = 4;
743
744 if (dig_connector->linkb)
745 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
746 else
747 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
748
749 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 766 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
750 767
751} 768}
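The rewritten atombios_dig_encoder_setup() now computes the link configuration before the per-ASIC block and adds the DCE4 case, which uses the single DIGxEncoderControl table and selects the target encoder through args.v3.acConfig.ucDigSel. The lane and link-rate rule, as the code above reads: DP takes the sink's dp_lane_count and sets the 2.70 GHz config bit when dp_clock is 270000; non-DP modes use 8 lanes above a 165 MHz pixel clock (dual link) and 4 lanes otherwise. Restated on its own as an illustrative helper:

	/* Lane-count rule from the hunk above; not driver code. */
	static int dig_lane_count(bool is_dp, int dp_lane_count, u32 pixel_clock_khz)
	{
		if (is_dp)
			return dp_lane_count;

		return (pixel_clock_khz > 165000) ? 8 : 4;	/* dual vs. single link */
	}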
@@ -753,6 +770,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
753union dig_transmitter_control { 770union dig_transmitter_control {
754 DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; 771 DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
755 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; 772 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
773 DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
756}; 774};
757 775
758void 776void
@@ -761,37 +779,29 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
761 struct drm_device *dev = encoder->dev; 779 struct drm_device *dev = encoder->dev;
762 struct radeon_device *rdev = dev->dev_private; 780 struct radeon_device *rdev = dev->dev_private;
763 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 781 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
782 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
783 struct radeon_connector_atom_dig *dig_connector =
784 radeon_get_atom_connector_priv_from_encoder(encoder);
785 struct drm_connector *connector;
786 struct radeon_connector *radeon_connector;
764 union dig_transmitter_control args; 787 union dig_transmitter_control args;
765 int index = 0, num = 0; 788 int index = 0, num = 0;
766 uint8_t frev, crev; 789 uint8_t frev, crev;
767 struct radeon_encoder_atom_dig *dig;
768 struct drm_connector *connector;
769 struct radeon_connector *radeon_connector;
770 struct radeon_connector_atom_dig *dig_connector;
771 bool is_dp = false; 790 bool is_dp = false;
791 int pll_id = 0;
772 792
773 connector = radeon_get_connector_for_encoder(encoder); 793 if (!dig || !dig_connector)
774 if (!connector)
775 return; 794 return;
776 795
796 connector = radeon_get_connector_for_encoder(encoder);
777 radeon_connector = to_radeon_connector(connector); 797 radeon_connector = to_radeon_connector(connector);
778 798
779 if (!radeon_encoder->enc_priv)
780 return;
781
782 dig = radeon_encoder->enc_priv;
783
784 if (!radeon_connector->con_priv)
785 return;
786
787 dig_connector = radeon_connector->con_priv;
788
789 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) 799 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
790 is_dp = true; 800 is_dp = true;
791 801
792 memset(&args, 0, sizeof(args)); 802 memset(&args, 0, sizeof(args));
793 803
794 if (ASIC_IS_DCE32(rdev)) 804 if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
795 index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); 805 index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
796 else { 806 else {
797 switch (radeon_encoder->encoder_id) { 807 switch (radeon_encoder->encoder_id) {
@@ -821,7 +831,54 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
821 else 831 else
822 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 832 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
823 } 833 }
824 if (ASIC_IS_DCE32(rdev)) { 834 if (ASIC_IS_DCE4(rdev)) {
835 if (is_dp)
836 args.v3.ucLaneNum = dig_connector->dp_lane_count;
837 else if (radeon_encoder->pixel_clock > 165000)
838 args.v3.ucLaneNum = 8;
839 else
840 args.v3.ucLaneNum = 4;
841
842 if (dig_connector->linkb) {
843 args.v3.acConfig.ucLinkSel = 1;
844 args.v3.acConfig.ucEncoderSel = 1;
845 }
846
847 /* Select the PLL for the PHY
848 * DP PHY should be clocked from external src if there is
849 * one.
850 */
851 if (encoder->crtc) {
852 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
853 pll_id = radeon_crtc->pll_id;
854 }
855 if (is_dp && rdev->clock.dp_extclk)
856 args.v3.acConfig.ucRefClkSource = 2; /* external src */
857 else
858 args.v3.acConfig.ucRefClkSource = pll_id;
859
860 switch (radeon_encoder->encoder_id) {
861 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
862 args.v3.acConfig.ucTransmitterSel = 0;
863 num = 0;
864 break;
865 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
866 args.v3.acConfig.ucTransmitterSel = 1;
867 num = 1;
868 break;
869 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
870 args.v3.acConfig.ucTransmitterSel = 2;
871 num = 2;
872 break;
873 }
874
875 if (is_dp)
876 args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
877 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
878 if (dig->coherent_mode)
879 args.v3.acConfig.fCoherentMode = 1;
880 }
881 } else if (ASIC_IS_DCE32(rdev)) {
825 if (dig->dig_encoder == 1) 882 if (dig->dig_encoder == 1)
826 args.v2.acConfig.ucEncoderSel = 1; 883 args.v2.acConfig.ucEncoderSel = 1;
827 if (dig_connector->linkb) 884 if (dig_connector->linkb)
@@ -849,7 +906,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
849 args.v2.acConfig.fCoherentMode = 1; 906 args.v2.acConfig.fCoherentMode = 1;
850 } 907 }
851 } else { 908 } else {
852
853 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; 909 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
854 910
855 if (dig->dig_encoder) 911 if (dig->dig_encoder)
@@ -1024,9 +1080,12 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1024 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1080 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1025 } 1081 }
1026 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 1082 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
1083
1084 /* adjust pm to dpms change */
1085 radeon_pm_compute_clocks(rdev);
1027} 1086}
1028 1087
1029union crtc_sourc_param { 1088union crtc_source_param {
1030 SELECT_CRTC_SOURCE_PS_ALLOCATION v1; 1089 SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
1031 SELECT_CRTC_SOURCE_PARAMETERS_V2 v2; 1090 SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
1032}; 1091};
@@ -1038,7 +1097,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1038 struct radeon_device *rdev = dev->dev_private; 1097 struct radeon_device *rdev = dev->dev_private;
1039 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1098 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1040 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 1099 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1041 union crtc_sourc_param args; 1100 union crtc_source_param args;
1042 int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); 1101 int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
1043 uint8_t frev, crev; 1102 uint8_t frev, crev;
1044 struct radeon_encoder_atom_dig *dig; 1103 struct radeon_encoder_atom_dig *dig;
@@ -1107,10 +1166,26 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1107 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1166 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1108 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1167 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1109 dig = radeon_encoder->enc_priv; 1168 dig = radeon_encoder->enc_priv;
1110 if (dig->dig_encoder) 1169 switch (dig->dig_encoder) {
1111 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; 1170 case 0:
1112 else
1113 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; 1171 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1172 break;
1173 case 1:
1174 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1175 break;
1176 case 2:
1177 args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
1178 break;
1179 case 3:
1180 args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
1181 break;
1182 case 4:
1183 args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
1184 break;
1185 case 5:
1186 args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
1187 break;
1188 }
1114 break; 1189 break;
1115 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1190 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1116 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; 1191 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
@@ -1167,6 +1242,7 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
1167 } 1242 }
1168 1243
1169 /* set scaler clears this on some chips */ 1244 /* set scaler clears this on some chips */
1245 /* XXX check DCE4 */
1170 if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) { 1246 if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
1171 if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) 1247 if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
1172 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 1248 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
@@ -1183,6 +1259,33 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1183 struct drm_encoder *test_encoder; 1259 struct drm_encoder *test_encoder;
1184 struct radeon_encoder_atom_dig *dig; 1260 struct radeon_encoder_atom_dig *dig;
1185 uint32_t dig_enc_in_use = 0; 1261 uint32_t dig_enc_in_use = 0;
1262
1263 if (ASIC_IS_DCE4(rdev)) {
1264 struct radeon_connector_atom_dig *dig_connector =
1265 radeon_get_atom_connector_priv_from_encoder(encoder);
1266
1267 switch (radeon_encoder->encoder_id) {
1268 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1269 if (dig_connector->linkb)
1270 return 1;
1271 else
1272 return 0;
1273 break;
1274 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1275 if (dig_connector->linkb)
1276 return 3;
1277 else
1278 return 2;
1279 break;
1280 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1281 if (dig_connector->linkb)
1282 return 5;
1283 else
1284 return 4;
1285 break;
1286 }
1287 }
1288
1186 /* on DCE32 an encoder can drive any block so just use the crtc id */ 1289 /* on DCE32 an encoder can drive any block so just use the crtc id */
1187 if (ASIC_IS_DCE32(rdev)) { 1290 if (ASIC_IS_DCE32(rdev)) {
1188 return radeon_crtc->crtc_id; 1291 return radeon_crtc->crtc_id;
@@ -1254,15 +1357,26 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1254 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1357 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1255 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1358 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1256 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1359 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1257 /* disable the encoder and transmitter */ 1360 if (ASIC_IS_DCE4(rdev)) {
1258 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 1361 /* disable the transmitter */
1259 atombios_dig_encoder_setup(encoder, ATOM_DISABLE); 1362 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1260 1363 /* setup and enable the encoder */
1261 /* setup and enable the encoder and transmitter */ 1364 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP);
1262 atombios_dig_encoder_setup(encoder, ATOM_ENABLE); 1365
1263 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); 1366 /* init and enable the transmitter */
1264 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); 1367 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
1265 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1368 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1369 } else {
1370 /* disable the encoder and transmitter */
1371 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1372 atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
1373
1374 /* setup and enable the encoder and transmitter */
1375 atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
1376 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
1377 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1378 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1379 }
1266 break; 1380 break;
1267 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1381 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1268 atombios_ddia_setup(encoder, ATOM_ENABLE); 1382 atombios_ddia_setup(encoder, ATOM_ENABLE);
@@ -1282,7 +1396,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1282 } 1396 }
1283 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1397 atombios_apply_encoder_quirks(encoder, adjusted_mode);
1284 1398
1285 r600_hdmi_setmode(encoder, adjusted_mode); 1399 /* XXX */
1400 if (!ASIC_IS_DCE4(rdev))
1401 r600_hdmi_setmode(encoder, adjusted_mode);
1286} 1402}
1287 1403
1288static bool 1404static bool
@@ -1480,10 +1596,18 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1480 return; 1596 return;
1481 1597
1482 encoder = &radeon_encoder->base; 1598 encoder = &radeon_encoder->base;
1483 if (rdev->flags & RADEON_SINGLE_CRTC) 1599 switch (rdev->num_crtc) {
1600 case 1:
1484 encoder->possible_crtcs = 0x1; 1601 encoder->possible_crtcs = 0x1;
1485 else 1602 break;
1603 case 2:
1604 default:
1486 encoder->possible_crtcs = 0x3; 1605 encoder->possible_crtcs = 0x3;
1606 break;
1607 case 6:
1608 encoder->possible_crtcs = 0x3f;
1609 break;
1610 }
1487 1611
1488 radeon_encoder->enc_priv = NULL; 1612 radeon_encoder->enc_priv = NULL;
1489 1613
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 797972e344a6..93c7d5d41914 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -75,6 +75,11 @@ enum radeon_family {
75 CHIP_RV730, 75 CHIP_RV730,
76 CHIP_RV710, 76 CHIP_RV710,
77 CHIP_RV740, 77 CHIP_RV740,
78 CHIP_CEDAR,
79 CHIP_REDWOOD,
80 CHIP_JUNIPER,
81 CHIP_CYPRESS,
82 CHIP_HEMLOCK,
78 CHIP_LAST, 83 CHIP_LAST,
79}; 84};
80 85
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d71e346e9ab5..8fccbf29235e 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -39,6 +39,8 @@
39 39
40#include "drm_fb_helper.h" 40#include "drm_fb_helper.h"
41 41
42#include <linux/vga_switcheroo.h>
43
42struct radeon_fb_device { 44struct radeon_fb_device {
43 struct drm_fb_helper helper; 45 struct drm_fb_helper helper;
44 struct radeon_framebuffer *rfb; 46 struct radeon_framebuffer *rfb;
@@ -148,7 +150,6 @@ int radeonfb_create(struct drm_device *dev,
148 unsigned long tmp; 150 unsigned long tmp;
149 bool fb_tiled = false; /* useful for testing */ 151 bool fb_tiled = false; /* useful for testing */
150 u32 tiling_flags = 0; 152 u32 tiling_flags = 0;
151 int crtc_count;
152 153
153 mode_cmd.width = surface_width; 154 mode_cmd.width = surface_width;
154 mode_cmd.height = surface_height; 155 mode_cmd.height = surface_height;
@@ -239,11 +240,7 @@ int radeonfb_create(struct drm_device *dev,
239 rfbdev = info->par; 240 rfbdev = info->par;
240 rfbdev->helper.funcs = &radeon_fb_helper_funcs; 241 rfbdev->helper.funcs = &radeon_fb_helper_funcs;
241 rfbdev->helper.dev = dev; 242 rfbdev->helper.dev = dev;
242 if (rdev->flags & RADEON_SINGLE_CRTC) 243 ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
243 crtc_count = 1;
244 else
245 crtc_count = 2;
246 ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count,
247 RADEONFB_CONN_LIMIT); 244 RADEONFB_CONN_LIMIT);
248 if (ret) 245 if (ret)
249 goto out_unref; 246 goto out_unref;
@@ -257,7 +254,7 @@ int radeonfb_create(struct drm_device *dev,
257 info->flags = FBINFO_DEFAULT; 254 info->flags = FBINFO_DEFAULT;
258 info->fbops = &radeonfb_ops; 255 info->fbops = &radeonfb_ops;
259 256
260 tmp = fb_gpuaddr - rdev->mc.vram_location; 257 tmp = fb_gpuaddr - rdev->mc.vram_start;
261 info->fix.smem_start = rdev->mc.aper_base + tmp; 258 info->fix.smem_start = rdev->mc.aper_base + tmp;
262 info->fix.smem_len = size; 259 info->fix.smem_len = size;
263 info->screen_base = fbptr; 260 info->screen_base = fbptr;
@@ -291,6 +288,7 @@ int radeonfb_create(struct drm_device *dev,
291 rfbdev->rdev = rdev; 288 rfbdev->rdev = rdev;
292 289
293 mutex_unlock(&rdev->ddev->struct_mutex); 290 mutex_unlock(&rdev->ddev->struct_mutex);
291 vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
294 return 0; 292 return 0;
295 293
296out_unref: 294out_unref:
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index e73d56e83fa6..1770d3c07fd0 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -139,6 +139,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
139 unsigned t; 139 unsigned t;
140 unsigned p; 140 unsigned p;
141 int i, j; 141 int i, j;
142 u64 page_base;
142 143
143 if (!rdev->gart.ready) { 144 if (!rdev->gart.ready) {
144 WARN(1, "trying to unbind memory to uninitialized GART !\n"); 145 WARN(1, "trying to unbind memory to uninitialized GART !\n");
@@ -151,9 +152,11 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
151 pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], 152 pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
152 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 153 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
153 rdev->gart.pages[p] = NULL; 154 rdev->gart.pages[p] = NULL;
154 rdev->gart.pages_addr[p] = 0; 155 rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
156 page_base = rdev->gart.pages_addr[p];
155 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 157 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
156 radeon_gart_set_page(rdev, t, 0); 158 radeon_gart_set_page(rdev, t, page_base);
159 page_base += RADEON_GPU_PAGE_SIZE;
157 } 160 }
158 } 161 }
159 } 162 }
@@ -199,8 +202,26 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
199 return 0; 202 return 0;
200} 203}
201 204
205void radeon_gart_restore(struct radeon_device *rdev)
206{
207 int i, j, t;
208 u64 page_base;
209
210 for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
211 page_base = rdev->gart.pages_addr[i];
212 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
213 radeon_gart_set_page(rdev, t, page_base);
214 page_base += RADEON_GPU_PAGE_SIZE;
215 }
216 }
217 mb();
218 radeon_gart_tlb_flush(rdev);
219}
220
202int radeon_gart_init(struct radeon_device *rdev) 221int radeon_gart_init(struct radeon_device *rdev)
203{ 222{
223 int r, i;
224
204 if (rdev->gart.pages) { 225 if (rdev->gart.pages) {
205 return 0; 226 return 0;
206 } 227 }
@@ -209,6 +230,9 @@ int radeon_gart_init(struct radeon_device *rdev)
209 DRM_ERROR("Page size is smaller than GPU page size!\n"); 230 DRM_ERROR("Page size is smaller than GPU page size!\n");
210 return -EINVAL; 231 return -EINVAL;
211 } 232 }
233 r = radeon_dummy_page_init(rdev);
234 if (r)
235 return r;
212 /* Compute table size */ 236 /* Compute table size */
213 rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE; 237 rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
214 rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE; 238 rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
@@ -227,6 +251,10 @@ int radeon_gart_init(struct radeon_device *rdev)
227 radeon_gart_fini(rdev); 251 radeon_gart_fini(rdev);
228 return -ENOMEM; 252 return -ENOMEM;
229 } 253 }
254 /* set GART entry to point to the dummy page by default */
255 for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
256 rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
257 }
230 return 0; 258 return 0;
231} 259}
232 260
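The radeon_gart.c hunks above make an unbound GART slot point at a shared dummy page instead of bus address 0, and add radeon_gart_restore() to replay the whole table from the CPU-side copy. Below is a minimal userspace C sketch of that bookkeeping only; the gart_* names, the table size, and the page constants are invented for illustration and are not the kernel API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_PAGES  8
#define PAGE_BYTES 4096

/* One dummy page shared by every unbound slot (stand-in for rdev->dummy_page). */
static uint64_t dummy_page_addr;
static uint64_t gart_pages[NUM_PAGES];  /* CPU copy of what the GPU table points at */

static void gart_init(void)
{
	dummy_page_addr = (uint64_t)(uintptr_t)aligned_alloc(PAGE_BYTES, PAGE_BYTES);
	for (int i = 0; i < NUM_PAGES; i++)
		gart_pages[i] = dummy_page_addr;  /* default every entry to the dummy page */
}

static void gart_bind(int slot, uint64_t dma_addr)
{
	gart_pages[slot] = dma_addr;
}

static void gart_unbind(int slot)
{
	/* Point the slot back at the dummy page rather than clearing it to 0,
	 * so a stray GPU access hits a known, harmless page. */
	gart_pages[slot] = dummy_page_addr;
}

/* Analogue of radeon_gart_restore(): replay every entry from the CPU copy. */
static void gart_restore(void)
{
	for (int i = 0; i < NUM_PAGES; i++)
		printf("slot %d -> %#llx\n", i, (unsigned long long)gart_pages[i]);
}

int main(void)
{
	gart_init();
	gart_bind(3, 0x100000);
	gart_unbind(3);
	gart_restore();
	return 0;
}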
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index db8e9a355a01..ef92d147d8f0 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -69,9 +69,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
69 if (r != -ERESTARTSYS) 69 if (r != -ERESTARTSYS)
70 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", 70 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
71 size, initial_domain, alignment, r); 71 size, initial_domain, alignment, r);
72 mutex_lock(&rdev->ddev->struct_mutex); 72 drm_gem_object_unreference_unlocked(gobj);
73 drm_gem_object_unreference(gobj);
74 mutex_unlock(&rdev->ddev->struct_mutex);
75 return r; 73 return r;
76 } 74 }
77 gobj->driver_private = robj; 75 gobj->driver_private = robj;
@@ -202,14 +200,10 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
202 } 200 }
203 r = drm_gem_handle_create(filp, gobj, &handle); 201 r = drm_gem_handle_create(filp, gobj, &handle);
204 if (r) { 202 if (r) {
205 mutex_lock(&dev->struct_mutex); 203 drm_gem_object_unreference_unlocked(gobj);
206 drm_gem_object_unreference(gobj);
207 mutex_unlock(&dev->struct_mutex);
208 return r; 204 return r;
209 } 205 }
210 mutex_lock(&dev->struct_mutex); 206 drm_gem_object_handle_unreference_unlocked(gobj);
211 drm_gem_object_handle_unreference(gobj);
212 mutex_unlock(&dev->struct_mutex);
213 args->handle = handle; 207 args->handle = handle;
214 return 0; 208 return 0;
215} 209}
@@ -236,9 +230,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
236 230
237 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); 231 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
238 232
239 mutex_lock(&dev->struct_mutex); 233 drm_gem_object_unreference_unlocked(gobj);
240 drm_gem_object_unreference(gobj);
241 mutex_unlock(&dev->struct_mutex);
242 return r; 234 return r;
243} 235}
244 236
@@ -255,9 +247,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
255 } 247 }
256 robj = gobj->driver_private; 248 robj = gobj->driver_private;
257 args->addr_ptr = radeon_bo_mmap_offset(robj); 249 args->addr_ptr = radeon_bo_mmap_offset(robj);
258 mutex_lock(&dev->struct_mutex); 250 drm_gem_object_unreference_unlocked(gobj);
259 drm_gem_object_unreference(gobj);
260 mutex_unlock(&dev->struct_mutex);
261 return 0; 251 return 0;
262} 252}
263 253
@@ -288,9 +278,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
288 default: 278 default:
289 break; 279 break;
290 } 280 }
291 mutex_lock(&dev->struct_mutex); 281 drm_gem_object_unreference_unlocked(gobj);
292 drm_gem_object_unreference(gobj);
293 mutex_unlock(&dev->struct_mutex);
294 return r; 282 return r;
295} 283}
296 284
@@ -311,9 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
311 /* callback hw specific functions if any */ 299 /* callback hw specific functions if any */
312 if (robj->rdev->asic->ioctl_wait_idle) 300 if (robj->rdev->asic->ioctl_wait_idle)
313 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); 301 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
314 mutex_lock(&dev->struct_mutex); 302 drm_gem_object_unreference_unlocked(gobj);
315 drm_gem_object_unreference(gobj);
316 mutex_unlock(&dev->struct_mutex);
317 return r; 303 return r;
318} 304}
319 305
@@ -331,9 +317,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
331 return -EINVAL; 317 return -EINVAL;
332 robj = gobj->driver_private; 318 robj = gobj->driver_private;
333 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); 319 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
334 mutex_lock(&dev->struct_mutex); 320 drm_gem_object_unreference_unlocked(gobj);
335 drm_gem_object_unreference(gobj);
336 mutex_unlock(&dev->struct_mutex);
337 return r; 321 return r;
338} 322}
339 323
@@ -356,8 +340,6 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
356 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); 340 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
357 radeon_bo_unreserve(rbo); 341 radeon_bo_unreserve(rbo);
358out: 342out:
359 mutex_lock(&dev->struct_mutex); 343 drm_gem_object_unreference_unlocked(gobj);
360 drm_gem_object_unreference(gobj);
361 mutex_unlock(&dev->struct_mutex);
362 return r; 344 return r;
363} 345}
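The radeon_gem.c hunks above replace each mutex_lock()/drm_gem_object_unreference()/mutex_unlock() triplet with drm_gem_object_unreference_unlocked(), which takes the lock internally. A hedged sketch of that wrapper pattern using a pthread mutex; the names and the refcount handling are illustrative, not the DRM implementation.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

struct gem_object { int refcount; };

/* Locked variant: the caller must already hold struct_mutex. */
static void object_unreference(struct gem_object *obj)
{
	if (--obj->refcount == 0)
		printf("object freed\n");
}

/* The "_unlocked" variant wraps the locked one, so each ioctl no longer
 * needs its own lock/unlock pair around a single unreference. */
static void object_unreference_unlocked(struct gem_object *obj)
{
	pthread_mutex_lock(&struct_mutex);
	object_unreference(obj);
	pthread_mutex_unlock(&struct_mutex);
}

int main(void)
{
	struct gem_object obj = { .refcount = 1 };
	object_unreference_unlocked(&obj);
	return 0;
}

(Build with -lpthread; the real helper of course frees the object rather than printing.)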
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index da3da1e89d00..4ae50c19589f 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -26,6 +26,7 @@
26#include "drmP.h" 26#include "drmP.h"
27#include "radeon_drm.h" 27#include "radeon_drm.h"
28#include "radeon.h" 28#include "radeon.h"
29#include "atom.h"
29 30
30/** 31/**
31 * radeon_ddc_probe 32 * radeon_ddc_probe
@@ -59,7 +60,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
59} 60}
60 61
61 62
62void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) 63static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
63{ 64{
64 struct radeon_device *rdev = i2c->dev->dev_private; 65 struct radeon_device *rdev = i2c->dev->dev_private;
65 struct radeon_i2c_bus_rec *rec = &i2c->rec; 66 struct radeon_i2c_bus_rec *rec = &i2c->rec;
@@ -71,13 +72,25 @@ void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
71 */ 72 */
72 if (rec->hw_capable) { 73 if (rec->hw_capable) {
73 if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) { 74 if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
74 if (rec->a_clk_reg == RADEON_GPIO_MONID) { 75 u32 reg;
76
77 if (rdev->family >= CHIP_RV350)
78 reg = RADEON_GPIO_MONID;
79 else if ((rdev->family == CHIP_R300) ||
80 (rdev->family == CHIP_R350))
81 reg = RADEON_GPIO_DVI_DDC;
82 else
83 reg = RADEON_GPIO_CRT2_DDC;
84
85 mutex_lock(&rdev->dc_hw_i2c_mutex);
86 if (rec->a_clk_reg == reg) {
75 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | 87 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
76 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1))); 88 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
77 } else { 89 } else {
78 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | 90 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
79 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3))); 91 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
80 } 92 }
93 mutex_unlock(&rdev->dc_hw_i2c_mutex);
81 } 94 }
82 } 95 }
83 96
@@ -168,6 +181,692 @@ static void set_data(void *i2c_priv, int data)
168 WREG32(rec->en_data_reg, val); 181 WREG32(rec->en_data_reg, val);
169} 182}
170 183
184static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
185{
186 struct radeon_pll *spll = &rdev->clock.spll;
187 u32 sclk = radeon_get_engine_clock(rdev);
188 u32 prescale = 0;
189 u32 n, m;
190 u8 loop;
191 int i2c_clock;
192
193 switch (rdev->family) {
194 case CHIP_R100:
195 case CHIP_RV100:
196 case CHIP_RS100:
197 case CHIP_RV200:
198 case CHIP_RS200:
199 case CHIP_R200:
200 case CHIP_RV250:
201 case CHIP_RS300:
202 case CHIP_RV280:
203 case CHIP_R300:
204 case CHIP_R350:
205 case CHIP_RV350:
206 n = (spll->reference_freq) / (4 * 6);
207 for (loop = 1; loop < 255; loop++) {
208 if ((loop * (loop - 1)) > n)
209 break;
210 }
211 m = loop - 1;
212 prescale = m | (loop << 8);
213 break;
214 case CHIP_RV380:
215 case CHIP_RS400:
216 case CHIP_RS480:
217 case CHIP_R420:
218 case CHIP_R423:
219 case CHIP_RV410:
220 sclk = radeon_get_engine_clock(rdev);
221 prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
222 break;
223 case CHIP_RS600:
224 case CHIP_RS690:
225 case CHIP_RS740:
226 /* todo */
227 break;
228 case CHIP_RV515:
229 case CHIP_R520:
230 case CHIP_RV530:
231 case CHIP_RV560:
232 case CHIP_RV570:
233 case CHIP_R580:
234 i2c_clock = 50;
235 sclk = radeon_get_engine_clock(rdev);
236 if (rdev->family == CHIP_R520)
237 prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
238 else
239 prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
240 break;
241 case CHIP_R600:
242 case CHIP_RV610:
243 case CHIP_RV630:
244 case CHIP_RV670:
245 /* todo */
246 break;
247 case CHIP_RV620:
248 case CHIP_RV635:
249 case CHIP_RS780:
250 case CHIP_RS880:
251 case CHIP_RV770:
252 case CHIP_RV730:
253 case CHIP_RV710:
254 case CHIP_RV740:
255 /* todo */
256 break;
257 case CHIP_CEDAR:
258 case CHIP_REDWOOD:
259 case CHIP_JUNIPER:
260 case CHIP_CYPRESS:
261 case CHIP_HEMLOCK:
262 /* todo */
263 break;
264 default:
265 DRM_ERROR("i2c: unhandled radeon chip\n");
266 break;
267 }
268 return prescale;
269}
270
271
272/* hw i2c engine for r1xx-4xx hardware
273 * hw can buffer up to 15 bytes
274 */
275static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
276 struct i2c_msg *msgs, int num)
277{
278 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
279 struct radeon_device *rdev = i2c->dev->dev_private;
280 struct radeon_i2c_bus_rec *rec = &i2c->rec;
281 struct i2c_msg *p;
282 int i, j, k, ret = num;
283 u32 prescale;
284 u32 i2c_cntl_0, i2c_cntl_1, i2c_data;
285 u32 tmp, reg;
286
287 mutex_lock(&rdev->dc_hw_i2c_mutex);
288 /* take the pm lock since we need a constant sclk */
289 mutex_lock(&rdev->pm.mutex);
290
291 prescale = radeon_get_i2c_prescale(rdev);
292
293 reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
294 RADEON_I2C_START |
295 RADEON_I2C_STOP |
296 RADEON_I2C_GO);
297
298 if (rdev->is_atom_bios) {
299 tmp = RREG32(RADEON_BIOS_6_SCRATCH);
300 WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
301 }
302
303 if (rec->mm_i2c) {
304 i2c_cntl_0 = RADEON_I2C_CNTL_0;
305 i2c_cntl_1 = RADEON_I2C_CNTL_1;
306 i2c_data = RADEON_I2C_DATA;
307 } else {
308 i2c_cntl_0 = RADEON_DVI_I2C_CNTL_0;
309 i2c_cntl_1 = RADEON_DVI_I2C_CNTL_1;
310 i2c_data = RADEON_DVI_I2C_DATA;
311
312 switch (rdev->family) {
313 case CHIP_R100:
314 case CHIP_RV100:
315 case CHIP_RS100:
316 case CHIP_RV200:
317 case CHIP_RS200:
318 case CHIP_RS300:
319 switch (rec->mask_clk_reg) {
320 case RADEON_GPIO_DVI_DDC:
321 /* no gpio select bit */
322 break;
323 default:
324 DRM_ERROR("gpio not supported with hw i2c\n");
325 ret = -EINVAL;
326 goto done;
327 }
328 break;
329 case CHIP_R200:
330 /* only bit 4 on r200 */
331 switch (rec->mask_clk_reg) {
332 case RADEON_GPIO_DVI_DDC:
333 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
334 break;
335 case RADEON_GPIO_MONID:
336 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
337 break;
338 default:
339 DRM_ERROR("gpio not supported with hw i2c\n");
340 ret = -EINVAL;
341 goto done;
342 }
343 break;
344 case CHIP_RV250:
345 case CHIP_RV280:
346 /* bits 3 and 4 */
347 switch (rec->mask_clk_reg) {
348 case RADEON_GPIO_DVI_DDC:
349 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
350 break;
351 case RADEON_GPIO_VGA_DDC:
352 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
353 break;
354 case RADEON_GPIO_CRT2_DDC:
355 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
356 break;
357 default:
358 DRM_ERROR("gpio not supported with hw i2c\n");
359 ret = -EINVAL;
360 goto done;
361 }
362 break;
363 case CHIP_R300:
364 case CHIP_R350:
365 /* only bit 4 on r300/r350 */
366 switch (rec->mask_clk_reg) {
367 case RADEON_GPIO_VGA_DDC:
368 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
369 break;
370 case RADEON_GPIO_DVI_DDC:
371 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
372 break;
373 default:
374 DRM_ERROR("gpio not supported with hw i2c\n");
375 ret = -EINVAL;
376 goto done;
377 }
378 break;
379 case CHIP_RV350:
380 case CHIP_RV380:
381 case CHIP_R420:
382 case CHIP_R423:
383 case CHIP_RV410:
384 case CHIP_RS400:
385 case CHIP_RS480:
386 /* bits 3 and 4 */
387 switch (rec->mask_clk_reg) {
388 case RADEON_GPIO_VGA_DDC:
389 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
390 break;
391 case RADEON_GPIO_DVI_DDC:
392 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
393 break;
394 case RADEON_GPIO_MONID:
395 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
396 break;
397 default:
398 DRM_ERROR("gpio not supported with hw i2c\n");
399 ret = -EINVAL;
400 goto done;
401 }
402 break;
403 default:
404 DRM_ERROR("unsupported asic\n");
405 ret = -EINVAL;
406 goto done;
407 break;
408 }
409 }
410
411 /* check for bus probe */
412 p = &msgs[0];
413 if ((num == 1) && (p->len == 0)) {
414 WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
415 RADEON_I2C_NACK |
416 RADEON_I2C_HALT |
417 RADEON_I2C_SOFT_RST));
418 WREG32(i2c_data, (p->addr << 1) & 0xff);
419 WREG32(i2c_data, 0);
420 WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
421 (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
422 RADEON_I2C_EN |
423 (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
424 WREG32(i2c_cntl_0, reg);
425 for (k = 0; k < 32; k++) {
426 udelay(10);
427 tmp = RREG32(i2c_cntl_0);
428 if (tmp & RADEON_I2C_GO)
429 continue;
430 tmp = RREG32(i2c_cntl_0);
431 if (tmp & RADEON_I2C_DONE)
432 break;
433 else {
434 DRM_DEBUG("i2c write error 0x%08x\n", tmp);
435 WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
436 ret = -EIO;
437 goto done;
438 }
439 }
440 goto done;
441 }
442
443 for (i = 0; i < num; i++) {
444 p = &msgs[i];
445 for (j = 0; j < p->len; j++) {
446 if (p->flags & I2C_M_RD) {
447 WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
448 RADEON_I2C_NACK |
449 RADEON_I2C_HALT |
450 RADEON_I2C_SOFT_RST));
451 WREG32(i2c_data, ((p->addr << 1) & 0xff) | 0x1);
452 WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
453 (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
454 RADEON_I2C_EN |
455 (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
456 WREG32(i2c_cntl_0, reg | RADEON_I2C_RECEIVE);
457 for (k = 0; k < 32; k++) {
458 udelay(10);
459 tmp = RREG32(i2c_cntl_0);
460 if (tmp & RADEON_I2C_GO)
461 continue;
462 tmp = RREG32(i2c_cntl_0);
463 if (tmp & RADEON_I2C_DONE)
464 break;
465 else {
466 DRM_DEBUG("i2c read error 0x%08x\n", tmp);
467 WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
468 ret = -EIO;
469 goto done;
470 }
471 }
472 p->buf[j] = RREG32(i2c_data) & 0xff;
473 } else {
474 WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
475 RADEON_I2C_NACK |
476 RADEON_I2C_HALT |
477 RADEON_I2C_SOFT_RST));
478 WREG32(i2c_data, (p->addr << 1) & 0xff);
479 WREG32(i2c_data, p->buf[j]);
480 WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
481 (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
482 RADEON_I2C_EN |
483 (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
484 WREG32(i2c_cntl_0, reg);
485 for (k = 0; k < 32; k++) {
486 udelay(10);
487 tmp = RREG32(i2c_cntl_0);
488 if (tmp & RADEON_I2C_GO)
489 continue;
490 tmp = RREG32(i2c_cntl_0);
491 if (tmp & RADEON_I2C_DONE)
492 break;
493 else {
494 DRM_DEBUG("i2c write error 0x%08x\n", tmp);
495 WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
496 ret = -EIO;
497 goto done;
498 }
499 }
500 }
501 }
502 }
503
504done:
505 WREG32(i2c_cntl_0, 0);
506 WREG32(i2c_cntl_1, 0);
507 WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
508 RADEON_I2C_NACK |
509 RADEON_I2C_HALT |
510 RADEON_I2C_SOFT_RST));
511
512 if (rdev->is_atom_bios) {
513 tmp = RREG32(RADEON_BIOS_6_SCRATCH);
514 tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
515 WREG32(RADEON_BIOS_6_SCRATCH, tmp);
516 }
517
518 mutex_unlock(&rdev->pm.mutex);
519 mutex_unlock(&rdev->dc_hw_i2c_mutex);
520
521 return ret;
522}
523
524/* hw i2c engine for r5xx hardware
525 * hw can buffer up to 15 bytes
526 */
527static int r500_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
528 struct i2c_msg *msgs, int num)
529{
530 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
531 struct radeon_device *rdev = i2c->dev->dev_private;
532 struct radeon_i2c_bus_rec *rec = &i2c->rec;
533 struct i2c_msg *p;
534 int i, j, remaining, current_count, buffer_offset, ret = num;
535 u32 prescale;
536 u32 tmp, reg;
537 u32 saved1, saved2;
538
539 mutex_lock(&rdev->dc_hw_i2c_mutex);
540 /* take the pm lock since we need a constant sclk */
541 mutex_lock(&rdev->pm.mutex);
542
543 prescale = radeon_get_i2c_prescale(rdev);
544
545 /* clear gpio mask bits */
546 tmp = RREG32(rec->mask_clk_reg);
547 tmp &= ~rec->mask_clk_mask;
548 WREG32(rec->mask_clk_reg, tmp);
549 tmp = RREG32(rec->mask_clk_reg);
550
551 tmp = RREG32(rec->mask_data_reg);
552 tmp &= ~rec->mask_data_mask;
553 WREG32(rec->mask_data_reg, tmp);
554 tmp = RREG32(rec->mask_data_reg);
555
556 /* clear pin values */
557 tmp = RREG32(rec->a_clk_reg);
558 tmp &= ~rec->a_clk_mask;
559 WREG32(rec->a_clk_reg, tmp);
560 tmp = RREG32(rec->a_clk_reg);
561
562 tmp = RREG32(rec->a_data_reg);
563 tmp &= ~rec->a_data_mask;
564 WREG32(rec->a_data_reg, tmp);
565 tmp = RREG32(rec->a_data_reg);
566
567 /* set the pins to input */
568 tmp = RREG32(rec->en_clk_reg);
569 tmp &= ~rec->en_clk_mask;
570 WREG32(rec->en_clk_reg, tmp);
571 tmp = RREG32(rec->en_clk_reg);
572
573 tmp = RREG32(rec->en_data_reg);
574 tmp &= ~rec->en_data_mask;
575 WREG32(rec->en_data_reg, tmp);
576 tmp = RREG32(rec->en_data_reg);
577
578 /* */
579 tmp = RREG32(RADEON_BIOS_6_SCRATCH);
580 WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
581 saved1 = RREG32(AVIVO_DC_I2C_CONTROL1);
582 saved2 = RREG32(0x494);
583 WREG32(0x494, saved2 | 0x1);
584
585 WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C);
586 for (i = 0; i < 50; i++) {
587 udelay(1);
588 if (RREG32(AVIVO_DC_I2C_ARBITRATION) & AVIVO_DC_I2C_SW_CAN_USE_I2C)
589 break;
590 }
591 if (i == 50) {
592 DRM_ERROR("failed to get i2c bus\n");
593 ret = -EBUSY;
594 goto done;
595 }
596
597 reg = AVIVO_DC_I2C_START | AVIVO_DC_I2C_STOP | AVIVO_DC_I2C_EN;
598 switch (rec->mask_clk_reg) {
599 case AVIVO_DC_GPIO_DDC1_MASK:
600 reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1);
601 break;
602 case AVIVO_DC_GPIO_DDC2_MASK:
603 reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC2);
604 break;
605 case AVIVO_DC_GPIO_DDC3_MASK:
606 reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC3);
607 break;
608 default:
609 DRM_ERROR("gpio not supported with hw i2c\n");
610 ret = -EINVAL;
611 goto done;
612 }
613
614 /* check for bus probe */
615 p = &msgs[0];
616 if ((num == 1) && (p->len == 0)) {
617 WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
618 AVIVO_DC_I2C_NACK |
619 AVIVO_DC_I2C_HALT));
620 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
621 udelay(1);
622 WREG32(AVIVO_DC_I2C_RESET, 0);
623
624 WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
625 WREG32(AVIVO_DC_I2C_DATA, 0);
626
627 WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
628 WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
629 AVIVO_DC_I2C_DATA_COUNT(1) |
630 (prescale << 16)));
631 WREG32(AVIVO_DC_I2C_CONTROL1, reg);
632 WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
633 for (j = 0; j < 200; j++) {
634 udelay(50);
635 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
636 if (tmp & AVIVO_DC_I2C_GO)
637 continue;
638 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
639 if (tmp & AVIVO_DC_I2C_DONE)
640 break;
641 else {
642 DRM_DEBUG("i2c write error 0x%08x\n", tmp);
643 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
644 ret = -EIO;
645 goto done;
646 }
647 }
648 goto done;
649 }
650
651 for (i = 0; i < num; i++) {
652 p = &msgs[i];
653 remaining = p->len;
654 buffer_offset = 0;
655 if (p->flags & I2C_M_RD) {
656 while (remaining) {
657 if (remaining > 15)
658 current_count = 15;
659 else
660 current_count = remaining;
661 WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
662 AVIVO_DC_I2C_NACK |
663 AVIVO_DC_I2C_HALT));
664 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
665 udelay(1);
666 WREG32(AVIVO_DC_I2C_RESET, 0);
667
668 WREG32(AVIVO_DC_I2C_DATA, ((p->addr << 1) & 0xff) | 0x1);
669 WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
670 WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
671 AVIVO_DC_I2C_DATA_COUNT(current_count) |
672 (prescale << 16)));
673 WREG32(AVIVO_DC_I2C_CONTROL1, reg | AVIVO_DC_I2C_RECEIVE);
674 WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
675 for (j = 0; j < 200; j++) {
676 udelay(50);
677 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
678 if (tmp & AVIVO_DC_I2C_GO)
679 continue;
680 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
681 if (tmp & AVIVO_DC_I2C_DONE)
682 break;
683 else {
684 DRM_DEBUG("i2c read error 0x%08x\n", tmp);
685 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
686 ret = -EIO;
687 goto done;
688 }
689 }
690 for (j = 0; j < current_count; j++)
691 p->buf[buffer_offset + j] = RREG32(AVIVO_DC_I2C_DATA) & 0xff;
692 remaining -= current_count;
693 buffer_offset += current_count;
694 }
695 } else {
696 while (remaining) {
697 if (remaining > 15)
698 current_count = 15;
699 else
700 current_count = remaining;
701 WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
702 AVIVO_DC_I2C_NACK |
703 AVIVO_DC_I2C_HALT));
704 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
705 udelay(1);
706 WREG32(AVIVO_DC_I2C_RESET, 0);
707
708 WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
709 for (j = 0; j < current_count; j++)
710 WREG32(AVIVO_DC_I2C_DATA, p->buf[buffer_offset + j]);
711
712 WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
713 WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
714 AVIVO_DC_I2C_DATA_COUNT(current_count) |
715 (prescale << 16)));
716 WREG32(AVIVO_DC_I2C_CONTROL1, reg);
717 WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
718 for (j = 0; j < 200; j++) {
719 udelay(50);
720 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
721 if (tmp & AVIVO_DC_I2C_GO)
722 continue;
723 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
724 if (tmp & AVIVO_DC_I2C_DONE)
725 break;
726 else {
727 DRM_DEBUG("i2c write error 0x%08x\n", tmp);
728 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
729 ret = -EIO;
730 goto done;
731 }
732 }
733 remaining -= current_count;
734 buffer_offset += current_count;
735 }
736 }
737 }
738
739done:
740 WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
741 AVIVO_DC_I2C_NACK |
742 AVIVO_DC_I2C_HALT));
743 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
744 udelay(1);
745 WREG32(AVIVO_DC_I2C_RESET, 0);
746
747 WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C);
748 WREG32(AVIVO_DC_I2C_CONTROL1, saved1);
749 WREG32(0x494, saved2);
750 tmp = RREG32(RADEON_BIOS_6_SCRATCH);
751 tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
752 WREG32(RADEON_BIOS_6_SCRATCH, tmp);
753
754 mutex_unlock(&rdev->pm.mutex);
755 mutex_unlock(&rdev->dc_hw_i2c_mutex);
756
757 return ret;
758}
759
760static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap,
761 struct i2c_msg *msgs, int num)
762{
763 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
764 int ret;
765
766 radeon_i2c_do_lock(i2c, 1);
767 ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num);
768 radeon_i2c_do_lock(i2c, 0);
769
770 return ret;
771}
772
773static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
774 struct i2c_msg *msgs, int num)
775{
776 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
777 struct radeon_device *rdev = i2c->dev->dev_private;
778 struct radeon_i2c_bus_rec *rec = &i2c->rec;
779 int ret;
780
781 switch (rdev->family) {
782 case CHIP_R100:
783 case CHIP_RV100:
784 case CHIP_RS100:
785 case CHIP_RV200:
786 case CHIP_RS200:
787 case CHIP_R200:
788 case CHIP_RV250:
789 case CHIP_RS300:
790 case CHIP_RV280:
791 case CHIP_R300:
792 case CHIP_R350:
793 case CHIP_RV350:
794 case CHIP_RV380:
795 case CHIP_R420:
796 case CHIP_R423:
797 case CHIP_RV410:
798 case CHIP_RS400:
799 case CHIP_RS480:
800 if (rec->hw_capable)
801 ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
802 else
803 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
804 break;
805 case CHIP_RS600:
806 case CHIP_RS690:
807 case CHIP_RS740:
808 /* XXX fill in hw i2c implementation */
809 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
810 break;
811 case CHIP_RV515:
812 case CHIP_R520:
813 case CHIP_RV530:
814 case CHIP_RV560:
815 case CHIP_RV570:
816 case CHIP_R580:
817 if (rec->hw_capable) {
818 if (rec->mm_i2c)
819 ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
820 else
821 ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
822 } else
823 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
824 break;
825 case CHIP_R600:
826 case CHIP_RV610:
827 case CHIP_RV630:
828 case CHIP_RV670:
829 /* XXX fill in hw i2c implementation */
830 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
831 break;
832 case CHIP_RV620:
833 case CHIP_RV635:
834 case CHIP_RS780:
835 case CHIP_RS880:
836 case CHIP_RV770:
837 case CHIP_RV730:
838 case CHIP_RV710:
839 case CHIP_RV740:
840 /* XXX fill in hw i2c implementation */
841 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
842 break;
843 case CHIP_CEDAR:
844 case CHIP_REDWOOD:
845 case CHIP_JUNIPER:
846 case CHIP_CYPRESS:
847 case CHIP_HEMLOCK:
848 /* XXX fill in hw i2c implementation */
849 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
850 break;
851 default:
852 DRM_ERROR("i2c: unhandled radeon chip\n");
853 ret = -EIO;
854 break;
855 }
856
857 return ret;
858}
859
860static u32 radeon_i2c_func(struct i2c_adapter *adap)
861{
862 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
863}
864
865static const struct i2c_algorithm radeon_i2c_algo = {
866 .master_xfer = radeon_i2c_xfer,
867 .functionality = radeon_i2c_func,
868};
869
171struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, 870struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
172 struct radeon_i2c_bus_rec *rec, 871 struct radeon_i2c_bus_rec *rec,
173 const char *name) 872 const char *name)
@@ -179,23 +878,36 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
179 if (i2c == NULL) 878 if (i2c == NULL)
180 return NULL; 879 return NULL;
181 880
182 i2c->adapter.owner = THIS_MODULE; 881 /* set the internal bit adapter */
183 i2c->dev = dev; 882 i2c->algo.radeon.bit_adapter.owner = THIS_MODULE;
184 i2c_set_adapdata(&i2c->adapter, i2c); 883 i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c);
185 i2c->adapter.algo_data = &i2c->algo.bit; 884 sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name);
186 i2c->algo.bit.setsda = set_data; 885 i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data;
187 i2c->algo.bit.setscl = set_clock; 886 i2c->algo.radeon.bit_data.setsda = set_data;
188 i2c->algo.bit.getsda = get_data; 887 i2c->algo.radeon.bit_data.setscl = set_clock;
189 i2c->algo.bit.getscl = get_clock; 888 i2c->algo.radeon.bit_data.getsda = get_data;
190 i2c->algo.bit.udelay = 20; 889 i2c->algo.radeon.bit_data.getscl = get_clock;
890 i2c->algo.radeon.bit_data.udelay = 20;
191 /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always 891 /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
192 * make this, 2 jiffies is a lot more reliable */ 892 * make this, 2 jiffies is a lot more reliable */
193 i2c->algo.bit.timeout = 2; 893 i2c->algo.radeon.bit_data.timeout = 2;
194 i2c->algo.bit.data = i2c; 894 i2c->algo.radeon.bit_data.data = i2c;
895 ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter);
896 if (ret) {
897 DRM_ERROR("Failed to register internal bit i2c %s\n", name);
898 goto out_free;
899 }
900 /* set the radeon i2c adapter */
901 i2c->dev = dev;
195 i2c->rec = *rec; 902 i2c->rec = *rec;
196 ret = i2c_bit_add_bus(&i2c->adapter); 903 i2c->adapter.owner = THIS_MODULE;
904 i2c_set_adapdata(&i2c->adapter, i2c);
905 sprintf(i2c->adapter.name, "Radeon i2c %s", name);
906 i2c->adapter.algo_data = &i2c->algo.radeon;
907 i2c->adapter.algo = &radeon_i2c_algo;
908 ret = i2c_add_adapter(&i2c->adapter);
197 if (ret) { 909 if (ret) {
198 DRM_INFO("Failed to register i2c %s\n", name); 910 DRM_ERROR("Failed to register i2c %s\n", name);
199 goto out_free; 911 goto out_free;
200 } 912 }
201 913
@@ -237,11 +949,19 @@ out_free:
237 949
238} 950}
239 951
240
241void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) 952void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
242{ 953{
243 if (!i2c) 954 if (!i2c)
244 return; 955 return;
956 i2c_del_adapter(&i2c->algo.radeon.bit_adapter);
957 i2c_del_adapter(&i2c->adapter);
958 kfree(i2c);
959}
960
961void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c)
962{
963 if (!i2c)
964 return;
245 965
246 i2c_del_adapter(&i2c->adapter); 966 i2c_del_adapter(&i2c->adapter);
247 kfree(i2c); 967 kfree(i2c);
@@ -252,10 +972,10 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
252 return NULL; 972 return NULL;
253} 973}
254 974
255void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, 975void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
256 u8 slave_addr, 976 u8 slave_addr,
257 u8 addr, 977 u8 addr,
258 u8 *val) 978 u8 *val)
259{ 979{
260 u8 out_buf[2]; 980 u8 out_buf[2];
261 u8 in_buf[2]; 981 u8 in_buf[2];
@@ -286,10 +1006,10 @@ void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
286 } 1006 }
287} 1007}
288 1008
289void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus, 1009void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
290 u8 slave_addr, 1010 u8 slave_addr,
291 u8 addr, 1011 u8 addr,
292 u8 val) 1012 u8 val)
293{ 1013{
294 uint8_t out_buf[2]; 1014 uint8_t out_buf[2];
295 struct i2c_msg msg = { 1015 struct i2c_msg msg = {
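The radeon_i2c.c changes above add hardware i2c engines for r1xx-r5xx and route every transfer through radeon_i2c_xfer(), which uses the hardware path only when the BIOS bus record is hw_capable and otherwise falls back to the bit-banged adapter. A small C sketch of that dispatch shape, with invented msg and i2c_bus types that are not the kernel i2c API.

#include <stdio.h>

struct msg { int len; };

/* Stand-ins for r100_hw_i2c_xfer()/r500_hw_i2c_xfer() and radeon_sw_i2c_xfer(). */
static int hw_xfer(struct msg *msgs, int num)
{
	printf("hw engine: %d msg(s), first len %d\n", num, msgs[0].len);
	return num;
}

static int sw_xfer(struct msg *msgs, int num)
{
	printf("bit-bang fallback: %d msg(s), first len %d\n", num, msgs[0].len);
	return num;
}

struct i2c_bus {
	int hw_capable;                 /* like rec->hw_capable from the BIOS gpio record */
	int (*hw)(struct msg *, int);
	int (*sw)(struct msg *, int);
};

/* Dispatch in the spirit of radeon_i2c_xfer(): hardware engine when the bus
 * is wired for it, software bit-banging otherwise. */
static int bus_xfer(struct i2c_bus *bus, struct msg *msgs, int num)
{
	return bus->hw_capable ? bus->hw(msgs, num) : bus->sw(msgs, num);
}

int main(void)
{
	struct msg m = { .len = 1 };
	struct i2c_bus ddc = { .hw_capable = 1, .hw = hw_xfer, .sw = sw_xfer };
	return bus_xfer(&ddc, &m, 1) == 1 ? 0 : 1;
}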
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f23b05606eb5..20ec276e7596 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -30,6 +30,8 @@
30#include "radeon.h" 30#include "radeon.h"
31#include "radeon_drm.h" 31#include "radeon_drm.h"
32 32
33#include <linux/vga_switcheroo.h>
34
33int radeon_driver_unload_kms(struct drm_device *dev) 35int radeon_driver_unload_kms(struct drm_device *dev)
34{ 36{
35 struct radeon_device *rdev = dev->dev_private; 37 struct radeon_device *rdev = dev->dev_private;
@@ -136,6 +138,7 @@ int radeon_driver_firstopen_kms(struct drm_device *dev)
136 138
137void radeon_driver_lastclose_kms(struct drm_device *dev) 139void radeon_driver_lastclose_kms(struct drm_device *dev)
138{ 140{
141 vga_switcheroo_process_delayed_switch();
139} 142}
140 143
141int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) 144int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
@@ -276,17 +279,17 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
276 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), 279 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
277 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), 280 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
278 /* KMS */ 281 /* KMS */
279 DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH), 282 DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
280 DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH), 283 DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
281 DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH), 284 DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
282 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH), 285 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
283 DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH), 286 DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
284 DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH), 287 DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
285 DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH), 288 DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
286 DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), 289 DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
287 DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), 290 DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
288 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH), 291 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
289 DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH), 292 DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
290 DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH), 293 DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
291}; 294};
292int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); 295int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
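The radeon_kms.c hunk above adds DRM_UNLOCKED to the KMS ioctls, so they are no longer serialized behind the single DRM lock. A rough sketch of a flag-driven ioctl table of that shape; the F_* flags, descriptor struct, and dispatcher are assumptions for illustration, not the DRM core.

#include <pthread.h>
#include <stdio.h>

#define F_AUTH     (1 << 0)
#define F_UNLOCKED (1 << 1)   /* skip the single big lock, like DRM_UNLOCKED */

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

struct ioctl_desc {
	const char *name;
	unsigned int flags;
	int (*func)(void *arg);
};

static int gem_create(void *arg)
{
	printf("gem_create (%s)\n", arg ? "arg" : "no arg");
	return 0;
}

static const struct ioctl_desc table[] = {
	{ "GEM_CREATE", F_AUTH | F_UNLOCKED, gem_create },
};

/* Dispatcher: entries without F_UNLOCKED still serialize on the big lock,
 * entries with it rely on their own fine-grained locking instead. */
static int do_ioctl(const struct ioctl_desc *d, void *arg)
{
	int ret;

	if (d->flags & F_UNLOCKED)
		return d->func(arg);
	pthread_mutex_lock(&big_lock);
	ret = d->func(arg);
	pthread_mutex_unlock(&big_lock);
	return ret;
}

int main(void)
{
	return do_ioctl(&table[0], NULL);
}

Entries that still lack the unlocked flag keep the old serialization, which is why the flag can be rolled out one ioctl at a time.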
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index b6d8081e1246..df23d6a01d02 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -403,7 +403,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
403 403
404 /* if scanout was in GTT this really wouldn't work */ 404 /* if scanout was in GTT this really wouldn't work */
405 /* crtc offset is from display base addr not FB location */ 405 /* crtc offset is from display base addr not FB location */
406 radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; 406 radeon_crtc->legacy_display_base_addr = rdev->mc.vram_start;
407 407
408 base -= radeon_crtc->legacy_display_base_addr; 408 base -= radeon_crtc->legacy_display_base_addr;
409 409
@@ -582,29 +582,6 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
582 ? RADEON_CRTC_V_SYNC_POL 582 ? RADEON_CRTC_V_SYNC_POL
583 : 0)); 583 : 0));
584 584
585 /* TODO -> Dell Server */
586 if (0) {
587 uint32_t disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
588 uint32_t tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
589 uint32_t dac2_cntl = RREG32(RADEON_DAC_CNTL2);
590 uint32_t crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
591
592 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
593 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
594
595 /* For CRT on DAC2, don't turn it on if BIOS didn't
596 enable it, even it's detected.
597 */
598 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
599 tv_dac_cntl &= ~((1<<2) | (3<<8) | (7<<24) | (0xff<<16));
600 tv_dac_cntl |= (0x03 | (2<<8) | (0x58<<16));
601
602 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
603 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
604 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
605 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
606 }
607
608 if (radeon_crtc->crtc_id) { 585 if (radeon_crtc->crtc_id) {
609 uint32_t crtc2_gen_cntl; 586 uint32_t crtc2_gen_cntl;
610 uint32_t disp2_merge_cntl; 587 uint32_t disp2_merge_cntl;
@@ -726,6 +703,10 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
726 pll = &rdev->clock.p1pll; 703 pll = &rdev->clock.p1pll;
727 704
728 pll->flags = RADEON_PLL_LEGACY; 705 pll->flags = RADEON_PLL_LEGACY;
706 if (radeon_new_pll == 1)
707 pll->algo = PLL_ALGO_NEW;
708 else
709 pll->algo = PLL_ALGO_LEGACY;
729 710
730 if (mode->clock > 200000) /* range limits??? */ 711 if (mode->clock > 200000) /* range limits??? */
731 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 712 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
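The radeon_legacy_crtc.c hunk above selects pll->algo from the radeon_new_pll module parameter, letting the driver switch between the legacy and new divider searches at runtime. A hedged sketch of the selection pattern only; the compute_* bodies are placeholders, not the radeon PLL math.

#include <stdio.h>

enum pll_algo { PLL_ALGO_LEGACY, PLL_ALGO_NEW };

struct pll {
	unsigned int reference_freq;  /* kHz */
	enum pll_algo algo;
};

/* Placeholder divider searches standing in for the legacy and new code paths. */
static unsigned int compute_legacy(const struct pll *p, unsigned int target_khz)
{
	return target_khz / (p->reference_freq / 1000);
}

static unsigned int compute_new(const struct pll *p, unsigned int target_khz)
{
	return target_khz / (p->reference_freq / 1000) + 1;
}

static unsigned int compute_pll(const struct pll *p, unsigned int target_khz)
{
	if (p->algo == PLL_ALGO_NEW)
		return compute_new(p, target_khz);
	return compute_legacy(p, target_khz);
}

int main(void)
{
	int use_new_pll = 1;                       /* stand-in for the radeon_new_pll parameter */
	struct pll p = { .reference_freq = 27000 };

	p.algo = use_new_pll ? PLL_ALGO_NEW : PLL_ALGO_LEGACY;
	printf("fb div = %u\n", compute_pll(&p, 154000));
	return 0;
}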
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 38e45e231ef5..cf389ce50a8a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -115,6 +115,9 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
115 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 115 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
116 else 116 else
117 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 117 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
118
119 /* adjust pm to dpms change */
120 radeon_pm_compute_clocks(rdev);
118} 121}
119 122
120static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) 123static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
@@ -214,6 +217,11 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
214 struct drm_display_mode *adjusted_mode) 217 struct drm_display_mode *adjusted_mode)
215{ 218{
216 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 219 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
220 struct drm_device *dev = encoder->dev;
221 struct radeon_device *rdev = dev->dev_private;
222
223 /* adjust pm to upcoming mode change */
224 radeon_pm_compute_clocks(rdev);
217 225
218 /* set the active encoder to connector routing */ 226 /* set the active encoder to connector routing */
219 radeon_encoder_set_active_device(encoder); 227 radeon_encoder_set_active_device(encoder);
@@ -285,6 +293,9 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
285 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 293 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
286 else 294 else
287 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 295 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
296
297 /* adjust pm to dpms change */
298 radeon_pm_compute_clocks(rdev);
288} 299}
289 300
290static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) 301static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
@@ -470,6 +481,9 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
470 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 481 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
471 else 482 else
472 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 483 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
484
485 /* adjust pm to dpms change */
486 radeon_pm_compute_clocks(rdev);
473} 487}
474 488
475static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) 489static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
@@ -635,6 +649,9 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
635 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 649 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
636 else 650 else
637 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 651 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
652
653 /* adjust pm to dpms change */
654 radeon_pm_compute_clocks(rdev);
638} 655}
639 656
640static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) 657static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
@@ -842,6 +859,9 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
842 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 859 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
843 else 860 else
844 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 861 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
862
863 /* adjust pm to dpms change */
864 radeon_pm_compute_clocks(rdev);
845} 865}
846 866
847static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) 867static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
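The radeon_legacy_encoders.c hunks above end every DPMS handler (LVDS, primary DAC, internal and external TMDS, TV DAC) with radeon_pm_compute_clocks(), so the power-management code re-evaluates clocks whenever an output is switched on or off. A minimal sketch of that call pattern, with invented encoder bookkeeping in place of the real mode-setting state.

#include <stdio.h>

#define DPMS_ON  0
#define DPMS_OFF 3

struct encoder { const char *name; int dpms; };

static struct encoder encoders[] = {
	{ "LVDS", DPMS_OFF },
	{ "VGA",  DPMS_OFF },
};

/* Analogue of radeon_pm_compute_clocks(): count active outputs and decide
 * whether dynamic reclocking is still allowed. */
static void pm_compute_clocks(void)
{
	int active = 0;

	for (unsigned i = 0; i < sizeof(encoders) / sizeof(encoders[0]); i++)
		if (encoders[i].dpms == DPMS_ON)
			active++;
	printf("%d active output(s): %s\n", active,
	       active <= 1 ? "dynamic reclocking allowed" : "hold high clocks");
}

/* Every DPMS handler ends by re-running the PM decision, matching the calls
 * the diff adds to the legacy encoder handlers. */
static void encoder_dpms(struct encoder *enc, int mode)
{
	enc->dpms = mode;
	pm_compute_clocks();
}

int main(void)
{
	encoder_dpms(&encoders[0], DPMS_ON);
	encoder_dpms(&encoders[1], DPMS_ON);
	return 0;
}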
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e81b2aeb6a8f..1702b820aa4d 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -83,6 +83,8 @@ struct radeon_i2c_bus_rec {
83 bool valid; 83 bool valid;
84 /* id used by atom */ 84 /* id used by atom */
85 uint8_t i2c_id; 85 uint8_t i2c_id;
86 /* id used by atom */
87 uint8_t hpd_id;
86 /* can be used with hw i2c engine */ 88 /* can be used with hw i2c engine */
87 bool hw_capable; 89 bool hw_capable;
88 /* uses multi-media i2c engine */ 90 /* uses multi-media i2c engine */
@@ -113,6 +115,7 @@ struct radeon_tmds_pll {
113 115
114#define RADEON_MAX_BIOS_CONNECTOR 16 116#define RADEON_MAX_BIOS_CONNECTOR 16
115 117
118/* pll flags */
116#define RADEON_PLL_USE_BIOS_DIVS (1 << 0) 119#define RADEON_PLL_USE_BIOS_DIVS (1 << 0)
117#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1) 120#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1)
118#define RADEON_PLL_USE_REF_DIV (1 << 2) 121#define RADEON_PLL_USE_REF_DIV (1 << 2)
@@ -127,6 +130,12 @@ struct radeon_tmds_pll {
127#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) 130#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
128#define RADEON_PLL_USE_POST_DIV (1 << 12) 131#define RADEON_PLL_USE_POST_DIV (1 << 12)
129 132
133/* pll algo */
134enum radeon_pll_algo {
135 PLL_ALGO_LEGACY,
136 PLL_ALGO_NEW
137};
138
130struct radeon_pll { 139struct radeon_pll {
131 /* reference frequency */ 140 /* reference frequency */
132 uint32_t reference_freq; 141 uint32_t reference_freq;
@@ -157,6 +166,13 @@ struct radeon_pll {
157 166
158 /* pll id */ 167 /* pll id */
159 uint32_t id; 168 uint32_t id;
169 /* pll algo */
170 enum radeon_pll_algo algo;
171};
172
173struct i2c_algo_radeon_data {
174 struct i2c_adapter bit_adapter;
175 struct i2c_algo_bit_data bit_data;
160}; 176};
161 177
162struct radeon_i2c_chan { 178struct radeon_i2c_chan {
@@ -164,7 +180,7 @@ struct radeon_i2c_chan {
164 struct drm_device *dev; 180 struct drm_device *dev;
165 union { 181 union {
166 struct i2c_algo_dp_aux_data dp; 182 struct i2c_algo_dp_aux_data dp;
167 struct i2c_algo_bit_data bit; 183 struct i2c_algo_radeon_data radeon;
168 } algo; 184 } algo;
169 struct radeon_i2c_bus_rec rec; 185 struct radeon_i2c_bus_rec rec;
170}; 186};
@@ -193,7 +209,7 @@ struct radeon_mode_info {
193 struct card_info *atom_card_info; 209 struct card_info *atom_card_info;
194 enum radeon_connector_table connector_table; 210 enum radeon_connector_table connector_table;
195 bool mode_config_initialized; 211 bool mode_config_initialized;
196 struct radeon_crtc *crtcs[2]; 212 struct radeon_crtc *crtcs[6];
197 /* DVI-I properties */ 213 /* DVI-I properties */
198 struct drm_property *coherent_mode_property; 214 struct drm_property *coherent_mode_property;
199 /* DAC enable load detect */ 215 /* DAC enable load detect */
@@ -202,7 +218,8 @@ struct radeon_mode_info {
202 struct drm_property *tv_std_property; 218 struct drm_property *tv_std_property;
203 /* legacy TMDS PLL detect */ 219 /* legacy TMDS PLL detect */
204 struct drm_property *tmds_pll_property; 220 struct drm_property *tmds_pll_property;
205 221 /* hardcoded DFP edid from BIOS */
222 struct edid *bios_hardcoded_edid;
206}; 223};
207 224
208#define MAX_H_CODE_TIMING_LEN 32 225#define MAX_H_CODE_TIMING_LEN 32
@@ -237,6 +254,7 @@ struct radeon_crtc {
237 fixed20_12 vsc; 254 fixed20_12 vsc;
238 fixed20_12 hsc; 255 fixed20_12 hsc;
239 struct drm_display_mode native_mode; 256 struct drm_display_mode native_mode;
257 int pll_id;
240}; 258};
241 259
242struct radeon_encoder_primary_dac { 260struct radeon_encoder_primary_dac {
@@ -303,6 +321,7 @@ struct radeon_encoder_atom_dig {
303 /* atom lvds */ 321 /* atom lvds */
304 uint32_t lvds_misc; 322 uint32_t lvds_misc;
305 uint16_t panel_pwr_delay; 323 uint16_t panel_pwr_delay;
324 enum radeon_pll_algo pll_algo;
306 struct radeon_atom_ss *ss; 325 struct radeon_atom_ss *ss;
307 /* panel mode */ 326 /* panel mode */
308 struct drm_display_mode native_mode; 327 struct drm_display_mode native_mode;
@@ -398,6 +417,7 @@ extern void dp_link_train(struct drm_encoder *encoder,
398 struct drm_connector *connector); 417 struct drm_connector *connector);
399extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); 418extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
400extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); 419extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
420extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action);
401extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, 421extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
402 int action, uint8_t lane_num, 422 int action, uint8_t lane_num,
403 uint8_t lane_set); 423 uint8_t lane_set);
@@ -411,14 +431,15 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
411 struct radeon_i2c_bus_rec *rec, 431 struct radeon_i2c_bus_rec *rec,
412 const char *name); 432 const char *name);
413extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); 433extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
414extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, 434extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c);
415 u8 slave_addr, 435extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
416 u8 addr, 436 u8 slave_addr,
417 u8 *val); 437 u8 addr,
418extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c, 438 u8 *val);
419 u8 slave_addr, 439extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
420 u8 addr, 440 u8 slave_addr,
421 u8 val); 441 u8 addr,
442 u8 val);
422extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); 443extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
423extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); 444extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
424 445
@@ -432,14 +453,6 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
432 uint32_t *ref_div_p, 453 uint32_t *ref_div_p,
433 uint32_t *post_div_p); 454 uint32_t *post_div_p);
434 455
435extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
436 uint64_t freq,
437 uint32_t *dot_clock_p,
438 uint32_t *fb_div_p,
439 uint32_t *frac_fb_div_p,
440 uint32_t *ref_div_p,
441 uint32_t *post_div_p);
442
443extern void radeon_setup_encoder_clones(struct drm_device *dev); 456extern void radeon_setup_encoder_clones(struct drm_device *dev);
444 457
445struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); 458struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
@@ -473,6 +486,9 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
473extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, 486extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
474 int x, int y); 487 int x, int y);
475 488
489extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
490extern struct edid *
491radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
476extern bool radeon_atom_get_clock_info(struct drm_device *dev); 492extern bool radeon_atom_get_clock_info(struct drm_device *dev);
477extern bool radeon_combios_get_clock_info(struct drm_device *dev); 493extern bool radeon_combios_get_clock_info(struct drm_device *dev);
478extern struct radeon_encoder_atom_dig * 494extern struct radeon_encoder_atom_dig *
@@ -531,7 +547,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
531 struct radeon_crtc *radeon_crtc); 547 struct radeon_crtc *radeon_crtc);
532void radeon_legacy_init_crtc(struct drm_device *dev, 548void radeon_legacy_init_crtc(struct drm_device *dev,
533 struct radeon_crtc *radeon_crtc); 549 struct radeon_crtc *radeon_crtc);
534extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
535 550
536void radeon_get_clock_info(struct drm_device *dev); 551void radeon_get_clock_info(struct drm_device *dev);
537 552
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index f1da370928eb..fc9d00ac6b15 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -178,7 +178,6 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
178{ 178{
179 int r, i; 179 int r, i;
180 180
181 radeon_ttm_placement_from_domain(bo, domain);
182 if (bo->pin_count) { 181 if (bo->pin_count) {
183 bo->pin_count++; 182 bo->pin_count++;
184 if (gpu_addr) 183 if (gpu_addr)
@@ -186,6 +185,8 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
186 return 0; 185 return 0;
187 } 186 }
188 radeon_ttm_placement_from_domain(bo, domain); 187 radeon_ttm_placement_from_domain(bo, domain);
188 /* force to pin into visible video ram */
189 bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
189 for (i = 0; i < bo->placement.num_placement; i++) 190 for (i = 0; i < bo->placement.num_placement; i++)
190 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 191 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
191 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 192 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
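The radeon_object.c hunk above clamps every pin request to the CPU-visible part of VRAM by capping placement.lpfn, so pinned scanout buffers cannot land beyond the PCI aperture. A small sketch of that clamp, assuming an invented placement struct rather than the TTM one.

#include <stdio.h>

#define PAGE_SHIFT 12

struct placement { unsigned long fpfn, lpfn; };   /* first/last allowed page frame */

/* Cap a pin request to the CPU-visible portion of VRAM, in the spirit of
 * "bo->placement.lpfn = visible_vram_size >> PAGE_SHIFT". */
static void clamp_to_visible(struct placement *pl, unsigned long long visible_bytes)
{
	unsigned long limit = visible_bytes >> PAGE_SHIFT;

	if (pl->lpfn == 0 || pl->lpfn > limit)
		pl->lpfn = limit;
}

int main(void)
{
	struct placement pl = { .fpfn = 0, .lpfn = 0 };   /* 0 = no upper bound yet */

	clamp_to_visible(&pl, 256ULL << 20);              /* 256 MiB visible aperture */
	printf("pin window: pages %lu..%lu\n", pl.fpfn, pl.lpfn);
	return 0;
}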
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8bce64cdc320..d4d1c39a0e99 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -18,21 +18,413 @@
18 * OTHER DEALINGS IN THE SOFTWARE. 18 * OTHER DEALINGS IN THE SOFTWARE.
19 * 19 *
20 * Authors: Rafał Miłecki <zajec5@gmail.com> 20 * Authors: Rafał Miłecki <zajec5@gmail.com>
21 * Alex Deucher <alexdeucher@gmail.com>
21 */ 22 */
22#include "drmP.h" 23#include "drmP.h"
23#include "radeon.h" 24#include "radeon.h"
25#include "avivod.h"
24 26
25int radeon_debugfs_pm_init(struct radeon_device *rdev); 27#define RADEON_IDLE_LOOP_MS 100
28#define RADEON_RECLOCK_DELAY_MS 200
29#define RADEON_WAIT_VBLANK_TIMEOUT 200
30
31static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
32static void radeon_pm_set_clocks(struct radeon_device *rdev);
33static void radeon_pm_idle_work_handler(struct work_struct *work);
34static int radeon_debugfs_pm_init(struct radeon_device *rdev);
35
36static const char *pm_state_names[4] = {
37 "PM_STATE_DISABLED",
38 "PM_STATE_MINIMUM",
39 "PM_STATE_PAUSED",
40 "PM_STATE_ACTIVE"
41};
42
43static const char *pm_state_types[5] = {
44 "Default",
45 "Powersave",
46 "Battery",
47 "Balanced",
48 "Performance",
49};
50
51static void radeon_print_power_mode_info(struct radeon_device *rdev)
52{
53 int i, j;
54 bool is_default;
55
56 DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
57 for (i = 0; i < rdev->pm.num_power_states; i++) {
58 if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
59 is_default = true;
60 else
61 is_default = false;
62 DRM_INFO("State %d %s %s\n", i,
63 pm_state_types[rdev->pm.power_state[i].type],
64 is_default ? "(default)" : "");
65 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
66 DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
67 DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
68 for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
69 if (rdev->flags & RADEON_IS_IGP)
70 DRM_INFO("\t\t%d engine: %d\n",
71 j,
72 rdev->pm.power_state[i].clock_info[j].sclk * 10);
73 else
74 DRM_INFO("\t\t%d engine/memory: %d/%d\n",
75 j,
76 rdev->pm.power_state[i].clock_info[j].sclk * 10,
77 rdev->pm.power_state[i].clock_info[j].mclk * 10);
78 }
79 }
80}
81
82static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
83 enum radeon_pm_state_type type)
84{
85 int i, j;
86 enum radeon_pm_state_type wanted_types[2];
87 int wanted_count;
88
89 switch (type) {
90 case POWER_STATE_TYPE_DEFAULT:
91 default:
92 return rdev->pm.default_power_state;
93 case POWER_STATE_TYPE_POWERSAVE:
94 if (rdev->flags & RADEON_IS_MOBILITY) {
95 wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
96 wanted_types[1] = POWER_STATE_TYPE_BATTERY;
97 wanted_count = 2;
98 } else {
99 wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
100 wanted_count = 1;
101 }
102 break;
103 case POWER_STATE_TYPE_BATTERY:
104 if (rdev->flags & RADEON_IS_MOBILITY) {
105 wanted_types[0] = POWER_STATE_TYPE_BATTERY;
106 wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
107 wanted_count = 2;
108 } else {
109 wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
110 wanted_count = 1;
111 }
112 break;
113 case POWER_STATE_TYPE_BALANCED:
114 case POWER_STATE_TYPE_PERFORMANCE:
115 wanted_types[0] = type;
116 wanted_count = 1;
117 break;
118 }
119
120 for (i = 0; i < wanted_count; i++) {
121 for (j = 0; j < rdev->pm.num_power_states; j++) {
122 if (rdev->pm.power_state[j].type == wanted_types[i])
123 return &rdev->pm.power_state[j];
124 }
125 }
126
127 return rdev->pm.default_power_state;
128}
129
130static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
131 struct radeon_power_state *power_state,
132 enum radeon_pm_clock_mode_type type)
133{
134 switch (type) {
135 case POWER_MODE_TYPE_DEFAULT:
136 default:
137 return power_state->default_clock_mode;
138 case POWER_MODE_TYPE_LOW:
139 return &power_state->clock_info[0];
140 case POWER_MODE_TYPE_MID:
141 if (power_state->num_clock_modes > 2)
142 return &power_state->clock_info[1];
143 else
144 return &power_state->clock_info[0];
145 break;
146 case POWER_MODE_TYPE_HIGH:
147 return &power_state->clock_info[power_state->num_clock_modes - 1];
148 }
149
150}
151
152static void radeon_get_power_state(struct radeon_device *rdev,
153 enum radeon_pm_action action)
154{
155 switch (action) {
156 case PM_ACTION_MINIMUM:
157 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
158 rdev->pm.requested_clock_mode =
159 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
160 break;
161 case PM_ACTION_DOWNCLOCK:
162 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
163 rdev->pm.requested_clock_mode =
164 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
165 break;
166 case PM_ACTION_UPCLOCK:
167 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
168 rdev->pm.requested_clock_mode =
169 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
170 break;
171 case PM_ACTION_NONE:
172 default:
173 DRM_ERROR("Requested mode for not defined action\n");
174 return;
175 }
176 DRM_INFO("Requested: e: %d m: %d p: %d\n",
177 rdev->pm.requested_clock_mode->sclk,
178 rdev->pm.requested_clock_mode->mclk,
179 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
180}
181
182static void radeon_set_power_state(struct radeon_device *rdev)
183{
184 /* if *_clock_mode are the same, *_power_state are as well */
185 if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
186 return;
187
188 DRM_INFO("Setting: e: %d m: %d p: %d\n",
189 rdev->pm.requested_clock_mode->sclk,
190 rdev->pm.requested_clock_mode->mclk,
191 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
192 /* set pcie lanes */
193 /* set voltage */
194 /* set engine clock */
195 radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
196 /* set memory clock */
197
198 rdev->pm.current_power_state = rdev->pm.requested_power_state;
199 rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
200}
26 201
27int radeon_pm_init(struct radeon_device *rdev) 202int radeon_pm_init(struct radeon_device *rdev)
28{ 203{
204 rdev->pm.state = PM_STATE_DISABLED;
205 rdev->pm.planned_action = PM_ACTION_NONE;
206 rdev->pm.downclocked = false;
207
208 if (rdev->bios) {
209 if (rdev->is_atom_bios)
210 radeon_atombios_get_power_modes(rdev);
211 else
212 radeon_combios_get_power_modes(rdev);
213 radeon_print_power_mode_info(rdev);
214 }
215
29 if (radeon_debugfs_pm_init(rdev)) { 216 if (radeon_debugfs_pm_init(rdev)) {
30 DRM_ERROR("Failed to register debugfs file for PM!\n"); 217 DRM_ERROR("Failed to register debugfs file for PM!\n");
31 } 218 }
32 219
220 INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
221
222 if (radeon_dynpm != -1 && radeon_dynpm) {
223 rdev->pm.state = PM_STATE_PAUSED;
224 DRM_INFO("radeon: dynamic power management enabled\n");
225 }
226
227 DRM_INFO("radeon: power management initialized\n");
228
33 return 0; 229 return 0;
34} 230}
35 231
232void radeon_pm_compute_clocks(struct radeon_device *rdev)
233{
234 struct drm_device *ddev = rdev->ddev;
235 struct drm_connector *connector;
236 struct radeon_crtc *radeon_crtc;
237 int count = 0;
238
239 if (rdev->pm.state == PM_STATE_DISABLED)
240 return;
241
242 mutex_lock(&rdev->pm.mutex);
243
244 rdev->pm.active_crtcs = 0;
245 list_for_each_entry(connector,
246 &ddev->mode_config.connector_list, head) {
247 if (connector->encoder &&
248 connector->dpms != DRM_MODE_DPMS_OFF) {
249 radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
250 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
251 ++count;
252 }
253 }
254
255 if (count > 1) {
256 if (rdev->pm.state == PM_STATE_ACTIVE) {
257 cancel_delayed_work(&rdev->pm.idle_work);
258
259 rdev->pm.state = PM_STATE_PAUSED;
260 rdev->pm.planned_action = PM_ACTION_UPCLOCK;
261 if (rdev->pm.downclocked)
262 radeon_pm_set_clocks(rdev);
263
264 DRM_DEBUG("radeon: dynamic power management deactivated\n");
265 }
266 } else if (count == 1) {
267 /* TODO: Increase clocks if needed for current mode */
268
269 if (rdev->pm.state == PM_STATE_MINIMUM) {
270 rdev->pm.state = PM_STATE_ACTIVE;
271 rdev->pm.planned_action = PM_ACTION_UPCLOCK;
272 radeon_pm_set_clocks(rdev);
273
274 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
275 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
276 }
277 else if (rdev->pm.state == PM_STATE_PAUSED) {
278 rdev->pm.state = PM_STATE_ACTIVE;
279 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
280 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
281 DRM_DEBUG("radeon: dynamic power management activated\n");
282 }
283 }
284 else { /* count == 0 */
285 if (rdev->pm.state != PM_STATE_MINIMUM) {
286 cancel_delayed_work(&rdev->pm.idle_work);
287
288 rdev->pm.state = PM_STATE_MINIMUM;
289 rdev->pm.planned_action = PM_ACTION_MINIMUM;
290 radeon_pm_set_clocks(rdev);
291 }
292 }
293
294 mutex_unlock(&rdev->pm.mutex);
295}
296
297static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
298{
299 u32 stat_crtc1 = 0, stat_crtc2 = 0;
300 bool in_vbl = true;
301
302 if (ASIC_IS_AVIVO(rdev)) {
303 if (rdev->pm.active_crtcs & (1 << 0)) {
304 stat_crtc1 = RREG32(D1CRTC_STATUS);
305 if (!(stat_crtc1 & 1))
306 in_vbl = false;
307 }
308 if (rdev->pm.active_crtcs & (1 << 1)) {
309 stat_crtc2 = RREG32(D2CRTC_STATUS);
310 if (!(stat_crtc2 & 1))
311 in_vbl = false;
312 }
313 }
314 if (in_vbl == false)
315 DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
316 stat_crtc2, finish ? "exit" : "entry");
317 return in_vbl;
318}
319static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
320{
321 /*radeon_fence_wait_last(rdev);*/
322 switch (rdev->pm.planned_action) {
323 case PM_ACTION_UPCLOCK:
324 rdev->pm.downclocked = false;
325 break;
326 case PM_ACTION_DOWNCLOCK:
327 rdev->pm.downclocked = true;
328 break;
329 case PM_ACTION_MINIMUM:
330 break;
331 case PM_ACTION_NONE:
332 DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
333 break;
334 }
335
336 /* check if we are in vblank */
337 radeon_pm_debug_check_in_vbl(rdev, false);
338 radeon_set_power_state(rdev);
339 radeon_pm_debug_check_in_vbl(rdev, true);
340 rdev->pm.planned_action = PM_ACTION_NONE;
341}
342
343static void radeon_pm_set_clocks(struct radeon_device *rdev)
344{
345 radeon_get_power_state(rdev, rdev->pm.planned_action);
346 mutex_lock(&rdev->cp.mutex);
347
348 if (rdev->pm.active_crtcs & (1 << 0)) {
349 rdev->pm.req_vblank |= (1 << 0);
350 drm_vblank_get(rdev->ddev, 0);
351 }
352 if (rdev->pm.active_crtcs & (1 << 1)) {
353 rdev->pm.req_vblank |= (1 << 1);
354 drm_vblank_get(rdev->ddev, 1);
355 }
356 if (rdev->pm.active_crtcs)
357 wait_event_interruptible_timeout(
358 rdev->irq.vblank_queue, 0,
359 msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
360 if (rdev->pm.req_vblank & (1 << 0)) {
361 rdev->pm.req_vblank &= ~(1 << 0);
362 drm_vblank_put(rdev->ddev, 0);
363 }
364 if (rdev->pm.req_vblank & (1 << 1)) {
365 rdev->pm.req_vblank &= ~(1 << 1);
366 drm_vblank_put(rdev->ddev, 1);
367 }
368
369 radeon_pm_set_clocks_locked(rdev);
370 mutex_unlock(&rdev->cp.mutex);
371}
372
373static void radeon_pm_idle_work_handler(struct work_struct *work)
374{
375 struct radeon_device *rdev;
376 rdev = container_of(work, struct radeon_device,
377 pm.idle_work.work);
378
379 mutex_lock(&rdev->pm.mutex);
380 if (rdev->pm.state == PM_STATE_ACTIVE) {
381 unsigned long irq_flags;
382 int not_processed = 0;
383
384 read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
385 if (!list_empty(&rdev->fence_drv.emited)) {
386 struct list_head *ptr;
387 list_for_each(ptr, &rdev->fence_drv.emited) {
 388 /* count up to 3, that's enough info */
389 if (++not_processed >= 3)
390 break;
391 }
392 }
393 read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
394
395 if (not_processed >= 3) { /* should upclock */
396 if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
397 rdev->pm.planned_action = PM_ACTION_NONE;
398 } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
399 rdev->pm.downclocked) {
400 rdev->pm.planned_action =
401 PM_ACTION_UPCLOCK;
402 rdev->pm.action_timeout = jiffies +
403 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
404 }
405 } else if (not_processed == 0) { /* should downclock */
406 if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
407 rdev->pm.planned_action = PM_ACTION_NONE;
408 } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
409 !rdev->pm.downclocked) {
410 rdev->pm.planned_action =
411 PM_ACTION_DOWNCLOCK;
412 rdev->pm.action_timeout = jiffies +
413 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
414 }
415 }
416
417 if (rdev->pm.planned_action != PM_ACTION_NONE &&
418 jiffies > rdev->pm.action_timeout) {
419 radeon_pm_set_clocks(rdev);
420 }
421 }
422 mutex_unlock(&rdev->pm.mutex);
423
424 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
425 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
426}
427
36/* 428/*
37 * Debugfs info 429 * Debugfs info
38 */ 430 */
@@ -44,11 +436,14 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
44 struct drm_device *dev = node->minor->dev; 436 struct drm_device *dev = node->minor->dev;
45 struct radeon_device *rdev = dev->dev_private; 437 struct radeon_device *rdev = dev->dev_private;
46 438
439 seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
47 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); 440 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
48 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 441 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
49 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); 442 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
50 if (rdev->asic->get_memory_clock) 443 if (rdev->asic->get_memory_clock)
51 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 444 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
445 if (rdev->asic->get_pcie_lanes)
446 seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
52 447
53 return 0; 448 return 0;
54} 449}
@@ -58,7 +453,7 @@ static struct drm_info_list radeon_pm_info_list[] = {
58}; 453};
59#endif 454#endif
60 455
61int radeon_debugfs_pm_init(struct radeon_device *rdev) 456static int radeon_debugfs_pm_init(struct radeon_device *rdev)
62{ 457{
63#if defined(CONFIG_DEBUG_FS) 458#if defined(CONFIG_DEBUG_FS)
64 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list)); 459 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
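
For reference, a minimal standalone sketch of the CRTC-count driven state decision that radeon_pm_compute_clocks() implements above. The enum values and helper below are simplified stand-ins, not the driver's types; the real handler additionally queues or cancels the idle work and records a planned up/down-clock action before reclocking.

/* Minimal model of the dynamic PM state decision; all names are hypothetical. */
#include <stdio.h>

enum pm_state { PM_DISABLED, PM_MINIMUM, PM_PAUSED, PM_ACTIVE };

/* Pick the next state purely from the number of active CRTCs. */
static enum pm_state pm_next_state(enum pm_state cur, int active_crtcs)
{
	if (cur == PM_DISABLED)
		return PM_DISABLED;	/* dynamic PM not enabled */
	if (active_crtcs > 1)
		return PM_PAUSED;	/* multi-head: upclock, stop reclocking */
	if (active_crtcs == 1)
		return PM_ACTIVE;	/* single head: reclock when GPU is idle */
	return PM_MINIMUM;		/* no heads: drop to lowest clocks */
}

int main(void)
{
	enum pm_state state = PM_PAUSED;
	int crtcs;

	for (crtcs = 2; crtcs >= 0; crtcs--) {
		state = pm_next_state(state, crtcs);
		printf("active crtcs=%d -> state=%d\n", crtcs, state);
	}
	return 0;
}
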
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 6d0a009dd4a1..5c0dc082d330 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -54,7 +54,7 @@
54#include "r300_reg.h" 54#include "r300_reg.h"
55#include "r500_reg.h" 55#include "r500_reg.h"
56#include "r600_reg.h" 56#include "r600_reg.h"
57 57#include "evergreen_reg.h"
58 58
59#define RADEON_MC_AGP_LOCATION 0x014c 59#define RADEON_MC_AGP_LOCATION 0x014c
60#define RADEON_MC_AGP_START_MASK 0x0000FFFF 60#define RADEON_MC_AGP_START_MASK 0x0000FFFF
@@ -1060,32 +1060,38 @@
1060 1060
1061 /* Multimedia I2C bus */ 1061 /* Multimedia I2C bus */
1062#define RADEON_I2C_CNTL_0 0x0090 1062#define RADEON_I2C_CNTL_0 0x0090
1063#define RADEON_I2C_DONE (1 << 0) 1063# define RADEON_I2C_DONE (1 << 0)
1064#define RADEON_I2C_NACK (1 << 1) 1064# define RADEON_I2C_NACK (1 << 1)
1065#define RADEON_I2C_HALT (1 << 2) 1065# define RADEON_I2C_HALT (1 << 2)
1066#define RADEON_I2C_SOFT_RST (1 << 5) 1066# define RADEON_I2C_SOFT_RST (1 << 5)
1067#define RADEON_I2C_DRIVE_EN (1 << 6) 1067# define RADEON_I2C_DRIVE_EN (1 << 6)
1068#define RADEON_I2C_DRIVE_SEL (1 << 7) 1068# define RADEON_I2C_DRIVE_SEL (1 << 7)
1069#define RADEON_I2C_START (1 << 8) 1069# define RADEON_I2C_START (1 << 8)
1070#define RADEON_I2C_STOP (1 << 9) 1070# define RADEON_I2C_STOP (1 << 9)
1071#define RADEON_I2C_RECEIVE (1 << 10) 1071# define RADEON_I2C_RECEIVE (1 << 10)
1072#define RADEON_I2C_ABORT (1 << 11) 1072# define RADEON_I2C_ABORT (1 << 11)
1073#define RADEON_I2C_GO (1 << 12) 1073# define RADEON_I2C_GO (1 << 12)
1074#define RADEON_I2C_PRESCALE_SHIFT 16 1074# define RADEON_I2C_PRESCALE_SHIFT 16
1075#define RADEON_I2C_CNTL_1 0x0094 1075#define RADEON_I2C_CNTL_1 0x0094
1076#define RADEON_I2C_DATA_COUNT_SHIFT 0 1076# define RADEON_I2C_DATA_COUNT_SHIFT 0
1077#define RADEON_I2C_ADDR_COUNT_SHIFT 4 1077# define RADEON_I2C_ADDR_COUNT_SHIFT 4
1078#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8 1078# define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
1079#define RADEON_I2C_SEL (1 << 16) 1079# define RADEON_I2C_SEL (1 << 16)
1080#define RADEON_I2C_EN (1 << 17) 1080# define RADEON_I2C_EN (1 << 17)
1081#define RADEON_I2C_TIME_LIMIT_SHIFT 24 1081# define RADEON_I2C_TIME_LIMIT_SHIFT 24
1082#define RADEON_I2C_DATA 0x0098 1082#define RADEON_I2C_DATA 0x0098
1083 1083
1084#define RADEON_DVI_I2C_CNTL_0 0x02e0 1084#define RADEON_DVI_I2C_CNTL_0 0x02e0
1085# define R200_DVI_I2C_PIN_SEL(x) ((x) << 3) 1085# define R200_DVI_I2C_PIN_SEL(x) ((x) << 3)
1086# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */ 1086# define R200_SEL_DDC1 0 /* depends on asic */
1087# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */ 1087# define R200_SEL_DDC2 1 /* depends on asic */
1088# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */ 1088# define R200_SEL_DDC3 2 /* depends on asic */
1089# define RADEON_SW_WANTS_TO_USE_DVI_I2C (1 << 13)
1090# define RADEON_SW_CAN_USE_DVI_I2C (1 << 13)
1091# define RADEON_SW_DONE_USING_DVI_I2C (1 << 14)
1092# define RADEON_HW_NEEDS_DVI_I2C (1 << 14)
1093# define RADEON_ABORT_HW_DVI_I2C (1 << 15)
1094# define RADEON_HW_USING_DVI_I2C (1 << 15)
1089#define RADEON_DVI_I2C_CNTL_1 0x02e4 1095#define RADEON_DVI_I2C_CNTL_1 0x02e4
1090#define RADEON_DVI_I2C_DATA 0x02e8 1096#define RADEON_DVI_I2C_DATA 0x02e8
1091 1097
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 6579eb4c1f28..e50513a62735 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -34,6 +34,36 @@
34 34
35int radeon_debugfs_ib_init(struct radeon_device *rdev); 35int radeon_debugfs_ib_init(struct radeon_device *rdev);
36 36
37void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
38{
39 struct radeon_ib *ib, *n;
40
41 list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
42 list_del(&ib->list);
43 vfree(ib->ptr);
44 kfree(ib);
45 }
46}
47
48void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
49{
50 struct radeon_ib *bib;
51
52 bib = kmalloc(sizeof(*bib), GFP_KERNEL);
53 if (bib == NULL)
54 return;
55 bib->ptr = vmalloc(ib->length_dw * 4);
56 if (bib->ptr == NULL) {
57 kfree(bib);
58 return;
59 }
60 memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
61 bib->length_dw = ib->length_dw;
62 mutex_lock(&rdev->ib_pool.mutex);
63 list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
64 mutex_unlock(&rdev->ib_pool.mutex);
65}
66
37/* 67/*
38 * IB. 68 * IB.
39 */ 69 */
@@ -142,6 +172,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
142 172
143 if (rdev->ib_pool.robj) 173 if (rdev->ib_pool.robj)
144 return 0; 174 return 0;
175 INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
145 /* Allocate 1M object buffer */ 176 /* Allocate 1M object buffer */
146 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, 177 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
147 true, RADEON_GEM_DOMAIN_GTT, 178 true, RADEON_GEM_DOMAIN_GTT,
@@ -192,6 +223,8 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
192 return; 223 return;
193 } 224 }
194 mutex_lock(&rdev->ib_pool.mutex); 225 mutex_lock(&rdev->ib_pool.mutex);
226 radeon_ib_bogus_cleanup(rdev);
227
195 if (rdev->ib_pool.robj) { 228 if (rdev->ib_pool.robj) {
196 r = radeon_bo_reserve(rdev->ib_pool.robj, false); 229 r = radeon_bo_reserve(rdev->ib_pool.robj, false);
197 if (likely(r == 0)) { 230 if (likely(r == 0)) {
@@ -349,15 +382,49 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
349 return 0; 382 return 0;
350} 383}
351 384
385static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
386{
387 struct drm_info_node *node = (struct drm_info_node *) m->private;
388 struct radeon_device *rdev = node->info_ent->data;
389 struct radeon_ib *ib;
390 unsigned i;
391
392 mutex_lock(&rdev->ib_pool.mutex);
393 if (list_empty(&rdev->ib_pool.bogus_ib)) {
394 mutex_unlock(&rdev->ib_pool.mutex);
395 seq_printf(m, "no bogus IB recorded\n");
396 return 0;
397 }
398 ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
399 list_del_init(&ib->list);
400 mutex_unlock(&rdev->ib_pool.mutex);
401 seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
402 for (i = 0; i < ib->length_dw; i++) {
403 seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
404 }
405 vfree(ib->ptr);
406 kfree(ib);
407 return 0;
408}
409
352static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE]; 410static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
353static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32]; 411static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
412
413static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
414 {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
415};
354#endif 416#endif
355 417
356int radeon_debugfs_ib_init(struct radeon_device *rdev) 418int radeon_debugfs_ib_init(struct radeon_device *rdev)
357{ 419{
358#if defined(CONFIG_DEBUG_FS) 420#if defined(CONFIG_DEBUG_FS)
359 unsigned i; 421 unsigned i;
422 int r;
360 423
424 radeon_debugfs_ib_bogus_info_list[0].data = rdev;
425 r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
426 if (r)
427 return r;
361 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { 428 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
362 sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i); 429 sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
363 radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i]; 430 radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
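
For reference, a minimal userspace sketch of the snapshot-and-dump pattern used above by radeon_ib_bogus_add() and radeon_debugfs_ib_bogus_info(): keep a private copy of a suspect buffer on a list, then print and discard one copy per read. The struct and function names below are hypothetical, not the driver's API.

/* Snapshot a buffer for later one-shot dumping; all names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct snapshot {
	struct snapshot *next;
	size_t len;			/* length in dwords */
	unsigned int data[];
};

static struct snapshot *snapshots;

/* Keep a private copy; the original buffer may be reused immediately. */
static void snapshot_add(const unsigned int *buf, size_t len)
{
	struct snapshot *s = malloc(sizeof(*s) + len * sizeof(*buf));

	if (!s)
		return;	/* best effort: dropping a snapshot is not fatal */
	memcpy(s->data, buf, len * sizeof(*buf));
	s->len = len;
	s->next = snapshots;
	snapshots = s;
}

/* Dump and free one snapshot, mirroring the one-shot debugfs read. */
static void snapshot_dump_one(void)
{
	struct snapshot *s = snapshots;
	size_t i;

	if (!s) {
		printf("no snapshot recorded\n");
		return;
	}
	snapshots = s->next;
	printf("snapshot of %zu dwords\n", s->len);
	for (i = 0; i < s->len; i++)
		printf("[%05zu]=0x%08X\n", i, s->data[i]);
	free(s);
}

int main(void)
{
	unsigned int ib[] = { 0xC0012300, 0x00000010, 0xDEADBEEF };

	snapshot_add(ib, 3);
	snapshot_dump_one();
	snapshot_dump_one();
	return 0;
}
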
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 067167cb39ca..3c32f840dcd2 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -29,6 +29,7 @@
29 29
30#include "drmP.h" 30#include "drmP.h"
31#include "drm.h" 31#include "drm.h"
32#include "drm_buffer.h"
32#include "drm_sarea.h" 33#include "drm_sarea.h"
33#include "radeon_drm.h" 34#include "radeon_drm.h"
34#include "radeon_drv.h" 35#include "radeon_drv.h"
@@ -91,21 +92,27 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
91static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * 92static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
92 dev_priv, 93 dev_priv,
93 struct drm_file *file_priv, 94 struct drm_file *file_priv,
94 int id, u32 *data) 95 int id, struct drm_buffer *buf)
95{ 96{
97 u32 *data;
96 switch (id) { 98 switch (id) {
97 99
98 case RADEON_EMIT_PP_MISC: 100 case RADEON_EMIT_PP_MISC:
99 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 101 data = drm_buffer_pointer_to_dword(buf,
100 &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { 102 (RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4);
103
104 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
101 DRM_ERROR("Invalid depth buffer offset\n"); 105 DRM_ERROR("Invalid depth buffer offset\n");
102 return -EINVAL; 106 return -EINVAL;
103 } 107 }
108 dev_priv->have_z_offset = 1;
104 break; 109 break;
105 110
106 case RADEON_EMIT_PP_CNTL: 111 case RADEON_EMIT_PP_CNTL:
107 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 112 data = drm_buffer_pointer_to_dword(buf,
108 &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { 113 (RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4);
114
115 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
109 DRM_ERROR("Invalid colour buffer offset\n"); 116 DRM_ERROR("Invalid colour buffer offset\n");
110 return -EINVAL; 117 return -EINVAL;
111 } 118 }
@@ -117,8 +124,8 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
117 case R200_EMIT_PP_TXOFFSET_3: 124 case R200_EMIT_PP_TXOFFSET_3:
118 case R200_EMIT_PP_TXOFFSET_4: 125 case R200_EMIT_PP_TXOFFSET_4:
119 case R200_EMIT_PP_TXOFFSET_5: 126 case R200_EMIT_PP_TXOFFSET_5:
120 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 127 data = drm_buffer_pointer_to_dword(buf, 0);
121 &data[0])) { 128 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
122 DRM_ERROR("Invalid R200 texture offset\n"); 129 DRM_ERROR("Invalid R200 texture offset\n");
123 return -EINVAL; 130 return -EINVAL;
124 } 131 }
@@ -127,8 +134,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
127 case RADEON_EMIT_PP_TXFILTER_0: 134 case RADEON_EMIT_PP_TXFILTER_0:
128 case RADEON_EMIT_PP_TXFILTER_1: 135 case RADEON_EMIT_PP_TXFILTER_1:
129 case RADEON_EMIT_PP_TXFILTER_2: 136 case RADEON_EMIT_PP_TXFILTER_2:
130 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 137 data = drm_buffer_pointer_to_dword(buf,
131 &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { 138 (RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4);
139 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
132 DRM_ERROR("Invalid R100 texture offset\n"); 140 DRM_ERROR("Invalid R100 texture offset\n");
133 return -EINVAL; 141 return -EINVAL;
134 } 142 }
@@ -142,9 +150,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
142 case R200_EMIT_PP_CUBIC_OFFSETS_5:{ 150 case R200_EMIT_PP_CUBIC_OFFSETS_5:{
143 int i; 151 int i;
144 for (i = 0; i < 5; i++) { 152 for (i = 0; i < 5; i++) {
153 data = drm_buffer_pointer_to_dword(buf, i);
145 if (radeon_check_and_fixup_offset(dev_priv, 154 if (radeon_check_and_fixup_offset(dev_priv,
146 file_priv, 155 file_priv,
147 &data[i])) { 156 data)) {
148 DRM_ERROR 157 DRM_ERROR
149 ("Invalid R200 cubic texture offset\n"); 158 ("Invalid R200 cubic texture offset\n");
150 return -EINVAL; 159 return -EINVAL;
@@ -158,9 +167,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
158 case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{ 167 case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
159 int i; 168 int i;
160 for (i = 0; i < 5; i++) { 169 for (i = 0; i < 5; i++) {
170 data = drm_buffer_pointer_to_dword(buf, i);
161 if (radeon_check_and_fixup_offset(dev_priv, 171 if (radeon_check_and_fixup_offset(dev_priv,
162 file_priv, 172 file_priv,
163 &data[i])) { 173 data)) {
164 DRM_ERROR 174 DRM_ERROR
165 ("Invalid R100 cubic texture offset\n"); 175 ("Invalid R100 cubic texture offset\n");
166 return -EINVAL; 176 return -EINVAL;
@@ -269,23 +279,24 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
269 cmdbuf, 279 cmdbuf,
270 unsigned int *cmdsz) 280 unsigned int *cmdsz)
271{ 281{
272 u32 *cmd = (u32 *) cmdbuf->buf; 282 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
273 u32 offset, narrays; 283 u32 offset, narrays;
274 int count, i, k; 284 int count, i, k;
275 285
276 *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16); 286 count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16);
287 *cmdsz = 2 + count;
277 288
278 if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) { 289 if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) {
279 DRM_ERROR("Not a type 3 packet\n"); 290 DRM_ERROR("Not a type 3 packet\n");
280 return -EINVAL; 291 return -EINVAL;
281 } 292 }
282 293
283 if (4 * *cmdsz > cmdbuf->bufsz) { 294 if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) {
284 DRM_ERROR("Packet size larger than size of data provided\n"); 295 DRM_ERROR("Packet size larger than size of data provided\n");
285 return -EINVAL; 296 return -EINVAL;
286 } 297 }
287 298
288 switch(cmd[0] & 0xff00) { 299 switch (*cmd & 0xff00) {
289 /* XXX Are there old drivers needing other packets? */ 300 /* XXX Are there old drivers needing other packets? */
290 301
291 case RADEON_3D_DRAW_IMMD: 302 case RADEON_3D_DRAW_IMMD:
@@ -312,7 +323,6 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
312 break; 323 break;
313 324
314 case RADEON_3D_LOAD_VBPNTR: 325 case RADEON_3D_LOAD_VBPNTR:
315 count = (cmd[0] >> 16) & 0x3fff;
316 326
317 if (count > 18) { /* 12 arrays max */ 327 if (count > 18) { /* 12 arrays max */
318 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", 328 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
@@ -321,13 +331,16 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
321 } 331 }
322 332
323 /* carefully check packet contents */ 333 /* carefully check packet contents */
324 narrays = cmd[1] & ~0xc000; 334 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
335
336 narrays = *cmd & ~0xc000;
325 k = 0; 337 k = 0;
326 i = 2; 338 i = 2;
327 while ((k < narrays) && (i < (count + 2))) { 339 while ((k < narrays) && (i < (count + 2))) {
328 i++; /* skip attribute field */ 340 i++; /* skip attribute field */
341 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
329 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 342 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
330 &cmd[i])) { 343 cmd)) {
331 DRM_ERROR 344 DRM_ERROR
332 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", 345 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
333 k, i); 346 k, i);
@@ -338,8 +351,10 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
338 if (k == narrays) 351 if (k == narrays)
339 break; 352 break;
340 /* have one more to process, they come in pairs */ 353 /* have one more to process, they come in pairs */
354 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
355
341 if (radeon_check_and_fixup_offset(dev_priv, 356 if (radeon_check_and_fixup_offset(dev_priv,
342 file_priv, &cmd[i])) 357 file_priv, cmd))
343 { 358 {
344 DRM_ERROR 359 DRM_ERROR
345 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", 360 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
@@ -363,7 +378,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
363 DRM_ERROR("Invalid 3d packet for r200-class chip\n"); 378 DRM_ERROR("Invalid 3d packet for r200-class chip\n");
364 return -EINVAL; 379 return -EINVAL;
365 } 380 }
366 if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) { 381
382 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
383 if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
367 DRM_ERROR("Invalid rndr_gen_indx offset\n"); 384 DRM_ERROR("Invalid rndr_gen_indx offset\n");
368 return -EINVAL; 385 return -EINVAL;
369 } 386 }
@@ -374,12 +391,15 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
374 DRM_ERROR("Invalid 3d packet for r100-class chip\n"); 391 DRM_ERROR("Invalid 3d packet for r100-class chip\n");
375 return -EINVAL; 392 return -EINVAL;
376 } 393 }
377 if ((cmd[1] & 0x8000ffff) != 0x80000810) { 394
378 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); 395 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
396 if ((*cmd & 0x8000ffff) != 0x80000810) {
397 DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd);
379 return -EINVAL; 398 return -EINVAL;
380 } 399 }
381 if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) { 400 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
382 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); 401 if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
402 DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd);
383 return -EINVAL; 403 return -EINVAL;
384 } 404 }
385 break; 405 break;
@@ -388,31 +408,34 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
388 case RADEON_CNTL_PAINT_MULTI: 408 case RADEON_CNTL_PAINT_MULTI:
389 case RADEON_CNTL_BITBLT_MULTI: 409 case RADEON_CNTL_BITBLT_MULTI:
390 /* MSB of opcode: next DWORD GUI_CNTL */ 410 /* MSB of opcode: next DWORD GUI_CNTL */
391 if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL 411 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
412 if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
392 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 413 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
393 offset = cmd[2] << 10; 414 u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
415 offset = *cmd2 << 10;
394 if (radeon_check_and_fixup_offset 416 if (radeon_check_and_fixup_offset
395 (dev_priv, file_priv, &offset)) { 417 (dev_priv, file_priv, &offset)) {
396 DRM_ERROR("Invalid first packet offset\n"); 418 DRM_ERROR("Invalid first packet offset\n");
397 return -EINVAL; 419 return -EINVAL;
398 } 420 }
399 cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10; 421 *cmd2 = (*cmd2 & 0xffc00000) | offset >> 10;
400 } 422 }
401 423
402 if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && 424 if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
403 (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 425 (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
404 offset = cmd[3] << 10; 426 u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
 427 offset = *cmd3 << 10;
405 if (radeon_check_and_fixup_offset 428 if (radeon_check_and_fixup_offset
406 (dev_priv, file_priv, &offset)) { 429 (dev_priv, file_priv, &offset)) {
407 DRM_ERROR("Invalid second packet offset\n"); 430 DRM_ERROR("Invalid second packet offset\n");
408 return -EINVAL; 431 return -EINVAL;
409 } 432 }
410 cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10; 433 *cmd3 = (*cmd3 & 0xffc00000) | offset >> 10;
411 } 434 }
412 break; 435 break;
413 436
414 default: 437 default:
415 DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00); 438 DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00);
416 return -EINVAL; 439 return -EINVAL;
417 } 440 }
418 441
@@ -876,6 +899,11 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
876 if (tmp & RADEON_BACK) 899 if (tmp & RADEON_BACK)
877 flags |= RADEON_FRONT; 900 flags |= RADEON_FRONT;
878 } 901 }
902 if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
903 if (!dev_priv->have_z_offset)
904 printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
905 flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
906 }
879 907
880 if (flags & (RADEON_FRONT | RADEON_BACK)) { 908 if (flags & (RADEON_FRONT | RADEON_BACK)) {
881 909
@@ -2611,7 +2639,6 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
2611{ 2639{
2612 int id = (int)header.packet.packet_id; 2640 int id = (int)header.packet.packet_id;
2613 int sz, reg; 2641 int sz, reg;
2614 int *data = (int *)cmdbuf->buf;
2615 RING_LOCALS; 2642 RING_LOCALS;
2616 2643
2617 if (id >= RADEON_MAX_STATE_PACKETS) 2644 if (id >= RADEON_MAX_STATE_PACKETS)
@@ -2620,23 +2647,22 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
2620 sz = packet[id].len; 2647 sz = packet[id].len;
2621 reg = packet[id].start; 2648 reg = packet[id].start;
2622 2649
2623 if (sz * sizeof(int) > cmdbuf->bufsz) { 2650 if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
2624 DRM_ERROR("Packet size provided larger than data provided\n"); 2651 DRM_ERROR("Packet size provided larger than data provided\n");
2625 return -EINVAL; 2652 return -EINVAL;
2626 } 2653 }
2627 2654
2628 if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) { 2655 if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,
2656 cmdbuf->buffer)) {
2629 DRM_ERROR("Packet verification failed\n"); 2657 DRM_ERROR("Packet verification failed\n");
2630 return -EINVAL; 2658 return -EINVAL;
2631 } 2659 }
2632 2660
2633 BEGIN_RING(sz + 1); 2661 BEGIN_RING(sz + 1);
2634 OUT_RING(CP_PACKET0(reg, (sz - 1))); 2662 OUT_RING(CP_PACKET0(reg, (sz - 1)));
2635 OUT_RING_TABLE(data, sz); 2663 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2636 ADVANCE_RING(); 2664 ADVANCE_RING();
2637 2665
2638 cmdbuf->buf += sz * sizeof(int);
2639 cmdbuf->bufsz -= sz * sizeof(int);
2640 return 0; 2666 return 0;
2641} 2667}
2642 2668
@@ -2653,10 +2679,8 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
2653 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0)); 2679 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
2654 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); 2680 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2655 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1)); 2681 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
2656 OUT_RING_TABLE(cmdbuf->buf, sz); 2682 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2657 ADVANCE_RING(); 2683 ADVANCE_RING();
2658 cmdbuf->buf += sz * sizeof(int);
2659 cmdbuf->bufsz -= sz * sizeof(int);
2660 return 0; 2684 return 0;
2661} 2685}
2662 2686
@@ -2675,10 +2699,8 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
2675 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0)); 2699 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
2676 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); 2700 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2677 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1)); 2701 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
2678 OUT_RING_TABLE(cmdbuf->buf, sz); 2702 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2679 ADVANCE_RING(); 2703 ADVANCE_RING();
2680 cmdbuf->buf += sz * sizeof(int);
2681 cmdbuf->bufsz -= sz * sizeof(int);
2682 return 0; 2704 return 0;
2683} 2705}
2684 2706
@@ -2696,11 +2718,9 @@ static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
2696 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0)); 2718 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
2697 OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); 2719 OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
2698 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1))); 2720 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
2699 OUT_RING_TABLE(cmdbuf->buf, sz); 2721 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2700 ADVANCE_RING(); 2722 ADVANCE_RING();
2701 2723
2702 cmdbuf->buf += sz * sizeof(int);
2703 cmdbuf->bufsz -= sz * sizeof(int);
2704 return 0; 2724 return 0;
2705} 2725}
2706 2726
@@ -2714,7 +2734,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
2714 2734
2715 if (!sz) 2735 if (!sz)
2716 return 0; 2736 return 0;
2717 if (sz * 4 > cmdbuf->bufsz) 2737 if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
2718 return -EINVAL; 2738 return -EINVAL;
2719 2739
2720 BEGIN_RING(5 + sz); 2740 BEGIN_RING(5 + sz);
@@ -2722,11 +2742,9 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
2722 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0)); 2742 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
2723 OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); 2743 OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
2724 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1))); 2744 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
2725 OUT_RING_TABLE(cmdbuf->buf, sz); 2745 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2726 ADVANCE_RING(); 2746 ADVANCE_RING();
2727 2747
2728 cmdbuf->buf += sz * sizeof(int);
2729 cmdbuf->bufsz -= sz * sizeof(int);
2730 return 0; 2748 return 0;
2731} 2749}
2732 2750
@@ -2748,11 +2766,9 @@ static int radeon_emit_packet3(struct drm_device * dev,
2748 } 2766 }
2749 2767
2750 BEGIN_RING(cmdsz); 2768 BEGIN_RING(cmdsz);
2751 OUT_RING_TABLE(cmdbuf->buf, cmdsz); 2769 OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
2752 ADVANCE_RING(); 2770 ADVANCE_RING();
2753 2771
2754 cmdbuf->buf += cmdsz * 4;
2755 cmdbuf->bufsz -= cmdsz * 4;
2756 return 0; 2772 return 0;
2757} 2773}
2758 2774
@@ -2805,16 +2821,16 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
2805 } 2821 }
2806 2822
2807 BEGIN_RING(cmdsz); 2823 BEGIN_RING(cmdsz);
2808 OUT_RING_TABLE(cmdbuf->buf, cmdsz); 2824 OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
2809 ADVANCE_RING(); 2825 ADVANCE_RING();
2810 2826
2811 } while (++i < cmdbuf->nbox); 2827 } while (++i < cmdbuf->nbox);
2812 if (cmdbuf->nbox == 1) 2828 if (cmdbuf->nbox == 1)
2813 cmdbuf->nbox = 0; 2829 cmdbuf->nbox = 0;
2814 2830
2831 return 0;
2815 out: 2832 out:
2816 cmdbuf->buf += cmdsz * 4; 2833 drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);
2817 cmdbuf->bufsz -= cmdsz * 4;
2818 return 0; 2834 return 0;
2819} 2835}
2820 2836
@@ -2847,16 +2863,16 @@ static int radeon_emit_wait(struct drm_device * dev, int flags)
2847 return 0; 2863 return 0;
2848} 2864}
2849 2865
2850static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) 2866static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
2867 struct drm_file *file_priv)
2851{ 2868{
2852 drm_radeon_private_t *dev_priv = dev->dev_private; 2869 drm_radeon_private_t *dev_priv = dev->dev_private;
2853 struct drm_device_dma *dma = dev->dma; 2870 struct drm_device_dma *dma = dev->dma;
2854 struct drm_buf *buf = NULL; 2871 struct drm_buf *buf = NULL;
2872 drm_radeon_cmd_header_t stack_header;
2855 int idx; 2873 int idx;
2856 drm_radeon_kcmd_buffer_t *cmdbuf = data; 2874 drm_radeon_kcmd_buffer_t *cmdbuf = data;
2857 drm_radeon_cmd_header_t header; 2875 int orig_nbox;
2858 int orig_nbox, orig_bufsz;
2859 char *kbuf = NULL;
2860 2876
2861 LOCK_TEST_WITH_RETURN(dev, file_priv); 2877 LOCK_TEST_WITH_RETURN(dev, file_priv);
2862 2878
@@ -2871,17 +2887,16 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2871 * races between checking values and using those values in other code, 2887 * races between checking values and using those values in other code,
2872 * and simply to avoid a lot of function calls to copy in data. 2888 * and simply to avoid a lot of function calls to copy in data.
2873 */ 2889 */
2874 orig_bufsz = cmdbuf->bufsz; 2890 if (cmdbuf->bufsz != 0) {
2875 if (orig_bufsz != 0) { 2891 int rv;
2876 kbuf = kmalloc(cmdbuf->bufsz, GFP_KERNEL); 2892 void __user *buffer = cmdbuf->buffer;
2877 if (kbuf == NULL) 2893 rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz);
2878 return -ENOMEM; 2894 if (rv)
2879 if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf, 2895 return rv;
2880 cmdbuf->bufsz)) { 2896 rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
2881 kfree(kbuf); 2897 cmdbuf->bufsz);
2882 return -EFAULT; 2898 if (rv)
2883 } 2899 return rv;
2884 cmdbuf->buf = kbuf;
2885 } 2900 }
2886 2901
2887 orig_nbox = cmdbuf->nbox; 2902 orig_nbox = cmdbuf->nbox;
@@ -2890,24 +2905,24 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2890 int temp; 2905 int temp;
2891 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); 2906 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
2892 2907
2893 if (orig_bufsz != 0) 2908 if (cmdbuf->bufsz != 0)
2894 kfree(kbuf); 2909 drm_buffer_free(cmdbuf->buffer);
2895 2910
2896 return temp; 2911 return temp;
2897 } 2912 }
2898 2913
2899 /* microcode_version != r300 */ 2914 /* microcode_version != r300 */
2900 while (cmdbuf->bufsz >= sizeof(header)) { 2915 while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {
2901 2916
2902 header.i = *(int *)cmdbuf->buf; 2917 drm_radeon_cmd_header_t *header;
2903 cmdbuf->buf += sizeof(header); 2918 header = drm_buffer_read_object(cmdbuf->buffer,
2904 cmdbuf->bufsz -= sizeof(header); 2919 sizeof(stack_header), &stack_header);
2905 2920
2906 switch (header.header.cmd_type) { 2921 switch (header->header.cmd_type) {
2907 case RADEON_CMD_PACKET: 2922 case RADEON_CMD_PACKET:
2908 DRM_DEBUG("RADEON_CMD_PACKET\n"); 2923 DRM_DEBUG("RADEON_CMD_PACKET\n");
2909 if (radeon_emit_packets 2924 if (radeon_emit_packets
2910 (dev_priv, file_priv, header, cmdbuf)) { 2925 (dev_priv, file_priv, *header, cmdbuf)) {
2911 DRM_ERROR("radeon_emit_packets failed\n"); 2926 DRM_ERROR("radeon_emit_packets failed\n");
2912 goto err; 2927 goto err;
2913 } 2928 }
@@ -2915,7 +2930,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2915 2930
2916 case RADEON_CMD_SCALARS: 2931 case RADEON_CMD_SCALARS:
2917 DRM_DEBUG("RADEON_CMD_SCALARS\n"); 2932 DRM_DEBUG("RADEON_CMD_SCALARS\n");
2918 if (radeon_emit_scalars(dev_priv, header, cmdbuf)) { 2933 if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
2919 DRM_ERROR("radeon_emit_scalars failed\n"); 2934 DRM_ERROR("radeon_emit_scalars failed\n");
2920 goto err; 2935 goto err;
2921 } 2936 }
@@ -2923,7 +2938,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2923 2938
2924 case RADEON_CMD_VECTORS: 2939 case RADEON_CMD_VECTORS:
2925 DRM_DEBUG("RADEON_CMD_VECTORS\n"); 2940 DRM_DEBUG("RADEON_CMD_VECTORS\n");
2926 if (radeon_emit_vectors(dev_priv, header, cmdbuf)) { 2941 if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
2927 DRM_ERROR("radeon_emit_vectors failed\n"); 2942 DRM_ERROR("radeon_emit_vectors failed\n");
2928 goto err; 2943 goto err;
2929 } 2944 }
@@ -2931,7 +2946,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2931 2946
2932 case RADEON_CMD_DMA_DISCARD: 2947 case RADEON_CMD_DMA_DISCARD:
2933 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); 2948 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
2934 idx = header.dma.buf_idx; 2949 idx = header->dma.buf_idx;
2935 if (idx < 0 || idx >= dma->buf_count) { 2950 if (idx < 0 || idx >= dma->buf_count) {
2936 DRM_ERROR("buffer index %d (of %d max)\n", 2951 DRM_ERROR("buffer index %d (of %d max)\n",
2937 idx, dma->buf_count - 1); 2952 idx, dma->buf_count - 1);
@@ -2968,7 +2983,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2968 2983
2969 case RADEON_CMD_SCALARS2: 2984 case RADEON_CMD_SCALARS2:
2970 DRM_DEBUG("RADEON_CMD_SCALARS2\n"); 2985 DRM_DEBUG("RADEON_CMD_SCALARS2\n");
2971 if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) { 2986 if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
2972 DRM_ERROR("radeon_emit_scalars2 failed\n"); 2987 DRM_ERROR("radeon_emit_scalars2 failed\n");
2973 goto err; 2988 goto err;
2974 } 2989 }
@@ -2976,37 +2991,37 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2976 2991
2977 case RADEON_CMD_WAIT: 2992 case RADEON_CMD_WAIT:
2978 DRM_DEBUG("RADEON_CMD_WAIT\n"); 2993 DRM_DEBUG("RADEON_CMD_WAIT\n");
2979 if (radeon_emit_wait(dev, header.wait.flags)) { 2994 if (radeon_emit_wait(dev, header->wait.flags)) {
2980 DRM_ERROR("radeon_emit_wait failed\n"); 2995 DRM_ERROR("radeon_emit_wait failed\n");
2981 goto err; 2996 goto err;
2982 } 2997 }
2983 break; 2998 break;
2984 case RADEON_CMD_VECLINEAR: 2999 case RADEON_CMD_VECLINEAR:
2985 DRM_DEBUG("RADEON_CMD_VECLINEAR\n"); 3000 DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
2986 if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) { 3001 if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
2987 DRM_ERROR("radeon_emit_veclinear failed\n"); 3002 DRM_ERROR("radeon_emit_veclinear failed\n");
2988 goto err; 3003 goto err;
2989 } 3004 }
2990 break; 3005 break;
2991 3006
2992 default: 3007 default:
2993 DRM_ERROR("bad cmd_type %d at %p\n", 3008 DRM_ERROR("bad cmd_type %d at byte %d\n",
2994 header.header.cmd_type, 3009 header->header.cmd_type,
2995 cmdbuf->buf - sizeof(header)); 3010 cmdbuf->buffer->iterator);
2996 goto err; 3011 goto err;
2997 } 3012 }
2998 } 3013 }
2999 3014
3000 if (orig_bufsz != 0) 3015 if (cmdbuf->bufsz != 0)
3001 kfree(kbuf); 3016 drm_buffer_free(cmdbuf->buffer);
3002 3017
3003 DRM_DEBUG("DONE\n"); 3018 DRM_DEBUG("DONE\n");
3004 COMMIT_RING(); 3019 COMMIT_RING();
3005 return 0; 3020 return 0;
3006 3021
3007 err: 3022 err:
3008 if (orig_bufsz != 0) 3023 if (cmdbuf->bufsz != 0)
3009 kfree(kbuf); 3024 drm_buffer_free(cmdbuf->buffer);
3010 return -EINVAL; 3025 return -EINVAL;
3011} 3026}
3012 3027
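
For reference, a minimal sketch of the streaming read-header/dispatch/advance loop that the drm_buffer conversion gives radeon_cp_cmdbuf() above. The cursor type and helpers below are simplified stand-ins written for this illustration only, not the drm_buffer API.

/* Streaming command-buffer walk; cursor type and helpers are hypothetical. */
#include <stdio.h>
#include <string.h>

struct cursor {
	const unsigned char *data;
	size_t size;
	size_t pos;
};

static size_t cur_unprocessed(const struct cursor *c)
{
	return c->size - c->pos;
}

/* Copy the next object out of the stream and advance past it. */
static int cur_read(struct cursor *c, void *out, size_t len)
{
	if (cur_unprocessed(c) < len)
		return -1;
	memcpy(out, c->data + c->pos, len);
	c->pos += len;
	return 0;
}

struct cmd_header {
	unsigned char type;
	unsigned char len;	/* payload length in bytes */
};

int main(void)
{
	const unsigned char stream[] = { 1, 2, 0xAA, 0xBB, 3, 0 };
	struct cursor c = { stream, sizeof(stream), 0 };
	struct cmd_header h;

	while (cur_unprocessed(&c) >= sizeof(h)) {
		if (cur_read(&c, &h, sizeof(h)))
			break;
		printf("cmd type %u, %u payload bytes\n", h.type, h.len);
		c.pos += h.len;	/* skip payload, akin to drm_buffer_advance() */
	}
	return 0;
}
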
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 9f5e2f929da9..313c96bc09da 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -186,7 +186,7 @@ void radeon_test_moves(struct radeon_device *rdev)
186 radeon_bo_kunmap(gtt_obj[i]); 186 radeon_bo_kunmap(gtt_obj[i]);
187 187
188 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", 188 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
189 gtt_addr - rdev->mc.gtt_location); 189 gtt_addr - rdev->mc.gtt_start);
190 } 190 }
191 191
192out_cleanup: 192out_cleanup:
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 58b5adf974ca..43c5ab34b634 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
150 man->default_caching = TTM_PL_FLAG_CACHED; 150 man->default_caching = TTM_PL_FLAG_CACHED;
151 break; 151 break;
152 case TTM_PL_TT: 152 case TTM_PL_TT:
153 man->gpu_offset = rdev->mc.gtt_location; 153 man->gpu_offset = rdev->mc.gtt_start;
154 man->available_caching = TTM_PL_MASK_CACHING; 154 man->available_caching = TTM_PL_MASK_CACHING;
155 man->default_caching = TTM_PL_FLAG_CACHED; 155 man->default_caching = TTM_PL_FLAG_CACHED;
156 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; 156 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
180 break; 180 break;
181 case TTM_PL_VRAM: 181 case TTM_PL_VRAM:
182 /* "On-card" video ram */ 182 /* "On-card" video ram */
183 man->gpu_offset = rdev->mc.vram_location; 183 man->gpu_offset = rdev->mc.vram_start;
184 man->flags = TTM_MEMTYPE_FLAG_FIXED | 184 man->flags = TTM_MEMTYPE_FLAG_FIXED |
185 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | 185 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
186 TTM_MEMTYPE_FLAG_MAPPABLE; 186 TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -262,10 +262,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
262 262
263 switch (old_mem->mem_type) { 263 switch (old_mem->mem_type) {
264 case TTM_PL_VRAM: 264 case TTM_PL_VRAM:
265 old_start += rdev->mc.vram_location; 265 old_start += rdev->mc.vram_start;
266 break; 266 break;
267 case TTM_PL_TT: 267 case TTM_PL_TT:
268 old_start += rdev->mc.gtt_location; 268 old_start += rdev->mc.gtt_start;
269 break; 269 break;
270 default: 270 default:
271 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); 271 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@ -273,10 +273,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
273 } 273 }
274 switch (new_mem->mem_type) { 274 switch (new_mem->mem_type) {
275 case TTM_PL_VRAM: 275 case TTM_PL_VRAM:
276 new_start += rdev->mc.vram_location; 276 new_start += rdev->mc.vram_start;
277 break; 277 break;
278 case TTM_PL_TT: 278 case TTM_PL_TT:
279 new_start += rdev->mc.gtt_location; 279 new_start += rdev->mc.gtt_start;
280 break; 280 break;
281 default: 281 default:
282 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); 282 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
new file mode 100644
index 000000000000..8f414a5f520f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -0,0 +1,837 @@
1r600 0x9400
20x000287A0 R7xx_CB_SHADER_CONTROL
30x00028230 R7xx_PA_SC_EDGERULE
40x000286C8 R7xx_SPI_THREAD_GROUPING
50x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
60x000088C4 VGT_CACHE_INVALIDATION
70x00028A50 VGT_ENHANCE
80x000088CC VGT_ES_PER_GS
90x00028A2C VGT_GROUP_DECR
100x00028A28 VGT_GROUP_FIRST_DECR
110x00028A24 VGT_GROUP_PRIM_TYPE
120x00028A30 VGT_GROUP_VECT_0_CNTL
130x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
140x00028A34 VGT_GROUP_VECT_1_CNTL
150x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
160x00028A40 VGT_GS_MODE
170x00028A6C VGT_GS_OUT_PRIM_TYPE
180x000088C8 VGT_GS_PER_ES
190x000088E8 VGT_GS_PER_VS
200x000088D4 VGT_GS_VERTEX_REUSE
210x00028A14 VGT_HOS_CNTL
220x00028A18 VGT_HOS_MAX_TESS_LEVEL
230x00028A1C VGT_HOS_MIN_TESS_LEVEL
240x00028A20 VGT_HOS_REUSE_DEPTH
250x0000895C VGT_INDEX_TYPE
260x00028408 VGT_INDX_OFFSET
270x00028AA0 VGT_INSTANCE_STEP_RATE_0
280x00028AA4 VGT_INSTANCE_STEP_RATE_1
290x000088C0 VGT_LAST_COPY_STATE
300x00028400 VGT_MAX_VTX_INDX
310x000088D8 VGT_MC_LAT_CNTL
320x00028404 VGT_MIN_VTX_INDX
330x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
340x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
350x00008970 VGT_NUM_INDICES
360x00008974 VGT_NUM_INSTANCES
370x00028A10 VGT_OUTPUT_PATH_CNTL
380x00028C5C VGT_OUT_DEALLOC_CNTL
390x00028A84 VGT_PRIMITIVEID_EN
400x00008958 VGT_PRIMITIVE_TYPE
410x00028AB4 VGT_REUSE_OFF
420x00028C58 VGT_VERTEX_REUSE_BLOCK_CNTL
430x00028AB8 VGT_VTX_CNT_EN
440x000088B0 VGT_VTX_VECT_EJECT_REG
450x00028810 PA_CL_CLIP_CNTL
460x00008A14 PA_CL_ENHANCE
470x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
480x00028C18 PA_CL_GB_HORZ_DISC_ADJ
490x00028C0C PA_CL_GB_VERT_CLIP_ADJ
500x00028C10 PA_CL_GB_VERT_DISC_ADJ
510x00028820 PA_CL_NANINF_CNTL
520x00028E1C PA_CL_POINT_CULL_RAD
530x00028E18 PA_CL_POINT_SIZE
540x00028E10 PA_CL_POINT_X_RAD
550x00028E14 PA_CL_POINT_Y_RAD
560x00028E2C PA_CL_UCP_0_W
570x00028E3C PA_CL_UCP_1_W
580x00028E4C PA_CL_UCP_2_W
590x00028E5C PA_CL_UCP_3_W
600x00028E6C PA_CL_UCP_4_W
610x00028E7C PA_CL_UCP_5_W
620x00028E20 PA_CL_UCP_0_X
630x00028E30 PA_CL_UCP_1_X
640x00028E40 PA_CL_UCP_2_X
650x00028E50 PA_CL_UCP_3_X
660x00028E60 PA_CL_UCP_4_X
670x00028E70 PA_CL_UCP_5_X
680x00028E24 PA_CL_UCP_0_Y
690x00028E34 PA_CL_UCP_1_Y
700x00028E44 PA_CL_UCP_2_Y
710x00028E54 PA_CL_UCP_3_Y
720x00028E64 PA_CL_UCP_4_Y
730x00028E74 PA_CL_UCP_5_Y
740x00028E28 PA_CL_UCP_0_Z
750x00028E38 PA_CL_UCP_1_Z
760x00028E48 PA_CL_UCP_2_Z
770x00028E58 PA_CL_UCP_3_Z
780x00028E68 PA_CL_UCP_4_Z
790x00028E78 PA_CL_UCP_5_Z
800x00028440 PA_CL_VPORT_XOFFSET_0
810x00028458 PA_CL_VPORT_XOFFSET_1
820x00028470 PA_CL_VPORT_XOFFSET_2
830x00028488 PA_CL_VPORT_XOFFSET_3
840x000284A0 PA_CL_VPORT_XOFFSET_4
850x000284B8 PA_CL_VPORT_XOFFSET_5
860x000284D0 PA_CL_VPORT_XOFFSET_6
870x000284E8 PA_CL_VPORT_XOFFSET_7
880x00028500 PA_CL_VPORT_XOFFSET_8
890x00028518 PA_CL_VPORT_XOFFSET_9
900x00028530 PA_CL_VPORT_XOFFSET_10
910x00028548 PA_CL_VPORT_XOFFSET_11
920x00028560 PA_CL_VPORT_XOFFSET_12
930x00028578 PA_CL_VPORT_XOFFSET_13
940x00028590 PA_CL_VPORT_XOFFSET_14
950x000285A8 PA_CL_VPORT_XOFFSET_15
960x0002843C PA_CL_VPORT_XSCALE_0
970x00028454 PA_CL_VPORT_XSCALE_1
980x0002846C PA_CL_VPORT_XSCALE_2
990x00028484 PA_CL_VPORT_XSCALE_3
1000x0002849C PA_CL_VPORT_XSCALE_4
1010x000284B4 PA_CL_VPORT_XSCALE_5
1020x000284CC PA_CL_VPORT_XSCALE_6
1030x000284E4 PA_CL_VPORT_XSCALE_7
1040x000284FC PA_CL_VPORT_XSCALE_8
1050x00028514 PA_CL_VPORT_XSCALE_9
1060x0002852C PA_CL_VPORT_XSCALE_10
1070x00028544 PA_CL_VPORT_XSCALE_11
1080x0002855C PA_CL_VPORT_XSCALE_12
1090x00028574 PA_CL_VPORT_XSCALE_13
1100x0002858C PA_CL_VPORT_XSCALE_14
1110x000285A4 PA_CL_VPORT_XSCALE_15
1120x00028448 PA_CL_VPORT_YOFFSET_0
1130x00028460 PA_CL_VPORT_YOFFSET_1
1140x00028478 PA_CL_VPORT_YOFFSET_2
1150x00028490 PA_CL_VPORT_YOFFSET_3
1160x000284A8 PA_CL_VPORT_YOFFSET_4
1170x000284C0 PA_CL_VPORT_YOFFSET_5
1180x000284D8 PA_CL_VPORT_YOFFSET_6
1190x000284F0 PA_CL_VPORT_YOFFSET_7
1200x00028508 PA_CL_VPORT_YOFFSET_8
1210x00028520 PA_CL_VPORT_YOFFSET_9
1220x00028538 PA_CL_VPORT_YOFFSET_10
1230x00028550 PA_CL_VPORT_YOFFSET_11
1240x00028568 PA_CL_VPORT_YOFFSET_12
1250x00028580 PA_CL_VPORT_YOFFSET_13
1260x00028598 PA_CL_VPORT_YOFFSET_14
1270x000285B0 PA_CL_VPORT_YOFFSET_15
1280x00028444 PA_CL_VPORT_YSCALE_0
1290x0002845C PA_CL_VPORT_YSCALE_1
1300x00028474 PA_CL_VPORT_YSCALE_2
1310x0002848C PA_CL_VPORT_YSCALE_3
1320x000284A4 PA_CL_VPORT_YSCALE_4
1330x000284BC PA_CL_VPORT_YSCALE_5
1340x000284D4 PA_CL_VPORT_YSCALE_6
1350x000284EC PA_CL_VPORT_YSCALE_7
1360x00028504 PA_CL_VPORT_YSCALE_8
1370x0002851C PA_CL_VPORT_YSCALE_9
1380x00028534 PA_CL_VPORT_YSCALE_10
1390x0002854C PA_CL_VPORT_YSCALE_11
1400x00028564 PA_CL_VPORT_YSCALE_12
1410x0002857C PA_CL_VPORT_YSCALE_13
1420x00028594 PA_CL_VPORT_YSCALE_14
1430x000285AC PA_CL_VPORT_YSCALE_15
1440x00028450 PA_CL_VPORT_ZOFFSET_0
1450x00028468 PA_CL_VPORT_ZOFFSET_1
1460x00028480 PA_CL_VPORT_ZOFFSET_2
1470x00028498 PA_CL_VPORT_ZOFFSET_3
1480x000284B0 PA_CL_VPORT_ZOFFSET_4
1490x000284C8 PA_CL_VPORT_ZOFFSET_5
1500x000284E0 PA_CL_VPORT_ZOFFSET_6
1510x000284F8 PA_CL_VPORT_ZOFFSET_7
1520x00028510 PA_CL_VPORT_ZOFFSET_8
1530x00028528 PA_CL_VPORT_ZOFFSET_9
1540x00028540 PA_CL_VPORT_ZOFFSET_10
1550x00028558 PA_CL_VPORT_ZOFFSET_11
1560x00028570 PA_CL_VPORT_ZOFFSET_12
1570x00028588 PA_CL_VPORT_ZOFFSET_13
1580x000285A0 PA_CL_VPORT_ZOFFSET_14
1590x000285B8 PA_CL_VPORT_ZOFFSET_15
1600x0002844C PA_CL_VPORT_ZSCALE_0
1610x00028464 PA_CL_VPORT_ZSCALE_1
1620x0002847C PA_CL_VPORT_ZSCALE_2
1630x00028494 PA_CL_VPORT_ZSCALE_3
1640x000284AC PA_CL_VPORT_ZSCALE_4
1650x000284C4 PA_CL_VPORT_ZSCALE_5
1660x000284DC PA_CL_VPORT_ZSCALE_6
1670x000284F4 PA_CL_VPORT_ZSCALE_7
1680x0002850C PA_CL_VPORT_ZSCALE_8
1690x00028524 PA_CL_VPORT_ZSCALE_9
1700x0002853C PA_CL_VPORT_ZSCALE_10
1710x00028554 PA_CL_VPORT_ZSCALE_11
1720x0002856C PA_CL_VPORT_ZSCALE_12
1730x00028584 PA_CL_VPORT_ZSCALE_13
1740x0002859C PA_CL_VPORT_ZSCALE_14
1750x000285B4 PA_CL_VPORT_ZSCALE_15
1760x0002881C PA_CL_VS_OUT_CNTL
1770x00028818 PA_CL_VTE_CNTL
1780x00028C48 PA_SC_AA_MASK
1790x00008B40 PA_SC_AA_SAMPLE_LOCS_2S
1800x00008B44 PA_SC_AA_SAMPLE_LOCS_4S
1810x00008B48 PA_SC_AA_SAMPLE_LOCS_8S_WD0
1820x00008B4C PA_SC_AA_SAMPLE_LOCS_8S_WD1
1830x00028C20 PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX
1840x00028C1C PA_SC_AA_SAMPLE_LOCS_MCTX
1850x00028214 PA_SC_CLIPRECT_0_BR
1860x0002821C PA_SC_CLIPRECT_1_BR
1870x00028224 PA_SC_CLIPRECT_2_BR
1880x0002822C PA_SC_CLIPRECT_3_BR
1890x00028210 PA_SC_CLIPRECT_0_TL
1900x00028218 PA_SC_CLIPRECT_1_TL
1910x00028220 PA_SC_CLIPRECT_2_TL
1920x00028228 PA_SC_CLIPRECT_3_TL
1930x0002820C PA_SC_CLIPRECT_RULE
1940x00008BF0 PA_SC_ENHANCE
1950x00028244 PA_SC_GENERIC_SCISSOR_BR
1960x00028240 PA_SC_GENERIC_SCISSOR_TL
1970x00028C00 PA_SC_LINE_CNTL
1980x00028A0C PA_SC_LINE_STIPPLE
1990x00008B10 PA_SC_LINE_STIPPLE_STATE
2000x00028A4C PA_SC_MODE_CNTL
2010x00028A48 PA_SC_MPASS_PS_CNTL
2020x00008B20 PA_SC_MULTI_CHIP_CNTL
2030x00028034 PA_SC_SCREEN_SCISSOR_BR
2040x00028030 PA_SC_SCREEN_SCISSOR_TL
2050x00028254 PA_SC_VPORT_SCISSOR_0_BR
2060x0002825C PA_SC_VPORT_SCISSOR_1_BR
2070x00028264 PA_SC_VPORT_SCISSOR_2_BR
2080x0002826C PA_SC_VPORT_SCISSOR_3_BR
2090x00028274 PA_SC_VPORT_SCISSOR_4_BR
2100x0002827C PA_SC_VPORT_SCISSOR_5_BR
2110x00028284 PA_SC_VPORT_SCISSOR_6_BR
2120x0002828C PA_SC_VPORT_SCISSOR_7_BR
2130x00028294 PA_SC_VPORT_SCISSOR_8_BR
2140x0002829C PA_SC_VPORT_SCISSOR_9_BR
2150x000282A4 PA_SC_VPORT_SCISSOR_10_BR
2160x000282AC PA_SC_VPORT_SCISSOR_11_BR
2170x000282B4 PA_SC_VPORT_SCISSOR_12_BR
2180x000282BC PA_SC_VPORT_SCISSOR_13_BR
2190x000282C4 PA_SC_VPORT_SCISSOR_14_BR
2200x000282CC PA_SC_VPORT_SCISSOR_15_BR
2210x00028250 PA_SC_VPORT_SCISSOR_0_TL
2220x00028258 PA_SC_VPORT_SCISSOR_1_TL
2230x00028260 PA_SC_VPORT_SCISSOR_2_TL
2240x00028268 PA_SC_VPORT_SCISSOR_3_TL
2250x00028270 PA_SC_VPORT_SCISSOR_4_TL
2260x00028278 PA_SC_VPORT_SCISSOR_5_TL
2270x00028280 PA_SC_VPORT_SCISSOR_6_TL
2280x00028288 PA_SC_VPORT_SCISSOR_7_TL
2290x00028290 PA_SC_VPORT_SCISSOR_8_TL
2300x00028298 PA_SC_VPORT_SCISSOR_9_TL
2310x000282A0 PA_SC_VPORT_SCISSOR_10_TL
2320x000282A8 PA_SC_VPORT_SCISSOR_11_TL
2330x000282B0 PA_SC_VPORT_SCISSOR_12_TL
2340x000282B8 PA_SC_VPORT_SCISSOR_13_TL
2350x000282C0 PA_SC_VPORT_SCISSOR_14_TL
2360x000282C8 PA_SC_VPORT_SCISSOR_15_TL
2370x000282D4 PA_SC_VPORT_ZMAX_0
2380x000282DC PA_SC_VPORT_ZMAX_1
2390x000282E4 PA_SC_VPORT_ZMAX_2
2400x000282EC PA_SC_VPORT_ZMAX_3
2410x000282F4 PA_SC_VPORT_ZMAX_4
2420x000282FC PA_SC_VPORT_ZMAX_5
2430x00028304 PA_SC_VPORT_ZMAX_6
2440x0002830C PA_SC_VPORT_ZMAX_7
2450x00028314 PA_SC_VPORT_ZMAX_8
2460x0002831C PA_SC_VPORT_ZMAX_9
2470x00028324 PA_SC_VPORT_ZMAX_10
2480x0002832C PA_SC_VPORT_ZMAX_11
2490x00028334 PA_SC_VPORT_ZMAX_12
2500x0002833C PA_SC_VPORT_ZMAX_13
2510x00028344 PA_SC_VPORT_ZMAX_14
2520x0002834C PA_SC_VPORT_ZMAX_15
2530x000282D0 PA_SC_VPORT_ZMIN_0
2540x000282D8 PA_SC_VPORT_ZMIN_1
2550x000282E0 PA_SC_VPORT_ZMIN_2
2560x000282E8 PA_SC_VPORT_ZMIN_3
2570x000282F0 PA_SC_VPORT_ZMIN_4
2580x000282F8 PA_SC_VPORT_ZMIN_5
2590x00028300 PA_SC_VPORT_ZMIN_6
2600x00028308 PA_SC_VPORT_ZMIN_7
2610x00028310 PA_SC_VPORT_ZMIN_8
2620x00028318 PA_SC_VPORT_ZMIN_9
2630x00028320 PA_SC_VPORT_ZMIN_10
2640x00028328 PA_SC_VPORT_ZMIN_11
2650x00028330 PA_SC_VPORT_ZMIN_12
2660x00028338 PA_SC_VPORT_ZMIN_13
2670x00028340 PA_SC_VPORT_ZMIN_14
2680x00028348 PA_SC_VPORT_ZMIN_15
2690x00028200 PA_SC_WINDOW_OFFSET
2700x00028208 PA_SC_WINDOW_SCISSOR_BR
2710x00028204 PA_SC_WINDOW_SCISSOR_TL
2720x00028A08 PA_SU_LINE_CNTL
2730x00028A04 PA_SU_POINT_MINMAX
2740x00028A00 PA_SU_POINT_SIZE
2750x00028E0C PA_SU_POLY_OFFSET_BACK_OFFSET
2760x00028E08 PA_SU_POLY_OFFSET_BACK_SCALE
2770x00028DFC PA_SU_POLY_OFFSET_CLAMP
2780x00028DF8 PA_SU_POLY_OFFSET_DB_FMT_CNTL
2790x00028E04 PA_SU_POLY_OFFSET_FRONT_OFFSET
2800x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
2810x00028814 PA_SU_SC_MODE_CNTL
2820x00028C08 PA_SU_VTX_CNTL
2830x00008C00 SQ_CONFIG
2840x00008C04 SQ_GPR_RESOURCE_MGMT_1
2850x00008C08 SQ_GPR_RESOURCE_MGMT_2
2860x00008C10 SQ_STACK_RESOURCE_MGMT_1
2870x00008C14 SQ_STACK_RESOURCE_MGMT_2
2880x00008C0C SQ_THREAD_RESOURCE_MGMT
2890x00028380 SQ_VTX_SEMANTIC_0
2900x00028384 SQ_VTX_SEMANTIC_1
2910x00028388 SQ_VTX_SEMANTIC_2
2920x0002838C SQ_VTX_SEMANTIC_3
2930x00028390 SQ_VTX_SEMANTIC_4
2940x00028394 SQ_VTX_SEMANTIC_5
2950x00028398 SQ_VTX_SEMANTIC_6
2960x0002839C SQ_VTX_SEMANTIC_7
2970x000283A0 SQ_VTX_SEMANTIC_8
2980x000283A4 SQ_VTX_SEMANTIC_9
2990x000283A8 SQ_VTX_SEMANTIC_10
3000x000283AC SQ_VTX_SEMANTIC_11
3010x000283B0 SQ_VTX_SEMANTIC_12
3020x000283B4 SQ_VTX_SEMANTIC_13
3030x000283B8 SQ_VTX_SEMANTIC_14
3040x000283BC SQ_VTX_SEMANTIC_15
3050x000283C0 SQ_VTX_SEMANTIC_16
3060x000283C4 SQ_VTX_SEMANTIC_17
3070x000283C8 SQ_VTX_SEMANTIC_18
3080x000283CC SQ_VTX_SEMANTIC_19
3090x000283D0 SQ_VTX_SEMANTIC_20
3100x000283D4 SQ_VTX_SEMANTIC_21
3110x000283D8 SQ_VTX_SEMANTIC_22
3120x000283DC SQ_VTX_SEMANTIC_23
3130x000283E0 SQ_VTX_SEMANTIC_24
3140x000283E4 SQ_VTX_SEMANTIC_25
3150x000283E8 SQ_VTX_SEMANTIC_26
3160x000283EC SQ_VTX_SEMANTIC_27
3170x000283F0 SQ_VTX_SEMANTIC_28
3180x000283F4 SQ_VTX_SEMANTIC_29
3190x000283F8 SQ_VTX_SEMANTIC_30
3200x000283FC SQ_VTX_SEMANTIC_31
3210x000288E0 SQ_VTX_SEMANTIC_CLEAR
3220x0003CFF4 SQ_VTX_START_INST_LOC
3230x0003C000 SQ_TEX_SAMPLER_WORD0_0
3240x0003C004 SQ_TEX_SAMPLER_WORD1_0
3250x0003C008 SQ_TEX_SAMPLER_WORD2_0
3260x00030000 SQ_ALU_CONSTANT0_0
3270x00030004 SQ_ALU_CONSTANT1_0
3280x00030008 SQ_ALU_CONSTANT2_0
3290x0003000C SQ_ALU_CONSTANT3_0
3300x0003E380 SQ_BOOL_CONST_0
3310x0003E384 SQ_BOOL_CONST_1
3320x0003E388 SQ_BOOL_CONST_2
3330x0003E200 SQ_LOOP_CONST_0
3340x0003E200 SQ_LOOP_CONST_DX10_0
3350x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
3360x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
3370x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
3380x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
3390x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
3400x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
3410x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
3420x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
3430x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
3440x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
3450x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
3460x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
3470x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
3480x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
3490x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
3500x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
3510x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
3520x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
3530x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
3540x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
3550x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
3560x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
3570x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
3580x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
3590x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
3600x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
3610x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
3620x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
3630x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
3640x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
3650x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
3660x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
3670x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
3680x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
3690x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
3700x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
3710x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
3720x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
3730x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
3740x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
3750x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
3760x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
3770x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
3780x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
3790x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
3800x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
3810x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
3820x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
3830x000289C0 SQ_ALU_CONST_CACHE_GS_0
3840x000289C4 SQ_ALU_CONST_CACHE_GS_1
3850x000289C8 SQ_ALU_CONST_CACHE_GS_2
3860x000289CC SQ_ALU_CONST_CACHE_GS_3
3870x000289D0 SQ_ALU_CONST_CACHE_GS_4
3880x000289D4 SQ_ALU_CONST_CACHE_GS_5
3890x000289D8 SQ_ALU_CONST_CACHE_GS_6
3900x000289DC SQ_ALU_CONST_CACHE_GS_7
3910x000289E0 SQ_ALU_CONST_CACHE_GS_8
3920x000289E4 SQ_ALU_CONST_CACHE_GS_9
3930x000289E8 SQ_ALU_CONST_CACHE_GS_10
3940x000289EC SQ_ALU_CONST_CACHE_GS_11
3950x000289F0 SQ_ALU_CONST_CACHE_GS_12
3960x000289F4 SQ_ALU_CONST_CACHE_GS_13
3970x000289F8 SQ_ALU_CONST_CACHE_GS_14
3980x000289FC SQ_ALU_CONST_CACHE_GS_15
3990x00028940 SQ_ALU_CONST_CACHE_PS_0
4000x00028944 SQ_ALU_CONST_CACHE_PS_1
4010x00028948 SQ_ALU_CONST_CACHE_PS_2
4020x0002894C SQ_ALU_CONST_CACHE_PS_3
4030x00028950 SQ_ALU_CONST_CACHE_PS_4
4040x00028954 SQ_ALU_CONST_CACHE_PS_5
4050x00028958 SQ_ALU_CONST_CACHE_PS_6
4060x0002895C SQ_ALU_CONST_CACHE_PS_7
4070x00028960 SQ_ALU_CONST_CACHE_PS_8
4080x00028964 SQ_ALU_CONST_CACHE_PS_9
4090x00028968 SQ_ALU_CONST_CACHE_PS_10
4100x0002896C SQ_ALU_CONST_CACHE_PS_11
4110x00028970 SQ_ALU_CONST_CACHE_PS_12
4120x00028974 SQ_ALU_CONST_CACHE_PS_13
4130x00028978 SQ_ALU_CONST_CACHE_PS_14
4140x0002897C SQ_ALU_CONST_CACHE_PS_15
4150x00028980 SQ_ALU_CONST_CACHE_VS_0
4160x00028984 SQ_ALU_CONST_CACHE_VS_1
4170x00028988 SQ_ALU_CONST_CACHE_VS_2
4180x0002898C SQ_ALU_CONST_CACHE_VS_3
4190x00028990 SQ_ALU_CONST_CACHE_VS_4
4200x00028994 SQ_ALU_CONST_CACHE_VS_5
4210x00028998 SQ_ALU_CONST_CACHE_VS_6
4220x0002899C SQ_ALU_CONST_CACHE_VS_7
4230x000289A0 SQ_ALU_CONST_CACHE_VS_8
4240x000289A4 SQ_ALU_CONST_CACHE_VS_9
4250x000289A8 SQ_ALU_CONST_CACHE_VS_10
4260x000289AC SQ_ALU_CONST_CACHE_VS_11
4270x000289B0 SQ_ALU_CONST_CACHE_VS_12
4280x000289B4 SQ_ALU_CONST_CACHE_VS_13
4290x000289B8 SQ_ALU_CONST_CACHE_VS_14
4300x000289BC SQ_ALU_CONST_CACHE_VS_15
4310x000288D8 SQ_PGM_CF_OFFSET_ES
4320x000288DC SQ_PGM_CF_OFFSET_FS
4330x000288D4 SQ_PGM_CF_OFFSET_GS
4340x000288CC SQ_PGM_CF_OFFSET_PS
4350x000288D0 SQ_PGM_CF_OFFSET_VS
4360x00028854 SQ_PGM_EXPORTS_PS
4370x00028890 SQ_PGM_RESOURCES_ES
4380x000288A4 SQ_PGM_RESOURCES_FS
4390x0002887C SQ_PGM_RESOURCES_GS
4400x00028850 SQ_PGM_RESOURCES_PS
4410x00028868 SQ_PGM_RESOURCES_VS
4420x00009100 SPI_CONFIG_CNTL
4430x0000913C SPI_CONFIG_CNTL_1
4440x000286DC SPI_FOG_CNTL
4450x000286E4 SPI_FOG_FUNC_BIAS
4460x000286E0 SPI_FOG_FUNC_SCALE
4470x000286D8 SPI_INPUT_Z
4480x000286D4 SPI_INTERP_CONTROL_0
4490x00028644 SPI_PS_INPUT_CNTL_0
4500x00028648 SPI_PS_INPUT_CNTL_1
4510x0002864C SPI_PS_INPUT_CNTL_2
4520x00028650 SPI_PS_INPUT_CNTL_3
4530x00028654 SPI_PS_INPUT_CNTL_4
4540x00028658 SPI_PS_INPUT_CNTL_5
4550x0002865C SPI_PS_INPUT_CNTL_6
4560x00028660 SPI_PS_INPUT_CNTL_7
4570x00028664 SPI_PS_INPUT_CNTL_8
4580x00028668 SPI_PS_INPUT_CNTL_9
4590x0002866C SPI_PS_INPUT_CNTL_10
4600x00028670 SPI_PS_INPUT_CNTL_11
4610x00028674 SPI_PS_INPUT_CNTL_12
4620x00028678 SPI_PS_INPUT_CNTL_13
4630x0002867C SPI_PS_INPUT_CNTL_14
4640x00028680 SPI_PS_INPUT_CNTL_15
4650x00028684 SPI_PS_INPUT_CNTL_16
4660x00028688 SPI_PS_INPUT_CNTL_17
4670x0002868C SPI_PS_INPUT_CNTL_18
4680x00028690 SPI_PS_INPUT_CNTL_19
4690x00028694 SPI_PS_INPUT_CNTL_20
4700x00028698 SPI_PS_INPUT_CNTL_21
4710x0002869C SPI_PS_INPUT_CNTL_22
4720x000286A0 SPI_PS_INPUT_CNTL_23
4730x000286A4 SPI_PS_INPUT_CNTL_24
4740x000286A8 SPI_PS_INPUT_CNTL_25
4750x000286AC SPI_PS_INPUT_CNTL_26
4760x000286B0 SPI_PS_INPUT_CNTL_27
4770x000286B4 SPI_PS_INPUT_CNTL_28
4780x000286B8 SPI_PS_INPUT_CNTL_29
4790x000286BC SPI_PS_INPUT_CNTL_30
4800x000286C0 SPI_PS_INPUT_CNTL_31
4810x000286CC SPI_PS_IN_CONTROL_0
4820x000286D0 SPI_PS_IN_CONTROL_1
4830x000286C4 SPI_VS_OUT_CONFIG
4840x00028614 SPI_VS_OUT_ID_0
4850x00028618 SPI_VS_OUT_ID_1
4860x0002861C SPI_VS_OUT_ID_2
4870x00028620 SPI_VS_OUT_ID_3
4880x00028624 SPI_VS_OUT_ID_4
4890x00028628 SPI_VS_OUT_ID_5
4900x0002862C SPI_VS_OUT_ID_6
4910x00028630 SPI_VS_OUT_ID_7
4920x00028634 SPI_VS_OUT_ID_8
4930x00028638 SPI_VS_OUT_ID_9
4940x00028438 SX_ALPHA_REF
4950x00028410 SX_ALPHA_TEST_CONTROL
4960x00028350 SX_MISC
4970x0000A020 SMX_DC_CTL0
4980x0000A024 SMX_DC_CTL1
4990x0000A028 SMX_DC_CTL2
5000x00009608 TC_CNTL
5010x00009604 TC_INVALIDATE
5020x00009490 TD_CNTL
5030x00009400 TD_FILTER4
5040x00009404 TD_FILTER4_1
5050x00009408 TD_FILTER4_2
5060x0000940C TD_FILTER4_3
5070x00009410 TD_FILTER4_4
5080x00009414 TD_FILTER4_5
5090x00009418 TD_FILTER4_6
5100x0000941C TD_FILTER4_7
5110x00009420 TD_FILTER4_8
5120x00009424 TD_FILTER4_9
5130x00009428 TD_FILTER4_10
5140x0000942C TD_FILTER4_11
5150x00009430 TD_FILTER4_12
5160x00009434 TD_FILTER4_13
5170x00009438 TD_FILTER4_14
5180x0000943C TD_FILTER4_15
5190x00009440 TD_FILTER4_16
5200x00009444 TD_FILTER4_17
5210x00009448 TD_FILTER4_18
5220x0000944C TD_FILTER4_19
5230x00009450 TD_FILTER4_20
5240x00009454 TD_FILTER4_21
5250x00009458 TD_FILTER4_22
5260x0000945C TD_FILTER4_23
5270x00009460 TD_FILTER4_24
5280x00009464 TD_FILTER4_25
5290x00009468 TD_FILTER4_26
5300x0000946C TD_FILTER4_27
5310x00009470 TD_FILTER4_28
5320x00009474 TD_FILTER4_29
5330x00009478 TD_FILTER4_30
5340x0000947C TD_FILTER4_31
5350x00009480 TD_FILTER4_32
5360x00009484 TD_FILTER4_33
5370x00009488 TD_FILTER4_34
5380x0000948C TD_FILTER4_35
5390x0000A80C TD_GS_SAMPLER0_BORDER_ALPHA
5400x0000A81C TD_GS_SAMPLER1_BORDER_ALPHA
5410x0000A82C TD_GS_SAMPLER2_BORDER_ALPHA
5420x0000A83C TD_GS_SAMPLER3_BORDER_ALPHA
5430x0000A84C TD_GS_SAMPLER4_BORDER_ALPHA
5440x0000A85C TD_GS_SAMPLER5_BORDER_ALPHA
5450x0000A86C TD_GS_SAMPLER6_BORDER_ALPHA
5460x0000A87C TD_GS_SAMPLER7_BORDER_ALPHA
5470x0000A88C TD_GS_SAMPLER8_BORDER_ALPHA
5480x0000A89C TD_GS_SAMPLER9_BORDER_ALPHA
5490x0000A8AC TD_GS_SAMPLER10_BORDER_ALPHA
5500x0000A8BC TD_GS_SAMPLER11_BORDER_ALPHA
5510x0000A8CC TD_GS_SAMPLER12_BORDER_ALPHA
5520x0000A8DC TD_GS_SAMPLER13_BORDER_ALPHA
5530x0000A8EC TD_GS_SAMPLER14_BORDER_ALPHA
5540x0000A8FC TD_GS_SAMPLER15_BORDER_ALPHA
5550x0000A90C TD_GS_SAMPLER16_BORDER_ALPHA
5560x0000A91C TD_GS_SAMPLER17_BORDER_ALPHA
5570x0000A808 TD_GS_SAMPLER0_BORDER_BLUE
5580x0000A818 TD_GS_SAMPLER1_BORDER_BLUE
5590x0000A828 TD_GS_SAMPLER2_BORDER_BLUE
5600x0000A838 TD_GS_SAMPLER3_BORDER_BLUE
5610x0000A848 TD_GS_SAMPLER4_BORDER_BLUE
5620x0000A858 TD_GS_SAMPLER5_BORDER_BLUE
5630x0000A868 TD_GS_SAMPLER6_BORDER_BLUE
5640x0000A878 TD_GS_SAMPLER7_BORDER_BLUE
5650x0000A888 TD_GS_SAMPLER8_BORDER_BLUE
5660x0000A898 TD_GS_SAMPLER9_BORDER_BLUE
5670x0000A8A8 TD_GS_SAMPLER10_BORDER_BLUE
5680x0000A8B8 TD_GS_SAMPLER11_BORDER_BLUE
5690x0000A8C8 TD_GS_SAMPLER12_BORDER_BLUE
5700x0000A8D8 TD_GS_SAMPLER13_BORDER_BLUE
5710x0000A8E8 TD_GS_SAMPLER14_BORDER_BLUE
5720x0000A8F8 TD_GS_SAMPLER15_BORDER_BLUE
5730x0000A908 TD_GS_SAMPLER16_BORDER_BLUE
5740x0000A918 TD_GS_SAMPLER17_BORDER_BLUE
5750x0000A804 TD_GS_SAMPLER0_BORDER_GREEN
5760x0000A814 TD_GS_SAMPLER1_BORDER_GREEN
5770x0000A824 TD_GS_SAMPLER2_BORDER_GREEN
5780x0000A834 TD_GS_SAMPLER3_BORDER_GREEN
5790x0000A844 TD_GS_SAMPLER4_BORDER_GREEN
5800x0000A854 TD_GS_SAMPLER5_BORDER_GREEN
5810x0000A864 TD_GS_SAMPLER6_BORDER_GREEN
5820x0000A874 TD_GS_SAMPLER7_BORDER_GREEN
5830x0000A884 TD_GS_SAMPLER8_BORDER_GREEN
5840x0000A894 TD_GS_SAMPLER9_BORDER_GREEN
5850x0000A8A4 TD_GS_SAMPLER10_BORDER_GREEN
5860x0000A8B4 TD_GS_SAMPLER11_BORDER_GREEN
5870x0000A8C4 TD_GS_SAMPLER12_BORDER_GREEN
5880x0000A8D4 TD_GS_SAMPLER13_BORDER_GREEN
5890x0000A8E4 TD_GS_SAMPLER14_BORDER_GREEN
5900x0000A8F4 TD_GS_SAMPLER15_BORDER_GREEN
5910x0000A904 TD_GS_SAMPLER16_BORDER_GREEN
5920x0000A914 TD_GS_SAMPLER17_BORDER_GREEN
5930x0000A800 TD_GS_SAMPLER0_BORDER_RED
5940x0000A810 TD_GS_SAMPLER1_BORDER_RED
5950x0000A820 TD_GS_SAMPLER2_BORDER_RED
5960x0000A830 TD_GS_SAMPLER3_BORDER_RED
5970x0000A840 TD_GS_SAMPLER4_BORDER_RED
5980x0000A850 TD_GS_SAMPLER5_BORDER_RED
5990x0000A860 TD_GS_SAMPLER6_BORDER_RED
6000x0000A870 TD_GS_SAMPLER7_BORDER_RED
6010x0000A880 TD_GS_SAMPLER8_BORDER_RED
6020x0000A890 TD_GS_SAMPLER9_BORDER_RED
6030x0000A8A0 TD_GS_SAMPLER10_BORDER_RED
6040x0000A8B0 TD_GS_SAMPLER11_BORDER_RED
6050x0000A8C0 TD_GS_SAMPLER12_BORDER_RED
6060x0000A8D0 TD_GS_SAMPLER13_BORDER_RED
6070x0000A8E0 TD_GS_SAMPLER14_BORDER_RED
6080x0000A8F0 TD_GS_SAMPLER15_BORDER_RED
6090x0000A900 TD_GS_SAMPLER16_BORDER_RED
6100x0000A910 TD_GS_SAMPLER17_BORDER_RED
6110x0000A40C TD_PS_SAMPLER0_BORDER_ALPHA
6120x0000A41C TD_PS_SAMPLER1_BORDER_ALPHA
6130x0000A42C TD_PS_SAMPLER2_BORDER_ALPHA
6140x0000A43C TD_PS_SAMPLER3_BORDER_ALPHA
6150x0000A44C TD_PS_SAMPLER4_BORDER_ALPHA
6160x0000A45C TD_PS_SAMPLER5_BORDER_ALPHA
6170x0000A46C TD_PS_SAMPLER6_BORDER_ALPHA
6180x0000A47C TD_PS_SAMPLER7_BORDER_ALPHA
6190x0000A48C TD_PS_SAMPLER8_BORDER_ALPHA
6200x0000A49C TD_PS_SAMPLER9_BORDER_ALPHA
6210x0000A4AC TD_PS_SAMPLER10_BORDER_ALPHA
6220x0000A4BC TD_PS_SAMPLER11_BORDER_ALPHA
6230x0000A4CC TD_PS_SAMPLER12_BORDER_ALPHA
6240x0000A4DC TD_PS_SAMPLER13_BORDER_ALPHA
6250x0000A4EC TD_PS_SAMPLER14_BORDER_ALPHA
6260x0000A4FC TD_PS_SAMPLER15_BORDER_ALPHA
6270x0000A50C TD_PS_SAMPLER16_BORDER_ALPHA
6280x0000A51C TD_PS_SAMPLER17_BORDER_ALPHA
6290x0000A408 TD_PS_SAMPLER0_BORDER_BLUE
6300x0000A418 TD_PS_SAMPLER1_BORDER_BLUE
6310x0000A428 TD_PS_SAMPLER2_BORDER_BLUE
6320x0000A438 TD_PS_SAMPLER3_BORDER_BLUE
6330x0000A448 TD_PS_SAMPLER4_BORDER_BLUE
6340x0000A458 TD_PS_SAMPLER5_BORDER_BLUE
6350x0000A468 TD_PS_SAMPLER6_BORDER_BLUE
6360x0000A478 TD_PS_SAMPLER7_BORDER_BLUE
6370x0000A488 TD_PS_SAMPLER8_BORDER_BLUE
6380x0000A498 TD_PS_SAMPLER9_BORDER_BLUE
6390x0000A4A8 TD_PS_SAMPLER10_BORDER_BLUE
6400x0000A4B8 TD_PS_SAMPLER11_BORDER_BLUE
6410x0000A4C8 TD_PS_SAMPLER12_BORDER_BLUE
6420x0000A4D8 TD_PS_SAMPLER13_BORDER_BLUE
6430x0000A4E8 TD_PS_SAMPLER14_BORDER_BLUE
6440x0000A4F8 TD_PS_SAMPLER15_BORDER_BLUE
6450x0000A508 TD_PS_SAMPLER16_BORDER_BLUE
6460x0000A518 TD_PS_SAMPLER17_BORDER_BLUE
6470x0000A404 TD_PS_SAMPLER0_BORDER_GREEN
6480x0000A414 TD_PS_SAMPLER1_BORDER_GREEN
6490x0000A424 TD_PS_SAMPLER2_BORDER_GREEN
6500x0000A434 TD_PS_SAMPLER3_BORDER_GREEN
6510x0000A444 TD_PS_SAMPLER4_BORDER_GREEN
6520x0000A454 TD_PS_SAMPLER5_BORDER_GREEN
6530x0000A464 TD_PS_SAMPLER6_BORDER_GREEN
6540x0000A474 TD_PS_SAMPLER7_BORDER_GREEN
6550x0000A484 TD_PS_SAMPLER8_BORDER_GREEN
6560x0000A494 TD_PS_SAMPLER9_BORDER_GREEN
6570x0000A4A4 TD_PS_SAMPLER10_BORDER_GREEN
6580x0000A4B4 TD_PS_SAMPLER11_BORDER_GREEN
6590x0000A4C4 TD_PS_SAMPLER12_BORDER_GREEN
6600x0000A4D4 TD_PS_SAMPLER13_BORDER_GREEN
6610x0000A4E4 TD_PS_SAMPLER14_BORDER_GREEN
6620x0000A4F4 TD_PS_SAMPLER15_BORDER_GREEN
6630x0000A504 TD_PS_SAMPLER16_BORDER_GREEN
6640x0000A514 TD_PS_SAMPLER17_BORDER_GREEN
6650x0000A400 TD_PS_SAMPLER0_BORDER_RED
6660x0000A410 TD_PS_SAMPLER1_BORDER_RED
6670x0000A420 TD_PS_SAMPLER2_BORDER_RED
6680x0000A430 TD_PS_SAMPLER3_BORDER_RED
6690x0000A440 TD_PS_SAMPLER4_BORDER_RED
6700x0000A450 TD_PS_SAMPLER5_BORDER_RED
6710x0000A460 TD_PS_SAMPLER6_BORDER_RED
6720x0000A470 TD_PS_SAMPLER7_BORDER_RED
6730x0000A480 TD_PS_SAMPLER8_BORDER_RED
6740x0000A490 TD_PS_SAMPLER9_BORDER_RED
6750x0000A4A0 TD_PS_SAMPLER10_BORDER_RED
6760x0000A4B0 TD_PS_SAMPLER11_BORDER_RED
6770x0000A4C0 TD_PS_SAMPLER12_BORDER_RED
6780x0000A4D0 TD_PS_SAMPLER13_BORDER_RED
6790x0000A4E0 TD_PS_SAMPLER14_BORDER_RED
6800x0000A4F0 TD_PS_SAMPLER15_BORDER_RED
6810x0000A500 TD_PS_SAMPLER16_BORDER_RED
6820x0000A510 TD_PS_SAMPLER17_BORDER_RED
6830x0000AA00 TD_PS_SAMPLER0_CLEARTYPE_KERNEL
6840x0000AA04 TD_PS_SAMPLER1_CLEARTYPE_KERNEL
6850x0000AA08 TD_PS_SAMPLER2_CLEARTYPE_KERNEL
6860x0000AA0C TD_PS_SAMPLER3_CLEARTYPE_KERNEL
6870x0000AA10 TD_PS_SAMPLER4_CLEARTYPE_KERNEL
6880x0000AA14 TD_PS_SAMPLER5_CLEARTYPE_KERNEL
6890x0000AA18 TD_PS_SAMPLER6_CLEARTYPE_KERNEL
6900x0000AA1C TD_PS_SAMPLER7_CLEARTYPE_KERNEL
6910x0000AA20 TD_PS_SAMPLER8_CLEARTYPE_KERNEL
6920x0000AA24 TD_PS_SAMPLER9_CLEARTYPE_KERNEL
6930x0000AA28 TD_PS_SAMPLER10_CLEARTYPE_KERNEL
6940x0000AA2C TD_PS_SAMPLER11_CLEARTYPE_KERNEL
6950x0000AA30 TD_PS_SAMPLER12_CLEARTYPE_KERNEL
6960x0000AA34 TD_PS_SAMPLER13_CLEARTYPE_KERNEL
6970x0000AA38 TD_PS_SAMPLER14_CLEARTYPE_KERNEL
6980x0000AA3C TD_PS_SAMPLER15_CLEARTYPE_KERNEL
6990x0000AA40 TD_PS_SAMPLER16_CLEARTYPE_KERNEL
7000x0000AA44 TD_PS_SAMPLER17_CLEARTYPE_KERNEL
7010x0000A60C TD_VS_SAMPLER0_BORDER_ALPHA
7020x0000A61C TD_VS_SAMPLER1_BORDER_ALPHA
7030x0000A62C TD_VS_SAMPLER2_BORDER_ALPHA
7040x0000A63C TD_VS_SAMPLER3_BORDER_ALPHA
7050x0000A64C TD_VS_SAMPLER4_BORDER_ALPHA
7060x0000A65C TD_VS_SAMPLER5_BORDER_ALPHA
7070x0000A66C TD_VS_SAMPLER6_BORDER_ALPHA
7080x0000A67C TD_VS_SAMPLER7_BORDER_ALPHA
7090x0000A68C TD_VS_SAMPLER8_BORDER_ALPHA
7100x0000A69C TD_VS_SAMPLER9_BORDER_ALPHA
7110x0000A6AC TD_VS_SAMPLER10_BORDER_ALPHA
7120x0000A6BC TD_VS_SAMPLER11_BORDER_ALPHA
7130x0000A6CC TD_VS_SAMPLER12_BORDER_ALPHA
7140x0000A6DC TD_VS_SAMPLER13_BORDER_ALPHA
7150x0000A6EC TD_VS_SAMPLER14_BORDER_ALPHA
7160x0000A6FC TD_VS_SAMPLER15_BORDER_ALPHA
7170x0000A70C TD_VS_SAMPLER16_BORDER_ALPHA
7180x0000A71C TD_VS_SAMPLER17_BORDER_ALPHA
7190x0000A608 TD_VS_SAMPLER0_BORDER_BLUE
7200x0000A618 TD_VS_SAMPLER1_BORDER_BLUE
7210x0000A628 TD_VS_SAMPLER2_BORDER_BLUE
7220x0000A638 TD_VS_SAMPLER3_BORDER_BLUE
7230x0000A648 TD_VS_SAMPLER4_BORDER_BLUE
7240x0000A658 TD_VS_SAMPLER5_BORDER_BLUE
7250x0000A668 TD_VS_SAMPLER6_BORDER_BLUE
7260x0000A678 TD_VS_SAMPLER7_BORDER_BLUE
7270x0000A688 TD_VS_SAMPLER8_BORDER_BLUE
7280x0000A698 TD_VS_SAMPLER9_BORDER_BLUE
7290x0000A6A8 TD_VS_SAMPLER10_BORDER_BLUE
7300x0000A6B8 TD_VS_SAMPLER11_BORDER_BLUE
7310x0000A6C8 TD_VS_SAMPLER12_BORDER_BLUE
7320x0000A6D8 TD_VS_SAMPLER13_BORDER_BLUE
7330x0000A6E8 TD_VS_SAMPLER14_BORDER_BLUE
7340x0000A6F8 TD_VS_SAMPLER15_BORDER_BLUE
7350x0000A708 TD_VS_SAMPLER16_BORDER_BLUE
7360x0000A718 TD_VS_SAMPLER17_BORDER_BLUE
7370x0000A604 TD_VS_SAMPLER0_BORDER_GREEN
7380x0000A614 TD_VS_SAMPLER1_BORDER_GREEN
7390x0000A624 TD_VS_SAMPLER2_BORDER_GREEN
7400x0000A634 TD_VS_SAMPLER3_BORDER_GREEN
7410x0000A644 TD_VS_SAMPLER4_BORDER_GREEN
7420x0000A654 TD_VS_SAMPLER5_BORDER_GREEN
7430x0000A664 TD_VS_SAMPLER6_BORDER_GREEN
7440x0000A674 TD_VS_SAMPLER7_BORDER_GREEN
7450x0000A684 TD_VS_SAMPLER8_BORDER_GREEN
7460x0000A694 TD_VS_SAMPLER9_BORDER_GREEN
7470x0000A6A4 TD_VS_SAMPLER10_BORDER_GREEN
7480x0000A6B4 TD_VS_SAMPLER11_BORDER_GREEN
7490x0000A6C4 TD_VS_SAMPLER12_BORDER_GREEN
7500x0000A6D4 TD_VS_SAMPLER13_BORDER_GREEN
7510x0000A6E4 TD_VS_SAMPLER14_BORDER_GREEN
7520x0000A6F4 TD_VS_SAMPLER15_BORDER_GREEN
7530x0000A704 TD_VS_SAMPLER16_BORDER_GREEN
7540x0000A714 TD_VS_SAMPLER17_BORDER_GREEN
7550x0000A600 TD_VS_SAMPLER0_BORDER_RED
7560x0000A610 TD_VS_SAMPLER1_BORDER_RED
7570x0000A620 TD_VS_SAMPLER2_BORDER_RED
7580x0000A630 TD_VS_SAMPLER3_BORDER_RED
7590x0000A640 TD_VS_SAMPLER4_BORDER_RED
7600x0000A650 TD_VS_SAMPLER5_BORDER_RED
7610x0000A660 TD_VS_SAMPLER6_BORDER_RED
7620x0000A670 TD_VS_SAMPLER7_BORDER_RED
7630x0000A680 TD_VS_SAMPLER8_BORDER_RED
7640x0000A690 TD_VS_SAMPLER9_BORDER_RED
7650x0000A6A0 TD_VS_SAMPLER10_BORDER_RED
7660x0000A6B0 TD_VS_SAMPLER11_BORDER_RED
7670x0000A6C0 TD_VS_SAMPLER12_BORDER_RED
7680x0000A6D0 TD_VS_SAMPLER13_BORDER_RED
7690x0000A6E0 TD_VS_SAMPLER14_BORDER_RED
7700x0000A6F0 TD_VS_SAMPLER15_BORDER_RED
7710x0000A700 TD_VS_SAMPLER16_BORDER_RED
7720x0000A710 TD_VS_SAMPLER17_BORDER_RED
7730x00009508 TA_CNTL_AUX
7740x0002802C DB_DEPTH_CLEAR
7750x00028D24 DB_HTILE_SURFACE
7760x00028D34 DB_PREFETCH_LIMIT
7770x00028D30 DB_PRELOAD_CONTROL
7780x00028D0C DB_RENDER_CONTROL
7790x00028D10 DB_RENDER_OVERRIDE
7800x0002880C DB_SHADER_CONTROL
7810x00028D2C DB_SRESULTS_COMPARE_STATE1
7820x00028430 DB_STENCILREFMASK
7830x00028434 DB_STENCILREFMASK_BF
7840x00028028 DB_STENCIL_CLEAR
7850x00028780 CB_BLEND0_CONTROL
7860x00028784 CB_BLEND1_CONTROL
7870x00028788 CB_BLEND2_CONTROL
7880x0002878C CB_BLEND3_CONTROL
7890x00028790 CB_BLEND4_CONTROL
7900x00028794 CB_BLEND5_CONTROL
7910x00028798 CB_BLEND6_CONTROL
7920x0002879C CB_BLEND7_CONTROL
7930x00028804 CB_BLEND_CONTROL
7940x00028420 CB_BLEND_ALPHA
7950x0002841C CB_BLEND_BLUE
7960x00028418 CB_BLEND_GREEN
7970x00028414 CB_BLEND_RED
7980x0002812C CB_CLEAR_ALPHA
7990x00028128 CB_CLEAR_BLUE
8000x00028124 CB_CLEAR_GREEN
8010x00028120 CB_CLEAR_RED
8020x00028C30 CB_CLRCMP_CONTROL
8030x00028C38 CB_CLRCMP_DST
8040x00028C3C CB_CLRCMP_MSK
8050x00028C34 CB_CLRCMP_SRC
8060x00028100 CB_COLOR0_MASK
8070x00028104 CB_COLOR1_MASK
8080x00028108 CB_COLOR2_MASK
8090x0002810C CB_COLOR3_MASK
8100x00028110 CB_COLOR4_MASK
8110x00028114 CB_COLOR5_MASK
8120x00028118 CB_COLOR6_MASK
8130x0002811C CB_COLOR7_MASK
8140x00028080 CB_COLOR0_VIEW
8150x00028084 CB_COLOR1_VIEW
8160x00028088 CB_COLOR2_VIEW
8170x0002808C CB_COLOR3_VIEW
8180x00028090 CB_COLOR4_VIEW
8190x00028094 CB_COLOR5_VIEW
8200x00028098 CB_COLOR6_VIEW
8210x0002809C CB_COLOR7_VIEW
8220x00028808 CB_COLOR_CONTROL
8230x0002842C CB_FOG_BLUE
8240x00028428 CB_FOG_GREEN
8250x00028424 CB_FOG_RED
8260x00008040 WAIT_UNTIL
8270x00008950 CC_GC_SHADER_PIPE_CONFIG
8280x00008954 GC_USER_SHADER_PIPE_CONFIG
8290x00009714 VC_ENHANCE
8300x00009830 DB_DEBUG
8310x00009838 DB_WATERMARKS
8320x00028D28 DB_SRESULTS_COMPARE_STATE0
8330x00028D44 DB_ALPHA_TO_MASK
8340x00009504 TA_CNTL
8350x00009700 VC_CNTL
8360x00009718 VC_CONFIG
8370x0000A02C SMX_DC_MC_INTF_CTL
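
The long table above is a plain offset-to-name register list of the kind the radeon KMS code uses to build its command-stream checker's safe-register tables, i.e. the set of registers userspace command buffers are allowed to program. As a reading aid only, here is a small stand-alone C sketch of how such a whitelist can be consulted; the subset of offsets, the is_reg_safe() helper and the bsearch() approach are illustrative assumptions, not the driver's generated tables.

/* Stand-alone sketch (hypothetical): checking a register offset against a
 * whitelist like the table above. Not the radeon CS checker; the subset
 * and helper names below are made up for illustration. */
#include <stdio.h>
#include <stdlib.h>

static const unsigned int safe_regs[] = {	/* small sorted subset of the list above */
	0x00008C00,	/* SQ_CONFIG */
	0x00009100,	/* SPI_CONFIG_CNTL */
	0x00028350,	/* SX_MISC */
	0x00028A00,	/* PA_SU_POINT_SIZE */
	0x00028A04,	/* PA_SU_POINT_MINMAX */
};

static int cmp_u32(const void *a, const void *b)
{
	unsigned int x = *(const unsigned int *)a;
	unsigned int y = *(const unsigned int *)b;

	return (x > y) - (x < y);
}

/* Returns 1 if userspace may program this register, 0 otherwise. */
static int is_reg_safe(unsigned int reg)
{
	return bsearch(&reg, safe_regs,
		       sizeof(safe_regs) / sizeof(safe_regs[0]),
		       sizeof(safe_regs[0]), cmp_u32) != NULL;
}

int main(void)
{
	printf("0x00028A04 safe? %d\n", is_reg_safe(0x00028A04));	/* 1 */
	printf("0x00028D10 safe? %d\n", is_reg_safe(0x00028D10));	/* 0, not in this subset */
	return 0;
}
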
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 287fcebfb4e6..626d51891ee9 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -113,6 +113,7 @@ int rs400_gart_enable(struct radeon_device *rdev)
113 uint32_t size_reg; 113 uint32_t size_reg;
114 uint32_t tmp; 114 uint32_t tmp;
115 115
116 radeon_gart_restore(rdev);
116 tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH); 117 tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
117 tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS; 118 tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
118 WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp); 119 WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
@@ -150,9 +151,8 @@ int rs400_gart_enable(struct radeon_device *rdev)
150 WREG32(RADEON_AGP_BASE, 0xFFFFFFFF); 151 WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
151 WREG32(RS480_AGP_BASE_2, 0); 152 WREG32(RS480_AGP_BASE_2, 0);
152 } 153 }
153 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 154 tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
154 tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16); 155 tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
155 tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16);
156 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) { 156 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
157 WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp); 157 WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
158 tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; 158 tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
@@ -251,14 +251,19 @@ void rs400_gpu_init(struct radeon_device *rdev)
251 } 251 }
252} 252}
253 253
254void rs400_vram_info(struct radeon_device *rdev) 254void rs400_mc_init(struct radeon_device *rdev)
255{ 255{
256 u64 base;
257
256 rs400_gart_adjust_size(rdev); 258 rs400_gart_adjust_size(rdev);
259 rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
257 /* DDR for all card after R300 & IGP */ 260 /* DDR for all card after R300 & IGP */
258 rdev->mc.vram_is_ddr = true; 261 rdev->mc.vram_is_ddr = true;
259 rdev->mc.vram_width = 128; 262 rdev->mc.vram_width = 128;
260
261 r100_vram_init_sizes(rdev); 263 r100_vram_init_sizes(rdev);
264 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
265 radeon_vram_location(rdev, &rdev->mc, base);
266 radeon_gtt_location(rdev, &rdev->mc);
262} 267}
263 268
264uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) 269uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -362,22 +367,6 @@ static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
362#endif 367#endif
363} 368}
364 369
365static int rs400_mc_init(struct radeon_device *rdev)
366{
367 int r;
368 u32 tmp;
369
370 /* Setup GPU memory space */
371 tmp = RREG32(R_00015C_NB_TOM);
372 rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
373 rdev->mc.gtt_location = 0xFFFFFFFFUL;
374 r = radeon_mc_setup(rdev);
375 rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
376 if (r)
377 return r;
378 return 0;
379}
380
381void rs400_mc_program(struct radeon_device *rdev) 370void rs400_mc_program(struct radeon_device *rdev)
382{ 371{
383 struct r100_mc_save save; 372 struct r100_mc_save save;
@@ -516,12 +505,8 @@ int rs400_init(struct radeon_device *rdev)
516 radeon_get_clock_info(rdev->ddev); 505 radeon_get_clock_info(rdev->ddev);
517 /* Initialize power management */ 506 /* Initialize power management */
518 radeon_pm_init(rdev); 507 radeon_pm_init(rdev);
519 /* Get vram informations */ 508 /* initialize memory controller */
520 rs400_vram_info(rdev); 509 rs400_mc_init(rdev);
521 /* Initialize memory controller (also test AGP) */
522 r = rs400_mc_init(rdev);
523 if (r)
524 return r;
525 /* Fence driver */ 510 /* Fence driver */
526 r = radeon_fence_driver_init(rdev); 511 r = radeon_fence_driver_init(rdev);
527 if (r) 512 if (r)
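
The rs400 change above drops the old open-coded vram_location/gtt_location setup: the new rs400_mc_init() reads the frame-buffer base from RADEON_NB_TOM and hands placement to the common radeon_vram_location()/radeon_gtt_location() helpers. A rough, self-contained C sketch of the placement idea follows; the struct fields mirror names used in the diff, but the policy shown (GTT directly after VRAM when it fits below 4GB, otherwise in front of it) is a simplifying assumption, not the helpers' real logic.

/* Illustrative sketch only: VRAM/GTT placement in the GPU address space,
 * loosely modelled on the radeon_vram_location()/radeon_gtt_location()
 * calls above. The placement policy here is a simplified assumption. */
#include <stdint.h>
#include <stdio.h>

struct mc {
	uint64_t mc_vram_size, vram_start, vram_end;
	uint64_t gtt_size, gtt_start, gtt_end;
};

static void vram_location(struct mc *mc, uint64_t base)
{
	mc->vram_start = base;
	mc->vram_end = base + mc->mc_vram_size - 1;
}

static void gtt_location(struct mc *mc)
{
	/* Put the GTT right after VRAM if it still fits below 4GB,
	 * otherwise place it in front of VRAM (simplified). */
	if (mc->vram_end + 1 + mc->gtt_size <= 0x100000000ULL)
		mc->gtt_start = mc->vram_end + 1;
	else
		mc->gtt_start = mc->vram_start - mc->gtt_size;
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
}

int main(void)
{
	struct mc mc = { .mc_vram_size = 256ULL << 20, .gtt_size = 512ULL << 20 };
	uint64_t base = (0x00ab & 0xffff) << 16;	/* NB_TOM-style base, value made up */

	vram_location(&mc, base);
	gtt_location(&mc);
	printf("VRAM 0x%llx-0x%llx, GTT 0x%llx-0x%llx\n",
	       (unsigned long long)mc.vram_start, (unsigned long long)mc.vram_end,
	       (unsigned long long)mc.gtt_start, (unsigned long long)mc.gtt_end);
	return 0;
}
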
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index c3818562a13e..47f046b78c6b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -45,23 +45,6 @@
45void rs600_gpu_init(struct radeon_device *rdev); 45void rs600_gpu_init(struct radeon_device *rdev);
46int rs600_mc_wait_for_idle(struct radeon_device *rdev); 46int rs600_mc_wait_for_idle(struct radeon_device *rdev);
47 47
48int rs600_mc_init(struct radeon_device *rdev)
49{
50 /* read back the MC value from the hw */
51 int r;
52 u32 tmp;
53
54 /* Setup GPU memory space */
55 tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
56 rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
57 rdev->mc.gtt_location = 0xffffffffUL;
58 r = radeon_mc_setup(rdev);
59 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
60 if (r)
61 return r;
62 return 0;
63}
64
65/* hpd for digital panel detect/disconnect */ 48/* hpd for digital panel detect/disconnect */
66bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 49bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
67{ 50{
@@ -213,6 +196,7 @@ int rs600_gart_enable(struct radeon_device *rdev)
213 r = radeon_gart_table_vram_pin(rdev); 196 r = radeon_gart_table_vram_pin(rdev);
214 if (r) 197 if (r)
215 return r; 198 return r;
199 radeon_gart_restore(rdev);
216 /* Enable bus master */ 200 /* Enable bus master */
217 tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS; 201 tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
218 WREG32(R_00004C_BUS_CNTL, tmp); 202 WREG32(R_00004C_BUS_CNTL, tmp);
@@ -406,10 +390,14 @@ int rs600_irq_process(struct radeon_device *rdev)
406 if (G_000044_SW_INT(status)) 390 if (G_000044_SW_INT(status))
407 radeon_fence_process(rdev); 391 radeon_fence_process(rdev);
408 /* Vertical blank interrupts */ 392 /* Vertical blank interrupts */
409 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) 393 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
410 drm_handle_vblank(rdev->ddev, 0); 394 drm_handle_vblank(rdev->ddev, 0);
411 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) 395 wake_up(&rdev->irq.vblank_queue);
396 }
397 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
412 drm_handle_vblank(rdev->ddev, 1); 398 drm_handle_vblank(rdev->ddev, 1);
399 wake_up(&rdev->irq.vblank_queue);
400 }
413 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { 401 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
414 queue_hotplug = true; 402 queue_hotplug = true;
415 DRM_DEBUG("HPD1\n"); 403 DRM_DEBUG("HPD1\n");
@@ -470,22 +458,22 @@ void rs600_gpu_init(struct radeon_device *rdev)
470 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); 458 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
471} 459}
472 460
473void rs600_vram_info(struct radeon_device *rdev) 461void rs600_mc_init(struct radeon_device *rdev)
474{ 462{
463 u64 base;
464
465 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
466 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
475 rdev->mc.vram_is_ddr = true; 467 rdev->mc.vram_is_ddr = true;
476 rdev->mc.vram_width = 128; 468 rdev->mc.vram_width = 128;
477
478 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 469 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
479 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 470 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
480 471 rdev->mc.visible_vram_size = rdev->mc.aper_size;
481 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 472 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
482 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 473 base = RREG32_MC(R_000004_MC_FB_LOCATION);
483 474 base = G_000004_MC_FB_START(base) << 16;
484 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) 475 radeon_vram_location(rdev, &rdev->mc, base);
485 rdev->mc.mc_vram_size = rdev->mc.aper_size; 476 radeon_gtt_location(rdev, &rdev->mc);
486
487 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
488 rdev->mc.real_vram_size = rdev->mc.aper_size;
489} 477}
490 478
491void rs600_bandwidth_update(struct radeon_device *rdev) 479void rs600_bandwidth_update(struct radeon_device *rdev)
@@ -661,12 +649,8 @@ int rs600_init(struct radeon_device *rdev)
661 radeon_get_clock_info(rdev->ddev); 649 radeon_get_clock_info(rdev->ddev);
662 /* Initialize power management */ 650 /* Initialize power management */
663 radeon_pm_init(rdev); 651 radeon_pm_init(rdev);
664 /* Get vram informations */ 652 /* initialize memory controller */
665 rs600_vram_info(rdev); 653 rs600_mc_init(rdev);
666 /* Initialize memory controller (also test AGP) */
667 r = rs600_mc_init(rdev);
668 if (r)
669 return r;
670 rs600_debugfs(rdev); 654 rs600_debugfs(rdev);
671 /* Fence driver */ 655 /* Fence driver */
672 r = radeon_fence_driver_init(rdev); 656 r = radeon_fence_driver_init(rdev);
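
Besides the same MC-init rework as rs400, the rs600 hunk above makes the IRQ handler call wake_up(&rdev->irq.vblank_queue) whenever a vblank interrupt fires, presumably so that code sleeping until the next vertical blank is woken promptly. A loose userspace analogy of that wait/wake pattern, using a condition variable as a stand-in for the kernel wait queue (names and timing invented):

/* Userspace analogy only: one thread waits for a "vblank", a fake IRQ
 * handler bumps a counter and wakes it, mirroring the wake_up() added
 * above. Not kernel wait-queue code. Build with -lpthread. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t vblank_queue = PTHREAD_COND_INITIALIZER;
static int vblank_count;

static void *fake_irq_handler(void *arg)
{
	(void)arg;
	usleep(16000);				/* pretend a vblank arrives ~16ms later */
	pthread_mutex_lock(&lock);
	vblank_count++;				/* update state, then wake any waiters */
	pthread_cond_broadcast(&vblank_queue);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void wait_for_vblank(int seen)
{
	pthread_mutex_lock(&lock);
	while (vblank_count == seen)		/* sleep until the counter moves on */
		pthread_cond_wait(&vblank_queue, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;
	int seen = vblank_count;

	pthread_create(&t, NULL, fake_irq_handler, NULL);
	wait_for_vblank(seen);
	printf("woken after vblank %d\n", vblank_count);
	pthread_join(t, NULL);
	return 0;
}
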
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 06e2771aee5a..83b9174f76f2 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -129,27 +129,21 @@ void rs690_pm_info(struct radeon_device *rdev)
129 rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); 129 rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
130} 130}
131 131
132void rs690_vram_info(struct radeon_device *rdev) 132void rs690_mc_init(struct radeon_device *rdev)
133{ 133{
134 fixed20_12 a; 134 fixed20_12 a;
135 u64 base;
135 136
136 rs400_gart_adjust_size(rdev); 137 rs400_gart_adjust_size(rdev);
137
138 rdev->mc.vram_is_ddr = true; 138 rdev->mc.vram_is_ddr = true;
139 rdev->mc.vram_width = 128; 139 rdev->mc.vram_width = 128;
140
141 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 140 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
142 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 141 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
143
144 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 142 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
145 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 143 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
146 144 rdev->mc.visible_vram_size = rdev->mc.aper_size;
147 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) 145 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
148 rdev->mc.mc_vram_size = rdev->mc.aper_size; 146 base = G_000100_MC_FB_START(base) << 16;
149
150 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
151 rdev->mc.real_vram_size = rdev->mc.aper_size;
152
153 rs690_pm_info(rdev); 147 rs690_pm_info(rdev);
154 /* FIXME: we should enforce default clock in case GPU is not in 148 /* FIXME: we should enforce default clock in case GPU is not in
155 * default setup 149 * default setup
@@ -160,22 +154,9 @@ void rs690_vram_info(struct radeon_device *rdev)
160 a.full = rfixed_const(16); 154 a.full = rfixed_const(16);
161 /* core_bandwidth = sclk(Mhz) * 16 */ 155 /* core_bandwidth = sclk(Mhz) * 16 */
162 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); 156 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
163}
164
165static int rs690_mc_init(struct radeon_device *rdev)
166{
167 int r;
168 u32 tmp;
169
170 /* Setup GPU memory space */
171 tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
172 rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
173 rdev->mc.gtt_location = 0xFFFFFFFFUL;
174 r = radeon_mc_setup(rdev);
175 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 157 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
176 if (r) 158 radeon_vram_location(rdev, &rdev->mc, base);
177 return r; 159 radeon_gtt_location(rdev, &rdev->mc);
178 return 0;
179} 160}
180 161
181void rs690_line_buffer_adjust(struct radeon_device *rdev, 162void rs690_line_buffer_adjust(struct radeon_device *rdev,
@@ -728,12 +709,8 @@ int rs690_init(struct radeon_device *rdev)
728 radeon_get_clock_info(rdev->ddev); 709 radeon_get_clock_info(rdev->ddev);
729 /* Initialize power management */ 710 /* Initialize power management */
730 radeon_pm_init(rdev); 711 radeon_pm_init(rdev);
731 /* Get vram informations */ 712 /* initialize memory controller */
732 rs690_vram_info(rdev); 713 rs690_mc_init(rdev);
733 /* Initialize memory controller (also test AGP) */
734 r = rs690_mc_init(rdev);
735 if (r)
736 return r;
737 rv515_debugfs(rdev); 714 rv515_debugfs(rdev);
738 /* Fence driver */ 715 /* Fence driver */
739 r = radeon_fence_driver_init(rdev); 716 r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 0e1e6b8632b8..bea747da123f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -277,13 +277,15 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
277 } 277 }
278} 278}
279 279
280void rv515_vram_info(struct radeon_device *rdev) 280void rv515_mc_init(struct radeon_device *rdev)
281{ 281{
282 fixed20_12 a; 282 fixed20_12 a;
283 283
284 rv515_vram_get_type(rdev); 284 rv515_vram_get_type(rdev);
285
286 r100_vram_init_sizes(rdev); 285 r100_vram_init_sizes(rdev);
286 radeon_vram_location(rdev, &rdev->mc, 0);
287 if (!(rdev->flags & RADEON_IS_AGP))
288 radeon_gtt_location(rdev, &rdev->mc);
287 /* FIXME: we should enforce default clock in case GPU is not in 289 /* FIXME: we should enforce default clock in case GPU is not in
288 * default setup 290 * default setup
289 */ 291 */
@@ -587,12 +589,15 @@ int rv515_init(struct radeon_device *rdev)
587 radeon_get_clock_info(rdev->ddev); 589 radeon_get_clock_info(rdev->ddev);
588 /* Initialize power management */ 590 /* Initialize power management */
589 radeon_pm_init(rdev); 591 radeon_pm_init(rdev);
590 /* Get vram informations */ 592 /* initialize AGP */
591 rv515_vram_info(rdev); 593 if (rdev->flags & RADEON_IS_AGP) {
592 /* Initialize memory controller (also test AGP) */ 594 r = radeon_agp_init(rdev);
593 r = r420_mc_init(rdev); 595 if (r) {
594 if (r) 596 radeon_agp_disable(rdev);
595 return r; 597 }
598 }
599 /* initialize memory controller */
600 rv515_mc_init(rdev);
596 rv515_debugfs(rdev); 601 rv515_debugfs(rdev);
597 /* Fence driver */ 602 /* Fence driver */
598 r = radeon_fence_driver_init(rdev); 603 r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 03021674d097..37887dee12af 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -56,6 +56,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
56 r = radeon_gart_table_vram_pin(rdev); 56 r = radeon_gart_table_vram_pin(rdev);
57 if (r) 57 if (r)
58 return r; 58 return r;
59 radeon_gart_restore(rdev);
59 /* Setup L2 cache */ 60 /* Setup L2 cache */
60 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 61 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
61 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 62 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
@@ -273,9 +274,10 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
273/* 274/*
274 * Core functions 275 * Core functions
275 */ 276 */
276static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, 277static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
277 u32 num_backends, 278 u32 num_tile_pipes,
278 u32 backend_disable_mask) 279 u32 num_backends,
280 u32 backend_disable_mask)
279{ 281{
280 u32 backend_map = 0; 282 u32 backend_map = 0;
281 u32 enabled_backends_mask; 283 u32 enabled_backends_mask;
@@ -284,6 +286,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
284 u32 swizzle_pipe[R7XX_MAX_PIPES]; 286 u32 swizzle_pipe[R7XX_MAX_PIPES];
285 u32 cur_backend; 287 u32 cur_backend;
286 u32 i; 288 u32 i;
289 bool force_no_swizzle;
287 290
288 if (num_tile_pipes > R7XX_MAX_PIPES) 291 if (num_tile_pipes > R7XX_MAX_PIPES)
289 num_tile_pipes = R7XX_MAX_PIPES; 292 num_tile_pipes = R7XX_MAX_PIPES;
@@ -313,6 +316,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
313 if (enabled_backends_count != num_backends) 316 if (enabled_backends_count != num_backends)
314 num_backends = enabled_backends_count; 317 num_backends = enabled_backends_count;
315 318
319 switch (rdev->family) {
320 case CHIP_RV770:
321 case CHIP_RV730:
322 force_no_swizzle = false;
323 break;
324 case CHIP_RV710:
325 case CHIP_RV740:
326 default:
327 force_no_swizzle = true;
328 break;
329 }
330
316 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES); 331 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
317 switch (num_tile_pipes) { 332 switch (num_tile_pipes) {
318 case 1: 333 case 1:
@@ -323,49 +338,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
323 swizzle_pipe[1] = 1; 338 swizzle_pipe[1] = 1;
324 break; 339 break;
325 case 3: 340 case 3:
326 swizzle_pipe[0] = 0; 341 if (force_no_swizzle) {
327 swizzle_pipe[1] = 2; 342 swizzle_pipe[0] = 0;
328 swizzle_pipe[2] = 1; 343 swizzle_pipe[1] = 1;
344 swizzle_pipe[2] = 2;
345 } else {
346 swizzle_pipe[0] = 0;
347 swizzle_pipe[1] = 2;
348 swizzle_pipe[2] = 1;
349 }
329 break; 350 break;
330 case 4: 351 case 4:
331 swizzle_pipe[0] = 0; 352 if (force_no_swizzle) {
332 swizzle_pipe[1] = 2; 353 swizzle_pipe[0] = 0;
333 swizzle_pipe[2] = 3; 354 swizzle_pipe[1] = 1;
334 swizzle_pipe[3] = 1; 355 swizzle_pipe[2] = 2;
356 swizzle_pipe[3] = 3;
357 } else {
358 swizzle_pipe[0] = 0;
359 swizzle_pipe[1] = 2;
360 swizzle_pipe[2] = 3;
361 swizzle_pipe[3] = 1;
362 }
335 break; 363 break;
336 case 5: 364 case 5:
337 swizzle_pipe[0] = 0; 365 if (force_no_swizzle) {
338 swizzle_pipe[1] = 2; 366 swizzle_pipe[0] = 0;
339 swizzle_pipe[2] = 4; 367 swizzle_pipe[1] = 1;
340 swizzle_pipe[3] = 1; 368 swizzle_pipe[2] = 2;
341 swizzle_pipe[4] = 3; 369 swizzle_pipe[3] = 3;
370 swizzle_pipe[4] = 4;
371 } else {
372 swizzle_pipe[0] = 0;
373 swizzle_pipe[1] = 2;
374 swizzle_pipe[2] = 4;
375 swizzle_pipe[3] = 1;
376 swizzle_pipe[4] = 3;
377 }
342 break; 378 break;
343 case 6: 379 case 6:
344 swizzle_pipe[0] = 0; 380 if (force_no_swizzle) {
345 swizzle_pipe[1] = 2; 381 swizzle_pipe[0] = 0;
346 swizzle_pipe[2] = 4; 382 swizzle_pipe[1] = 1;
347 swizzle_pipe[3] = 5; 383 swizzle_pipe[2] = 2;
348 swizzle_pipe[4] = 3; 384 swizzle_pipe[3] = 3;
349 swizzle_pipe[5] = 1; 385 swizzle_pipe[4] = 4;
386 swizzle_pipe[5] = 5;
387 } else {
388 swizzle_pipe[0] = 0;
389 swizzle_pipe[1] = 2;
390 swizzle_pipe[2] = 4;
391 swizzle_pipe[3] = 5;
392 swizzle_pipe[4] = 3;
393 swizzle_pipe[5] = 1;
394 }
350 break; 395 break;
351 case 7: 396 case 7:
352 swizzle_pipe[0] = 0; 397 if (force_no_swizzle) {
353 swizzle_pipe[1] = 2; 398 swizzle_pipe[0] = 0;
354 swizzle_pipe[2] = 4; 399 swizzle_pipe[1] = 1;
355 swizzle_pipe[3] = 6; 400 swizzle_pipe[2] = 2;
356 swizzle_pipe[4] = 3; 401 swizzle_pipe[3] = 3;
357 swizzle_pipe[5] = 1; 402 swizzle_pipe[4] = 4;
358 swizzle_pipe[6] = 5; 403 swizzle_pipe[5] = 5;
404 swizzle_pipe[6] = 6;
405 } else {
406 swizzle_pipe[0] = 0;
407 swizzle_pipe[1] = 2;
408 swizzle_pipe[2] = 4;
409 swizzle_pipe[3] = 6;
410 swizzle_pipe[4] = 3;
411 swizzle_pipe[5] = 1;
412 swizzle_pipe[6] = 5;
413 }
359 break; 414 break;
360 case 8: 415 case 8:
361 swizzle_pipe[0] = 0; 416 if (force_no_swizzle) {
362 swizzle_pipe[1] = 2; 417 swizzle_pipe[0] = 0;
363 swizzle_pipe[2] = 4; 418 swizzle_pipe[1] = 1;
364 swizzle_pipe[3] = 6; 419 swizzle_pipe[2] = 2;
365 swizzle_pipe[4] = 3; 420 swizzle_pipe[3] = 3;
366 swizzle_pipe[5] = 1; 421 swizzle_pipe[4] = 4;
367 swizzle_pipe[6] = 7; 422 swizzle_pipe[5] = 5;
368 swizzle_pipe[7] = 5; 423 swizzle_pipe[6] = 6;
424 swizzle_pipe[7] = 7;
425 } else {
426 swizzle_pipe[0] = 0;
427 swizzle_pipe[1] = 2;
428 swizzle_pipe[2] = 4;
429 swizzle_pipe[3] = 6;
430 swizzle_pipe[4] = 3;
431 swizzle_pipe[5] = 1;
432 swizzle_pipe[6] = 7;
433 swizzle_pipe[7] = 5;
434 }
369 break; 435 break;
370 } 436 }
371 437
@@ -385,8 +451,10 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
385static void rv770_gpu_init(struct radeon_device *rdev) 451static void rv770_gpu_init(struct radeon_device *rdev)
386{ 452{
387 int i, j, num_qd_pipes; 453 int i, j, num_qd_pipes;
454 u32 ta_aux_cntl;
388 u32 sx_debug_1; 455 u32 sx_debug_1;
389 u32 smx_dc_ctl0; 456 u32 smx_dc_ctl0;
457 u32 db_debug3;
390 u32 num_gs_verts_per_thread; 458 u32 num_gs_verts_per_thread;
391 u32 vgt_gs_per_es; 459 u32 vgt_gs_per_es;
392 u32 gs_prim_buffer_depth = 0; 460 u32 gs_prim_buffer_depth = 0;
@@ -515,6 +583,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
515 583
516 switch (rdev->config.rv770.max_tile_pipes) { 584 switch (rdev->config.rv770.max_tile_pipes) {
517 case 1: 585 case 1:
586 default:
518 gb_tiling_config |= PIPE_TILING(0); 587 gb_tiling_config |= PIPE_TILING(0);
519 break; 588 break;
520 case 2: 589 case 2:
@@ -526,16 +595,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
526 case 8: 595 case 8:
527 gb_tiling_config |= PIPE_TILING(3); 596 gb_tiling_config |= PIPE_TILING(3);
528 break; 597 break;
529 default:
530 break;
531 } 598 }
599 rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
532 600
533 if (rdev->family == CHIP_RV770) 601 if (rdev->family == CHIP_RV770)
534 gb_tiling_config |= BANK_TILING(1); 602 gb_tiling_config |= BANK_TILING(1);
535 else 603 else
536 gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 604 gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
605 rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
537 606
538 gb_tiling_config |= GROUP_SIZE(0); 607 gb_tiling_config |= GROUP_SIZE(0);
608 rdev->config.rv770.tiling_group_size = 256;
539 609
540 if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { 610 if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
541 gb_tiling_config |= ROW_TILING(3); 611 gb_tiling_config |= ROW_TILING(3);
@@ -549,21 +619,27 @@ static void rv770_gpu_init(struct radeon_device *rdev)
549 619
550 gb_tiling_config |= BANK_SWAPS(1); 620 gb_tiling_config |= BANK_SWAPS(1);
551 621
552 if (rdev->family == CHIP_RV740) 622 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
553 backend_map = 0x28; 623 cc_rb_backend_disable |=
554 else 624 BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
555 backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
556 rdev->config.rv770.max_backends,
557 (0xff << rdev->config.rv770.max_backends) & 0xff);
558 gb_tiling_config |= BACKEND_MAP(backend_map);
559 625
560 cc_gc_shader_pipe_config = 626 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
627 cc_gc_shader_pipe_config |=
561 INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK); 628 INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
562 cc_gc_shader_pipe_config |= 629 cc_gc_shader_pipe_config |=
563 INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK); 630 INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
564 631
565 cc_rb_backend_disable = 632 if (rdev->family == CHIP_RV740)
566 BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK); 633 backend_map = 0x28;
634 else
635 backend_map = r700_get_tile_pipe_to_backend_map(rdev,
636 rdev->config.rv770.max_tile_pipes,
637 (R7XX_MAX_BACKENDS -
638 r600_count_pipe_bits((cc_rb_backend_disable &
639 R7XX_MAX_BACKENDS_MASK) >> 16)),
640 (cc_rb_backend_disable >> 16));
641 gb_tiling_config |= BACKEND_MAP(backend_map);
642
567 643
568 WREG32(GB_TILING_CONFIG, gb_tiling_config); 644 WREG32(GB_TILING_CONFIG, gb_tiling_config);
569 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 645 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
@@ -571,16 +647,13 @@ static void rv770_gpu_init(struct radeon_device *rdev)
571 647
572 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 648 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
573 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 649 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
574 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 650 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
575 651
576 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
577 WREG32(CGTS_SYS_TCC_DISABLE, 0); 652 WREG32(CGTS_SYS_TCC_DISABLE, 0);
578 WREG32(CGTS_TCC_DISABLE, 0); 653 WREG32(CGTS_TCC_DISABLE, 0);
579 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
580 WREG32(CGTS_USER_TCC_DISABLE, 0);
581 654
582 num_qd_pipes = 655 num_qd_pipes =
583 R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK); 656 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
584 WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK); 657 WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
585 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK); 658 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
586 659
@@ -590,10 +663,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
590 663
591 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); 664 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
592 665
593 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | 666 ta_aux_cntl = RREG32(TA_CNTL_AUX);
594 SYNC_GRADIENT | 667 WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);
595 SYNC_WALKER |
596 SYNC_ALIGNER));
597 668
598 sx_debug_1 = RREG32(SX_DEBUG_1); 669 sx_debug_1 = RREG32(SX_DEBUG_1);
599 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; 670 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
@@ -604,14 +675,28 @@ static void rv770_gpu_init(struct radeon_device *rdev)
604 smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1); 675 smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
605 WREG32(SMX_DC_CTL0, smx_dc_ctl0); 676 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
606 677
607 WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) | 678 if (rdev->family != CHIP_RV740)
608 GS_FLUSH_CTL(4) | 679 WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
609 ACK_FLUSH_CTL(3) | 680 GS_FLUSH_CTL(4) |
610 SYNC_FLUSH_CTL)); 681 ACK_FLUSH_CTL(3) |
682 SYNC_FLUSH_CTL));
611 683
612 if (rdev->family == CHIP_RV770) 684 db_debug3 = RREG32(DB_DEBUG3);
613 WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f)); 685 db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
614 else { 686 switch (rdev->family) {
687 case CHIP_RV770:
688 case CHIP_RV740:
689 db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
690 break;
691 case CHIP_RV710:
692 case CHIP_RV730:
693 default:
694 db_debug3 |= DB_CLK_OFF_DELAY(2);
695 break;
696 }
697 WREG32(DB_DEBUG3, db_debug3);
698
699 if (rdev->family != CHIP_RV770) {
615 db_debug4 = RREG32(DB_DEBUG4); 700 db_debug4 = RREG32(DB_DEBUG4);
616 db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER; 701 db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
617 WREG32(DB_DEBUG4, db_debug4); 702 WREG32(DB_DEBUG4, db_debug4);
@@ -640,10 +725,10 @@ static void rv770_gpu_init(struct radeon_device *rdev)
640 ALU_UPDATE_FIFO_HIWATER(0x8)); 725 ALU_UPDATE_FIFO_HIWATER(0x8));
641 switch (rdev->family) { 726 switch (rdev->family) {
642 case CHIP_RV770: 727 case CHIP_RV770:
643 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
644 break;
645 case CHIP_RV730: 728 case CHIP_RV730:
646 case CHIP_RV710: 729 case CHIP_RV710:
730 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
731 break;
647 case CHIP_RV740: 732 case CHIP_RV740:
648 default: 733 default:
649 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4); 734 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
@@ -816,45 +901,13 @@ int rv770_mc_init(struct radeon_device *rdev)
816 /* Setup GPU memory space */ 901 /* Setup GPU memory space */
817 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 902 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
818 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 903 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
819 904 rdev->mc.visible_vram_size = rdev->mc.aper_size;
820 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) 905 /* FIXME remove this once we support unmappable VRAM */
906 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
821 rdev->mc.mc_vram_size = rdev->mc.aper_size; 907 rdev->mc.mc_vram_size = rdev->mc.aper_size;
822
823 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
824 rdev->mc.real_vram_size = rdev->mc.aper_size; 908 rdev->mc.real_vram_size = rdev->mc.aper_size;
825
826 if (rdev->flags & RADEON_IS_AGP) {
827 /* gtt_size is setup by radeon_agp_init */
828 rdev->mc.gtt_location = rdev->mc.agp_base;
829 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
830 /* Try to put vram before or after AGP because we
831 * we want SYSTEM_APERTURE to cover both VRAM and
832 * AGP so that GPU can catch out of VRAM/AGP access
833 */
834 if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
835 /* Enough place before */
836 rdev->mc.vram_location = rdev->mc.gtt_location -
837 rdev->mc.mc_vram_size;
838 } else if (tmp > rdev->mc.mc_vram_size) {
839 /* Enough place after */
840 rdev->mc.vram_location = rdev->mc.gtt_location +
841 rdev->mc.gtt_size;
842 } else {
843 /* Try to setup VRAM then AGP might not
844 * not work on some card
845 */
846 rdev->mc.vram_location = 0x00000000UL;
847 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
848 }
849 } else {
850 rdev->mc.vram_location = 0x00000000UL;
851 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
852 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
853 } 909 }
854 rdev->mc.vram_start = rdev->mc.vram_location; 910 r600_vram_gtt_location(rdev, &rdev->mc);
855 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
856 rdev->mc.gtt_start = rdev->mc.gtt_location;
857 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
858 /* FIXME: we should enforce default clock in case GPU is not in 911 /* FIXME: we should enforce default clock in case GPU is not in
859 * default setup 912 * default setup
860 */ 913 */
@@ -863,6 +916,7 @@ int rv770_mc_init(struct radeon_device *rdev)
863 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 916 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
864 return 0; 917 return 0;
865} 918}
919
866int rv770_gpu_reset(struct radeon_device *rdev) 920int rv770_gpu_reset(struct radeon_device *rdev)
867{ 921{
868 /* FIXME: implement any rv770 specific bits */ 922 /* FIXME: implement any rv770 specific bits */
@@ -1038,6 +1092,7 @@ int rv770_init(struct radeon_device *rdev)
1038 r = radeon_fence_driver_init(rdev); 1092 r = radeon_fence_driver_init(rdev);
1039 if (r) 1093 if (r)
1040 return r; 1094 return r;
1095 /* initialize AGP */
1041 if (rdev->flags & RADEON_IS_AGP) { 1096 if (rdev->flags & RADEON_IS_AGP) {
1042 r = radeon_agp_init(rdev); 1097 r = radeon_agp_init(rdev);
1043 if (r) 1098 if (r)
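
The rv770.c hunks above make the pipe-to-backend mapping family-aware: RV770/RV730 keep the interleaved swizzle orders, while RV710/RV740 (force_no_swizzle) now get a plain identity order. Below is a small stand-alone C sketch of just that table selection, with the orders copied from the switch statement above; the build_swizzle() function name and the surrounding scaffolding are not part of the kernel code.

/* Sketch of the swizzle_pipe selection added above: identity order when
 * force_no_swizzle is set (RV710/RV740 in the diff), otherwise the
 * interleaved orders from the switch statement. Illustration only. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define R7XX_MAX_PIPES 8

static void build_swizzle(unsigned int n, bool force_no_swizzle,
			  unsigned int swizzle_pipe[R7XX_MAX_PIPES])
{
	static const unsigned int swizzled[R7XX_MAX_PIPES + 1][R7XX_MAX_PIPES] = {
		[1] = { 0 },
		[2] = { 0, 1 },
		[3] = { 0, 2, 1 },
		[4] = { 0, 2, 3, 1 },
		[5] = { 0, 2, 4, 1, 3 },
		[6] = { 0, 2, 4, 5, 3, 1 },
		[7] = { 0, 2, 4, 6, 3, 1, 5 },
		[8] = { 0, 2, 4, 6, 3, 1, 7, 5 },
	};
	unsigned int i;

	if (n > R7XX_MAX_PIPES)
		n = R7XX_MAX_PIPES;
	memset(swizzle_pipe, 0, sizeof(unsigned int) * R7XX_MAX_PIPES);
	for (i = 0; i < n; i++)
		swizzle_pipe[i] = force_no_swizzle ? i : swizzled[n][i];
}

int main(void)
{
	unsigned int pipes[R7XX_MAX_PIPES];
	unsigned int i;

	build_swizzle(4, false, pipes);		/* e.g. a 4-pipe part with swizzling */
	for (i = 0; i < 4; i++)
		printf("%u ", pipes[i]);	/* prints: 0 2 3 1 */
	printf("\n");
	return 0;
}
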
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index a1367ab6f261..9506f8cb99e0 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -343,4 +343,6 @@
343 343
344#define WAIT_UNTIL 0x8040 344#define WAIT_UNTIL 0x8040
345 345
346#define SRBM_STATUS 0x0E50
347
346#endif 348#endif
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 3d47a2c12322..a759170763bb 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -480,7 +480,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
480 void *from_virtual; 480 void *from_virtual;
481 void *to_virtual; 481 void *to_virtual;
482 int i; 482 int i;
483 int ret; 483 int ret = -ENOMEM;
484 484
485 if (ttm->page_flags & TTM_PAGE_FLAG_USER) { 485 if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
486 ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start, 486 ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
@@ -499,8 +499,10 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
499 499
500 for (i = 0; i < ttm->num_pages; ++i) { 500 for (i = 0; i < ttm->num_pages; ++i) {
501 from_page = read_mapping_page(swap_space, i, NULL); 501 from_page = read_mapping_page(swap_space, i, NULL);
502 if (IS_ERR(from_page)) 502 if (IS_ERR(from_page)) {
503 ret = PTR_ERR(from_page);
503 goto out_err; 504 goto out_err;
505 }
504 to_page = __ttm_tt_get_page(ttm, i); 506 to_page = __ttm_tt_get_page(ttm, i);
505 if (unlikely(to_page == NULL)) 507 if (unlikely(to_page == NULL))
506 goto out_err; 508 goto out_err;
@@ -523,7 +525,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
523 return 0; 525 return 0;
524out_err: 526out_err:
525 ttm_tt_free_alloced_pages(ttm); 527 ttm_tt_free_alloced_pages(ttm);
526 return -ENOMEM; 528 return ret;
527} 529}
528 530
529int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) 531int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
@@ -535,6 +537,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
535 void *from_virtual; 537 void *from_virtual;
536 void *to_virtual; 538 void *to_virtual;
537 int i; 539 int i;
540 int ret = -ENOMEM;
538 541
539 BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated); 542 BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
540 BUG_ON(ttm->caching_state != tt_cached); 543 BUG_ON(ttm->caching_state != tt_cached);
@@ -557,7 +560,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
557 0); 560 0);
558 if (unlikely(IS_ERR(swap_storage))) { 561 if (unlikely(IS_ERR(swap_storage))) {
559 printk(KERN_ERR "Failed allocating swap storage.\n"); 562 printk(KERN_ERR "Failed allocating swap storage.\n");
560 return -ENOMEM; 563 return PTR_ERR(swap_storage);
561 } 564 }
562 } else 565 } else
563 swap_storage = persistant_swap_storage; 566 swap_storage = persistant_swap_storage;
@@ -569,9 +572,10 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
569 if (unlikely(from_page == NULL)) 572 if (unlikely(from_page == NULL))
570 continue; 573 continue;
571 to_page = read_mapping_page(swap_space, i, NULL); 574 to_page = read_mapping_page(swap_space, i, NULL);
572 if (unlikely(to_page == NULL)) 575 if (unlikely(IS_ERR(to_page))) {
576 ret = PTR_ERR(to_page);
573 goto out_err; 577 goto out_err;
574 578 }
575 preempt_disable(); 579 preempt_disable();
576 from_virtual = kmap_atomic(from_page, KM_USER0); 580 from_virtual = kmap_atomic(from_page, KM_USER0);
577 to_virtual = kmap_atomic(to_page, KM_USER1); 581 to_virtual = kmap_atomic(to_page, KM_USER1);
@@ -595,5 +599,5 @@ out_err:
595 if (!persistant_swap_storage) 599 if (!persistant_swap_storage)
596 fput(swap_storage); 600 fput(swap_storage);
597 601
598 return -ENOMEM; 602 return ret;
599} 603}
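
The ttm_tt.c changes above are about error propagation: instead of collapsing every failure in the swap-in/swap-out paths to -ENOMEM, the code now returns the error encoded in the pointer it got back (PTR_ERR). A minimal userspace re-creation of that ERR_PTR idiom follows; the macros below are simplified stand-ins written for this sketch, not the kernel's definitions.

/* Simplified re-creation of the ERR_PTR/IS_ERR/PTR_ERR idiom the fix above
 * relies on: keep the real errno instead of a blanket -ENOMEM.
 * These macros are sketch-local stand-ins, not the kernel headers. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Stand-in for read_mapping_page(): returns a page or an encoded error. */
static void *read_mapping_page_stub(int fail_errno)
{
	static char page[4096];

	return fail_errno ? ERR_PTR(-fail_errno) : (void *)page;
}

static int swapin_one(int fail_errno)
{
	void *page = read_mapping_page_stub(fail_errno);

	if (IS_ERR(page))
		return PTR_ERR(page);	/* propagate -EIO, -ENOMEM, ... as-is */
	return 0;
}

int main(void)
{
	printf("ok:  %d\n", swapin_one(0));	/* 0 */
	printf("eio: %d\n", swapin_one(EIO));	/* -5 rather than a misleading -12 */
	return 0;
}
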
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 0920492cea0a..61ab4daf0bbb 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -16,3 +16,14 @@ config VGA_ARB_MAX_GPUS
16 help 16 help
17 Reserves space in the kernel to maintain resource locking for 17 Reserves space in the kernel to maintain resource locking for
18 multiple GPUS. The overhead for each GPU is very small. 18 multiple GPUS. The overhead for each GPU is very small.
19
20config VGA_SWITCHEROO
21 bool "Laptop Hybrid Graphics - GPU switching support"
22 depends on X86
23 depends on ACPI
24 help
25 Many laptops released in 2008/9/10 have two GPUs with a multiplexer
26 to switch between them. This adds support for dynamic switching when
27 X isn't running and delayed switching until the next logoff. This
28 feature is called hybrid graphics, ATI PowerXpress, and Nvidia
29 HybridPower.
diff --git a/drivers/gpu/vga/Makefile b/drivers/gpu/vga/Makefile
index 7cc8c1ed645b..14ca30b75d0a 100644
--- a/drivers/gpu/vga/Makefile
+++ b/drivers/gpu/vga/Makefile
@@ -1 +1,2 @@
1obj-$(CONFIG_VGA_ARB) += vgaarb.o 1obj-$(CONFIG_VGA_ARB) += vgaarb.o
2obj-$(CONFIG_VGA_SWITCHEROO) += vga_switcheroo.o
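
The new vga_switcheroo.c added below exposes vga_switcheroo_register_client(pdev, set_gpu_state, can_switch) and a matching unregister call for GPU drivers. The following is a hypothetical userspace mock of that client-side contract, useful only to show the shape of the callbacks a driver supplies; struct pci_dev, mock_register_client() and the demo callbacks are stand-ins, not kernel or driver code.

/* Hypothetical mock of the client side of the interface added below:
 * a GPU driver hands vga_switcheroo a power callback and a "can we
 * switch now?" callback. All types and helpers here are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct pci_dev { const char *name; };
enum vga_switcheroo_state { VGA_SWITCHEROO_OFF, VGA_SWITCHEROO_ON };

static struct pci_dev *client_pdev;
static void (*client_set_gpu_state)(struct pci_dev *, enum vga_switcheroo_state);
static bool (*client_can_switch)(struct pci_dev *);

/* Same shape as vga_switcheroo_register_client() in the new file below. */
static int mock_register_client(struct pci_dev *pdev,
				void (*set_gpu_state)(struct pci_dev *, enum vga_switcheroo_state),
				bool (*can_switch)(struct pci_dev *))
{
	client_pdev = pdev;
	client_set_gpu_state = set_gpu_state;
	client_can_switch = can_switch;
	return 0;
}

/* What a driver's callbacks might look like (purely illustrative). */
static void demo_set_gpu_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	printf("%s: power %s\n", pdev->name, state == VGA_SWITCHEROO_ON ? "on" : "off");
}

static bool demo_can_switch(struct pci_dev *pdev)
{
	(void)pdev;
	return true;		/* e.g. no userspace clients holding the device open */
}

int main(void)
{
	struct pci_dev gpu = { "0000:01:00.0" };

	mock_register_client(&gpu, demo_set_gpu_state, demo_can_switch);
	if (client_can_switch(client_pdev))
		client_set_gpu_state(client_pdev, VGA_SWITCHEROO_OFF);
	return 0;
}
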
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
new file mode 100644
index 000000000000..d6d1149d525d
--- /dev/null
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -0,0 +1,450 @@
1/*
2 * Copyright (c) 2010 Red Hat Inc.
3 * Author : Dave Airlie <airlied@redhat.com>
4 *
5 *
6 * Licensed under GPLv2
7 *
8 * vga_switcheroo.c - Support for laptops with dual GPUs using one set of outputs
9
10 Switcher interface - methods required for ATPX and DCM
11 - switchto - this throws the output MUX switch
12 - discrete_set_power - sets the power state for the discrete card
13
14 GPU driver interface
15 - set_gpu_state - this should do the equivalent of suspend/resume for the card
16 - this should *not* set the discrete power state
17 - switch_check - check if the device is in a position to switch now
18 */
19
20#include <linux/module.h>
21#include <linux/dmi.h>
22#include <linux/seq_file.h>
23#include <linux/uaccess.h>
24#include <linux/fs.h>
25#include <linux/debugfs.h>
26#include <linux/fb.h>
27
28#include <linux/pci.h>
29#include <linux/vga_switcheroo.h>
30
31struct vga_switcheroo_client {
32 struct pci_dev *pdev;
33 struct fb_info *fb_info;
34 int pwr_state;
35 void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
36 bool (*can_switch)(struct pci_dev *pdev);
37 int id;
38 bool active;
39};
40
41static DEFINE_MUTEX(vgasr_mutex);
42
43struct vgasr_priv {
44
45 bool active;
46 bool delayed_switch_active;
47 enum vga_switcheroo_client_id delayed_client_id;
48
49 struct dentry *debugfs_root;
50 struct dentry *switch_file;
51
52 int registered_clients;
53 struct vga_switcheroo_client clients[VGA_SWITCHEROO_MAX_CLIENTS];
54
55 struct vga_switcheroo_handler *handler;
56};
57
58static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
59static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
60
61/* only one switcheroo per system */
62static struct vgasr_priv vgasr_priv;
63
64int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
65{
66 mutex_lock(&vgasr_mutex);
67 if (vgasr_priv.handler) {
68 mutex_unlock(&vgasr_mutex);
69 return -EINVAL;
70 }
71
72 vgasr_priv.handler = handler;
73 mutex_unlock(&vgasr_mutex);
74 return 0;
75}
76EXPORT_SYMBOL(vga_switcheroo_register_handler);
77
78void vga_switcheroo_unregister_handler(void)
79{
80 mutex_lock(&vgasr_mutex);
81 vgasr_priv.handler = NULL;
82 mutex_unlock(&vgasr_mutex);
83}
84EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
85
86static void vga_switcheroo_enable(void)
87{
88 int i;
89 int ret;
90 /* call the handler to init */
91 vgasr_priv.handler->init();
92
93 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
94 ret = vgasr_priv.handler->get_client_id(vgasr_priv.clients[i].pdev);
95 if (ret < 0)
96 return;
97
98 vgasr_priv.clients[i].id = ret;
99 }
100 vga_switcheroo_debugfs_init(&vgasr_priv);
101 vgasr_priv.active = true;
102}
103
104int vga_switcheroo_register_client(struct pci_dev *pdev,
105 void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
106 bool (*can_switch)(struct pci_dev *pdev))
107{
108 int index;
109
110 mutex_lock(&vgasr_mutex);
111 /* don't do IGD vs DIS here */
112 if (vgasr_priv.registered_clients & 1)
113 index = 1;
114 else
115 index = 0;
116
117 vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
118 vgasr_priv.clients[index].pdev = pdev;
119 vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
120 vgasr_priv.clients[index].can_switch = can_switch;
121 vgasr_priv.clients[index].id = -1;
122 if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
123 vgasr_priv.clients[index].active = true;
124
125 vgasr_priv.registered_clients |= (1 << index);
126
127 /* if we get two clients + handler */
128 if (vgasr_priv.registered_clients == 0x3 && vgasr_priv.handler) {
129 printk(KERN_INFO "vga_switcheroo: enabled\n");
130 vga_switcheroo_enable();
131 }
132 mutex_unlock(&vgasr_mutex);
133 return 0;
134}
135EXPORT_SYMBOL(vga_switcheroo_register_client);
136
137void vga_switcheroo_unregister_client(struct pci_dev *pdev)
138{
139 int i;
140
141 mutex_lock(&vgasr_mutex);
142 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
143 if (vgasr_priv.clients[i].pdev == pdev) {
144 vgasr_priv.registered_clients &= ~(1 << i);
145 break;
146 }
147 }
148
149 printk(KERN_INFO "vga_switcheroo: disabled\n");
150 vga_switcheroo_debugfs_fini(&vgasr_priv);
151 vgasr_priv.active = false;
152 mutex_unlock(&vgasr_mutex);
153}
154EXPORT_SYMBOL(vga_switcheroo_unregister_client);
155
156void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
157 struct fb_info *info)
158{
159 int i;
160
161 mutex_lock(&vgasr_mutex);
162 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
163 if (vgasr_priv.clients[i].pdev == pdev) {
164 vgasr_priv.clients[i].fb_info = info;
165 break;
166 }
167 }
168 mutex_unlock(&vgasr_mutex);
169}
170EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
171
172static int vga_switcheroo_show(struct seq_file *m, void *v)
173{
174 int i;
175 mutex_lock(&vgasr_mutex);
176 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
177 seq_printf(m, "%d:%c:%s:%s\n", i,
178 vgasr_priv.clients[i].active ? '+' : ' ',
179 vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
180 pci_name(vgasr_priv.clients[i].pdev));
181 }
182 mutex_unlock(&vgasr_mutex);
183 return 0;
184}
185
186static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
187{
188 return single_open(file, vga_switcheroo_show, NULL);
189}
190
191static int vga_switchon(struct vga_switcheroo_client *client)
192{
193 int ret;
194
195 ret = vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
196 /* call the driver callback to turn on device */
197 client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
198 client->pwr_state = VGA_SWITCHEROO_ON;
199 return 0;
200}
201
202static int vga_switchoff(struct vga_switcheroo_client *client)
203{
204 /* call the driver callback to turn off device */
205 client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
206 vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
207 client->pwr_state = VGA_SWITCHEROO_OFF;
208 return 0;
209}
210
211static int vga_switchto(struct vga_switcheroo_client *new_client)
212{
213 int ret;
214 int i;
215 struct vga_switcheroo_client *active = NULL;
216
217 if (new_client->active == true)
218 return 0;
219
220 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
221 if (vgasr_priv.clients[i].active == true) {
222 active = &vgasr_priv.clients[i];
223 break;
224 }
225 }
226 if (!active)
227 return 0;
228
229 /* power up the first device */
230 ret = pci_enable_device(new_client->pdev);
231 if (ret)
232 return ret;
233
234 if (new_client->pwr_state == VGA_SWITCHEROO_OFF)
235 vga_switchon(new_client);
236
237 /* swap shadow resource to denote boot VGA device has changed so X starts on new device */
238 active->active = false;
239
240 active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW;
241 new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
242
243 if (new_client->fb_info) {
244 struct fb_event event;
245 event.info = new_client->fb_info;
246 fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
247 }
248
249 ret = vgasr_priv.handler->switchto(new_client->id);
250 if (ret)
251 return ret;
252
253 if (active->pwr_state == VGA_SWITCHEROO_ON)
254 vga_switchoff(active);
255
256 new_client->active = true;
257 return 0;
258}
259
260static ssize_t
261vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
262 size_t cnt, loff_t *ppos)
263{
264 char usercmd[64];
265 const char *pdev_name;
266 int i, ret;
267 bool delay = false, can_switch;
268 int client_id = -1;
269 struct vga_switcheroo_client *client = NULL;
270
271 if (cnt > 63)
272 cnt = 63;
273
274 if (copy_from_user(usercmd, ubuf, cnt))
275 return -EFAULT;
276
277 mutex_lock(&vgasr_mutex);
278
279 if (!vgasr_priv.active)
280 return -EINVAL;
281
282 /* pwr off the device not in use */
283 if (strncmp(usercmd, "OFF", 3) == 0) {
284 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
285 if (vgasr_priv.clients[i].active)
286 continue;
287 if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_ON)
288 vga_switchoff(&vgasr_priv.clients[i]);
289 }
290 goto out;
291 }
292 /* pwr on the device not in use */
293 if (strncmp(usercmd, "ON", 2) == 0) {
294 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
295 if (vgasr_priv.clients[i].active)
296 continue;
297 if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_OFF)
298 vga_switchon(&vgasr_priv.clients[i]);
299 }
300 goto out;
301 }
302
303 /* request a delayed switch - test can we switch now */
304 if (strncmp(usercmd, "DIGD", 4) == 0) {
305 client_id = VGA_SWITCHEROO_IGD;
306 delay = true;
307 }
308
309 if (strncmp(usercmd, "DDIS", 4) == 0) {
310 client_id = VGA_SWITCHEROO_DIS;
311 delay = true;
312 }
313
314 if (strncmp(usercmd, "IGD", 3) == 0)
315 client_id = VGA_SWITCHEROO_IGD;
316
317 if (strncmp(usercmd, "DIS", 3) == 0)
318 client_id = VGA_SWITCHEROO_DIS;
319
320 if (client_id == -1)
321 goto out;
322
323 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
324 if (vgasr_priv.clients[i].id == client_id) {
325 client = &vgasr_priv.clients[i];
326 break;
327 }
328 }
329
330 vgasr_priv.delayed_switch_active = false;
331 /* okay we want a switch - test if devices are willing to switch */
332 can_switch = true;
333 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
334 can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
335 if (can_switch == false) {
336 printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
337 break;
338 }
339 }
340
341 if (can_switch == false && delay == false)
342 goto out;
343
344 if (can_switch == true) {
345 pdev_name = pci_name(client->pdev);
346 ret = vga_switchto(client);
347 if (ret)
348 printk(KERN_ERR "vga_switcheroo: switching failed %d\n", ret);
349 } else {
350 printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
351 vgasr_priv.delayed_switch_active = true;
352 vgasr_priv.delayed_client_id = client_id;
353
354 /* we should at least power up the card to
355 make the switch faster */
356 if (client->pwr_state == VGA_SWITCHEROO_OFF)
357 vga_switchon(client);
358 }
359
360out:
361 mutex_unlock(&vgasr_mutex);
362 return cnt;
363}
364
365static const struct file_operations vga_switcheroo_debugfs_fops = {
366 .owner = THIS_MODULE,
367 .open = vga_switcheroo_debugfs_open,
368 .write = vga_switcheroo_debugfs_write,
369 .read = seq_read,
370 .llseek = seq_lseek,
371 .release = single_release,
372};
373
374static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv)
375{
376 if (priv->switch_file) {
377 debugfs_remove(priv->switch_file);
378 priv->switch_file = NULL;
379 }
380 if (priv->debugfs_root) {
381 debugfs_remove(priv->debugfs_root);
382 priv->debugfs_root = NULL;
383 }
384}
385
386static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
387{
388 /* already initialised */
389 if (priv->debugfs_root)
390 return 0;
391 priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
392
393 if (!priv->debugfs_root) {
394 printk(KERN_ERR "vga_switcheroo: Cannot create /sys/kernel/debug/vgaswitcheroo\n");
395 goto fail;
396 }
397
398 priv->switch_file = debugfs_create_file("switch", 0644,
399 priv->debugfs_root, NULL, &vga_switcheroo_debugfs_fops);
400 if (!priv->switch_file) {
401 printk(KERN_ERR "vga_switcheroo: cannot create /sys/kernel/debug/vgaswitcheroo/switch\n");
402 goto fail;
403 }
404 return 0;
405fail:
406 vga_switcheroo_debugfs_fini(priv);
407 return -1;
408}
409
410int vga_switcheroo_process_delayed_switch(void)
411{
412 struct vga_switcheroo_client *client = NULL;
413 const char *pdev_name;
414 bool can_switch = true;
415 int i;
416 int ret;
417 int err = -EINVAL;
418
419 mutex_lock(&vgasr_mutex);
420 if (!vgasr_priv.delayed_switch_active)
421 goto err;
422
423 printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
424
425 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
426 if (vgasr_priv.clients[i].id == vgasr_priv.delayed_client_id)
427 client = &vgasr_priv.clients[i];
428 can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
429 if (can_switch == false) {
430 printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
431 break;
432 }
433 }
434
435 if (can_switch == false || client == NULL)
436 goto err;
437
438 pdev_name = pci_name(client->pdev);
439 ret = vga_switchto(client);
440 if (ret)
441 printk(KERN_ERR "vga_switcheroo: delayed switching failed %d\n", ret);
442
443 vgasr_priv.delayed_switch_active = false;
444 err = 0;
445err:
446 mutex_unlock(&vgasr_mutex);
447 return err;
448}
449EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
450
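To put the client interface above in context, here is a minimal sketch of how a GPU driver might hook into it. Only the vga_switcheroo_* entry points, their signatures and the VGA_SWITCHEROO_ON/OFF states are taken from the file above; the mydrv_* names, the private device structure and the power helpers are placeholders invented for illustration.

/*
 * Illustrative only: a hypothetical "mydrv" PCI GPU driver registering as a
 * vga_switcheroo client.  Everything prefixed mydrv_ is a placeholder.
 */
#include <linux/pci.h>
#include <linux/fb.h>
#include <linux/vga_switcheroo.h>

struct mydrv_device {
	struct pci_dev *pdev;
	struct fb_info *fb_info;
	bool busy;
};

/* Placeholder suspend/resume-equivalent helpers. */
static void mydrv_power_up(struct mydrv_device *mdev) { }
static void mydrv_power_down(struct mydrv_device *mdev) { }

/*
 * set_gpu_state callback: do the suspend/resume equivalent for the card.
 * The discrete power rail itself is toggled by the platform handler via
 * power_state(), not here.
 */
static void mydrv_switcheroo_set_state(struct pci_dev *pdev,
				       enum vga_switcheroo_state state)
{
	struct mydrv_device *mdev = pci_get_drvdata(pdev);

	if (state == VGA_SWITCHEROO_ON)
		mydrv_power_up(mdev);
	else
		mydrv_power_down(mdev);
}

/* can_switch callback: report whether a switch is safe at this moment. */
static bool mydrv_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct mydrv_device *mdev = pci_get_drvdata(pdev);

	return !mdev->busy;
}

static int mydrv_switcheroo_register(struct mydrv_device *mdev)
{
	int ret;

	ret = vga_switcheroo_register_client(mdev->pdev,
					     mydrv_switcheroo_set_state,
					     mydrv_switcheroo_can_switch);
	if (ret)
		return ret;

	/*
	 * Tell switcheroo which framebuffer belongs to this GPU, so it can
	 * ask fbcon to remap all consoles when the MUX is thrown.
	 */
	vga_switcheroo_client_fb_set(mdev->pdev, mdev->fb_info);
	return 0;
}

static void mydrv_switcheroo_unregister(struct mydrv_device *mdev)
{
	vga_switcheroo_unregister_client(mdev->pdev);
}

A client that wants deferred ("DIGD"/"DDIS") switching to work would additionally call vga_switcheroo_process_delayed_switch() from a point where userspace has released the device, typically the DRM lastclose path, so that a pending request can finally be carried out.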
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 3681c6a88212..b0a3fa00706d 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3025,6 +3025,20 @@ static int fbcon_fb_unregistered(struct fb_info *info)
 	return 0;
 }
 
+static void fbcon_remap_all(int idx)
+{
+	int i;
+	for (i = first_fb_vc; i <= last_fb_vc; i++)
+		set_con2fb_map(i, idx, 0);
+
+	if (con_is_bound(&fb_con)) {
+		printk(KERN_INFO "fbcon: Remapping primary device, "
+		       "fb%i, to tty %i-%i\n", idx,
+		       first_fb_vc + 1, last_fb_vc + 1);
+		info_idx = idx;
+	}
+}
+
 #ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
 static void fbcon_select_primary(struct fb_info *info)
 {
@@ -3225,6 +3239,10 @@ static int fbcon_event_notify(struct notifier_block *self,
 		caps = event->data;
 		fbcon_get_requirement(info, caps);
 		break;
+	case FB_EVENT_REMAP_ALL_CONSOLE:
+		idx = info->node;
+		fbcon_remap_all(idx);
+		break;
 	}
 done:
 	return ret;
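The contract behind the new FB_EVENT_REMAP_ALL_CONSOLE case is small: the sender points event.info at the fb_info that should now own the consoles, and fbcon_remap_all() maps every virtual console in the first_fb_vc..last_fb_vc range onto that device's node. A minimal sketch of the sending side, mirroring what vga_switchto() does above once the MUX has been thrown; the remap_consoles_to() wrapper and its target parameter are invented for illustration:

#include <linux/fb.h>

/* Ask fbcon to move every console it drives onto 'target'. */
static void remap_consoles_to(struct fb_info *target)
{
	struct fb_event event;

	event.info = target;
	fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
}

fbcon answers by calling set_con2fb_map() for each console, so text output follows the newly selected GPU.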
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 99bbd282ce63..a15b44e9c003 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1513,7 +1513,6 @@ register_framebuffer(struct fb_info *fb_info)
 				       fb_info->fix.id,
 				       registered_fb[i]->fix.id);
 				unregister_framebuffer(registered_fb[i]);
-				break;
 			}
 		}
 	}
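Finally, the other side of the picture: the platform ("switcher") handler that vga_switcheroo drives for ATPX/DCM-style hardware. The callback names below (init, get_client_id, switchto, power_state) are taken from how vga_switcheroo.c dereferences the handler; their exact prototypes and the struct layout live in the new <linux/vga_switcheroo.h> header, which is not part of this excerpt, so treat this as an inferred sketch. Everything prefixed myplat_ is a placeholder.

/*
 * Hypothetical ATPX/DCM-style handler; callback shapes inferred from the
 * calls vga_switcheroo.c makes through vgasr_priv.handler.  All myplat_*
 * symbols are placeholders.
 */
#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

static int myplat_switcheroo_init(void)
{
	/* one-time setup, called once both clients and the handler exist */
	return 0;
}

static int myplat_switcheroo_get_client_id(struct pci_dev *pdev)
{
	/* decide which registered device is integrated and which is
	 * discrete, e.g. from firmware tables or the PCI vendor ID */
	return pdev->vendor == PCI_VENDOR_ID_INTEL ?
		VGA_SWITCHEROO_IGD : VGA_SWITCHEROO_DIS;
}

static int myplat_switcheroo_switchto(enum vga_switcheroo_client_id id)
{
	/* reprogram the output MUX towards the given client */
	return 0;
}

static int myplat_switcheroo_power_state(enum vga_switcheroo_client_id id,
					 enum vga_switcheroo_state state)
{
	/* flip the discrete GPU's power rail on or off */
	return 0;
}

static struct vga_switcheroo_handler myplat_handler = {
	.init		= myplat_switcheroo_init,
	.get_client_id	= myplat_switcheroo_get_client_id,
	.switchto	= myplat_switcheroo_switchto,
	.power_state	= myplat_switcheroo_power_state,
};

/* Fails with -EINVAL if another handler is already registered. */
static int myplat_register(void)
{
	return vga_switcheroo_register_handler(&myplat_handler);
}

vga_switcheroo only becomes active once both GPU clients and a handler are registered (the registered_clients == 0x3 && handler test above); get_client_id() then labels the clients as IGD or DIS, and the switch file appears under /sys/kernel/debug/vgaswitcheroo, accepting the ON, OFF, IGD, DIS, DIGD and DDIS commands handled in vga_switcheroo_debugfs_write().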