author	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 14:14:52 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 14:14:52 -0400
commit	59534f7298c5e28aaa64e6ed550e247f64ee72ae (patch)
tree	b9fef7756abf897d9e1b10950cdf10bf6dfe5cb7 /drivers
parent	ac3ee84c604502240122c47b52f0542ec8774f15 (diff)
parent	b486787ee4797d6e42a9bd3a6f079385ad0f4472 (diff)
Merge branch 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (207 commits)
  drm/radeon/kms/pm/r600: select the mid clock mode for single head low profile
  drm/radeon: fix power supply kconfig interaction.
  drm/radeon/kms: record object that have been list reserved
  drm/radeon: AGP memory is only I/O if the aperture can be mapped by the CPU.
  drm/radeon/kms: don't default display priority to high on rs4xx
  drm/edid: fix typo in 1600x1200@75 mode
  drm/nouveau: fix i2c-related init table handlers
  drm/nouveau: support init table i2c device identifier 0x81
  drm/nouveau: ensure we've parsed i2c table entry for INIT_*I2C* handlers
  drm/nouveau: display error message for any failed init table opcode
  drm/nouveau: fix init table handlers to return proper error codes
  drm/nv50: support fractional feedback divider on newer chips
  drm/nv50: fix monitor detection on certain chipsets
  drm/nv50: store full dcb i2c entry from vbios
  drm/nv50: fix suspend/resume with DP outputs
  drm/nv50: output calculated crtc pll when debugging on
  drm/nouveau: dump pll limits entries when debugging is on
  drm/nouveau: bios parser fixes for eDP boards
  drm/nouveau: fix a nouveau_bo dereference after it's been destroyed
  drm/nv40: remove some completed ctxprog TODOs
  ...
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/char/agp/agp.h | 80
-rw-r--r-- drivers/char/agp/ali-agp.c | 1
-rw-r--r-- drivers/char/agp/amd-k7-agp.c | 9
-rw-r--r-- drivers/char/agp/amd64-agp.c | 28
-rw-r--r-- drivers/char/agp/ati-agp.c | 8
-rw-r--r-- drivers/char/agp/efficeon-agp.c | 1
-rw-r--r-- drivers/char/agp/intel-agp.c | 1883
-rw-r--r-- drivers/char/agp/intel-agp.h | 239
-rw-r--r-- drivers/char/agp/intel-gtt.c | 1516
-rw-r--r-- drivers/char/agp/nvidia-agp.c | 1
-rw-r--r-- drivers/char/agp/sis-agp.c | 9
-rw-r--r-- drivers/char/agp/uninorth-agp.c | 16
-rw-r--r-- drivers/char/agp/via-agp.c | 2
-rw-r--r-- drivers/gpu/drm/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/drm_auth.c | 3
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 9
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 490
-rw-r--r-- drivers/gpu/drm/drm_dma.c | 4
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 802
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 908
-rw-r--r-- drivers/gpu/drm/drm_fops.c | 3
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 49
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 105
-rw-r--r-- drivers/gpu/drm/drm_sysfs.c | 2
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 2
-rw-r--r-- drivers/gpu/drm/i915/dvo.h | 10
-rw-r--r-- drivers/gpu/drm/i915/dvo_ch7017.c | 46
-rw-r--r-- drivers/gpu/drm/i915/dvo_ch7xxx.c | 44
-rw-r--r-- drivers/gpu/drm/i915/dvo_ivch.c | 21
-rw-r--r-- drivers/gpu/drm/i915/dvo_sil164.c | 38
-rw-r--r-- drivers/gpu/drm/i915/dvo_tfp410.c | 32
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 28
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 15
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 29
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 38
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 154
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_debug.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 23
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 143
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 41
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 88
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 93
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 1064
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 256
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 31
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 103
-rw-r--r-- drivers/gpu/drm/i915/intel_fb.c | 217
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 71
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 111
-rw-r--r-- drivers/gpu/drm/i915/intel_modes.c | 21
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 1009
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 185
-rw-r--r-- drivers/gpu/drm/nouveau/Makefile | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.c | 500
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 116
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_debugfs.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 42
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.c | 19
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_encoder.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fb.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 259
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.h | 19
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_grctx.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_i2c.c | 21
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_irq.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_reg.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_state.c | 58
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_fbcon.c | 18
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_graph.c | 566
-rw-r--r-- drivers/gpu/drm/nouveau/nv40_grctx.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_calc.c | 87
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_crtc.c | 46
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.c | 36
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fbcon.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_sor.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/atombios.h | 76
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 22
-rw-r--r-- drivers/gpu/drm/radeon/atombios_dp.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 1549
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_reg.h | 4
-rw-r--r-- drivers/gpu/drm/radeon/evergreend.h | 556
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 729
-rw-r--r-- drivers/gpu/drm/radeon/r100d.h | 164
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 151
-rw-r--r-- drivers/gpu/drm/radeon/r300d.h | 47
-rw-r--r-- drivers/gpu/drm/radeon/r420.c | 36
-rw-r--r-- drivers/gpu/drm/radeon/r500_reg.h | 3
-rw-r--r-- drivers/gpu/drm/radeon/r520.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 600
-rw-r--r-- drivers/gpu/drm/radeon/r600_audio.c | 58
-rw-r--r-- drivers/gpu/drm/radeon/r600_blit_kms.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/r600_hdmi.c | 65
-rw-r--r-- drivers/gpu/drm/radeon/r600_reg.h | 57
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 258
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.c | 140
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 44
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 242
-rw-r--r-- drivers/gpu/drm/radeon/radeon_bios.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_combios.c | 64
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 63
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 61
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 129
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/radeon_encoders.c | 44
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fb.c | 358
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fence.c | 107
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fixed.h | 67
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gart.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_irq_kms.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 18
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 49
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 44
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 814
-rw-r--r-- drivers/gpu/drm/radeon/radeon_reg.h | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ring.c | 66
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 122
-rw-r--r-- drivers/gpu/drm/radeon/rs400.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 231
-rw-r--r-- drivers/gpu/drm/radeon/rs600d.h | 80
-rw-r--r-- drivers/gpu/drm/radeon/rs690.c | 289
-rw-r--r-- drivers/gpu/drm/radeon/rv515.c | 287
-rw-r--r-- drivers/gpu/drm/radeon/rv515d.h | 46
-rw-r--r-- drivers/gpu/drm/radeon/rv770.c | 28
-rw-r--r-- drivers/gpu/drm/savage/savage_bci.c | 3
-rw-r--r-- drivers/gpu/drm/ttm/Makefile | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 98
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 122
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_vm.c | 41
-rw-r--r-- drivers/gpu/drm/ttm/ttm_memory.c | 7
-rw-r--r-- drivers/gpu/drm/ttm/ttm_page_alloc.c | 845
-rw-r--r-- drivers/gpu/drm/ttm/ttm_tt.c | 44
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 50
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 14
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
-rw-r--r-- drivers/gpu/vga/Kconfig | 6
-rw-r--r-- drivers/video/efifb.c | 11
-rw-r--r-- drivers/video/fbmem.c | 74
-rw-r--r-- drivers/video/fbsysfs.c | 1
-rw-r--r-- drivers/video/offb.c | 28
-rw-r--r-- drivers/video/vesafb.c | 11
-rw-r--r-- drivers/video/vga16fb.c | 26
155 files changed, 14018 insertions, 7209 deletions
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 870f12cfed93..120490949997 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -178,86 +178,6 @@ struct agp_bridge_data {
 #define PGE_EMPTY(b, p)	(!(p) || (p) == (unsigned long) (b)->scratch_page)
 
 
-/* Intel registers */
-#define INTEL_APSIZE	0xb4
-#define INTEL_ATTBASE	0xb8
-#define INTEL_AGPCTRL	0xb0
-#define INTEL_NBXCFG	0x50
-#define INTEL_ERRSTS	0x91
-
-/* Intel i830 registers */
-#define I830_GMCH_CTRL	0x52
-#define I830_GMCH_ENABLED	0x4
-#define I830_GMCH_MEM_MASK	0x1
-#define I830_GMCH_MEM_64M	0x1
-#define I830_GMCH_MEM_128M	0
-#define I830_GMCH_GMS_MASK	0x70
-#define I830_GMCH_GMS_DISABLED	0x00
-#define I830_GMCH_GMS_LOCAL	0x10
-#define I830_GMCH_GMS_STOLEN_512	0x20
-#define I830_GMCH_GMS_STOLEN_1024	0x30
-#define I830_GMCH_GMS_STOLEN_8192	0x40
-#define I830_RDRAM_CHANNEL_TYPE	0x03010
-#define I830_RDRAM_ND(x)	(((x) & 0x20) >> 5)
-#define I830_RDRAM_DDT(x)	(((x) & 0x18) >> 3)
-
-/* This one is for I830MP w. an external graphic card */
-#define INTEL_I830_ERRSTS	0x92
-
-/* Intel 855GM/852GM registers */
-#define I855_GMCH_GMS_MASK	0xF0
-#define I855_GMCH_GMS_STOLEN_0M	0x0
-#define I855_GMCH_GMS_STOLEN_1M	(0x1 << 4)
-#define I855_GMCH_GMS_STOLEN_4M	(0x2 << 4)
-#define I855_GMCH_GMS_STOLEN_8M	(0x3 << 4)
-#define I855_GMCH_GMS_STOLEN_16M	(0x4 << 4)
-#define I855_GMCH_GMS_STOLEN_32M	(0x5 << 4)
-#define I85X_CAPID	0x44
-#define I85X_VARIANT_MASK	0x7
-#define I85X_VARIANT_SHIFT	5
-#define I855_GME	0x0
-#define I855_GM	0x4
-#define I852_GME	0x2
-#define I852_GM	0x5
-
-/* Intel i845 registers */
-#define INTEL_I845_AGPM	0x51
-#define INTEL_I845_ERRSTS	0xc8
-
-/* Intel i860 registers */
-#define INTEL_I860_MCHCFG	0x50
-#define INTEL_I860_ERRSTS	0xc8
-
-/* Intel i810 registers */
-#define I810_GMADDR	0x10
-#define I810_MMADDR	0x14
-#define I810_PTE_BASE	0x10000
-#define I810_PTE_MAIN_UNCACHED	0x00000000
-#define I810_PTE_LOCAL	0x00000002
-#define I810_PTE_VALID	0x00000001
-#define I830_PTE_SYSTEM_CACHED	0x00000006
-#define I810_SMRAM_MISCC	0x70
-#define I810_GFX_MEM_WIN_SIZE	0x00010000
-#define I810_GFX_MEM_WIN_32M	0x00010000
-#define I810_GMS	0x000000c0
-#define I810_GMS_DISABLE	0x00000000
-#define I810_PGETBL_CTL	0x2020
-#define I810_PGETBL_ENABLED	0x00000001
-#define I965_PGETBL_SIZE_MASK	0x0000000e
-#define I965_PGETBL_SIZE_512KB	(0 << 1)
-#define I965_PGETBL_SIZE_256KB	(1 << 1)
-#define I965_PGETBL_SIZE_128KB	(2 << 1)
-#define I965_PGETBL_SIZE_1MB	(3 << 1)
-#define I965_PGETBL_SIZE_2MB	(4 << 1)
-#define I965_PGETBL_SIZE_1_5MB	(5 << 1)
-#define G33_PGETBL_SIZE_MASK	(3 << 8)
-#define G33_PGETBL_SIZE_1M	(1 << 8)
-#define G33_PGETBL_SIZE_2M	(2 << 8)
-
-#define I810_DRAM_CTL	0x3000
-#define I810_DRAM_ROW_0	0x00000001
-#define I810_DRAM_ROW_0_SDRAM	0x00000001
-
 struct agp_device_ids {
 	unsigned short device_id; /* first, to make table easier to read */
 	enum chipset_type chipset;
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index d2ce68f27e4b..fd793519ea2b 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -204,6 +204,7 @@ static const struct agp_bridge_driver ali_generic_bridge = {
 	.aperture_sizes		= ali_generic_sizes,
 	.size_type		= U32_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= ali_configure,
 	.fetch_size		= ali_fetch_size,
 	.cleanup		= ali_cleanup,
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index a7637d72cef6..b6b1568314c8 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -142,6 +142,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
 {
 	struct aper_size_info_lvl2 *value;
 	struct amd_page_map page_dir;
+	unsigned long __iomem *cur_gatt;
 	unsigned long addr;
 	int retval;
 	u32 temp;
@@ -178,6 +179,13 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
 		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
 	}
 
+	for (i = 0; i < value->num_entries; i++) {
+		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
+	}
+
 	return 0;
 }
 
@@ -375,6 +383,7 @@ static const struct agp_bridge_driver amd_irongate_driver = {
 	.aperture_sizes		= amd_irongate_sizes,
 	.size_type		= LVL2_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= amd_irongate_configure,
 	.fetch_size		= amd_irongate_fetch_size,
 	.cleanup		= amd_irongate_cleanup,
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index fd50ead59c79..67ea3a60de74 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -210,6 +210,7 @@ static const struct agp_bridge_driver amd_8151_driver = {
 	.aperture_sizes		= amd_8151_sizes,
 	.size_type		= U32_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= amd_8151_configure,
 	.fetch_size		= amd64_fetch_size,
 	.cleanup		= amd64_cleanup,
@@ -499,6 +500,10 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
 	u8 cap_ptr;
 	int err;
 
+	/* The Highlander principle */
+	if (agp_bridges_found)
+		return -ENODEV;
+
 	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
 	if (!cap_ptr)
 		return -ENODEV;
@@ -562,6 +567,8 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev)
 			   amd64_aperture_sizes[bridge->aperture_size_idx].size);
 	agp_remove_bridge(bridge);
 	agp_put_bridge(bridge);
+
+	agp_bridges_found--;
 }
 
 #ifdef CONFIG_PM
@@ -709,6 +716,11 @@ static struct pci_device_id agp_amd64_pci_table[] = {
 
 MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
 
+static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
+	{ PCI_DEVICE_CLASS(0, 0) },
+	{ }
+};
+
 static struct pci_driver agp_amd64_pci_driver = {
 	.name		= "agpgart-amd64",
 	.id_table	= agp_amd64_pci_table,
@@ -734,7 +746,6 @@ int __init agp_amd64_init(void)
 		return err;
 
 	if (agp_bridges_found == 0) {
-		struct pci_dev *dev;
 		if (!agp_try_unsupported && !agp_try_unsupported_boot) {
 			printk(KERN_INFO PFX "No supported AGP bridge found.\n");
 #ifdef MODULE
@@ -750,17 +761,10 @@ int __init agp_amd64_init(void)
 			return -ENODEV;
 
 		/* Look for any AGP bridge */
-		dev = NULL;
-		err = -ENODEV;
-		for_each_pci_dev(dev) {
-			if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
-				continue;
-			/* Only one bridge supported right now */
-			if (agp_amd64_probe(dev, NULL) == 0) {
-				err = 0;
-				break;
-			}
-		}
+		agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
+		err = driver_attach(&agp_amd64_pci_driver.driver);
+		if (err == 0 && agp_bridges_found == 0)
+			err = -ENODEV;
 	}
 	return err;
 }
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 3b2ecbe86ebe..dc30e2243494 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -341,6 +341,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
 {
 	struct aper_size_info_lvl2 *value;
 	struct ati_page_map page_dir;
+	unsigned long __iomem *cur_gatt;
 	unsigned long addr;
 	int retval;
 	u32 temp;
@@ -395,6 +396,12 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
 		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
 	}
 
+	for (i = 0; i < value->num_entries; i++) {
+		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+	}
+
 	return 0;
 }
 
@@ -415,6 +422,7 @@ static const struct agp_bridge_driver ati_generic_bridge = {
 	.aperture_sizes		= ati_generic_sizes,
 	.size_type		= LVL2_APER_SIZE,
 	.num_aperture_sizes	= 7,
+	.needs_scratch_page	= true,
 	.configure		= ati_configure,
 	.fetch_size		= ati_fetch_size,
 	.cleanup		= ati_cleanup,
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 793f39ea9618..aa109cbe0e6e 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -28,6 +28,7 @@
 #include <linux/page-flags.h>
 #include <linux/mm.h>
 #include "agp.h"
+#include "intel-agp.h"
 
 /*
  * The real differences to the generic AGP code is
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index aa4248efc5d8..d836a71bf06d 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -11,1531 +11,13 @@
11#include <linux/agp_backend.h> 11#include <linux/agp_backend.h>
12#include <asm/smp.h> 12#include <asm/smp.h>
13#include "agp.h" 13#include "agp.h"
14#include "intel-agp.h"
15
16#include "intel-gtt.c"
14 17
15int intel_agp_enabled; 18int intel_agp_enabled;
16EXPORT_SYMBOL(intel_agp_enabled); 19EXPORT_SYMBOL(intel_agp_enabled);
17 20
18/*
19 * If we have Intel graphics, we're not going to have anything other than
20 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
21 * on the Intel IOMMU support (CONFIG_DMAR).
22 * Only newer chipsets need to bother with this, of course.
23 */
24#ifdef CONFIG_DMAR
25#define USE_PCI_DMA_API 1
26#endif
27
28#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
29#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
30#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
31#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
32#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
33#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
34#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
35#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
36#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
37#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
38#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
39#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
40#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
41#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
42#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
43#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
44#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
45#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
46#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
47#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
48#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
49#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
50#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
51#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
52#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
53#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
54#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
55#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
56#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
57#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
58#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
59#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
60#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
61#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
62#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
63#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
64#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
65#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
66#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
67#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
68#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
69#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
70#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
71#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
72#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
73#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
74#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
75#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
76
77/* cover 915 and 945 variants */
78#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
79 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
80 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
81 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
82 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
83 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
84
85#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
86 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
87 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
88 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
89 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
90 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
91
92#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
93 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
94 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
95 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
96 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
97
98#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
99 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
100
101#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
102 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
103
104#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
105 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
106 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
107 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
108 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
109 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
110 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
111 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
112 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
113 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
114 IS_SNB)
115
116extern int agp_memory_reserved;
117
118
119/* Intel 815 register */
120#define INTEL_815_APCONT 0x51
121#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
122
123/* Intel i820 registers */
124#define INTEL_I820_RDCR 0x51
125#define INTEL_I820_ERRSTS 0xc8
126
127/* Intel i840 registers */
128#define INTEL_I840_MCHCFG 0x50
129#define INTEL_I840_ERRSTS 0xc8
130
131/* Intel i850 registers */
132#define INTEL_I850_MCHCFG 0x50
133#define INTEL_I850_ERRSTS 0xc8
134
135/* intel 915G registers */
136#define I915_GMADDR 0x18
137#define I915_MMADDR 0x10
138#define I915_PTEADDR 0x1C
139#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
140#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
141#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
142#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
143#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
144#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
145#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
146#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
147
148#define I915_IFPADDR 0x60
149
150/* Intel 965G registers */
151#define I965_MSAC 0x62
152#define I965_IFPADDR 0x70
153
154/* Intel 7505 registers */
155#define INTEL_I7505_APSIZE 0x74
156#define INTEL_I7505_NCAPID 0x60
157#define INTEL_I7505_NISTAT 0x6c
158#define INTEL_I7505_ATTBASE 0x78
159#define INTEL_I7505_ERRSTS 0x42
160#define INTEL_I7505_AGPCTRL 0x70
161#define INTEL_I7505_MCHCFG 0x50
162
163#define SNB_GMCH_CTRL 0x50
164#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
165#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
166#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
167#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
168#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
169#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
170#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
171#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
172#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
173#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
174#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
175#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
176#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
177#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
178#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
179#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
180#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
181#define SNB_GTT_SIZE_0M (0 << 8)
182#define SNB_GTT_SIZE_1M (1 << 8)
183#define SNB_GTT_SIZE_2M (2 << 8)
184#define SNB_GTT_SIZE_MASK (3 << 8)
185
186static const struct aper_size_info_fixed intel_i810_sizes[] =
187{
188 {64, 16384, 4},
189 /* The 32M mode still requires a 64k gatt */
190 {32, 8192, 4}
191};
192
193#define AGP_DCACHE_MEMORY 1
194#define AGP_PHYS_MEMORY 2
195#define INTEL_AGP_CACHED_MEMORY 3
196
197static struct gatt_mask intel_i810_masks[] =
198{
199 {.mask = I810_PTE_VALID, .type = 0},
200 {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
201 {.mask = I810_PTE_VALID, .type = 0},
202 {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
203 .type = INTEL_AGP_CACHED_MEMORY}
204};
205
206static struct _intel_private {
207 struct pci_dev *pcidev; /* device one */
208 u8 __iomem *registers;
209 u32 __iomem *gtt; /* I915G */
210 int num_dcache_entries;
211 /* gtt_entries is the number of gtt entries that are already mapped
212 * to stolen memory. Stolen memory is larger than the memory mapped
213 * through gtt_entries, as it includes some reserved space for the BIOS
214 * popup and for the GTT.
215 */
216 int gtt_entries; /* i830+ */
217 int gtt_total_size;
218 union {
219 void __iomem *i9xx_flush_page;
220 void *i8xx_flush_page;
221 };
222 struct page *i8xx_page;
223 struct resource ifp_resource;
224 int resource_valid;
225} intel_private;
226
227#ifdef USE_PCI_DMA_API
228static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
229{
230 *ret = pci_map_page(intel_private.pcidev, page, 0,
231 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
232 if (pci_dma_mapping_error(intel_private.pcidev, *ret))
233 return -EINVAL;
234 return 0;
235}
236
237static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
238{
239 pci_unmap_page(intel_private.pcidev, dma,
240 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
241}
242
243static void intel_agp_free_sglist(struct agp_memory *mem)
244{
245 struct sg_table st;
246
247 st.sgl = mem->sg_list;
248 st.orig_nents = st.nents = mem->page_count;
249
250 sg_free_table(&st);
251
252 mem->sg_list = NULL;
253 mem->num_sg = 0;
254}
255
256static int intel_agp_map_memory(struct agp_memory *mem)
257{
258 struct sg_table st;
259 struct scatterlist *sg;
260 int i;
261
262 DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
263
264 if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
265 return -ENOMEM;
266
267 mem->sg_list = sg = st.sgl;
268
269 for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
270 sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
271
272 mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
273 mem->page_count, PCI_DMA_BIDIRECTIONAL);
274 if (unlikely(!mem->num_sg)) {
275 intel_agp_free_sglist(mem);
276 return -ENOMEM;
277 }
278 return 0;
279}
280
281static void intel_agp_unmap_memory(struct agp_memory *mem)
282{
283 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
284
285 pci_unmap_sg(intel_private.pcidev, mem->sg_list,
286 mem->page_count, PCI_DMA_BIDIRECTIONAL);
287 intel_agp_free_sglist(mem);
288}
289
290static void intel_agp_insert_sg_entries(struct agp_memory *mem,
291 off_t pg_start, int mask_type)
292{
293 struct scatterlist *sg;
294 int i, j;
295
296 j = pg_start;
297
298 WARN_ON(!mem->num_sg);
299
300 if (mem->num_sg == mem->page_count) {
301 for_each_sg(mem->sg_list, sg, mem->page_count, i) {
302 writel(agp_bridge->driver->mask_memory(agp_bridge,
303 sg_dma_address(sg), mask_type),
304 intel_private.gtt+j);
305 j++;
306 }
307 } else {
308 /* sg may merge pages, but we have to separate
309 * per-page addr for GTT */
310 unsigned int len, m;
311
312 for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
313 len = sg_dma_len(sg) / PAGE_SIZE;
314 for (m = 0; m < len; m++) {
315 writel(agp_bridge->driver->mask_memory(agp_bridge,
316 sg_dma_address(sg) + m * PAGE_SIZE,
317 mask_type),
318 intel_private.gtt+j);
319 j++;
320 }
321 }
322 }
323 readl(intel_private.gtt+j-1);
324}
325
326#else
327
328static void intel_agp_insert_sg_entries(struct agp_memory *mem,
329 off_t pg_start, int mask_type)
330{
331 int i, j;
332 u32 cache_bits = 0;
333
334 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
335 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
336 {
337 cache_bits = I830_PTE_SYSTEM_CACHED;
338 }
339
340 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
341 writel(agp_bridge->driver->mask_memory(agp_bridge,
342 page_to_phys(mem->pages[i]), mask_type),
343 intel_private.gtt+j);
344 }
345
346 readl(intel_private.gtt+j-1);
347}
348
349#endif
350
351static int intel_i810_fetch_size(void)
352{
353 u32 smram_miscc;
354 struct aper_size_info_fixed *values;
355
356 pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
357 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
358
359 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
360 dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
361 return 0;
362 }
363 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
364 agp_bridge->previous_size =
365 agp_bridge->current_size = (void *) (values + 1);
366 agp_bridge->aperture_size_idx = 1;
367 return values[1].size;
368 } else {
369 agp_bridge->previous_size =
370 agp_bridge->current_size = (void *) (values);
371 agp_bridge->aperture_size_idx = 0;
372 return values[0].size;
373 }
374
375 return 0;
376}
377
378static int intel_i810_configure(void)
379{
380 struct aper_size_info_fixed *current_size;
381 u32 temp;
382 int i;
383
384 current_size = A_SIZE_FIX(agp_bridge->current_size);
385
386 if (!intel_private.registers) {
387 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
388 temp &= 0xfff80000;
389
390 intel_private.registers = ioremap(temp, 128 * 4096);
391 if (!intel_private.registers) {
392 dev_err(&intel_private.pcidev->dev,
393 "can't remap memory\n");
394 return -ENOMEM;
395 }
396 }
397
398 if ((readl(intel_private.registers+I810_DRAM_CTL)
399 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
400 /* This will need to be dynamically assigned */
401 dev_info(&intel_private.pcidev->dev,
402 "detected 4MB dedicated video ram\n");
403 intel_private.num_dcache_entries = 1024;
404 }
405 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
406 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
407 writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
408 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
409
410 if (agp_bridge->driver->needs_scratch_page) {
411 for (i = 0; i < current_size->num_entries; i++) {
412 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
413 }
414 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
415 }
416 global_cache_flush();
417 return 0;
418}
419
420static void intel_i810_cleanup(void)
421{
422 writel(0, intel_private.registers+I810_PGETBL_CTL);
423 readl(intel_private.registers); /* PCI Posting. */
424 iounmap(intel_private.registers);
425}
426
427static void intel_i810_tlbflush(struct agp_memory *mem)
428{
429 return;
430}
431
432static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
433{
434 return;
435}
436
437/* Exists to support ARGB cursors */
438static struct page *i8xx_alloc_pages(void)
439{
440 struct page *page;
441
442 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
443 if (page == NULL)
444 return NULL;
445
446 if (set_pages_uc(page, 4) < 0) {
447 set_pages_wb(page, 4);
448 __free_pages(page, 2);
449 return NULL;
450 }
451 get_page(page);
452 atomic_inc(&agp_bridge->current_memory_agp);
453 return page;
454}
455
456static void i8xx_destroy_pages(struct page *page)
457{
458 if (page == NULL)
459 return;
460
461 set_pages_wb(page, 4);
462 put_page(page);
463 __free_pages(page, 2);
464 atomic_dec(&agp_bridge->current_memory_agp);
465}
466
467static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
468 int type)
469{
470 if (type < AGP_USER_TYPES)
471 return type;
472 else if (type == AGP_USER_CACHED_MEMORY)
473 return INTEL_AGP_CACHED_MEMORY;
474 else
475 return 0;
476}
477
478static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
479 int type)
480{
481 int i, j, num_entries;
482 void *temp;
483 int ret = -EINVAL;
484 int mask_type;
485
486 if (mem->page_count == 0)
487 goto out;
488
489 temp = agp_bridge->current_size;
490 num_entries = A_SIZE_FIX(temp)->num_entries;
491
492 if ((pg_start + mem->page_count) > num_entries)
493 goto out_err;
494
495
496 for (j = pg_start; j < (pg_start + mem->page_count); j++) {
497 if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
498 ret = -EBUSY;
499 goto out_err;
500 }
501 }
502
503 if (type != mem->type)
504 goto out_err;
505
506 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
507
508 switch (mask_type) {
509 case AGP_DCACHE_MEMORY:
510 if (!mem->is_flushed)
511 global_cache_flush();
512 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
513 writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
514 intel_private.registers+I810_PTE_BASE+(i*4));
515 }
516 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
517 break;
518 case AGP_PHYS_MEMORY:
519 case AGP_NORMAL_MEMORY:
520 if (!mem->is_flushed)
521 global_cache_flush();
522 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
523 writel(agp_bridge->driver->mask_memory(agp_bridge,
524 page_to_phys(mem->pages[i]), mask_type),
525 intel_private.registers+I810_PTE_BASE+(j*4));
526 }
527 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
528 break;
529 default:
530 goto out_err;
531 }
532
533 agp_bridge->driver->tlb_flush(mem);
534out:
535 ret = 0;
536out_err:
537 mem->is_flushed = true;
538 return ret;
539}
540
541static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
542 int type)
543{
544 int i;
545
546 if (mem->page_count == 0)
547 return 0;
548
549 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
550 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
551 }
552 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
553
554 agp_bridge->driver->tlb_flush(mem);
555 return 0;
556}
557
558/*
559 * The i810/i830 requires a physical address to program its mouse
560 * pointer into hardware.
561 * However the Xserver still writes to it through the agp aperture.
562 */
563static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
564{
565 struct agp_memory *new;
566 struct page *page;
567
568 switch (pg_count) {
569 case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
570 break;
571 case 4:
572 /* kludge to get 4 physical pages for ARGB cursor */
573 page = i8xx_alloc_pages();
574 break;
575 default:
576 return NULL;
577 }
578
579 if (page == NULL)
580 return NULL;
581
582 new = agp_create_memory(pg_count);
583 if (new == NULL)
584 return NULL;
585
586 new->pages[0] = page;
587 if (pg_count == 4) {
588 /* kludge to get 4 physical pages for ARGB cursor */
589 new->pages[1] = new->pages[0] + 1;
590 new->pages[2] = new->pages[1] + 1;
591 new->pages[3] = new->pages[2] + 1;
592 }
593 new->page_count = pg_count;
594 new->num_scratch_pages = pg_count;
595 new->type = AGP_PHYS_MEMORY;
596 new->physical = page_to_phys(new->pages[0]);
597 return new;
598}
599
600static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
601{
602 struct agp_memory *new;
603
604 if (type == AGP_DCACHE_MEMORY) {
605 if (pg_count != intel_private.num_dcache_entries)
606 return NULL;
607
608 new = agp_create_memory(1);
609 if (new == NULL)
610 return NULL;
611
612 new->type = AGP_DCACHE_MEMORY;
613 new->page_count = pg_count;
614 new->num_scratch_pages = 0;
615 agp_free_page_array(new);
616 return new;
617 }
618 if (type == AGP_PHYS_MEMORY)
619 return alloc_agpphysmem_i8xx(pg_count, type);
620 return NULL;
621}
622
623static void intel_i810_free_by_type(struct agp_memory *curr)
624{
625 agp_free_key(curr->key);
626 if (curr->type == AGP_PHYS_MEMORY) {
627 if (curr->page_count == 4)
628 i8xx_destroy_pages(curr->pages[0]);
629 else {
630 agp_bridge->driver->agp_destroy_page(curr->pages[0],
631 AGP_PAGE_DESTROY_UNMAP);
632 agp_bridge->driver->agp_destroy_page(curr->pages[0],
633 AGP_PAGE_DESTROY_FREE);
634 }
635 agp_free_page_array(curr);
636 }
637 kfree(curr);
638}
639
640static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
641 dma_addr_t addr, int type)
642{
643 /* Type checking must be done elsewhere */
644 return addr | bridge->driver->masks[type].mask;
645}
646
647static struct aper_size_info_fixed intel_i830_sizes[] =
648{
649 {128, 32768, 5},
650 /* The 64M mode still requires a 128k gatt */
651 {64, 16384, 5},
652 {256, 65536, 6},
653 {512, 131072, 7},
654};
655
656static void intel_i830_init_gtt_entries(void)
657{
658 u16 gmch_ctrl;
659 int gtt_entries = 0;
660 u8 rdct;
661 int local = 0;
662 static const int ddt[4] = { 0, 16, 32, 64 };
663 int size; /* reserved space (in kb) at the top of stolen memory */
664
665 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
666
667 if (IS_I965) {
668 u32 pgetbl_ctl;
669 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
670
671 /* The 965 has a field telling us the size of the GTT,
672 * which may be larger than what is necessary to map the
673 * aperture.
674 */
675 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
676 case I965_PGETBL_SIZE_128KB:
677 size = 128;
678 break;
679 case I965_PGETBL_SIZE_256KB:
680 size = 256;
681 break;
682 case I965_PGETBL_SIZE_512KB:
683 size = 512;
684 break;
685 case I965_PGETBL_SIZE_1MB:
686 size = 1024;
687 break;
688 case I965_PGETBL_SIZE_2MB:
689 size = 2048;
690 break;
691 case I965_PGETBL_SIZE_1_5MB:
692 size = 1024 + 512;
693 break;
694 default:
695 dev_info(&intel_private.pcidev->dev,
696 "unknown page table size, assuming 512KB\n");
697 size = 512;
698 }
699 size += 4; /* add in BIOS popup space */
700 } else if (IS_G33 && !IS_PINEVIEW) {
701 /* G33's GTT size defined in gmch_ctrl */
702 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
703 case G33_PGETBL_SIZE_1M:
704 size = 1024;
705 break;
706 case G33_PGETBL_SIZE_2M:
707 size = 2048;
708 break;
709 default:
710 dev_info(&agp_bridge->dev->dev,
711 "unknown page table size 0x%x, assuming 512KB\n",
712 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
713 size = 512;
714 }
715 size += 4;
716 } else if (IS_G4X || IS_PINEVIEW) {
717 /* On 4 series hardware, GTT stolen is separate from graphics
718 * stolen, ignore it in stolen gtt entries counting. However,
719 * 4KB of the stolen memory doesn't get mapped to the GTT.
720 */
721 size = 4;
722 } else {
723 /* On previous hardware, the GTT size was just what was
724 * required to map the aperture.
725 */
726 size = agp_bridge->driver->fetch_size() + 4;
727 }
728
729 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
730 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
731 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
732 case I830_GMCH_GMS_STOLEN_512:
733 gtt_entries = KB(512) - KB(size);
734 break;
735 case I830_GMCH_GMS_STOLEN_1024:
736 gtt_entries = MB(1) - KB(size);
737 break;
738 case I830_GMCH_GMS_STOLEN_8192:
739 gtt_entries = MB(8) - KB(size);
740 break;
741 case I830_GMCH_GMS_LOCAL:
742 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
743 gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
744 MB(ddt[I830_RDRAM_DDT(rdct)]);
745 local = 1;
746 break;
747 default:
748 gtt_entries = 0;
749 break;
750 }
751 } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
752 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
753 /*
754 * SandyBridge has new memory control reg at 0x50.w
755 */
756 u16 snb_gmch_ctl;
757 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
758 switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
759 case SNB_GMCH_GMS_STOLEN_32M:
760 gtt_entries = MB(32) - KB(size);
761 break;
762 case SNB_GMCH_GMS_STOLEN_64M:
763 gtt_entries = MB(64) - KB(size);
764 break;
765 case SNB_GMCH_GMS_STOLEN_96M:
766 gtt_entries = MB(96) - KB(size);
767 break;
768 case SNB_GMCH_GMS_STOLEN_128M:
769 gtt_entries = MB(128) - KB(size);
770 break;
771 case SNB_GMCH_GMS_STOLEN_160M:
772 gtt_entries = MB(160) - KB(size);
773 break;
774 case SNB_GMCH_GMS_STOLEN_192M:
775 gtt_entries = MB(192) - KB(size);
776 break;
777 case SNB_GMCH_GMS_STOLEN_224M:
778 gtt_entries = MB(224) - KB(size);
779 break;
780 case SNB_GMCH_GMS_STOLEN_256M:
781 gtt_entries = MB(256) - KB(size);
782 break;
783 case SNB_GMCH_GMS_STOLEN_288M:
784 gtt_entries = MB(288) - KB(size);
785 break;
786 case SNB_GMCH_GMS_STOLEN_320M:
787 gtt_entries = MB(320) - KB(size);
788 break;
789 case SNB_GMCH_GMS_STOLEN_352M:
790 gtt_entries = MB(352) - KB(size);
791 break;
792 case SNB_GMCH_GMS_STOLEN_384M:
793 gtt_entries = MB(384) - KB(size);
794 break;
795 case SNB_GMCH_GMS_STOLEN_416M:
796 gtt_entries = MB(416) - KB(size);
797 break;
798 case SNB_GMCH_GMS_STOLEN_448M:
799 gtt_entries = MB(448) - KB(size);
800 break;
801 case SNB_GMCH_GMS_STOLEN_480M:
802 gtt_entries = MB(480) - KB(size);
803 break;
804 case SNB_GMCH_GMS_STOLEN_512M:
805 gtt_entries = MB(512) - KB(size);
806 break;
807 }
808 } else {
809 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
810 case I855_GMCH_GMS_STOLEN_1M:
811 gtt_entries = MB(1) - KB(size);
812 break;
813 case I855_GMCH_GMS_STOLEN_4M:
814 gtt_entries = MB(4) - KB(size);
815 break;
816 case I855_GMCH_GMS_STOLEN_8M:
817 gtt_entries = MB(8) - KB(size);
818 break;
819 case I855_GMCH_GMS_STOLEN_16M:
820 gtt_entries = MB(16) - KB(size);
821 break;
822 case I855_GMCH_GMS_STOLEN_32M:
823 gtt_entries = MB(32) - KB(size);
824 break;
825 case I915_GMCH_GMS_STOLEN_48M:
826 /* Check it's really I915G */
827 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
828 gtt_entries = MB(48) - KB(size);
829 else
830 gtt_entries = 0;
831 break;
832 case I915_GMCH_GMS_STOLEN_64M:
833 /* Check it's really I915G */
834 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
835 gtt_entries = MB(64) - KB(size);
836 else
837 gtt_entries = 0;
838 break;
839 case G33_GMCH_GMS_STOLEN_128M:
840 if (IS_G33 || IS_I965 || IS_G4X)
841 gtt_entries = MB(128) - KB(size);
842 else
843 gtt_entries = 0;
844 break;
845 case G33_GMCH_GMS_STOLEN_256M:
846 if (IS_G33 || IS_I965 || IS_G4X)
847 gtt_entries = MB(256) - KB(size);
848 else
849 gtt_entries = 0;
850 break;
851 case INTEL_GMCH_GMS_STOLEN_96M:
852 if (IS_I965 || IS_G4X)
853 gtt_entries = MB(96) - KB(size);
854 else
855 gtt_entries = 0;
856 break;
857 case INTEL_GMCH_GMS_STOLEN_160M:
858 if (IS_I965 || IS_G4X)
859 gtt_entries = MB(160) - KB(size);
860 else
861 gtt_entries = 0;
862 break;
863 case INTEL_GMCH_GMS_STOLEN_224M:
864 if (IS_I965 || IS_G4X)
865 gtt_entries = MB(224) - KB(size);
866 else
867 gtt_entries = 0;
868 break;
869 case INTEL_GMCH_GMS_STOLEN_352M:
870 if (IS_I965 || IS_G4X)
871 gtt_entries = MB(352) - KB(size);
872 else
873 gtt_entries = 0;
874 break;
875 default:
876 gtt_entries = 0;
877 break;
878 }
879 }
880 if (gtt_entries > 0) {
881 dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
882 gtt_entries / KB(1), local ? "local" : "stolen");
883 gtt_entries /= KB(4);
884 } else {
885 dev_info(&agp_bridge->dev->dev,
886 "no pre-allocated video memory detected\n");
887 gtt_entries = 0;
888 }
889
890 intel_private.gtt_entries = gtt_entries;
891}
892
893static void intel_i830_fini_flush(void)
894{
895 kunmap(intel_private.i8xx_page);
896 intel_private.i8xx_flush_page = NULL;
897 unmap_page_from_agp(intel_private.i8xx_page);
898
899 __free_page(intel_private.i8xx_page);
900 intel_private.i8xx_page = NULL;
901}
902
903static void intel_i830_setup_flush(void)
904{
905 /* return if we've already set the flush mechanism up */
906 if (intel_private.i8xx_page)
907 return;
908
909 intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
910 if (!intel_private.i8xx_page)
911 return;
912
913 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
914 if (!intel_private.i8xx_flush_page)
915 intel_i830_fini_flush();
916}
917
918/* The chipset_flush interface needs to get data that has already been
919 * flushed out of the CPU all the way out to main memory, because the GPU
920 * doesn't snoop those buffers.
921 *
922 * The 8xx series doesn't have the same lovely interface for flushing the
923 * chipset write buffers that the later chips do. According to the 865
924 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
925 * that buffer out, we just fill 1KB and clflush it out, on the assumption
926 * that it'll push whatever was in there out. It appears to work.
927 */
928static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
929{
930 unsigned int *pg = intel_private.i8xx_flush_page;
931
932 memset(pg, 0, 1024);
933
934 if (cpu_has_clflush)
935 clflush_cache_range(pg, 1024);
936 else if (wbinvd_on_all_cpus() != 0)
937 printk(KERN_ERR "Timed out waiting for cache flush.\n");
938}
939
940/* The intel i830 automatically initializes the agp aperture during POST.
941 * Use the memory already set aside for in the GTT.
942 */
943static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
944{
945 int page_order;
946 struct aper_size_info_fixed *size;
947 int num_entries;
948 u32 temp;
949
950 size = agp_bridge->current_size;
951 page_order = size->page_order;
952 num_entries = size->num_entries;
953 agp_bridge->gatt_table_real = NULL;
954
955 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
956 temp &= 0xfff80000;
957
958 intel_private.registers = ioremap(temp, 128 * 4096);
959 if (!intel_private.registers)
960 return -ENOMEM;
961
962 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
963 global_cache_flush(); /* FIXME: ?? */
964
965 /* we have to call this as early as possible after the MMIO base address is known */
966 intel_i830_init_gtt_entries();
967
968 agp_bridge->gatt_table = NULL;
969
970 agp_bridge->gatt_bus_addr = temp;
971
972 return 0;
973}
974
975/* Return the gatt table to a sane state. Use the top of stolen
976 * memory for the GTT.
977 */
978static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
979{
980 return 0;
981}
982
983static int intel_i830_fetch_size(void)
984{
985 u16 gmch_ctrl;
986 struct aper_size_info_fixed *values;
987
988 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
989
990 if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
991 agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
992 /* 855GM/852GM/865G has 128MB aperture size */
993 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
994 agp_bridge->aperture_size_idx = 0;
995 return values[0].size;
996 }
997
998 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
999
1000 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
1001 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
1002 agp_bridge->aperture_size_idx = 0;
1003 return values[0].size;
1004 } else {
1005 agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
1006 agp_bridge->aperture_size_idx = 1;
1007 return values[1].size;
1008 }
1009
1010 return 0;
1011}
1012
1013static int intel_i830_configure(void)
1014{
1015 struct aper_size_info_fixed *current_size;
1016 u32 temp;
1017 u16 gmch_ctrl;
1018 int i;
1019
1020 current_size = A_SIZE_FIX(agp_bridge->current_size);
1021
1022 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
1023 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1024
1025 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1026 gmch_ctrl |= I830_GMCH_ENABLED;
1027 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
1028
1029 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
1030 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
1031
1032 if (agp_bridge->driver->needs_scratch_page) {
1033 for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
1034 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
1035 }
1036 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
1037 }
1038
1039 global_cache_flush();
1040
1041 intel_i830_setup_flush();
1042 return 0;
1043}
1044
1045static void intel_i830_cleanup(void)
1046{
1047 iounmap(intel_private.registers);
1048}
1049
1050static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
1051 int type)
1052{
1053 int i, j, num_entries;
1054 void *temp;
1055 int ret = -EINVAL;
1056 int mask_type;
1057
1058 if (mem->page_count == 0)
1059 goto out;
1060
1061 temp = agp_bridge->current_size;
1062 num_entries = A_SIZE_FIX(temp)->num_entries;
1063
1064 if (pg_start < intel_private.gtt_entries) {
1065 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1066 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1067 pg_start, intel_private.gtt_entries);
1068
1069 dev_info(&intel_private.pcidev->dev,
1070 "trying to insert into local/stolen memory\n");
1071 goto out_err;
1072 }
1073
1074 if ((pg_start + mem->page_count) > num_entries)
1075 goto out_err;
1076
1077 /* The i830 can't check the GTT for entries since its read only,
1078 * depend on the caller to make the correct offset decisions.
1079 */
1080
1081 if (type != mem->type)
1082 goto out_err;
1083
1084 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1085
1086 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1087 mask_type != INTEL_AGP_CACHED_MEMORY)
1088 goto out_err;
1089
1090 if (!mem->is_flushed)
1091 global_cache_flush();
1092
1093 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
1094 writel(agp_bridge->driver->mask_memory(agp_bridge,
1095 page_to_phys(mem->pages[i]), mask_type),
1096 intel_private.registers+I810_PTE_BASE+(j*4));
1097 }
1098 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
1099 agp_bridge->driver->tlb_flush(mem);
1100
1101out:
1102 ret = 0;
1103out_err:
1104 mem->is_flushed = true;
1105 return ret;
1106}
1107
1108static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
1109 int type)
1110{
1111 int i;
1112
1113 if (mem->page_count == 0)
1114 return 0;
1115
1116 if (pg_start < intel_private.gtt_entries) {
1117 dev_info(&intel_private.pcidev->dev,
1118 "trying to disable local/stolen memory\n");
1119 return -EINVAL;
1120 }
1121
1122 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1123 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
1124 }
1125 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
1126
1127 agp_bridge->driver->tlb_flush(mem);
1128 return 0;
1129}
1130
1131static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
1132{
1133 if (type == AGP_PHYS_MEMORY)
1134 return alloc_agpphysmem_i8xx(pg_count, type);
1135 /* always return NULL for other allocation types for now */
1136 return NULL;
1137}
1138
1139static int intel_alloc_chipset_flush_resource(void)
1140{
1141 int ret;
1142 ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
1143 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
1144 pcibios_align_resource, agp_bridge->dev);
1145
1146 return ret;
1147}
1148
1149static void intel_i915_setup_chipset_flush(void)
1150{
1151 int ret;
1152 u32 temp;
1153
1154 pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
1155 if (!(temp & 0x1)) {
1156 intel_alloc_chipset_flush_resource();
1157 intel_private.resource_valid = 1;
1158 pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1159 } else {
1160 temp &= ~1;
1161
1162 intel_private.resource_valid = 1;
1163 intel_private.ifp_resource.start = temp;
1164 intel_private.ifp_resource.end = temp + PAGE_SIZE;
1165 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1166 /* some BIOSes reserve this area in a pnp some don't */
1167 if (ret)
1168 intel_private.resource_valid = 0;
1169 }
1170}
1171
1172static void intel_i965_g33_setup_chipset_flush(void)
1173{
1174 u32 temp_hi, temp_lo;
1175 int ret;
1176
1177 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
1178 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
1179
1180 if (!(temp_lo & 0x1)) {
1181
1182 intel_alloc_chipset_flush_resource();
1183
1184 intel_private.resource_valid = 1;
1185 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
1186 upper_32_bits(intel_private.ifp_resource.start));
1187 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1188 } else {
1189 u64 l64;
1190
1191 temp_lo &= ~0x1;
1192 l64 = ((u64)temp_hi << 32) | temp_lo;
1193
1194 intel_private.resource_valid = 1;
1195 intel_private.ifp_resource.start = l64;
1196 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1197 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1198 /* some BIOSes reserve this area in a pnp some don't */
1199 if (ret)
1200 intel_private.resource_valid = 0;
1201 }
1202}
1203
1204static void intel_i9xx_setup_flush(void)
1205{
1206 /* return if already configured */
1207 if (intel_private.ifp_resource.start)
1208 return;
1209
1210 if (IS_SNB)
1211 return;
1212
1213 /* setup a resource for this object */
1214 intel_private.ifp_resource.name = "Intel Flush Page";
1215 intel_private.ifp_resource.flags = IORESOURCE_MEM;
1216
1217 	/* Set up the chipset flush page (915 vs 965/G33/G4X variants) */
1218 if (IS_I965 || IS_G33 || IS_G4X) {
1219 intel_i965_g33_setup_chipset_flush();
1220 } else {
1221 intel_i915_setup_chipset_flush();
1222 }
1223
1224 if (intel_private.ifp_resource.start) {
1225 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1226 if (!intel_private.i9xx_flush_page)
1227 dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
1228 }
1229}
1230
1231static int intel_i915_configure(void)
1232{
1233 struct aper_size_info_fixed *current_size;
1234 u32 temp;
1235 u16 gmch_ctrl;
1236 int i;
1237
1238 current_size = A_SIZE_FIX(agp_bridge->current_size);
1239
1240 pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
1241
1242 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1243
1244 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1245 gmch_ctrl |= I830_GMCH_ENABLED;
1246 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
1247
1248 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
1249 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
1250
1251 if (agp_bridge->driver->needs_scratch_page) {
1252 for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
1253 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1254 }
1255 readl(intel_private.gtt+i-1); /* PCI Posting. */
1256 }
1257
1258 global_cache_flush();
1259
1260 intel_i9xx_setup_flush();
1261
1262 return 0;
1263}
1264
1265static void intel_i915_cleanup(void)
1266{
1267 if (intel_private.i9xx_flush_page)
1268 iounmap(intel_private.i9xx_flush_page);
1269 if (intel_private.resource_valid)
1270 release_resource(&intel_private.ifp_resource);
1271 intel_private.ifp_resource.start = 0;
1272 intel_private.resource_valid = 0;
1273 iounmap(intel_private.gtt);
1274 iounmap(intel_private.registers);
1275}
1276
1277static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
1278{
1279 if (intel_private.i9xx_flush_page)
1280 writel(1, intel_private.i9xx_flush_page);
1281}
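
A single posted write to the mapped flush page is all it takes; the chipset recognizes the address and drains its internal write buffers. A caller that has just rewritten GTT entries would pair this with a CPU cache flush, roughly as in this sketch (the helper name is hypothetical; the hooks are the ones installed by the bridge driver structs later in this patch):

/* Hypothetical helper: make freshly written PTEs visible to the GPU by
 * flushing CPU caches first, then the chipset write buffers. */
static void example_sync_for_gpu(struct agp_bridge_data *bridge)
{
	global_cache_flush();				/* CPU side */
	if (bridge->driver->chipset_flush)
		bridge->driver->chipset_flush(bridge);	/* chipset side */
}
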
1282
1283static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
1284 int type)
1285{
1286 int num_entries;
1287 void *temp;
1288 int ret = -EINVAL;
1289 int mask_type;
1290
1291 if (mem->page_count == 0)
1292 goto out;
1293
1294 temp = agp_bridge->current_size;
1295 num_entries = A_SIZE_FIX(temp)->num_entries;
1296
1297 if (pg_start < intel_private.gtt_entries) {
1298 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1299 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1300 pg_start, intel_private.gtt_entries);
1301
1302 dev_info(&intel_private.pcidev->dev,
1303 "trying to insert into local/stolen memory\n");
1304 goto out_err;
1305 }
1306
1307 if ((pg_start + mem->page_count) > num_entries)
1308 goto out_err;
1309
1310 /* The i915 can't check the GTT for entries since it's read only;
1311 * depend on the caller to make the correct offset decisions.
1312 */
1313
1314 if (type != mem->type)
1315 goto out_err;
1316
1317 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1318
1319 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1320 mask_type != INTEL_AGP_CACHED_MEMORY)
1321 goto out_err;
1322
1323 if (!mem->is_flushed)
1324 global_cache_flush();
1325
1326 intel_agp_insert_sg_entries(mem, pg_start, mask_type);
1327 agp_bridge->driver->tlb_flush(mem);
1328
1329 out:
1330 ret = 0;
1331 out_err:
1332 mem->is_flushed = true;
1333 return ret;
1334}
1335
1336static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
1337 int type)
1338{
1339 int i;
1340
1341 if (mem->page_count == 0)
1342 return 0;
1343
1344 if (pg_start < intel_private.gtt_entries) {
1345 dev_info(&intel_private.pcidev->dev,
1346 "trying to disable local/stolen memory\n");
1347 return -EINVAL;
1348 }
1349
1350 for (i = pg_start; i < (mem->page_count + pg_start); i++)
1351 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1352
1353 readl(intel_private.gtt+i-1);
1354
1355 agp_bridge->driver->tlb_flush(mem);
1356 return 0;
1357}
1358
1359 /* Return the aperture size by just checking the resource length. The only
1360  * effect the MSAC register has, according to the spec, is to change the
1361  * resource size.
1362 */
1363static int intel_i9xx_fetch_size(void)
1364{
1365 int num_sizes = ARRAY_SIZE(intel_i830_sizes);
1366 int aper_size; /* size in megabytes */
1367 int i;
1368
1369 aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
1370
1371 for (i = 0; i < num_sizes; i++) {
1372 if (aper_size == intel_i830_sizes[i].size) {
1373 agp_bridge->current_size = intel_i830_sizes + i;
1374 agp_bridge->previous_size = agp_bridge->current_size;
1375 return aper_size;
1376 }
1377 }
1378
1379 return 0;
1380}
1381
1382/* The intel i915 automatically initializes the agp aperture during POST.
1383  * Use the memory already set aside for it in the GTT.
1384 */
1385static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
1386{
1387 int page_order;
1388 struct aper_size_info_fixed *size;
1389 int num_entries;
1390 u32 temp, temp2;
1391 int gtt_map_size = 256 * 1024;
1392
1393 size = agp_bridge->current_size;
1394 page_order = size->page_order;
1395 num_entries = size->num_entries;
1396 agp_bridge->gatt_table_real = NULL;
1397
1398 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1399 pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
1400
1401 if (IS_G33)
1402 gtt_map_size = 1024 * 1024; /* 1M on G33 */
1403 intel_private.gtt = ioremap(temp2, gtt_map_size);
1404 if (!intel_private.gtt)
1405 return -ENOMEM;
1406
1407 intel_private.gtt_total_size = gtt_map_size / 4;
1408
1409 temp &= 0xfff80000;
1410
1411 intel_private.registers = ioremap(temp, 128 * 4096);
1412 if (!intel_private.registers) {
1413 iounmap(intel_private.gtt);
1414 return -ENOMEM;
1415 }
1416
1417 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1418 global_cache_flush(); /* FIXME: ? */
1419
1420 /* we have to call this as early as possible after the MMIO base address is known */
1421 intel_i830_init_gtt_entries();
1422
1423 agp_bridge->gatt_table = NULL;
1424
1425 agp_bridge->gatt_bus_addr = temp;
1426
1427 return 0;
1428}
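
The gtt_total_size arithmetic deserves a quick sanity check: each GTT entry is a 32-bit PTE covering one 4KiB page, so the 256KiB table ioremapped above spans a 256MiB aperture, and the 1MiB G33 table spans 1GiB. As ordinary userspace C:

#include <stdio.h>

int main(void)
{
	unsigned int gtt_bytes = 256 * 1024;		/* i915 GTT size */
	unsigned int entries = gtt_bytes / 4;		/* one u32 PTE each */
	unsigned long long aperture = (unsigned long long)entries * 4096;

	printf("%u entries -> %llu MiB of aperture\n", entries, aperture >> 20);
	return 0;
}
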
1429
1430/*
1431 * The i965 supports 36-bit physical addresses, but to keep
1432 * the format of the GTT the same, the bits that don't fit
1433 * in a 32-bit word are shifted down to bits 4..7.
1434 *
1435 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1436 * is always zero on 32-bit architectures, so no need to make
1437 * this conditional.
1438 */
1439static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1440 dma_addr_t addr, int type)
1441{
1442 /* Shift high bits down */
1443 addr |= (addr >> 28) & 0xf0;
1444
1445 /* Type checking must be done elsewhere */
1446 return addr | bridge->driver->masks[type].mask;
1447}
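
A worked example of that encoding, as standalone userspace C: for a page at physical address 0x340000000 (a 36-bit address), bits 35:32 are 0x3, which land in PTE bits 7:4; the low 32 bits are kept as-is and bit 0 is the valid bit.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x340000000ULL;		/* page above 4GiB */
	uint32_t pte = (uint32_t)(addr | ((addr >> 28) & 0xf0)) | 0x1;

	/* low word 0x40000000, bits 35:32 (0x3) -> 0x30, valid bit -> 0x40000031 */
	printf("pte = 0x%08x\n", (unsigned int)pte);
	return 0;
}
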
1448
1449static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1450{
1451 u16 snb_gmch_ctl;
1452
1453 switch (agp_bridge->dev->device) {
1454 case PCI_DEVICE_ID_INTEL_GM45_HB:
1455 case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
1456 case PCI_DEVICE_ID_INTEL_Q45_HB:
1457 case PCI_DEVICE_ID_INTEL_G45_HB:
1458 case PCI_DEVICE_ID_INTEL_G41_HB:
1459 case PCI_DEVICE_ID_INTEL_B43_HB:
1460 case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
1461 case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
1462 case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
1463 case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
1464 *gtt_offset = *gtt_size = MB(2);
1465 break;
1466 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
1467 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
1468 *gtt_offset = MB(2);
1469
1470 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1471 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
1472 default:
1473 case SNB_GTT_SIZE_0M:
1474 printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
1475 *gtt_size = MB(0);
1476 break;
1477 case SNB_GTT_SIZE_1M:
1478 *gtt_size = MB(1);
1479 break;
1480 case SNB_GTT_SIZE_2M:
1481 *gtt_size = MB(2);
1482 break;
1483 }
1484 break;
1485 default:
1486 *gtt_offset = *gtt_size = KB(512);
1487 }
1488}
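
Note that on Sandybridge the GTT size is no longer implied by the device ID alone; it is read out of the GTT-size field of SNB_GMCH_CTRL. Reduced to its essentials (a sketch reusing the constants this patch adds in intel-agp.h; the helper name is illustrative):

/* Map the SNB_GMCH_CTRL GTT-size field to a size in bytes. */
static unsigned int example_snb_gtt_bytes(u16 snb_gmch_ctl)
{
	switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
	case SNB_GTT_SIZE_1M:
		return 1 * 1024 * 1024;
	case SNB_GTT_SIZE_2M:
		return 2 * 1024 * 1024;
	default:
		return 0;	/* disabled or reserved encoding */
	}
}
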
1489
1490/* The intel i965 automatically initializes the agp aperture during POST.
1491  * Use the memory already set aside for it in the GTT.
1492 */
1493static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
1494{
1495 int page_order;
1496 struct aper_size_info_fixed *size;
1497 int num_entries;
1498 u32 temp;
1499 int gtt_offset, gtt_size;
1500
1501 size = agp_bridge->current_size;
1502 page_order = size->page_order;
1503 num_entries = size->num_entries;
1504 agp_bridge->gatt_table_real = NULL;
1505
1506 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1507
1508 temp &= 0xfff00000;
1509
1510 intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
1511
1512 intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
1513
1514 if (!intel_private.gtt)
1515 return -ENOMEM;
1516
1517 intel_private.gtt_total_size = gtt_size / 4;
1518
1519 intel_private.registers = ioremap(temp, 128 * 4096);
1520 if (!intel_private.registers) {
1521 iounmap(intel_private.gtt);
1522 return -ENOMEM;
1523 }
1524
1525 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1526 global_cache_flush(); /* FIXME: ? */
1527
1528 /* we have to call this as early as possible after the MMIO base address is known */
1529 intel_i830_init_gtt_entries();
1530
1531 agp_bridge->gatt_table = NULL;
1532
1533 agp_bridge->gatt_bus_addr = temp;
1534
1535 return 0;
1536}
1537
1538
1539 static int intel_fetch_size(void)
1540 {
1541 	int i;
@@ -1982,6 +464,7 @@ static const struct agp_bridge_driver intel_generic_driver = {
1982 	.aperture_sizes = intel_generic_sizes,
1983 	.size_type = U16_APER_SIZE,
1984 	.num_aperture_sizes = 7,
 467 	.needs_scratch_page = true,
1985 	.configure = intel_configure,
1986 	.fetch_size = intel_fetch_size,
1987 	.cleanup = intel_cleanup,
@@ -2003,38 +486,12 @@ static const struct agp_bridge_driver intel_generic_driver = {
2003 	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
2004 };
2005
2006static const struct agp_bridge_driver intel_810_driver = {
2007 .owner = THIS_MODULE,
2008 .aperture_sizes = intel_i810_sizes,
2009 .size_type = FIXED_APER_SIZE,
2010 .num_aperture_sizes = 2,
2011 .needs_scratch_page = true,
2012 .configure = intel_i810_configure,
2013 .fetch_size = intel_i810_fetch_size,
2014 .cleanup = intel_i810_cleanup,
2015 .tlb_flush = intel_i810_tlbflush,
2016 .mask_memory = intel_i810_mask_memory,
2017 .masks = intel_i810_masks,
2018 .agp_enable = intel_i810_agp_enable,
2019 .cache_flush = global_cache_flush,
2020 .create_gatt_table = agp_generic_create_gatt_table,
2021 .free_gatt_table = agp_generic_free_gatt_table,
2022 .insert_memory = intel_i810_insert_entries,
2023 .remove_memory = intel_i810_remove_entries,
2024 .alloc_by_type = intel_i810_alloc_by_type,
2025 .free_by_type = intel_i810_free_by_type,
2026 .agp_alloc_page = agp_generic_alloc_page,
2027 .agp_alloc_pages = agp_generic_alloc_pages,
2028 .agp_destroy_page = agp_generic_destroy_page,
2029 .agp_destroy_pages = agp_generic_destroy_pages,
2030 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
2031};
2032
2033 static const struct agp_bridge_driver intel_815_driver = {
2034 	.owner = THIS_MODULE,
2035 	.aperture_sizes = intel_815_sizes,
2036 	.size_type = U8_APER_SIZE,
2037 	.num_aperture_sizes = 2,
 494 	.needs_scratch_page = true,
2038 	.configure = intel_815_configure,
2039 	.fetch_size = intel_815_fetch_size,
2040 	.cleanup = intel_8xx_cleanup,
@@ -2056,39 +513,12 @@ static const struct agp_bridge_driver intel_815_driver = {
2056 	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
2057 };
2058
2059static const struct agp_bridge_driver intel_830_driver = {
2060 .owner = THIS_MODULE,
2061 .aperture_sizes = intel_i830_sizes,
2062 .size_type = FIXED_APER_SIZE,
2063 .num_aperture_sizes = 4,
2064 .needs_scratch_page = true,
2065 .configure = intel_i830_configure,
2066 .fetch_size = intel_i830_fetch_size,
2067 .cleanup = intel_i830_cleanup,
2068 .tlb_flush = intel_i810_tlbflush,
2069 .mask_memory = intel_i810_mask_memory,
2070 .masks = intel_i810_masks,
2071 .agp_enable = intel_i810_agp_enable,
2072 .cache_flush = global_cache_flush,
2073 .create_gatt_table = intel_i830_create_gatt_table,
2074 .free_gatt_table = intel_i830_free_gatt_table,
2075 .insert_memory = intel_i830_insert_entries,
2076 .remove_memory = intel_i830_remove_entries,
2077 .alloc_by_type = intel_i830_alloc_by_type,
2078 .free_by_type = intel_i810_free_by_type,
2079 .agp_alloc_page = agp_generic_alloc_page,
2080 .agp_alloc_pages = agp_generic_alloc_pages,
2081 .agp_destroy_page = agp_generic_destroy_page,
2082 .agp_destroy_pages = agp_generic_destroy_pages,
2083 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
2084 .chipset_flush = intel_i830_chipset_flush,
2085};
2086
2087 static const struct agp_bridge_driver intel_820_driver = {
2088 	.owner = THIS_MODULE,
2089 	.aperture_sizes = intel_8xx_sizes,
2090 	.size_type = U8_APER_SIZE,
2091 	.num_aperture_sizes = 7,
 521 	.needs_scratch_page = true,
2092 	.configure = intel_820_configure,
2093 	.fetch_size = intel_8xx_fetch_size,
2094 	.cleanup = intel_820_cleanup,
@@ -2115,6 +545,7 @@ static const struct agp_bridge_driver intel_830mp_driver = {
2115 	.aperture_sizes = intel_830mp_sizes,
2116 	.size_type = U8_APER_SIZE,
2117 	.num_aperture_sizes = 4,
 548 	.needs_scratch_page = true,
2118 	.configure = intel_830mp_configure,
2119 	.fetch_size = intel_8xx_fetch_size,
2120 	.cleanup = intel_8xx_cleanup,
@@ -2141,6 +572,7 @@ static const struct agp_bridge_driver intel_840_driver = {
2141 	.aperture_sizes = intel_8xx_sizes,
2142 	.size_type = U8_APER_SIZE,
2143 	.num_aperture_sizes = 7,
 575 	.needs_scratch_page = true,
2144 	.configure = intel_840_configure,
2145 	.fetch_size = intel_8xx_fetch_size,
2146 	.cleanup = intel_8xx_cleanup,
@@ -2167,6 +599,7 @@ static const struct agp_bridge_driver intel_845_driver = {
2167 	.aperture_sizes = intel_8xx_sizes,
2168 	.size_type = U8_APER_SIZE,
2169 	.num_aperture_sizes = 7,
 602 	.needs_scratch_page = true,
2170 	.configure = intel_845_configure,
2171 	.fetch_size = intel_8xx_fetch_size,
2172 	.cleanup = intel_8xx_cleanup,
@@ -2193,6 +626,7 @@ static const struct agp_bridge_driver intel_850_driver = {
2193 	.aperture_sizes = intel_8xx_sizes,
2194 	.size_type = U8_APER_SIZE,
2195 	.num_aperture_sizes = 7,
 629 	.needs_scratch_page = true,
2196 	.configure = intel_850_configure,
2197 	.fetch_size = intel_8xx_fetch_size,
2198 	.cleanup = intel_8xx_cleanup,
@@ -2219,6 +653,7 @@ static const struct agp_bridge_driver intel_860_driver = {
2219 	.aperture_sizes = intel_8xx_sizes,
2220 	.size_type = U8_APER_SIZE,
2221 	.num_aperture_sizes = 7,
 656 	.needs_scratch_page = true,
2222 	.configure = intel_860_configure,
2223 	.fetch_size = intel_8xx_fetch_size,
2224 	.cleanup = intel_8xx_cleanup,
@@ -2240,79 +675,12 @@ static const struct agp_bridge_driver intel_860_driver = {
2240 	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
2241 };
2242
2243static const struct agp_bridge_driver intel_915_driver = {
2244 .owner = THIS_MODULE,
2245 .aperture_sizes = intel_i830_sizes,
2246 .size_type = FIXED_APER_SIZE,
2247 .num_aperture_sizes = 4,
2248 .needs_scratch_page = true,
2249 .configure = intel_i915_configure,
2250 .fetch_size = intel_i9xx_fetch_size,
2251 .cleanup = intel_i915_cleanup,
2252 .tlb_flush = intel_i810_tlbflush,
2253 .mask_memory = intel_i810_mask_memory,
2254 .masks = intel_i810_masks,
2255 .agp_enable = intel_i810_agp_enable,
2256 .cache_flush = global_cache_flush,
2257 .create_gatt_table = intel_i915_create_gatt_table,
2258 .free_gatt_table = intel_i830_free_gatt_table,
2259 .insert_memory = intel_i915_insert_entries,
2260 .remove_memory = intel_i915_remove_entries,
2261 .alloc_by_type = intel_i830_alloc_by_type,
2262 .free_by_type = intel_i810_free_by_type,
2263 .agp_alloc_page = agp_generic_alloc_page,
2264 .agp_alloc_pages = agp_generic_alloc_pages,
2265 .agp_destroy_page = agp_generic_destroy_page,
2266 .agp_destroy_pages = agp_generic_destroy_pages,
2267 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
2268 .chipset_flush = intel_i915_chipset_flush,
2269#ifdef USE_PCI_DMA_API
2270 .agp_map_page = intel_agp_map_page,
2271 .agp_unmap_page = intel_agp_unmap_page,
2272 .agp_map_memory = intel_agp_map_memory,
2273 .agp_unmap_memory = intel_agp_unmap_memory,
2274#endif
2275};
2276
2277static const struct agp_bridge_driver intel_i965_driver = {
2278 .owner = THIS_MODULE,
2279 .aperture_sizes = intel_i830_sizes,
2280 .size_type = FIXED_APER_SIZE,
2281 .num_aperture_sizes = 4,
2282 .needs_scratch_page = true,
2283 .configure = intel_i915_configure,
2284 .fetch_size = intel_i9xx_fetch_size,
2285 .cleanup = intel_i915_cleanup,
2286 .tlb_flush = intel_i810_tlbflush,
2287 .mask_memory = intel_i965_mask_memory,
2288 .masks = intel_i810_masks,
2289 .agp_enable = intel_i810_agp_enable,
2290 .cache_flush = global_cache_flush,
2291 .create_gatt_table = intel_i965_create_gatt_table,
2292 .free_gatt_table = intel_i830_free_gatt_table,
2293 .insert_memory = intel_i915_insert_entries,
2294 .remove_memory = intel_i915_remove_entries,
2295 .alloc_by_type = intel_i830_alloc_by_type,
2296 .free_by_type = intel_i810_free_by_type,
2297 .agp_alloc_page = agp_generic_alloc_page,
2298 .agp_alloc_pages = agp_generic_alloc_pages,
2299 .agp_destroy_page = agp_generic_destroy_page,
2300 .agp_destroy_pages = agp_generic_destroy_pages,
2301 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
2302 .chipset_flush = intel_i915_chipset_flush,
2303#ifdef USE_PCI_DMA_API
2304 .agp_map_page = intel_agp_map_page,
2305 .agp_unmap_page = intel_agp_unmap_page,
2306 .agp_map_memory = intel_agp_map_memory,
2307 .agp_unmap_memory = intel_agp_unmap_memory,
2308#endif
2309};
2310
2311 static const struct agp_bridge_driver intel_7505_driver = {
2312 	.owner = THIS_MODULE,
2313 	.aperture_sizes = intel_8xx_sizes,
2314 	.size_type = U8_APER_SIZE,
2315 	.num_aperture_sizes = 7,
 683 	.needs_scratch_page = true,
2316 	.configure = intel_7505_configure,
2317 	.fetch_size = intel_8xx_fetch_size,
2318 	.cleanup = intel_8xx_cleanup,
@@ -2334,40 +702,6 @@ static const struct agp_bridge_driver intel_7505_driver = {
2334 	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
2335 };
2336
2337static const struct agp_bridge_driver intel_g33_driver = {
2338 .owner = THIS_MODULE,
2339 .aperture_sizes = intel_i830_sizes,
2340 .size_type = FIXED_APER_SIZE,
2341 .num_aperture_sizes = 4,
2342 .needs_scratch_page = true,
2343 .configure = intel_i915_configure,
2344 .fetch_size = intel_i9xx_fetch_size,
2345 .cleanup = intel_i915_cleanup,
2346 .tlb_flush = intel_i810_tlbflush,
2347 .mask_memory = intel_i965_mask_memory,
2348 .masks = intel_i810_masks,
2349 .agp_enable = intel_i810_agp_enable,
2350 .cache_flush = global_cache_flush,
2351 .create_gatt_table = intel_i915_create_gatt_table,
2352 .free_gatt_table = intel_i830_free_gatt_table,
2353 .insert_memory = intel_i915_insert_entries,
2354 .remove_memory = intel_i915_remove_entries,
2355 .alloc_by_type = intel_i830_alloc_by_type,
2356 .free_by_type = intel_i810_free_by_type,
2357 .agp_alloc_page = agp_generic_alloc_page,
2358 .agp_alloc_pages = agp_generic_alloc_pages,
2359 .agp_destroy_page = agp_generic_destroy_page,
2360 .agp_destroy_pages = agp_generic_destroy_pages,
2361 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
2362 .chipset_flush = intel_i915_chipset_flush,
2363#ifdef USE_PCI_DMA_API
2364 .agp_map_page = intel_agp_map_page,
2365 .agp_unmap_page = intel_agp_unmap_page,
2366 .agp_map_memory = intel_agp_map_memory,
2367 .agp_unmap_memory = intel_agp_unmap_memory,
2368#endif
2369};
2370
2371 static int find_gmch(u16 device)
2372 {
2373 	struct pci_dev *gmch_device;
@@ -2392,103 +726,137 @@ static int find_gmch(u16 device)
2392 static const struct intel_driver_description {
2393 	unsigned int chip_id;
2394 	unsigned int gmch_chip_id;
2395 	unsigned int multi_gmch_chip; /* if we have more gfx chip type on this HB. */
2396 	char *name;
2397 	const struct agp_bridge_driver *driver;
2398 	const struct agp_bridge_driver *gmch_driver;
2399 } intel_agp_chipsets[] = {
2400 	{ PCI_DEVICE_ID_INTEL_82443LX_0, 0, 0, "440LX", &intel_generic_driver, NULL },
 733 	{ PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
2401 	{ PCI_DEVICE_ID_INTEL_82443BX_0, 0, 0, "440BX", &intel_generic_driver, NULL },
 734 	{ PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
2402 	{ PCI_DEVICE_ID_INTEL_82443GX_0, 0, 0, "440GX", &intel_generic_driver, NULL },
 735 	{ PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
2403 	{ PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, 0, "i810",
 736 	{ PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
2404 		NULL, &intel_810_driver },
2405 	{ PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, 0, "i810",
 738 	{ PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
2406 		NULL, &intel_810_driver },
2407 	{ PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, 0, "i810",
 740 	{ PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
2408 		NULL, &intel_810_driver },
2409 	{ PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, 0, "i815",
 742 	{ PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
2410 		&intel_815_driver, &intel_810_driver },
2411 	{ PCI_DEVICE_ID_INTEL_82820_HB, 0, 0, "i820", &intel_820_driver, NULL },
 744 	{ PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
2412 	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, 0, "i820", &intel_820_driver, NULL },
 745 	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
2413 	{ PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, 0, "830M",
 746 	{ PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
2414 		&intel_830mp_driver, &intel_830_driver },
2415 	{ PCI_DEVICE_ID_INTEL_82840_HB, 0, 0, "i840", &intel_840_driver, NULL },
 748 	{ PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
2416 	{ PCI_DEVICE_ID_INTEL_82845_HB, 0, 0, "845G", &intel_845_driver, NULL },
 749 	{ PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
2417 	{ PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M",
 750 	{ PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
2418 		&intel_845_driver, &intel_830_driver },
2419 	{ PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL },
 752 	{ PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
2420 	{ PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854",
 753 	{ PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
2421 		&intel_845_driver, &intel_830_driver },
2422 	{ PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL },
 755 	{ PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
2423 	{ PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM",
 756 	{ PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
2424 		&intel_845_driver, &intel_830_driver },
2425 	{ PCI_DEVICE_ID_INTEL_82860_HB, 0, 0, "i860", &intel_860_driver, NULL },
 758 	{ PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
2426 	{ PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865",
 759 	{ PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
2427 		&intel_845_driver, &intel_830_driver },
2428 	{ PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL },
 761 	{ PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
2429 	{ PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)",
 762 	{ PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
2430 		NULL, &intel_915_driver },
2431 	{ PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G",
 764 	{ PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
2432 		NULL, &intel_915_driver },
2433 	{ PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM",
 766 	{ PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
2434 		NULL, &intel_915_driver },
2435 	{ PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G",
 768 	{ PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
2436 		NULL, &intel_915_driver },
2437 	{ PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM",
 770 	{ PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
2438 		NULL, &intel_915_driver },
2439 	{ PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME",
 772 	{ PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
2440 		NULL, &intel_915_driver },
2441 	{ PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ",
 774 	{ PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
2442 		NULL, &intel_i965_driver },
2443 	{ PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35",
 776 	{ PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
2444 		NULL, &intel_i965_driver },
2445 	{ PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q",
 778 	{ PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
2446 		NULL, &intel_i965_driver },
2447 	{ PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G",
 780 	{ PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
2448 		NULL, &intel_i965_driver },
2449 	{ PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM",
 782 	{ PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
2450 		NULL, &intel_i965_driver },
2451 	{ PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE",
 784 	{ PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
2452 		NULL, &intel_i965_driver },
2453 	{ PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL },
 786 	{ PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
2454 	{ PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL },
 787 	{ PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
2455 	{ PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33",
 788 	{ PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
2456 		NULL, &intel_g33_driver },
2457 	{ PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35",
 790 	{ PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
2458 		NULL, &intel_g33_driver },
2459 	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
 792 	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
2460 		NULL, &intel_g33_driver },
2461 	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150",
 794 	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
2462 		NULL, &intel_g33_driver },
2463 	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150",
 796 	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
2464 		NULL, &intel_g33_driver },
2465 	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
 798 	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
2466 		"GM45", NULL, &intel_i965_driver },
2467 	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0,
 800 	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
2468 		"Eaglelake", NULL, &intel_i965_driver },
2469 	{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
 802 	{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
2470 		"Q45/Q43", NULL, &intel_i965_driver },
2471 	{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
 804 	{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
2472 		"G45/G43", NULL, &intel_i965_driver },
2473 	{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0,
 806 	{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
2474 		"B43", NULL, &intel_i965_driver },
2475 	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
 808 	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
2476 		"G41", NULL, &intel_i965_driver },
2477 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
 810 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
2478 		"HD Graphics", NULL, &intel_i965_driver },
2479 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
 812 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
2480 		"HD Graphics", NULL, &intel_i965_driver },
2481 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
 814 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
2482 		"HD Graphics", NULL, &intel_i965_driver },
2483 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
 816 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
2484 		"HD Graphics", NULL, &intel_i965_driver },
2485 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0,
 818 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
2486 		"Sandybridge", NULL, &intel_i965_driver },
2487 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0,
 820 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
2488 		"Sandybridge", NULL, &intel_i965_driver },
2489 	{ 0, 0, 0, NULL, NULL, NULL }
 822 	{ 0, 0, NULL, NULL, NULL }
2490 };
2491
825static int __devinit intel_gmch_probe(struct pci_dev *pdev,
826 struct agp_bridge_data *bridge)
827{
828 int i;
829 bridge->driver = NULL;
830
831 for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
832 if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
833 find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
834 bridge->driver =
835 intel_agp_chipsets[i].gmch_driver;
836 break;
837 }
838 }
839
840 if (!bridge->driver)
841 return 0;
842
843 bridge->dev_private_data = &intel_private;
844 bridge->dev = pdev;
845
846 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
847
848 if (bridge->driver->mask_memory == intel_i965_mask_memory) {
849 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
850 dev_err(&intel_private.pcidev->dev,
851 "set gfx device dma mask 36bit failed!\n");
852 else
853 pci_set_consistent_dma_mask(intel_private.pcidev,
854 DMA_BIT_MASK(36));
855 }
856
857 return 1;
858}
859
2492 static int __devinit agp_intel_probe(struct pci_dev *pdev,
2493 				     const struct pci_device_id *ent)
2494 {
@@ -2503,22 +871,18 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2503 	if (!bridge)
2504 		return -ENOMEM;
2505
 874 	bridge->capndx = cap_ptr;
 875
 876 	if (intel_gmch_probe(pdev, bridge))
 877 		goto found_gmch;
 878
2506 	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
2507 		/* Multiple models of gfx chip may sit on the same
2508 		   host bridge type; this makes sure we detect the
2509 		   right IGD. */
2510 		if (pdev->device == intel_agp_chipsets[i].chip_id) {
2511 			if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
2512 			    find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
2513 				bridge->driver =
2514 					intel_agp_chipsets[i].gmch_driver;
2515 				break;
2516 			} else if (intel_agp_chipsets[i].multi_gmch_chip) {
2517 				continue;
2518 			} else {
2519 				bridge->driver = intel_agp_chipsets[i].driver;
2520 				break;
2521 			}
 884 			bridge->driver = intel_agp_chipsets[i].driver;
 885 			break;
2522 		}
2523 	}
2524
@@ -2530,18 +894,16 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2530 		return -ENODEV;
2531 	}
2532
2533 	if (bridge->driver == NULL) {
2534 		/* bridge has no AGP and no IGD detected */
 897 	if (!bridge->driver) {
2535 		if (cap_ptr)
2536 			dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
2537 				 intel_agp_chipsets[i].gmch_chip_id);
2538 		agp_put_bridge(bridge);
2539 		return -ENODEV;
2540 	}
2541
2542 	bridge->dev = pdev;
2543 	bridge->capndx = cap_ptr;
2544 	bridge->dev_private_data = &intel_private;
 906 	bridge->dev_private_data = NULL;
2545
2546 	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
2547
@@ -2577,15 +939,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2577 						&bridge->mode);
2578 	}
2579
2580 	if (bridge->driver->mask_memory == intel_i965_mask_memory) {
2581 		if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
2582 			dev_err(&intel_private.pcidev->dev,
2583 				"set gfx device dma mask 36bit failed!\n");
2584 		else
2585 			pci_set_consistent_dma_mask(intel_private.pcidev,
2586 						    DMA_BIT_MASK(36));
2587 	}
2588
 942 found_gmch:
2589 	pci_set_drvdata(pdev, bridge);
2590 	err = agp_add_bridge(bridge);
2591 	if (!err)
@@ -2611,22 +965,7 @@ static int agp_intel_resume(struct pci_dev *pdev)
2611 	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
2612 	int ret_val;
2613
2614 	if (bridge->driver == &intel_generic_driver)
2615 		intel_configure();
2616 	else if (bridge->driver == &intel_850_driver)
2617 		intel_850_configure();
2618 	else if (bridge->driver == &intel_845_driver)
2619 		intel_845_configure();
2620 	else if (bridge->driver == &intel_830mp_driver)
2621 		intel_830mp_configure();
2622 	else if (bridge->driver == &intel_915_driver)
2623 		intel_i915_configure();
2624 	else if (bridge->driver == &intel_830_driver)
2625 		intel_i830_configure();
2626 	else if (bridge->driver == &intel_810_driver)
2627 		intel_i810_configure();
2628 	else if (bridge->driver == &intel_i965_driver)
2629 		intel_i915_configure();
 968 	bridge->driver->configure();
2630
2631 	ret_val = agp_rebind_memory();
2632 	if (ret_val != 0)
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
new file mode 100644
index 000000000000..2547465d4658
--- /dev/null
+++ b/drivers/char/agp/intel-agp.h
@@ -0,0 +1,239 @@
1/*
2 * Common Intel AGPGART and GTT definitions.
3 */
4
5/* Intel registers */
6#define INTEL_APSIZE 0xb4
7#define INTEL_ATTBASE 0xb8
8#define INTEL_AGPCTRL 0xb0
9#define INTEL_NBXCFG 0x50
10#define INTEL_ERRSTS 0x91
11
12/* Intel i830 registers */
13#define I830_GMCH_CTRL 0x52
14#define I830_GMCH_ENABLED 0x4
15#define I830_GMCH_MEM_MASK 0x1
16#define I830_GMCH_MEM_64M 0x1
17#define I830_GMCH_MEM_128M 0
18#define I830_GMCH_GMS_MASK 0x70
19#define I830_GMCH_GMS_DISABLED 0x00
20#define I830_GMCH_GMS_LOCAL 0x10
21#define I830_GMCH_GMS_STOLEN_512 0x20
22#define I830_GMCH_GMS_STOLEN_1024 0x30
23#define I830_GMCH_GMS_STOLEN_8192 0x40
24#define I830_RDRAM_CHANNEL_TYPE 0x03010
25#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
26#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
27
28/* This one is for the I830MP with an external graphics card */
29#define INTEL_I830_ERRSTS 0x92
30
31/* Intel 855GM/852GM registers */
32#define I855_GMCH_GMS_MASK 0xF0
33#define I855_GMCH_GMS_STOLEN_0M 0x0
34#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
35#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
36#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
37#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
38#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
39#define I85X_CAPID 0x44
40#define I85X_VARIANT_MASK 0x7
41#define I85X_VARIANT_SHIFT 5
42#define I855_GME 0x0
43#define I855_GM 0x4
44#define I852_GME 0x2
45#define I852_GM 0x5
46
47/* Intel i845 registers */
48#define INTEL_I845_AGPM 0x51
49#define INTEL_I845_ERRSTS 0xc8
50
51/* Intel i860 registers */
52#define INTEL_I860_MCHCFG 0x50
53#define INTEL_I860_ERRSTS 0xc8
54
55/* Intel i810 registers */
56#define I810_GMADDR 0x10
57#define I810_MMADDR 0x14
58#define I810_PTE_BASE 0x10000
59#define I810_PTE_MAIN_UNCACHED 0x00000000
60#define I810_PTE_LOCAL 0x00000002
61#define I810_PTE_VALID 0x00000001
62#define I830_PTE_SYSTEM_CACHED 0x00000006
63#define I810_SMRAM_MISCC 0x70
64#define I810_GFX_MEM_WIN_SIZE 0x00010000
65#define I810_GFX_MEM_WIN_32M 0x00010000
66#define I810_GMS 0x000000c0
67#define I810_GMS_DISABLE 0x00000000
68#define I810_PGETBL_CTL 0x2020
69#define I810_PGETBL_ENABLED 0x00000001
70#define I965_PGETBL_SIZE_MASK 0x0000000e
71#define I965_PGETBL_SIZE_512KB (0 << 1)
72#define I965_PGETBL_SIZE_256KB (1 << 1)
73#define I965_PGETBL_SIZE_128KB (2 << 1)
74#define I965_PGETBL_SIZE_1MB (3 << 1)
75#define I965_PGETBL_SIZE_2MB (4 << 1)
76#define I965_PGETBL_SIZE_1_5MB (5 << 1)
77#define G33_PGETBL_SIZE_MASK (3 << 8)
78#define G33_PGETBL_SIZE_1M (1 << 8)
79#define G33_PGETBL_SIZE_2M (2 << 8)
80
81#define I810_DRAM_CTL 0x3000
82#define I810_DRAM_ROW_0 0x00000001
83#define I810_DRAM_ROW_0_SDRAM 0x00000001
84
85/* Intel 815 register */
86#define INTEL_815_APCONT 0x51
87#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
88
89/* Intel i820 registers */
90#define INTEL_I820_RDCR 0x51
91#define INTEL_I820_ERRSTS 0xc8
92
93/* Intel i840 registers */
94#define INTEL_I840_MCHCFG 0x50
95#define INTEL_I840_ERRSTS 0xc8
96
97/* Intel i850 registers */
98#define INTEL_I850_MCHCFG 0x50
99#define INTEL_I850_ERRSTS 0xc8
100
101/* intel 915G registers */
102#define I915_GMADDR 0x18
103#define I915_MMADDR 0x10
104#define I915_PTEADDR 0x1C
105#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
106#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
107#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
108#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
109#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
110#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
111#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
112#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
113
114#define I915_IFPADDR 0x60
115
116/* Intel 965G registers */
117#define I965_MSAC 0x62
118#define I965_IFPADDR 0x70
119
120/* Intel 7505 registers */
121#define INTEL_I7505_APSIZE 0x74
122#define INTEL_I7505_NCAPID 0x60
123#define INTEL_I7505_NISTAT 0x6c
124#define INTEL_I7505_ATTBASE 0x78
125#define INTEL_I7505_ERRSTS 0x42
126#define INTEL_I7505_AGPCTRL 0x70
127#define INTEL_I7505_MCHCFG 0x50
128
129#define SNB_GMCH_CTRL 0x50
130#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
131#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
132#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
133#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
134#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
135#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
136#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
137#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
138#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
139#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
140#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
141#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
142#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
143#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
144#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
145#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
146#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
147#define SNB_GTT_SIZE_0M (0 << 8)
148#define SNB_GTT_SIZE_1M (1 << 8)
149#define SNB_GTT_SIZE_2M (2 << 8)
150#define SNB_GTT_SIZE_MASK (3 << 8)
151
152/* pci devices ids */
153#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
154#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
155#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
156#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
157#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
158#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
159#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
160#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
161#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
162#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
163#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
164#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
165#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
166#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
167#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
168#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
169#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
170#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
171#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
172#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
173#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
174#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
175#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
176#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
177#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
178#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
179#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
180#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
181#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
182#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
183#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
184#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
185#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
186#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
187#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
188#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
189#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
190#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
191#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
192#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
193#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
194#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
195#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
196#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
197#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
198#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
199#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
200#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
201
202/* cover 915 and 945 variants */
203#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
204 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
205 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
206 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
207 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
208 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
209
210#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
211 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
212 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
213 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
214 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
215 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
216
217#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
218 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
219 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
220 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
221 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
222
223#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
224 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
225
226#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
227 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
228
229#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
230 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
231 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
232 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
233 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
234 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
235 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
236 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
237 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
238 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
239 IS_SNB)
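
These IS_* groups are what the rest of the code keys off: intel_i9xx_setup_flush() bails out for IS_SNB and selects the 64-bit I965_IFPADDR path for IS_I965/IS_G33/IS_G4X. The same classification written against an explicit device ID, rather than the agp_bridge global the macros assume, would be (illustrative helper, not part of the patch):

static inline int example_is_snb(u16 device)
{
	return device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
	       device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB;
}
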
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
new file mode 100644
index 000000000000..e8ea6825822c
--- /dev/null
+++ b/drivers/char/agp/intel-gtt.c
@@ -0,0 +1,1516 @@
1/*
2 * Intel GTT (Graphics Translation Table) routines
3 *
4 * Caveat: This driver implements the linux agp interface, but this is far from
5 * an agp driver! GTT support ended up here for purely historical reasons: The
6 * old userspace intel graphics drivers needed an interface to map memory into
7 * the GTT. And the drm provides a default interface for graphics devices sitting
8 * on an agp port. So it made sense to fake the GTT support as an agp port to
9 * avoid having to create a new api.
10 *
11 * With gem this does not make much sense anymore, just needlessly complicates
12 * the code. But as long as the old graphics stack is still supported, it's stuck
13 * here.
14 *
15 * /fairy-tale-mode off
16 */
17
18/*
19 * If we have Intel graphics, we're not going to have anything other than
20 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
21 * on the Intel IOMMU support (CONFIG_DMAR).
22 * Only newer chipsets need to bother with this, of course.
23 */
24#ifdef CONFIG_DMAR
25#define USE_PCI_DMA_API 1
26#endif
27
28static const struct aper_size_info_fixed intel_i810_sizes[] =
29{
30 {64, 16384, 4},
31 /* The 32M mode still requires a 64k gatt */
32 {32, 8192, 4}
33};
34
35#define AGP_DCACHE_MEMORY 1
36#define AGP_PHYS_MEMORY 2
37#define INTEL_AGP_CACHED_MEMORY 3
38
39static struct gatt_mask intel_i810_masks[] =
40{
41 {.mask = I810_PTE_VALID, .type = 0},
42 {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
43 {.mask = I810_PTE_VALID, .type = 0},
44 {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
45 .type = INTEL_AGP_CACHED_MEMORY}
46};
47
48static struct _intel_private {
49 struct pci_dev *pcidev; /* device one */
50 u8 __iomem *registers;
51 u32 __iomem *gtt; /* I915G */
52 int num_dcache_entries;
53 /* gtt_entries is the number of gtt entries that are already mapped
54 * to stolen memory. Stolen memory is larger than the memory mapped
55 * through gtt_entries, as it includes some reserved space for the BIOS
56 * popup and for the GTT.
57 */
58 int gtt_entries; /* i830+ */
59 int gtt_total_size;
60 union {
61 void __iomem *i9xx_flush_page;
62 void *i8xx_flush_page;
63 };
64 struct page *i8xx_page;
65 struct resource ifp_resource;
66 int resource_valid;
67} intel_private;
68
69#ifdef USE_PCI_DMA_API
70static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
71{
72 *ret = pci_map_page(intel_private.pcidev, page, 0,
73 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
74 if (pci_dma_mapping_error(intel_private.pcidev, *ret))
75 return -EINVAL;
76 return 0;
77}
78
79static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
80{
81 pci_unmap_page(intel_private.pcidev, dma,
82 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
83}
84
85static void intel_agp_free_sglist(struct agp_memory *mem)
86{
87 struct sg_table st;
88
89 st.sgl = mem->sg_list;
90 st.orig_nents = st.nents = mem->page_count;
91
92 sg_free_table(&st);
93
94 mem->sg_list = NULL;
95 mem->num_sg = 0;
96}
97
98static int intel_agp_map_memory(struct agp_memory *mem)
99{
100 struct sg_table st;
101 struct scatterlist *sg;
102 int i;
103
104 DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
105
106 if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
107 return -ENOMEM;
108
109 mem->sg_list = sg = st.sgl;
110
111 for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
112 sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
113
114 mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
115 mem->page_count, PCI_DMA_BIDIRECTIONAL);
116 if (unlikely(!mem->num_sg)) {
117 intel_agp_free_sglist(mem);
118 return -ENOMEM;
119 }
120 return 0;
121}
122
123static void intel_agp_unmap_memory(struct agp_memory *mem)
124{
125 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
126
127 pci_unmap_sg(intel_private.pcidev, mem->sg_list,
128 mem->page_count, PCI_DMA_BIDIRECTIONAL);
129 intel_agp_free_sglist(mem);
130}
131
132static void intel_agp_insert_sg_entries(struct agp_memory *mem,
133 off_t pg_start, int mask_type)
134{
135 struct scatterlist *sg;
136 int i, j;
137
138 j = pg_start;
139
140 WARN_ON(!mem->num_sg);
141
142 if (mem->num_sg == mem->page_count) {
143 for_each_sg(mem->sg_list, sg, mem->page_count, i) {
144 writel(agp_bridge->driver->mask_memory(agp_bridge,
145 sg_dma_address(sg), mask_type),
146 intel_private.gtt+j);
147 j++;
148 }
149 } else {
150 		/* sg may have merged pages, but we still have to
151 		 * write a separate per-page address into the GTT */
152 unsigned int len, m;
153
154 for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
155 len = sg_dma_len(sg) / PAGE_SIZE;
156 for (m = 0; m < len; m++) {
157 writel(agp_bridge->driver->mask_memory(agp_bridge,
158 sg_dma_address(sg) + m * PAGE_SIZE,
159 mask_type),
160 intel_private.gtt+j);
161 j++;
162 }
163 }
164 }
165 readl(intel_private.gtt+j-1);
166}
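
The second loop exists because pci_map_sg() may coalesce physically contiguous pages into a single DMA segment, while the GTT still wants exactly one entry per 4KiB page, so each merged segment is re-expanded. Stripped of the driver plumbing, the expansion is just (sketch; the 0x1 stands in for the PTE valid bit from the gatt masks):

static unsigned int example_expand_segment(u64 dma_base, unsigned int len,
					   u32 *pte_out)
{
	unsigned int m, pages = len / 4096;

	for (m = 0; m < pages; m++)		/* one PTE per 4KiB page */
		pte_out[m] = (u32)(dma_base + m * 4096ULL) | 0x1;
	return pages;
}
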
167
168#else
169
170static void intel_agp_insert_sg_entries(struct agp_memory *mem,
171 off_t pg_start, int mask_type)
172{
173 int i, j;
174 u32 cache_bits = 0;
175
176 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
177 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
178 {
179 cache_bits = I830_PTE_SYSTEM_CACHED;
180 }
181
182 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
183 writel(agp_bridge->driver->mask_memory(agp_bridge,
184 page_to_phys(mem->pages[i]), mask_type),
185 intel_private.gtt+j);
186 }
187
188 readl(intel_private.gtt+j-1);
189}
190
191#endif
192
193static int intel_i810_fetch_size(void)
194{
195 u32 smram_miscc;
196 struct aper_size_info_fixed *values;
197
198 pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
199 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
200
201 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
202 dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
203 return 0;
204 }
205 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
206 agp_bridge->current_size = (void *) (values + 1);
207 agp_bridge->aperture_size_idx = 1;
208 return values[1].size;
209 } else {
210 agp_bridge->current_size = (void *) (values);
211 agp_bridge->aperture_size_idx = 0;
212 return values[0].size;
213 }
214
215 return 0;
216}
217
218static int intel_i810_configure(void)
219{
220 struct aper_size_info_fixed *current_size;
221 u32 temp;
222 int i;
223
224 current_size = A_SIZE_FIX(agp_bridge->current_size);
225
226 if (!intel_private.registers) {
227 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
228 temp &= 0xfff80000;
229
230 intel_private.registers = ioremap(temp, 128 * 4096);
231 if (!intel_private.registers) {
232 dev_err(&intel_private.pcidev->dev,
233 "can't remap memory\n");
234 return -ENOMEM;
235 }
236 }
237
238 if ((readl(intel_private.registers+I810_DRAM_CTL)
239 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
240 /* This will need to be dynamically assigned */
241 dev_info(&intel_private.pcidev->dev,
242 "detected 4MB dedicated video ram\n");
243 intel_private.num_dcache_entries = 1024;
244 }
245 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
246 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
247 writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
248 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
249
250 if (agp_bridge->driver->needs_scratch_page) {
251 for (i = 0; i < current_size->num_entries; i++) {
252 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
253 }
254 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
255 }
256 global_cache_flush();
257 return 0;
258}
259
260static void intel_i810_cleanup(void)
261{
262 writel(0, intel_private.registers+I810_PGETBL_CTL);
263 readl(intel_private.registers); /* PCI Posting. */
264 iounmap(intel_private.registers);
265}
266
267static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
268{
269 return;
270}
271
272/* Exists to support ARGB cursors */
273static struct page *i8xx_alloc_pages(void)
274{
275 struct page *page;
276
277 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
278 if (page == NULL)
279 return NULL;
280
281 if (set_pages_uc(page, 4) < 0) {
282 set_pages_wb(page, 4);
283 __free_pages(page, 2);
284 return NULL;
285 }
286 get_page(page);
287 atomic_inc(&agp_bridge->current_memory_agp);
288 return page;
289}
290
291static void i8xx_destroy_pages(struct page *page)
292{
293 if (page == NULL)
294 return;
295
296 set_pages_wb(page, 4);
297 put_page(page);
298 __free_pages(page, 2);
299 atomic_dec(&agp_bridge->current_memory_agp);
300}
301
302static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
303 int type)
304{
305 if (type < AGP_USER_TYPES)
306 return type;
307 else if (type == AGP_USER_CACHED_MEMORY)
308 return INTEL_AGP_CACHED_MEMORY;
309 else
310 return 0;
311}
312
313static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
314 int type)
315{
316 int i, j, num_entries;
317 void *temp;
318 int ret = -EINVAL;
319 int mask_type;
320
321 if (mem->page_count == 0)
322 goto out;
323
324 temp = agp_bridge->current_size;
325 num_entries = A_SIZE_FIX(temp)->num_entries;
326
327 if ((pg_start + mem->page_count) > num_entries)
328 goto out_err;
329
331 for (j = pg_start; j < (pg_start + mem->page_count); j++) {
332 if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
333 ret = -EBUSY;
334 goto out_err;
335 }
336 }
337
338 if (type != mem->type)
339 goto out_err;
340
341 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
342
343 switch (mask_type) {
344 case AGP_DCACHE_MEMORY:
345 if (!mem->is_flushed)
346 global_cache_flush();
347 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
348 writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
349 intel_private.registers+I810_PTE_BASE+(i*4));
350 }
351 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
352 break;
353 case AGP_PHYS_MEMORY:
354 case AGP_NORMAL_MEMORY:
355 if (!mem->is_flushed)
356 global_cache_flush();
357 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
358 writel(agp_bridge->driver->mask_memory(agp_bridge,
359 page_to_phys(mem->pages[i]), mask_type),
360 intel_private.registers+I810_PTE_BASE+(j*4));
361 }
362 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
363 break;
364 default:
365 goto out_err;
366 }
367
368out:
369 ret = 0;
370out_err:
371 mem->is_flushed = true;
372 return ret;
373}
374
375static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
376 int type)
377{
378 int i;
379
380 if (mem->page_count == 0)
381 return 0;
382
383 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
384 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
385 }
386 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
387
388 return 0;
389}
390
391/*
392 * The i810/i830 requires a physical address to program its mouse
393 * pointer into hardware.
394 * However, the X server still writes to it through the AGP aperture.
395 */
396static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
397{
398 struct agp_memory *new;
399 struct page *page;
400
401 switch (pg_count) {
402 case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
403 break;
404 case 4:
405 /* kludge to get 4 physical pages for ARGB cursor */
406 page = i8xx_alloc_pages();
407 break;
408 default:
409 return NULL;
410 }
411
412 if (page == NULL)
413 return NULL;
414
415 new = agp_create_memory(pg_count);
416 if (new == NULL)
417 return NULL;
418
419 new->pages[0] = page;
420 if (pg_count == 4) {
421 /* kludge to get 4 physical pages for ARGB cursor */
422 new->pages[1] = new->pages[0] + 1;
423 new->pages[2] = new->pages[1] + 1;
424 new->pages[3] = new->pages[2] + 1;
425 }
426 new->page_count = pg_count;
427 new->num_scratch_pages = pg_count;
428 new->type = AGP_PHYS_MEMORY;
429 new->physical = page_to_phys(new->pages[0]);
430 return new;
431}
432
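/*
 * Editorial sketch: callers reach the 4-page contiguous block through
 * the generic type interface rather than calling alloc_agpphysmem_i8xx()
 * directly. agp_allocate_memory() is the real AGP entry point;
 * program_cursor_base() is a hypothetical consumer of ->physical.
 */
#if 0
struct agp_memory *cursor;

cursor = agp_allocate_memory(agp_bridge, 4, AGP_PHYS_MEMORY);
if (cursor)
	program_cursor_base(cursor->physical);	/* hypothetical helper */
#endif
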
433static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
434{
435 struct agp_memory *new;
436
437 if (type == AGP_DCACHE_MEMORY) {
438 if (pg_count != intel_private.num_dcache_entries)
439 return NULL;
440
441 new = agp_create_memory(1);
442 if (new == NULL)
443 return NULL;
444
445 new->type = AGP_DCACHE_MEMORY;
446 new->page_count = pg_count;
447 new->num_scratch_pages = 0;
448 agp_free_page_array(new);
449 return new;
450 }
451 if (type == AGP_PHYS_MEMORY)
452 return alloc_agpphysmem_i8xx(pg_count, type);
453 return NULL;
454}
455
456static void intel_i810_free_by_type(struct agp_memory *curr)
457{
458 agp_free_key(curr->key);
459 if (curr->type == AGP_PHYS_MEMORY) {
460 if (curr->page_count == 4)
461 i8xx_destroy_pages(curr->pages[0]);
462 else {
463 agp_bridge->driver->agp_destroy_page(curr->pages[0],
464 AGP_PAGE_DESTROY_UNMAP);
465 agp_bridge->driver->agp_destroy_page(curr->pages[0],
466 AGP_PAGE_DESTROY_FREE);
467 }
468 agp_free_page_array(curr);
469 }
470 kfree(curr);
471}
472
473static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
474 dma_addr_t addr, int type)
475{
476 /* Type checking must be done elsewhere */
477 return addr | bridge->driver->masks[type].mask;
478}
479
480static struct aper_size_info_fixed intel_i830_sizes[] =
481{
482 {128, 32768, 5},
483 /* The 64M mode still requires a 128k gatt */
484 {64, 16384, 5},
485 {256, 65536, 6},
486 {512, 131072, 7},
487};
488
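/* Each entry above is {aperture size in MB, GTT entries, page order}.
 * The entry counts are simply the aperture divided by the 4KB page
 * size: 128MB/4KB = 32768, 64MB/4KB = 16384, 256MB/4KB = 65536 and
 * 512MB/4KB = 131072.
 */
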
489static void intel_i830_init_gtt_entries(void)
490{
491 u16 gmch_ctrl;
492 int gtt_entries = 0;
493 u8 rdct;
494 int local = 0;
495 static const int ddt[4] = { 0, 16, 32, 64 };
496 int size; /* reserved space (in kb) at the top of stolen memory */
497
498 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
499
500 if (IS_I965) {
501 u32 pgetbl_ctl;
502 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
503
504 /* The 965 has a field telling us the size of the GTT,
505 * which may be larger than what is necessary to map the
506 * aperture.
507 */
508 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
509 case I965_PGETBL_SIZE_128KB:
510 size = 128;
511 break;
512 case I965_PGETBL_SIZE_256KB:
513 size = 256;
514 break;
515 case I965_PGETBL_SIZE_512KB:
516 size = 512;
517 break;
518 case I965_PGETBL_SIZE_1MB:
519 size = 1024;
520 break;
521 case I965_PGETBL_SIZE_2MB:
522 size = 2048;
523 break;
524 case I965_PGETBL_SIZE_1_5MB:
525 size = 1024 + 512;
526 break;
527 default:
528 dev_info(&intel_private.pcidev->dev,
529 "unknown page table size, assuming 512KB\n");
530 size = 512;
531 }
532 size += 4; /* add in BIOS popup space */
533 } else if (IS_G33 && !IS_PINEVIEW) {
534		/* The G33's GTT size is defined in gmch_ctrl */
535 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
536 case G33_PGETBL_SIZE_1M:
537 size = 1024;
538 break;
539 case G33_PGETBL_SIZE_2M:
540 size = 2048;
541 break;
542 default:
543 dev_info(&agp_bridge->dev->dev,
544 "unknown page table size 0x%x, assuming 512KB\n",
545 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
546 size = 512;
547 }
548 size += 4;
549 } else if (IS_G4X || IS_PINEVIEW) {
550		/* On 4 series hardware, GTT stolen memory is separate from graphics
551		 * stolen memory, so ignore it when counting stolen GTT entries. However,
552 * 4KB of the stolen memory doesn't get mapped to the GTT.
553 */
554 size = 4;
555 } else {
556 /* On previous hardware, the GTT size was just what was
557 * required to map the aperture.
558 */
559 size = agp_bridge->driver->fetch_size() + 4;
560 }
561
562 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
563 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
564 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
565 case I830_GMCH_GMS_STOLEN_512:
566 gtt_entries = KB(512) - KB(size);
567 break;
568 case I830_GMCH_GMS_STOLEN_1024:
569 gtt_entries = MB(1) - KB(size);
570 break;
571 case I830_GMCH_GMS_STOLEN_8192:
572 gtt_entries = MB(8) - KB(size);
573 break;
574 case I830_GMCH_GMS_LOCAL:
575 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
576 gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
577 MB(ddt[I830_RDRAM_DDT(rdct)]);
578 local = 1;
579 break;
580 default:
581 gtt_entries = 0;
582 break;
583 }
584 } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
585 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
586 /*
587 * SandyBridge has new memory control reg at 0x50.w
588 */
589 u16 snb_gmch_ctl;
590 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
591 switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
592 case SNB_GMCH_GMS_STOLEN_32M:
593 gtt_entries = MB(32) - KB(size);
594 break;
595 case SNB_GMCH_GMS_STOLEN_64M:
596 gtt_entries = MB(64) - KB(size);
597 break;
598 case SNB_GMCH_GMS_STOLEN_96M:
599 gtt_entries = MB(96) - KB(size);
600 break;
601 case SNB_GMCH_GMS_STOLEN_128M:
602 gtt_entries = MB(128) - KB(size);
603 break;
604 case SNB_GMCH_GMS_STOLEN_160M:
605 gtt_entries = MB(160) - KB(size);
606 break;
607 case SNB_GMCH_GMS_STOLEN_192M:
608 gtt_entries = MB(192) - KB(size);
609 break;
610 case SNB_GMCH_GMS_STOLEN_224M:
611 gtt_entries = MB(224) - KB(size);
612 break;
613 case SNB_GMCH_GMS_STOLEN_256M:
614 gtt_entries = MB(256) - KB(size);
615 break;
616 case SNB_GMCH_GMS_STOLEN_288M:
617 gtt_entries = MB(288) - KB(size);
618 break;
619 case SNB_GMCH_GMS_STOLEN_320M:
620 gtt_entries = MB(320) - KB(size);
621 break;
622 case SNB_GMCH_GMS_STOLEN_352M:
623 gtt_entries = MB(352) - KB(size);
624 break;
625 case SNB_GMCH_GMS_STOLEN_384M:
626 gtt_entries = MB(384) - KB(size);
627 break;
628 case SNB_GMCH_GMS_STOLEN_416M:
629 gtt_entries = MB(416) - KB(size);
630 break;
631 case SNB_GMCH_GMS_STOLEN_448M:
632 gtt_entries = MB(448) - KB(size);
633 break;
634 case SNB_GMCH_GMS_STOLEN_480M:
635 gtt_entries = MB(480) - KB(size);
636 break;
637 case SNB_GMCH_GMS_STOLEN_512M:
638 gtt_entries = MB(512) - KB(size);
639 break;
640 }
641 } else {
642 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
643 case I855_GMCH_GMS_STOLEN_1M:
644 gtt_entries = MB(1) - KB(size);
645 break;
646 case I855_GMCH_GMS_STOLEN_4M:
647 gtt_entries = MB(4) - KB(size);
648 break;
649 case I855_GMCH_GMS_STOLEN_8M:
650 gtt_entries = MB(8) - KB(size);
651 break;
652 case I855_GMCH_GMS_STOLEN_16M:
653 gtt_entries = MB(16) - KB(size);
654 break;
655 case I855_GMCH_GMS_STOLEN_32M:
656 gtt_entries = MB(32) - KB(size);
657 break;
658 case I915_GMCH_GMS_STOLEN_48M:
659 /* Check it's really I915G */
660 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
661 gtt_entries = MB(48) - KB(size);
662 else
663 gtt_entries = 0;
664 break;
665 case I915_GMCH_GMS_STOLEN_64M:
666 /* Check it's really I915G */
667 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
668 gtt_entries = MB(64) - KB(size);
669 else
670 gtt_entries = 0;
671 break;
672 case G33_GMCH_GMS_STOLEN_128M:
673 if (IS_G33 || IS_I965 || IS_G4X)
674 gtt_entries = MB(128) - KB(size);
675 else
676 gtt_entries = 0;
677 break;
678 case G33_GMCH_GMS_STOLEN_256M:
679 if (IS_G33 || IS_I965 || IS_G4X)
680 gtt_entries = MB(256) - KB(size);
681 else
682 gtt_entries = 0;
683 break;
684 case INTEL_GMCH_GMS_STOLEN_96M:
685 if (IS_I965 || IS_G4X)
686 gtt_entries = MB(96) - KB(size);
687 else
688 gtt_entries = 0;
689 break;
690 case INTEL_GMCH_GMS_STOLEN_160M:
691 if (IS_I965 || IS_G4X)
692 gtt_entries = MB(160) - KB(size);
693 else
694 gtt_entries = 0;
695 break;
696 case INTEL_GMCH_GMS_STOLEN_224M:
697 if (IS_I965 || IS_G4X)
698 gtt_entries = MB(224) - KB(size);
699 else
700 gtt_entries = 0;
701 break;
702 case INTEL_GMCH_GMS_STOLEN_352M:
703 if (IS_I965 || IS_G4X)
704 gtt_entries = MB(352) - KB(size);
705 else
706 gtt_entries = 0;
707 break;
708 default:
709 gtt_entries = 0;
710 break;
711 }
712 }
713 if (gtt_entries > 0) {
714 dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
715 gtt_entries / KB(1), local ? "local" : "stolen");
716 gtt_entries /= KB(4);
717 } else {
718 dev_info(&agp_bridge->dev->dev,
719 "no pre-allocated video memory detected\n");
720 gtt_entries = 0;
721 }
722
723 intel_private.gtt_entries = gtt_entries;
724}
725
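/* Editorial worked example of the arithmetic above, for a hypothetical
 * G33 board with a 1MB GTT and 64MB of stolen memory:
 *
 *   size        = 1024 + 4          = 1028 KB (GTT plus BIOS popup)
 *   gtt_entries = MB(64) - KB(1028) = 66056192 bytes of usable stolen memory
 *   final count = 66056192 / KB(4)  = 16127 pre-allocated 4KB pages
 */
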
726static void intel_i830_fini_flush(void)
727{
728 kunmap(intel_private.i8xx_page);
729 intel_private.i8xx_flush_page = NULL;
730 unmap_page_from_agp(intel_private.i8xx_page);
731
732 __free_page(intel_private.i8xx_page);
733 intel_private.i8xx_page = NULL;
734}
735
736static void intel_i830_setup_flush(void)
737{
738 /* return if we've already set the flush mechanism up */
739 if (intel_private.i8xx_page)
740 return;
741
742 intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
743 if (!intel_private.i8xx_page)
744 return;
745
746 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
747 if (!intel_private.i8xx_flush_page)
748 intel_i830_fini_flush();
749}
750
751/* The chipset_flush interface needs to get data that has already been
752 * flushed out of the CPU all the way out to main memory, because the GPU
753 * doesn't snoop those buffers.
754 *
755 * The 8xx series doesn't have the same lovely interface for flushing the
756 * chipset write buffers that the later chips do. According to the 865
757 * specs, the buffer is 64 octwords, or 1KB. So, to push out whatever was
758 * previously sitting in that buffer, we just fill 1KB and clflush it, on
759 * the assumption that this evicts the old contents. It appears to work.
760 */
761static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
762{
763 unsigned int *pg = intel_private.i8xx_flush_page;
764
765 memset(pg, 0, 1024);
766
767 if (cpu_has_clflush)
768 clflush_cache_range(pg, 1024);
769 else if (wbinvd_on_all_cpus() != 0)
770 printk(KERN_ERR "Timed out waiting for cache flush.\n");
771}
772
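/* Editorial sketch: clflush_cache_range() walks the buffer one cache
 * line at a time, fenced on both sides so the flushes are ordered
 * against surrounding stores. Roughly, assuming 64-byte lines (the
 * real arch helper reads the line size from cpuinfo):
 */
#if 0
static void flush_range_sketch(void *vaddr, unsigned int size)
{
	void *p;

	mb();
	for (p = vaddr; p < vaddr + size; p += 64)
		clflush(p);
	mb();
}
#endif
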
773/* The intel i830 automatically initializes the agp aperture during POST.
774 * Use the memory already set aside for the GTT.
775 */
776static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
777{
778 int page_order;
779 struct aper_size_info_fixed *size;
780 int num_entries;
781 u32 temp;
782
783 size = agp_bridge->current_size;
784 page_order = size->page_order;
785 num_entries = size->num_entries;
786 agp_bridge->gatt_table_real = NULL;
787
788 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
789 temp &= 0xfff80000;
790
791 intel_private.registers = ioremap(temp, 128 * 4096);
792 if (!intel_private.registers)
793 return -ENOMEM;
794
795 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
796 global_cache_flush(); /* FIXME: ?? */
797
798 /* we have to call this as early as possible after the MMIO base address is known */
799 intel_i830_init_gtt_entries();
800
801 agp_bridge->gatt_table = NULL;
802
803 agp_bridge->gatt_bus_addr = temp;
804
805 return 0;
806}
807
808/* Nothing to free: the GTT lives at the top of stolen memory, which
809 * is set up by the BIOS rather than allocated by the driver.
810 */
811static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
812{
813 return 0;
814}
815
816static int intel_i830_fetch_size(void)
817{
818 u16 gmch_ctrl;
819 struct aper_size_info_fixed *values;
820
821 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
822
823 if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
824 agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
825 /* 855GM/852GM/865G has 128MB aperture size */
826 agp_bridge->current_size = (void *) values;
827 agp_bridge->aperture_size_idx = 0;
828 return values[0].size;
829 }
830
831 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
832
833 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
834 agp_bridge->current_size = (void *) values;
835 agp_bridge->aperture_size_idx = 0;
836 return values[0].size;
837 } else {
838 agp_bridge->current_size = (void *) (values + 1);
839 agp_bridge->aperture_size_idx = 1;
840 return values[1].size;
841 }
844}
845
846static int intel_i830_configure(void)
847{
848 struct aper_size_info_fixed *current_size;
849 u32 temp;
850 u16 gmch_ctrl;
851 int i;
852
853 current_size = A_SIZE_FIX(agp_bridge->current_size);
854
855 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
856 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
857
858 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
859 gmch_ctrl |= I830_GMCH_ENABLED;
860 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
861
862 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
863 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
864
865 if (agp_bridge->driver->needs_scratch_page) {
866 for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
867 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
868 }
869 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
870 }
871
872 global_cache_flush();
873
874 intel_i830_setup_flush();
875 return 0;
876}
877
878static void intel_i830_cleanup(void)
879{
880 iounmap(intel_private.registers);
881}
882
883static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
884 int type)
885{
886 int i, j, num_entries;
887 void *temp;
888 int ret = -EINVAL;
889 int mask_type;
890
891 if (mem->page_count == 0)
892 goto out;
893
894 temp = agp_bridge->current_size;
895 num_entries = A_SIZE_FIX(temp)->num_entries;
896
897 if (pg_start < intel_private.gtt_entries) {
898 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
899 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
900 pg_start, intel_private.gtt_entries);
901
902 dev_info(&intel_private.pcidev->dev,
903 "trying to insert into local/stolen memory\n");
904 goto out_err;
905 }
906
907 if ((pg_start + mem->page_count) > num_entries)
908 goto out_err;
909
910	/* The i830 can't check the GTT for entries since it's read only;
911	 * depend on the caller to make the correct offset decisions.
912 */
913
914 if (type != mem->type)
915 goto out_err;
916
917 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
918
919 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
920 mask_type != INTEL_AGP_CACHED_MEMORY)
921 goto out_err;
922
923 if (!mem->is_flushed)
924 global_cache_flush();
925
926 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
927 writel(agp_bridge->driver->mask_memory(agp_bridge,
928 page_to_phys(mem->pages[i]), mask_type),
929 intel_private.registers+I810_PTE_BASE+(j*4));
930 }
931 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
932
933out:
934 ret = 0;
935out_err:
936 mem->is_flushed = true;
937 return ret;
938}
939
940static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
941 int type)
942{
943 int i;
944
945 if (mem->page_count == 0)
946 return 0;
947
948 if (pg_start < intel_private.gtt_entries) {
949 dev_info(&intel_private.pcidev->dev,
950 "trying to disable local/stolen memory\n");
951 return -EINVAL;
952 }
953
954 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
955 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
956 }
957 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
958
959 return 0;
960}
961
962static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
963{
964 if (type == AGP_PHYS_MEMORY)
965 return alloc_agpphysmem_i8xx(pg_count, type);
966 /* always return NULL for other allocation types for now */
967 return NULL;
968}
969
970static int intel_alloc_chipset_flush_resource(void)
971{
972 int ret;
973 ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
974 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
975 pcibios_align_resource, agp_bridge->dev);
976
977 return ret;
978}
979
980static void intel_i915_setup_chipset_flush(void)
981{
982 int ret;
983 u32 temp;
984
985 pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
986 if (!(temp & 0x1)) {
987 intel_alloc_chipset_flush_resource();
988 intel_private.resource_valid = 1;
989 pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
990 } else {
991 temp &= ~1;
992
993 intel_private.resource_valid = 1;
994 intel_private.ifp_resource.start = temp;
995 intel_private.ifp_resource.end = temp + PAGE_SIZE;
996 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
997		/* some BIOSes reserve this area in a PnP resource, some don't */
998 if (ret)
999 intel_private.resource_valid = 0;
1000 }
1001}
1002
1003static void intel_i965_g33_setup_chipset_flush(void)
1004{
1005 u32 temp_hi, temp_lo;
1006 int ret;
1007
1008 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
1009 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
1010
1011 if (!(temp_lo & 0x1)) {
1013 intel_alloc_chipset_flush_resource();
1014
1015 intel_private.resource_valid = 1;
1016 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
1017 upper_32_bits(intel_private.ifp_resource.start));
1018 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1019 } else {
1020 u64 l64;
1021
1022 temp_lo &= ~0x1;
1023 l64 = ((u64)temp_hi << 32) | temp_lo;
1024
1025 intel_private.resource_valid = 1;
1026 intel_private.ifp_resource.start = l64;
1027 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1028 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1029		/* some BIOSes reserve this area in a PnP resource, some don't */
1030 if (ret)
1031 intel_private.resource_valid = 0;
1032 }
1033}
1034
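/* Editorial note: both setup paths above treat IFPADDR the same way;
 * bit 0 is a "valid" flag and the remaining bits hold the page-aligned
 * flush page address. Reduced to a sketch (flush_phys and
 * new_page_phys are hypothetical locals):
 */
#if 0
u32 ifp, flush_phys, new_page_phys;

pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &ifp);
if (ifp & 0x1)
	flush_phys = ifp & ~0x1;	/* reuse what the BIOS programmed */
else
	pci_write_config_dword(agp_bridge->dev, I915_IFPADDR,
			       new_page_phys | 0x1);	/* claim our own page */
#endif
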
1035static void intel_i9xx_setup_flush(void)
1036{
1037 /* return if already configured */
1038 if (intel_private.ifp_resource.start)
1039 return;
1040
1041 if (IS_SNB)
1042 return;
1043
1044 /* setup a resource for this object */
1045 intel_private.ifp_resource.name = "Intel Flush Page";
1046 intel_private.ifp_resource.flags = IORESOURCE_MEM;
1047
1048 /* Setup chipset flush for 915 */
1049 if (IS_I965 || IS_G33 || IS_G4X) {
1050 intel_i965_g33_setup_chipset_flush();
1051 } else {
1052 intel_i915_setup_chipset_flush();
1053 }
1054
1055 if (intel_private.ifp_resource.start) {
1056 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1057 if (!intel_private.i9xx_flush_page)
1058			dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing\n");
1059 }
1060}
1061
1062static int intel_i915_configure(void)
1063{
1064 struct aper_size_info_fixed *current_size;
1065 u32 temp;
1066 u16 gmch_ctrl;
1067 int i;
1068
1069 current_size = A_SIZE_FIX(agp_bridge->current_size);
1070
1071 pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
1072
1073 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1074
1075 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1076 gmch_ctrl |= I830_GMCH_ENABLED;
1077 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
1078
1079 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
1080 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
1081
1082 if (agp_bridge->driver->needs_scratch_page) {
1083 for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
1084 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1085 }
1086 readl(intel_private.gtt+i-1); /* PCI Posting. */
1087 }
1088
1089 global_cache_flush();
1090
1091 intel_i9xx_setup_flush();
1092
1093 return 0;
1094}
1095
1096static void intel_i915_cleanup(void)
1097{
1098 if (intel_private.i9xx_flush_page)
1099 iounmap(intel_private.i9xx_flush_page);
1100 if (intel_private.resource_valid)
1101 release_resource(&intel_private.ifp_resource);
1102 intel_private.ifp_resource.start = 0;
1103 intel_private.resource_valid = 0;
1104 iounmap(intel_private.gtt);
1105 iounmap(intel_private.registers);
1106}
1107
1108static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
1109{
1110 if (intel_private.i9xx_flush_page)
1111 writel(1, intel_private.i9xx_flush_page);
1112}
1113
1114static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
1115 int type)
1116{
1117 int num_entries;
1118 void *temp;
1119 int ret = -EINVAL;
1120 int mask_type;
1121
1122 if (mem->page_count == 0)
1123 goto out;
1124
1125 temp = agp_bridge->current_size;
1126 num_entries = A_SIZE_FIX(temp)->num_entries;
1127
1128 if (pg_start < intel_private.gtt_entries) {
1129 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1130 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1131 pg_start, intel_private.gtt_entries);
1132
1133 dev_info(&intel_private.pcidev->dev,
1134 "trying to insert into local/stolen memory\n");
1135 goto out_err;
1136 }
1137
1138 if ((pg_start + mem->page_count) > num_entries)
1139 goto out_err;
1140
1141 /* The i915 can't check the GTT for entries since it's read only;
1142 * depend on the caller to make the correct offset decisions.
1143 */
1144
1145 if (type != mem->type)
1146 goto out_err;
1147
1148 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1149
1150 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1151 mask_type != INTEL_AGP_CACHED_MEMORY)
1152 goto out_err;
1153
1154 if (!mem->is_flushed)
1155 global_cache_flush();
1156
1157 intel_agp_insert_sg_entries(mem, pg_start, mask_type);
1158
1159 out:
1160 ret = 0;
1161 out_err:
1162 mem->is_flushed = true;
1163 return ret;
1164}
1165
1166static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
1167 int type)
1168{
1169 int i;
1170
1171 if (mem->page_count == 0)
1172 return 0;
1173
1174 if (pg_start < intel_private.gtt_entries) {
1175 dev_info(&intel_private.pcidev->dev,
1176 "trying to disable local/stolen memory\n");
1177 return -EINVAL;
1178 }
1179
1180 for (i = pg_start; i < (mem->page_count + pg_start); i++)
1181 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1182
1183 readl(intel_private.gtt+i-1);
1184
1185 return 0;
1186}
1187
1188/* Return the aperture size by just checking the resource length. The only
1189 * effect the MSAC registers have, according to the spec, is to change
1190 * the resource size.
1191 */
1192static int intel_i9xx_fetch_size(void)
1193{
1194 int num_sizes = ARRAY_SIZE(intel_i830_sizes);
1195 int aper_size; /* size in megabytes */
1196 int i;
1197
1198 aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
1199
1200 for (i = 0; i < num_sizes; i++) {
1201 if (aper_size == intel_i830_sizes[i].size) {
1202 agp_bridge->current_size = intel_i830_sizes + i;
1203 return aper_size;
1204 }
1205 }
1206
1207 return 0;
1208}
1209
1210/* The intel i915 automatically initializes the agp aperture during POST.
1211 * Use the memory already set aside for the GTT.
1212 */
1213static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
1214{
1215 int page_order;
1216 struct aper_size_info_fixed *size;
1217 int num_entries;
1218 u32 temp, temp2;
1219 int gtt_map_size = 256 * 1024;
1220
1221 size = agp_bridge->current_size;
1222 page_order = size->page_order;
1223 num_entries = size->num_entries;
1224 agp_bridge->gatt_table_real = NULL;
1225
1226 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1227 pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
1228
1229 if (IS_G33)
1230 gtt_map_size = 1024 * 1024; /* 1M on G33 */
1231 intel_private.gtt = ioremap(temp2, gtt_map_size);
1232 if (!intel_private.gtt)
1233 return -ENOMEM;
1234
1235 intel_private.gtt_total_size = gtt_map_size / 4;
1236
1237 temp &= 0xfff80000;
1238
1239 intel_private.registers = ioremap(temp, 128 * 4096);
1240 if (!intel_private.registers) {
1241 iounmap(intel_private.gtt);
1242 return -ENOMEM;
1243 }
1244
1245 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1246 global_cache_flush(); /* FIXME: ? */
1247
1248 /* we have to call this as early as possible after the MMIO base address is known */
1249 intel_i830_init_gtt_entries();
1250
1251 agp_bridge->gatt_table = NULL;
1252
1253 agp_bridge->gatt_bus_addr = temp;
1254
1255 return 0;
1256}
1257
1258/*
1259 * The i965 supports 36-bit physical addresses, but to keep
1260 * the format of the GTT the same, the bits that don't fit
1261 * in a 32-bit word are shifted down to bits 4..7.
1262 *
1263 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1264 * is always zero on 32-bit architectures, so no need to make
1265 * this conditional.
1266 */
1267static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1268 dma_addr_t addr, int type)
1269{
1270 /* Shift high bits down */
1271 addr |= (addr >> 28) & 0xf0;
1272
1273 /* Type checking must be done elsewhere */
1274 return addr | bridge->driver->masks[type].mask;
1275}
1276
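/* Editorial worked example of the folding above, for a hypothetical
 * page at the 36-bit bus address 0x123456000:
 *
 *   (addr >> 28) & 0xf0 = 0x10         (bit 32 lands in bit 4)
 *   addr | 0x10         = 0x123456010
 *
 * The 32-bit writel() into the GTT then truncates this to 0x23456010,
 * plus whatever valid/type bits mask_memory() ORs in.
 */
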
1277static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1278{
1279 u16 snb_gmch_ctl;
1280
1281 switch (agp_bridge->dev->device) {
1282 case PCI_DEVICE_ID_INTEL_GM45_HB:
1283 case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
1284 case PCI_DEVICE_ID_INTEL_Q45_HB:
1285 case PCI_DEVICE_ID_INTEL_G45_HB:
1286 case PCI_DEVICE_ID_INTEL_G41_HB:
1287 case PCI_DEVICE_ID_INTEL_B43_HB:
1288 case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
1289 case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
1290 case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
1291 case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
1292 *gtt_offset = *gtt_size = MB(2);
1293 break;
1294 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
1295 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
1296 *gtt_offset = MB(2);
1297
1298 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1299 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
1300 default:
1301 case SNB_GTT_SIZE_0M:
1302 printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
1303 *gtt_size = MB(0);
1304 break;
1305 case SNB_GTT_SIZE_1M:
1306 *gtt_size = MB(1);
1307 break;
1308 case SNB_GTT_SIZE_2M:
1309 *gtt_size = MB(2);
1310 break;
1311 }
1312 break;
1313 default:
1314 *gtt_offset = *gtt_size = KB(512);
1315 }
1316}
1317
1318/* The intel i965 automatically initializes the agp aperture during POST.
1319 * Use the memory already set aside for the GTT.
1320 */
1321static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
1322{
1323 int page_order;
1324 struct aper_size_info_fixed *size;
1325 int num_entries;
1326 u32 temp;
1327 int gtt_offset, gtt_size;
1328
1329 size = agp_bridge->current_size;
1330 page_order = size->page_order;
1331 num_entries = size->num_entries;
1332 agp_bridge->gatt_table_real = NULL;
1333
1334 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1335
1336 temp &= 0xfff00000;
1337
1338 intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
1339
1340 intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
1341
1342 if (!intel_private.gtt)
1343 return -ENOMEM;
1344
1345 intel_private.gtt_total_size = gtt_size / 4;
1346
1347 intel_private.registers = ioremap(temp, 128 * 4096);
1348 if (!intel_private.registers) {
1349 iounmap(intel_private.gtt);
1350 return -ENOMEM;
1351 }
1352
1353 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1354 global_cache_flush(); /* FIXME: ? */
1355
1356 /* we have to call this as early as possible after the MMIO base address is known */
1357 intel_i830_init_gtt_entries();
1358
1359 agp_bridge->gatt_table = NULL;
1360
1361 agp_bridge->gatt_bus_addr = temp;
1362
1363 return 0;
1364}
1365
1366static const struct agp_bridge_driver intel_810_driver = {
1367 .owner = THIS_MODULE,
1368 .aperture_sizes = intel_i810_sizes,
1369 .size_type = FIXED_APER_SIZE,
1370 .num_aperture_sizes = 2,
1371 .needs_scratch_page = true,
1372 .configure = intel_i810_configure,
1373 .fetch_size = intel_i810_fetch_size,
1374 .cleanup = intel_i810_cleanup,
1375 .mask_memory = intel_i810_mask_memory,
1376 .masks = intel_i810_masks,
1377 .agp_enable = intel_i810_agp_enable,
1378 .cache_flush = global_cache_flush,
1379 .create_gatt_table = agp_generic_create_gatt_table,
1380 .free_gatt_table = agp_generic_free_gatt_table,
1381 .insert_memory = intel_i810_insert_entries,
1382 .remove_memory = intel_i810_remove_entries,
1383 .alloc_by_type = intel_i810_alloc_by_type,
1384 .free_by_type = intel_i810_free_by_type,
1385 .agp_alloc_page = agp_generic_alloc_page,
1386 .agp_alloc_pages = agp_generic_alloc_pages,
1387 .agp_destroy_page = agp_generic_destroy_page,
1388 .agp_destroy_pages = agp_generic_destroy_pages,
1389 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1390};
1391
1392static const struct agp_bridge_driver intel_830_driver = {
1393 .owner = THIS_MODULE,
1394 .aperture_sizes = intel_i830_sizes,
1395 .size_type = FIXED_APER_SIZE,
1396 .num_aperture_sizes = 4,
1397 .needs_scratch_page = true,
1398 .configure = intel_i830_configure,
1399 .fetch_size = intel_i830_fetch_size,
1400 .cleanup = intel_i830_cleanup,
1401 .mask_memory = intel_i810_mask_memory,
1402 .masks = intel_i810_masks,
1403 .agp_enable = intel_i810_agp_enable,
1404 .cache_flush = global_cache_flush,
1405 .create_gatt_table = intel_i830_create_gatt_table,
1406 .free_gatt_table = intel_i830_free_gatt_table,
1407 .insert_memory = intel_i830_insert_entries,
1408 .remove_memory = intel_i830_remove_entries,
1409 .alloc_by_type = intel_i830_alloc_by_type,
1410 .free_by_type = intel_i810_free_by_type,
1411 .agp_alloc_page = agp_generic_alloc_page,
1412 .agp_alloc_pages = agp_generic_alloc_pages,
1413 .agp_destroy_page = agp_generic_destroy_page,
1414 .agp_destroy_pages = agp_generic_destroy_pages,
1415 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1416 .chipset_flush = intel_i830_chipset_flush,
1417};
1418
1419static const struct agp_bridge_driver intel_915_driver = {
1420 .owner = THIS_MODULE,
1421 .aperture_sizes = intel_i830_sizes,
1422 .size_type = FIXED_APER_SIZE,
1423 .num_aperture_sizes = 4,
1424 .needs_scratch_page = true,
1425 .configure = intel_i915_configure,
1426 .fetch_size = intel_i9xx_fetch_size,
1427 .cleanup = intel_i915_cleanup,
1428 .mask_memory = intel_i810_mask_memory,
1429 .masks = intel_i810_masks,
1430 .agp_enable = intel_i810_agp_enable,
1431 .cache_flush = global_cache_flush,
1432 .create_gatt_table = intel_i915_create_gatt_table,
1433 .free_gatt_table = intel_i830_free_gatt_table,
1434 .insert_memory = intel_i915_insert_entries,
1435 .remove_memory = intel_i915_remove_entries,
1436 .alloc_by_type = intel_i830_alloc_by_type,
1437 .free_by_type = intel_i810_free_by_type,
1438 .agp_alloc_page = agp_generic_alloc_page,
1439 .agp_alloc_pages = agp_generic_alloc_pages,
1440 .agp_destroy_page = agp_generic_destroy_page,
1441 .agp_destroy_pages = agp_generic_destroy_pages,
1442 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1443 .chipset_flush = intel_i915_chipset_flush,
1444#ifdef USE_PCI_DMA_API
1445 .agp_map_page = intel_agp_map_page,
1446 .agp_unmap_page = intel_agp_unmap_page,
1447 .agp_map_memory = intel_agp_map_memory,
1448 .agp_unmap_memory = intel_agp_unmap_memory,
1449#endif
1450};
1451
1452static const struct agp_bridge_driver intel_i965_driver = {
1453 .owner = THIS_MODULE,
1454 .aperture_sizes = intel_i830_sizes,
1455 .size_type = FIXED_APER_SIZE,
1456 .num_aperture_sizes = 4,
1457 .needs_scratch_page = true,
1458 .configure = intel_i915_configure,
1459 .fetch_size = intel_i9xx_fetch_size,
1460 .cleanup = intel_i915_cleanup,
1461 .mask_memory = intel_i965_mask_memory,
1462 .masks = intel_i810_masks,
1463 .agp_enable = intel_i810_agp_enable,
1464 .cache_flush = global_cache_flush,
1465 .create_gatt_table = intel_i965_create_gatt_table,
1466 .free_gatt_table = intel_i830_free_gatt_table,
1467 .insert_memory = intel_i915_insert_entries,
1468 .remove_memory = intel_i915_remove_entries,
1469 .alloc_by_type = intel_i830_alloc_by_type,
1470 .free_by_type = intel_i810_free_by_type,
1471 .agp_alloc_page = agp_generic_alloc_page,
1472 .agp_alloc_pages = agp_generic_alloc_pages,
1473 .agp_destroy_page = agp_generic_destroy_page,
1474 .agp_destroy_pages = agp_generic_destroy_pages,
1475 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1476 .chipset_flush = intel_i915_chipset_flush,
1477#ifdef USE_PCI_DMA_API
1478 .agp_map_page = intel_agp_map_page,
1479 .agp_unmap_page = intel_agp_unmap_page,
1480 .agp_map_memory = intel_agp_map_memory,
1481 .agp_unmap_memory = intel_agp_unmap_memory,
1482#endif
1483};
1484
1485static const struct agp_bridge_driver intel_g33_driver = {
1486 .owner = THIS_MODULE,
1487 .aperture_sizes = intel_i830_sizes,
1488 .size_type = FIXED_APER_SIZE,
1489 .num_aperture_sizes = 4,
1490 .needs_scratch_page = true,
1491 .configure = intel_i915_configure,
1492 .fetch_size = intel_i9xx_fetch_size,
1493 .cleanup = intel_i915_cleanup,
1494 .mask_memory = intel_i965_mask_memory,
1495 .masks = intel_i810_masks,
1496 .agp_enable = intel_i810_agp_enable,
1497 .cache_flush = global_cache_flush,
1498 .create_gatt_table = intel_i915_create_gatt_table,
1499 .free_gatt_table = intel_i830_free_gatt_table,
1500 .insert_memory = intel_i915_insert_entries,
1501 .remove_memory = intel_i915_remove_entries,
1502 .alloc_by_type = intel_i830_alloc_by_type,
1503 .free_by_type = intel_i810_free_by_type,
1504 .agp_alloc_page = agp_generic_alloc_page,
1505 .agp_alloc_pages = agp_generic_alloc_pages,
1506 .agp_destroy_page = agp_generic_destroy_page,
1507 .agp_destroy_pages = agp_generic_destroy_pages,
1508 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1509 .chipset_flush = intel_i915_chipset_flush,
1510#ifdef USE_PCI_DMA_API
1511 .agp_map_page = intel_agp_map_page,
1512 .agp_unmap_page = intel_agp_unmap_page,
1513 .agp_map_memory = intel_agp_map_memory,
1514 .agp_unmap_memory = intel_agp_unmap_memory,
1515#endif
1516};
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 10f24e349a26..b9734a978186 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -310,6 +310,7 @@ static const struct agp_bridge_driver nvidia_driver = {
 	.aperture_sizes	= nvidia_generic_sizes,
 	.size_type	= U8_APER_SIZE,
 	.num_aperture_sizes = 5,
+	.needs_scratch_page = true,
 	.configure	= nvidia_configure,
 	.fetch_size	= nvidia_fetch_size,
 	.cleanup	= nvidia_cleanup,
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 6c3837a0184d..29aacd81de78 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -125,6 +125,7 @@ static struct agp_bridge_driver sis_driver = {
 	.aperture_sizes	= sis_generic_sizes,
 	.size_type	= U8_APER_SIZE,
 	.num_aperture_sizes = 7,
+	.needs_scratch_page = true,
 	.configure	= sis_configure,
 	.fetch_size	= sis_fetch_size,
 	.cleanup	= sis_cleanup,
@@ -415,14 +416,6 @@ static struct pci_device_id agp_sis_pci_table[] = {
 		.subvendor	= PCI_ANY_ID,
 		.subdevice	= PCI_ANY_ID,
 	},
-	{
-		.class		= (PCI_CLASS_BRIDGE_HOST << 8),
-		.class_mask	= ~0,
-		.vendor		= PCI_VENDOR_ID_SI,
-		.device		= PCI_DEVICE_ID_SI_760,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-	},
 	{ }
 };
 
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 6f48931ac1ce..95db71360d24 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -28,6 +28,7 @@
  */
 static int uninorth_rev;
 static int is_u3;
+static u32 scratch_value;
 
 #define DEFAULT_APERTURE_SIZE 256
 #define DEFAULT_APERTURE_STRING "256"
@@ -172,7 +173,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty
 
 	gp = (u32 *) &agp_bridge->gatt_table[pg_start];
 	for (i = 0; i < mem->page_count; ++i) {
-		if (gp[i]) {
+		if (gp[i] != scratch_value) {
 			dev_info(&agp_bridge->dev->dev,
 				 "uninorth_insert_memory: entry 0x%x occupied (%x)\n",
 				 i, gp[i]);
@@ -214,8 +215,9 @@ int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 		return 0;
 
 	gp = (u32 *) &agp_bridge->gatt_table[pg_start];
-	for (i = 0; i < mem->page_count; ++i)
-		gp[i] = 0;
+	for (i = 0; i < mem->page_count; ++i) {
+		gp[i] = scratch_value;
+	}
 	mb();
 	uninorth_tlbflush(mem);
 
@@ -421,8 +423,13 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
 
 	bridge->gatt_bus_addr = virt_to_phys(table);
 
+	if (is_u3)
+		scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL;
+	else
+		scratch_value = cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) |
+				0x1UL);
 	for (i = 0; i < num_entries; i++)
-		bridge->gatt_table[i] = 0;
+		bridge->gatt_table[i] = scratch_value;
 
 	return 0;
 
@@ -519,6 +526,7 @@ const struct agp_bridge_driver uninorth_agp_driver = {
 	.agp_destroy_pages	= agp_generic_destroy_pages,
 	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 	.cant_use_aperture	= true,
+	.needs_scratch_page	= true,
 };
 
 const struct agp_bridge_driver u3_agp_driver = {
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index d3bd243867fc..df67e80019d2 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -175,6 +175,7 @@ static const struct agp_bridge_driver via_agp3_driver = {
 	.aperture_sizes		= agp3_generic_sizes,
 	.size_type		= U8_APER_SIZE,
 	.num_aperture_sizes	= 10,
+	.needs_scratch_page	= true,
 	.configure		= via_configure_agp3,
 	.fetch_size		= via_fetch_size_agp3,
 	.cleanup		= via_cleanup_agp3,
@@ -201,6 +202,7 @@ static const struct agp_bridge_driver via_driver = {
 	.aperture_sizes		= via_generic_sizes,
 	.size_type		= U8_APER_SIZE,
 	.num_aperture_sizes	= 9,
+	.needs_scratch_page	= true,
 	.configure		= via_configure,
 	.fetch_size		= via_fetch_size,
 	.cleanup		= via_cleanup,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 305c59003963..88910e5a2c77 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,6 +9,7 @@ menuconfig DRM
 	depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
 	select I2C
 	select I2C_ALGOBIT
+	select SLOW_WORK
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
@@ -59,6 +60,7 @@ config DRM_RADEON
 	select FW_LOADER
 	select DRM_KMS_HELPER
 	select DRM_TTM
+	select POWER_SUPPLY
 	help
 	  Choose this option if you have an ATI Radeon graphics card. There
 	  are both PCI and AGP versions. You don't need to choose this to
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 932b5aa96a67..3f46772f0cb2 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -79,10 +79,9 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
 	struct drm_device *dev = master->minor->dev;
 	DRM_DEBUG("%d\n", magic);
 
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
 		return -ENOMEM;
-	memset(entry, 0, sizeof(*entry));
 	entry->priv = priv;
 	entry->hash_item.key = (unsigned long)magic;
 	mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 61b9bcfdf040..994d23beeb1d 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -34,6 +34,7 @@
 #include "drm.h"
 #include "drmP.h"
 #include "drm_crtc.h"
+#include "drm_edid.h"
 
 struct drm_prop_enum_list {
 	int type;
@@ -494,7 +495,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
 	list_for_each_entry_safe(mode, t, &connector->user_modes, head)
 		drm_mode_remove(connector, mode);
 
-	kfree(connector->fb_helper_private);
 	mutex_lock(&dev->mode_config.mutex);
 	drm_mode_object_put(dev, &connector->base);
 	list_del(&connector->head);
@@ -858,7 +858,6 @@ void drm_mode_config_init(struct drm_device *dev)
 	mutex_init(&dev->mode_config.mutex);
 	mutex_init(&dev->mode_config.idr_mutex);
 	INIT_LIST_HEAD(&dev->mode_config.fb_list);
-	INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
 	INIT_LIST_HEAD(&dev->mode_config.crtc_list);
 	INIT_LIST_HEAD(&dev->mode_config.connector_list);
 	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
@@ -2350,7 +2349,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 					    struct edid *edid)
 {
 	struct drm_device *dev = connector->dev;
-	int ret = 0;
+	int ret = 0, size;
 
 	if (connector->edid_blob_ptr)
 		drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -2362,7 +2361,9 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 		return ret;
 	}
 
-	connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
+	size = EDID_LENGTH * (1 + edid->extensions);
+	connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+							    size, edid);
 
 	ret = drm_connector_property_set_value(connector,
 					       dev->mode_config.edid_property,
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 51103aa469f8..764401951041 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -55,7 +55,7 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 }
 
 /**
- * drm_helper_probe_connector_modes - get complete set of display modes
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
  * @dev: DRM device
  * @maxX: max width for modes
  * @maxY: max height for modes
@@ -154,21 +154,6 @@ prune:
 }
 EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
 
-int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
-				     uint32_t maxY)
-{
-	struct drm_connector *connector;
-	int count = 0;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		count += drm_helper_probe_single_connector_modes(connector,
-								 maxX, maxY);
-	}
-
-	return count;
-}
-EXPORT_SYMBOL(drm_helper_probe_connector_modes);
-
 /**
  * drm_helper_encoder_in_use - check if a given encoder is in use
  * @encoder: encoder to check
@@ -263,302 +248,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_helper_disable_unused_functions);
 
-static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
-{
-	struct drm_display_mode *mode;
-
-	list_for_each_entry(mode, &connector->modes, head) {
-		if (drm_mode_width(mode) > width ||
-		    drm_mode_height(mode) > height)
-			continue;
-		if (mode->type & DRM_MODE_TYPE_PREFERRED)
-			return mode;
-	}
-	return NULL;
-}
-
-static bool drm_has_cmdline_mode(struct drm_connector *connector)
-{
-	struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-	struct drm_fb_helper_cmdline_mode *cmdline_mode;
-
-	if (!fb_help_conn)
-		return false;
-
-	cmdline_mode = &fb_help_conn->cmdline_mode;
-	return cmdline_mode->specified;
-}
-
-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
-{
-	struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-	struct drm_fb_helper_cmdline_mode *cmdline_mode;
-	struct drm_display_mode *mode = NULL;
-
-	if (!fb_help_conn)
-		return mode;
-
-	cmdline_mode = &fb_help_conn->cmdline_mode;
-	if (cmdline_mode->specified == false)
-		return mode;
-
-	/* attempt to find a matching mode in the list of modes
-	 * we have gotten so far, if not add a CVT mode that conforms
-	 */
-	if (cmdline_mode->rb || cmdline_mode->margins)
-		goto create_mode;
-
-	list_for_each_entry(mode, &connector->modes, head) {
-		/* check width/height */
-		if (mode->hdisplay != cmdline_mode->xres ||
-		    mode->vdisplay != cmdline_mode->yres)
-			continue;
-
-		if (cmdline_mode->refresh_specified) {
-			if (mode->vrefresh != cmdline_mode->refresh)
-				continue;
-		}
-
-		if (cmdline_mode->interlace) {
-			if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
-				continue;
-		}
-		return mode;
-	}
-
-create_mode:
-	mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
-			    cmdline_mode->yres,
-			    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
-			    cmdline_mode->rb, cmdline_mode->interlace,
-			    cmdline_mode->margins);
-	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-	list_add(&mode->head, &connector->modes);
-	return mode;
-}
-
-static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
-{
-	bool enable;
-
-	if (strict) {
-		enable = connector->status == connector_status_connected;
-	} else {
-		enable = connector->status != connector_status_disconnected;
-	}
-	return enable;
-}
-
-static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
-{
-	bool any_enabled = false;
-	struct drm_connector *connector;
-	int i = 0;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		enabled[i] = drm_connector_enabled(connector, true);
-		DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
-			      enabled[i] ? "yes" : "no");
-		any_enabled |= enabled[i];
-		i++;
-	}
-
-	if (any_enabled)
-		return;
-
-	i = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		enabled[i] = drm_connector_enabled(connector, false);
-		i++;
-	}
-}
-
-static bool drm_target_preferred(struct drm_device *dev,
-				 struct drm_display_mode **modes,
-				 bool *enabled, int width, int height)
-{
-	struct drm_connector *connector;
-	int i = 0;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-
-		if (enabled[i] == false) {
-			i++;
-			continue;
-		}
-
-		DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
-			      connector->base.id);
-
-		/* got for command line mode first */
-		modes[i] = drm_pick_cmdline_mode(connector, width, height);
-		if (!modes[i]) {
-			DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
-				      connector->base.id);
-			modes[i] = drm_has_preferred_mode(connector, width, height);
-		}
-		/* No preferred modes, pick one off the list */
-		if (!modes[i] && !list_empty(&connector->modes)) {
-			list_for_each_entry(modes[i], &connector->modes, head)
-				break;
-		}
-		DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
-			  "none");
-		i++;
-	}
-	return true;
-}
-
-static int drm_pick_crtcs(struct drm_device *dev,
-			  struct drm_crtc **best_crtcs,
-			  struct drm_display_mode **modes,
-			  int n, int width, int height)
-{
-	int c, o;
-	struct drm_connector *connector;
-	struct drm_connector_helper_funcs *connector_funcs;
-	struct drm_encoder *encoder;
-	struct drm_crtc *best_crtc;
-	int my_score, best_score, score;
-	struct drm_crtc **crtcs, *crtc;
-
-	if (n == dev->mode_config.num_connector)
-		return 0;
-	c = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (c == n)
-			break;
-		c++;
-	}
-
-	best_crtcs[n] = NULL;
-	best_crtc = NULL;
-	best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
-	if (modes[n] == NULL)
-		return best_score;
-
-	crtcs = kmalloc(dev->mode_config.num_connector *
-			sizeof(struct drm_crtc *), GFP_KERNEL);
-	if (!crtcs)
-		return best_score;
-
-	my_score = 1;
-	if (connector->status == connector_status_connected)
-		my_score++;
-	if (drm_has_cmdline_mode(connector))
-		my_score++;
-	if (drm_has_preferred_mode(connector, width, height))
-		my_score++;
-
-	connector_funcs = connector->helper_private;
-	encoder = connector_funcs->best_encoder(connector);
-	if (!encoder)
-		goto out;
-
-	connector->encoder = encoder;
-
-	/* select a crtc for this connector and then attempt to configure
-	   remaining connectors */
-	c = 0;
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
-		if ((encoder->possible_crtcs & (1 << c)) == 0) {
-			c++;
-			continue;
-		}
-
-		for (o = 0; o < n; o++)
-			if (best_crtcs[o] == crtc)
-				break;
-
-		if (o < n) {
-			/* ignore cloning for now */
-			c++;
-			continue;
-		}
-
-		crtcs[n] = crtc;
-		memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
-		score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
-						  width, height);
-		if (score > best_score) {
-			best_crtc = crtc;
-			best_score = score;
-			memcpy(best_crtcs, crtcs,
-			       dev->mode_config.num_connector *
-			       sizeof(struct drm_crtc *));
-		}
-		c++;
-	}
-out:
-	kfree(crtcs);
-	return best_score;
-}
-
-static void drm_setup_crtcs(struct drm_device *dev)
-{
-	struct drm_crtc **crtcs;
-	struct drm_display_mode **modes;
-	struct drm_encoder *encoder;
-	struct drm_connector *connector;
-	bool *enabled;
-	int width, height;
-	int i, ret;
-
-	DRM_DEBUG_KMS("\n");
-
-	width = dev->mode_config.max_width;
-	height = dev->mode_config.max_height;
-
-	/* clean out all the encoder/crtc combos */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		encoder->crtc = NULL;
-	}
-
-	crtcs = kcalloc(dev->mode_config.num_connector,
-			sizeof(struct drm_crtc *), GFP_KERNEL);
-	modes = kcalloc(dev->mode_config.num_connector,
-			sizeof(struct drm_display_mode *), GFP_KERNEL);
-	enabled = kcalloc(dev->mode_config.num_connector,
-			  sizeof(bool), GFP_KERNEL);
-
-	drm_enable_connectors(dev, enabled);
-
-	ret = drm_target_preferred(dev, modes, enabled, width, height);
-	if (!ret)
-		DRM_ERROR("Unable to find initial modes\n");
-
-	DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
-
-	drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
-
-	i = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct drm_display_mode *mode = modes[i];
-		struct drm_crtc *crtc = crtcs[i];
-
-		if (connector->encoder == NULL) {
-			i++;
-			continue;
-		}
-
-		if (mode && crtc) {
-			DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
-				      mode->name, crtc->base.id);
-			crtc->desired_mode = mode;
-			connector->encoder->crtc = crtc;
-		} else {
-			connector->encoder->crtc = NULL;
-			connector->encoder = NULL;
-		}
-		i++;
-	}
-
-	kfree(crtcs);
-	kfree(modes);
-	kfree(enabled);
-}
-
 /**
  * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
  * @encoder: encoder to test
@@ -936,10 +625,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
936 ret = -EINVAL; 625 ret = -EINVAL;
937 goto fail; 626 goto fail;
938 } 627 }
939 /* TODO are these needed? */
940 set->crtc->desired_x = set->x;
941 set->crtc->desired_y = set->y;
942 set->crtc->desired_mode = set->mode;
943 } 628 }
944 drm_helper_disable_unused_functions(dev); 629 drm_helper_disable_unused_functions(dev);
945 } else if (fb_changed) { 630 } else if (fb_changed) {
@@ -984,63 +669,6 @@ fail:
984} 669}
985EXPORT_SYMBOL(drm_crtc_helper_set_config); 670EXPORT_SYMBOL(drm_crtc_helper_set_config);
986 671
987bool drm_helper_plugged_event(struct drm_device *dev)
988{
989 DRM_DEBUG_KMS("\n");
990
991 drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
992 dev->mode_config.max_height);
993
994 drm_setup_crtcs(dev);
995
996 /* alert the driver fb layer */
997 dev->mode_config.funcs->fb_changed(dev);
998
999 /* FIXME: send hotplug event */
1000 return true;
1001}
1002/**
1003 * drm_initial_config - setup a sane initial connector configuration
1004 * @dev: DRM device
1005 *
1006 * LOCKING:
1007 * Called at init time, must take mode config lock.
1008 *
1009 * Scan the CRTCs and connectors and try to put together an initial setup.
1010 * At the moment, this is a cloned configuration across all heads with
1011 * a new framebuffer object as the backing store.
1012 *
1013 * RETURNS:
1014 * Zero if everything went ok, nonzero otherwise.
1015 */
1016bool drm_helper_initial_config(struct drm_device *dev)
1017{
1018 int count = 0;
1019
1020 /* disable all the possible outputs/crtcs before entering KMS mode */
1021 drm_helper_disable_unused_functions(dev);
1022
1023 drm_fb_helper_parse_command_line(dev);
1024
1025 count = drm_helper_probe_connector_modes(dev,
1026 dev->mode_config.max_width,
1027 dev->mode_config.max_height);
1028
1029 /*
1030 * we shouldn't end up with no modes here.
1031 */
1032 if (count == 0)
1033 printk(KERN_INFO "No connectors reported connected with modes\n");
1034
1035 drm_setup_crtcs(dev);
1036
1037 /* alert the driver fb layer */
1038 dev->mode_config.funcs->fb_changed(dev);
1039
1040 return 0;
1041}
1042EXPORT_SYMBOL(drm_helper_initial_config);
1043
1044static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) 672static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
1045{ 673{
1046 int dpms = DRM_MODE_DPMS_OFF; 674 int dpms = DRM_MODE_DPMS_OFF;
@@ -1123,27 +751,6 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
1123} 751}
1124EXPORT_SYMBOL(drm_helper_connector_dpms); 752EXPORT_SYMBOL(drm_helper_connector_dpms);
1125 753
1126/**
1127 * drm_hotplug_stage_two
1128 * @dev: DRM device
1129 * @connector: hotplugged connector
1130 *
1131 * LOCKING:
1132 * Caller must hold mode config lock, function might grab struct lock.
1133 *
1134 * Stage two of a hotplug.
1135 *
1136 * RETURNS:
1137 * Zero on success, errno on failure.
1138 */
1139int drm_helper_hotplug_stage_two(struct drm_device *dev)
1140{
1141 drm_helper_plugged_event(dev);
1142
1143 return 0;
1144}
1145EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
1146
1147int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, 754int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
1148 struct drm_mode_fb_cmd *mode_cmd) 755 struct drm_mode_fb_cmd *mode_cmd)
1149{ 756{
@@ -1200,3 +807,98 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
1200 return 0; 807 return 0;
1201} 808}
1202EXPORT_SYMBOL(drm_helper_resume_force_mode); 809EXPORT_SYMBOL(drm_helper_resume_force_mode);
810
811static struct slow_work_ops output_poll_ops;
812
813#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
814static void output_poll_execute(struct slow_work *work)
815{
816 struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
817 struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work);
818 struct drm_connector *connector;
819 enum drm_connector_status old_status, status;
820 bool repoll = false, changed = false;
821 int ret;
822
823 mutex_lock(&dev->mode_config.mutex);
824 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
825
826 /* connector->polled == 0 means we should never poke
827 detect() from here - TV out for instance */
828 if (!connector->polled)
829 continue;
830
831 if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
832 repoll = true;
833
834 old_status = connector->status;
835 /* if we are connected and don't want to poll for disconnect
836 skip it */
837 if (old_status == connector_status_connected &&
838 !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
839 !(connector->polled & DRM_CONNECTOR_POLL_HPD))
840 continue;
841
842 status = connector->funcs->detect(connector);
843 if (old_status != status)
844 changed = true;
845 }
846
847 mutex_unlock(&dev->mode_config.mutex);
848
849 if (changed) {
850 /* send a uevent + call fbdev */
851 drm_sysfs_hotplug_event(dev);
852 if (dev->mode_config.funcs->output_poll_changed)
853 dev->mode_config.funcs->output_poll_changed(dev);
854 }
855
856 if (repoll) {
857 ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD);
858 if (ret)
859 DRM_ERROR("delayed enqueue failed %d\n", ret);
860 }
861}
862
863void drm_kms_helper_poll_init(struct drm_device *dev)
864{
865 struct drm_connector *connector;
866 bool poll = false;
867 int ret;
868
869 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
870 if (connector->polled)
871 poll = true;
872 }
873 slow_work_register_user(THIS_MODULE);
874 delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
875 &output_poll_ops);
876
877 if (poll) {
878 ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
879 if (ret)
880 DRM_ERROR("delayed enqueue failed %d\n", ret);
881 }
882}
883EXPORT_SYMBOL(drm_kms_helper_poll_init);
884
885void drm_kms_helper_poll_fini(struct drm_device *dev)
886{
887 delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
888 slow_work_unregister_user(THIS_MODULE);
889}
890EXPORT_SYMBOL(drm_kms_helper_poll_fini);
891
892void drm_helper_hpd_irq_event(struct drm_device *dev)
893{
894 if (!dev->mode_config.poll_enabled)
895 return;
896 delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
897 /* schedule a slow work asap */
898 delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0);
899}
900EXPORT_SYMBOL(drm_helper_hpd_irq_event);
901
902static struct slow_work_ops output_poll_ops = {
903 .execute = output_poll_execute,
904};
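A driver opts into this polling machinery by flagging which connectors
need it and calling the init/fini helpers from its load and unload paths;
an HPD interrupt can short-circuit the 10-second timer via
drm_helper_hpd_irq_event(). A hypothetical sketch - the foo_* names and
IRQ plumbing are invented, only the drm_* calls and DRM_CONNECTOR_POLL_*
flags come from the code above:

#include <linux/interrupt.h>
#include "drmP.h"
#include "drm_crtc_helper.h"

static void foo_vga_connector_init(struct drm_connector *connector)
{
        /* no HPD line on VGA, so poll for both plug and unplug */
        connector->polled = DRM_CONNECTOR_POLL_CONNECT |
                            DRM_CONNECTOR_POLL_DISCONNECT;
}

static int foo_load(struct drm_device *dev, unsigned long flags)
{
        /* ... create CRTCs, encoders and connectors first ... */
        drm_kms_helper_poll_init(dev);  /* arms the 10s slow-work poll */
        return 0;
}

static irqreturn_t foo_hotplug_irq(int irq, void *arg)
{
        struct drm_device *dev = arg;

        /* re-run detection now instead of waiting for the timer */
        drm_helper_hpd_irq_event(dev);
        return IRQ_HANDLED;
}

static int foo_unload(struct drm_device *dev)
{
        drm_kms_helper_poll_fini(dev);
        return 0;
}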
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 13f1537413fb..252cbd74df0e 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -47,12 +47,10 @@ int drm_dma_setup(struct drm_device *dev)
47{ 47{
48 int i; 48 int i;
49 49
50 dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL); 50 dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
51 if (!dev->dma) 51 if (!dev->dma)
52 return -ENOMEM; 52 return -ENOMEM;
53 53
54 memset(dev->dma, 0, sizeof(*dev->dma));
55
56 for (i = 0; i <= DRM_MAX_ORDER; i++) 54 for (i = 0; i <= DRM_MAX_ORDER; i++)
57 memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0])); 55 memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
58 56
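The drm_dma.c change is the routine kmalloc-plus-memset to kzalloc()
conversion: kzalloc() hands back already-zeroed memory, so the explicit
clear (and the window where the struct held garbage) goes away. The
idiom in isolation:

        struct drm_device_dma *dma;

        /* before: allocate, then clear by hand */
        dma = kmalloc(sizeof(*dma), GFP_KERNEL);
        if (!dma)
                return -ENOMEM;
        memset(dma, 0, sizeof(*dma));

        /* after: one call, identical result */
        dma = kzalloc(sizeof(*dma), GFP_KERNEL);
        if (!dma)
                return -ENOMEM;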
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 18f41d7061f0..f569ae88ab38 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2,6 +2,7 @@
2 * Copyright (c) 2006 Luc Verhaegen (quirks list) 2 * Copyright (c) 2006 Luc Verhaegen (quirks list)
3 * Copyright (c) 2007-2008 Intel Corporation 3 * Copyright (c) 2007-2008 Intel Corporation
4 * Jesse Barnes <jesse.barnes@intel.com> 4 * Jesse Barnes <jesse.barnes@intel.com>
5 * Copyright 2010 Red Hat, Inc.
5 * 6 *
6 * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from 7 * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
7 * FB layer. 8 * FB layer.
@@ -33,10 +34,9 @@
33#include "drmP.h" 34#include "drmP.h"
34#include "drm_edid.h" 35#include "drm_edid.h"
35 36
36/* 37#define EDID_EST_TIMINGS 16
37 * TODO: 38#define EDID_STD_TIMINGS 8
38 * - support EDID 1.4 (incl. CE blocks) 39#define EDID_DETAILED_TIMINGS 4
39 */
40 40
41/* 41/*
42 * EDID blocks out in the wild have a variety of bugs, try to collect 42 * EDID blocks out in the wild have a variety of bugs, try to collect
@@ -65,7 +65,8 @@
65 65
66#define LEVEL_DMT 0 66#define LEVEL_DMT 0
67#define LEVEL_GTF 1 67#define LEVEL_GTF 1
68#define LEVEL_CVT 2 68#define LEVEL_GTF2 2
69#define LEVEL_CVT 3
69 70
70static struct edid_quirk { 71static struct edid_quirk {
71 char *vendor; 72 char *vendor;
@@ -109,36 +110,38 @@ static struct edid_quirk {
109 { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, 110 { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
110}; 111};
111 112
113/*** DDC fetch and block validation ***/
112 114
113/* Valid EDID header has these bytes */
114static const u8 edid_header[] = { 115static const u8 edid_header[] = {
115 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 116 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
116}; 117};
117 118
118/** 119/*
119 * drm_edid_is_valid - sanity check EDID data 120 * Sanity check the EDID block (base or extension). Return 0 if the block
120 * @edid: EDID data 121 * doesn't check out, or 1 if it's valid.
121 *
122 * Sanity check the EDID block by looking at the header, the version number
123 * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
124 * valid.
125 */ 122 */
126bool drm_edid_is_valid(struct edid *edid) 123static bool
124drm_edid_block_valid(u8 *raw_edid)
127{ 125{
128 int i, score = 0; 126 int i;
129 u8 csum = 0; 127 u8 csum = 0;
130 u8 *raw_edid = (u8 *)edid; 128 struct edid *edid = (struct edid *)raw_edid;
131 129
132 for (i = 0; i < sizeof(edid_header); i++) 130 if (raw_edid[0] == 0x00) {
133 if (raw_edid[i] == edid_header[i]) 131 int score = 0;
134 score++;
135 132
136 if (score == 8) ; 133 for (i = 0; i < sizeof(edid_header); i++)
137 else if (score >= 6) { 134 if (raw_edid[i] == edid_header[i])
138 DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); 135 score++;
139 memcpy(raw_edid, edid_header, sizeof(edid_header)); 136
140 } else 137 if (score == 8) ;
141 goto bad; 138 else if (score >= 6) {
139 DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
140 memcpy(raw_edid, edid_header, sizeof(edid_header));
141 } else {
142 goto bad;
143 }
144 }
142 145
143 for (i = 0; i < EDID_LENGTH; i++) 146 for (i = 0; i < EDID_LENGTH; i++)
144 csum += raw_edid[i]; 147 csum += raw_edid[i];
@@ -147,13 +150,21 @@ bool drm_edid_is_valid(struct edid *edid)
147 goto bad; 150 goto bad;
148 } 151 }
149 152
150 if (edid->version != 1) { 153 /* per-block-type checks */
151 DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); 154 switch (raw_edid[0]) {
152 goto bad; 155 case 0: /* base */
153 } 156 if (edid->version != 1) {
157 DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
158 goto bad;
159 }
154 160
155 if (edid->revision > 4) 161 if (edid->revision > 4)
156 DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); 162 DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
163 break;
164
165 default:
166 break;
167 }
157 168
158 return 1; 169 return 1;
159 170
@@ -165,8 +176,158 @@ bad:
165 } 176 }
166 return 0; 177 return 0;
167} 178}
179
180/**
181 * drm_edid_is_valid - sanity check EDID data
182 * @edid: EDID data
183 *
184 * Sanity-check an entire EDID record (including extensions)
185 */
186bool drm_edid_is_valid(struct edid *edid)
187{
188 int i;
189 u8 *raw = (u8 *)edid;
190
191 if (!edid)
192 return false;
193
194 for (i = 0; i <= edid->extensions; i++)
195 if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
196 return false;
197
198 return true;
199}
168EXPORT_SYMBOL(drm_edid_is_valid); 200EXPORT_SYMBOL(drm_edid_is_valid);
169 201
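Concretely, a block passes drm_edid_block_valid() when its 128 bytes sum
to zero mod 256, and the base block additionally has to start with the
fixed 8-byte header. A standalone userspace sketch of those two checks;
the block built in main() is fabricated:

#include <stdio.h>
#include <string.h>

#define EDID_LENGTH 128

static const unsigned char edid_header[8] =
        { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

static int block_valid(const unsigned char *b, int is_base)
{
        unsigned char csum = 0;
        int i;

        if (is_base && memcmp(b, edid_header, sizeof(edid_header)))
                return 0;
        for (i = 0; i < EDID_LENGTH; i++)
                csum += b[i];           /* u8 arithmetic wraps mod 256 */
        return csum == 0;
}

int main(void)
{
        unsigned char blk[EDID_LENGTH] = { 0 };
        unsigned char sum = 0;
        int i;

        memcpy(blk, edid_header, sizeof(edid_header));
        blk[18] = 1;                    /* EDID version 1.3 */
        blk[19] = 3;
        for (i = 0; i < EDID_LENGTH - 1; i++)
                sum += blk[i];
        blk[127] = (unsigned char)-sum; /* byte 127 fixes the checksum */

        printf("block valid: %d\n", block_valid(blk, 1));
        return 0;
}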
202#define DDC_ADDR 0x50
203#define DDC_SEGMENT_ADDR 0x30
204/**
205 * Get EDID information via I2C.
206 *
207 * \param adapter : i2c device adaptor
208 * \param buf : EDID data buffer to be filled
209 * \param len : EDID data buffer length
210 * \return 0 on success or -1 on failure.
211 *
212 * Try to fetch EDID information by calling i2c driver function.
213 */
214static int
215drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
216 int block, int len)
217{
218 unsigned char start = block * EDID_LENGTH;
219 struct i2c_msg msgs[] = {
220 {
221 .addr = DDC_ADDR,
222 .flags = 0,
223 .len = 1,
224 .buf = &start,
225 }, {
226 .addr = DDC_ADDR,
227 .flags = I2C_M_RD,
228 .len = len,
229 .buf = buf + start,
230 }
231 };
232
233 if (i2c_transfer(adapter, msgs, 2) == 2)
234 return 0;
235
236 return -1;
237}
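The transfer above is the standard DDC sequence: write the byte offset
within the EDID to address 0x50, then read the block back in the same
transaction. Note that a one-byte offset only spans blocks 0 and 1;
reaching further extension blocks is what the E-DDC segment register at
DDC_SEGMENT_ADDR (0x30, defined above but not yet used here) is for. The
same two-message transfer can be issued from userspace through i2c-dev;
a sketch, with the device path an assumption and error reporting trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

#define DDC_ADDR    0x50
#define EDID_LENGTH 128

static int read_edid_block(const char *path, unsigned char *buf, int block)
{
        unsigned char start = block * EDID_LENGTH; /* wraps past block 1 */
        struct i2c_msg msgs[2] = {
                { .addr = DDC_ADDR, .flags = 0,        .len = 1,
                  .buf = &start },
                { .addr = DDC_ADDR, .flags = I2C_M_RD, .len = EDID_LENGTH,
                  .buf = buf },
        };
        struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
        int fd = open(path, O_RDWR), ret = 0;

        if (fd < 0)
                return -1;
        if (ioctl(fd, I2C_RDWR, &xfer) < 0)
                ret = -1;
        close(fd);
        return ret;
}

int main(void)
{
        unsigned char blk[EDID_LENGTH];

        /* /dev/i2c-2 is a guess; the DDC bus number varies per machine */
        if (read_edid_block("/dev/i2c-2", blk, 0) == 0)
                printf("vendor bytes: %02x %02x\n", blk[8], blk[9]);
        return 0;
}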
238
239static u8 *
240drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
241{
242 int i, j = 0;
243 u8 *block, *new;
244
245 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
246 return NULL;
247
248 /* base block fetch */
249 for (i = 0; i < 4; i++) {
250 if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
251 goto out;
252 if (drm_edid_block_valid(block))
253 break;
254 }
255 if (i == 4)
256 goto carp;
257
258 /* if there's no extensions, we're done */
259 if (block[0x7e] == 0)
260 return block;
261
262 new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
263 if (!new)
264 goto out;
265 block = new;
266
267 for (j = 1; j <= block[0x7e]; j++) {
268 for (i = 0; i < 4; i++) {
269 if (drm_do_probe_ddc_edid(adapter, block, j,
270 EDID_LENGTH))
271 goto out;
272 if (drm_edid_block_valid(block + j * EDID_LENGTH))
273 break;
274 }
275 if (i == 4)
276 goto carp;
277 }
278
279 return block;
280
281carp:
282 dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n",
283 drm_get_connector_name(connector), j);
284
285out:
286 kfree(block);
287 return NULL;
288}
289
290/**
291 * Probe DDC presence.
292 *
293 * \param adapter : i2c device adaptor
294 * \return 1 on success
295 */
296static bool
297drm_probe_ddc(struct i2c_adapter *adapter)
298{
299 unsigned char out;
300
301 return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
302}
303
304/**
305 * drm_get_edid - get EDID data, if available
306 * @connector: connector we're probing
307 * @adapter: i2c adapter to use for DDC
308 *
309 * Poke the given i2c channel to grab EDID data if possible. If found,
310 * attach it to the connector.
311 *
312 * Return edid data or NULL if we couldn't find any.
313 */
314struct edid *drm_get_edid(struct drm_connector *connector,
315 struct i2c_adapter *adapter)
316{
317 struct edid *edid = NULL;
318
319 if (drm_probe_ddc(adapter))
320 edid = (struct edid *)drm_do_get_edid(connector, adapter);
321
322 connector->display_info.raw_edid = (char *)edid;
323
324 return edid;
325
326}
327EXPORT_SYMBOL(drm_get_edid);
328
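In a driver, drm_get_edid() typically lives in the connector's
->get_modes() hook, paired with drm_add_edid_modes() (updated later in
this diff). A hypothetical fragment; foo_connector, to_foo_connector()
and the cached DDC adapter are invented driver state:

static int foo_connector_get_modes(struct drm_connector *connector)
{
        struct foo_connector *foo = to_foo_connector(connector);
        struct edid *edid;
        int count = 0;

        edid = drm_get_edid(connector, foo->ddc_adapter);
        if (edid) {
                drm_mode_connector_update_edid_property(connector, edid);
                count = drm_add_edid_modes(connector, edid);
                kfree(edid);
        }
        return count;
}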
329/*** EDID parsing ***/
330
170/** 331/**
171 * edid_vendor - match a string against EDID's obfuscated vendor field 332 * edid_vendor - match a string against EDID's obfuscated vendor field
172 * @edid: EDID to match 333 * @edid: EDID to match
@@ -335,7 +496,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
335 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 496 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
336 /* 1024x768@85Hz */ 497 /* 1024x768@85Hz */
337 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, 498 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
338 1072, 1376, 0, 768, 769, 772, 808, 0, 499 1168, 1376, 0, 768, 769, 772, 808, 0,
339 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 500 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
340 /* 1152x864@75Hz */ 501 /* 1152x864@75Hz */
341 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, 502 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
@@ -426,7 +587,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
426 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, 587 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
427 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 588 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
428 /* 1600x1200@75Hz */ 589 /* 1600x1200@75Hz */
429 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664, 590 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
430 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, 591 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
431 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 592 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
432 /* 1600x1200@85Hz */ 593 /* 1600x1200@85Hz */
@@ -497,8 +658,8 @@ static struct drm_display_mode drm_dmt_modes[] = {
497static const int drm_num_dmt_modes = 658static const int drm_num_dmt_modes =
498 sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); 659 sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
499 660
500static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, 661struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
501 int hsize, int vsize, int fresh) 662 int hsize, int vsize, int fresh)
502{ 663{
503 int i; 664 int i;
504 struct drm_display_mode *ptr, *mode; 665 struct drm_display_mode *ptr, *mode;
@@ -516,6 +677,111 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
516 } 677 }
517 return mode; 678 return mode;
518} 679}
680EXPORT_SYMBOL(drm_mode_find_dmt);
681
682typedef void detailed_cb(struct detailed_timing *timing, void *closure);
683
684static void
685drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
686{
687 int i;
688 struct edid *edid = (struct edid *)raw_edid;
689
690 if (edid == NULL)
691 return;
692
693 for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
694 cb(&(edid->detailed_timings[i]), closure);
695
696 /* XXX extension block walk */
697}
698
699static void
700is_rb(struct detailed_timing *t, void *data)
701{
702 u8 *r = (u8 *)t;
703 if (r[3] == EDID_DETAIL_MONITOR_RANGE)
704 if (r[15] & 0x10)
705 *(bool *)data = true;
706}
707
708/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
709static bool
710drm_monitor_supports_rb(struct edid *edid)
711{
712 if (edid->revision >= 4) {
713 bool ret = false; /* is_rb() only ever sets this to true */
714 drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
715 return ret;
716 }
717
718 return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
719}
720
721static void
722find_gtf2(struct detailed_timing *t, void *data)
723{
724 u8 *r = (u8 *)t;
725 if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
726 *(u8 **)data = r;
727}
728
729/* Secondary GTF curve kicks in above some break frequency */
730static int
731drm_gtf2_hbreak(struct edid *edid)
732{
733 u8 *r = NULL;
734 drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
735 return r ? (r[12] * 2) : 0;
736}
737
738static int
739drm_gtf2_2c(struct edid *edid)
740{
741 u8 *r = NULL;
742 drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
743 return r ? r[13] : 0;
744}
745
746static int
747drm_gtf2_m(struct edid *edid)
748{
749 u8 *r = NULL;
750 drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
751 return r ? (r[15] << 8) + r[14] : 0;
752}
753
754static int
755drm_gtf2_k(struct edid *edid)
756{
757 u8 *r = NULL;
758 drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
759 return r ? r[16] : 0;
760}
761
762static int
763drm_gtf2_2j(struct edid *edid)
764{
765 u8 *r = NULL;
766 drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
767 return r ? r[17] : 0;
768}
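These accessors just pick bytes out of a monitor range descriptor (tag
0xfd) whose byte 10 is 0x02, the flag for a secondary GTF curve: the
curve applies above r[12] * 2 kHz, and r[13..17] carry the 2C, M, K and
2J coefficients that drm_gtf_mode_complex() consumes below. A standalone
dump of that layout, with fabricated descriptor bytes:

#include <stdio.h>

static void dump_gtf2(const unsigned char *r)   /* 18-byte descriptor */
{
        if (r[3] != 0xfd || r[10] != 0x02)      /* 0xfd = monitor range */
                return;
        printf("hbreak %d kHz, 2C %d, M %d, K %d, 2J %d\n",
               r[12] * 2, r[13], (r[15] << 8) + r[14], r[16], r[17]);
}

int main(void)
{
        unsigned char r[18] = { 0 };

        r[3] = 0xfd;
        r[10] = 0x02;
        r[12] = 30;                     /* secondary curve above 60 kHz */
        r[13] = 80;
        r[14] = 0x58;
        r[15] = 0x02;                   /* M = 0x0258 = 600 */
        r[16] = 128;
        r[17] = 40;
        dump_gtf2(r);
        return 0;
}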
769
770/**
771 * standard_timing_level - get std. timing level (CVT/GTF/DMT)
772 * @edid: EDID block to scan
773 */
774static int standard_timing_level(struct edid *edid)
775{
776 if (edid->revision >= 2) {
777 if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
778 return LEVEL_CVT;
779 if (drm_gtf2_hbreak(edid))
780 return LEVEL_GTF2;
781 return LEVEL_GTF;
782 }
783 return LEVEL_DMT;
784}
519 785
520/* 786/*
521 * 0 is reserved. The spec says 0x01 fill for unused timings. Some old 787 * 0 is reserved. The spec says 0x01 fill for unused timings. Some old
@@ -536,22 +802,20 @@ bad_std_timing(u8 a, u8 b)
536 * 802 *
537 * Take the standard timing params (in this case width, aspect, and refresh) 803 * Take the standard timing params (in this case width, aspect, and refresh)
538 * and convert them into a real mode using CVT/GTF/DMT. 804 * and convert them into a real mode using CVT/GTF/DMT.
539 *
540 * Punts for now, but should eventually use the FB layer's CVT based mode
541 * generation code.
542 */ 805 */
543struct drm_display_mode *drm_mode_std(struct drm_device *dev, 806static struct drm_display_mode *
544 struct std_timing *t, 807drm_mode_std(struct drm_connector *connector, struct edid *edid,
545 int revision, 808 struct std_timing *t, int revision)
546 int timing_level)
547{ 809{
548 struct drm_display_mode *mode; 810 struct drm_device *dev = connector->dev;
811 struct drm_display_mode *m, *mode = NULL;
549 int hsize, vsize; 812 int hsize, vsize;
550 int vrefresh_rate; 813 int vrefresh_rate;
551 unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK) 814 unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
552 >> EDID_TIMING_ASPECT_SHIFT; 815 >> EDID_TIMING_ASPECT_SHIFT;
553 unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) 816 unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
554 >> EDID_TIMING_VFREQ_SHIFT; 817 >> EDID_TIMING_VFREQ_SHIFT;
818 int timing_level = standard_timing_level(edid);
555 819
556 if (bad_std_timing(t->hsize, t->vfreq_aspect)) 820 if (bad_std_timing(t->hsize, t->vfreq_aspect))
557 return NULL; 821 return NULL;
@@ -572,18 +836,38 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
572 vsize = (hsize * 4) / 5; 836 vsize = (hsize * 4) / 5;
573 else 837 else
574 vsize = (hsize * 9) / 16; 838 vsize = (hsize * 9) / 16;
575 /* HDTV hack */ 839
576 if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) { 840 /* HDTV hack, part 1 */
577 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, 841 if (vrefresh_rate == 60 &&
842 ((hsize == 1360 && vsize == 765) ||
843 (hsize == 1368 && vsize == 769))) {
844 hsize = 1366;
845 vsize = 768;
846 }
847
848 /*
849 * If this connector already has a mode for this size and refresh
850 * rate (because it came from detailed or CVT info), use that
851 * instead. This way we don't have to guess at interlace or
852 * reduced blanking.
853 */
854 list_for_each_entry(m, &connector->probed_modes, head)
855 if (m->hdisplay == hsize && m->vdisplay == vsize &&
856 drm_mode_vrefresh(m) == vrefresh_rate)
857 return NULL;
858
859 /* HDTV hack, part 2 */
860 if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
861 mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
578 false); 862 false);
579 mode->hdisplay = 1366; 863 mode->hdisplay = 1366;
580 mode->vsync_start = mode->vsync_start - 1; 864 mode->vsync_start = mode->vsync_start - 1;
581 mode->vsync_end = mode->vsync_end - 1; 865 mode->vsync_end = mode->vsync_end - 1;
582 return mode; 866 return mode;
583 } 867 }
584 mode = NULL; 868
585 /* check whether it can be found in default mode table */ 869 /* check whether it can be found in default mode table */
586 mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate); 870 mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
587 if (mode) 871 if (mode)
588 return mode; 872 return mode;
589 873
@@ -593,6 +877,23 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
593 case LEVEL_GTF: 877 case LEVEL_GTF:
594 mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); 878 mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
595 break; 879 break;
880 case LEVEL_GTF2:
881 /*
882 * This is potentially wrong if there's ever a monitor with
883 * more than one ranges section, each claiming a different
884 * secondary GTF curve. Please don't do that.
885 */
886 mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
887 if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
888 kfree(mode);
889 mode = drm_gtf_mode_complex(dev, hsize, vsize,
890 vrefresh_rate, 0, 0,
891 drm_gtf2_m(edid),
892 drm_gtf2_2c(edid),
893 drm_gtf2_k(edid),
894 drm_gtf2_2j(edid));
895 }
896 break;
596 case LEVEL_CVT: 897 case LEVEL_CVT:
597 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, 898 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
598 false); 899 false);
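For reference, the raw decode feeding this function: per the EDID spec a
standard timing is two bytes, horizontal size (byte0 + 31) * 8 pixels,
the aspect ratio in the top two bits of byte1 (EDID 1.3 codes, matching
the vsize arithmetic above) and refresh in the low six bits, offset by
60. A standalone sketch with fabricated sample values:

#include <stdio.h>

static void decode_std_timing(unsigned char b0, unsigned char b1)
{
        int hsize, vsize, vrefresh = (b1 & 0x3f) + 60;

        if (b0 == 0x01 && b1 == 0x01)   /* 0x0101 marks an unused slot */
                return;

        hsize = (b0 + 31) * 8;
        switch (b1 >> 6) {              /* EDID >= 1.3 aspect codes */
        case 0:  vsize = hsize * 10 / 16; break;        /* 16:10 */
        case 1:  vsize = hsize * 3 / 4;   break;        /* 4:3   */
        case 2:  vsize = hsize * 4 / 5;   break;        /* 5:4   */
        default: vsize = hsize * 9 / 16;  break;        /* 16:9  */
        }
        printf("%dx%d@%dHz\n", hsize, vsize, vrefresh);
}

int main(void)
{
        decode_std_timing(0x81, 0x00);  /* 1280x800@60 */
        decode_std_timing(0x61, 0x40);  /* 1024x768@60 */
        return 0;
}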
@@ -716,10 +1017,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
716 if (mode->vsync_end > mode->vtotal) 1017 if (mode->vsync_end > mode->vtotal)
717 mode->vtotal = mode->vsync_end + 1; 1018 mode->vtotal = mode->vsync_end + 1;
718 1019
719 drm_mode_set_name(mode);
720
721 drm_mode_do_interlace_quirk(mode, pt); 1020 drm_mode_do_interlace_quirk(mode, pt);
722 1021
1022 drm_mode_set_name(mode);
1023
723 if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { 1024 if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
724 pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; 1025 pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
725 } 1026 }
@@ -802,10 +1103,6 @@ static struct drm_display_mode edid_est_modes[] = {
802 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ 1103 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
803}; 1104};
804 1105
805#define EDID_EST_TIMINGS 16
806#define EDID_STD_TIMINGS 8
807#define EDID_DETAILED_TIMINGS 4
808
809/** 1106/**
810 * add_established_modes - get est. modes from EDID and add them 1107 * add_established_modes - get est. modes from EDID and add them
811 * @edid: EDID block to scan 1108 * @edid: EDID block to scan
@@ -833,19 +1130,6 @@ static int add_established_modes(struct drm_connector *connector, struct edid *e
833 1130
834 return modes; 1131 return modes;
835} 1132}
836/**
837 * standard_timing_level - get std. timing level (CVT/GTF/DMT)
838 * @edid: EDID block to scan
839 */
840static int standard_timing_level(struct edid *edid)
841{
842 if (edid->revision >= 2) {
843 if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
844 return LEVEL_CVT;
845 return LEVEL_GTF;
846 }
847 return LEVEL_DMT;
848}
849 1133
850/** 1134/**
851 * add_standard_modes - get std. modes from EDID and add them 1135 * add_standard_modes - get std. modes from EDID and add them
@@ -856,22 +1140,14 @@ static int standard_timing_level(struct edid *edid)
856 */ 1140 */
857static int add_standard_modes(struct drm_connector *connector, struct edid *edid) 1141static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
858{ 1142{
859 struct drm_device *dev = connector->dev;
860 int i, modes = 0; 1143 int i, modes = 0;
861 int timing_level;
862
863 timing_level = standard_timing_level(edid);
864 1144
865 for (i = 0; i < EDID_STD_TIMINGS; i++) { 1145 for (i = 0; i < EDID_STD_TIMINGS; i++) {
866 struct std_timing *t = &edid->standard_timings[i];
867 struct drm_display_mode *newmode; 1146 struct drm_display_mode *newmode;
868 1147
869 /* If std timings bytes are 1, 1 it's empty */ 1148 newmode = drm_mode_std(connector, edid,
870 if (t->hsize == 1 && t->vfreq_aspect == 1) 1149 &edid->standard_timings[i],
871 continue; 1150 edid->revision);
872
873 newmode = drm_mode_std(dev, &edid->standard_timings[i],
874 edid->revision, timing_level);
875 if (newmode) { 1151 if (newmode) {
876 drm_mode_probed_add(connector, newmode); 1152 drm_mode_probed_add(connector, newmode);
877 modes++; 1153 modes++;
@@ -881,36 +1157,86 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
881 return modes; 1157 return modes;
882} 1158}
883 1159
884/*
885 * XXX fix this for:
886 * - GTF secondary curve formula
887 * - EDID 1.4 range offsets
888 * - CVT extended bits
889 */
890static bool 1160static bool
891mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing) 1161mode_is_rb(struct drm_display_mode *mode)
892{ 1162{
893 struct detailed_data_monitor_range *range; 1163 return (mode->htotal - mode->hdisplay == 160) &&
894 int hsync, vrefresh; 1164 (mode->hsync_end - mode->hdisplay == 80) &&
895 1165 (mode->hsync_end - mode->hsync_start == 32) &&
896 range = &timing->data.other_data.data.range; 1166 (mode->vsync_start - mode->vdisplay == 3);
1167}
897 1168
1169static bool
1170mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
1171{
1172 int hsync, hmin, hmax;
1173
1174 hmin = t[7];
1175 if (edid->revision >= 4)
1176 hmin += ((t[4] & 0x04) ? 255 : 0);
1177 hmax = t[8];
1178 if (edid->revision >= 4)
1179 hmax += ((t[4] & 0x08) ? 255 : 0);
898 hsync = drm_mode_hsync(mode); 1180 hsync = drm_mode_hsync(mode);
899 vrefresh = drm_mode_vrefresh(mode);
900 1181
901 if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz) 1182 return (hsync <= hmax && hsync >= hmin);
1183}
1184
1185static bool
1186mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
1187{
1188 int vsync, vmin, vmax;
1189
1190 vmin = t[5];
1191 if (edid->revision >= 4)
1192 vmin += ((t[4] & 0x01) ? 255 : 0);
1193 vmax = t[6];
1194 if (edid->revision >= 4)
1195 vmax += ((t[4] & 0x02) ? 255 : 0);
1196 vsync = drm_mode_vrefresh(mode);
1197
1198 return (vsync <= vmax && vsync >= vmin);
1199}
1200
1201static u32
1202range_pixel_clock(struct edid *edid, u8 *t)
1203{
1204 /* unspecified */
1205 if (t[9] == 0 || t[9] == 255)
1206 return 0;
1207
1208 /* 1.4 with CVT support gives us real precision, yay */
1209 if (edid->revision >= 4 && t[10] == 0x04)
1210 return (t[9] * 10000) - ((t[12] >> 2) * 250);
1211
1212 /* 1.3 is pathetic, so fuzz up a bit */
1213 return t[9] * 10000 + 5001;
1214}
1215
1216static bool
1217mode_in_range(struct drm_display_mode *mode, struct edid *edid,
1218 struct detailed_timing *timing)
1219{
1220 u32 max_clock;
1221 u8 *t = (u8 *)timing;
1222
1223 if (!mode_in_hsync_range(mode, edid, t))
902 return false; 1224 return false;
903 1225
904 if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq) 1226 if (!mode_in_vsync_range(mode, edid, t))
905 return false; 1227 return false;
906 1228
907 if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) { 1229 if ((max_clock = range_pixel_clock(edid, t)))
908 /* be forgiving since it's in units of 10MHz */
909 int max_clock = range->pixel_clock_mhz * 10 + 9;
910 max_clock *= 1000;
911 if (mode->clock > max_clock) 1230 if (mode->clock > max_clock)
912 return false; 1231 return false;
913 } 1232
1233 /* 1.4 max horizontal check */
1234 if (edid->revision >= 4 && t[10] == 0x04)
1235 if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
1236 return false;
1237
1238 if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
1239 return false;
914 1240
915 return true; 1241 return true;
916} 1242}
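The claimed clock limit is coarse: EDID 1.3 stores it in 10 MHz units,
which is why range_pixel_clock() fuzzes upward by 5001 kHz, while an
EDID 1.4 CVT section refines it downward in 250 kHz steps. Rerunning
that arithmetic standalone:

#include <stdio.h>

static unsigned range_pixel_clock(int revision, const unsigned char *t)
{
        if (t[9] == 0 || t[9] == 255)
                return 0;                       /* unspecified */
        if (revision >= 4 && t[10] == 0x04)     /* 1.4 CVT precision */
                return t[9] * 10000 - (t[12] >> 2) * 250;
        return t[9] * 10000 + 5001;             /* 1.3: fuzz half a unit */
}

int main(void)
{
        unsigned char t13[18] = { 0 }, t14[18] = { 0 };

        t13[9] = 15;                    /* claims 150 MHz */
        t14[9] = 16;                    /* claims 160 MHz ... */
        t14[10] = 0x04;
        t14[12] = 2 << 2;               /* ... minus 2 * 0.25 MHz */
        printf("1.3 limit: %u kHz\n", range_pixel_clock(3, t13)); /* 155001 */
        printf("1.4 limit: %u kHz\n", range_pixel_clock(4, t14)); /* 159500 */
        return 0;
}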
@@ -919,15 +1245,16 @@ mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
919 * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will 1245 * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
920 * need to account for them. 1246 * need to account for them.
921 */ 1247 */
922static int drm_gtf_modes_for_range(struct drm_connector *connector, 1248static int
923 struct detailed_timing *timing) 1249drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
1250 struct detailed_timing *timing)
924{ 1251{
925 int i, modes = 0; 1252 int i, modes = 0;
926 struct drm_display_mode *newmode; 1253 struct drm_display_mode *newmode;
927 struct drm_device *dev = connector->dev; 1254 struct drm_device *dev = connector->dev;
928 1255
929 for (i = 0; i < drm_num_dmt_modes; i++) { 1256 for (i = 0; i < drm_num_dmt_modes; i++) {
930 if (mode_in_range(drm_dmt_modes + i, timing)) { 1257 if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
931 newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); 1258 newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
932 if (newmode) { 1259 if (newmode) {
933 drm_mode_probed_add(connector, newmode); 1260 drm_mode_probed_add(connector, newmode);
@@ -988,13 +1315,100 @@ static int drm_cvt_modes(struct drm_connector *connector,
988 return modes; 1315 return modes;
989} 1316}
990 1317
1318static const struct {
1319 short w;
1320 short h;
1321 short r;
1322 short rb;
1323} est3_modes[] = {
1324 /* byte 6 */
1325 { 640, 350, 85, 0 },
1326 { 640, 400, 85, 0 },
1327 { 720, 400, 85, 0 },
1328 { 640, 480, 85, 0 },
1329 { 848, 480, 60, 0 },
1330 { 800, 600, 85, 0 },
1331 { 1024, 768, 85, 0 },
1332 { 1152, 864, 75, 0 },
1333 /* byte 7 */
1334 { 1280, 768, 60, 1 },
1335 { 1280, 768, 60, 0 },
1336 { 1280, 768, 75, 0 },
1337 { 1280, 768, 85, 0 },
1338 { 1280, 960, 60, 0 },
1339 { 1280, 960, 85, 0 },
1340 { 1280, 1024, 60, 0 },
1341 { 1280, 1024, 85, 0 },
1342 /* byte 8 */
1343 { 1360, 768, 60, 0 },
1344 { 1440, 900, 60, 1 },
1345 { 1440, 900, 60, 0 },
1346 { 1440, 900, 75, 0 },
1347 { 1440, 900, 85, 0 },
1348 { 1400, 1050, 60, 1 },
1349 { 1400, 1050, 60, 0 },
1350 { 1400, 1050, 75, 0 },
1351 /* byte 9 */
1352 { 1400, 1050, 85, 0 },
1353 { 1680, 1050, 60, 1 },
1354 { 1680, 1050, 60, 0 },
1355 { 1680, 1050, 75, 0 },
1356 { 1680, 1050, 85, 0 },
1357 { 1600, 1200, 60, 0 },
1358 { 1600, 1200, 65, 0 },
1359 { 1600, 1200, 70, 0 },
1360 /* byte 10 */
1361 { 1600, 1200, 75, 0 },
1362 { 1600, 1200, 85, 0 },
1363 { 1792, 1344, 60, 0 },
1364 { 1792, 1344, 85, 0 },
1365 { 1856, 1392, 60, 0 },
1366 { 1856, 1392, 75, 0 },
1367 { 1920, 1200, 60, 1 },
1368 { 1920, 1200, 60, 0 },
1369 /* byte 11 */
1370 { 1920, 1200, 75, 0 },
1371 { 1920, 1200, 85, 0 },
1372 { 1920, 1440, 60, 0 },
1373 { 1920, 1440, 75, 0 },
1374};
1375static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
1376
1377static int
1378drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
1379{
1380 int i, j, m, modes = 0;
1381 struct drm_display_mode *mode;
1382 u8 *est = ((u8 *)timing) + 5;
1383
1384 for (i = 0; i < 6; i++) {
1385 for (j = 7; j > 0; j--) {
1386 m = (i * 8) + (7 - j);
1387 if (m >= num_est3_modes)
1388 break;
1389 if (est[i] & (1 << j)) {
1390 mode = drm_mode_find_dmt(connector->dev,
1391 est3_modes[m].w,
1392 est3_modes[m].h,
1393 est3_modes[m].r
1394 /*, est3_modes[m].rb */);
1395 if (mode) {
1396 drm_mode_probed_add(connector, mode);
1397 modes++;
1398 }
1399 }
1400 }
1401 }
1402
1403 return modes;
1404}
1405
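The Established Timings III bitmap packs one mode per bit, most
significant bit first, so index m = i * 8 + (7 - j) maps byte i, bit j
onto est3_modes[]. A standalone sketch with a stub table of just the
first eight names; note the inner loop stops at bit 1, exactly as the
loop above does:

#include <stdio.h>

static const char *est3_names[] = {
        "640x350@85",  "640x400@85", "720x400@85",  "640x480@85",
        "848x480@60",  "800x600@85", "1024x768@85", "1152x864@75",
};
#define NUM_EST3 (int)(sizeof(est3_names) / sizeof(est3_names[0]))

static void est3_decode(const unsigned char bitmap[6])
{
        int i, j, m;

        for (i = 0; i < 6; i++)
                for (j = 7; j > 0; j--) {
                        m = i * 8 + (7 - j);
                        if (m >= NUM_EST3)
                                return;
                        if (bitmap[i] & (1 << j))
                                printf("%s\n", est3_names[m]);
                }
}

int main(void)
{
        /* fabricated: bits 5 and 2 of the first bitmap byte set */
        unsigned char bits[6] = { 0x24, 0, 0, 0, 0, 0 };

        est3_decode(bits);      /* 720x400@85 and 800x600@85 */
        return 0;
}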
991static int add_detailed_modes(struct drm_connector *connector, 1406static int add_detailed_modes(struct drm_connector *connector,
992 struct detailed_timing *timing, 1407 struct detailed_timing *timing,
993 struct edid *edid, u32 quirks, int preferred) 1408 struct edid *edid, u32 quirks, int preferred)
994{ 1409{
995 int i, modes = 0; 1410 int i, modes = 0;
996 struct detailed_non_pixel *data = &timing->data.other_data; 1411 struct detailed_non_pixel *data = &timing->data.other_data;
997 int timing_level = standard_timing_level(edid);
998 int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF); 1412 int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
999 struct drm_display_mode *newmode; 1413 struct drm_display_mode *newmode;
1000 struct drm_device *dev = connector->dev; 1414 struct drm_device *dev = connector->dev;
@@ -1015,7 +1429,8 @@ static int add_detailed_modes(struct drm_connector *connector,
1015 switch (data->type) { 1429 switch (data->type) {
1016 case EDID_DETAIL_MONITOR_RANGE: 1430 case EDID_DETAIL_MONITOR_RANGE:
1017 if (gtf) 1431 if (gtf)
1018 modes += drm_gtf_modes_for_range(connector, timing); 1432 modes += drm_gtf_modes_for_range(connector, edid,
1433 timing);
1019 break; 1434 break;
1020 case EDID_DETAIL_STD_MODES: 1435 case EDID_DETAIL_STD_MODES:
1021 /* Six modes per detailed section */ 1436 /* Six modes per detailed section */
@@ -1024,8 +1439,8 @@ static int add_detailed_modes(struct drm_connector *connector,
1024 struct drm_display_mode *newmode; 1439 struct drm_display_mode *newmode;
1025 1440
1026 std = &data->data.timings[i]; 1441 std = &data->data.timings[i];
1027 newmode = drm_mode_std(dev, std, edid->revision, 1442 newmode = drm_mode_std(connector, edid, std,
1028 timing_level); 1443 edid->revision);
1029 if (newmode) { 1444 if (newmode) {
1030 drm_mode_probed_add(connector, newmode); 1445 drm_mode_probed_add(connector, newmode);
1031 modes++; 1446 modes++;
@@ -1035,6 +1450,9 @@ static int add_detailed_modes(struct drm_connector *connector,
1035 case EDID_DETAIL_CVT_3BYTE: 1450 case EDID_DETAIL_CVT_3BYTE:
1036 modes += drm_cvt_modes(connector, timing); 1451 modes += drm_cvt_modes(connector, timing);
1037 break; 1452 break;
1453 case EDID_DETAIL_EST_TIMINGS:
1454 modes += drm_est3_modes(connector, timing);
1455 break;
1038 default: 1456 default:
1039 break; 1457 break;
1040 } 1458 }
@@ -1058,7 +1476,10 @@ static int add_detailed_info(struct drm_connector *connector,
1058 1476
1059 for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { 1477 for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
1060 struct detailed_timing *timing = &edid->detailed_timings[i]; 1478 struct detailed_timing *timing = &edid->detailed_timings[i];
1061 int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); 1479 int preferred = (i == 0);
1480
1481 if (preferred && edid->version == 1 && edid->revision < 4)
1482 preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
1062 1483
1063 /* In 1.0, only timings are allowed */ 1484 /* In 1.0, only timings are allowed */
1064 if (!timing->pixel_clock && edid->version == 1 && 1485 if (!timing->pixel_clock && edid->version == 1 &&
@@ -1088,39 +1509,22 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
1088 int i, modes = 0; 1509 int i, modes = 0;
1089 char *edid_ext = NULL; 1510 char *edid_ext = NULL;
1090 struct detailed_timing *timing; 1511 struct detailed_timing *timing;
1091 int edid_ext_num;
1092 int start_offset, end_offset; 1512 int start_offset, end_offset;
1093 int timing_level;
1094 1513
1095 if (edid->version == 1 && edid->revision < 3) { 1514 if (edid->version == 1 && edid->revision < 3)
1096 /* If the EDID version is less than 1.3, there is no
1097 * extension EDID.
1098 */
1099 return 0; 1515 return 0;
1100 } 1516 if (!edid->extensions)
1101 if (!edid->extensions) {
1102 /* if there is no extension EDID, it is unnecessary to
1103 * parse the E-EDID to get detailed info
1104 */
1105 return 0; 1517 return 0;
1106 }
1107
1108 /* Chose real EDID extension number */
1109 edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
1110 DRM_MAX_EDID_EXT_NUM : edid->extensions;
1111 1518
1112 /* Find CEA extension */ 1519 /* Find CEA extension */
1113 for (i = 0; i < edid_ext_num; i++) { 1520 for (i = 0; i < edid->extensions; i++) {
1114 edid_ext = (char *)edid + EDID_LENGTH * (i + 1); 1521 edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
1115 /* This block is CEA extension */
1116 if (edid_ext[0] == 0x02) 1522 if (edid_ext[0] == 0x02)
1117 break; 1523 break;
1118 } 1524 }
1119 1525
1120 if (i == edid_ext_num) { 1526 if (i == edid->extensions)
1121 /* if there is no additional timing EDID block, return */
1122 return 0; 1527 return 0;
1123 }
1124 1528
1125 /* Get the start offset of detailed timing block */ 1529 /* Get the start offset of detailed timing block */
1126 start_offset = edid_ext[2]; 1530 start_offset = edid_ext[2];
@@ -1132,7 +1536,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
1132 return 0; 1536 return 0;
1133 } 1537 }
1134 1538
1135 timing_level = standard_timing_level(edid);
1136 end_offset = EDID_LENGTH; 1539 end_offset = EDID_LENGTH;
1137 end_offset -= sizeof(struct detailed_timing); 1540 end_offset -= sizeof(struct detailed_timing);
1138 for (i = start_offset; i < end_offset; 1541 for (i = start_offset; i < end_offset;
@@ -1144,123 +1547,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
1144 return modes; 1547 return modes;
1145} 1548}
1146 1549
1147#define DDC_ADDR 0x50
1148/**
1149 * Get EDID information via I2C.
1150 *
1151 * \param adapter : i2c device adaptor
1152 * \param buf : EDID data buffer to be filled
1153 * \param len : EDID data buffer length
1154 * \return 0 on success or -1 on failure.
1155 *
1156 * Try to fetch EDID information by calling i2c driver function.
1157 */
1158int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
1159 unsigned char *buf, int len)
1160{
1161 unsigned char start = 0x0;
1162 struct i2c_msg msgs[] = {
1163 {
1164 .addr = DDC_ADDR,
1165 .flags = 0,
1166 .len = 1,
1167 .buf = &start,
1168 }, {
1169 .addr = DDC_ADDR,
1170 .flags = I2C_M_RD,
1171 .len = len,
1172 .buf = buf,
1173 }
1174 };
1175
1176 if (i2c_transfer(adapter, msgs, 2) == 2)
1177 return 0;
1178
1179 return -1;
1180}
1181EXPORT_SYMBOL(drm_do_probe_ddc_edid);
1182
1183static int drm_ddc_read_edid(struct drm_connector *connector,
1184 struct i2c_adapter *adapter,
1185 char *buf, int len)
1186{
1187 int i;
1188
1189 for (i = 0; i < 4; i++) {
1190 if (drm_do_probe_ddc_edid(adapter, buf, len))
1191 return -1;
1192 if (drm_edid_is_valid((struct edid *)buf))
1193 return 0;
1194 }
1195
1196 /* repeated checksum failures; warn, but carry on */
1197 dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
1198 drm_get_connector_name(connector));
1199 return -1;
1200}
1201
1202/**
1203 * drm_get_edid - get EDID data, if available
1204 * @connector: connector we're probing
1205 * @adapter: i2c adapter to use for DDC
1206 *
1207 * Poke the given connector's i2c channel to grab EDID data if possible.
1208 *
1209 * Return edid data or NULL if we couldn't find any.
1210 */
1211struct edid *drm_get_edid(struct drm_connector *connector,
1212 struct i2c_adapter *adapter)
1213{
1214 int ret;
1215 struct edid *edid;
1216
1217 edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
1218 GFP_KERNEL);
1219 if (edid == NULL) {
1220 dev_warn(&connector->dev->pdev->dev,
1221 "Failed to allocate EDID\n");
1222 goto end;
1223 }
1224
1225 /* Read first EDID block */
1226 ret = drm_ddc_read_edid(connector, adapter,
1227 (unsigned char *)edid, EDID_LENGTH);
1228 if (ret != 0)
1229 goto clean_up;
1230
1231 /* There are EDID extensions to be read */
1232 if (edid->extensions != 0) {
1233 int edid_ext_num = edid->extensions;
1234
1235 if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
1236 dev_warn(&connector->dev->pdev->dev,
1237 "The number of extension(%d) is "
1238 "over max (%d), actually read number (%d)\n",
1239 edid_ext_num, DRM_MAX_EDID_EXT_NUM,
1240 DRM_MAX_EDID_EXT_NUM);
1241 /* Reset EDID extension number to be read */
1242 edid_ext_num = DRM_MAX_EDID_EXT_NUM;
1243 }
1244 /* Read EDID including extensions too */
1245 ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
1246 EDID_LENGTH * (edid_ext_num + 1));
1247 if (ret != 0)
1248 goto clean_up;
1249
1250 }
1251
1252 connector->display_info.raw_edid = (char *)edid;
1253 goto end;
1254
1255clean_up:
1256 kfree(edid);
1257 edid = NULL;
1258end:
1259 return edid;
1260
1261}
1262EXPORT_SYMBOL(drm_get_edid);
1263
1264#define HDMI_IDENTIFIER 0x000C03 1550#define HDMI_IDENTIFIER 0x000C03
1265#define VENDOR_BLOCK 0x03 1551#define VENDOR_BLOCK 0x03
1266/** 1552/**
@@ -1273,7 +1559,7 @@ EXPORT_SYMBOL(drm_get_edid);
1273bool drm_detect_hdmi_monitor(struct edid *edid) 1559bool drm_detect_hdmi_monitor(struct edid *edid)
1274{ 1560{
1275 char *edid_ext = NULL; 1561 char *edid_ext = NULL;
1276 int i, hdmi_id, edid_ext_num; 1562 int i, hdmi_id;
1277 int start_offset, end_offset; 1563 int start_offset, end_offset;
1278 bool is_hdmi = false; 1564 bool is_hdmi = false;
1279 1565
@@ -1281,19 +1567,15 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
1281 if (edid == NULL || edid->extensions == 0) 1567 if (edid == NULL || edid->extensions == 0)
1282 goto end; 1568 goto end;
1283 1569
1284 /* Chose real EDID extension number */
1285 edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
1286 DRM_MAX_EDID_EXT_NUM : edid->extensions;
1287
1288 /* Find CEA extension */ 1570 /* Find CEA extension */
1289 for (i = 0; i < edid_ext_num; i++) { 1571 for (i = 0; i < edid->extensions; i++) {
1290 edid_ext = (char *)edid + EDID_LENGTH * (i + 1); 1572 edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
1291 /* This block is CEA extension */ 1573 /* This block is CEA extension */
1292 if (edid_ext[0] == 0x02) 1574 if (edid_ext[0] == 0x02)
1293 break; 1575 break;
1294 } 1576 }
1295 1577
1296 if (i == edid_ext_num) 1578 if (i == edid->extensions)
1297 goto end; 1579 goto end;
1298 1580
1299 /* Data block offset in CEA extension block */ 1581 /* Data block offset in CEA extension block */
@@ -1348,10 +1630,24 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
1348 1630
1349 quirks = edid_get_quirks(edid); 1631 quirks = edid_get_quirks(edid);
1350 1632
1351 num_modes += add_established_modes(connector, edid); 1633 /*
1352 num_modes += add_standard_modes(connector, edid); 1634 * EDID spec says modes should be preferred in this order:
1635 * - preferred detailed mode
1636 * - other detailed modes from base block
1637 * - detailed modes from extension blocks
1638 * - CVT 3-byte code modes
1639 * - standard timing codes
1640 * - established timing codes
1641 * - modes inferred from GTF or CVT range information
1642 *
1643 * We don't quite implement this yet, but we're close.
1644 *
1645 * XXX order for additional mode types in extension blocks?
1646 */
1353 num_modes += add_detailed_info(connector, edid, quirks); 1647 num_modes += add_detailed_info(connector, edid, quirks);
1354 num_modes += add_detailed_info_eedid(connector, edid, quirks); 1648 num_modes += add_detailed_info_eedid(connector, edid, quirks);
1649 num_modes += add_standard_modes(connector, edid);
1650 num_modes += add_established_modes(connector, edid);
1355 1651
1356 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) 1652 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
1357 edid_fixup_preferred(connector, quirks); 1653 edid_fixup_preferred(connector, quirks);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 288ea2f32772..b3779d243aef 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -42,15 +42,33 @@ MODULE_LICENSE("GPL and additional rights");
42 42
43static LIST_HEAD(kernel_fb_helper_list); 43static LIST_HEAD(kernel_fb_helper_list);
44 44
45int drm_fb_helper_add_connector(struct drm_connector *connector) 45/* simple single crtc case helper function */
46int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
46{ 47{
47 connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL); 48 struct drm_device *dev = fb_helper->dev;
48 if (!connector->fb_helper_private) 49 struct drm_connector *connector;
49 return -ENOMEM; 50 int i;
51
52 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
53 struct drm_fb_helper_connector *fb_helper_connector;
54
55 fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
56 if (!fb_helper_connector)
57 goto fail;
50 58
59 fb_helper_connector->connector = connector;
60 fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
61 }
51 return 0; 62 return 0;
63fail:
64 for (i = 0; i < fb_helper->connector_count; i++) {
65 kfree(fb_helper->connector_info[i]);
66 fb_helper->connector_info[i] = NULL;
67 }
68 fb_helper->connector_count = 0;
69 return -ENOMEM;
52} 70}
53EXPORT_SYMBOL(drm_fb_helper_add_connector); 71EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
54 72
55/** 73/**
56 * drm_fb_helper_connector_parse_command_line - parse command line for connector 74 * drm_fb_helper_connector_parse_command_line - parse command line for connector
@@ -65,7 +83,7 @@ EXPORT_SYMBOL(drm_fb_helper_add_connector);
65 * 83 *
66 * enable/enable Digital/disable bit at the end 84 * enable/enable Digital/disable bit at the end
67 */ 85 */
68static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector, 86static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
69 const char *mode_option) 87 const char *mode_option)
70{ 88{
71 const char *name; 89 const char *name;
@@ -75,13 +93,13 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
75 int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0; 93 int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
76 int i; 94 int i;
77 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; 95 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
78 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
79 struct drm_fb_helper_cmdline_mode *cmdline_mode; 96 struct drm_fb_helper_cmdline_mode *cmdline_mode;
97 struct drm_connector *connector = fb_helper_conn->connector;
80 98
81 if (!fb_help_conn) 99 if (!fb_helper_conn)
82 return false; 100 return false;
83 101
84 cmdline_mode = &fb_help_conn->cmdline_mode; 102 cmdline_mode = &fb_helper_conn->cmdline_mode;
85 if (!mode_option) 103 if (!mode_option)
86 mode_option = fb_mode_option; 104 mode_option = fb_mode_option;
87 105
@@ -204,18 +222,21 @@ done:
204 return true; 222 return true;
205} 223}
206 224
207int drm_fb_helper_parse_command_line(struct drm_device *dev) 225static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
208{ 226{
209 struct drm_connector *connector; 227 struct drm_fb_helper_connector *fb_helper_conn;
228 int i;
210 229
211 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 230 for (i = 0; i < fb_helper->connector_count; i++) {
212 char *option = NULL; 231 char *option = NULL;
213 232
233 fb_helper_conn = fb_helper->connector_info[i];
234
214 /* do something on return - turn off connector maybe */ 235 /* do something on return - turn off connector maybe */
215 if (fb_get_options(drm_get_connector_name(connector), &option)) 236 if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
216 continue; 237 continue;
217 238
218 drm_fb_helper_connector_parse_command_line(connector, option); 239 drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
219 } 240 }
220 return 0; 241 return 0;
221} 242}
@@ -293,6 +314,7 @@ static void drm_fb_helper_on(struct fb_info *info)
293 struct drm_fb_helper *fb_helper = info->par; 314 struct drm_fb_helper *fb_helper = info->par;
294 struct drm_device *dev = fb_helper->dev; 315 struct drm_device *dev = fb_helper->dev;
295 struct drm_crtc *crtc; 316 struct drm_crtc *crtc;
317 struct drm_crtc_helper_funcs *crtc_funcs;
296 struct drm_encoder *encoder; 318 struct drm_encoder *encoder;
297 int i; 319 int i;
298 320
@@ -300,33 +322,28 @@ static void drm_fb_helper_on(struct fb_info *info)
300 * For each CRTC in this fb, turn the crtc on then, 322 * For each CRTC in this fb, turn the crtc on then,
301 * find all associated encoders and turn them on. 323 * find all associated encoders and turn them on.
302 */ 324 */
325 mutex_lock(&dev->mode_config.mutex);
303 for (i = 0; i < fb_helper->crtc_count; i++) { 326 for (i = 0; i < fb_helper->crtc_count; i++) {
304 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 327 crtc = fb_helper->crtc_info[i].mode_set.crtc;
305 struct drm_crtc_helper_funcs *crtc_funcs = 328 crtc_funcs = crtc->helper_private;
306 crtc->helper_private;
307 329
308 /* Only mess with CRTCs in this fb */ 330 if (!crtc->enabled)
309 if (crtc->base.id != fb_helper->crtc_info[i].crtc_id || 331 continue;
310 !crtc->enabled)
311 continue;
312 332
313 mutex_lock(&dev->mode_config.mutex); 333 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
314 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
315 mutex_unlock(&dev->mode_config.mutex);
316 334
317 /* Found a CRTC on this fb, now find encoders */
318 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
319 if (encoder->crtc == crtc) {
320 struct drm_encoder_helper_funcs *encoder_funcs;
321 335
322 encoder_funcs = encoder->helper_private; 336 /* Found a CRTC on this fb, now find encoders */
323 mutex_lock(&dev->mode_config.mutex); 337 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
324 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); 338 if (encoder->crtc == crtc) {
325 mutex_unlock(&dev->mode_config.mutex); 339 struct drm_encoder_helper_funcs *encoder_funcs;
326 } 340
341 encoder_funcs = encoder->helper_private;
342 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
327 } 343 }
328 } 344 }
329 } 345 }
346 mutex_unlock(&dev->mode_config.mutex);
330} 347}
331 348
332static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) 349static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
@@ -334,6 +351,7 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
334 struct drm_fb_helper *fb_helper = info->par; 351 struct drm_fb_helper *fb_helper = info->par;
335 struct drm_device *dev = fb_helper->dev; 352 struct drm_device *dev = fb_helper->dev;
336 struct drm_crtc *crtc; 353 struct drm_crtc *crtc;
354 struct drm_crtc_helper_funcs *crtc_funcs;
337 struct drm_encoder *encoder; 355 struct drm_encoder *encoder;
338 int i; 356 int i;
339 357
@@ -341,32 +359,26 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
341 * For each CRTC in this fb, find all associated encoders 359 * For each CRTC in this fb, find all associated encoders
342 * and turn them off, then turn off the CRTC. 360 * and turn them off, then turn off the CRTC.
343 */ 361 */
362 mutex_lock(&dev->mode_config.mutex);
344 for (i = 0; i < fb_helper->crtc_count; i++) { 363 for (i = 0; i < fb_helper->crtc_count; i++) {
345 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 364 crtc = fb_helper->crtc_info[i].mode_set.crtc;
346 struct drm_crtc_helper_funcs *crtc_funcs = 365 crtc_funcs = crtc->helper_private;
347 crtc->helper_private;
348 366
349 /* Only mess with CRTCs in this fb */ 367 if (!crtc->enabled)
350 if (crtc->base.id != fb_helper->crtc_info[i].crtc_id || 368 continue;
351 !crtc->enabled)
352 continue;
353 369
354 /* Found a CRTC on this fb, now find encoders */ 370 /* Found a CRTC on this fb, now find encoders */
355 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 371 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
356 if (encoder->crtc == crtc) { 372 if (encoder->crtc == crtc) {
357 struct drm_encoder_helper_funcs *encoder_funcs; 373 struct drm_encoder_helper_funcs *encoder_funcs;
358 374
359 encoder_funcs = encoder->helper_private; 375 encoder_funcs = encoder->helper_private;
360 mutex_lock(&dev->mode_config.mutex); 376 encoder_funcs->dpms(encoder, dpms_mode);
361 encoder_funcs->dpms(encoder, dpms_mode);
362 mutex_unlock(&dev->mode_config.mutex);
363 }
364 } 377 }
365 mutex_lock(&dev->mode_config.mutex);
366 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
367 mutex_unlock(&dev->mode_config.mutex);
368 } 378 }
379 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
369 } 380 }
381 mutex_unlock(&dev->mode_config.mutex);
370} 382}
371 383
372int drm_fb_helper_blank(int blank, struct fb_info *info) 384int drm_fb_helper_blank(int blank, struct fb_info *info)
@@ -401,50 +413,81 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
401{ 413{
402 int i; 414 int i;
403 415
416 for (i = 0; i < helper->connector_count; i++)
417 kfree(helper->connector_info[i]);
418 kfree(helper->connector_info);
404 for (i = 0; i < helper->crtc_count; i++) 419 for (i = 0; i < helper->crtc_count; i++)
405 kfree(helper->crtc_info[i].mode_set.connectors); 420 kfree(helper->crtc_info[i].mode_set.connectors);
406 kfree(helper->crtc_info); 421 kfree(helper->crtc_info);
407} 422}
408 423
409int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count) 424int drm_fb_helper_init(struct drm_device *dev,
425 struct drm_fb_helper *fb_helper,
426 int crtc_count, int max_conn_count)
410{ 427{
411 struct drm_device *dev = helper->dev;
412 struct drm_crtc *crtc; 428 struct drm_crtc *crtc;
413 int ret = 0; 429 int ret = 0;
414 int i; 430 int i;
415 431
416 helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL); 432 fb_helper->dev = dev;
417 if (!helper->crtc_info) 433
434 INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
435
436 fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
437 if (!fb_helper->crtc_info)
418 return -ENOMEM; 438 return -ENOMEM;
419 439
420 helper->crtc_count = crtc_count; 440 fb_helper->crtc_count = crtc_count;
441 fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
442 if (!fb_helper->connector_info) {
443 kfree(fb_helper->crtc_info);
444 return -ENOMEM;
445 }
446 fb_helper->connector_count = 0;
421 447
422 for (i = 0; i < crtc_count; i++) { 448 for (i = 0; i < crtc_count; i++) {
423 helper->crtc_info[i].mode_set.connectors = 449 fb_helper->crtc_info[i].mode_set.connectors =
424 kcalloc(max_conn_count, 450 kcalloc(max_conn_count,
425 sizeof(struct drm_connector *), 451 sizeof(struct drm_connector *),
426 GFP_KERNEL); 452 GFP_KERNEL);
427 453
428 if (!helper->crtc_info[i].mode_set.connectors) { 454 if (!fb_helper->crtc_info[i].mode_set.connectors) {
429 ret = -ENOMEM; 455 ret = -ENOMEM;
430 goto out_free; 456 goto out_free;
431 } 457 }
432 helper->crtc_info[i].mode_set.num_connectors = 0; 458 fb_helper->crtc_info[i].mode_set.num_connectors = 0;
433 } 459 }
434 460
435 i = 0; 461 i = 0;
436 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 462 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
437 helper->crtc_info[i].crtc_id = crtc->base.id; 463 fb_helper->crtc_info[i].crtc_id = crtc->base.id;
438 helper->crtc_info[i].mode_set.crtc = crtc; 464 fb_helper->crtc_info[i].mode_set.crtc = crtc;
439 i++; 465 i++;
440 } 466 }
441 helper->conn_limit = max_conn_count; 467 fb_helper->conn_limit = max_conn_count;
442 return 0; 468 return 0;
443out_free: 469out_free:
444 drm_fb_helper_crtc_free(helper); 470 drm_fb_helper_crtc_free(fb_helper);
445 return -ENOMEM; 471 return -ENOMEM;
446} 472}
447EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); 473EXPORT_SYMBOL(drm_fb_helper_init);
474
475void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
476{
477 if (!list_empty(&fb_helper->kernel_fb_list)) {
478 list_del(&fb_helper->kernel_fb_list);
479 if (list_empty(&kernel_fb_helper_list)) {
480 printk(KERN_INFO "drm: unregistered panic notifier\n");
481 atomic_notifier_chain_unregister(&panic_notifier_list,
482 &paniced);
483 unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
484 }
485 }
486
487 drm_fb_helper_crtc_free(fb_helper);
488
489}
490EXPORT_SYMBOL(drm_fb_helper_fini);
448 491
449static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, 492static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
450 u16 blue, u16 regno, struct fb_info *info) 493 u16 blue, u16 regno, struct fb_info *info)
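With the constructor renamed to drm_fb_helper_init, taking the drm_device explicitly and allocating the connector_info array up front, plus the new drm_fb_helper_fini counterpart that also unwinds the panic-notifier registration, a driver's fbdev setup reduces to fragments like these (a hedged sketch; my_fbdev and my_fb_helper_funcs are invented names):

	struct my_fbdev *my_fbdev;
	int ret;

	my_fbdev = kzalloc(sizeof(*my_fbdev), GFP_KERNEL);
	if (!my_fbdev)
		return -ENOMEM;

	my_fbdev->helper.funcs = &my_fb_helper_funcs;	/* supplies .fb_probe */
	ret = drm_fb_helper_init(dev, &my_fbdev->helper,
				 dev->mode_config.num_crtc,	/* crtc_count */
				 4);				/* max_conn_count */
	if (ret) {
		kfree(my_fbdev);
		return ret;
	}

	/* ... and on driver unload: */
	drm_fb_helper_fini(&my_fbdev->helper);
	kfree(my_fbdev);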
@@ -508,20 +551,15 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
508int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) 551int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
509{ 552{
510 struct drm_fb_helper *fb_helper = info->par; 553 struct drm_fb_helper *fb_helper = info->par;
511 struct drm_device *dev = fb_helper->dev; 554 struct drm_crtc_helper_funcs *crtc_funcs;
512 u16 *red, *green, *blue, *transp; 555 u16 *red, *green, *blue, *transp;
513 struct drm_crtc *crtc; 556 struct drm_crtc *crtc;
514 int i, rc = 0; 557 int i, rc = 0;
515 int start; 558 int start;
516 559
517 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 560 for (i = 0; i < fb_helper->crtc_count; i++) {
518 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 561 crtc = fb_helper->crtc_info[i].mode_set.crtc;
519 for (i = 0; i < fb_helper->crtc_count; i++) { 562 crtc_funcs = crtc->helper_private;
520 if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
521 break;
522 }
523 if (i == fb_helper->crtc_count)
524 continue;
525 563
526 red = cmap->red; 564 red = cmap->red;
527 green = cmap->green; 565 green = cmap->green;
@@ -549,41 +587,6 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
549} 587}
550EXPORT_SYMBOL(drm_fb_helper_setcmap); 588EXPORT_SYMBOL(drm_fb_helper_setcmap);
551 589
552int drm_fb_helper_setcolreg(unsigned regno,
553 unsigned red,
554 unsigned green,
555 unsigned blue,
556 unsigned transp,
557 struct fb_info *info)
558{
559 struct drm_fb_helper *fb_helper = info->par;
560 struct drm_device *dev = fb_helper->dev;
561 struct drm_crtc *crtc;
562 int i;
563 int ret;
564
565 if (regno > 255)
566 return 1;
567
568 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
569 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
570 for (i = 0; i < fb_helper->crtc_count; i++) {
571 if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
572 break;
573 }
574 if (i == fb_helper->crtc_count)
575 continue;
576
577 ret = setcolreg(crtc, red, green, blue, regno, info);
578 if (ret)
579 return ret;
580
581 crtc_funcs->load_lut(crtc);
582 }
583 return 0;
584}
585EXPORT_SYMBOL(drm_fb_helper_setcolreg);
586
587int drm_fb_helper_check_var(struct fb_var_screeninfo *var, 590int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
588 struct fb_info *info) 591 struct fb_info *info)
589{ 592{
@@ -687,23 +690,21 @@ int drm_fb_helper_set_par(struct fb_info *info)
687 return -EINVAL; 690 return -EINVAL;
688 } 691 }
689 692
690 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 693 mutex_lock(&dev->mode_config.mutex);
691 694 for (i = 0; i < fb_helper->crtc_count; i++) {
692 for (i = 0; i < fb_helper->crtc_count; i++) { 695 crtc = fb_helper->crtc_info[i].mode_set.crtc;
693 if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) 696 ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
694 break; 697 if (ret) {
695 }
696 if (i == fb_helper->crtc_count)
697 continue;
698
699 if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
700 mutex_lock(&dev->mode_config.mutex);
701 ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
702 mutex_unlock(&dev->mode_config.mutex); 698 mutex_unlock(&dev->mode_config.mutex);
703 if (ret) 699 return ret;
704 return ret;
705 } 700 }
706 } 701 }
702 mutex_unlock(&dev->mode_config.mutex);
703
704 if (fb_helper->delayed_hotplug) {
705 fb_helper->delayed_hotplug = false;
706 drm_fb_helper_hotplug_event(fb_helper);
707 }
707 return 0; 708 return 0;
708} 709}
709EXPORT_SYMBOL(drm_fb_helper_set_par); 710EXPORT_SYMBOL(drm_fb_helper_set_par);
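The tail added to set_par pairs with the deferral visible in drm_fb_helper_hotplug_event further down in this file: when a hotplug arrives while the helper's fb is not the one bound to the CRTCs, the event is parked in delayed_hotplug instead of acted on, and replayed once fbcon takes the console back through set_par. Schematically (both fragments are taken from this patch):

	/* drm_fb_helper_hotplug_event(): console not bound, defer */
	if (!bound && crtcs_bound) {
		fb_helper->delayed_hotplug = true;
		return false;
	}

	/* drm_fb_helper_set_par(): console bound again, replay */
	if (fb_helper->delayed_hotplug) {
		fb_helper->delayed_hotplug = false;
		drm_fb_helper_hotplug_event(fb_helper);
	}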
@@ -718,14 +719,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
718 int ret = 0; 719 int ret = 0;
719 int i; 720 int i;
720 721
721 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 722 mutex_lock(&dev->mode_config.mutex);
722 for (i = 0; i < fb_helper->crtc_count; i++) { 723 for (i = 0; i < fb_helper->crtc_count; i++) {
723 if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) 724 crtc = fb_helper->crtc_info[i].mode_set.crtc;
724 break;
725 }
726
727 if (i == fb_helper->crtc_count)
728 continue;
729 725
730 modeset = &fb_helper->crtc_info[i].mode_set; 726 modeset = &fb_helper->crtc_info[i].mode_set;
731 727
@@ -733,209 +729,138 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
733 modeset->y = var->yoffset; 729 modeset->y = var->yoffset;
734 730
735 if (modeset->num_connectors) { 731 if (modeset->num_connectors) {
736 mutex_lock(&dev->mode_config.mutex);
737 ret = crtc->funcs->set_config(modeset); 732 ret = crtc->funcs->set_config(modeset);
738 mutex_unlock(&dev->mode_config.mutex);
739 if (!ret) { 733 if (!ret) {
740 info->var.xoffset = var->xoffset; 734 info->var.xoffset = var->xoffset;
741 info->var.yoffset = var->yoffset; 735 info->var.yoffset = var->yoffset;
742 } 736 }
743 } 737 }
744 } 738 }
739 mutex_unlock(&dev->mode_config.mutex);
745 return ret; 740 return ret;
746} 741}
747EXPORT_SYMBOL(drm_fb_helper_pan_display); 742EXPORT_SYMBOL(drm_fb_helper_pan_display);
748 743
749int drm_fb_helper_single_fb_probe(struct drm_device *dev, 744int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
750 int preferred_bpp, 745 int preferred_bpp)
751 int (*fb_create)(struct drm_device *dev,
752 uint32_t fb_width,
753 uint32_t fb_height,
754 uint32_t surface_width,
755 uint32_t surface_height,
756 uint32_t surface_depth,
757 uint32_t surface_bpp,
758 struct drm_framebuffer **fb_ptr))
759{ 746{
760 struct drm_crtc *crtc;
761 struct drm_connector *connector;
762 unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
763 unsigned int surface_width = 0, surface_height = 0;
764 int new_fb = 0; 747 int new_fb = 0;
765 int crtc_count = 0; 748 int crtc_count = 0;
766 int ret, i, conn_count = 0; 749 int i;
767 struct fb_info *info; 750 struct fb_info *info;
768 struct drm_framebuffer *fb; 751 struct drm_fb_helper_surface_size sizes;
769 struct drm_mode_set *modeset = NULL; 752 int gamma_size = 0;
770 struct drm_fb_helper *fb_helper; 753
771 uint32_t surface_depth = 24, surface_bpp = 32; 754 memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
755 sizes.surface_depth = 24;
756 sizes.surface_bpp = 32;
757 sizes.fb_width = (unsigned)-1;
758 sizes.fb_height = (unsigned)-1;
772 759
773 /* if driver picks 8 or 16 by default use that 760 /* if driver picks 8 or 16 by default use that
774 for both depth/bpp */ 761 for both depth/bpp */
775 if (preferred_bpp != surface_bpp) { 762 if (preferred_bpp != sizes.surface_bpp) {
776 surface_depth = surface_bpp = preferred_bpp; 763 sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
777 } 764 }
778 /* first up get a count of crtcs now in use and new min/maxes width/heights */ 765 /* first up get a count of crtcs now in use and new min/maxes width/heights */
779 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 766 for (i = 0; i < fb_helper->connector_count; i++) {
780 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; 767 struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
781
782 struct drm_fb_helper_cmdline_mode *cmdline_mode; 768 struct drm_fb_helper_cmdline_mode *cmdline_mode;
783 769
784 if (!fb_help_conn) 770 cmdline_mode = &fb_helper_conn->cmdline_mode;
785 continue;
786
787 cmdline_mode = &fb_help_conn->cmdline_mode;
788 771
789 if (cmdline_mode->bpp_specified) { 772 if (cmdline_mode->bpp_specified) {
790 switch (cmdline_mode->bpp) { 773 switch (cmdline_mode->bpp) {
791 case 8: 774 case 8:
792 surface_depth = surface_bpp = 8; 775 sizes.surface_depth = sizes.surface_bpp = 8;
793 break; 776 break;
794 case 15: 777 case 15:
795 surface_depth = 15; 778 sizes.surface_depth = 15;
796 surface_bpp = 16; 779 sizes.surface_bpp = 16;
797 break; 780 break;
798 case 16: 781 case 16:
799 surface_depth = surface_bpp = 16; 782 sizes.surface_depth = sizes.surface_bpp = 16;
800 break; 783 break;
801 case 24: 784 case 24:
802 surface_depth = surface_bpp = 24; 785 sizes.surface_depth = sizes.surface_bpp = 24;
803 break; 786 break;
804 case 32: 787 case 32:
805 surface_depth = 24; 788 sizes.surface_depth = 24;
806 surface_bpp = 32; 789 sizes.surface_bpp = 32;
807 break; 790 break;
808 } 791 }
809 break; 792 break;
810 } 793 }
811 } 794 }
812 795
813 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 796 crtc_count = 0;
814 if (drm_helper_crtc_in_use(crtc)) { 797 for (i = 0; i < fb_helper->crtc_count; i++) {
815 if (crtc->desired_mode) { 798 struct drm_display_mode *desired_mode;
816 if (crtc->desired_mode->hdisplay < fb_width) 799 desired_mode = fb_helper->crtc_info[i].desired_mode;
817 fb_width = crtc->desired_mode->hdisplay; 800
818 801 if (desired_mode) {
819 if (crtc->desired_mode->vdisplay < fb_height) 802 if (gamma_size == 0)
820 fb_height = crtc->desired_mode->vdisplay; 803 gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
821 804 if (desired_mode->hdisplay < sizes.fb_width)
822 if (crtc->desired_mode->hdisplay > surface_width) 805 sizes.fb_width = desired_mode->hdisplay;
823 surface_width = crtc->desired_mode->hdisplay; 806 if (desired_mode->vdisplay < sizes.fb_height)
824 807 sizes.fb_height = desired_mode->vdisplay;
825 if (crtc->desired_mode->vdisplay > surface_height) 808 if (desired_mode->hdisplay > sizes.surface_width)
826 surface_height = crtc->desired_mode->vdisplay; 809 sizes.surface_width = desired_mode->hdisplay;
827 } 810 if (desired_mode->vdisplay > sizes.surface_height)
811 sizes.surface_height = desired_mode->vdisplay;
828 crtc_count++; 812 crtc_count++;
829 } 813 }
830 } 814 }
831 815
832 if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { 816 if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
833 /* hmm everyone went away - assume VGA cable just fell out 817 /* hmm everyone went away - assume VGA cable just fell out
834 and will come back later. */ 818 and will come back later. */
835 return 0; 819 DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
820 sizes.fb_width = sizes.surface_width = 1024;
821 sizes.fb_height = sizes.surface_height = 768;
836 } 822 }
837 823
838 /* do we have an fb already? */ 824 /* push down into drivers */
839 if (list_empty(&dev->mode_config.fb_kernel_list)) { 825 new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
840 ret = (*fb_create)(dev, fb_width, fb_height, surface_width, 826 if (new_fb < 0)
841 surface_height, surface_depth, surface_bpp, 827 return new_fb;
842 &fb);
843 if (ret)
844 return -EINVAL;
845 new_fb = 1;
846 } else {
847 fb = list_first_entry(&dev->mode_config.fb_kernel_list,
848 struct drm_framebuffer, filp_head);
849
850 /* if someone hotplugs something bigger than we have already allocated, we are pwned.
851 As really we can't resize an fbdev that is in the wild currently due to fbdev
852 not really being designed for the lower layers moving stuff around under it.
853 - so in the grand style of things - punt. */
854 if ((fb->width < surface_width) ||
855 (fb->height < surface_height)) {
856 DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
857 return -EINVAL;
858 }
859 }
860
861 info = fb->fbdev;
862 fb_helper = info->par;
863
864 crtc_count = 0;
865 /* okay we need to setup new connector sets in the crtcs */
866 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
867 modeset = &fb_helper->crtc_info[crtc_count].mode_set;
868 modeset->fb = fb;
869 conn_count = 0;
870 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
871 if (connector->encoder)
872 if (connector->encoder->crtc == modeset->crtc) {
873 modeset->connectors[conn_count] = connector;
874 conn_count++;
875 if (conn_count > fb_helper->conn_limit)
876 BUG();
877 }
878 }
879
880 for (i = conn_count; i < fb_helper->conn_limit; i++)
881 modeset->connectors[i] = NULL;
882 828
883 modeset->crtc = crtc; 829 info = fb_helper->fbdev;
884 crtc_count++;
885 830
886 modeset->num_connectors = conn_count; 831 /* set the fb pointer */
887 if (modeset->crtc->desired_mode) { 832 for (i = 0; i < fb_helper->crtc_count; i++) {
888 if (modeset->mode) 833 fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
889 drm_mode_destroy(dev, modeset->mode);
890 modeset->mode = drm_mode_duplicate(dev,
891 modeset->crtc->desired_mode);
892 }
893 } 834 }
894 fb_helper->crtc_count = crtc_count;
895 fb_helper->fb = fb;
896 835
897 if (new_fb) { 836 if (new_fb) {
898 info->var.pixclock = 0; 837 info->var.pixclock = 0;
899 ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
900 if (ret)
901 return ret;
902 if (register_framebuffer(info) < 0) { 838 if (register_framebuffer(info) < 0) {
903 fb_dealloc_cmap(&info->cmap);
904 return -EINVAL; 839 return -EINVAL;
905 } 840 }
841
842 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
843 info->fix.id);
844
906 } else { 845 } else {
907 drm_fb_helper_set_par(info); 846 drm_fb_helper_set_par(info);
908 } 847 }
909 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
910 info->fix.id);
911 848
912 /* Switch back to kernel console on panic */ 849 /* Switch back to kernel console on panic */
913 /* multi card linked list maybe */ 850 /* multi card linked list maybe */
914 if (list_empty(&kernel_fb_helper_list)) { 851 if (list_empty(&kernel_fb_helper_list)) {
915 printk(KERN_INFO "registered panic notifier\n"); 852 printk(KERN_INFO "drm: registered panic notifier\n");
916 atomic_notifier_chain_register(&panic_notifier_list, 853 atomic_notifier_chain_register(&panic_notifier_list,
917 &paniced); 854 &paniced);
918 register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 855 register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
919 } 856 }
920 list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); 857 if (new_fb)
858 list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
859
921 return 0; 860 return 0;
922} 861}
923EXPORT_SYMBOL(drm_fb_helper_single_fb_probe); 862EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
924 863
925void drm_fb_helper_free(struct drm_fb_helper *helper)
926{
927 list_del(&helper->kernel_fb_list);
928 if (list_empty(&kernel_fb_helper_list)) {
929 printk(KERN_INFO "unregistered panic notifier\n");
930 atomic_notifier_chain_unregister(&panic_notifier_list,
931 &paniced);
932 unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
933 }
934 drm_fb_helper_crtc_free(helper);
935 fb_dealloc_cmap(&helper->fb->fbdev->cmap);
936}
937EXPORT_SYMBOL(drm_fb_helper_free);
938
939void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, 864void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
940 uint32_t depth) 865 uint32_t depth)
941{ 866{
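The refactored probe no longer allocates the framebuffer itself: it computes a drm_fb_helper_surface_size and pushes allocation down through the new fb_probe hook, interpreting the return value as negative for error, 0 when an existing fb was reused, and positive when a new one was created (only then does the helper register the fbdev). A minimal sketch of the driver side, with all my_* names invented for illustration:

	static int my_fb_probe(struct drm_fb_helper *helper,
			       struct drm_fb_helper_surface_size *sizes)
	{
		if (helper->fb) {
			/* can't resize a live fbdev; punt if it's too small */
			if (helper->fb->width < sizes->surface_width ||
			    helper->fb->height < sizes->surface_height)
				return -EINVAL;
			return 0;	/* reuse the existing fb */
		}

		/* allocate backing storage, set helper->fb and helper->fbdev,
		 * then describe the result to the fbdev layer: */
		drm_fb_helper_fill_fix(helper->fbdev, helper->fb->pitch,
				       helper->fb->depth);
		drm_fb_helper_fill_var(helper->fbdev, helper,
				       sizes->fb_width, sizes->fb_height);
		return 1;	/* new fb: the caller registers it */
	}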
@@ -954,10 +879,11 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
954} 879}
955EXPORT_SYMBOL(drm_fb_helper_fill_fix); 880EXPORT_SYMBOL(drm_fb_helper_fill_fix);
956 881
957void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, 882void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
958 uint32_t fb_width, uint32_t fb_height) 883 uint32_t fb_width, uint32_t fb_height)
959{ 884{
960 info->pseudo_palette = fb->pseudo_palette; 885 struct drm_framebuffer *fb = fb_helper->fb;
886 info->pseudo_palette = fb_helper->pseudo_palette;
961 info->var.xres_virtual = fb->width; 887 info->var.xres_virtual = fb->width;
962 info->var.yres_virtual = fb->height; 888 info->var.yres_virtual = fb->height;
963 info->var.bits_per_pixel = fb->bits_per_pixel; 889 info->var.bits_per_pixel = fb->bits_per_pixel;
@@ -1025,3 +951,457 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
1025 info->var.yres = fb_height; 951 info->var.yres = fb_height;
1026} 952}
1027EXPORT_SYMBOL(drm_fb_helper_fill_var); 953EXPORT_SYMBOL(drm_fb_helper_fill_var);
954
955static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
956 uint32_t maxX,
957 uint32_t maxY)
958{
959 struct drm_connector *connector;
960 int count = 0;
961 int i;
962
963 for (i = 0; i < fb_helper->connector_count; i++) {
964 connector = fb_helper->connector_info[i]->connector;
965 count += connector->funcs->fill_modes(connector, maxX, maxY);
966 }
967
968 return count;
969}
970
971static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
972{
973 struct drm_display_mode *mode;
974
975 list_for_each_entry(mode, &fb_connector->connector->modes, head) {
976 if (drm_mode_width(mode) > width ||
977 drm_mode_height(mode) > height)
978 continue;
979 if (mode->type & DRM_MODE_TYPE_PREFERRED)
980 return mode;
981 }
982 return NULL;
983}
984
985static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
986{
987 struct drm_fb_helper_cmdline_mode *cmdline_mode;
988 cmdline_mode = &fb_connector->cmdline_mode;
989 return cmdline_mode->specified;
990}
991
992static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
993 int width, int height)
994{
995 struct drm_fb_helper_cmdline_mode *cmdline_mode;
996 struct drm_display_mode *mode = NULL;
997
998 cmdline_mode = &fb_helper_conn->cmdline_mode;
999 if (cmdline_mode->specified == false)
1000 return mode;
1001
1002 /* attempt to find a matching mode in the list of modes
1003 * we have gotten so far, if not add a CVT mode that conforms
1004 */
1005 if (cmdline_mode->rb || cmdline_mode->margins)
1006 goto create_mode;
1007
1008 list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
1009 /* check width/height */
1010 if (mode->hdisplay != cmdline_mode->xres ||
1011 mode->vdisplay != cmdline_mode->yres)
1012 continue;
1013
1014 if (cmdline_mode->refresh_specified) {
1015 if (mode->vrefresh != cmdline_mode->refresh)
1016 continue;
1017 }
1018
1019 if (cmdline_mode->interlace) {
1020 if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
1021 continue;
1022 }
1023 return mode;
1024 }
1025
1026create_mode:
1027 mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres,
1028 cmdline_mode->yres,
1029 cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
1030 cmdline_mode->rb, cmdline_mode->interlace,
1031 cmdline_mode->margins);
1032 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
1033 list_add(&mode->head, &fb_helper_conn->connector->modes);
1034 return mode;
1035}
1036
1037static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
1038{
1039 bool enable;
1040
1041 if (strict) {
1042 enable = connector->status == connector_status_connected;
1043 } else {
1044 enable = connector->status != connector_status_disconnected;
1045 }
1046 return enable;
1047}
1048
1049static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
1050 bool *enabled)
1051{
1052 bool any_enabled = false;
1053 struct drm_connector *connector;
1054 int i = 0;
1055
1056 for (i = 0; i < fb_helper->connector_count; i++) {
1057 connector = fb_helper->connector_info[i]->connector;
1058 enabled[i] = drm_connector_enabled(connector, true);
1059 DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
1060 enabled[i] ? "yes" : "no");
1061 any_enabled |= enabled[i];
1062 }
1063
1064 if (any_enabled)
1065 return;
1066
1067 for (i = 0; i < fb_helper->connector_count; i++) {
1068 connector = fb_helper->connector_info[i]->connector;
1069 enabled[i] = drm_connector_enabled(connector, false);
1070 }
1071}
1072
1073static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
1074 struct drm_display_mode **modes,
1075 bool *enabled, int width, int height)
1076{
1077 int count, i, j;
1078 bool can_clone = false;
1079 struct drm_fb_helper_connector *fb_helper_conn;
1080 struct drm_display_mode *dmt_mode, *mode;
1081
1082 /* only contemplate cloning in the single crtc case */
1083 if (fb_helper->crtc_count > 1)
1084 return false;
1085
1086 count = 0;
1087 for (i = 0; i < fb_helper->connector_count; i++) {
1088 if (enabled[i])
1089 count++;
1090 }
1091
1092 /* only contemplate cloning if more than one connector is enabled */
1093 if (count <= 1)
1094 return false;
1095
1096 /* check the command line or if nothing common pick 1024x768 */
1097 can_clone = true;
1098 for (i = 0; i < fb_helper->connector_count; i++) {
1099 if (!enabled[i])
1100 continue;
1101 fb_helper_conn = fb_helper->connector_info[i];
1102 modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
1103 if (!modes[i]) {
1104 can_clone = false;
1105 break;
1106 }
1107 for (j = 0; j < i; j++) {
1108 if (!enabled[j])
1109 continue;
1110 if (!drm_mode_equal(modes[j], modes[i]))
1111 can_clone = false;
1112 }
1113 }
1114
1115 if (can_clone) {
1116 DRM_DEBUG_KMS("can clone using command line\n");
1117 return true;
1118 }
1119
1120 /* try and find a 1024x768 mode on each connector */
1121 can_clone = true;
1122 dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
1123
1124 for (i = 0; i < fb_helper->connector_count; i++) {
1125
1126 if (!enabled[i])
1127 continue;
1128
1129 fb_helper_conn = fb_helper->connector_info[i];
1130 list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
1131 if (drm_mode_equal(mode, dmt_mode))
1132 modes[i] = mode;
1133 }
1134 if (!modes[i])
1135 can_clone = false;
1136 }
1137
1138 if (can_clone) {
1139 DRM_DEBUG_KMS("can clone using 1024x768\n");
1140 return true;
1141 }
1142 DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
1143 return false;
1144}
1145
1146static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
1147 struct drm_display_mode **modes,
1148 bool *enabled, int width, int height)
1149{
1150 struct drm_fb_helper_connector *fb_helper_conn;
1151 int i;
1152
1153 for (i = 0; i < fb_helper->connector_count; i++) {
1154 fb_helper_conn = fb_helper->connector_info[i];
1155
1156 if (enabled[i] == false)
1157 continue;
1158
1159 DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
1160 fb_helper_conn->connector->base.id);
1161
1162		/* go for command line mode first */
1163 modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
1164 if (!modes[i]) {
1165 DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
1166 fb_helper_conn->connector->base.id);
1167 modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
1168 }
1169 /* No preferred modes, pick one off the list */
1170 if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
1171 list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
1172 break;
1173 }
1174 DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
1175 "none");
1176 }
1177 return true;
1178}
1179
1180static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
1181 struct drm_fb_helper_crtc **best_crtcs,
1182 struct drm_display_mode **modes,
1183 int n, int width, int height)
1184{
1185 int c, o;
1186 struct drm_device *dev = fb_helper->dev;
1187 struct drm_connector *connector;
1188 struct drm_connector_helper_funcs *connector_funcs;
1189 struct drm_encoder *encoder;
1190 struct drm_fb_helper_crtc *best_crtc;
1191 int my_score, best_score, score;
1192 struct drm_fb_helper_crtc **crtcs, *crtc;
1193 struct drm_fb_helper_connector *fb_helper_conn;
1194
1195 if (n == fb_helper->connector_count)
1196 return 0;
1197
1198 fb_helper_conn = fb_helper->connector_info[n];
1199 connector = fb_helper_conn->connector;
1200
1201 best_crtcs[n] = NULL;
1202 best_crtc = NULL;
1203 best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
1204 if (modes[n] == NULL)
1205 return best_score;
1206
1207 crtcs = kzalloc(dev->mode_config.num_connector *
1208 sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
1209 if (!crtcs)
1210 return best_score;
1211
1212 my_score = 1;
1213 if (connector->status == connector_status_connected)
1214 my_score++;
1215 if (drm_has_cmdline_mode(fb_helper_conn))
1216 my_score++;
1217 if (drm_has_preferred_mode(fb_helper_conn, width, height))
1218 my_score++;
1219
1220 connector_funcs = connector->helper_private;
1221 encoder = connector_funcs->best_encoder(connector);
1222 if (!encoder)
1223 goto out;
1224
1225 /* select a crtc for this connector and then attempt to configure
1226 remaining connectors */
1227 for (c = 0; c < fb_helper->crtc_count; c++) {
1228 crtc = &fb_helper->crtc_info[c];
1229
1230 if ((encoder->possible_crtcs & (1 << c)) == 0) {
1231 continue;
1232 }
1233
1234 for (o = 0; o < n; o++)
1235 if (best_crtcs[o] == crtc)
1236 break;
1237
1238 if (o < n) {
1239 /* ignore cloning unless only a single crtc */
1240 if (fb_helper->crtc_count > 1)
1241 continue;
1242
1243 if (!drm_mode_equal(modes[o], modes[n]))
1244 continue;
1245 }
1246
1247 crtcs[n] = crtc;
1248 memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
1249 score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
1250 width, height);
1251 if (score > best_score) {
1252 best_crtc = crtc;
1253 best_score = score;
1254 memcpy(best_crtcs, crtcs,
1255 dev->mode_config.num_connector *
1256 sizeof(struct drm_fb_helper_crtc *));
1257 }
1258 }
1259out:
1260 kfree(crtcs);
1261 return best_score;
1262}
1263
1264static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1265{
1266 struct drm_device *dev = fb_helper->dev;
1267 struct drm_fb_helper_crtc **crtcs;
1268 struct drm_display_mode **modes;
1269 struct drm_encoder *encoder;
1270 struct drm_mode_set *modeset;
1271 bool *enabled;
1272 int width, height;
1273 int i, ret;
1274
1275 DRM_DEBUG_KMS("\n");
1276
1277 width = dev->mode_config.max_width;
1278 height = dev->mode_config.max_height;
1279
1280 /* clean out all the encoder/crtc combos */
1281 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1282 encoder->crtc = NULL;
1283 }
1284
1285 crtcs = kcalloc(dev->mode_config.num_connector,
1286 sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
1287 modes = kcalloc(dev->mode_config.num_connector,
1288 sizeof(struct drm_display_mode *), GFP_KERNEL);
1289 enabled = kcalloc(dev->mode_config.num_connector,
1290 sizeof(bool), GFP_KERNEL);
1291
1292 drm_enable_connectors(fb_helper, enabled);
1293
1294 ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
1295 if (!ret) {
1296 ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
1297 if (!ret)
1298 DRM_ERROR("Unable to find initial modes\n");
1299 }
1300
1301 DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
1302
1303 drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
1304
1305 /* need to set the modesets up here for use later */
1306 /* fill out the connector<->crtc mappings into the modesets */
1307 for (i = 0; i < fb_helper->crtc_count; i++) {
1308 modeset = &fb_helper->crtc_info[i].mode_set;
1309 modeset->num_connectors = 0;
1310 }
1311
1312 for (i = 0; i < fb_helper->connector_count; i++) {
1313 struct drm_display_mode *mode = modes[i];
1314 struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
1315 modeset = &fb_crtc->mode_set;
1316
1317 if (mode && fb_crtc) {
1318 DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
1319 mode->name, fb_crtc->mode_set.crtc->base.id);
1320 fb_crtc->desired_mode = mode;
1321 if (modeset->mode)
1322 drm_mode_destroy(dev, modeset->mode);
1323 modeset->mode = drm_mode_duplicate(dev,
1324 fb_crtc->desired_mode);
1325 modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
1326 }
1327 }
1328
1329 kfree(crtcs);
1330 kfree(modes);
1331 kfree(enabled);
1332}
1333
1334/**
1335 * drm_fb_helper_initial_config - setup a sane initial connector configuration
1336 * @fb_helper: fb_helper device struct
1337 *
1338 * LOCKING:
1339 * Called at init time, must take mode config lock.
1340 *
1341 * Scan the CRTCs and connectors and try to put together an initial setup.
1342 * At the moment, this is a cloned configuration across all heads with
1343 * a new framebuffer object as the backing store.
1344 *
1345 * RETURNS:
1346 * Zero if everything went ok, nonzero otherwise.
1347 */
1348bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
1349{
1350 struct drm_device *dev = fb_helper->dev;
1351 int count = 0;
1352
1353 /* disable all the possible outputs/crtcs before entering KMS mode */
1354 drm_helper_disable_unused_functions(fb_helper->dev);
1355
1356 drm_fb_helper_parse_command_line(fb_helper);
1357
1358 count = drm_fb_helper_probe_connector_modes(fb_helper,
1359 dev->mode_config.max_width,
1360 dev->mode_config.max_height);
1361 /*
1362 * we shouldn't end up with no modes here.
1363 */
1364 if (count == 0) {
1365 printk(KERN_INFO "No connectors reported connected with modes\n");
1366 }
1367 drm_setup_crtcs(fb_helper);
1368
1369 return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
1370}
1371EXPORT_SYMBOL(drm_fb_helper_initial_config);
1372
1373bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1374{
1375 int count = 0;
1376 u32 max_width, max_height, bpp_sel;
1377 bool bound = false, crtcs_bound = false;
1378 struct drm_crtc *crtc;
1379
1380 if (!fb_helper->fb)
1381 return false;
1382
1383 list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) {
1384 if (crtc->fb)
1385 crtcs_bound = true;
1386 if (crtc->fb == fb_helper->fb)
1387 bound = true;
1388 }
1389
1390 if (!bound && crtcs_bound) {
1391 fb_helper->delayed_hotplug = true;
1392 return false;
1393 }
1394 DRM_DEBUG_KMS("\n");
1395
1396 max_width = fb_helper->fb->width;
1397 max_height = fb_helper->fb->height;
1398 bpp_sel = fb_helper->fb->bits_per_pixel;
1399
1400 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
1401 max_height);
1402 drm_setup_crtcs(fb_helper);
1403
1404 return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
1405}
1406EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
1407
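Taken together, the reworked entry points give a driver a bring-up sequence along these lines (a sketch; connector registration goes through drm_fb_helper_single_add_all_connectors elsewhere in this patch, not shown in this excerpt, and my_fbdev is an invented name):

	drm_fb_helper_init(dev, &my_fbdev->helper,
			   dev->mode_config.num_crtc, 4);
	drm_fb_helper_single_add_all_connectors(&my_fbdev->helper);
	drm_fb_helper_initial_config(&my_fbdev->helper, 32 /* bpp_sel */);

	/* later, from the driver's hotplug interrupt or output-poll path: */
	drm_fb_helper_hotplug_event(&my_fbdev->helper);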
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 9d532d7fdf59..e7aace20981f 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -243,11 +243,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
243 243
244 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); 244 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
245 245
246 priv = kmalloc(sizeof(*priv), GFP_KERNEL); 246 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
247 if (!priv) 247 if (!priv)
248 return -ENOMEM; 248 return -ENOMEM;
249 249
250 memset(priv, 0, sizeof(*priv));
251 filp->private_data = priv; 250 filp->private_data = priv;
252 priv->filp = filp; 251 priv->filp = filp;
253 priv->uid = current_euid(); 252 priv->uid = current_euid();
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index aa89d4b0b4c4..33dad3fa6043 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -124,6 +124,31 @@ drm_gem_destroy(struct drm_device *dev)
124} 124}
125 125
126/** 126/**
127 * Initialize an already allocated GEM object of the specified size with
128 * shmfs backing store.
129 */
130int drm_gem_object_init(struct drm_device *dev,
131 struct drm_gem_object *obj, size_t size)
132{
133 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
134
135 obj->dev = dev;
136 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
137 if (IS_ERR(obj->filp))
138 return -ENOMEM;
139
140 kref_init(&obj->refcount);
141 kref_init(&obj->handlecount);
142 obj->size = size;
143
144 atomic_inc(&dev->object_count);
145 atomic_add(obj->size, &dev->object_memory);
146
147 return 0;
148}
149EXPORT_SYMBOL(drm_gem_object_init);
150
151/**
127 * Allocate a GEM object of the specified size with shmfs backing store 152 * Allocate a GEM object of the specified size with shmfs backing store
128 */ 153 */
129struct drm_gem_object * 154struct drm_gem_object *
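Splitting drm_gem_object_init out of the allocator lets drivers embed struct drm_gem_object in their own object rather than allocate it separately; the i915 hunks later in this series rely on exactly that (obj_priv->base). A hedged sketch of the embedding pattern, with my_* names invented:

	struct my_gem_object {
		struct drm_gem_object base;
		/* driver-private state follows */
	};

	static struct my_gem_object *my_gem_alloc(struct drm_device *dev,
						  size_t size)
	{
		struct my_gem_object *obj;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj)
			return NULL;

		if (drm_gem_object_init(dev, &obj->base, size) != 0) {
			kfree(obj);
			return NULL;
		}
		return obj;
	}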
@@ -131,28 +156,22 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
131{ 156{
132 struct drm_gem_object *obj; 157 struct drm_gem_object *obj;
133 158
134 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
135
136 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 159 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
137 if (!obj) 160 if (!obj)
138 goto free; 161 goto free;
139 162
140 obj->dev = dev; 163 if (drm_gem_object_init(dev, obj, size) != 0)
141 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
142 if (IS_ERR(obj->filp))
143 goto free; 164 goto free;
144 165
145 kref_init(&obj->refcount);
146 kref_init(&obj->handlecount);
147 obj->size = size;
148 if (dev->driver->gem_init_object != NULL && 166 if (dev->driver->gem_init_object != NULL &&
149 dev->driver->gem_init_object(obj) != 0) { 167 dev->driver->gem_init_object(obj) != 0) {
150 goto fput; 168 goto fput;
151 } 169 }
152 atomic_inc(&dev->object_count);
153 atomic_add(obj->size, &dev->object_memory);
154 return obj; 170 return obj;
155fput: 171fput:
172 /* Object_init mangles the global counters - readjust them. */
173 atomic_dec(&dev->object_count);
174 atomic_sub(obj->size, &dev->object_memory);
156 fput(obj->filp); 175 fput(obj->filp);
157free: 176free:
158 kfree(obj); 177 kfree(obj);
@@ -403,15 +422,15 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
403 idr_destroy(&file_private->object_idr); 422 idr_destroy(&file_private->object_idr);
404} 423}
405 424
406static void 425void
407drm_gem_object_free_common(struct drm_gem_object *obj) 426drm_gem_object_release(struct drm_gem_object *obj)
408{ 427{
409 struct drm_device *dev = obj->dev; 428 struct drm_device *dev = obj->dev;
410 fput(obj->filp); 429 fput(obj->filp);
411 atomic_dec(&dev->object_count); 430 atomic_dec(&dev->object_count);
412 atomic_sub(obj->size, &dev->object_memory); 431 atomic_sub(obj->size, &dev->object_memory);
413 kfree(obj);
414} 432}
433EXPORT_SYMBOL(drm_gem_object_release);
415 434
416/** 435/**
417 * Called after the last reference to the object has been lost. 436 * Called after the last reference to the object has been lost.
@@ -429,8 +448,6 @@ drm_gem_object_free(struct kref *kref)
429 448
430 if (dev->driver->gem_free_object != NULL) 449 if (dev->driver->gem_free_object != NULL)
431 dev->driver->gem_free_object(obj); 450 dev->driver->gem_free_object(obj);
432
433 drm_gem_object_free_common(obj);
434} 451}
435EXPORT_SYMBOL(drm_gem_object_free); 452EXPORT_SYMBOL(drm_gem_object_free);
436 453
@@ -453,8 +470,6 @@ drm_gem_object_free_unlocked(struct kref *kref)
453 dev->driver->gem_free_object(obj); 470 dev->driver->gem_free_object(obj);
454 mutex_unlock(&dev->struct_mutex); 471 mutex_unlock(&dev->struct_mutex);
455 } 472 }
456
457 drm_gem_object_free_common(obj);
458} 473}
459EXPORT_SYMBOL(drm_gem_object_free_unlocked); 474EXPORT_SYMBOL(drm_gem_object_free_unlocked);
460 475
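Because drm_gem_object_free_common is gone and drm_gem_object_release deliberately no longer kfrees, each driver's gem_free_object must now release and free the object itself -- which is what makes the embedded-object scheme sketched above workable. The counterpart sketch:

	static void my_gem_free_object(struct drm_gem_object *gem)
	{
		struct my_gem_object *obj =
			container_of(gem, struct my_gem_object, base);

		/* tear down driver-private state first, then: */
		drm_gem_object_release(gem);	/* fput + accounting, no kfree */
		kfree(obj);
	}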
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 76d63394c776..f1f473ea97d3 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -258,8 +258,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
258 drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP; 258 drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
259 /* 18/16. Find actual vertical frame frequency */ 259 /* 18/16. Find actual vertical frame frequency */
260 /* ignore - just set the mode flag for interlaced */ 260 /* ignore - just set the mode flag for interlaced */
261 if (interlaced) 261 if (interlaced) {
262 drm_mode->vtotal *= 2; 262 drm_mode->vtotal *= 2;
263 drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
264 }
263 /* Fill the mode line name */ 265 /* Fill the mode line name */
264 drm_mode_set_name(drm_mode); 266 drm_mode_set_name(drm_mode);
265 if (reduced) 267 if (reduced)
@@ -268,43 +270,35 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
268 else 270 else
269 drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC | 271 drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
270 DRM_MODE_FLAG_NHSYNC); 272 DRM_MODE_FLAG_NHSYNC);
271 if (interlaced)
272 drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
273 273
274 return drm_mode; 274 return drm_mode;
275} 275}
276EXPORT_SYMBOL(drm_cvt_mode); 276EXPORT_SYMBOL(drm_cvt_mode);
277 277
278/** 278/**
279 * drm_gtf_mode - create the modeline based on GTF algorithm 279 * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
280 * 280 *
281 * @dev :drm device 281 * @dev :drm device
282 * @hdisplay :hdisplay size 282 * @hdisplay :hdisplay size
283 * @vdisplay :vdisplay size 283 * @vdisplay :vdisplay size
284 * @vrefresh :vrefresh rate. 284 * @vrefresh :vrefresh rate.
285 * @interlaced :whether the interlace is supported 285 * @interlaced :whether the interlace is supported
286 * @margins :whether the margin is supported 286 * @margins :desired margin size
287 * @GTF_[MCKJ] :extended GTF formula parameters
287 * 288 *
288 * LOCKING. 289 * LOCKING.
289 * none. 290 * none.
290 * 291 *
291 * return the modeline based on GTF algorithm 292 * return the modeline based on full GTF algorithm.
292 *
293 * This function is to create the modeline based on the GTF algorithm.
294 * Generalized Timing Formula is derived from:
295 * GTF Spreadsheet by Andy Morrish (1/5/97)
296 * available at http://www.vesa.org
297 * 293 *
298 * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c. 294 * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
299 * What I have done is to translate it by using integer calculation. 295 * in here multiplied by two. For a C of 40, pass in 80.
300 * I also refer to the function of fb_get_mode in the file of
301 * drivers/video/fbmon.c
302 */ 296 */
303struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, 297struct drm_display_mode *
304 int vdisplay, int vrefresh, 298drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
305 bool interlaced, int margins) 299 int vrefresh, bool interlaced, int margins,
306{ 300 int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
307 /* 1) top/bottom margin size (% of height) - default: 1.8, */ 301{ /* 1) top/bottom margin size (% of height) - default: 1.8, */
308#define GTF_MARGIN_PERCENTAGE 18 302#define GTF_MARGIN_PERCENTAGE 18
309 /* 2) character cell horizontal granularity (pixels) - default 8 */ 303 /* 2) character cell horizontal granularity (pixels) - default 8 */
310#define GTF_CELL_GRAN 8 304#define GTF_CELL_GRAN 8
@@ -316,17 +310,9 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
316#define H_SYNC_PERCENT 8 310#define H_SYNC_PERCENT 8
317 /* min time of vsync + back porch (microsec) */ 311 /* min time of vsync + back porch (microsec) */
318#define MIN_VSYNC_PLUS_BP 550 312#define MIN_VSYNC_PLUS_BP 550
319 /* blanking formula gradient */
320#define GTF_M 600
321 /* blanking formula offset */
322#define GTF_C 40
323 /* blanking formula scaling factor */
324#define GTF_K 128
325 /* blanking formula scaling factor */
326#define GTF_J 20
327 /* C' and M' are part of the Blanking Duty Cycle computation */ 313 /* C' and M' are part of the Blanking Duty Cycle computation */
328#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J) 314#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
329#define GTF_M_PRIME (GTF_K * GTF_M / 256) 315#define GTF_M_PRIME (GTF_K * GTF_M / 256)
330 struct drm_display_mode *drm_mode; 316 struct drm_display_mode *drm_mode;
331 unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd; 317 unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
332 int top_margin, bottom_margin; 318 int top_margin, bottom_margin;
@@ -460,17 +446,61 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
460 446
461 drm_mode->clock = pixel_freq; 447 drm_mode->clock = pixel_freq;
462 448
463 drm_mode_set_name(drm_mode);
464 drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
465
466 if (interlaced) { 449 if (interlaced) {
467 drm_mode->vtotal *= 2; 450 drm_mode->vtotal *= 2;
468 drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; 451 drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
469 } 452 }
470 453
454 drm_mode_set_name(drm_mode);
455 if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
456 drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
457 else
458 drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
459
471 return drm_mode; 460 return drm_mode;
472} 461}
462EXPORT_SYMBOL(drm_gtf_mode_complex);
463
464/**
465 * drm_gtf_mode - create the modeline based on GTF algorithm
466 *
467 * @dev :drm device
468 * @hdisplay :hdisplay size
469 * @vdisplay :vdisplay size
470 * @vrefresh :vrefresh rate.
471 * @interlaced :whether the interlace is supported
472 * @margins :whether the margin is supported
473 *
474 * LOCKING.
475 * none.
476 *
477 * return the modeline based on GTF algorithm
478 *
479 * This function is to create the modeline based on the GTF algorithm.
480 * Generalized Timing Formula is derived from:
481 * GTF Spreadsheet by Andy Morrish (1/5/97)
482 * available at http://www.vesa.org
483 *
484 * It is adapted from xserver/hw/xfree86/modes/xf86gtf.c,
485 * translated to use integer calculation.
486 * It also refers to the fb_get_mode function in
487 * drivers/video/fbmon.c
488 *
489 * Standard GTF parameters:
490 * M = 600
491 * C = 40
492 * K = 128
493 * J = 20
494 */
495struct drm_display_mode *
496drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
497 bool lace, int margins)
498{
499 return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
500 margins, 600, 40 * 2, 128, 20 * 2);
501}
473EXPORT_SYMBOL(drm_gtf_mode); 502EXPORT_SYMBOL(drm_gtf_mode);
503
474/** 504/**
475 * drm_mode_set_name - set the name on a mode 505 * drm_mode_set_name - set the name on a mode
476 * @mode: name will be set in this mode 506 * @mode: name will be set in this mode
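Since C and J now travel doubled, the plain drm_gtf_mode wrapper and an explicit drm_gtf_mode_complex call with the standard parameters are interchangeable; for example:

	/* these two calls produce the same 1280x1024@60 modeline: */
	mode  = drm_gtf_mode(dev, 1280, 1024, 60, false, 0);
	mode2 = drm_gtf_mode_complex(dev, 1280, 1024, 60, false, 0,
				     600, 40 * 2, 128, 20 * 2);

A non-default parameter set (e.g. from an EDID secondary-GTF descriptor) instead gets the PHSYNC/NVSYNC polarity, per the flag selection in the hunk above.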
@@ -482,8 +512,11 @@ EXPORT_SYMBOL(drm_gtf_mode);
482 */ 512 */
483void drm_mode_set_name(struct drm_display_mode *mode) 513void drm_mode_set_name(struct drm_display_mode *mode)
484{ 514{
485 snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay, 515 bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
486 mode->vdisplay); 516
517 snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
518 mode->hdisplay, mode->vdisplay,
519 interlaced ? "i" : "");
487} 520}
488EXPORT_SYMBOL(drm_mode_set_name); 521EXPORT_SYMBOL(drm_mode_set_name);
489 522
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 387166d5a109..101d381e9d86 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -334,7 +334,7 @@ static struct device_attribute connector_attrs_opt1[] = {
334static struct bin_attribute edid_attr = { 334static struct bin_attribute edid_attr = {
335 .attr.name = "edid", 335 .attr.name = "edid",
336 .attr.mode = 0444, 336 .attr.mode = 0444,
337 .size = 128, 337 .size = 0,
338 .read = edid_show, 338 .read = edid_show,
339}; 339};
340 340
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9929f84ec3e1..95639017bdbe 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,3 +33,5 @@ i915-$(CONFIG_ACPI) += i915_opregion.o
33i915-$(CONFIG_COMPAT) += i915_ioc32.o 33i915-$(CONFIG_COMPAT) += i915_ioc32.o
34 34
35obj-$(CONFIG_DRM_I915) += i915.o 35obj-$(CONFIG_DRM_I915) += i915.o
36
37CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 288fc50627e2..0d6ff640e1c6 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -70,16 +70,6 @@ struct intel_dvo_dev_ops {
70 void (*dpms)(struct intel_dvo_device *dvo, int mode); 70 void (*dpms)(struct intel_dvo_device *dvo, int mode);
71 71
72 /* 72 /*
73 * Saves the output's state for restoration on VT switch.
74 */
75 void (*save)(struct intel_dvo_device *dvo);
76
77 /*
78 * Restore's the output's state at VT switch.
79 */
80 void (*restore)(struct intel_dvo_device *dvo);
81
82 /*
83 * Callback for testing a video mode for a given output. 73 * Callback for testing a video mode for a given output.
84 * 74 *
85 * This function should only check for cases where a mode can't 75 * This function should only check for cases where a mode can't
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1184c14ba87d..14d59804acd7 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -159,16 +159,7 @@
159#define CH7017_BANG_LIMIT_CONTROL 0x7f 159#define CH7017_BANG_LIMIT_CONTROL 0x7f
160 160
161struct ch7017_priv { 161struct ch7017_priv {
162 uint8_t save_hapi; 162 uint8_t dummy;
163 uint8_t save_vali;
164 uint8_t save_valo;
165 uint8_t save_ailo;
166 uint8_t save_lvds_pll_vco;
167 uint8_t save_feedback_div;
168 uint8_t save_lvds_control_2;
169 uint8_t save_outputs_enable;
170 uint8_t save_lvds_power_down;
171 uint8_t save_power_management;
172}; 163};
173 164
174static void ch7017_dump_regs(struct intel_dvo_device *dvo); 165static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@@ -401,39 +392,6 @@ do { \
401 DUMP(CH7017_LVDS_POWER_DOWN); 392 DUMP(CH7017_LVDS_POWER_DOWN);
402} 393}
403 394
404static void ch7017_save(struct intel_dvo_device *dvo)
405{
406 struct ch7017_priv *priv = dvo->dev_priv;
407
408 ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
409 ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
410 ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
411 ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
412 ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
413 ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
414 ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
415 ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
416 ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
417}
418
419static void ch7017_restore(struct intel_dvo_device *dvo)
420{
421 struct ch7017_priv *priv = dvo->dev_priv;
422
423 /* Power down before changing mode */
424 ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
425
426 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
427 ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
428 ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
429 ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
430 ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
431 ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
432 ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
433 ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
434 ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
435}
436
437static void ch7017_destroy(struct intel_dvo_device *dvo) 395static void ch7017_destroy(struct intel_dvo_device *dvo)
438{ 396{
439 struct ch7017_priv *priv = dvo->dev_priv; 397 struct ch7017_priv *priv = dvo->dev_priv;
@@ -451,7 +409,5 @@ struct intel_dvo_dev_ops ch7017_ops = {
451 .mode_set = ch7017_mode_set, 409 .mode_set = ch7017_mode_set,
452 .dpms = ch7017_dpms, 410 .dpms = ch7017_dpms,
453 .dump_regs = ch7017_dump_regs, 411 .dump_regs = ch7017_dump_regs,
454 .save = ch7017_save,
455 .restore = ch7017_restore,
456 .destroy = ch7017_destroy, 412 .destroy = ch7017_destroy,
457}; 413};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d56ff5cc22b2..6f1944b24441 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -92,21 +92,10 @@ static struct ch7xxx_id_struct {
92 { CH7301_VID, "CH7301" }, 92 { CH7301_VID, "CH7301" },
93}; 93};
94 94
95struct ch7xxx_reg_state {
96 uint8_t regs[CH7xxx_NUM_REGS];
97};
98
99struct ch7xxx_priv { 95struct ch7xxx_priv {
100 bool quiet; 96 bool quiet;
101
102 struct ch7xxx_reg_state save_reg;
103 struct ch7xxx_reg_state mode_reg;
104 uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
105 uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
106}; 97};
107 98
108static void ch7xxx_save(struct intel_dvo_device *dvo);
109
110static char *ch7xxx_get_id(uint8_t vid) 99static char *ch7xxx_get_id(uint8_t vid)
111{ 100{
112 int i; 101 int i;
@@ -312,42 +301,17 @@ static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
312 301
313static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) 302static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
314{ 303{
315 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
316 int i; 304 int i;
317 305
318 for (i = 0; i < CH7xxx_NUM_REGS; i++) { 306 for (i = 0; i < CH7xxx_NUM_REGS; i++) {
307 uint8_t val;
319 if ((i % 8) == 0 ) 308 if ((i % 8) == 0 )
320 DRM_LOG_KMS("\n %02X: ", i); 309 DRM_LOG_KMS("\n %02X: ", i);
321 DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]); 310 ch7xxx_readb(dvo, i, &val);
311 DRM_LOG_KMS("%02X ", val);
322 } 312 }
323} 313}
324 314
325static void ch7xxx_save(struct intel_dvo_device *dvo)
326{
327 struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
328
329 ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
330 ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
331 ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
332 ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
333 ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
334 ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
335 ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
336}
337
338static void ch7xxx_restore(struct intel_dvo_device *dvo)
339{
340 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
341
342 ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
343 ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
344 ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
345 ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
346 ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
347 ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
348 ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
349}
350
351static void ch7xxx_destroy(struct intel_dvo_device *dvo) 315static void ch7xxx_destroy(struct intel_dvo_device *dvo)
352{ 316{
353 struct ch7xxx_priv *ch7xxx = dvo->dev_priv; 317 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
@@ -365,7 +329,5 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
365 .mode_set = ch7xxx_mode_set, 329 .mode_set = ch7xxx_mode_set,
366 .dpms = ch7xxx_dpms, 330 .dpms = ch7xxx_dpms,
367 .dump_regs = ch7xxx_dump_regs, 331 .dump_regs = ch7xxx_dump_regs,
368 .save = ch7xxx_save,
369 .restore = ch7xxx_restore,
370 .destroy = ch7xxx_destroy, 332 .destroy = ch7xxx_destroy,
371}; 333};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 24169e528f0f..a2ec3f487202 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -153,9 +153,6 @@ struct ivch_priv {
153 bool quiet; 153 bool quiet;
154 154
155 uint16_t width, height; 155 uint16_t width, height;
156
157 uint16_t save_VR01;
158 uint16_t save_VR40;
159}; 156};
160 157
161 158
@@ -405,22 +402,6 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
405 DRM_LOG_KMS("VR8F: 0x%04x\n", val); 402 DRM_LOG_KMS("VR8F: 0x%04x\n", val);
406} 403}
407 404
408static void ivch_save(struct intel_dvo_device *dvo)
409{
410 struct ivch_priv *priv = dvo->dev_priv;
411
412 ivch_read(dvo, VR01, &priv->save_VR01);
413 ivch_read(dvo, VR40, &priv->save_VR40);
414}
415
416static void ivch_restore(struct intel_dvo_device *dvo)
417{
418 struct ivch_priv *priv = dvo->dev_priv;
419
420 ivch_write(dvo, VR01, priv->save_VR01);
421 ivch_write(dvo, VR40, priv->save_VR40);
422}
423
424static void ivch_destroy(struct intel_dvo_device *dvo) 405static void ivch_destroy(struct intel_dvo_device *dvo)
425{ 406{
426 struct ivch_priv *priv = dvo->dev_priv; 407 struct ivch_priv *priv = dvo->dev_priv;
@@ -434,8 +415,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
434struct intel_dvo_dev_ops ivch_ops= { 415struct intel_dvo_dev_ops ivch_ops= {
435 .init = ivch_init, 416 .init = ivch_init,
436 .dpms = ivch_dpms, 417 .dpms = ivch_dpms,
437 .save = ivch_save,
438 .restore = ivch_restore,
439 .mode_valid = ivch_mode_valid, 418 .mode_valid = ivch_mode_valid,
440 .mode_set = ivch_mode_set, 419 .mode_set = ivch_mode_set,
441 .detect = ivch_detect, 420 .detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 0001c13f0a80..9b8e6765cf26 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
58 58
59#define SIL164_REGC 0x0c 59#define SIL164_REGC 0x0c
60 60
61struct sil164_save_rec {
62 uint8_t reg8;
63 uint8_t reg9;
64 uint8_t regc;
65};
66
67struct sil164_priv { 61struct sil164_priv {
68 //I2CDevRec d; 62 //I2CDevRec d;
69 bool quiet; 63 bool quiet;
70 struct sil164_save_rec save_regs;
71 struct sil164_save_rec mode_regs;
72}; 64};
73 65
74#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) 66#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
@@ -252,34 +244,6 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
252 DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val); 244 DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
253} 245}
254 246
255static void sil164_save(struct intel_dvo_device *dvo)
256{
257 struct sil164_priv *sil= dvo->dev_priv;
258
259 if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
260 return;
261
262 if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
263 return;
264
265 if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
266 return;
267
268 return;
269}
270
271static void sil164_restore(struct intel_dvo_device *dvo)
272{
273 struct sil164_priv *sil = dvo->dev_priv;
274
275 /* Restore it powered down initially */
276 sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
277
278 sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
279 sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
280 sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
281}
282
283static void sil164_destroy(struct intel_dvo_device *dvo) 247static void sil164_destroy(struct intel_dvo_device *dvo)
284{ 248{
285 struct sil164_priv *sil = dvo->dev_priv; 249 struct sil164_priv *sil = dvo->dev_priv;
@@ -297,7 +261,5 @@ struct intel_dvo_dev_ops sil164_ops = {
297 .mode_set = sil164_mode_set, 261 .mode_set = sil164_mode_set,
298 .dpms = sil164_dpms, 262 .dpms = sil164_dpms,
299 .dump_regs = sil164_dump_regs, 263 .dump_regs = sil164_dump_regs,
300 .save = sil164_save,
301 .restore = sil164_restore,
302 .destroy = sil164_destroy, 264 .destroy = sil164_destroy,
303}; 265};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index c7c391bc116a..66c697bc9b22 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -86,16 +86,8 @@
 #define TFP410_V_RES_LO		0x3C
 #define TFP410_V_RES_HI		0x3D
 
-struct tfp410_save_rec {
-	uint8_t ctl1;
-	uint8_t ctl2;
-};
-
 struct tfp410_priv {
 	bool quiet;
-
-	struct tfp410_save_rec saved_reg;
-	struct tfp410_save_rec mode_reg;
 };
 
 static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
@@ -293,28 +285,6 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
 	DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
 }
 
-static void tfp410_save(struct intel_dvo_device *dvo)
-{
-	struct tfp410_priv *tfp = dvo->dev_priv;
-
-	if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
-		return;
-
-	if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
-		return;
-}
-
-static void tfp410_restore(struct intel_dvo_device *dvo)
-{
-	struct tfp410_priv *tfp = dvo->dev_priv;
-
-	/* Restore it powered down initially */
-	tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
-
-	tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
-	tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
-}
-
 static void tfp410_destroy(struct intel_dvo_device *dvo)
 {
 	struct tfp410_priv *tfp = dvo->dev_priv;
@@ -332,7 +302,5 @@ struct intel_dvo_dev_ops tfp410_ops = {
 	.mode_set = tfp410_mode_set,
 	.dpms = tfp410_dpms,
 	.dump_regs = tfp410_dump_regs,
-	.save = tfp410_save,
-	.restore = tfp410_restore,
 	.destroy = tfp410_destroy,
 };
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a0b8447b06e7..322070c0c631 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,19 +96,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	spin_lock(lock);
 	list_for_each_entry(obj_priv, head, list)
 	{
-		struct drm_gem_object *obj = obj_priv->obj;
-
 		seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
-			   obj,
+			   &obj_priv->base,
 			   get_pin_flag(obj_priv),
-			   obj->size,
-			   obj->read_domains, obj->write_domain,
+			   obj_priv->base.size,
+			   obj_priv->base.read_domains,
+			   obj_priv->base.write_domain,
 			   obj_priv->last_rendering_seqno,
 			   obj_priv->dirty ? " dirty" : "",
 			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 
-		if (obj->name)
-			seq_printf(m, " (name: %d)", obj->name);
+		if (obj_priv->base.name)
+			seq_printf(m, " (name: %d)", obj_priv->base.name);
 		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
 			seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
 		if (obj_priv->gtt_space != NULL)
@@ -289,7 +288,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	spin_lock(&dev_priv->mm.active_list_lock);
 
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-		obj = obj_priv->obj;
+		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
 			ret = i915_gem_object_get_pages(obj, 0);
 			if (ret) {
@@ -567,23 +566,14 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_crtc *crtc;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	bool fbc_enabled = false;
 
-	if (!dev_priv->display.fbc_enabled) {
+	if (!I915_HAS_FBC(dev)) {
 		seq_printf(m, "FBC unsupported on this chipset\n");
 		return 0;
 	}
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		if (!crtc->enabled)
-			continue;
-		if (dev_priv->display.fbc_enabled(crtc))
-			fbc_enabled = true;
-	}
-
-	if (fbc_enabled) {
+	if (intel_fbc_enabled(dev)) {
 		seq_printf(m, "FBC enabled\n");
 	} else {
 		seq_printf(m, "FBC disabled: ");
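
The debugfs hunk above stops walking every CRTC and instead asks one device-level question via intel_fbc_enabled(dev). A minimal userspace analogue of that shape, assuming an optional per-chipset callback behind a device-wide query; all names here are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

/* Per-chipset display hooks; fbc_enabled may be NULL on hardware
 * without framebuffer compression. */
struct display_funcs {
	bool (*fbc_enabled)(void *dev);
};

struct device {
	struct display_funcs display;
};

static bool g4x_fbc_enabled(void *dev)
{
	(void)dev;
	return true;	/* stand-in for reading an FBC control register */
}

/* One device-level query replaces the old loop over every CRTC. */
static bool fbc_enabled(struct device *dev)
{
	if (!dev->display.fbc_enabled)
		return false;
	return dev->display.fbc_enabled(dev);
}

int main(void)
{
	struct device dev = { .display = { .fbc_enabled = g4x_fbc_enabled } };
	printf("FBC %s\n", fbc_enabled(&dev) ? "enabled" : "disabled");
	return 0;
}
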
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c3cfafcbfe7d..2a6b5de5ae5d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1357,13 +1357,12 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 
 	dev_priv->cfb_size = size;
 
+	intel_disable_fbc(dev);
 	dev_priv->compressed_fb = compressed_fb;
 
 	if (IS_GM45(dev)) {
-		g4x_disable_fbc(dev);
 		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
 	} else {
-		i8xx_disable_fbc(dev);
 		I915_WRITE(FBC_CFB_BASE, cfb_base);
 		I915_WRITE(FBC_LL_BASE, ll_base);
 		dev_priv->compressed_llb = compressed_llb;
@@ -1504,8 +1503,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
 
 	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
 
-	drm_helper_initial_config(dev);
-
+	intel_fbdev_init(dev);
+	drm_kms_helper_poll_init(dev);
 	return 0;
 
 destroy_ringbuffer:
@@ -1591,7 +1590,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
  */
 int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv;
 	resource_size_t base, size;
 	int ret = 0, mmio_bar;
 	uint32_t agp_size, prealloc_size, prealloc_start;
@@ -1723,6 +1722,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	/* Start out suspended */
 	dev_priv->mm.suspended = 1;
 
+	intel_detect_pch(dev);
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		ret = i915_load_modeset_init(dev, prealloc_start,
 					     prealloc_size, agp_size);
@@ -1769,6 +1770,8 @@ int i915_driver_unload(struct drm_device *dev)
 	}
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		intel_modeset_cleanup(dev);
+
 		/*
 		 * free the memory space allocated for the child device
 		 * config parsed from VBT
@@ -1792,8 +1795,6 @@ int i915_driver_unload(struct drm_device *dev)
 	intel_opregion_free(dev, 0);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		intel_modeset_cleanup(dev);
-
 		i915_gem_free_all_phys_object(dev);
 
 		mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cc03537bb883..5c51e45ab68d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -188,6 +188,35 @@ const static struct pci_device_id pciidlist[] = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
+#define INTEL_PCH_DEVICE_ID_MASK	0xff00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
+
+void intel_detect_pch (struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct pci_dev *pch;
+
+	/*
+	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
+	 * make graphics device passthrough work easy for VMM, that only
+	 * need to expose ISA bridge to let driver know the real hardware
+	 * underneath. This is a requirement from virtualization team.
+	 */
+	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+	if (pch) {
+		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+			int id;
+			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+			if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_CPT;
+				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+			}
+		}
+		pci_dev_put(pch);
+	}
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6e4790065d9e..7f797ef1ab39 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -128,6 +128,7 @@ struct drm_i915_master_private {
 
 struct drm_i915_fence_reg {
 	struct drm_gem_object *obj;
+	struct list_head lru_list;
 };
 
 struct sdvo_device_mapping {
@@ -135,6 +136,7 @@ struct sdvo_device_mapping {
 	u8 slave_addr;
 	u8 dvo_wiring;
 	u8 initialized;
+	u8 ddc_pin;
 };
 
 struct drm_i915_error_state {
@@ -175,7 +177,7 @@ struct drm_i915_error_state {
 
 struct drm_i915_display_funcs {
 	void (*dpms)(struct drm_crtc *crtc, int mode);
-	bool (*fbc_enabled)(struct drm_crtc *crtc);
+	bool (*fbc_enabled)(struct drm_device *dev);
 	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
 	void (*disable_fbc)(struct drm_device *dev);
 	int (*get_display_clock_speed)(struct drm_device *dev);
@@ -222,6 +224,13 @@ enum no_fbc_reason {
 	FBC_NOT_TILED, /* buffer not tiled */
 };
 
+enum intel_pch {
+	PCH_IBX,	/* Ibexpeak PCH */
+	PCH_CPT,	/* Cougarpoint PCH */
+};
+
+struct intel_fbdev;
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 
@@ -335,6 +344,9 @@ typedef struct drm_i915_private {
 	/* Display functions */
 	struct drm_i915_display_funcs display;
 
+	/* PCH chipset type */
+	enum intel_pch pch_type;
+
 	/* Register state */
 	bool modeset_on_lid;
 	u8 saveLBB;
@@ -637,11 +649,14 @@ typedef struct drm_i915_private {
 
 	struct drm_mm_node *compressed_fb;
 	struct drm_mm_node *compressed_llb;
+
+	/* list of fbdev register on this device */
+	struct intel_fbdev *fbdev;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
 struct drm_i915_gem_object {
-	struct drm_gem_object *obj;
+	struct drm_gem_object base;
 
 	/** Current space allocated to this object in the GTT, if any. */
 	struct drm_mm_node *gtt_space;
@@ -651,9 +666,6 @@ struct drm_i915_gem_object {
 	/** This object's place on GPU write list */
 	struct list_head gpu_write_list;
 
-	/** This object's place on the fenced object LRU */
-	struct list_head fence_list;
-
 	/**
 	 * This is set if the object is on the active or flushing lists
 	 * (has pending rendering), and is not set if it's on inactive (ready
@@ -740,7 +752,7 @@ struct drm_i915_gem_object {
 	atomic_t pending_flip;
 };
 
-#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
 /**
746 * Request queue structure. 758 * Request queue structure.
@@ -902,6 +914,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+					      size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
 int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
 void i915_gem_object_unpin(struct drm_gem_object *obj);
@@ -998,6 +1012,12 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void i8xx_disable_fbc(struct drm_device *dev);
 extern void g4x_disable_fbc(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern bool intel_fbc_enabled(struct drm_device *dev);
+
+extern void intel_detect_pch (struct drm_device *dev);
+extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
 /**
  * Lock test for when it's just for synchronization of ring access.
@@ -1130,7 +1150,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
 #define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
 #define SUPPORTS_TV(dev)		(IS_I9XX(dev) && IS_MOBILE(dev) && \
-					!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
+					!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
+					!IS_GEN6(dev))
 #define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)
 /* dsparb controlled by hw only */
 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
@@ -1144,6 +1165,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 			    IS_GEN6(dev))
 #define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
 
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+
 #define PRIMARY_RINGBUFFER_SIZE		(128*1024)
 
 #endif
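
The i915_drv.h hunks convert drm_i915_gem_object from holding a pointer to its drm_gem_object into embedding it as "base", with to_intel_bo() becoming a container_of(). A small, runnable sketch of that embedding pattern with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

/* Userspace equivalent of the kernel's container_of(): recover the
 * outer struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gem_object {
	size_t size;
};

struct i915_gem_object {
	struct gem_object base;	/* embedded, no separate allocation */
	int fence_reg;
};

#define to_intel_bo(x) container_of(x, struct i915_gem_object, base)

int main(void)
{
	struct i915_gem_object obj = { .base = { .size = 4096 }, .fence_reg = -1 };
	struct gem_object *base = &obj.base;

	/* One allocation covers both structs, and the cast back is free. */
	printf("size=%zu fence=%d\n", base->size, to_intel_bo(base)->fence_reg);
	return 0;
}
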
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ef3d91dda71a..112699f71fa4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -124,7 +124,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 	args->size = roundup(args->size, PAGE_SIZE);
 
 	/* Allocate the new object */
-	obj = drm_gem_object_alloc(dev, args->size);
+	obj = i915_gem_alloc_object(dev, args->size);
 	if (obj == NULL)
 		return -ENOMEM;
 
@@ -1051,7 +1051,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	 * about to occur.
 	 */
 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-		list_move_tail(&obj_priv->fence_list,
+		struct drm_i915_fence_reg *reg =
+			&dev_priv->fence_regs[obj_priv->fence_reg];
+		list_move_tail(&reg->lru_list,
 			       &dev_priv->mm.fence_list);
 	}
 
@@ -1566,7 +1568,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 	list_for_each_entry_safe(obj_priv, next,
 				 &dev_priv->mm.gpu_write_list,
 				 gpu_write_list) {
-		struct drm_gem_object *obj = obj_priv->obj;
+		struct drm_gem_object *obj = &obj_priv->base;
 
 		if ((obj->write_domain & flush_domains) ==
 		    obj->write_domain) {
@@ -1577,9 +1579,12 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 			i915_gem_object_move_to_active(obj, seqno);
 
 			/* update the fence lru list */
-			if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-				list_move_tail(&obj_priv->fence_list,
+			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+				struct drm_i915_fence_reg *reg =
+					&dev_priv->fence_regs[obj_priv->fence_reg];
+				list_move_tail(&reg->lru_list,
 					       &dev_priv->mm.fence_list);
+			}
 
 			trace_i915_gem_object_change_domain(obj,
 							    obj->read_domains,
@@ -1745,7 +1750,7 @@ i915_gem_retire_request(struct drm_device *dev,
 		obj_priv = list_first_entry(&dev_priv->mm.active_list,
 					    struct drm_i915_gem_object,
 					    list);
-		obj = obj_priv->obj;
+		obj = &obj_priv->base;
 
 		/* If the seqno being retired doesn't match the oldest in the
 		 * list, then the oldest in the list must still be newer than
@@ -2119,7 +2124,7 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
 
 	/* Try to find the smallest clean object */
 	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		struct drm_gem_object *obj = obj_priv->obj;
+		struct drm_gem_object *obj = &obj_priv->base;
 		if (obj->size >= min_size) {
 			if ((!obj_priv->dirty ||
 			     i915_gem_object_is_purgeable(obj_priv)) &&
@@ -2253,7 +2258,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 
 	/* Find an object that we can immediately reuse */
 	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
-		obj = obj_priv->obj;
+		obj = &obj_priv->base;
 		if (obj->size >= min_size)
 			break;
 
@@ -2485,9 +2490,10 @@ static int i915_find_fence_reg(struct drm_device *dev)
 
 	/* None available, try to steal one or wait for a user to finish */
 	i = I915_FENCE_REG_NONE;
-	list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
-			    fence_list) {
-		obj = obj_priv->obj;
+	list_for_each_entry(reg, &dev_priv->mm.fence_list,
+			    lru_list) {
+		obj = reg->obj;
+		obj_priv = to_intel_bo(obj);
 
 		if (obj_priv->pin_count)
 			continue;
@@ -2536,7 +2542,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 
 	/* Just update our place in the LRU if our fence is getting used. */
 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-		list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+		reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
 		return 0;
 	}
 
@@ -2566,7 +2573,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 
 	obj_priv->fence_reg = ret;
 	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
-	list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+	list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
 
 	reg->obj = obj;
 
@@ -2598,6 +2605,8 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct drm_i915_fence_reg *reg =
+		&dev_priv->fence_regs[obj_priv->fence_reg];
 
 	if (IS_GEN6(dev)) {
 		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2616,9 +2625,9 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 		I915_WRITE(fence_reg, 0);
 	}
 
-	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+	reg->obj = NULL;
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
-	list_del_init(&obj_priv->fence_list);
+	list_del_init(&reg->lru_list);
 }
 
 /**
@@ -4471,34 +4480,38 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-int i915_gem_init_object(struct drm_gem_object *obj)
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+					      size_t size)
 {
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 
-	obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
-	if (obj_priv == NULL)
-		return -ENOMEM;
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (obj == NULL)
+		return NULL;
 
-	/*
-	 * We've just allocated pages from the kernel,
-	 * so they've just been written by the CPU with
-	 * zeros. They'll need to be clflushed before we
-	 * use them with the GPU.
-	 */
-	obj->write_domain = I915_GEM_DOMAIN_CPU;
-	obj->read_domains = I915_GEM_DOMAIN_CPU;
+	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+		kfree(obj);
+		return NULL;
+	}
 
-	obj_priv->agp_type = AGP_USER_MEMORY;
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
-	obj->driver_private = obj_priv;
-	obj_priv->obj = obj;
-	obj_priv->fence_reg = I915_FENCE_REG_NONE;
-	INIT_LIST_HEAD(&obj_priv->list);
-	INIT_LIST_HEAD(&obj_priv->gpu_write_list);
-	INIT_LIST_HEAD(&obj_priv->fence_list);
-	obj_priv->madv = I915_MADV_WILLNEED;
+	obj->agp_type = AGP_USER_MEMORY;
+	obj->base.driver_private = NULL;
+	obj->fence_reg = I915_FENCE_REG_NONE;
+	INIT_LIST_HEAD(&obj->list);
+	INIT_LIST_HEAD(&obj->gpu_write_list);
+	obj->madv = I915_MADV_WILLNEED;
 
-	trace_i915_gem_object_create(obj);
+	trace_i915_gem_object_create(&obj->base);
+
+	return &obj->base;
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+	BUG();
 
 	return 0;
 }
@@ -4521,9 +4534,11 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	if (obj_priv->mmap_offset)
 		i915_gem_free_mmap_offset(obj);
 
+	drm_gem_object_release(obj);
+
 	kfree(obj_priv->page_cpu_valid);
 	kfree(obj_priv->bit_17);
-	kfree(obj->driver_private);
+	kfree(obj_priv);
 }
 
 /** Unbinds all inactive objects. */
@@ -4536,9 +4551,9 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
 		struct drm_gem_object *obj;
 		int ret;
 
-		obj = list_first_entry(&dev_priv->mm.inactive_list,
+		obj = &list_first_entry(&dev_priv->mm.inactive_list,
 				       struct drm_i915_gem_object,
-				       list)->obj;
+				       list)->base;
 
 		ret = i915_gem_object_unbind(obj);
 		if (ret != 0) {
@@ -4608,7 +4623,7 @@ i915_gem_init_pipe_control(struct drm_device *dev)
 	struct drm_i915_gem_object *obj_priv;
 	int ret;
 
-	obj = drm_gem_object_alloc(dev, 4096);
+	obj = i915_gem_alloc_object(dev, 4096);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate seqno page\n");
 		ret = -ENOMEM;
@@ -4653,7 +4668,7 @@ i915_gem_init_hws(struct drm_device *dev)
 	if (!I915_NEED_GFX_HWS(dev))
 		return 0;
 
-	obj = drm_gem_object_alloc(dev, 4096);
+	obj = i915_gem_alloc_object(dev, 4096);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate status page\n");
 		ret = -ENOMEM;
@@ -4764,7 +4779,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	if (ret != 0)
 		return ret;
 
-	obj = drm_gem_object_alloc(dev, 128 * 1024);
+	obj = i915_gem_alloc_object(dev, 128 * 1024);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate ringbuffer\n");
 		i915_gem_cleanup_hws(dev);
@@ -4957,6 +4972,8 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	for (i = 0; i < 16; i++)
+		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
 	dev_priv->mm.next_gem_seqno = 1;
@@ -5185,6 +5202,20 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
 }
 
 static int
+i915_gpu_is_active(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int lists_empty;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+		      list_empty(&dev_priv->mm.active_list);
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
+	return !lists_empty;
+}
+
+static int
 i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 {
 	drm_i915_private_t *dev_priv, *next_dev;
@@ -5213,6 +5244,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 
 	spin_lock(&shrink_list_lock);
 
+rescan:
 	/* first scan for clean buffers */
 	list_for_each_entry_safe(dev_priv, next_dev,
 				 &shrink_list, mm.shrink_list) {
@@ -5229,7 +5261,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 					 &dev_priv->mm.inactive_list,
 					 list) {
 			if (i915_gem_object_is_purgeable(obj_priv)) {
-				i915_gem_object_unbind(obj_priv->obj);
+				i915_gem_object_unbind(&obj_priv->base);
 				if (--nr_to_scan <= 0)
 					break;
 			}
@@ -5258,7 +5290,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 					 &dev_priv->mm.inactive_list,
 					 list) {
 			if (nr_to_scan > 0) {
-				i915_gem_object_unbind(&obj_priv->base);
+				i915_gem_object_unbind(&obj_priv->base);
 				nr_to_scan--;
 			} else
 				cnt++;
@@ -5270,6 +5302,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 		would_deadlock = 0;
 	}
 
+	if (nr_to_scan) {
+		int active = 0;
+
+		/*
+		 * We are desperate for pages, so as a last resort, wait
+		 * for the GPU to finish and discard whatever we can.
+		 * This has a dramatic impact to reduce the number of
+		 * OOM-killer events whilst running the GPU aggressively.
+		 */
+		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+			struct drm_device *dev = dev_priv->dev;
+
+			if (!mutex_trylock(&dev->struct_mutex))
+				continue;
+
+			spin_unlock(&shrink_list_lock);
+
+			if (i915_gpu_is_active(dev)) {
+				i915_gpu_idle(dev);
+				active++;
+			}
+
+			spin_lock(&shrink_list_lock);
+			mutex_unlock(&dev->struct_mutex);
+		}
+
+		if (active)
+			goto rescan;
+	}
+
 	spin_unlock(&shrink_list_lock);
 
 	if (would_deadlock)
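
The shrinker change above adds a last-resort pass: if the clean-buffer scans leave nr_to_scan unmet and the GPU is still busy, it idles the GPU and jumps back to rescan. A toy userspace analogue of that control flow, with made-up reclaim numbers purely for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins: reclaim a few "clean" pages per pass, and report GPU
 * activity exactly once so the rescan terminates. */
static int reclaim_clean(int nr) { return nr > 4 ? 4 : nr; }

static bool gpu_is_active(void)
{
	static bool once = true;
	bool r = once;
	once = false;
	return r;
}

static void gpu_idle(void) { puts("waiting for GPU idle"); }

static int shrink(int nr_to_scan)
{
rescan:
	nr_to_scan -= reclaim_clean(nr_to_scan);
	if (nr_to_scan && gpu_is_active()) {
		/* last resort: wait for outstanding rendering, then retry */
		gpu_idle();
		goto rescan;
	}
	return nr_to_scan;	/* pages still wanted after all passes */
}

int main(void)
{
	printf("left over: %d\n", shrink(10));
	return 0;
}
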
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 35507cf53fa3..80f380b1d951 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -39,7 +39,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line)
 	struct drm_i915_gem_object *obj_priv;
 
 	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		obj = obj_priv->obj;
+		obj = &obj_priv->base;
 		if (obj_priv->pin_count || obj_priv->active ||
 		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
 					   I915_GEM_DOMAIN_GTT)))
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4bdccefcf2cf..4b7c49d4257d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -283,6 +283,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	if (obj_priv->pin_count) {
+		drm_gem_object_unreference_unlocked(obj);
+		return -EBUSY;
+	}
+
 	if (args->tiling_mode == I915_TILING_NONE) {
 		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
 		args->stride = 0;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index df6a9cd82c4d..8c3f0802686d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -169,9 +169,13 @@ void intel_enable_asle (struct drm_device *dev)
 
 	if (HAS_PCH_SPLIT(dev))
 		ironlake_enable_display_irq(dev_priv, DE_GSE);
-	else
+	else {
 		i915_enable_pipestat(dev_priv, 1,
 				     I915_LEGACY_BLC_EVENT_ENABLE);
+		if (IS_I965G(dev))
+			i915_enable_pipestat(dev_priv, 0,
+					     I915_LEGACY_BLC_EVENT_ENABLE);
+	}
 }
 
 /**
@@ -256,18 +260,18 @@ static void i915_hotplug_work_func(struct work_struct *work)
 						    hotplug_work);
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_connector *connector;
+	struct drm_encoder *encoder;
 
-	if (mode_config->num_connector) {
-		list_for_each_entry(connector, &mode_config->connector_list, head) {
-			struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	if (mode_config->num_encoder) {
+		list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+			struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 
 			if (intel_encoder->hot_plug)
 				(*intel_encoder->hot_plug) (intel_encoder);
 		}
 	}
 	/* Just fire off a uevent and let userspace tell us what to do */
-	drm_sysfs_hotplug_event(dev);
+	drm_helper_hpd_irq_event(dev);
 }
 
 static void i915_handle_rps_change(struct drm_device *dev)
@@ -612,7 +616,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	batchbuffer[1] = NULL;
 	count = 0;
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-		struct drm_gem_object *obj = obj_priv->obj;
+		struct drm_gem_object *obj = &obj_priv->base;
 
 		if (batchbuffer[0] == NULL &&
 		    bbaddr >= obj_priv->gtt_offset &&
@@ -648,7 +652,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	if (error->active_bo) {
 		int i = 0;
 		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-			struct drm_gem_object *obj = obj_priv->obj;
+			struct drm_gem_object *obj = &obj_priv->base;
 
 			error->active_bo[i].size = obj->size;
 			error->active_bo[i].name = obj->name;
@@ -950,7 +954,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 			intel_finish_page_flip(dev, 1);
 		}
 
-		if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+		if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+		    (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
 		    (iir & I915_ASLE_INTERRUPT))
 			opregion_asle_intr(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4cbc5210fd30..f3e39cc46f0d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1764,6 +1764,14 @@
 #define DP_LINK_TRAIN_MASK		(3 << 28)
 #define DP_LINK_TRAIN_SHIFT		28
 
+/* CPT Link training mode */
+#define DP_LINK_TRAIN_PAT_1_CPT		(0 << 8)
+#define DP_LINK_TRAIN_PAT_2_CPT		(1 << 8)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT	(2 << 8)
+#define DP_LINK_TRAIN_OFF_CPT		(3 << 8)
+#define DP_LINK_TRAIN_MASK_CPT		(7 << 8)
+#define DP_LINK_TRAIN_SHIFT_CPT		8
+
 /* Signal voltages. These are mostly controlled by the other end */
 #define DP_VOLTAGE_0_4			(0 << 25)
 #define DP_VOLTAGE_0_6			(1 << 25)
@@ -1924,7 +1932,10 @@
 /* Display & cursor control */
 
 /* dithering flag on Ironlake */
 #define PIPE_ENABLE_DITHER		(1 << 4)
+#define PIPE_DITHER_TYPE_MASK		(3 << 2)
+#define PIPE_DITHER_TYPE_SPATIAL	(0 << 2)
+#define PIPE_DITHER_TYPE_ST01		(1 << 2)
 /* Pipe A */
 #define PIPEADSL		0x70000
 #define PIPEACONF		0x70008
@@ -1988,15 +1999,24 @@
 
 #define DSPFW1			0x70034
 #define DSPFW_SR_SHIFT		23
+#define DSPFW_SR_MASK		(0x1ff<<23)
 #define DSPFW_CURSORB_SHIFT	16
+#define DSPFW_CURSORB_MASK	(0x3f<<16)
 #define DSPFW_PLANEB_SHIFT	8
+#define DSPFW_PLANEB_MASK	(0x7f<<8)
+#define DSPFW_PLANEA_MASK	(0x7f)
 #define DSPFW2			0x70038
 #define DSPFW_CURSORA_MASK	0x00003f00
 #define DSPFW_CURSORA_SHIFT	8
+#define DSPFW_PLANEC_MASK	(0x7f)
 #define DSPFW3			0x7003c
 #define DSPFW_HPLL_SR_EN	(1<<31)
 #define DSPFW_CURSOR_SR_SHIFT	24
 #define PINEVIEW_SELF_REFRESH_EN	(1<<30)
+#define DSPFW_CURSOR_SR_MASK		(0x3f<<24)
+#define DSPFW_HPLL_CURSOR_SHIFT		16
+#define DSPFW_HPLL_CURSOR_MASK		(0x3f<<16)
+#define DSPFW_HPLL_SR_MASK		(0x1ff)
 
 /* FIFO watermark sizes etc */
 #define G4X_FIFO_LINE_SIZE	64
@@ -2023,6 +2043,43 @@
 #define PINEVIEW_CURSOR_DFT_WM	0
 #define PINEVIEW_CURSOR_GUARD_WM	5
 
+
+/* define the Watermark register on Ironlake */
+#define WM0_PIPEA_ILK		0x45100
+#define WM0_PIPE_PLANE_MASK	(0x7f<<16)
+#define WM0_PIPE_PLANE_SHIFT	16
+#define WM0_PIPE_SPRITE_MASK	(0x3f<<8)
+#define WM0_PIPE_SPRITE_SHIFT	8
+#define WM0_PIPE_CURSOR_MASK	(0x1f)
+
+#define WM0_PIPEB_ILK		0x45104
+#define WM1_LP_ILK		0x45108
+#define WM1_LP_SR_EN		(1<<31)
+#define WM1_LP_LATENCY_SHIFT	24
+#define WM1_LP_LATENCY_MASK	(0x7f<<24)
+#define WM1_LP_SR_MASK		(0x1ff<<8)
+#define WM1_LP_SR_SHIFT		8
+#define WM1_LP_CURSOR_MASK	(0x3f)
+
+/* Memory latency timer register */
+#define MLTR_ILK		0x11222
+/* the unit of memory self-refresh latency time is 0.5us */
+#define ILK_SRLT_MASK		0x3f
+
+/* define the fifo size on Ironlake */
+#define ILK_DISPLAY_FIFO	128
+#define ILK_DISPLAY_MAXWM	64
+#define ILK_DISPLAY_DFTWM	8
+
+#define ILK_DISPLAY_SR_FIFO	512
+#define ILK_DISPLAY_MAX_SRWM	0x1ff
+#define ILK_DISPLAY_DFT_SRWM	0x3f
+#define ILK_CURSOR_SR_FIFO	64
+#define ILK_CURSOR_MAX_SRWM	0x3f
+#define ILK_CURSOR_DFT_SRWM	8
+
+#define ILK_FIFO_LINE_SIZE	64
+
 /*
  * The two pipe frame counter registers are not synchronized, so
  * reading a stable value is somewhat tricky. The following code
@@ -2304,8 +2361,15 @@
 #define GTIIR   0x44018
 #define GTIER   0x4401c
 
+#define ILK_DISPLAY_CHICKEN2	0x42004
+#define  ILK_DPARB_GATE		(1<<22)
+#define  ILK_VSDPFD_FULL	(1<<21)
+#define ILK_DSPCLK_GATE		0x42020
+#define  ILK_DPARB_CLK_GATE	(1<<5)
+
 #define DISP_ARB_CTL	0x45000
 #define  DISP_TILE_SURFACE_SWIZZLING	(1<<13)
+#define  DISP_FBC_WM_DIS		(1<<15)
 
 /* PCH */
 
@@ -2316,6 +2380,11 @@
 #define SDE_PORTB_HOTPLUG	(1 << 8)
 #define SDE_SDVOB_HOTPLUG	(1 << 6)
 #define SDE_HOTPLUG_MASK	(0xf << 8)
+/* CPT */
+#define SDE_CRT_HOTPLUG_CPT	(1 << 19)
+#define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT	(1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT	(1 << 21)
 
 #define SDEISR  0xc4000
 #define SDEIMR  0xc4004
@@ -2407,6 +2476,17 @@
 #define PCH_SSC4_PARMS          0xc6210
 #define PCH_SSC4_AUX_PARMS      0xc6214
 
+#define PCH_DPLL_SEL		0xc7000
+#define  TRANSA_DPLL_ENABLE	(1<<3)
+#define  TRANSA_DPLLB_SEL	(1<<0)
+#define  TRANSA_DPLLA_SEL	0
+#define  TRANSB_DPLL_ENABLE	(1<<7)
+#define  TRANSB_DPLLB_SEL	(1<<4)
+#define  TRANSB_DPLLA_SEL	(0)
+#define  TRANSC_DPLL_ENABLE	(1<<11)
+#define  TRANSC_DPLLB_SEL	(1<<8)
+#define  TRANSC_DPLLA_SEL	(0)
+
 /* transcoder */
 
 #define TRANS_HTOTAL_A          0xe0000
@@ -2493,6 +2573,19 @@
 #define  FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
 #define  FDI_LINK_TRAIN_PRE_EMPHASIS_2X   (2<<22)
 #define  FDI_LINK_TRAIN_PRE_EMPHASIS_3X   (3<<22)
+/* ILK always use 400mV 0dB for voltage swing and pre-emphasis level.
+   SNB has different settings. */
+/* SNB A-stepping */
+#define  FDI_LINK_TRAIN_400MV_0DB_SNB_A		(0x38<<22)
+#define  FDI_LINK_TRAIN_400MV_6DB_SNB_A		(0x02<<22)
+#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_A	(0x01<<22)
+#define  FDI_LINK_TRAIN_800MV_0DB_SNB_A		(0x0<<22)
+/* SNB B-stepping */
+#define  FDI_LINK_TRAIN_400MV_0DB_SNB_B		(0x0<<22)
+#define  FDI_LINK_TRAIN_400MV_6DB_SNB_B		(0x3a<<22)
+#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_B	(0x39<<22)
+#define  FDI_LINK_TRAIN_800MV_0DB_SNB_B		(0x38<<22)
+#define  FDI_LINK_TRAIN_VOL_EMP_MASK		(0x3f<<22)
 #define  FDI_DP_PORT_WIDTH_X1           (0<<19)
 #define  FDI_DP_PORT_WIDTH_X2           (1<<19)
 #define  FDI_DP_PORT_WIDTH_X3           (2<<19)
@@ -2525,6 +2618,13 @@
 #define  FDI_RX_ENHANCE_FRAME_ENABLE    (1<<6)
 #define  FDI_SEL_RAWCLK                 (0<<4)
 #define  FDI_SEL_PCDCLK                 (1<<4)
+/* CPT */
+#define  FDI_AUTO_TRAINING			(1<<10)
+#define  FDI_LINK_TRAIN_PATTERN_1_CPT		(0<<8)
+#define  FDI_LINK_TRAIN_PATTERN_2_CPT		(1<<8)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE_CPT	(2<<8)
+#define  FDI_LINK_TRAIN_NORMAL_CPT		(3<<8)
+#define  FDI_LINK_TRAIN_PATTERN_MASK_CPT	(3<<8)
 
 #define FDI_RXA_MISC            0xf0010
 #define FDI_RXB_MISC            0xf1010
@@ -2596,6 +2696,9 @@
 #define  HSYNC_ACTIVE_HIGH      (1 << 3)
 #define  PORT_DETECTED          (1 << 2)
 
+/* PCH SDVOB multiplex with HDMIB */
+#define PCH_SDVOB	HDMIB
+
 #define HDMIC   0xe1150
 #define HDMID   0xe1160
 
@@ -2653,4 +2756,42 @@
 #define PCH_DPD_AUX_CH_DATA4	0xe4320
 #define PCH_DPD_AUX_CH_DATA5	0xe4324
 
+/* CPT */
+#define  PORT_TRANS_A_SEL_CPT	0
+#define  PORT_TRANS_B_SEL_CPT	(1<<29)
+#define  PORT_TRANS_C_SEL_CPT	(2<<29)
+#define  PORT_TRANS_SEL_MASK	(3<<29)
+
+#define TRANS_DP_CTL_A		0xe0300
+#define TRANS_DP_CTL_B		0xe1300
+#define TRANS_DP_CTL_C		0xe2300
+#define  TRANS_DP_OUTPUT_ENABLE	(1<<31)
+#define  TRANS_DP_PORT_SEL_B	(0<<29)
+#define  TRANS_DP_PORT_SEL_C	(1<<29)
+#define  TRANS_DP_PORT_SEL_D	(2<<29)
+#define  TRANS_DP_PORT_SEL_MASK	(3<<29)
+#define  TRANS_DP_AUDIO_ONLY	(1<<26)
+#define  TRANS_DP_ENH_FRAMING	(1<<18)
+#define  TRANS_DP_8BPC		(0<<9)
+#define  TRANS_DP_10BPC		(1<<9)
+#define  TRANS_DP_6BPC		(2<<9)
+#define  TRANS_DP_12BPC		(3<<9)
+#define  TRANS_DP_VSYNC_ACTIVE_HIGH	(1<<4)
+#define  TRANS_DP_VSYNC_ACTIVE_LOW	0
+#define  TRANS_DP_HSYNC_ACTIVE_HIGH	(1<<3)
+#define  TRANS_DP_HSYNC_ACTIVE_LOW	0
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define  EDP_LINK_TRAIN_400MV_0DB_SNB_A		(0x38<<22)
+#define  EDP_LINK_TRAIN_400MV_6DB_SNB_A		(0x02<<22)
+#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_A	(0x01<<22)
+#define  EDP_LINK_TRAIN_800MV_0DB_SNB_A		(0x0<<22)
+/* SNB B-stepping */
+#define  EDP_LINK_TRAIN_400MV_0DB_SNB_B		(0x0<<22)
+#define  EDP_LINK_TRAIN_400MV_6DB_SNB_B		(0x3a<<22)
+#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_B	(0x39<<22)
+#define  EDP_LINK_TRAIN_800MV_0DB_SNB_B		(0x38<<22)
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB	(0x3f<<22)
+
 #endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index ac0d1a73ac22..60a5800fba6e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -600,14 +600,16 @@ void i915_save_display(struct drm_device *dev)
 	}
 	/* FIXME: save TV & SDVO state */
 
-	/* FBC state */
-	if (IS_GM45(dev)) {
-		dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
-	} else {
-		dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
-		dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
-		dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
-		dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+	/* Only save FBC state on the platform that supports FBC */
+	if (I915_HAS_FBC(dev)) {
+		if (IS_GM45(dev)) {
+			dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+		} else {
+			dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+			dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+			dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+			dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+		}
 	}
 
 	/* VGA state */
@@ -702,18 +704,19 @@ void i915_restore_display(struct drm_device *dev)
 	}
 	/* FIXME: restore TV & SDVO state */
 
-	/* FBC info */
-	if (IS_GM45(dev)) {
-		g4x_disable_fbc(dev);
-		I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
-	} else {
-		i8xx_disable_fbc(dev);
-		I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
-		I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
-		I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
-		I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+	/* only restore FBC info on the platform that supports FBC*/
+	if (I915_HAS_FBC(dev)) {
+		if (IS_GM45(dev)) {
+			g4x_disable_fbc(dev);
+			I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+		} else {
+			i8xx_disable_fbc(dev);
+			I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+			I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+			I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+			I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+		}
 	}
-
 	/* VGA state */
 	if (IS_IRONLAKE(dev))
 		I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 01840d9bc38f..9e4c45f68d6e 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -115,7 +115,7 @@ TRACE_EVENT(i915_gem_object_get_fence,
 		      __entry->obj, __entry->fence, __entry->tiling_mode)
 );
 
-TRACE_EVENT(i915_gem_object_unbind,
+DECLARE_EVENT_CLASS(i915_gem_object,
 
 	    TP_PROTO(struct drm_gem_object *obj),
 
@@ -132,21 +132,18 @@ TRACE_EVENT(i915_gem_object_unbind,
 	    TP_printk("obj=%p", __entry->obj)
 );
 
-TRACE_EVENT(i915_gem_object_destroy,
+DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
 
 	    TP_PROTO(struct drm_gem_object *obj),
 
-	    TP_ARGS(obj),
+	    TP_ARGS(obj)
+);
 
-	    TP_STRUCT__entry(
-		    __field(struct drm_gem_object *, obj)
-	    ),
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 
-	    TP_fast_assign(
-		    __entry->obj = obj;
-		    ),
+	    TP_PROTO(struct drm_gem_object *obj),
 
-	    TP_printk("obj=%p", __entry->obj)
+	    TP_ARGS(obj)
 );
 
 /* batch tracing */
@@ -197,8 +194,7 @@ TRACE_EVENT(i915_gem_request_flush,
 		      __entry->flush_domains, __entry->invalidate_domains)
 );
 
-
-TRACE_EVENT(i915_gem_request_complete,
+DECLARE_EVENT_CLASS(i915_gem_request,
 
 	    TP_PROTO(struct drm_device *dev, u32 seqno),
 
@@ -217,64 +213,35 @@ TRACE_EVENT(i915_gem_request_complete,
 	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
 );
 
-TRACE_EVENT(i915_gem_request_retire,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
 
 	    TP_PROTO(struct drm_device *dev, u32 seqno),
 
-	    TP_ARGS(dev, seqno),
-
-	    TP_STRUCT__entry(
-		    __field(u32, dev)
-		    __field(u32, seqno)
-	    ),
-
-	    TP_fast_assign(
-		    __entry->dev = dev->primary->index;
-		    __entry->seqno = seqno;
-	    ),
-
-	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_ARGS(dev, seqno)
 );
 
-TRACE_EVENT(i915_gem_request_wait_begin,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
 
 	    TP_PROTO(struct drm_device *dev, u32 seqno),
 
-	    TP_ARGS(dev, seqno),
-
-	    TP_STRUCT__entry(
-		    __field(u32, dev)
-		    __field(u32, seqno)
-	    ),
-
-	    TP_fast_assign(
-		    __entry->dev = dev->primary->index;
-		    __entry->seqno = seqno;
-	    ),
-
-	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_ARGS(dev, seqno)
 );
 
-TRACE_EVENT(i915_gem_request_wait_end,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
 
 	    TP_PROTO(struct drm_device *dev, u32 seqno),
 
-	    TP_ARGS(dev, seqno),
+	    TP_ARGS(dev, seqno)
+);
 
-	    TP_STRUCT__entry(
-		    __field(u32, dev)
-		    __field(u32, seqno)
-	    ),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
 
-	    TP_fast_assign(
-		    __entry->dev = dev->primary->index;
-		    __entry->seqno = seqno;
-	    ),
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
 
-	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_ARGS(dev, seqno)
 );
 
-TRACE_EVENT(i915_ring_wait_begin,
+DECLARE_EVENT_CLASS(i915_ring,
 
 	    TP_PROTO(struct drm_device *dev),
 
@@ -291,26 +258,23 @@ TRACE_EVENT(i915_ring_wait_begin,
 	    TP_printk("dev=%u", __entry->dev)
 );
 
-TRACE_EVENT(i915_ring_wait_end,
+DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
 
 	    TP_PROTO(struct drm_device *dev),
 
-	    TP_ARGS(dev),
+	    TP_ARGS(dev)
+);
 
-	    TP_STRUCT__entry(
-		    __field(u32, dev)
-	    ),
+DEFINE_EVENT(i915_ring, i915_ring_wait_end,
 
-	    TP_fast_assign(
-		    __entry->dev = dev->primary->index;
-	    ),
+	    TP_PROTO(struct drm_device *dev),
 
-	    TP_printk("dev=%u", __entry->dev)
+	    TP_ARGS(dev)
 );
 
 #endif /* _I915_TRACE_H_ */
 
 /* This part must be outside protection */
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#define TRACE_INCLUDE_PATH .
 #include <trace/define_trace.h>
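
The i915_trace.h hunk collapses near-identical TRACE_EVENT() bodies into one DECLARE_EVENT_CLASS() plus one-line DEFINE_EVENT()s. A runnable userspace analogue of that macro factoring; the macro names below mimic, but are not, the kernel's tracepoint API:

#include <stdio.h>

/* One "class" macro captures the shared argument list and format... */
#define DECLARE_CLASS(class, fmt)					\
	static void class##_emit(const char *name, unsigned dev,	\
				 unsigned seqno)			\
	{								\
		printf("%s: " fmt "\n", name, dev, seqno);		\
	}

/* ...and each event becomes a one-line instantiation instead of a
 * full hand-written body. */
#define DEFINE_EVENT(class, name)					\
	static void trace_##name(unsigned dev, unsigned seqno)		\
	{								\
		class##_emit(#name, dev, seqno);			\
	}

DECLARE_CLASS(gem_request, "dev=%u, seqno=%u")
DEFINE_EVENT(gem_request, request_complete)
DEFINE_EVENT(gem_request, request_retire)

int main(void)
{
	trace_request_complete(0, 42);
	trace_request_retire(0, 42);
	return 0;
}
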
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f9ba452f0cbf..4c748d8f73d6 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -366,6 +366,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 		p_mapping->dvo_port = p_child->dvo_port;
 		p_mapping->slave_addr = p_child->slave_addr;
 		p_mapping->dvo_wiring = p_child->dvo_wiring;
+		p_mapping->ddc_pin = p_child->ddc_pin;
 		p_mapping->initialized = 1;
 	} else {
 		DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 759c2ef72eff..e16ac5a28c3c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -136,11 +136,17 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 
 	if (intel_crtc->pipe == 0) {
-		adpa |= ADPA_PIPE_A_SELECT;
+		if (HAS_PCH_CPT(dev))
+			adpa |= PORT_TRANS_A_SEL_CPT;
+		else
+			adpa |= ADPA_PIPE_A_SELECT;
 		if (!HAS_PCH_SPLIT(dev))
 			I915_WRITE(BCLRPAT_A, 0);
 	} else {
-		adpa |= ADPA_PIPE_B_SELECT;
+		if (HAS_PCH_CPT(dev))
+			adpa |= PORT_TRANS_B_SEL_CPT;
+		else
+			adpa |= ADPA_PIPE_B_SELECT;
 		if (!HAS_PCH_SPLIT(dev))
 			I915_WRITE(BCLRPAT_B, 0);
 	}
@@ -152,15 +158,21 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
152{ 158{
153 struct drm_device *dev = connector->dev; 159 struct drm_device *dev = connector->dev;
154 struct drm_i915_private *dev_priv = dev->dev_private; 160 struct drm_i915_private *dev_priv = dev->dev_private;
155 u32 adpa; 161 u32 adpa, temp;
156 bool ret; 162 bool ret;
157 163
158 adpa = I915_READ(PCH_ADPA); 164 temp = adpa = I915_READ(PCH_ADPA);
159 165
160 adpa &= ~ADPA_CRT_HOTPLUG_MASK; 166 if (HAS_PCH_CPT(dev)) {
161 /* disable HPD first */ 167 /* Disable DAC before force detect */
162 I915_WRITE(PCH_ADPA, adpa); 168 I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
163 (void)I915_READ(PCH_ADPA); 169 (void)I915_READ(PCH_ADPA);
170 } else {
171 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
172 /* disable HPD first */
173 I915_WRITE(PCH_ADPA, adpa);
174 (void)I915_READ(PCH_ADPA);
175 }
164 176
165 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | 177 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
166 ADPA_CRT_HOTPLUG_WARMUP_10MS | 178 ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -176,6 +188,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
176 while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) 188 while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
177 ; 189 ;
178 190
191 if (HAS_PCH_CPT(dev)) {
192 I915_WRITE(PCH_ADPA, temp);
193 (void)I915_READ(PCH_ADPA);
194 }
195
179 /* Check the status to see if both blue and green are on now */ 196 /* Check the status to see if both blue and green are on now */
180 adpa = I915_READ(PCH_ADPA); 197 adpa = I915_READ(PCH_ADPA);
181 adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; 198 adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
@@ -245,9 +262,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
245 return false; 262 return false;
246} 263}
247 264
248static bool intel_crt_detect_ddc(struct drm_connector *connector) 265static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
249{ 266{
250 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 267 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
251 268
252 /* CRT should always be at 0, but check anyway */ 269 /* CRT should always be at 0, but check anyway */
253 if (intel_encoder->type != INTEL_OUTPUT_ANALOG) 270 if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
@@ -387,8 +404,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
387static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) 404static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
388{ 405{
389 struct drm_device *dev = connector->dev; 406 struct drm_device *dev = connector->dev;
390 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 407 struct drm_encoder *encoder = intel_attached_encoder(connector);
391 struct drm_encoder *encoder = &intel_encoder->enc; 408 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
392 struct drm_crtc *crtc; 409 struct drm_crtc *crtc;
393 int dpms_mode; 410 int dpms_mode;
394 enum drm_connector_status status; 411 enum drm_connector_status status;
@@ -400,18 +417,19 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
400 return connector_status_disconnected; 417 return connector_status_disconnected;
401 } 418 }
402 419
403 if (intel_crt_detect_ddc(connector)) 420 if (intel_crt_detect_ddc(encoder))
404 return connector_status_connected; 421 return connector_status_connected;
405 422
406 /* for pre-945g platforms use load detect */ 423 /* for pre-945g platforms use load detect */
407 if (encoder->crtc && encoder->crtc->enabled) { 424 if (encoder->crtc && encoder->crtc->enabled) {
408 status = intel_crt_load_detect(encoder->crtc, intel_encoder); 425 status = intel_crt_load_detect(encoder->crtc, intel_encoder);
409 } else { 426 } else {
410 crtc = intel_get_load_detect_pipe(intel_encoder, 427 crtc = intel_get_load_detect_pipe(intel_encoder, connector,
411 NULL, &dpms_mode); 428 NULL, &dpms_mode);
412 if (crtc) { 429 if (crtc) {
413 status = intel_crt_load_detect(crtc, intel_encoder); 430 status = intel_crt_load_detect(crtc, intel_encoder);
414 intel_release_load_detect_pipe(intel_encoder, dpms_mode); 431 intel_release_load_detect_pipe(intel_encoder,
432 connector, dpms_mode);
415 } else 433 } else
416 status = connector_status_unknown; 434 status = connector_status_unknown;
417 } 435 }
@@ -421,9 +439,6 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
421 439
422static void intel_crt_destroy(struct drm_connector *connector) 440static void intel_crt_destroy(struct drm_connector *connector)
423{ 441{
424 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
425
426 intel_i2c_destroy(intel_encoder->ddc_bus);
427 drm_sysfs_connector_remove(connector); 442 drm_sysfs_connector_remove(connector);
428 drm_connector_cleanup(connector); 443 drm_connector_cleanup(connector);
429 kfree(connector); 444 kfree(connector);
@@ -432,29 +447,27 @@ static void intel_crt_destroy(struct drm_connector *connector)
432static int intel_crt_get_modes(struct drm_connector *connector) 447static int intel_crt_get_modes(struct drm_connector *connector)
433{ 448{
434 int ret; 449 int ret;
435 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 450 struct drm_encoder *encoder = intel_attached_encoder(connector);
436 struct i2c_adapter *ddcbus; 451 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
452 struct i2c_adapter *ddc_bus;
437 struct drm_device *dev = connector->dev; 453 struct drm_device *dev = connector->dev;
438 454
439 455
440 ret = intel_ddc_get_modes(intel_encoder); 456 ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
441 if (ret || !IS_G4X(dev)) 457 if (ret || !IS_G4X(dev))
442 goto end; 458 goto end;
443 459
444 ddcbus = intel_encoder->ddc_bus;
445 /* Try to probe digital port for output in DVI-I -> VGA mode. */ 460 /* Try to probe digital port for output in DVI-I -> VGA mode. */
446 intel_encoder->ddc_bus = 461 ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
447 intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
448 462
449 if (!intel_encoder->ddc_bus) { 463 if (!ddc_bus) {
450 intel_encoder->ddc_bus = ddcbus;
451 dev_printk(KERN_ERR, &connector->dev->pdev->dev, 464 dev_printk(KERN_ERR, &connector->dev->pdev->dev,
452 "DDC bus registration failed for CRTDDC_D.\n"); 465 "DDC bus registration failed for CRTDDC_D.\n");
453 goto end; 466 goto end;
454 } 467 }
455 /* Try to get modes by GPIOD port */ 468 /* Try to get modes by GPIOD port */
456 ret = intel_ddc_get_modes(intel_encoder); 469 ret = intel_ddc_get_modes(connector, ddc_bus);
457 intel_i2c_destroy(ddcbus); 470 intel_i2c_destroy(ddc_bus);
458 471
459end: 472end:
460 return ret; 473 return ret;
@@ -491,12 +504,16 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
491static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { 504static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
492 .mode_valid = intel_crt_mode_valid, 505 .mode_valid = intel_crt_mode_valid,
493 .get_modes = intel_crt_get_modes, 506 .get_modes = intel_crt_get_modes,
494 .best_encoder = intel_best_encoder, 507 .best_encoder = intel_attached_encoder,
495}; 508};
496 509
497static void intel_crt_enc_destroy(struct drm_encoder *encoder) 510static void intel_crt_enc_destroy(struct drm_encoder *encoder)
498{ 511{
512 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
513
514 intel_i2c_destroy(intel_encoder->ddc_bus);
499 drm_encoder_cleanup(encoder); 515 drm_encoder_cleanup(encoder);
516 kfree(intel_encoder);
500} 517}
501 518
502static const struct drm_encoder_funcs intel_crt_enc_funcs = { 519static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -507,6 +524,7 @@ void intel_crt_init(struct drm_device *dev)
507{ 524{
508 struct drm_connector *connector; 525 struct drm_connector *connector;
509 struct intel_encoder *intel_encoder; 526 struct intel_encoder *intel_encoder;
527 struct intel_connector *intel_connector;
510 struct drm_i915_private *dev_priv = dev->dev_private; 528 struct drm_i915_private *dev_priv = dev->dev_private;
511 u32 i2c_reg; 529 u32 i2c_reg;
512 530
@@ -514,14 +532,20 @@ void intel_crt_init(struct drm_device *dev)
514 if (!intel_encoder) 532 if (!intel_encoder)
515 return; 533 return;
516 534
517 connector = &intel_encoder->base; 535 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
518 drm_connector_init(dev, &intel_encoder->base, 536 if (!intel_connector) {
537 kfree(intel_encoder);
538 return;
539 }
540
541 connector = &intel_connector->base;
542 drm_connector_init(dev, &intel_connector->base,
519 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 543 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
520 544
521 drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, 545 drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
522 DRM_MODE_ENCODER_DAC); 546 DRM_MODE_ENCODER_DAC);
523 547
524 drm_mode_connector_attach_encoder(&intel_encoder->base, 548 drm_mode_connector_attach_encoder(&intel_connector->base,
525 &intel_encoder->enc); 549 &intel_encoder->enc);
526 550
527 /* Set up the DDC bus. */ 551 /* Set up the DDC bus. */
@@ -553,5 +577,10 @@ void intel_crt_init(struct drm_device *dev)
553 577
554 drm_sysfs_connector_add(connector); 578 drm_sysfs_connector_add(connector);
555 579
580 if (I915_HAS_HOTPLUG(dev))
581 connector->polled = DRM_CONNECTOR_POLL_HPD;
582 else
583 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
584
556 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; 585 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
557} 586}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f27e3703a716..f469a84cacfd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -742,12 +742,11 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
742{ 742{
743 struct drm_device *dev = crtc->dev; 743 struct drm_device *dev = crtc->dev;
744 struct drm_mode_config *mode_config = &dev->mode_config; 744 struct drm_mode_config *mode_config = &dev->mode_config;
745 struct drm_connector *l_entry; 745 struct drm_encoder *l_entry;
746 746
747 list_for_each_entry(l_entry, &mode_config->connector_list, head) { 747 list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
748 if (l_entry->encoder && 748 if (l_entry && l_entry->crtc == crtc) {
749 l_entry->encoder->crtc == crtc) { 749 struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
750 struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
751 if (intel_encoder->type == type) 750 if (intel_encoder->type == type)
752 return true; 751 return true;
753 } 752 }
@@ -755,23 +754,6 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
755 return false; 754 return false;
756} 755}
757 756
758static struct drm_connector *
759intel_pipe_get_connector (struct drm_crtc *crtc)
760{
761 struct drm_device *dev = crtc->dev;
762 struct drm_mode_config *mode_config = &dev->mode_config;
763 struct drm_connector *l_entry, *ret = NULL;
764
765 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
766 if (l_entry->encoder &&
767 l_entry->encoder->crtc == crtc) {
768 ret = l_entry;
769 break;
770 }
771 }
772 return ret;
773}
774
775#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 757#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
776/** 758/**
777 * Returns whether the given set of divisors are valid for a given refclk with 759 * Returns whether the given set of divisors are valid for a given refclk with
@@ -1066,9 +1048,8 @@ void i8xx_disable_fbc(struct drm_device *dev)
1066 DRM_DEBUG_KMS("disabled FBC\n"); 1048 DRM_DEBUG_KMS("disabled FBC\n");
1067} 1049}
1068 1050
1069static bool i8xx_fbc_enabled(struct drm_crtc *crtc) 1051static bool i8xx_fbc_enabled(struct drm_device *dev)
1070{ 1052{
1071 struct drm_device *dev = crtc->dev;
1072 struct drm_i915_private *dev_priv = dev->dev_private; 1053 struct drm_i915_private *dev_priv = dev->dev_private;
1073 1054
1074 return I915_READ(FBC_CONTROL) & FBC_CTL_EN; 1055 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
@@ -1125,14 +1106,43 @@ void g4x_disable_fbc(struct drm_device *dev)
1125 DRM_DEBUG_KMS("disabled FBC\n"); 1106 DRM_DEBUG_KMS("disabled FBC\n");
1126} 1107}
1127 1108
1128static bool g4x_fbc_enabled(struct drm_crtc *crtc) 1109static bool g4x_fbc_enabled(struct drm_device *dev)
1129{ 1110{
1130 struct drm_device *dev = crtc->dev;
1131 struct drm_i915_private *dev_priv = dev->dev_private; 1111 struct drm_i915_private *dev_priv = dev->dev_private;
1132 1112
1133 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; 1113 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1134} 1114}
1135 1115
1116bool intel_fbc_enabled(struct drm_device *dev)
1117{
1118 struct drm_i915_private *dev_priv = dev->dev_private;
1119
1120 if (!dev_priv->display.fbc_enabled)
1121 return false;
1122
1123 return dev_priv->display.fbc_enabled(dev);
1124}
1125
1126void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1127{
1128 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
1129
1130 if (!dev_priv->display.enable_fbc)
1131 return;
1132
1133 dev_priv->display.enable_fbc(crtc, interval);
1134}
1135
1136void intel_disable_fbc(struct drm_device *dev)
1137{
1138 struct drm_i915_private *dev_priv = dev->dev_private;
1139
1140 if (!dev_priv->display.disable_fbc)
1141 return;
1142
1143 dev_priv->display.disable_fbc(dev);
1144}
1145
1136/** 1146/**
1137 * intel_update_fbc - enable/disable FBC as needed 1147 * intel_update_fbc - enable/disable FBC as needed
1138 * @crtc: CRTC to point the compressor at 1148 * @crtc: CRTC to point the compressor at
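Note: the new intel_fbc_enabled()/intel_enable_fbc()/intel_disable_fbc() helpers above wrap the per-platform function pointers so every caller gets the NULL check for free. A minimal, runnable userspace sketch of that guard-wrapper idiom (the struct and names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct display_ops {
	bool (*fbc_enabled)(void *dev);  /* may be NULL on hardware without FBC */
	void (*disable_fbc)(void *dev);  /* may be NULL on hardware without FBC */
};

/* One wrapper per op; callers never dereference a NULL hook. */
static bool fbc_enabled(const struct display_ops *ops, void *dev)
{
	if (!ops->fbc_enabled)
		return false;            /* no hook: treat as "not enabled" */
	return ops->fbc_enabled(dev);
}

int main(void)
{
	struct display_ops none = { NULL, NULL };  /* platform without FBC */
	printf("fbc enabled: %d\n", fbc_enabled(&none, NULL));
	return 0;
}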
@@ -1167,9 +1177,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1167 if (!i915_powersave) 1177 if (!i915_powersave)
1168 return; 1178 return;
1169 1179
1170 if (!dev_priv->display.fbc_enabled || 1180 if (!I915_HAS_FBC(dev))
1171 !dev_priv->display.enable_fbc ||
1172 !dev_priv->display.disable_fbc)
1173 return; 1181 return;
1174 1182
1175 if (!crtc->fb) 1183 if (!crtc->fb)
@@ -1216,28 +1224,25 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1216 goto out_disable; 1224 goto out_disable;
1217 } 1225 }
1218 1226
1219 if (dev_priv->display.fbc_enabled(crtc)) { 1227 if (intel_fbc_enabled(dev)) {
1220 /* We can re-enable it in this case, but need to update pitch */ 1228 /* We can re-enable it in this case, but need to update pitch */
1221 if (fb->pitch > dev_priv->cfb_pitch) 1229 if ((fb->pitch > dev_priv->cfb_pitch) ||
1222 dev_priv->display.disable_fbc(dev); 1230 (obj_priv->fence_reg != dev_priv->cfb_fence) ||
1223 if (obj_priv->fence_reg != dev_priv->cfb_fence) 1231 (plane != dev_priv->cfb_plane))
1224 dev_priv->display.disable_fbc(dev); 1232 intel_disable_fbc(dev);
1225 if (plane != dev_priv->cfb_plane)
1226 dev_priv->display.disable_fbc(dev);
1227 } 1233 }
1228 1234
1229 if (!dev_priv->display.fbc_enabled(crtc)) { 1235 /* Now try to turn it back on if possible */
1230 /* Now try to turn it back on if possible */ 1236 if (!intel_fbc_enabled(dev))
1231 dev_priv->display.enable_fbc(crtc, 500); 1237 intel_enable_fbc(crtc, 500);
1232 }
1233 1238
1234 return; 1239 return;
1235 1240
1236out_disable: 1241out_disable:
1237 DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); 1242 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1238 /* Multiple disables should be harmless */ 1243 /* Multiple disables should be harmless */
1239 if (dev_priv->display.fbc_enabled(crtc)) 1244 if (intel_fbc_enabled(dev))
1240 dev_priv->display.disable_fbc(dev); 1245 intel_disable_fbc(dev);
1241} 1246}
1242 1247
1243static int 1248static int
@@ -1510,6 +1515,219 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
1510 udelay(500); 1515 udelay(500);
1511} 1516}
1512 1517
1518/* The FDI link training function for ILK/Ibex Peak. */
1519static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1520{
1521 struct drm_device *dev = crtc->dev;
1522 struct drm_i915_private *dev_priv = dev->dev_private;
1523 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1524 int pipe = intel_crtc->pipe;
1525 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
1526 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1527 int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
1528 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
1529 u32 temp, tries = 0;
1530
1531 /* enable CPU FDI TX and PCH FDI RX */
1532 temp = I915_READ(fdi_tx_reg);
1533 temp |= FDI_TX_ENABLE;
1534 temp &= ~(7 << 19);
1535 temp |= (intel_crtc->fdi_lanes - 1) << 19;
1536 temp &= ~FDI_LINK_TRAIN_NONE;
1537 temp |= FDI_LINK_TRAIN_PATTERN_1;
1538 I915_WRITE(fdi_tx_reg, temp);
1539 I915_READ(fdi_tx_reg);
1540
1541 temp = I915_READ(fdi_rx_reg);
1542 temp &= ~FDI_LINK_TRAIN_NONE;
1543 temp |= FDI_LINK_TRAIN_PATTERN_1;
1544 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
1545 I915_READ(fdi_rx_reg);
1546 udelay(150);
1547
1548 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
1549 for the training result */
1550 temp = I915_READ(fdi_rx_imr_reg);
1551 temp &= ~FDI_RX_SYMBOL_LOCK;
1552 temp &= ~FDI_RX_BIT_LOCK;
1553 I915_WRITE(fdi_rx_imr_reg, temp);
1554 I915_READ(fdi_rx_imr_reg);
1555 udelay(150);
1556
1557 for (;;) {
1558 temp = I915_READ(fdi_rx_iir_reg);
1559 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1560
1561 if ((temp & FDI_RX_BIT_LOCK)) {
1562 DRM_DEBUG_KMS("FDI train 1 done.\n");
1563 I915_WRITE(fdi_rx_iir_reg,
1564 temp | FDI_RX_BIT_LOCK);
1565 break;
1566 }
1567
1568 tries++;
1569
1570 if (tries > 5) {
1571 DRM_DEBUG_KMS("FDI train 1 fail!\n");
1572 break;
1573 }
1574 }
1575
1576 /* Train 2 */
1577 temp = I915_READ(fdi_tx_reg);
1578 temp &= ~FDI_LINK_TRAIN_NONE;
1579 temp |= FDI_LINK_TRAIN_PATTERN_2;
1580 I915_WRITE(fdi_tx_reg, temp);
1581
1582 temp = I915_READ(fdi_rx_reg);
1583 temp &= ~FDI_LINK_TRAIN_NONE;
1584 temp |= FDI_LINK_TRAIN_PATTERN_2;
1585 I915_WRITE(fdi_rx_reg, temp);
1586 udelay(150);
1587
1588 tries = 0;
1589
1590 for (;;) {
1591 temp = I915_READ(fdi_rx_iir_reg);
1592 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1593
1594 if (temp & FDI_RX_SYMBOL_LOCK) {
1595 I915_WRITE(fdi_rx_iir_reg,
1596 temp | FDI_RX_SYMBOL_LOCK);
1597 DRM_DEBUG_KMS("FDI train 2 done.\n");
1598 break;
1599 }
1600
1601 tries++;
1602
1603 if (tries > 5) {
1604 DRM_DEBUG_KMS("FDI train 2 fail!\n");
1605 break;
1606 }
1607 }
1608
1609 DRM_DEBUG_KMS("FDI train done\n");
1610}
1611
1612static int snb_b_fdi_train_param[] = {
1613 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
1614 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
1615 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
1616 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
1617};
1618
1619/* The FDI link training function for SNB/Cougar Point. */
1620static void gen6_fdi_link_train(struct drm_crtc *crtc)
1621{
1622 struct drm_device *dev = crtc->dev;
1623 struct drm_i915_private *dev_priv = dev->dev_private;
1624 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1625 int pipe = intel_crtc->pipe;
1626 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
1627 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1628 int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
1629 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
1630 u32 temp, i;
1631
1632 /* enable CPU FDI TX and PCH FDI RX */
1633 temp = I915_READ(fdi_tx_reg);
1634 temp |= FDI_TX_ENABLE;
1635 temp &= ~(7 << 19);
1636 temp |= (intel_crtc->fdi_lanes - 1) << 19;
1637 temp &= ~FDI_LINK_TRAIN_NONE;
1638 temp |= FDI_LINK_TRAIN_PATTERN_1;
1639 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1640 /* SNB-B */
1641 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
1642 I915_WRITE(fdi_tx_reg, temp);
1643 I915_READ(fdi_tx_reg);
1644
1645 temp = I915_READ(fdi_rx_reg);
1646 if (HAS_PCH_CPT(dev)) {
1647 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1648 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
1649 } else {
1650 temp &= ~FDI_LINK_TRAIN_NONE;
1651 temp |= FDI_LINK_TRAIN_PATTERN_1;
1652 }
1653 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
1654 I915_READ(fdi_rx_reg);
1655 udelay(150);
1656
1657 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
1658 for the training result */
1659 temp = I915_READ(fdi_rx_imr_reg);
1660 temp &= ~FDI_RX_SYMBOL_LOCK;
1661 temp &= ~FDI_RX_BIT_LOCK;
1662 I915_WRITE(fdi_rx_imr_reg, temp);
1663 I915_READ(fdi_rx_imr_reg);
1664 udelay(150);
1665
1666 for (i = 0; i < 4; i++) {
1667 temp = I915_READ(fdi_tx_reg);
1668 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1669 temp |= snb_b_fdi_train_param[i];
1670 I915_WRITE(fdi_tx_reg, temp);
1671 udelay(500);
1672
1673 temp = I915_READ(fdi_rx_iir_reg);
1674 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1675
1676 if (temp & FDI_RX_BIT_LOCK) {
1677 I915_WRITE(fdi_rx_iir_reg,
1678 temp | FDI_RX_BIT_LOCK);
1679 DRM_DEBUG_KMS("FDI train 1 done.\n");
1680 break;
1681 }
1682 }
1683 if (i == 4)
1684 DRM_DEBUG_KMS("FDI train 1 fail!\n");
1685
1686 /* Train 2 */
1687 temp = I915_READ(fdi_tx_reg);
1688 temp &= ~FDI_LINK_TRAIN_NONE;
1689 temp |= FDI_LINK_TRAIN_PATTERN_2;
1690 if (IS_GEN6(dev)) {
1691 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1692 /* SNB-B */
1693 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
1694 }
1695 I915_WRITE(fdi_tx_reg, temp);
1696
1697 temp = I915_READ(fdi_rx_reg);
1698 if (HAS_PCH_CPT(dev)) {
1699 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1700 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
1701 } else {
1702 temp &= ~FDI_LINK_TRAIN_NONE;
1703 temp |= FDI_LINK_TRAIN_PATTERN_2;
1704 }
1705 I915_WRITE(fdi_rx_reg, temp);
1706 udelay(150);
1707
1708 for (i = 0; i < 4; i++) {
1709 temp = I915_READ(fdi_tx_reg);
1710 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1711 temp |= snb_b_fdi_train_param[i];
1712 I915_WRITE(fdi_tx_reg, temp);
1713 udelay(500);
1714
1715 temp = I915_READ(fdi_rx_iir_reg);
1716 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1717
1718 if (temp & FDI_RX_SYMBOL_LOCK) {
1719 I915_WRITE(fdi_rx_iir_reg,
1720 temp | FDI_RX_SYMBOL_LOCK);
1721 DRM_DEBUG_KMS("FDI train 2 done.\n");
1722 break;
1723 }
1724 }
1725 if (i == 4)
1726 DRM_DEBUG_KMS("FDI train 2 fail!\n");
1727
1728 DRM_DEBUG_KMS("FDI train done.\n");
1729}
1730
1513static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) 1731static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1514{ 1732{
1515 struct drm_device *dev = crtc->dev; 1733 struct drm_device *dev = crtc->dev;
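Note: both new trainers follow the same shape: program training pattern 1, poll the RX IIR register a bounded number of times for bit lock, then repeat with pattern 2 and symbol lock before switching the link to its normal state. A compressed, runnable sketch of that control flow; read_iir() and the lock bits are stand-ins, not the real register interface:

#include <stdbool.h>
#include <stdio.h>

#define BIT_LOCK    (1u << 0)
#define SYMBOL_LOCK (1u << 1)

static unsigned fake_iir;                    /* stand-in for FDI_RX_IIR */

static unsigned read_iir(void)
{
	fake_iir |= BIT_LOCK | SYMBOL_LOCK;  /* pretend the link locks */
	return fake_iir;
}

/* Bounded polling loop, like the train-1/train-2 loops above. */
static bool wait_for_lock(unsigned bit, int tries)
{
	while (tries--)
		if (read_iir() & bit)
			return true;
	return false;
}

int main(void)
{
	printf("train 1 %s\n", wait_for_lock(BIT_LOCK, 5) ? "done" : "fail!");
	printf("train 2 %s\n", wait_for_lock(SYMBOL_LOCK, 5) ? "done" : "fail!");
	return 0;
}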
@@ -1523,8 +1741,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1523 int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; 1741 int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
1524 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; 1742 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
1525 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; 1743 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1526 int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
1527 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
1528 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; 1744 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
1529 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; 1745 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
1530 int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; 1746 int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
@@ -1541,8 +1757,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1541 int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B; 1757 int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
1542 int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; 1758 int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
1543 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; 1759 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
1760 int trans_dpll_sel = (pipe == 0) ? 0 : 1;
1544 u32 temp; 1761 u32 temp;
1545 int tries = 5, j, n; 1762 int n;
1546 u32 pipe_bpc; 1763 u32 pipe_bpc;
1547 1764
1548 temp = I915_READ(pipeconf_reg); 1765 temp = I915_READ(pipeconf_reg);
@@ -1569,12 +1786,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1569 /* enable eDP PLL */ 1786 /* enable eDP PLL */
1570 ironlake_enable_pll_edp(crtc); 1787 ironlake_enable_pll_edp(crtc);
1571 } else { 1788 } else {
1572 /* enable PCH DPLL */
1573 temp = I915_READ(pch_dpll_reg);
1574 if ((temp & DPLL_VCO_ENABLE) == 0) {
1575 I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
1576 I915_READ(pch_dpll_reg);
1577 }
1578 1789
1579 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 1790 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
1580 temp = I915_READ(fdi_rx_reg); 1791 temp = I915_READ(fdi_rx_reg);
@@ -1584,9 +1795,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1584 */ 1795 */
1585 temp &= ~(0x7 << 16); 1796 temp &= ~(0x7 << 16);
1586 temp |= (pipe_bpc << 11); 1797 temp |= (pipe_bpc << 11);
1587 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | 1798 temp &= ~(7 << 19);
1588 FDI_SEL_PCDCLK | 1799 temp |= (intel_crtc->fdi_lanes - 1) << 19;
1589 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ 1800 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
1801 I915_READ(fdi_rx_reg);
1802 udelay(200);
1803
1804 /* Switch from Rawclk to PCDclk */
1805 temp = I915_READ(fdi_rx_reg);
1806 I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
1590 I915_READ(fdi_rx_reg); 1807 I915_READ(fdi_rx_reg);
1591 udelay(200); 1808 udelay(200);
1592 1809
@@ -1629,91 +1846,32 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1629 } 1846 }
1630 1847
1631 if (!HAS_eDP) { 1848 if (!HAS_eDP) {
1632 /* enable CPU FDI TX and PCH FDI RX */ 1849 /* For PCH output, training FDI link */
1633 temp = I915_READ(fdi_tx_reg); 1850 if (IS_GEN6(dev))
1634 temp |= FDI_TX_ENABLE; 1851 gen6_fdi_link_train(crtc);
1635 temp |= FDI_DP_PORT_WIDTH_X4; /* default */ 1852 else
1636 temp &= ~FDI_LINK_TRAIN_NONE; 1853 ironlake_fdi_link_train(crtc);
1637 temp |= FDI_LINK_TRAIN_PATTERN_1;
1638 I915_WRITE(fdi_tx_reg, temp);
1639 I915_READ(fdi_tx_reg);
1640
1641 temp = I915_READ(fdi_rx_reg);
1642 temp &= ~FDI_LINK_TRAIN_NONE;
1643 temp |= FDI_LINK_TRAIN_PATTERN_1;
1644 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
1645 I915_READ(fdi_rx_reg);
1646
1647 udelay(150);
1648
1649 /* Train FDI. */
1650 /* umask FDI RX Interrupt symbol_lock and bit_lock bit
1651 for train result */
1652 temp = I915_READ(fdi_rx_imr_reg);
1653 temp &= ~FDI_RX_SYMBOL_LOCK;
1654 temp &= ~FDI_RX_BIT_LOCK;
1655 I915_WRITE(fdi_rx_imr_reg, temp);
1656 I915_READ(fdi_rx_imr_reg);
1657 udelay(150);
1658 1854
1659 temp = I915_READ(fdi_rx_iir_reg); 1855 /* enable PCH DPLL */
1660 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 1856 temp = I915_READ(pch_dpll_reg);
1661 1857 if ((temp & DPLL_VCO_ENABLE) == 0) {
1662 if ((temp & FDI_RX_BIT_LOCK) == 0) { 1858 I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
1663 for (j = 0; j < tries; j++) { 1859 I915_READ(pch_dpll_reg);
1664 temp = I915_READ(fdi_rx_iir_reg);
1665 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
1666 temp);
1667 if (temp & FDI_RX_BIT_LOCK)
1668 break;
1669 udelay(200);
1670 }
1671 if (j != tries)
1672 I915_WRITE(fdi_rx_iir_reg,
1673 temp | FDI_RX_BIT_LOCK);
1674 else
1675 DRM_DEBUG_KMS("train 1 fail\n");
1676 } else {
1677 I915_WRITE(fdi_rx_iir_reg,
1678 temp | FDI_RX_BIT_LOCK);
1679 DRM_DEBUG_KMS("train 1 ok 2!\n");
1680 } 1860 }
1681 temp = I915_READ(fdi_tx_reg); 1861 udelay(200);
1682 temp &= ~FDI_LINK_TRAIN_NONE;
1683 temp |= FDI_LINK_TRAIN_PATTERN_2;
1684 I915_WRITE(fdi_tx_reg, temp);
1685
1686 temp = I915_READ(fdi_rx_reg);
1687 temp &= ~FDI_LINK_TRAIN_NONE;
1688 temp |= FDI_LINK_TRAIN_PATTERN_2;
1689 I915_WRITE(fdi_rx_reg, temp);
1690
1691 udelay(150);
1692 1862
1693 temp = I915_READ(fdi_rx_iir_reg); 1863 if (HAS_PCH_CPT(dev)) {
1694 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 1864 /* Be sure PCH DPLL SEL is set */
1695 1865 temp = I915_READ(PCH_DPLL_SEL);
1696 if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { 1866 if (trans_dpll_sel == 0 &&
1697 for (j = 0; j < tries; j++) { 1867 (temp & TRANSA_DPLL_ENABLE) == 0)
1698 temp = I915_READ(fdi_rx_iir_reg); 1868 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
1699 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", 1869 else if (trans_dpll_sel == 1 &&
1700 temp); 1870 (temp & TRANSB_DPLL_ENABLE) == 0)
1701 if (temp & FDI_RX_SYMBOL_LOCK) 1871 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
1702 break; 1872 I915_WRITE(PCH_DPLL_SEL, temp);
1703 udelay(200); 1873 I915_READ(PCH_DPLL_SEL);
1704 }
1705 if (j != tries) {
1706 I915_WRITE(fdi_rx_iir_reg,
1707 temp | FDI_RX_SYMBOL_LOCK);
1708 DRM_DEBUG_KMS("train 2 ok 1!\n");
1709 } else
1710 DRM_DEBUG_KMS("train 2 fail\n");
1711 } else {
1712 I915_WRITE(fdi_rx_iir_reg,
1713 temp | FDI_RX_SYMBOL_LOCK);
1714 DRM_DEBUG_KMS("train 2 ok 2!\n");
1715 } 1874 }
1716 DRM_DEBUG_KMS("train done\n");
1717 1875
1718 /* set transcoder timing */ 1876 /* set transcoder timing */
1719 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); 1877 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
@@ -1724,6 +1882,60 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1724 I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); 1882 I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
1725 I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); 1883 I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
1726 1884
1885 /* enable normal train */
1886 temp = I915_READ(fdi_tx_reg);
1887 temp &= ~FDI_LINK_TRAIN_NONE;
1888 I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
1889 FDI_TX_ENHANCE_FRAME_ENABLE);
1890 I915_READ(fdi_tx_reg);
1891
1892 temp = I915_READ(fdi_rx_reg);
1893 if (HAS_PCH_CPT(dev)) {
1894 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1895 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
1896 } else {
1897 temp &= ~FDI_LINK_TRAIN_NONE;
1898 temp |= FDI_LINK_TRAIN_NONE;
1899 }
1900 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
1901 I915_READ(fdi_rx_reg);
1902
1903 /* wait one idle pattern time */
1904 udelay(100);
1905
1906 /* For PCH DP, enable TRANS_DP_CTL */
1907 if (HAS_PCH_CPT(dev) &&
1908 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
1909 int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
1910 int reg;
1911
1912 reg = I915_READ(trans_dp_ctl);
1913 reg &= ~TRANS_DP_PORT_SEL_MASK;
1914 reg = TRANS_DP_OUTPUT_ENABLE |
1915 TRANS_DP_ENH_FRAMING |
1916 TRANS_DP_VSYNC_ACTIVE_HIGH |
1917 TRANS_DP_HSYNC_ACTIVE_HIGH;
1918
1919 switch (intel_trans_dp_port_sel(crtc)) {
1920 case PCH_DP_B:
1921 reg |= TRANS_DP_PORT_SEL_B;
1922 break;
1923 case PCH_DP_C:
1924 reg |= TRANS_DP_PORT_SEL_C;
1925 break;
1926 case PCH_DP_D:
1927 reg |= TRANS_DP_PORT_SEL_D;
1928 break;
1929 default:
1930 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
1931 reg |= TRANS_DP_PORT_SEL_B;
1932 break;
1933 }
1934
1935 I915_WRITE(trans_dp_ctl, reg);
1936 POSTING_READ(trans_dp_ctl);
1937 }
1938
1727 /* enable PCH transcoder */ 1939 /* enable PCH transcoder */
1728 temp = I915_READ(transconf_reg); 1940 temp = I915_READ(transconf_reg);
1729 /* 1941 /*
@@ -1738,23 +1950,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1738 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) 1950 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
1739 ; 1951 ;
1740 1952
1741 /* enable normal */
1742
1743 temp = I915_READ(fdi_tx_reg);
1744 temp &= ~FDI_LINK_TRAIN_NONE;
1745 I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
1746 FDI_TX_ENHANCE_FRAME_ENABLE);
1747 I915_READ(fdi_tx_reg);
1748
1749 temp = I915_READ(fdi_rx_reg);
1750 temp &= ~FDI_LINK_TRAIN_NONE;
1751 I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
1752 FDI_RX_ENHANCE_FRAME_ENABLE);
1753 I915_READ(fdi_rx_reg);
1754
1755 /* wait one idle pattern time */
1756 udelay(100);
1757
1758 } 1953 }
1759 1954
1760 intel_crtc_load_lut(crtc); 1955 intel_crtc_load_lut(crtc);
@@ -1805,6 +2000,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1805 I915_READ(pf_ctl_reg); 2000 I915_READ(pf_ctl_reg);
1806 } 2001 }
1807 I915_WRITE(pf_win_size, 0); 2002 I915_WRITE(pf_win_size, 0);
2003 POSTING_READ(pf_win_size);
2004
1808 2005
1809 /* disable CPU FDI tx and PCH FDI rx */ 2006 /* disable CPU FDI tx and PCH FDI rx */
1810 temp = I915_READ(fdi_tx_reg); 2007 temp = I915_READ(fdi_tx_reg);
@@ -1825,11 +2022,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1825 temp &= ~FDI_LINK_TRAIN_NONE; 2022 temp &= ~FDI_LINK_TRAIN_NONE;
1826 temp |= FDI_LINK_TRAIN_PATTERN_1; 2023 temp |= FDI_LINK_TRAIN_PATTERN_1;
1827 I915_WRITE(fdi_tx_reg, temp); 2024 I915_WRITE(fdi_tx_reg, temp);
2025 POSTING_READ(fdi_tx_reg);
1828 2026
1829 temp = I915_READ(fdi_rx_reg); 2027 temp = I915_READ(fdi_rx_reg);
1830 temp &= ~FDI_LINK_TRAIN_NONE; 2028 if (HAS_PCH_CPT(dev)) {
1831 temp |= FDI_LINK_TRAIN_PATTERN_1; 2029 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2030 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2031 } else {
2032 temp &= ~FDI_LINK_TRAIN_NONE;
2033 temp |= FDI_LINK_TRAIN_PATTERN_1;
2034 }
1832 I915_WRITE(fdi_rx_reg, temp); 2035 I915_WRITE(fdi_rx_reg, temp);
2036 POSTING_READ(fdi_rx_reg);
1833 2037
1834 udelay(100); 2038 udelay(100);
1835 2039
@@ -1859,6 +2063,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1859 } 2063 }
1860 } 2064 }
1861 } 2065 }
2066
1862 temp = I915_READ(transconf_reg); 2067 temp = I915_READ(transconf_reg);
1863 /* BPC in transcoder is consistent with that in pipeconf */ 2068 /* BPC in transcoder is consistent with that in pipeconf */
1864 temp &= ~PIPE_BPC_MASK; 2069 temp &= ~PIPE_BPC_MASK;
@@ -1867,35 +2072,53 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1867 I915_READ(transconf_reg); 2072 I915_READ(transconf_reg);
1868 udelay(100); 2073 udelay(100);
1869 2074
2075 if (HAS_PCH_CPT(dev)) {
2076 /* disable TRANS_DP_CTL */
2077 int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
2078 int reg;
2079
2080 reg = I915_READ(trans_dp_ctl);
2081 reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
2082 I915_WRITE(trans_dp_ctl, reg);
2083 POSTING_READ(trans_dp_ctl);
2084
2085 /* disable DPLL_SEL */
2086 temp = I915_READ(PCH_DPLL_SEL);
2087 if (trans_dpll_sel == 0)
2088 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
2089 else
2090 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2091 I915_WRITE(PCH_DPLL_SEL, temp);
2092 I915_READ(PCH_DPLL_SEL);
2093
2094 }
2095
1870 /* disable PCH DPLL */ 2096 /* disable PCH DPLL */
1871 temp = I915_READ(pch_dpll_reg); 2097 temp = I915_READ(pch_dpll_reg);
1872 if ((temp & DPLL_VCO_ENABLE) != 0) { 2098 I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
1873 I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); 2099 I915_READ(pch_dpll_reg);
1874 I915_READ(pch_dpll_reg);
1875 }
1876 2100
1877 if (HAS_eDP) { 2101 if (HAS_eDP) {
1878 ironlake_disable_pll_edp(crtc); 2102 ironlake_disable_pll_edp(crtc);
1879 } 2103 }
1880 2104
2105 /* Switch from PCDclk to Rawclk */
1881 temp = I915_READ(fdi_rx_reg); 2106 temp = I915_READ(fdi_rx_reg);
1882 temp &= ~FDI_SEL_PCDCLK; 2107 temp &= ~FDI_SEL_PCDCLK;
1883 I915_WRITE(fdi_rx_reg, temp); 2108 I915_WRITE(fdi_rx_reg, temp);
1884 I915_READ(fdi_rx_reg); 2109 I915_READ(fdi_rx_reg);
1885 2110
2111 /* Disable CPU FDI TX PLL */
2112 temp = I915_READ(fdi_tx_reg);
2113 I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
2114 I915_READ(fdi_tx_reg);
2115 udelay(100);
2116
1886 temp = I915_READ(fdi_rx_reg); 2117 temp = I915_READ(fdi_rx_reg);
1887 temp &= ~FDI_RX_PLL_ENABLE; 2118 temp &= ~FDI_RX_PLL_ENABLE;
1888 I915_WRITE(fdi_rx_reg, temp); 2119 I915_WRITE(fdi_rx_reg, temp);
1889 I915_READ(fdi_rx_reg); 2120 I915_READ(fdi_rx_reg);
1890 2121
1891 /* Disable CPU FDI TX PLL */
1892 temp = I915_READ(fdi_tx_reg);
1893 if ((temp & FDI_TX_PLL_ENABLE) != 0) {
1894 I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
1895 I915_READ(fdi_tx_reg);
1896 udelay(100);
1897 }
1898
1899 /* Wait for the clocks to turn off. */ 2122 /* Wait for the clocks to turn off. */
1900 udelay(100); 2123 udelay(100);
1901 break; 2124 break;
@@ -2331,6 +2554,30 @@ static struct intel_watermark_params i830_wm_info = {
2331 I830_FIFO_LINE_SIZE 2554 I830_FIFO_LINE_SIZE
2332}; 2555};
2333 2556
2557static struct intel_watermark_params ironlake_display_wm_info = {
2558 ILK_DISPLAY_FIFO,
2559 ILK_DISPLAY_MAXWM,
2560 ILK_DISPLAY_DFTWM,
2561 2,
2562 ILK_FIFO_LINE_SIZE
2563};
2564
2565static struct intel_watermark_params ironlake_display_srwm_info = {
2566 ILK_DISPLAY_SR_FIFO,
2567 ILK_DISPLAY_MAX_SRWM,
2568 ILK_DISPLAY_DFT_SRWM,
2569 2,
2570 ILK_FIFO_LINE_SIZE
2571};
2572
2573static struct intel_watermark_params ironlake_cursor_srwm_info = {
2574 ILK_CURSOR_SR_FIFO,
2575 ILK_CURSOR_MAX_SRWM,
2576 ILK_CURSOR_DFT_SRWM,
2577 2,
2578 ILK_FIFO_LINE_SIZE
2579};
2580
2334/** 2581/**
2335 * intel_calculate_wm - calculate watermark level 2582 * intel_calculate_wm - calculate watermark level
2336 * @clock_in_khz: pixel clock 2583 * @clock_in_khz: pixel clock
@@ -2449,66 +2696,6 @@ static void pineview_disable_cxsr(struct drm_device *dev)
2449 DRM_INFO("Big FIFO is disabled\n"); 2696 DRM_INFO("Big FIFO is disabled\n");
2450} 2697}
2451 2698
2452static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
2453 int pixel_size)
2454{
2455 struct drm_i915_private *dev_priv = dev->dev_private;
2456 u32 reg;
2457 unsigned long wm;
2458 struct cxsr_latency *latency;
2459
2460 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
2461 dev_priv->mem_freq);
2462 if (!latency) {
2463 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
2464 pineview_disable_cxsr(dev);
2465 return;
2466 }
2467
2468 /* Display SR */
2469 wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
2470 latency->display_sr);
2471 reg = I915_READ(DSPFW1);
2472 reg &= 0x7fffff;
2473 reg |= wm << 23;
2474 I915_WRITE(DSPFW1, reg);
2475 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
2476
2477 /* cursor SR */
2478 wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
2479 latency->cursor_sr);
2480 reg = I915_READ(DSPFW3);
2481 reg &= ~(0x3f << 24);
2482 reg |= (wm & 0x3f) << 24;
2483 I915_WRITE(DSPFW3, reg);
2484
2485 /* Display HPLL off SR */
2486 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
2487 latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
2488 reg = I915_READ(DSPFW3);
2489 reg &= 0xfffffe00;
2490 reg |= wm & 0x1ff;
2491 I915_WRITE(DSPFW3, reg);
2492
2493 /* cursor HPLL off SR */
2494 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
2495 latency->cursor_hpll_disable);
2496 reg = I915_READ(DSPFW3);
2497 reg &= ~(0x3f << 16);
2498 reg |= (wm & 0x3f) << 16;
2499 I915_WRITE(DSPFW3, reg);
2500 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
2501
2502 /* activate cxsr */
2503 reg = I915_READ(DSPFW3);
2504 reg |= PINEVIEW_SELF_REFRESH_EN;
2505 I915_WRITE(DSPFW3, reg);
2506
2507 DRM_INFO("Big FIFO is enabled\n");
2508
2509 return;
2510}
2511
2512/* 2699/*
2513 * Latency for FIFO fetches is dependent on several factors: 2700 * Latency for FIFO fetches is dependent on several factors:
2514 * - memory configuration (speed, channels) 2701 * - memory configuration (speed, channels)
@@ -2593,6 +2780,71 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
2593 return size; 2780 return size;
2594} 2781}
2595 2782
2783static void pineview_update_wm(struct drm_device *dev, int planea_clock,
2784 int planeb_clock, int sr_hdisplay, int pixel_size)
2785{
2786 struct drm_i915_private *dev_priv = dev->dev_private;
2787 u32 reg;
2788 unsigned long wm;
2789 struct cxsr_latency *latency;
2790 int sr_clock;
2791
2792 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
2793 dev_priv->mem_freq);
2794 if (!latency) {
2795 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
2796 pineview_disable_cxsr(dev);
2797 return;
2798 }
2799
2800 if (!planea_clock || !planeb_clock) {
2801 sr_clock = planea_clock ? planea_clock : planeb_clock;
2802
2803 /* Display SR */
2804 wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
2805 pixel_size, latency->display_sr);
2806 reg = I915_READ(DSPFW1);
2807 reg &= ~DSPFW_SR_MASK;
2808 reg |= wm << DSPFW_SR_SHIFT;
2809 I915_WRITE(DSPFW1, reg);
2810 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
2811
2812 /* cursor SR */
2813 wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
2814 pixel_size, latency->cursor_sr);
2815 reg = I915_READ(DSPFW3);
2816 reg &= ~DSPFW_CURSOR_SR_MASK;
2817 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
2818 I915_WRITE(DSPFW3, reg);
2819
2820 /* Display HPLL off SR */
2821 wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
2822 pixel_size, latency->display_hpll_disable);
2823 reg = I915_READ(DSPFW3);
2824 reg &= ~DSPFW_HPLL_SR_MASK;
2825 reg |= wm & DSPFW_HPLL_SR_MASK;
2826 I915_WRITE(DSPFW3, reg);
2827
2828 /* cursor HPLL off SR */
2829 wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
2830 pixel_size, latency->cursor_hpll_disable);
2831 reg = I915_READ(DSPFW3);
2832 reg &= ~DSPFW_HPLL_CURSOR_MASK;
2833 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
2834 I915_WRITE(DSPFW3, reg);
2835 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
2836
2837 /* activate cxsr */
2838 reg = I915_READ(DSPFW3);
2839 reg |= PINEVIEW_SELF_REFRESH_EN;
2840 I915_WRITE(DSPFW3, reg);
2841 DRM_DEBUG_KMS("Self-refresh is enabled\n");
2842 } else {
2843 pineview_disable_cxsr(dev);
2844 DRM_DEBUG_KMS("Self-refresh is disabled\n");
2845 }
2846}
2847
2596static void g4x_update_wm(struct drm_device *dev, int planea_clock, 2848static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2597 int planeb_clock, int sr_hdisplay, int pixel_size) 2849 int planeb_clock, int sr_hdisplay, int pixel_size)
2598{ 2850{
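Note: every DSPFW write in pineview_update_wm() above is the same read-modify-write idiom: read the register, clear one field with its mask, OR the new watermark in at the field's shift, and write it back. A self-contained sketch of that idiom; the field layout and starting value are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define SR_SHIFT 23
#define SR_MASK  (0x1ffu << SR_SHIFT)

/* Replace one bitfield without disturbing the rest of the register. */
static uint32_t set_sr_field(uint32_t reg, uint32_t wm)
{
	reg &= ~SR_MASK;
	reg |= (wm << SR_SHIFT) & SR_MASK;
	return reg;
}

int main(void)
{
	uint32_t dspfw1 = 0x00345678;        /* pretend I915_READ() result */
	printf("0x%08x\n", set_sr_field(dspfw1, 0x2a));
	return 0;
}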
@@ -2813,6 +3065,108 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
2813 I915_WRITE(FW_BLC, fwater_lo); 3065 I915_WRITE(FW_BLC, fwater_lo);
2814} 3066}
2815 3067
3068#define ILK_LP0_PLANE_LATENCY 700
3069
3070static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3071 int planeb_clock, int sr_hdisplay, int pixel_size)
3072{
3073 struct drm_i915_private *dev_priv = dev->dev_private;
3074 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
3075 int sr_wm, cursor_wm;
3076 unsigned long line_time_us;
3077 int sr_clock, entries_required;
3078 u32 reg_value;
3079
3080 /* Calculate and update the watermark for plane A */
3081 if (planea_clock) {
3082 entries_required = ((planea_clock / 1000) * pixel_size *
3083 ILK_LP0_PLANE_LATENCY) / 1000;
3084 entries_required = DIV_ROUND_UP(entries_required,
3085 ironlake_display_wm_info.cacheline_size);
3086 planea_wm = entries_required +
3087 ironlake_display_wm_info.guard_size;
3088
3089 if (planea_wm > (int)ironlake_display_wm_info.max_wm)
3090 planea_wm = ironlake_display_wm_info.max_wm;
3091
3092 cursora_wm = 16;
3093 reg_value = I915_READ(WM0_PIPEA_ILK);
3094 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
3095 reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
3096 (cursora_wm & WM0_PIPE_CURSOR_MASK);
3097 I915_WRITE(WM0_PIPEA_ILK, reg_value);
3098 DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
3099 "cursor: %d\n", planea_wm, cursora_wm);
3100 }
3101 /* Calculate and update the watermark for plane B */
3102 if (planeb_clock) {
3103 entries_required = ((planeb_clock / 1000) * pixel_size *
3104 ILK_LP0_PLANE_LATENCY) / 1000;
3105 entries_required = DIV_ROUND_UP(entries_required,
3106 ironlake_display_wm_info.cacheline_size);
3107 planeb_wm = entries_required +
3108 ironlake_display_wm_info.guard_size;
3109
3110 if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
3111 planeb_wm = ironlake_display_wm_info.max_wm;
3112
3113 cursorb_wm = 16;
3114 reg_value = I915_READ(WM0_PIPEB_ILK);
3115 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
3116 reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
3117 (cursorb_wm & WM0_PIPE_CURSOR_MASK);
3118 I915_WRITE(WM0_PIPEB_ILK, reg_value);
3119 DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
3120 "cursor: %d\n", planeb_wm, cursorb_wm);
3121 }
3122
3123 /*
3124 * Calculate and update the self-refresh watermark only when one
3125 * display plane is used.
3126 */
3127 if (!planea_clock || !planeb_clock) {
3128 int line_count;
3129 /* Read the self-refresh latency. The unit is 0.5us */
3130 int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
3131
3132 sr_clock = planea_clock ? planea_clock : planeb_clock;
3133 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
3134
3135 /* Use ns/us then divide to preserve precision */
3136 line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
3137 / 1000;
3138
3139 /* calculate the self-refresh watermark for display plane */
3140 entries_required = line_count * sr_hdisplay * pixel_size;
3141 entries_required = DIV_ROUND_UP(entries_required,
3142 ironlake_display_srwm_info.cacheline_size);
3143 sr_wm = entries_required +
3144 ironlake_display_srwm_info.guard_size;
3145
3146 /* calculate the self-refresh watermark for display cursor */
3147 entries_required = line_count * pixel_size * 64;
3148 entries_required = DIV_ROUND_UP(entries_required,
3149 ironlake_cursor_srwm_info.cacheline_size);
3150 cursor_wm = entries_required +
3151 ironlake_cursor_srwm_info.guard_size;
3152
3153 /* configure watermark and enable self-refresh */
3154 reg_value = I915_READ(WM1_LP_ILK);
3155 reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
3156 WM1_LP_CURSOR_MASK);
3157 reg_value |= WM1_LP_SR_EN |
3158 (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3159 (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
3160
3161 I915_WRITE(WM1_LP_ILK, reg_value);
3162 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
3163 "cursor %d\n", sr_wm, cursor_wm);
3164
3165 } else {
3166 /* Turn off self refresh if both pipes are enabled */
3167 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
3168 }
3169}
2816/** 3170/**
2817 * intel_update_watermarks - update FIFO watermark values based on current modes 3171 * intel_update_watermarks - update FIFO watermark values based on current modes
2818 * 3172 *
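Note: to make the LP0 arithmetic above concrete, here is the same computation as a standalone program for a hypothetical single-plane mode (148,500 kHz pixel clock at 4 bytes per pixel). The latency and guard values come from the hunk; the 64-byte line size is an assumption about ILK_FIFO_LINE_SIZE:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int clock_khz  = 148500; /* hypothetical plane pixel clock */
	int pixel_size = 4;      /* bytes per pixel */
	int latency_ns = 700;    /* ILK_LP0_PLANE_LATENCY */
	int line_size  = 64;     /* assumed ILK_FIFO_LINE_SIZE */
	int guard      = 2;      /* guard_size from ironlake_display_wm_info */

	/* Bytes fetched during the LP0 latency window, then FIFO entries. */
	int entries = ((clock_khz / 1000) * pixel_size * latency_ns) / 1000;
	int wm = DIV_ROUND_UP(entries, line_size) + guard;

	printf("entries=%d wm=%d\n", entries, wm); /* entries=414 wm=9 */
	return 0;
}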
@@ -2882,12 +3236,6 @@ static void intel_update_watermarks(struct drm_device *dev)
2882 if (enabled <= 0) 3236 if (enabled <= 0)
2883 return; 3237 return;
2884 3238
2885 /* Single plane configs can enable self refresh */
2886 if (enabled == 1 && IS_PINEVIEW(dev))
2887 pineview_enable_cxsr(dev, sr_clock, pixel_size);
2888 else if (IS_PINEVIEW(dev))
2889 pineview_disable_cxsr(dev);
2890
2891 dev_priv->display.update_wm(dev, planea_clock, planeb_clock, 3239 dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
2892 sr_hdisplay, pixel_size); 3240 sr_hdisplay, pixel_size);
2893} 3241}
@@ -2924,7 +3272,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2924 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 3272 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
2925 bool is_edp = false; 3273 bool is_edp = false;
2926 struct drm_mode_config *mode_config = &dev->mode_config; 3274 struct drm_mode_config *mode_config = &dev->mode_config;
2927 struct drm_connector *connector; 3275 struct drm_encoder *encoder;
3276 struct intel_encoder *intel_encoder = NULL;
2928 const intel_limit_t *limit; 3277 const intel_limit_t *limit;
2929 int ret; 3278 int ret;
2930 struct fdi_m_n m_n = {0}; 3279 struct fdi_m_n m_n = {0};
@@ -2935,6 +3284,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2935 int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; 3284 int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
2936 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; 3285 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
2937 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; 3286 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
3287 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
3288 int trans_dpll_sel = (pipe == 0) ? 0 : 1;
2938 int lvds_reg = LVDS; 3289 int lvds_reg = LVDS;
2939 u32 temp; 3290 u32 temp;
2940 int sdvo_pixel_multiply; 3291 int sdvo_pixel_multiply;
@@ -2942,12 +3293,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2942 3293
2943 drm_vblank_pre_modeset(dev, pipe); 3294 drm_vblank_pre_modeset(dev, pipe);
2944 3295
2945 list_for_each_entry(connector, &mode_config->connector_list, head) { 3296 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
2946 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
2947 3297
2948 if (!connector->encoder || connector->encoder->crtc != crtc) 3298 if (!encoder || encoder->crtc != crtc)
2949 continue; 3299 continue;
2950 3300
3301 intel_encoder = enc_to_intel_encoder(encoder);
3302
2951 switch (intel_encoder->type) { 3303 switch (intel_encoder->type) {
2952 case INTEL_OUTPUT_LVDS: 3304 case INTEL_OUTPUT_LVDS:
2953 is_lvds = true; 3305 is_lvds = true;
@@ -3043,14 +3395,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3043 3395
3044 /* FDI link */ 3396 /* FDI link */
3045 if (HAS_PCH_SPLIT(dev)) { 3397 if (HAS_PCH_SPLIT(dev)) {
3046 int lane, link_bw, bpp; 3398 int lane = 0, link_bw, bpp;
3047 /* eDP doesn't require FDI link, so just set DP M/N 3399 /* eDP doesn't require FDI link, so just set DP M/N
3048 according to current link config */ 3400 according to current link config */
3049 if (is_edp) { 3401 if (is_edp) {
3050 struct drm_connector *edp;
3051 target_clock = mode->clock; 3402 target_clock = mode->clock;
3052 edp = intel_pipe_get_connector(crtc); 3403 intel_edp_link_config(intel_encoder,
3053 intel_edp_link_config(to_intel_encoder(edp),
3054 &lane, &link_bw); 3404 &lane, &link_bw);
3055 } else { 3405 } else {
3056 /* DP over FDI requires target mode clock 3406 /* DP over FDI requires target mode clock
@@ -3059,7 +3409,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3059 target_clock = mode->clock; 3409 target_clock = mode->clock;
3060 else 3410 else
3061 target_clock = adjusted_mode->clock; 3411 target_clock = adjusted_mode->clock;
3062 lane = 4;
3063 link_bw = 270000; 3412 link_bw = 270000;
3064 } 3413 }
3065 3414
@@ -3111,6 +3460,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3111 bpp = 24; 3460 bpp = 24;
3112 } 3461 }
3113 3462
3463 if (!lane) {
3464 /*
3465 * Account for spread spectrum to avoid
3466 * oversubscribing the link. Max center spread
3467 * is 2.5%; use 5% for safety's sake.
3468 */
3469 u32 bps = target_clock * bpp * 21 / 20;
3470 lane = bps / (link_bw * 8) + 1;
3471 }
3472
3473 intel_crtc->fdi_lanes = lane;
3474
3114 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); 3475 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
3115 } 3476 }
3116 3477
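Note: the lane fallback above pads the required bandwidth by 5% (x 21/20) so spread-spectrum clocking cannot oversubscribe the link, then divides by the per-lane byte rate. Worked through as a standalone program for a hypothetical 148,500 kHz, 24 bpp mode on a 270,000 kHz FDI link:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t target_clock = 148500; /* kHz, hypothetical mode */
	uint32_t bpp          = 24;
	uint32_t link_bw      = 270000; /* kHz per lane */

	/* Same formula the hunk adds: +5% headroom for spread spectrum. */
	uint32_t bps  = target_clock * bpp * 21 / 20;
	uint32_t lane = bps / (link_bw * 8) + 1;

	printf("bps=%u lane=%u\n", bps, lane); /* bps=3742200 lane=2 */
	return 0;
}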
@@ -3265,11 +3626,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3265 pipeconf &= ~PIPEACONF_DOUBLE_WIDE; 3626 pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
3266 } 3627 }
3267 3628
3268 dspcntr |= DISPLAY_PLANE_ENABLE;
3269 pipeconf |= PIPEACONF_ENABLE;
3270 dpll |= DPLL_VCO_ENABLE;
3271
3272
3273 /* Disable the panel fitter if it was on our pipe */ 3629 /* Disable the panel fitter if it was on our pipe */
3274 if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) 3630 if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
3275 I915_WRITE(PFIT_CONTROL, 0); 3631 I915_WRITE(PFIT_CONTROL, 0);
@@ -3292,6 +3648,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3292 udelay(150); 3648 udelay(150);
3293 } 3649 }
3294 3650
3651 /* enable transcoder DPLL */
3652 if (HAS_PCH_CPT(dev)) {
3653 temp = I915_READ(PCH_DPLL_SEL);
3654 if (trans_dpll_sel == 0)
3655 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
3656 else
3657 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3658 I915_WRITE(PCH_DPLL_SEL, temp);
3659 I915_READ(PCH_DPLL_SEL);
3660 udelay(150);
3661 }
3662
3295 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 3663 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
3296 * This is an exception to the general rule that mode_set doesn't turn 3664 * This is an exception to the general rule that mode_set doesn't turn
3297 * things on. 3665 * things on.
@@ -3303,7 +3671,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3303 lvds_reg = PCH_LVDS; 3671 lvds_reg = PCH_LVDS;
3304 3672
3305 lvds = I915_READ(lvds_reg); 3673 lvds = I915_READ(lvds_reg);
3306 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; 3674 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
3675 if (pipe == 1) {
3676 if (HAS_PCH_CPT(dev))
3677 lvds |= PORT_TRANS_B_SEL_CPT;
3678 else
3679 lvds |= LVDS_PIPEB_SELECT;
3680 } else {
3681 if (HAS_PCH_CPT(dev))
3682 lvds &= ~PORT_TRANS_SEL_MASK;
3683 else
3684 lvds &= ~LVDS_PIPEB_SELECT;
3685 }
3307 /* set the corresponding LVDS_BORDER bit */ 3686
3308 lvds |= dev_priv->lvds_border_bits; 3687 lvds |= dev_priv->lvds_border_bits;
3309 /* Set the B0-B3 data pairs corresponding to whether we're going to 3688 /* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -3321,14 +3700,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3321 /* set the dithering flag */ 3700 /* set the dithering flag */
3322 if (IS_I965G(dev)) { 3701 if (IS_I965G(dev)) {
3323 if (dev_priv->lvds_dither) { 3702 if (dev_priv->lvds_dither) {
3324 if (HAS_PCH_SPLIT(dev)) 3703 if (HAS_PCH_SPLIT(dev)) {
3325 pipeconf |= PIPE_ENABLE_DITHER; 3704 pipeconf |= PIPE_ENABLE_DITHER;
3326 else 3705 pipeconf |= PIPE_DITHER_TYPE_ST01;
3706 } else
3327 lvds |= LVDS_ENABLE_DITHER; 3707 lvds |= LVDS_ENABLE_DITHER;
3328 } else { 3708 } else {
3329 if (HAS_PCH_SPLIT(dev)) 3709 if (HAS_PCH_SPLIT(dev)) {
3330 pipeconf &= ~PIPE_ENABLE_DITHER; 3710 pipeconf &= ~PIPE_ENABLE_DITHER;
3331 else 3711 pipeconf &= ~PIPE_DITHER_TYPE_MASK;
3712 } else
3332 lvds &= ~LVDS_ENABLE_DITHER; 3713 lvds &= ~LVDS_ENABLE_DITHER;
3333 } 3714 }
3334 } 3715 }
@@ -3337,6 +3718,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3337 } 3718 }
3338 if (is_dp) 3719 if (is_dp)
3339 intel_dp_set_m_n(crtc, mode, adjusted_mode); 3720 intel_dp_set_m_n(crtc, mode, adjusted_mode);
3721 else if (HAS_PCH_SPLIT(dev)) {
3722 /* For non-DP output, clear any trans DP clock recovery setting.*/
3723 if (pipe == 0) {
3724 I915_WRITE(TRANSA_DATA_M1, 0);
3725 I915_WRITE(TRANSA_DATA_N1, 0);
3726 I915_WRITE(TRANSA_DP_LINK_M1, 0);
3727 I915_WRITE(TRANSA_DP_LINK_N1, 0);
3728 } else {
3729 I915_WRITE(TRANSB_DATA_M1, 0);
3730 I915_WRITE(TRANSB_DATA_N1, 0);
3731 I915_WRITE(TRANSB_DP_LINK_M1, 0);
3732 I915_WRITE(TRANSB_DP_LINK_N1, 0);
3733 }
3734 }
3340 3735
3341 if (!is_edp) { 3736 if (!is_edp) {
3342 I915_WRITE(fp_reg, fp); 3737 I915_WRITE(fp_reg, fp);
@@ -3411,6 +3806,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3411 /* enable FDI RX PLL too */ 3806 /* enable FDI RX PLL too */
3412 temp = I915_READ(fdi_rx_reg); 3807 temp = I915_READ(fdi_rx_reg);
3413 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); 3808 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
3809 I915_READ(fdi_rx_reg);
3810 udelay(200);
3811
3812 /* enable FDI TX PLL too */
3813 temp = I915_READ(fdi_tx_reg);
3814 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
3815 I915_READ(fdi_tx_reg);
3816
3817 /* enable FDI RX PCDCLK */
3818 temp = I915_READ(fdi_rx_reg);
3819 I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
3820 I915_READ(fdi_rx_reg);
3414 udelay(200); 3821 udelay(200);
3415 } 3822 }
3416 } 3823 }
@@ -3671,6 +4078,7 @@ static struct drm_display_mode load_detect_mode = {
3671}; 4078};
3672 4079
3673struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 4080struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
4081 struct drm_connector *connector,
3674 struct drm_display_mode *mode, 4082 struct drm_display_mode *mode,
3675 int *dpms_mode) 4083 int *dpms_mode)
3676{ 4084{
@@ -3729,7 +4137,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
3729 } 4137 }
3730 4138
3731 encoder->crtc = crtc; 4139 encoder->crtc = crtc;
3732 intel_encoder->base.encoder = encoder; 4140 connector->encoder = encoder;
3733 intel_encoder->load_detect_temp = true; 4141 intel_encoder->load_detect_temp = true;
3734 4142
3735 intel_crtc = to_intel_crtc(crtc); 4143 intel_crtc = to_intel_crtc(crtc);
@@ -3755,7 +4163,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
3755 return crtc; 4163 return crtc;
3756} 4164}
3757 4165
3758void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode) 4166void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
4167 struct drm_connector *connector, int dpms_mode)
3759{ 4168{
3760 struct drm_encoder *encoder = &intel_encoder->enc; 4169 struct drm_encoder *encoder = &intel_encoder->enc;
3761 struct drm_device *dev = encoder->dev; 4170 struct drm_device *dev = encoder->dev;
@@ -3765,7 +4174,7 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
3765 4174
3766 if (intel_encoder->load_detect_temp) { 4175 if (intel_encoder->load_detect_temp) {
3767 encoder->crtc = NULL; 4176 encoder->crtc = NULL;
3768 intel_encoder->base.encoder = NULL; 4177 connector->encoder = NULL;
3769 intel_encoder->load_detect_temp = false; 4178 intel_encoder->load_detect_temp = false;
3770 crtc->enabled = drm_helper_crtc_in_use(crtc); 4179 crtc->enabled = drm_helper_crtc_in_use(crtc);
3771 drm_helper_disable_unused_functions(dev); 4180 drm_helper_disable_unused_functions(dev);
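
With the connector split out of intel_encoder, both load-detect helpers now take the connector explicitly and attach/detach it themselves. A hypothetical caller, following the new signatures declared in the intel_drv.h hunk later in this diff; do_load_detect() is a placeholder for the per-output probe:

	/* Hypothetical caller sketch for the reworked load-detect API. */
	int dpms_mode;
	struct drm_crtc *crtc;

	crtc = intel_get_load_detect_pipe(intel_encoder, connector,
					  &load_detect_mode, &dpms_mode);
	if (crtc) {
		enum drm_connector_status status = do_load_detect(crtc);
		intel_release_load_detect_pipe(intel_encoder, connector,
					       dpms_mode);
	}
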
@@ -4392,14 +4801,14 @@ struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
4392 return crtc; 4801 return crtc;
4393} 4802}
4394 4803
4395static int intel_connector_clones(struct drm_device *dev, int type_mask) 4804static int intel_encoder_clones(struct drm_device *dev, int type_mask)
4396{ 4805{
4397 int index_mask = 0; 4806 int index_mask = 0;
4398 struct drm_connector *connector; 4807 struct drm_encoder *encoder;
4399 int entry = 0; 4808 int entry = 0;
4400 4809
4401 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 4810 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4402 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 4811 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
4403 if (type_mask & intel_encoder->clone_mask) 4812 if (type_mask & intel_encoder->clone_mask)
4404 index_mask |= (1 << entry); 4813 index_mask |= (1 << entry);
4405 entry++; 4814 entry++;
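
The clone loop now walks encoder_list rather than connector_list, and recovers the driver wrapper with enc_to_intel_encoder(). As the intel_drv.h hunk further down shows, that helper is a plain container_of over the embedded drm_encoder:

	/* From intel_drv.h (see the hunk below); container_of maps a pointer
	 * to an embedded member back to the enclosing structure. */
	#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)

	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
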
@@ -4411,7 +4820,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
4411static void intel_setup_outputs(struct drm_device *dev) 4820static void intel_setup_outputs(struct drm_device *dev)
4412{ 4821{
4413 struct drm_i915_private *dev_priv = dev->dev_private; 4822 struct drm_i915_private *dev_priv = dev->dev_private;
4414 struct drm_connector *connector; 4823 struct drm_encoder *encoder;
4415 4824
4416 intel_crt_init(dev); 4825 intel_crt_init(dev);
4417 4826
@@ -4426,9 +4835,8 @@ static void intel_setup_outputs(struct drm_device *dev)
4426 intel_dp_init(dev, DP_A); 4835 intel_dp_init(dev, DP_A);
4427 4836
4428 if (I915_READ(HDMIB) & PORT_DETECTED) { 4837 if (I915_READ(HDMIB) & PORT_DETECTED) {
4429 /* check SDVOB */ 4838 /* PCH SDVOB multiplex with HDMIB */
4430 /* found = intel_sdvo_init(dev, HDMIB); */ 4839 found = intel_sdvo_init(dev, PCH_SDVOB);
4431 found = 0;
4432 if (!found) 4840 if (!found)
4433 intel_hdmi_init(dev, HDMIB); 4841 intel_hdmi_init(dev, HDMIB);
4434 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 4842 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -4494,12 +4902,11 @@ static void intel_setup_outputs(struct drm_device *dev)
4494 if (SUPPORTS_TV(dev)) 4902 if (SUPPORTS_TV(dev))
4495 intel_tv_init(dev); 4903 intel_tv_init(dev);
4496 4904
4497 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 4905 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4498 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 4906 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
4499 struct drm_encoder *encoder = &intel_encoder->enc;
4500 4907
4501 encoder->possible_crtcs = intel_encoder->crtc_mask; 4908 encoder->possible_crtcs = intel_encoder->crtc_mask;
4502 encoder->possible_clones = intel_connector_clones(dev, 4909 encoder->possible_clones = intel_encoder_clones(dev,
4503 intel_encoder->clone_mask); 4910 intel_encoder->clone_mask);
4504 } 4911 }
4505} 4912}
@@ -4507,10 +4914,6 @@ static void intel_setup_outputs(struct drm_device *dev)
4507static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 4914static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
4508{ 4915{
4509 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 4916 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
4510 struct drm_device *dev = fb->dev;
4511
4512 if (fb->fbdev)
4513 intelfb_remove(dev, fb);
4514 4917
4515 drm_framebuffer_cleanup(fb); 4918 drm_framebuffer_cleanup(fb);
4516 drm_gem_object_unreference_unlocked(intel_fb->obj); 4919 drm_gem_object_unreference_unlocked(intel_fb->obj);
@@ -4533,18 +4936,13 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
4533 .create_handle = intel_user_framebuffer_create_handle, 4936 .create_handle = intel_user_framebuffer_create_handle,
4534}; 4937};
4535 4938
4536int intel_framebuffer_create(struct drm_device *dev, 4939int intel_framebuffer_init(struct drm_device *dev,
4537 struct drm_mode_fb_cmd *mode_cmd, 4940 struct intel_framebuffer *intel_fb,
4538 struct drm_framebuffer **fb, 4941 struct drm_mode_fb_cmd *mode_cmd,
4539 struct drm_gem_object *obj) 4942 struct drm_gem_object *obj)
4540{ 4943{
4541 struct intel_framebuffer *intel_fb;
4542 int ret; 4944 int ret;
4543 4945
4544 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
4545 if (!intel_fb)
4546 return -ENOMEM;
4547
4548 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 4946 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
4549 if (ret) { 4947 if (ret) {
4550 DRM_ERROR("framebuffer init failed %d\n", ret); 4948 DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -4552,40 +4950,41 @@ int intel_framebuffer_create(struct drm_device *dev,
4552 } 4950 }
4553 4951
4554 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); 4952 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
4555
4556 intel_fb->obj = obj; 4953 intel_fb->obj = obj;
4557
4558 *fb = &intel_fb->base;
4559
4560 return 0; 4954 return 0;
4561} 4955}
4562 4956
4563
4564static struct drm_framebuffer * 4957static struct drm_framebuffer *
4565intel_user_framebuffer_create(struct drm_device *dev, 4958intel_user_framebuffer_create(struct drm_device *dev,
4566 struct drm_file *filp, 4959 struct drm_file *filp,
4567 struct drm_mode_fb_cmd *mode_cmd) 4960 struct drm_mode_fb_cmd *mode_cmd)
4568{ 4961{
4569 struct drm_gem_object *obj; 4962 struct drm_gem_object *obj;
4570 struct drm_framebuffer *fb; 4963 struct intel_framebuffer *intel_fb;
4571 int ret; 4964 int ret;
4572 4965
4573 obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); 4966 obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
4574 if (!obj) 4967 if (!obj)
4575 return NULL; 4968 return NULL;
4576 4969
4577 ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); 4970 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
4971 if (!intel_fb)
4972 return NULL;
4973
4974 ret = intel_framebuffer_init(dev, intel_fb,
4975 mode_cmd, obj);
4578 if (ret) { 4976 if (ret) {
4579 drm_gem_object_unreference_unlocked(obj); 4977 drm_gem_object_unreference_unlocked(obj);
4978 kfree(intel_fb);
4580 return NULL; 4979 return NULL;
4581 } 4980 }
4582 4981
4583 return fb; 4982 return &intel_fb->base;
4584} 4983}
4585 4984
4586static const struct drm_mode_config_funcs intel_mode_funcs = { 4985static const struct drm_mode_config_funcs intel_mode_funcs = {
4587 .fb_create = intel_user_framebuffer_create, 4986 .fb_create = intel_user_framebuffer_create,
4588 .fb_changed = intelfb_probe, 4987 .output_poll_changed = intel_fb_output_poll_changed,
4589}; 4988};
4590 4989
4591static struct drm_gem_object * 4990static struct drm_gem_object *
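
Splitting intel_framebuffer_create() into a bare intel_framebuffer_init() moves allocation to the caller: the fbdev path can initialize a framebuffer embedded in its own structure, while the userspace path above allocates with kzalloc() and must free on failure. The resulting caller contract, condensed from the hunk:

	/* Caller-owned allocation around the new init function. */
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
		return NULL;

	if (intel_framebuffer_init(dev, intel_fb, mode_cmd, obj)) {
		drm_gem_object_unreference_unlocked(obj); /* drop lookup ref */
		kfree(intel_fb);			  /* caller frees */
		return NULL;
	}
	return &intel_fb->base;
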
@@ -4594,7 +4993,7 @@ intel_alloc_power_context(struct drm_device *dev)
4594 struct drm_gem_object *pwrctx; 4993 struct drm_gem_object *pwrctx;
4595 int ret; 4994 int ret;
4596 4995
4597 pwrctx = drm_gem_object_alloc(dev, 4096); 4996 pwrctx = i915_gem_alloc_object(dev, 4096);
4598 if (!pwrctx) { 4997 if (!pwrctx) {
4599 DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); 4998 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
4600 return NULL; 4999 return NULL;
@@ -4732,6 +5131,25 @@ void intel_init_clock_gating(struct drm_device *dev)
4732 } 5131 }
4733 5132
4734 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); 5133 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
5134
5135 /*
 5136 * According to the spec, the following bits should be set in
 5137 * order to enable memory self-refresh:
 5138 * bits 22/21 of 0x42004,
 5139 * bit 5 of 0x42020,
 5140 * bit 15 of 0x45000.
5141 */
5142 if (IS_IRONLAKE(dev)) {
5143 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5144 (I915_READ(ILK_DISPLAY_CHICKEN2) |
5145 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
5146 I915_WRITE(ILK_DSPCLK_GATE,
5147 (I915_READ(ILK_DSPCLK_GATE) |
5148 ILK_DPARB_CLK_GATE));
5149 I915_WRITE(DISP_ARB_CTL,
5150 (I915_READ(DISP_ARB_CTL) |
5151 DISP_FBC_WM_DIS));
5152 }
4735 return; 5153 return;
4736 } else if (IS_G4X(dev)) { 5154 } else if (IS_G4X(dev)) {
4737 uint32_t dspclk_gate; 5155 uint32_t dspclk_gate;
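
Each of the three self-refresh bits named in the spec comment is set with a read-modify-write so the surrounding bits in those registers are preserved. The same three writes, factored through an illustrative macro; the register and bit names come from the hunk, with the comment's raw offsets alongside:

	/* Illustrative only: factor the read-OR-write pattern used above. */
	#define rmw_set(reg, bits)	I915_WRITE(reg, I915_READ(reg) | (bits))

	rmw_set(ILK_DISPLAY_CHICKEN2, ILK_DPARB_GATE | ILK_VSDPFD_FULL); /* 0x42004 */
	rmw_set(ILK_DSPCLK_GATE, ILK_DPARB_CLK_GATE);                    /* 0x42020 */
	rmw_set(DISP_ARB_CTL, DISP_FBC_WM_DIS);                          /* 0x45000 */
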
@@ -4809,8 +5227,7 @@ static void intel_init_display(struct drm_device *dev)
4809 else 5227 else
4810 dev_priv->display.dpms = i9xx_crtc_dpms; 5228 dev_priv->display.dpms = i9xx_crtc_dpms;
4811 5229
4812 /* Only mobile has FBC, leave pointers NULL for other chips */ 5230 if (I915_HAS_FBC(dev)) {
4813 if (IS_MOBILE(dev)) {
4814 if (IS_GM45(dev)) { 5231 if (IS_GM45(dev)) {
4815 dev_priv->display.fbc_enabled = g4x_fbc_enabled; 5232 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
4816 dev_priv->display.enable_fbc = g4x_enable_fbc; 5233 dev_priv->display.enable_fbc = g4x_enable_fbc;
@@ -4847,9 +5264,31 @@ static void intel_init_display(struct drm_device *dev)
4847 i830_get_display_clock_speed; 5264 i830_get_display_clock_speed;
4848 5265
4849 /* For FIFO watermark updates */ 5266 /* For FIFO watermark updates */
4850 if (HAS_PCH_SPLIT(dev)) 5267 if (HAS_PCH_SPLIT(dev)) {
4851 dev_priv->display.update_wm = NULL; 5268 if (IS_IRONLAKE(dev)) {
4852 else if (IS_G4X(dev)) 5269 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
5270 dev_priv->display.update_wm = ironlake_update_wm;
5271 else {
5272 DRM_DEBUG_KMS("Failed to get proper latency. "
5273 "Disable CxSR\n");
5274 dev_priv->display.update_wm = NULL;
5275 }
5276 } else
5277 dev_priv->display.update_wm = NULL;
5278 } else if (IS_PINEVIEW(dev)) {
5279 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
5280 dev_priv->fsb_freq,
5281 dev_priv->mem_freq)) {
5282 DRM_INFO("failed to find known CxSR latency "
5283 "(found fsb freq %d, mem freq %d), "
5284 "disabling CxSR\n",
5285 dev_priv->fsb_freq, dev_priv->mem_freq);
5286 /* Disable CxSR and never update its watermark again */
5287 pineview_disable_cxsr(dev);
5288 dev_priv->display.update_wm = NULL;
5289 } else
5290 dev_priv->display.update_wm = pineview_update_wm;
5291 } else if (IS_G4X(dev))
4853 dev_priv->display.update_wm = g4x_update_wm; 5292 dev_priv->display.update_wm = g4x_update_wm;
4854 else if (IS_I965G(dev)) 5293 else if (IS_I965G(dev))
4855 dev_priv->display.update_wm = i965_update_wm; 5294 dev_priv->display.update_wm = i965_update_wm;
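
A NULL update_wm hook is how the driver records "watermark updates disabled", either because the Ironlake latency field could not be read or because no known Pineview CxSR latency matched, so consumers of these hooks must test before dispatching. A sketch of that convention with a deliberately simplified hook table; the names and the single-argument signature here are hypothetical, and the real hook takes additional plane-clock arguments:

	/* Hypothetical, simplified version of the optional-hook convention. */
	struct display_hooks {
		void (*update_wm)(struct drm_device *dev); /* NULL => disabled */
	};

	static void maybe_update_wm(struct display_hooks *hooks,
				    struct drm_device *dev)
	{
		if (hooks->update_wm)
			hooks->update_wm(dev);
		/* else: CxSR/watermarks stay untouched on this platform */
	}
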
@@ -4923,13 +5362,6 @@ void intel_modeset_init(struct drm_device *dev)
4923 (unsigned long)dev); 5362 (unsigned long)dev);
4924 5363
4925 intel_setup_overlay(dev); 5364 intel_setup_overlay(dev);
4926
4927 if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
4928 dev_priv->fsb_freq,
4929 dev_priv->mem_freq))
4930 DRM_INFO("failed to find known CxSR latency "
4931 "(found fsb freq %d, mem freq %d), disabling CxSR\n",
4932 dev_priv->fsb_freq, dev_priv->mem_freq);
4933} 5365}
4934 5366
4935void intel_modeset_cleanup(struct drm_device *dev) 5367void intel_modeset_cleanup(struct drm_device *dev)
@@ -4940,6 +5372,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
4940 5372
4941 mutex_lock(&dev->struct_mutex); 5373 mutex_lock(&dev->struct_mutex);
4942 5374
5375 drm_kms_helper_poll_fini(dev);
5376 intel_fbdev_fini(dev);
5377
4943 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 5378 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4944 /* Skip inactive CRTCs */ 5379 /* Skip inactive CRTCs */
4945 if (!crtc->fb) 5380 if (!crtc->fb)
@@ -4974,14 +5409,29 @@ void intel_modeset_cleanup(struct drm_device *dev)
4974} 5409}
4975 5410
4976 5411
4977/* current intel driver doesn't take advantage of encoders 5412/*
 4978 always give back the encoder for the connector 5413 * Return the encoder currently attached to the connector.
4979*/ 5414 */
4980struct drm_encoder *intel_best_encoder(struct drm_connector *connector) 5415struct drm_encoder *intel_attached_encoder (struct drm_connector *connector)
4981{ 5416{
4982 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 5417 struct drm_mode_object *obj;
5418 struct drm_encoder *encoder;
5419 int i;
4983 5420
4984 return &intel_encoder->enc; 5421 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
5422 if (connector->encoder_ids[i] == 0)
5423 break;
5424
5425 obj = drm_mode_object_find(connector->dev,
5426 connector->encoder_ids[i],
5427 DRM_MODE_OBJECT_ENCODER);
5428 if (!obj)
5429 continue;
5430
5431 encoder = obj_to_encoder(obj);
5432 return encoder;
5433 }
5434 return NULL;
4985} 5435}
4986 5436
4987/* 5437/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 77e40cfcf216..6b1c9a27c27a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -48,8 +48,6 @@ struct intel_dp_priv {
48 uint32_t output_reg; 48 uint32_t output_reg;
49 uint32_t DP; 49 uint32_t DP;
50 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; 50 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
51 uint32_t save_DP;
52 uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
53 bool has_audio; 51 bool has_audio;
54 int dpms_mode; 52 int dpms_mode;
55 uint8_t link_bw; 53 uint8_t link_bw;
@@ -141,7 +139,8 @@ static int
141intel_dp_mode_valid(struct drm_connector *connector, 139intel_dp_mode_valid(struct drm_connector *connector,
142 struct drm_display_mode *mode) 140 struct drm_display_mode *mode)
143{ 141{
144 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 142 struct drm_encoder *encoder = intel_attached_encoder(connector);
143 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
145 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); 144 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
146 int max_lanes = intel_dp_max_lane_count(intel_encoder); 145 int max_lanes = intel_dp_max_lane_count(intel_encoder);
147 146
@@ -215,7 +214,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
215{ 214{
216 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 215 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
217 uint32_t output_reg = dp_priv->output_reg; 216 uint32_t output_reg = dp_priv->output_reg;
218 struct drm_device *dev = intel_encoder->base.dev; 217 struct drm_device *dev = intel_encoder->enc.dev;
219 struct drm_i915_private *dev_priv = dev->dev_private; 218 struct drm_i915_private *dev_priv = dev->dev_private;
220 uint32_t ch_ctl = output_reg + 0x10; 219 uint32_t ch_ctl = output_reg + 0x10;
221 uint32_t ch_data = ch_ctl + 4; 220 uint32_t ch_data = ch_ctl + 4;
@@ -224,19 +223,27 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
224 uint32_t ctl; 223 uint32_t ctl;
225 uint32_t status; 224 uint32_t status;
226 uint32_t aux_clock_divider; 225 uint32_t aux_clock_divider;
227 int try; 226 int try, precharge;
228 227
229 /* The clock divider is based off the hrawclk, 228 /* The clock divider is based off the hrawclk,
230 * and would like to run at 2MHz. So, take the 229 * and would like to run at 2MHz. So, take the
231 * hrawclk value and divide by 2 and use that 230 * hrawclk value and divide by 2 and use that
232 */ 231 */
233 if (IS_eDP(intel_encoder)) 232 if (IS_eDP(intel_encoder)) {
 234 aux_clock_divider = 225; /* eDP input clock at 450MHz */ 233 if (IS_GEN6(dev))
 235 else if (HAS_PCH_SPLIT(dev)) 234 aux_clock_divider = 200; /* SNB eDP input clock at 400MHz */
 235 else
 236 aux_clock_divider = 225; /* eDP input clock at 450MHz */
 237 } else if (HAS_PCH_SPLIT(dev))
 236 aux_clock_divider = 62; /* IRL input clock fixed at 125MHz */ 238 aux_clock_divider = 62; /* IRL input clock fixed at 125MHz */
237 else 239 else
238 aux_clock_divider = intel_hrawclk(dev) / 2; 240 aux_clock_divider = intel_hrawclk(dev) / 2;
239 241
242 if (IS_GEN6(dev))
243 precharge = 3;
244 else
245 precharge = 5;
246
240 /* Must try at least 3 times according to DP spec */ 247 /* Must try at least 3 times according to DP spec */
241 for (try = 0; try < 5; try++) { 248 for (try = 0; try < 5; try++) {
242 /* Load the send data into the aux channel data registers */ 249 /* Load the send data into the aux channel data registers */
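
The dividers chosen in this hunk all aim the AUX channel bit clock at roughly 2MHz, as the comment says. A quick check with the constants above:

	125MHz PCH rawclk   / 62          ~= 2.02MHz
	450MHz ILK eDP clk  / 225          = 2.00MHz
	400MHz SNB eDP clk  / 200          = 2.00MHz
	hrawclk (in MHz)    / (hrawclk/2)  = 2MHz on GMCH parts
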
@@ -249,7 +256,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
249 ctl = (DP_AUX_CH_CTL_SEND_BUSY | 256 ctl = (DP_AUX_CH_CTL_SEND_BUSY |
250 DP_AUX_CH_CTL_TIME_OUT_400us | 257 DP_AUX_CH_CTL_TIME_OUT_400us |
251 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 258 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
252 (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 259 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
253 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | 260 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
254 DP_AUX_CH_CTL_DONE | 261 DP_AUX_CH_CTL_DONE |
255 DP_AUX_CH_CTL_TIME_OUT_ERROR | 262 DP_AUX_CH_CTL_TIME_OUT_ERROR |
@@ -465,7 +472,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
465} 472}
466 473
467static int 474static int
468intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name) 475intel_dp_i2c_init(struct intel_encoder *intel_encoder,
476 struct intel_connector *intel_connector, const char *name)
469{ 477{
470 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 478 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
471 479
@@ -480,7 +488,7 @@ intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
480 strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); 488 strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
481 dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; 489 dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
482 dp_priv->adapter.algo_data = &dp_priv->algo; 490 dp_priv->adapter.algo_data = &dp_priv->algo;
483 dp_priv->adapter.dev.parent = &intel_encoder->base.kdev; 491 dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
484 492
485 return i2c_dp_aux_add_bus(&dp_priv->adapter); 493 return i2c_dp_aux_add_bus(&dp_priv->adapter);
486} 494}
@@ -555,7 +563,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
555{ 563{
556 struct drm_device *dev = crtc->dev; 564 struct drm_device *dev = crtc->dev;
557 struct drm_mode_config *mode_config = &dev->mode_config; 565 struct drm_mode_config *mode_config = &dev->mode_config;
558 struct drm_connector *connector; 566 struct drm_encoder *encoder;
559 struct drm_i915_private *dev_priv = dev->dev_private; 567 struct drm_i915_private *dev_priv = dev->dev_private;
560 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 568 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
561 int lane_count = 4; 569 int lane_count = 4;
@@ -564,13 +572,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
564 /* 572 /*
565 * Find the lane count in the intel_encoder private 573 * Find the lane count in the intel_encoder private
566 */ 574 */
567 list_for_each_entry(connector, &mode_config->connector_list, head) { 575 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
568 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 576 struct intel_encoder *intel_encoder;
569 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 577 struct intel_dp_priv *dp_priv;
570 578
571 if (!connector->encoder || connector->encoder->crtc != crtc) 579 if (!encoder || encoder->crtc != crtc)
572 continue; 580 continue;
573 581
582 intel_encoder = enc_to_intel_encoder(encoder);
583 dp_priv = intel_encoder->dev_priv;
584
574 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { 585 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
575 lane_count = dp_priv->lane_count; 586 lane_count = dp_priv->lane_count;
576 break; 587 break;
@@ -626,16 +637,24 @@ static void
626intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 637intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
627 struct drm_display_mode *adjusted_mode) 638 struct drm_display_mode *adjusted_mode)
628{ 639{
640 struct drm_device *dev = encoder->dev;
629 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 641 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
630 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 642 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
631 struct drm_crtc *crtc = intel_encoder->enc.crtc; 643 struct drm_crtc *crtc = intel_encoder->enc.crtc;
632 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 644 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
633 645
634 dp_priv->DP = (DP_LINK_TRAIN_OFF | 646 dp_priv->DP = (DP_VOLTAGE_0_4 |
635 DP_VOLTAGE_0_4 | 647 DP_PRE_EMPHASIS_0);
636 DP_PRE_EMPHASIS_0 | 648
637 DP_SYNC_VS_HIGH | 649 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
638 DP_SYNC_HS_HIGH); 650 dp_priv->DP |= DP_SYNC_HS_HIGH;
651 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
652 dp_priv->DP |= DP_SYNC_VS_HIGH;
653
654 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
655 dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
656 else
657 dp_priv->DP |= DP_LINK_TRAIN_OFF;
639 658
640 switch (dp_priv->lane_count) { 659 switch (dp_priv->lane_count) {
641 case 1: 660 case 1:
@@ -664,7 +683,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
664 dp_priv->DP |= DP_ENHANCED_FRAMING; 683 dp_priv->DP |= DP_ENHANCED_FRAMING;
665 } 684 }
666 685
667 if (intel_crtc->pipe == 1) 686 /* CPT DP's pipe select is decided in TRANS_DP_CTL */
687 if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
668 dp_priv->DP |= DP_PIPEB_SELECT; 688 dp_priv->DP |= DP_PIPEB_SELECT;
669 689
670 if (IS_eDP(intel_encoder)) { 690 if (IS_eDP(intel_encoder)) {
@@ -704,7 +724,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
704{ 724{
705 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 725 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
706 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 726 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
707 struct drm_device *dev = intel_encoder->base.dev; 727 struct drm_device *dev = encoder->dev;
708 struct drm_i915_private *dev_priv = dev->dev_private; 728 struct drm_i915_private *dev_priv = dev->dev_private;
709 uint32_t dp_reg = I915_READ(dp_priv->output_reg); 729 uint32_t dp_reg = I915_READ(dp_priv->output_reg);
710 730
@@ -749,20 +769,6 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
749 return link_status[r - DP_LANE0_1_STATUS]; 769 return link_status[r - DP_LANE0_1_STATUS];
750} 770}
751 771
752static void
753intel_dp_save(struct drm_connector *connector)
754{
755 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
756 struct drm_device *dev = intel_encoder->base.dev;
757 struct drm_i915_private *dev_priv = dev->dev_private;
758 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
759
760 dp_priv->save_DP = I915_READ(dp_priv->output_reg);
761 intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
762 dp_priv->save_link_configuration,
763 sizeof (dp_priv->save_link_configuration));
764}
765
766static uint8_t 772static uint8_t
767intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], 773intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
768 int lane) 774 int lane)
@@ -892,6 +898,25 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
892 return signal_levels; 898 return signal_levels;
893} 899}
894 900
901/* Gen6's DP voltage swing and pre-emphasis control */
902static uint32_t
903intel_gen6_edp_signal_levels(uint8_t train_set)
904{
905 switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
906 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
907 return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
908 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
909 return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
910 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
911 return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
912 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
913 return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
914 default:
915 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
916 return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
917 }
918}
919
895static uint8_t 920static uint8_t
896intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], 921intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
897 int lane) 922 int lane)
@@ -948,7 +973,7 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder,
948 uint8_t train_set[4], 973 uint8_t train_set[4],
949 bool first) 974 bool first)
950{ 975{
951 struct drm_device *dev = intel_encoder->base.dev; 976 struct drm_device *dev = intel_encoder->enc.dev;
952 struct drm_i915_private *dev_priv = dev->dev_private; 977 struct drm_i915_private *dev_priv = dev->dev_private;
953 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 978 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
954 int ret; 979 int ret;
@@ -974,7 +999,7 @@ static void
974intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, 999intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
975 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) 1000 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
976{ 1001{
977 struct drm_device *dev = intel_encoder->base.dev; 1002 struct drm_device *dev = intel_encoder->enc.dev;
978 struct drm_i915_private *dev_priv = dev->dev_private; 1003 struct drm_i915_private *dev_priv = dev->dev_private;
979 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 1004 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
980 uint8_t train_set[4]; 1005 uint8_t train_set[4];
@@ -985,23 +1010,38 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
985 bool channel_eq = false; 1010 bool channel_eq = false;
986 bool first = true; 1011 bool first = true;
987 int tries; 1012 int tries;
1013 u32 reg;
988 1014
989 /* Write the link configuration data */ 1015 /* Write the link configuration data */
990 intel_dp_aux_native_write(intel_encoder, 0x100, 1016 intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
991 link_configuration, DP_LINK_CONFIGURATION_SIZE); 1017 link_configuration, DP_LINK_CONFIGURATION_SIZE);
992 1018
993 DP |= DP_PORT_EN; 1019 DP |= DP_PORT_EN;
994 DP &= ~DP_LINK_TRAIN_MASK; 1020 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
1021 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1022 else
1023 DP &= ~DP_LINK_TRAIN_MASK;
995 memset(train_set, 0, 4); 1024 memset(train_set, 0, 4);
996 voltage = 0xff; 1025 voltage = 0xff;
997 tries = 0; 1026 tries = 0;
998 clock_recovery = false; 1027 clock_recovery = false;
999 for (;;) { 1028 for (;;) {
1000 /* Use train_set[0] to set the voltage and pre emphasis values */ 1029 /* Use train_set[0] to set the voltage and pre emphasis values */
1001 uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); 1030 uint32_t signal_levels;
1002 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1031 if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
1032 signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
1033 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1034 } else {
1035 signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
1036 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1037 }
1003 1038
1004 if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1, 1039 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
1040 reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
1041 else
1042 reg = DP | DP_LINK_TRAIN_PAT_1;
1043
1044 if (!intel_dp_set_link_train(intel_encoder, reg,
1005 DP_TRAINING_PATTERN_1, train_set, first)) 1045 DP_TRAINING_PATTERN_1, train_set, first))
1006 break; 1046 break;
1007 first = false; 1047 first = false;
@@ -1041,11 +1081,23 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
1041 channel_eq = false; 1081 channel_eq = false;
1042 for (;;) { 1082 for (;;) {
1043 /* Use train_set[0] to set the voltage and pre emphasis values */ 1083 /* Use train_set[0] to set the voltage and pre emphasis values */
1044 uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); 1084 uint32_t signal_levels;
1045 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1085
1086 if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
1087 signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
1088 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1089 } else {
1090 signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
1091 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1092 }
1093
1094 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
1095 reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
1096 else
1097 reg = DP | DP_LINK_TRAIN_PAT_2;
1046 1098
1047 /* channel eq pattern */ 1099 /* channel eq pattern */
1048 if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2, 1100 if (!intel_dp_set_link_train(intel_encoder, reg,
1049 DP_TRAINING_PATTERN_2, train_set, 1101 DP_TRAINING_PATTERN_2, train_set,
1050 false)) 1102 false))
1051 break; 1103 break;
@@ -1068,7 +1120,12 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
1068 ++tries; 1120 ++tries;
1069 } 1121 }
1070 1122
1071 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); 1123 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
1124 reg = DP | DP_LINK_TRAIN_OFF_CPT;
1125 else
1126 reg = DP | DP_LINK_TRAIN_OFF;
1127
1128 I915_WRITE(dp_priv->output_reg, reg);
1072 POSTING_READ(dp_priv->output_reg); 1129 POSTING_READ(dp_priv->output_reg);
1073 intel_dp_aux_native_write_1(intel_encoder, 1130 intel_dp_aux_native_write_1(intel_encoder,
1074 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); 1131 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
@@ -1077,7 +1134,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
1077static void 1134static void
1078intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) 1135intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
1079{ 1136{
1080 struct drm_device *dev = intel_encoder->base.dev; 1137 struct drm_device *dev = intel_encoder->enc.dev;
1081 struct drm_i915_private *dev_priv = dev->dev_private; 1138 struct drm_i915_private *dev_priv = dev->dev_private;
1082 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 1139 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1083 1140
@@ -1090,9 +1147,15 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
1090 udelay(100); 1147 udelay(100);
1091 } 1148 }
1092 1149
1093 DP &= ~DP_LINK_TRAIN_MASK; 1150 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
1094 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); 1151 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1095 POSTING_READ(dp_priv->output_reg); 1152 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
1153 POSTING_READ(dp_priv->output_reg);
1154 } else {
1155 DP &= ~DP_LINK_TRAIN_MASK;
1156 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
1157 POSTING_READ(dp_priv->output_reg);
1158 }
1096 1159
1097 udelay(17000); 1160 udelay(17000);
1098 1161
@@ -1102,18 +1165,6 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
1102 POSTING_READ(dp_priv->output_reg); 1165 POSTING_READ(dp_priv->output_reg);
1103} 1166}
1104 1167
1105static void
1106intel_dp_restore(struct drm_connector *connector)
1107{
1108 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1109 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1110
1111 if (dp_priv->save_DP & DP_PORT_EN)
1112 intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration);
1113 else
1114 intel_dp_link_down(intel_encoder, dp_priv->save_DP);
1115}
1116
1117/* 1168/*
1118 * According to DP spec 1169 * According to DP spec
1119 * 5.1.2: 1170 * 5.1.2:
@@ -1144,7 +1195,8 @@ intel_dp_check_link_status(struct intel_encoder *intel_encoder)
1144static enum drm_connector_status 1195static enum drm_connector_status
1145ironlake_dp_detect(struct drm_connector *connector) 1196ironlake_dp_detect(struct drm_connector *connector)
1146{ 1197{
1147 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 1198 struct drm_encoder *encoder = intel_attached_encoder(connector);
1199 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1148 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 1200 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1149 enum drm_connector_status status; 1201 enum drm_connector_status status;
1150 1202
@@ -1168,8 +1220,9 @@ ironlake_dp_detect(struct drm_connector *connector)
1168static enum drm_connector_status 1220static enum drm_connector_status
1169intel_dp_detect(struct drm_connector *connector) 1221intel_dp_detect(struct drm_connector *connector)
1170{ 1222{
1171 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 1223 struct drm_encoder *encoder = intel_attached_encoder(connector);
1172 struct drm_device *dev = intel_encoder->base.dev; 1224 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1225 struct drm_device *dev = intel_encoder->enc.dev;
1173 struct drm_i915_private *dev_priv = dev->dev_private; 1226 struct drm_i915_private *dev_priv = dev->dev_private;
1174 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 1227 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1175 uint32_t temp, bit; 1228 uint32_t temp, bit;
@@ -1180,16 +1233,6 @@ intel_dp_detect(struct drm_connector *connector)
1180 if (HAS_PCH_SPLIT(dev)) 1233 if (HAS_PCH_SPLIT(dev))
1181 return ironlake_dp_detect(connector); 1234 return ironlake_dp_detect(connector);
1182 1235
1183 temp = I915_READ(PORT_HOTPLUG_EN);
1184
1185 I915_WRITE(PORT_HOTPLUG_EN,
1186 temp |
1187 DPB_HOTPLUG_INT_EN |
1188 DPC_HOTPLUG_INT_EN |
1189 DPD_HOTPLUG_INT_EN);
1190
1191 POSTING_READ(PORT_HOTPLUG_EN);
1192
1193 switch (dp_priv->output_reg) { 1236 switch (dp_priv->output_reg) {
1194 case DP_B: 1237 case DP_B:
1195 bit = DPB_HOTPLUG_INT_STATUS; 1238 bit = DPB_HOTPLUG_INT_STATUS;
@@ -1222,15 +1265,16 @@ intel_dp_detect(struct drm_connector *connector)
1222 1265
1223static int intel_dp_get_modes(struct drm_connector *connector) 1266static int intel_dp_get_modes(struct drm_connector *connector)
1224{ 1267{
1225 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 1268 struct drm_encoder *encoder = intel_attached_encoder(connector);
1226 struct drm_device *dev = intel_encoder->base.dev; 1269 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1270 struct drm_device *dev = intel_encoder->enc.dev;
1227 struct drm_i915_private *dev_priv = dev->dev_private; 1271 struct drm_i915_private *dev_priv = dev->dev_private;
1228 int ret; 1272 int ret;
1229 1273
1230 /* We should parse the EDID data and find out if it has an audio sink 1274 /* We should parse the EDID data and find out if it has an audio sink
1231 */ 1275 */
1232 1276
1233 ret = intel_ddc_get_modes(intel_encoder); 1277 ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
1234 if (ret) 1278 if (ret)
1235 return ret; 1279 return ret;
1236 1280
@@ -1249,13 +1293,9 @@ static int intel_dp_get_modes(struct drm_connector *connector)
1249static void 1293static void
1250intel_dp_destroy (struct drm_connector *connector) 1294intel_dp_destroy (struct drm_connector *connector)
1251{ 1295{
1252 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1253
1254 if (intel_encoder->i2c_bus)
1255 intel_i2c_destroy(intel_encoder->i2c_bus);
1256 drm_sysfs_connector_remove(connector); 1296 drm_sysfs_connector_remove(connector);
1257 drm_connector_cleanup(connector); 1297 drm_connector_cleanup(connector);
1258 kfree(intel_encoder); 1298 kfree(connector);
1259} 1299}
1260 1300
1261static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 1301static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -1268,8 +1308,6 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
1268 1308
1269static const struct drm_connector_funcs intel_dp_connector_funcs = { 1309static const struct drm_connector_funcs intel_dp_connector_funcs = {
1270 .dpms = drm_helper_connector_dpms, 1310 .dpms = drm_helper_connector_dpms,
1271 .save = intel_dp_save,
1272 .restore = intel_dp_restore,
1273 .detect = intel_dp_detect, 1311 .detect = intel_dp_detect,
1274 .fill_modes = drm_helper_probe_single_connector_modes, 1312 .fill_modes = drm_helper_probe_single_connector_modes,
1275 .destroy = intel_dp_destroy, 1313 .destroy = intel_dp_destroy,
@@ -1278,12 +1316,17 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
1278static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 1316static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
1279 .get_modes = intel_dp_get_modes, 1317 .get_modes = intel_dp_get_modes,
1280 .mode_valid = intel_dp_mode_valid, 1318 .mode_valid = intel_dp_mode_valid,
1281 .best_encoder = intel_best_encoder, 1319 .best_encoder = intel_attached_encoder,
1282}; 1320};
1283 1321
1284static void intel_dp_enc_destroy(struct drm_encoder *encoder) 1322static void intel_dp_enc_destroy(struct drm_encoder *encoder)
1285{ 1323{
1324 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1325
1326 if (intel_encoder->i2c_bus)
1327 intel_i2c_destroy(intel_encoder->i2c_bus);
1286 drm_encoder_cleanup(encoder); 1328 drm_encoder_cleanup(encoder);
1329 kfree(intel_encoder);
1287} 1330}
1288 1331
1289static const struct drm_encoder_funcs intel_dp_enc_funcs = { 1332static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -1299,12 +1342,35 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
1299 intel_dp_check_link_status(intel_encoder); 1342 intel_dp_check_link_status(intel_encoder);
1300} 1343}
1301 1344
1345/* Return which DP Port should be selected for Transcoder DP control */
1346int
1347intel_trans_dp_port_sel (struct drm_crtc *crtc)
1348{
1349 struct drm_device *dev = crtc->dev;
1350 struct drm_mode_config *mode_config = &dev->mode_config;
1351 struct drm_encoder *encoder;
1352 struct intel_encoder *intel_encoder = NULL;
1353
1354 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
1355 if (!encoder || encoder->crtc != crtc)
1356 continue;
1357
1358 intel_encoder = enc_to_intel_encoder(encoder);
1359 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
1360 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1361 return dp_priv->output_reg;
1362 }
1363 }
1364 return -1;
1365}
1366
1302void 1367void
1303intel_dp_init(struct drm_device *dev, int output_reg) 1368intel_dp_init(struct drm_device *dev, int output_reg)
1304{ 1369{
1305 struct drm_i915_private *dev_priv = dev->dev_private; 1370 struct drm_i915_private *dev_priv = dev->dev_private;
1306 struct drm_connector *connector; 1371 struct drm_connector *connector;
1307 struct intel_encoder *intel_encoder; 1372 struct intel_encoder *intel_encoder;
1373 struct intel_connector *intel_connector;
1308 struct intel_dp_priv *dp_priv; 1374 struct intel_dp_priv *dp_priv;
1309 const char *name = NULL; 1375 const char *name = NULL;
1310 1376
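
intel_trans_dp_port_sel(), added above, exists because CPT routes DP through the transcoder: the transcoder DP control register selects the port, so CRTC code must discover which DP output register (DP_B/C/D) sits on a given crtc, or get -1 if none. A hypothetical consumer shape; the switch body is illustrative only:

	/* Hypothetical consumer: react to the DP port driving this crtc. */
	switch (intel_trans_dp_port_sel(crtc)) {
	case DP_B: /* program transcoder DP control for port B */ break;
	case DP_C: /* ... port C */ break;
	case DP_D: /* ... port D */ break;
	default:   /* -1: no DP encoder on this crtc */ break;
	}
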
@@ -1313,13 +1379,21 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1313 if (!intel_encoder) 1379 if (!intel_encoder)
1314 return; 1380 return;
1315 1381
1382 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1383 if (!intel_connector) {
1384 kfree(intel_encoder);
1385 return;
1386 }
1387
1316 dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); 1388 dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
1317 1389
1318 connector = &intel_encoder->base; 1390 connector = &intel_connector->base;
1319 drm_connector_init(dev, connector, &intel_dp_connector_funcs, 1391 drm_connector_init(dev, connector, &intel_dp_connector_funcs,
1320 DRM_MODE_CONNECTOR_DisplayPort); 1392 DRM_MODE_CONNECTOR_DisplayPort);
1321 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 1393 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
1322 1394
1395 connector->polled = DRM_CONNECTOR_POLL_HPD;
1396
1323 if (output_reg == DP_A) 1397 if (output_reg == DP_A)
1324 intel_encoder->type = INTEL_OUTPUT_EDP; 1398 intel_encoder->type = INTEL_OUTPUT_EDP;
1325 else 1399 else
@@ -1349,7 +1423,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1349 DRM_MODE_ENCODER_TMDS); 1423 DRM_MODE_ENCODER_TMDS);
1350 drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); 1424 drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
1351 1425
1352 drm_mode_connector_attach_encoder(&intel_encoder->base, 1426 drm_mode_connector_attach_encoder(&intel_connector->base,
1353 &intel_encoder->enc); 1427 &intel_encoder->enc);
1354 drm_sysfs_connector_add(connector); 1428 drm_sysfs_connector_add(connector);
1355 1429
@@ -1378,7 +1452,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1378 break; 1452 break;
1379 } 1453 }
1380 1454
1381 intel_dp_i2c_init(intel_encoder, name); 1455 intel_dp_i2c_init(intel_encoder, intel_connector, name);
1382 1456
1383 intel_encoder->ddc_bus = &dp_priv->adapter; 1457 intel_encoder->ddc_bus = &dp_priv->adapter;
1384 intel_encoder->hot_plug = intel_dp_hot_plug; 1458 intel_encoder->hot_plug = intel_dp_hot_plug;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e30253755f12..df931f787665 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -96,8 +96,6 @@ struct intel_framebuffer {
96 96
97 97
98struct intel_encoder { 98struct intel_encoder {
99 struct drm_connector base;
100
101 struct drm_encoder enc; 99 struct drm_encoder enc;
102 int type; 100 int type;
103 struct i2c_adapter *i2c_bus; 101 struct i2c_adapter *i2c_bus;
@@ -110,6 +108,11 @@ struct intel_encoder {
110 int clone_mask; 108 int clone_mask;
111}; 109};
112 110
111struct intel_connector {
112 struct drm_connector base;
113 void *dev_priv;
114};
115
113struct intel_crtc; 116struct intel_crtc;
114struct intel_overlay { 117struct intel_overlay {
115 struct drm_device *dev; 118 struct drm_device *dev;
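
With struct intel_connector split out, connector code gets back to the wrapper via the to_intel_connector() container_of added in the next hunk. The embed/upcast round trip, as a sketch:

	/* Sketch: embedded-base round trip for the new connector wrapper. */
	struct intel_connector *ic = kzalloc(sizeof(*ic), GFP_KERNEL);
	if (ic) {
		struct drm_connector *connector = &ic->base;  /* embed */
		WARN_ON(to_intel_connector(connector) != ic); /* container_of */
	}
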
@@ -149,17 +152,18 @@ struct intel_crtc {
149 bool lowfreq_avail; 152 bool lowfreq_avail;
150 struct intel_overlay *overlay; 153 struct intel_overlay *overlay;
151 struct intel_unpin_work *unpin_work; 154 struct intel_unpin_work *unpin_work;
155 int fdi_lanes;
152}; 156};
153 157
154#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 158#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
155#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) 159#define to_intel_connector(x) container_of(x, struct intel_connector, base)
156#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) 160#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
157#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 161#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
158 162
159struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, 163struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
160 const char *name); 164 const char *name);
161void intel_i2c_destroy(struct i2c_adapter *adapter); 165void intel_i2c_destroy(struct i2c_adapter *adapter);
162int intel_ddc_get_modes(struct intel_encoder *intel_encoder); 166int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
163extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); 167extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
164void intel_i2c_quirk_set(struct drm_device *dev, bool enable); 168void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
165void intel_i2c_reset_gmbus(struct drm_device *dev); 169void intel_i2c_reset_gmbus(struct drm_device *dev);
@@ -183,7 +187,7 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc);
183extern void intel_encoder_prepare (struct drm_encoder *encoder); 187extern void intel_encoder_prepare (struct drm_encoder *encoder);
184extern void intel_encoder_commit (struct drm_encoder *encoder); 188extern void intel_encoder_commit (struct drm_encoder *encoder);
185 189
186extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); 190extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
187 191
188extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 192extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
189 struct drm_crtc *crtc); 193 struct drm_crtc *crtc);
@@ -192,17 +196,16 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
192extern void intel_wait_for_vblank(struct drm_device *dev); 196extern void intel_wait_for_vblank(struct drm_device *dev);
193extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); 197extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
194extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 198extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
199 struct drm_connector *connector,
195 struct drm_display_mode *mode, 200 struct drm_display_mode *mode,
196 int *dpms_mode); 201 int *dpms_mode);
197extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, 202extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
203 struct drm_connector *connector,
198 int dpms_mode); 204 int dpms_mode);
199 205
200extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); 206extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
201extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); 207extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
202extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); 208extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
203extern int intelfb_probe(struct drm_device *dev);
204extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
205extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
206extern void intelfb_restore(void); 209extern void intelfb_restore(void);
207extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 210extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
208 u16 blue, int regno); 211 u16 blue, int regno);
@@ -212,10 +215,12 @@ extern void intel_init_clock_gating(struct drm_device *dev);
212extern void ironlake_enable_drps(struct drm_device *dev); 215extern void ironlake_enable_drps(struct drm_device *dev);
213extern void ironlake_disable_drps(struct drm_device *dev); 216extern void ironlake_disable_drps(struct drm_device *dev);
214 217
215extern int intel_framebuffer_create(struct drm_device *dev, 218extern int intel_framebuffer_init(struct drm_device *dev,
216 struct drm_mode_fb_cmd *mode_cmd, 219 struct intel_framebuffer *ifb,
217 struct drm_framebuffer **fb, 220 struct drm_mode_fb_cmd *mode_cmd,
218 struct drm_gem_object *obj); 221 struct drm_gem_object *obj);
222extern int intel_fbdev_init(struct drm_device *dev);
223extern void intel_fbdev_fini(struct drm_device *dev);
219 224
220extern void intel_prepare_page_flip(struct drm_device *dev, int plane); 225extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
221extern void intel_finish_page_flip(struct drm_device *dev, int pipe); 226extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
@@ -229,4 +234,6 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data,
229 struct drm_file *file_priv); 234 struct drm_file *file_priv);
230extern int intel_overlay_attrs(struct drm_device *dev, void *data, 235extern int intel_overlay_attrs(struct drm_device *dev, void *data,
231 struct drm_file *file_priv); 236 struct drm_file *file_priv);
237
238extern void intel_fb_output_poll_changed(struct drm_device *dev);
232#endif /* __INTEL_DRV_H__ */ 239#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ebf213c96b9c..227feca7cf8d 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -96,39 +96,11 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
96 } 96 }
97} 97}
98 98
99static void intel_dvo_save(struct drm_connector *connector)
100{
101 struct drm_i915_private *dev_priv = connector->dev->dev_private;
102 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
103 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
104
105 /* Each output should probably just save the registers it touches,
106 * but for now, use more overkill.
107 */
108 dev_priv->saveDVOA = I915_READ(DVOA);
109 dev_priv->saveDVOB = I915_READ(DVOB);
110 dev_priv->saveDVOC = I915_READ(DVOC);
111
112 dvo->dev_ops->save(dvo);
113}
114
115static void intel_dvo_restore(struct drm_connector *connector)
116{
117 struct drm_i915_private *dev_priv = connector->dev->dev_private;
118 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
119 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
120
121 dvo->dev_ops->restore(dvo);
122
123 I915_WRITE(DVOA, dev_priv->saveDVOA);
124 I915_WRITE(DVOB, dev_priv->saveDVOB);
125 I915_WRITE(DVOC, dev_priv->saveDVOC);
126}
127
128static int intel_dvo_mode_valid(struct drm_connector *connector, 99static int intel_dvo_mode_valid(struct drm_connector *connector,
129 struct drm_display_mode *mode) 100 struct drm_display_mode *mode)
130{ 101{
131 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 102 struct drm_encoder *encoder = intel_attached_encoder(connector);
103 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
132 struct intel_dvo_device *dvo = intel_encoder->dev_priv; 104 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
133 105
134 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 106 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -241,7 +213,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
241 */ 213 */
242static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) 214static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
243{ 215{
244 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 216 struct drm_encoder *encoder = intel_attached_encoder(connector);
217 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
245 struct intel_dvo_device *dvo = intel_encoder->dev_priv; 218 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
246 219
247 return dvo->dev_ops->detect(dvo); 220 return dvo->dev_ops->detect(dvo);
@@ -249,7 +222,8 @@ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
249 222
250static int intel_dvo_get_modes(struct drm_connector *connector) 223static int intel_dvo_get_modes(struct drm_connector *connector)
251{ 224{
252 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 225 struct drm_encoder *encoder = intel_attached_encoder(connector);
226 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
253 struct intel_dvo_device *dvo = intel_encoder->dev_priv; 227 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
254 228
255 /* We should probably have an i2c driver get_modes function for those 229 /* We should probably have an i2c driver get_modes function for those
@@ -257,7 +231,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
257 * (TV-out, for example), but for now with just TMDS and LVDS, 231 * (TV-out, for example), but for now with just TMDS and LVDS,
258 * that's not the case. 232 * that's not the case.
259 */ 233 */
260 intel_ddc_get_modes(intel_encoder); 234 intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
261 if (!list_empty(&connector->probed_modes)) 235 if (!list_empty(&connector->probed_modes))
262 return 1; 236 return 1;
263 237
@@ -275,38 +249,10 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
275 249
276static void intel_dvo_destroy (struct drm_connector *connector) 250static void intel_dvo_destroy (struct drm_connector *connector)
277{ 251{
278 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
279 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
280
281 if (dvo) {
282 if (dvo->dev_ops->destroy)
283 dvo->dev_ops->destroy(dvo);
284 if (dvo->panel_fixed_mode)
285 kfree(dvo->panel_fixed_mode);
286 /* no need, in i830_dvoices[] now */
287 //kfree(dvo);
288 }
289 if (intel_encoder->i2c_bus)
290 intel_i2c_destroy(intel_encoder->i2c_bus);
291 if (intel_encoder->ddc_bus)
292 intel_i2c_destroy(intel_encoder->ddc_bus);
293 drm_sysfs_connector_remove(connector); 252 drm_sysfs_connector_remove(connector);
294 drm_connector_cleanup(connector); 253 drm_connector_cleanup(connector);
295 kfree(intel_encoder); 254 kfree(connector);
296}
297
298#ifdef RANDR_GET_CRTC_INTERFACE
299static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
300{
301 struct drm_device *dev = connector->dev;
302 struct drm_i915_private *dev_priv = dev->dev_private;
303 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
304 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
305 int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
306
307 return intel_pipe_to_crtc(pScrn, pipe);
308} 255}
309#endif
310 256
311static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { 257static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
312 .dpms = intel_dvo_dpms, 258 .dpms = intel_dvo_dpms,
@@ -318,8 +264,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
318 264
319static const struct drm_connector_funcs intel_dvo_connector_funcs = { 265static const struct drm_connector_funcs intel_dvo_connector_funcs = {
320 .dpms = drm_helper_connector_dpms, 266 .dpms = drm_helper_connector_dpms,
321 .save = intel_dvo_save,
322 .restore = intel_dvo_restore,
323 .detect = intel_dvo_detect, 267 .detect = intel_dvo_detect,
324 .destroy = intel_dvo_destroy, 268 .destroy = intel_dvo_destroy,
325 .fill_modes = drm_helper_probe_single_connector_modes, 269 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -328,12 +272,26 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
328static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { 272static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
329 .mode_valid = intel_dvo_mode_valid, 273 .mode_valid = intel_dvo_mode_valid,
330 .get_modes = intel_dvo_get_modes, 274 .get_modes = intel_dvo_get_modes,
331 .best_encoder = intel_best_encoder, 275 .best_encoder = intel_attached_encoder,
332}; 276};
333 277
334static void intel_dvo_enc_destroy(struct drm_encoder *encoder) 278static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
335{ 279{
280 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
281 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
282
283 if (dvo) {
284 if (dvo->dev_ops->destroy)
285 dvo->dev_ops->destroy(dvo);
286 if (dvo->panel_fixed_mode)
287 kfree(dvo->panel_fixed_mode);
288 }
289 if (intel_encoder->i2c_bus)
290 intel_i2c_destroy(intel_encoder->i2c_bus);
291 if (intel_encoder->ddc_bus)
292 intel_i2c_destroy(intel_encoder->ddc_bus);
336 drm_encoder_cleanup(encoder); 293 drm_encoder_cleanup(encoder);
294 kfree(intel_encoder);
337} 295}
338 296
339static const struct drm_encoder_funcs intel_dvo_enc_funcs = { 297static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
@@ -352,7 +310,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
352{ 310{
353 struct drm_device *dev = connector->dev; 311 struct drm_device *dev = connector->dev;
354 struct drm_i915_private *dev_priv = dev->dev_private; 312 struct drm_i915_private *dev_priv = dev->dev_private;
355 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 313 struct drm_encoder *encoder = intel_attached_encoder(connector);
314 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
356 struct intel_dvo_device *dvo = intel_encoder->dev_priv; 315 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
357 uint32_t dvo_reg = dvo->dvo_reg; 316 uint32_t dvo_reg = dvo->dvo_reg;
358 uint32_t dvo_val = I915_READ(dvo_reg); 317 uint32_t dvo_val = I915_READ(dvo_reg);
@@ -384,6 +343,7 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
384void intel_dvo_init(struct drm_device *dev) 343void intel_dvo_init(struct drm_device *dev)
385{ 344{
386 struct intel_encoder *intel_encoder; 345 struct intel_encoder *intel_encoder;
346 struct intel_connector *intel_connector;
387 struct intel_dvo_device *dvo; 347 struct intel_dvo_device *dvo;
388 struct i2c_adapter *i2cbus = NULL; 348 struct i2c_adapter *i2cbus = NULL;
389 int ret = 0; 349 int ret = 0;
@@ -393,6 +353,12 @@ void intel_dvo_init(struct drm_device *dev)
393 if (!intel_encoder) 353 if (!intel_encoder)
394 return; 354 return;
395 355
356 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
357 if (!intel_connector) {
358 kfree(intel_encoder);
359 return;
360 }
361
396 /* Set up the DDC bus */ 362 /* Set up the DDC bus */
397 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); 363 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
398 if (!intel_encoder->ddc_bus) 364 if (!intel_encoder->ddc_bus)
@@ -400,7 +366,7 @@ void intel_dvo_init(struct drm_device *dev)
400 366
401 /* Now, try to find a controller */ 367 /* Now, try to find a controller */
402 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { 368 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
403 struct drm_connector *connector = &intel_encoder->base; 369 struct drm_connector *connector = &intel_connector->base;
404 int gpio; 370 int gpio;
405 371
406 dvo = &intel_dvo_devices[i]; 372 dvo = &intel_dvo_devices[i];
@@ -471,7 +437,7 @@ void intel_dvo_init(struct drm_device *dev)
471 drm_encoder_helper_add(&intel_encoder->enc, 437 drm_encoder_helper_add(&intel_encoder->enc,
472 &intel_dvo_helper_funcs); 438 &intel_dvo_helper_funcs);
473 439
474 drm_mode_connector_attach_encoder(&intel_encoder->base, 440 drm_mode_connector_attach_encoder(&intel_connector->base,
475 &intel_encoder->enc); 441 &intel_encoder->enc);
476 if (dvo->type == INTEL_DVO_CHIP_LVDS) { 442 if (dvo->type == INTEL_DVO_CHIP_LVDS) {
477 /* For our LVDS chipsets, we should hopefully be able 443 /* For our LVDS chipsets, we should hopefully be able
@@ -496,4 +462,5 @@ void intel_dvo_init(struct drm_device *dev)
496 intel_i2c_destroy(i2cbus); 462 intel_i2c_destroy(i2cbus);
497free_intel: 463free_intel:
498 kfree(intel_encoder); 464 kfree(intel_encoder);
465 kfree(intel_connector);
499} 466}
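
The intel_dvo.c changes above split the old combined connector+encoder object: the connector is now a separately allocated intel_connector freed from intel_dvo_destroy(), while the i2c buses and dvo device state move to intel_dvo_enc_destroy(). A minimal userspace analogue of the paired allocate/unwind pattern (names here are illustrative, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>

    struct encoder   { int id; };
    struct connector { struct encoder *enc; };

    /* Allocate both objects, unwinding in reverse on partial failure,
     * the same shape as the new intel_dvo_init() error path. */
    static struct connector *pair_init(void)
    {
        struct encoder *enc = calloc(1, sizeof(*enc));
        struct connector *con;

        if (!enc)
            return NULL;
        con = calloc(1, sizeof(*con));
        if (!con) {
            free(enc);            /* mirrors kfree(intel_encoder) */
            return NULL;
        }
        con->enc = enc;
        return con;
    }

    /* Each object is released by its own teardown hook, as in
     * intel_dvo_destroy() / intel_dvo_enc_destroy(). */
    static void pair_fini(struct connector *con)
    {
        free(con->enc);
        free(con);
    }

    int main(void)
    {
        struct connector *c = pair_init();

        if (!c)
            return 1;
        pair_fini(c);
        puts("paired alloc/free ok");
        return 0;
    }
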
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 8a0b3bcdc7b1..6f53cf7fbc50 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -44,9 +44,10 @@
44#include "i915_drm.h" 44#include "i915_drm.h"
45#include "i915_drv.h" 45#include "i915_drv.h"
46 46
47struct intelfb_par { 47struct intel_fbdev {
48 struct drm_fb_helper helper; 48 struct drm_fb_helper helper;
49 struct intel_framebuffer *intel_fb; 49 struct intel_framebuffer ifb;
50 struct list_head fbdev_list;
50 struct drm_display_mode *our_mode; 51 struct drm_display_mode *our_mode;
51}; 52};
52 53
@@ -54,7 +55,6 @@ static struct fb_ops intelfb_ops = {
54 .owner = THIS_MODULE, 55 .owner = THIS_MODULE,
55 .fb_check_var = drm_fb_helper_check_var, 56 .fb_check_var = drm_fb_helper_check_var,
56 .fb_set_par = drm_fb_helper_set_par, 57 .fb_set_par = drm_fb_helper_set_par,
57 .fb_setcolreg = drm_fb_helper_setcolreg,
58 .fb_fillrect = cfb_fillrect, 58 .fb_fillrect = cfb_fillrect,
59 .fb_copyarea = cfb_copyarea, 59 .fb_copyarea = cfb_copyarea,
60 .fb_imageblit = cfb_imageblit, 60 .fb_imageblit = cfb_imageblit,
@@ -63,62 +63,12 @@ static struct fb_ops intelfb_ops = {
63 .fb_setcmap = drm_fb_helper_setcmap, 63 .fb_setcmap = drm_fb_helper_setcmap,
64}; 64};
65 65
66static struct drm_fb_helper_funcs intel_fb_helper_funcs = { 66static int intelfb_create(struct intel_fbdev *ifbdev,
67 .gamma_set = intel_crtc_fb_gamma_set, 67 struct drm_fb_helper_surface_size *sizes)
68 .gamma_get = intel_crtc_fb_gamma_get,
69};
70
71
72/**
73 * Currently it is assumed that the old framebuffer is reused.
74 *
75 * LOCKING
76 * caller should hold the mode config lock.
77 *
78 */
79int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
80{
81 struct fb_info *info;
82 struct drm_framebuffer *fb;
83 struct drm_display_mode *mode = crtc->desired_mode;
84
85 fb = crtc->fb;
86 if (!fb)
87 return 1;
88
89 info = fb->fbdev;
90 if (!info)
91 return 1;
92
93 if (!mode)
94 return 1;
95
96 info->var.xres = mode->hdisplay;
97 info->var.right_margin = mode->hsync_start - mode->hdisplay;
98 info->var.hsync_len = mode->hsync_end - mode->hsync_start;
99 info->var.left_margin = mode->htotal - mode->hsync_end;
100 info->var.yres = mode->vdisplay;
101 info->var.lower_margin = mode->vsync_start - mode->vdisplay;
102 info->var.vsync_len = mode->vsync_end - mode->vsync_start;
103 info->var.upper_margin = mode->vtotal - mode->vsync_end;
104 info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
105 /* avoid overflow */
106 info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
107
108 return 0;
109}
110EXPORT_SYMBOL(intelfb_resize);
111
112static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
113 uint32_t fb_height, uint32_t surface_width,
114 uint32_t surface_height,
115 uint32_t surface_depth, uint32_t surface_bpp,
116 struct drm_framebuffer **fb_p)
117{ 68{
69 struct drm_device *dev = ifbdev->helper.dev;
118 struct fb_info *info; 70 struct fb_info *info;
119 struct intelfb_par *par;
120 struct drm_framebuffer *fb; 71 struct drm_framebuffer *fb;
121 struct intel_framebuffer *intel_fb;
122 struct drm_mode_fb_cmd mode_cmd; 72 struct drm_mode_fb_cmd mode_cmd;
123 struct drm_gem_object *fbo = NULL; 73 struct drm_gem_object *fbo = NULL;
124 struct drm_i915_gem_object *obj_priv; 74 struct drm_i915_gem_object *obj_priv;
@@ -126,19 +76,19 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
126 int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; 76 int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
127 77
128 /* we don't do packed 24bpp */ 78 /* we don't do packed 24bpp */
129 if (surface_bpp == 24) 79 if (sizes->surface_bpp == 24)
130 surface_bpp = 32; 80 sizes->surface_bpp = 32;
131 81
132 mode_cmd.width = surface_width; 82 mode_cmd.width = sizes->surface_width;
133 mode_cmd.height = surface_height; 83 mode_cmd.height = sizes->surface_height;
134 84
135 mode_cmd.bpp = surface_bpp; 85 mode_cmd.bpp = sizes->surface_bpp;
136 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); 86 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
137 mode_cmd.depth = surface_depth; 87 mode_cmd.depth = sizes->surface_depth;
138 88
139 size = mode_cmd.pitch * mode_cmd.height; 89 size = mode_cmd.pitch * mode_cmd.height;
140 size = ALIGN(size, PAGE_SIZE); 90 size = ALIGN(size, PAGE_SIZE);
141 fbo = drm_gem_object_alloc(dev, size); 91 fbo = i915_gem_alloc_object(dev, size);
142 if (!fbo) { 92 if (!fbo) {
143 DRM_ERROR("failed to allocate framebuffer\n"); 93 DRM_ERROR("failed to allocate framebuffer\n");
144 ret = -ENOMEM; 94 ret = -ENOMEM;
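
For reference, the pitch and size arithmetic kept by the new intelfb_create() works as follows: bits per pixel are rounded up to whole bytes, the row pitch is aligned to 64 bytes, and the total allocation is page-aligned. A standalone sketch of the same arithmetic, assuming a 4 KiB page size (ALIGN_UP is re-derived here since the kernel macro is unavailable in userspace):

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
    #define PAGE_SIZE_DEMO  4096   /* assumed page size for the demo */

    int main(void)
    {
        uint32_t width = 1366, height = 768, bpp = 32;

        /* Same arithmetic as intelfb_create(): round bits up to whole
         * bytes, align the row pitch to 64 bytes, page-align the total. */
        uint32_t pitch = (uint32_t)ALIGN_UP(width * ((bpp + 1) / 8), 64);
        uint64_t size  = ALIGN_UP((uint64_t)pitch * height, PAGE_SIZE_DEMO);

        printf("pitch=%u bytes, size=%llu bytes\n",
               pitch, (unsigned long long)size);
        return 0;
    }
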
@@ -157,45 +107,37 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
157 /* Flush everything out, we'll be doing GTT only from now on */ 107 /* Flush everything out, we'll be doing GTT only from now on */
158 i915_gem_object_set_to_gtt_domain(fbo, 1); 108 i915_gem_object_set_to_gtt_domain(fbo, 1);
159 109
160 ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo); 110 info = framebuffer_alloc(0, device);
161 if (ret) {
162 DRM_ERROR("failed to allocate fb.\n");
163 goto out_unpin;
164 }
165
166 list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
167
168 intel_fb = to_intel_framebuffer(fb);
169 *fb_p = fb;
170
171 info = framebuffer_alloc(sizeof(struct intelfb_par), device);
172 if (!info) { 111 if (!info) {
173 ret = -ENOMEM; 112 ret = -ENOMEM;
174 goto out_unpin; 113 goto out_unpin;
175 } 114 }
176 115
177 par = info->par; 116 info->par = ifbdev;
178 117
179 par->helper.funcs = &intel_fb_helper_funcs; 118 intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
180 par->helper.dev = dev; 119
181 ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 120 fb = &ifbdev->ifb.base;
182 INTELFB_CONN_LIMIT); 121
183 if (ret) 122 ifbdev->helper.fb = fb;
184 goto out_unref; 123 ifbdev->helper.fbdev = info;
185 124
186 strcpy(info->fix.id, "inteldrmfb"); 125 strcpy(info->fix.id, "inteldrmfb");
187 126
188 info->flags = FBINFO_DEFAULT; 127 info->flags = FBINFO_DEFAULT;
189
190 info->fbops = &intelfb_ops; 128 info->fbops = &intelfb_ops;
191 129
192
193 /* setup aperture base/size for vesafb takeover */ 130 /* setup aperture base/size for vesafb takeover */
194 info->aperture_base = dev->mode_config.fb_base; 131 info->apertures = alloc_apertures(1);
132 if (!info->apertures) {
133 ret = -ENOMEM;
134 goto out_unpin;
135 }
136 info->apertures->ranges[0].base = dev->mode_config.fb_base;
195 if (IS_I9XX(dev)) 137 if (IS_I9XX(dev))
196 info->aperture_size = pci_resource_len(dev->pdev, 2); 138 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
197 else 139 else
198 info->aperture_size = pci_resource_len(dev->pdev, 0); 140 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
199 141
200 info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; 142 info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
201 info->fix.smem_len = size; 143 info->fix.smem_len = size;
@@ -208,12 +150,18 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
208 ret = -ENOSPC; 150 ret = -ENOSPC;
209 goto out_unpin; 151 goto out_unpin;
210 } 152 }
153
154 ret = fb_alloc_cmap(&info->cmap, 256, 0);
155 if (ret) {
156 ret = -ENOMEM;
157 goto out_unpin;
158 }
211 info->screen_size = size; 159 info->screen_size = size;
212 160
213// memset(info->screen_base, 0, size); 161// memset(info->screen_base, 0, size);
214 162
215 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); 163 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
216 drm_fb_helper_fill_var(info, fb, fb_width, fb_height); 164 drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
217 165
218 /* FIXME: we really shouldn't expose mmio space at all */ 166 /* FIXME: we really shouldn't expose mmio space at all */
219 info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar); 167 info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
@@ -225,14 +173,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
225 info->pixmap.flags = FB_PIXMAP_SYSTEM; 173 info->pixmap.flags = FB_PIXMAP_SYSTEM;
226 info->pixmap.scan_align = 1; 174 info->pixmap.scan_align = 1;
227 175
228 fb->fbdev = info;
229
230 par->intel_fb = intel_fb;
231
232 /* To allow resizeing without swapping buffers */
233 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", 176 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
234 intel_fb->base.width, intel_fb->base.height, 177 fb->width, fb->height,
235 obj_priv->gtt_offset, fbo); 178 obj_priv->gtt_offset, fbo);
179
236 180
237 mutex_unlock(&dev->struct_mutex); 181 mutex_unlock(&dev->struct_mutex);
238 vga_switcheroo_client_fb_set(dev->pdev, info); 182 vga_switcheroo_client_fb_set(dev->pdev, info);
@@ -247,35 +191,86 @@ out:
247 return ret; 191 return ret;
248} 192}
249 193
250int intelfb_probe(struct drm_device *dev) 194static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
195 struct drm_fb_helper_surface_size *sizes)
251{ 196{
197 struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
198 int new_fb = 0;
252 int ret; 199 int ret;
253 200
254 DRM_DEBUG_KMS("\n"); 201 if (!helper->fb) {
255 ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); 202 ret = intelfb_create(ifbdev, sizes);
256 return ret; 203 if (ret)
204 return ret;
205 new_fb = 1;
206 }
207 return new_fb;
257} 208}
258EXPORT_SYMBOL(intelfb_probe);
259 209
260int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) 210static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
211 .gamma_set = intel_crtc_fb_gamma_set,
212 .gamma_get = intel_crtc_fb_gamma_get,
213 .fb_probe = intel_fb_find_or_create_single,
214};
215
216int intel_fbdev_destroy(struct drm_device *dev,
217 struct intel_fbdev *ifbdev)
261{ 218{
262 struct fb_info *info; 219 struct fb_info *info;
220 struct intel_framebuffer *ifb = &ifbdev->ifb;
263 221
264 if (!fb) 222 if (ifbdev->helper.fbdev) {
265 return -EINVAL; 223 info = ifbdev->helper.fbdev;
266
267 info = fb->fbdev;
268
269 if (info) {
270 struct intelfb_par *par = info->par;
271 unregister_framebuffer(info); 224 unregister_framebuffer(info);
272 iounmap(info->screen_base); 225 iounmap(info->screen_base);
273 if (info->par) 226 if (info->cmap.len)
274 drm_fb_helper_free(&par->helper); 227 fb_dealloc_cmap(&info->cmap);
275 framebuffer_release(info); 228 framebuffer_release(info);
276 } 229 }
277 230
231 drm_fb_helper_fini(&ifbdev->helper);
232
233 drm_framebuffer_cleanup(&ifb->base);
234 if (ifb->obj)
235 drm_gem_object_unreference_unlocked(ifb->obj);
236
237 return 0;
238}
239
240int intel_fbdev_init(struct drm_device *dev)
241{
242 struct intel_fbdev *ifbdev;
243 drm_i915_private_t *dev_priv = dev->dev_private;
244
245 ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
246 if (!ifbdev)
247 return -ENOMEM;
248
249 dev_priv->fbdev = ifbdev;
250 ifbdev->helper.funcs = &intel_fb_helper_funcs;
251
252 drm_fb_helper_init(dev, &ifbdev->helper, 2,
253 INTELFB_CONN_LIMIT);
254
255 drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
256 drm_fb_helper_initial_config(&ifbdev->helper, 32);
278 return 0; 257 return 0;
279} 258}
280EXPORT_SYMBOL(intelfb_remove); 259
260void intel_fbdev_fini(struct drm_device *dev)
261{
262 drm_i915_private_t *dev_priv = dev->dev_private;
263 if (!dev_priv->fbdev)
264 return;
265
266 intel_fbdev_destroy(dev, dev_priv->fbdev);
267 kfree(dev_priv->fbdev);
268 dev_priv->fbdev = NULL;
269}
281MODULE_LICENSE("GPL and additional rights"); 270MODULE_LICENSE("GPL and additional rights");
271
272void intel_fb_output_poll_changed(struct drm_device *dev)
273{
274 drm_i915_private_t *dev_priv = dev->dev_private;
275 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
276}
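
Taken together, the intel_fb.c rewrite replaces the per-framebuffer probe/remove entry points with one long-lived intel_fbdev owned by dev_priv: intel_fbdev_init() registers the helper and picks an initial config, intel_fbdev_fini() unwinds it, and output-poll events are forwarded to drm_fb_helper_hotplug_event(). A runnable userspace analogue of that lifecycle (structure names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct fb_helper { int configured; };
    struct fbdev     { struct fb_helper helper; };
    struct device    { struct fbdev *fbdev; };

    static int fbdev_init(struct device *dev)
    {
        struct fbdev *f = calloc(1, sizeof(*f));

        if (!f)
            return -1;
        dev->fbdev = f;              /* dev_priv->fbdev = ifbdev */
        f->helper.configured = 1;    /* drm_fb_helper_initial_config() */
        return 0;
    }

    static void fbdev_fini(struct device *dev)
    {
        if (!dev->fbdev)
            return;                  /* guard, as intel_fbdev_fini() has */
        free(dev->fbdev);
        dev->fbdev = NULL;
    }

    static void output_poll_changed(struct device *dev)
    {
        /* intel_fb_output_poll_changed() -> drm_fb_helper_hotplug_event() */
        printf("hotplug -> helper (configured=%d)\n",
               dev->fbdev->helper.configured);
    }

    int main(void)
    {
        struct device dev = { 0 };

        if (fbdev_init(&dev))
            return 1;
        output_poll_changed(&dev);
        fbdev_fini(&dev);
        return 0;
    }
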
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 48cade0cf7b1..65727f0a79a3 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -39,7 +39,6 @@
39 39
40struct intel_hdmi_priv { 40struct intel_hdmi_priv {
41 u32 sdvox_reg; 41 u32 sdvox_reg;
42 u32 save_SDVOX;
43 bool has_hdmi_sink; 42 bool has_hdmi_sink;
44}; 43};
45 44
@@ -63,8 +62,12 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
63 if (hdmi_priv->has_hdmi_sink) 62 if (hdmi_priv->has_hdmi_sink)
64 sdvox |= SDVO_AUDIO_ENABLE; 63 sdvox |= SDVO_AUDIO_ENABLE;
65 64
66 if (intel_crtc->pipe == 1) 65 if (intel_crtc->pipe == 1) {
67 sdvox |= SDVO_PIPE_B_SELECT; 66 if (HAS_PCH_CPT(dev))
67 sdvox |= PORT_TRANS_B_SEL_CPT;
68 else
69 sdvox |= SDVO_PIPE_B_SELECT;
70 }
68 71
69 I915_WRITE(hdmi_priv->sdvox_reg, sdvox); 72 I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
70 POSTING_READ(hdmi_priv->sdvox_reg); 73 POSTING_READ(hdmi_priv->sdvox_reg);
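
The hunk above is the only functional change to HDMI mode setting here: on CPT-style PCH parts, pipe B is selected through a different transcoder bit than the legacy SDVO_PIPE_B_SELECT. A standalone sketch of the selection logic; the two bit positions below are illustrative placeholders, the real values live in i915_reg.h:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative placeholders; real values live in i915_reg.h. */
    #define SDVO_PIPE_B_SELECT    (1u << 30)
    #define PORT_TRANS_B_SEL_CPT  (1u << 29)

    /* Same decision as intel_hdmi_mode_set(): pipe B uses a different
     * select bit on CPT parts. */
    static uint32_t sdvox_for_pipe(bool has_pch_cpt, int pipe, uint32_t sdvox)
    {
        if (pipe == 1)
            sdvox |= has_pch_cpt ? PORT_TRANS_B_SEL_CPT : SDVO_PIPE_B_SELECT;
        return sdvox;
    }

    int main(void)
    {
        printf("cpt: %#x, pre-cpt: %#x\n",
               (unsigned)sdvox_for_pipe(true, 1, 0),
               (unsigned)sdvox_for_pipe(false, 1, 0));
        return 0;
    }
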
@@ -106,27 +109,6 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
106 } 109 }
107} 110}
108 111
109static void intel_hdmi_save(struct drm_connector *connector)
110{
111 struct drm_device *dev = connector->dev;
112 struct drm_i915_private *dev_priv = dev->dev_private;
113 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
114 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
115
116 hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
117}
118
119static void intel_hdmi_restore(struct drm_connector *connector)
120{
121 struct drm_device *dev = connector->dev;
122 struct drm_i915_private *dev_priv = dev->dev_private;
123 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
124 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
125
126 I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
127 POSTING_READ(hdmi_priv->sdvox_reg);
128}
129
130static int intel_hdmi_mode_valid(struct drm_connector *connector, 112static int intel_hdmi_mode_valid(struct drm_connector *connector,
131 struct drm_display_mode *mode) 113 struct drm_display_mode *mode)
132{ 114{
@@ -151,13 +133,14 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
151static enum drm_connector_status 133static enum drm_connector_status
152intel_hdmi_detect(struct drm_connector *connector) 134intel_hdmi_detect(struct drm_connector *connector)
153{ 135{
154 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 136 struct drm_encoder *encoder = intel_attached_encoder(connector);
137 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
155 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; 138 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
156 struct edid *edid = NULL; 139 struct edid *edid = NULL;
157 enum drm_connector_status status = connector_status_disconnected; 140 enum drm_connector_status status = connector_status_disconnected;
158 141
159 hdmi_priv->has_hdmi_sink = false; 142 hdmi_priv->has_hdmi_sink = false;
160 edid = drm_get_edid(&intel_encoder->base, 143 edid = drm_get_edid(connector,
161 intel_encoder->ddc_bus); 144 intel_encoder->ddc_bus);
162 145
163 if (edid) { 146 if (edid) {
@@ -165,7 +148,7 @@ intel_hdmi_detect(struct drm_connector *connector)
165 status = connector_status_connected; 148 status = connector_status_connected;
166 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 149 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
167 } 150 }
168 intel_encoder->base.display_info.raw_edid = NULL; 151 connector->display_info.raw_edid = NULL;
169 kfree(edid); 152 kfree(edid);
170 } 153 }
171 154
@@ -174,24 +157,21 @@ intel_hdmi_detect(struct drm_connector *connector)
174 157
175static int intel_hdmi_get_modes(struct drm_connector *connector) 158static int intel_hdmi_get_modes(struct drm_connector *connector)
176{ 159{
177 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 160 struct drm_encoder *encoder = intel_attached_encoder(connector);
161 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
178 162
179 /* We should parse the EDID data and find out if it's an HDMI sink so 163 /* We should parse the EDID data and find out if it's an HDMI sink so
180 * we can send audio to it. 164 * we can send audio to it.
181 */ 165 */
182 166
183 return intel_ddc_get_modes(intel_encoder); 167 return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
184} 168}
185 169
186static void intel_hdmi_destroy(struct drm_connector *connector) 170static void intel_hdmi_destroy(struct drm_connector *connector)
187{ 171{
188 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
189
190 if (intel_encoder->i2c_bus)
191 intel_i2c_destroy(intel_encoder->i2c_bus);
192 drm_sysfs_connector_remove(connector); 172 drm_sysfs_connector_remove(connector);
193 drm_connector_cleanup(connector); 173 drm_connector_cleanup(connector);
194 kfree(intel_encoder); 174 kfree(connector);
195} 175}
196 176
197static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { 177static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@@ -204,8 +184,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
204 184
205static const struct drm_connector_funcs intel_hdmi_connector_funcs = { 185static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
206 .dpms = drm_helper_connector_dpms, 186 .dpms = drm_helper_connector_dpms,
207 .save = intel_hdmi_save,
208 .restore = intel_hdmi_restore,
209 .detect = intel_hdmi_detect, 187 .detect = intel_hdmi_detect,
210 .fill_modes = drm_helper_probe_single_connector_modes, 188 .fill_modes = drm_helper_probe_single_connector_modes,
211 .destroy = intel_hdmi_destroy, 189 .destroy = intel_hdmi_destroy,
@@ -214,12 +192,17 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
214static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { 192static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
215 .get_modes = intel_hdmi_get_modes, 193 .get_modes = intel_hdmi_get_modes,
216 .mode_valid = intel_hdmi_mode_valid, 194 .mode_valid = intel_hdmi_mode_valid,
217 .best_encoder = intel_best_encoder, 195 .best_encoder = intel_attached_encoder,
218}; 196};
219 197
220static void intel_hdmi_enc_destroy(struct drm_encoder *encoder) 198static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
221{ 199{
200 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
201
202 if (intel_encoder->i2c_bus)
203 intel_i2c_destroy(intel_encoder->i2c_bus);
222 drm_encoder_cleanup(encoder); 204 drm_encoder_cleanup(encoder);
205 kfree(intel_encoder);
223} 206}
224 207
225static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { 208static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -231,21 +214,30 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
231 struct drm_i915_private *dev_priv = dev->dev_private; 214 struct drm_i915_private *dev_priv = dev->dev_private;
232 struct drm_connector *connector; 215 struct drm_connector *connector;
233 struct intel_encoder *intel_encoder; 216 struct intel_encoder *intel_encoder;
217 struct intel_connector *intel_connector;
234 struct intel_hdmi_priv *hdmi_priv; 218 struct intel_hdmi_priv *hdmi_priv;
235 219
236 intel_encoder = kcalloc(sizeof(struct intel_encoder) + 220 intel_encoder = kcalloc(sizeof(struct intel_encoder) +
237 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); 221 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
238 if (!intel_encoder) 222 if (!intel_encoder)
239 return; 223 return;
224
225 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
226 if (!intel_connector) {
227 kfree(intel_encoder);
228 return;
229 }
230
240 hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); 231 hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
241 232
242 connector = &intel_encoder->base; 233 connector = &intel_connector->base;
243 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, 234 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
244 DRM_MODE_CONNECTOR_HDMIA); 235 DRM_MODE_CONNECTOR_HDMIA);
245 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); 236 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
246 237
247 intel_encoder->type = INTEL_OUTPUT_HDMI; 238 intel_encoder->type = INTEL_OUTPUT_HDMI;
248 239
240 connector->polled = DRM_CONNECTOR_POLL_HPD;
249 connector->interlace_allowed = 0; 241 connector->interlace_allowed = 0;
250 connector->doublescan_allowed = 0; 242 connector->doublescan_allowed = 0;
251 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 243 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
@@ -285,7 +277,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
285 DRM_MODE_ENCODER_TMDS); 277 DRM_MODE_ENCODER_TMDS);
286 drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); 278 drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
287 279
288 drm_mode_connector_attach_encoder(&intel_encoder->base, 280 drm_mode_connector_attach_encoder(&intel_connector->base,
289 &intel_encoder->enc); 281 &intel_encoder->enc);
290 drm_sysfs_connector_add(connector); 282 drm_sysfs_connector_add(connector);
291 283
@@ -303,6 +295,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
303err_connector: 295err_connector:
304 drm_connector_cleanup(connector); 296 drm_connector_cleanup(connector);
305 kfree(intel_encoder); 297 kfree(intel_encoder);
298 kfree(intel_connector);
306 299
307 return; 300 return;
308} 301}
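
As with DVO, the HDMI connector and encoder now have separate lifetimes, EDID is fetched against the connector rather than the encoder's embedded one, and the connector is marked DRM_CONNECTOR_POLL_HPD so the new fb-helper polling sees hotplugs. A userspace analogue of the detect flow, with fake_get_edid() standing in for drm_get_edid() and the sink decision simplified to "any EDID means HDMI":

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    enum status { DISCONNECTED, CONNECTED };

    static unsigned char *fake_get_edid(int plugged)
    {
        unsigned char *e;

        if (!plugged)
            return NULL;
        e = malloc(128);
        if (e)
            memset(e, 0, 128);
        return e;
    }

    /* Analogue of intel_hdmi_detect(): probe for an EDID blob, derive
     * the sink type from it, then drop the blob. */
    static enum status detect(int plugged, int *has_hdmi_sink)
    {
        enum status st = DISCONNECTED;
        unsigned char *edid = fake_get_edid(plugged);

        *has_hdmi_sink = 0;
        if (edid) {
            st = CONNECTED;
            *has_hdmi_sink = 1;   /* stands in for drm_detect_hdmi_monitor() */
            free(edid);           /* kfree(edid); raw_edid cleared */
        }
        return st;
    }

    int main(void)
    {
        int hdmi;

        printf("plugged: %d (hdmi=%d)\n", detect(1, &hdmi), hdmi);
        printf("unplugged: %d\n", detect(0, &hdmi));
        return 0;
    }
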
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b66806a37d37..6a1accd83aec 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -139,75 +139,6 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
139 /* XXX: We never power down the LVDS pairs. */ 139 /* XXX: We never power down the LVDS pairs. */
140} 140}
141 141
142static void intel_lvds_save(struct drm_connector *connector)
143{
144 struct drm_device *dev = connector->dev;
145 struct drm_i915_private *dev_priv = dev->dev_private;
146 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
147 u32 pwm_ctl_reg;
148
149 if (HAS_PCH_SPLIT(dev)) {
150 pp_on_reg = PCH_PP_ON_DELAYS;
151 pp_off_reg = PCH_PP_OFF_DELAYS;
152 pp_ctl_reg = PCH_PP_CONTROL;
153 pp_div_reg = PCH_PP_DIVISOR;
154 pwm_ctl_reg = BLC_PWM_CPU_CTL;
155 } else {
156 pp_on_reg = PP_ON_DELAYS;
157 pp_off_reg = PP_OFF_DELAYS;
158 pp_ctl_reg = PP_CONTROL;
159 pp_div_reg = PP_DIVISOR;
160 pwm_ctl_reg = BLC_PWM_CTL;
161 }
162
163 dev_priv->savePP_ON = I915_READ(pp_on_reg);
164 dev_priv->savePP_OFF = I915_READ(pp_off_reg);
165 dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
166 dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
167 dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
168 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
169 BACKLIGHT_DUTY_CYCLE_MASK);
170
171 /*
172 * If the light is off at server startup, just make it full brightness
173 */
174 if (dev_priv->backlight_duty_cycle == 0)
175 dev_priv->backlight_duty_cycle =
176 intel_lvds_get_max_backlight(dev);
177}
178
179static void intel_lvds_restore(struct drm_connector *connector)
180{
181 struct drm_device *dev = connector->dev;
182 struct drm_i915_private *dev_priv = dev->dev_private;
183 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
184 u32 pwm_ctl_reg;
185
186 if (HAS_PCH_SPLIT(dev)) {
187 pp_on_reg = PCH_PP_ON_DELAYS;
188 pp_off_reg = PCH_PP_OFF_DELAYS;
189 pp_ctl_reg = PCH_PP_CONTROL;
190 pp_div_reg = PCH_PP_DIVISOR;
191 pwm_ctl_reg = BLC_PWM_CPU_CTL;
192 } else {
193 pp_on_reg = PP_ON_DELAYS;
194 pp_off_reg = PP_OFF_DELAYS;
195 pp_ctl_reg = PP_CONTROL;
196 pp_div_reg = PP_DIVISOR;
197 pwm_ctl_reg = BLC_PWM_CTL;
198 }
199
200 I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
201 I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
202 I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
203 I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
204 I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
205 if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
206 intel_lvds_set_power(dev, true);
207 else
208 intel_lvds_set_power(dev, false);
209}
210
211static int intel_lvds_mode_valid(struct drm_connector *connector, 142static int intel_lvds_mode_valid(struct drm_connector *connector,
212 struct drm_display_mode *mode) 143 struct drm_display_mode *mode)
213{ 144{
@@ -635,12 +566,13 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
635static int intel_lvds_get_modes(struct drm_connector *connector) 566static int intel_lvds_get_modes(struct drm_connector *connector)
636{ 567{
637 struct drm_device *dev = connector->dev; 568 struct drm_device *dev = connector->dev;
638 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 569 struct drm_encoder *encoder = intel_attached_encoder(connector);
570 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
639 struct drm_i915_private *dev_priv = dev->dev_private; 571 struct drm_i915_private *dev_priv = dev->dev_private;
640 int ret = 0; 572 int ret = 0;
641 573
642 if (dev_priv->lvds_edid_good) { 574 if (dev_priv->lvds_edid_good) {
643 ret = intel_ddc_get_modes(intel_encoder); 575 ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
644 576
645 if (ret) 577 if (ret)
646 return ret; 578 return ret;
@@ -717,11 +649,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
717static void intel_lvds_destroy(struct drm_connector *connector) 649static void intel_lvds_destroy(struct drm_connector *connector)
718{ 650{
719 struct drm_device *dev = connector->dev; 651 struct drm_device *dev = connector->dev;
720 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
721 struct drm_i915_private *dev_priv = dev->dev_private; 652 struct drm_i915_private *dev_priv = dev->dev_private;
722 653
723 if (intel_encoder->ddc_bus)
724 intel_i2c_destroy(intel_encoder->ddc_bus);
725 if (dev_priv->lid_notifier.notifier_call) 654 if (dev_priv->lid_notifier.notifier_call)
726 acpi_lid_notifier_unregister(&dev_priv->lid_notifier); 655 acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
727 drm_sysfs_connector_remove(connector); 656 drm_sysfs_connector_remove(connector);
@@ -734,13 +663,14 @@ static int intel_lvds_set_property(struct drm_connector *connector,
734 uint64_t value) 663 uint64_t value)
735{ 664{
736 struct drm_device *dev = connector->dev; 665 struct drm_device *dev = connector->dev;
737 struct intel_encoder *intel_encoder =
738 to_intel_encoder(connector);
739 666
740 if (property == dev->mode_config.scaling_mode_property && 667 if (property == dev->mode_config.scaling_mode_property &&
741 connector->encoder) { 668 connector->encoder) {
742 struct drm_crtc *crtc = connector->encoder->crtc; 669 struct drm_crtc *crtc = connector->encoder->crtc;
670 struct drm_encoder *encoder = connector->encoder;
671 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
743 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; 672 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
673
744 if (value == DRM_MODE_SCALE_NONE) { 674 if (value == DRM_MODE_SCALE_NONE) {
745 DRM_DEBUG_KMS("no scaling not supported\n"); 675 DRM_DEBUG_KMS("no scaling not supported\n");
746 return 0; 676 return 0;
@@ -774,13 +704,11 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
774static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { 704static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
775 .get_modes = intel_lvds_get_modes, 705 .get_modes = intel_lvds_get_modes,
776 .mode_valid = intel_lvds_mode_valid, 706 .mode_valid = intel_lvds_mode_valid,
777 .best_encoder = intel_best_encoder, 707 .best_encoder = intel_attached_encoder,
778}; 708};
779 709
780static const struct drm_connector_funcs intel_lvds_connector_funcs = { 710static const struct drm_connector_funcs intel_lvds_connector_funcs = {
781 .dpms = drm_helper_connector_dpms, 711 .dpms = drm_helper_connector_dpms,
782 .save = intel_lvds_save,
783 .restore = intel_lvds_restore,
784 .detect = intel_lvds_detect, 712 .detect = intel_lvds_detect,
785 .fill_modes = drm_helper_probe_single_connector_modes, 713 .fill_modes = drm_helper_probe_single_connector_modes,
786 .set_property = intel_lvds_set_property, 714 .set_property = intel_lvds_set_property,
@@ -790,7 +718,12 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
790 718
791static void intel_lvds_enc_destroy(struct drm_encoder *encoder) 719static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
792{ 720{
721 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
722
723 if (intel_encoder->ddc_bus)
724 intel_i2c_destroy(intel_encoder->ddc_bus);
793 drm_encoder_cleanup(encoder); 725 drm_encoder_cleanup(encoder);
726 kfree(intel_encoder);
794} 727}
795 728
796static const struct drm_encoder_funcs intel_lvds_enc_funcs = { 729static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@@ -979,6 +912,7 @@ void intel_lvds_init(struct drm_device *dev)
979{ 912{
980 struct drm_i915_private *dev_priv = dev->dev_private; 913 struct drm_i915_private *dev_priv = dev->dev_private;
981 struct intel_encoder *intel_encoder; 914 struct intel_encoder *intel_encoder;
915 struct intel_connector *intel_connector;
982 struct drm_connector *connector; 916 struct drm_connector *connector;
983 struct drm_encoder *encoder; 917 struct drm_encoder *encoder;
984 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 918 struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -1012,19 +946,27 @@ void intel_lvds_init(struct drm_device *dev)
1012 return; 946 return;
1013 } 947 }
1014 948
1015 connector = &intel_encoder->base; 949 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
950 if (!intel_connector) {
951 kfree(intel_encoder);
952 return;
953 }
954
955 connector = &intel_connector->base;
1016 encoder = &intel_encoder->enc; 956 encoder = &intel_encoder->enc;
1017 drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs, 957 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
1018 DRM_MODE_CONNECTOR_LVDS); 958 DRM_MODE_CONNECTOR_LVDS);
1019 959
1020 drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, 960 drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
1021 DRM_MODE_ENCODER_LVDS); 961 DRM_MODE_ENCODER_LVDS);
1022 962
1023 drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); 963 drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
1024 intel_encoder->type = INTEL_OUTPUT_LVDS; 964 intel_encoder->type = INTEL_OUTPUT_LVDS;
1025 965
1026 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); 966 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
1027 intel_encoder->crtc_mask = (1 << 1); 967 intel_encoder->crtc_mask = (1 << 1);
968 if (IS_I965G(dev))
969 intel_encoder->crtc_mask |= (1 << 0);
1028 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); 970 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
1029 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); 971 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
1030 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 972 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1039,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev)
1039 * the initial panel fitting mode will be FULL_SCREEN. 981 * the initial panel fitting mode will be FULL_SCREEN.
1040 */ 982 */
1041 983
1042 drm_connector_attach_property(&intel_encoder->base, 984 drm_connector_attach_property(&intel_connector->base,
1043 dev->mode_config.scaling_mode_property, 985 dev->mode_config.scaling_mode_property,
1044 DRM_MODE_SCALE_FULLSCREEN); 986 DRM_MODE_SCALE_FULLSCREEN);
1045 lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; 987 lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -1067,7 +1009,7 @@ void intel_lvds_init(struct drm_device *dev)
1067 */ 1009 */
1068 dev_priv->lvds_edid_good = true; 1010 dev_priv->lvds_edid_good = true;
1069 1011
1070 if (!intel_ddc_get_modes(intel_encoder)) 1012 if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
1071 dev_priv->lvds_edid_good = false; 1013 dev_priv->lvds_edid_good = false;
1072 1014
1073 list_for_each_entry(scan, &connector->probed_modes, head) { 1015 list_for_each_entry(scan, &connector->probed_modes, head) {
@@ -1151,4 +1093,5 @@ failed:
1151 drm_connector_cleanup(connector); 1093 drm_connector_cleanup(connector);
1152 drm_encoder_cleanup(encoder); 1094 drm_encoder_cleanup(encoder);
1153 kfree(intel_encoder); 1095 kfree(intel_encoder);
1096 kfree(intel_connector);
1154} 1097}
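
Besides dropping the register save/restore hooks, the LVDS init gains one functional tweak: crtc_mask, the bitmask of pipes the encoder may drive, now also includes pipe A on 965G-class hardware. A minimal sketch of that mask construction:

    #include <stdio.h>

    /* Mirrors the intel_lvds_init() change above: LVDS is pipe B only,
     * except on 965G-class parts where pipe A is also allowed. */
    static unsigned lvds_crtc_mask(int is_i965g)
    {
        unsigned mask = 1u << 1;      /* pipe B */

        if (is_i965g)
            mask |= 1u << 0;          /* pipe A too */
        return mask;
    }

    int main(void)
    {
        printf("pre-965: %#x, 965G: %#x\n",
               lvds_crtc_mask(0), lvds_crtc_mask(1));
        return 0;
    }
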
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 8e5c83b2d120..4b1fd3d9c73c 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
54 } 54 }
55 }; 55 };
56 56
57 intel_i2c_quirk_set(intel_encoder->base.dev, true); 57 intel_i2c_quirk_set(intel_encoder->enc.dev, true);
58 ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); 58 ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
59 intel_i2c_quirk_set(intel_encoder->base.dev, false); 59 intel_i2c_quirk_set(intel_encoder->enc.dev, false);
60 if (ret == 2) 60 if (ret == 2)
61 return true; 61 return true;
62 62
@@ -66,22 +66,23 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
66/** 66/**
67 * intel_ddc_get_modes - get modelist from monitor 67 * intel_ddc_get_modes - get modelist from monitor
68 * @connector: DRM connector device to use 68 * @connector: DRM connector device to use
69 * @adapter: i2c adapter
69 * 70 *
70 * Fetch the EDID information from @connector using the DDC bus. 71 * Fetch the EDID information from @connector using the DDC bus.
71 */ 72 */
72int intel_ddc_get_modes(struct intel_encoder *intel_encoder) 73int intel_ddc_get_modes(struct drm_connector *connector,
74 struct i2c_adapter *adapter)
73{ 75{
74 struct edid *edid; 76 struct edid *edid;
75 int ret = 0; 77 int ret = 0;
76 78
77 intel_i2c_quirk_set(intel_encoder->base.dev, true); 79 intel_i2c_quirk_set(connector->dev, true);
78 edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus); 80 edid = drm_get_edid(connector, adapter);
79 intel_i2c_quirk_set(intel_encoder->base.dev, false); 81 intel_i2c_quirk_set(connector->dev, false);
80 if (edid) { 82 if (edid) {
81 drm_mode_connector_update_edid_property(&intel_encoder->base, 83 drm_mode_connector_update_edid_property(connector, edid);
82 edid); 84 ret = drm_add_edid_modes(connector, edid);
83 ret = drm_add_edid_modes(&intel_encoder->base, edid); 85 connector->display_info.raw_edid = NULL;
84 intel_encoder->base.display_info.raw_edid = NULL;
85 kfree(edid); 86 kfree(edid);
86 } 87 }
87 88
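
intel_ddc_get_modes() now takes the connector and the i2c adapter explicitly instead of deriving both from an intel_encoder, which lets a caller retry on an alternate bus (the Mac mini analog-DDC fallback in intel_sdvo.c relies on this). A runnable userspace analogue of the new call shape (types and adapter names are illustrative):

    #include <stdio.h>

    struct i2c_adapter { const char *name; int has_edid; };
    struct connector   { const char *name; };

    /* Analogue of intel_ddc_get_modes(connector, adapter): the bus is
     * an explicit argument, so callers can probe more than one. */
    static int ddc_get_modes(struct connector *c, struct i2c_adapter *bus)
    {
        printf("%s: probing via %s\n", c->name, bus->name);
        return bus->has_edid ? 1 : 0;   /* number of modes found */
    }

    int main(void)
    {
        struct connector con = { "HDMI-A-1" };
        struct i2c_adapter primary = { "GPIOD", 0 };
        struct i2c_adapter analog  = { "CRTDDC_A", 1 };

        if (!ddc_get_modes(&con, &primary))
            ddc_get_modes(&con, &analog);  /* Mac mini style fallback */
        return 0;
    }
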
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 6d524a1fc271..b0e17b06eb6e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -373,7 +373,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
373 373
374 /* never have the overlay hw on without showing a frame */ 374 /* never have the overlay hw on without showing a frame */
375 BUG_ON(!overlay->vid_bo); 375 BUG_ON(!overlay->vid_bo);
376 obj = overlay->vid_bo->obj; 376 obj = &overlay->vid_bo->base;
377 377
378 i915_gem_object_unpin(obj); 378 i915_gem_object_unpin(obj);
379 drm_gem_object_unreference(obj); 379 drm_gem_object_unreference(obj);
@@ -411,7 +411,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
411 411
412 switch (overlay->hw_wedged) { 412 switch (overlay->hw_wedged) {
413 case RELEASE_OLD_VID: 413 case RELEASE_OLD_VID:
414 obj = overlay->old_vid_bo->obj; 414 obj = &overlay->old_vid_bo->base;
415 i915_gem_object_unpin(obj); 415 i915_gem_object_unpin(obj);
416 drm_gem_object_unreference(obj); 416 drm_gem_object_unreference(obj);
417 overlay->old_vid_bo = NULL; 417 overlay->old_vid_bo = NULL;
@@ -467,7 +467,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
467 if (ret != 0) 467 if (ret != 0)
468 return ret; 468 return ret;
469 469
470 obj = overlay->old_vid_bo->obj; 470 obj = &overlay->old_vid_bo->base;
471 i915_gem_object_unpin(obj); 471 i915_gem_object_unpin(obj);
472 drm_gem_object_unreference(obj); 472 drm_gem_object_unreference(obj);
473 overlay->old_vid_bo = NULL; 473 overlay->old_vid_bo = NULL;
@@ -1341,7 +1341,7 @@ void intel_setup_overlay(struct drm_device *dev)
1341 return; 1341 return;
1342 overlay->dev = dev; 1342 overlay->dev = dev;
1343 1343
1344 reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); 1344 reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
1345 if (!reg_bo) 1345 if (!reg_bo)
1346 goto out_free; 1346 goto out_free;
1347 overlay->reg_bo = to_intel_bo(reg_bo); 1347 overlay->reg_bo = to_intel_bo(reg_bo);
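
The overlay changes swap the vid_bo->obj pointer chase for the address of an embedded GEM base object, with to_intel_bo() reduced to the usual container_of() idiom, and allocate through i915_gem_alloc_object(). A standalone demonstration of the embedding idiom (the struct layout is illustrative):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct drm_gem_object { size_t size; };
    struct intel_bo { int pin_count; struct drm_gem_object base; };

    /* The i915 object embeds its GEM base instead of pointing at it,
     * so &bo->base replaces bo->obj and the reverse lookup is just
     * container_of(). */
    static struct intel_bo *to_intel_bo(struct drm_gem_object *obj)
    {
        return container_of(obj, struct intel_bo, base);
    }

    int main(void)
    {
        struct intel_bo bo = { .pin_count = 1, .base = { 4096 } };
        struct drm_gem_object *obj = &bo.base;    /* was bo->obj */

        printf("round-trip ok: %d\n", to_intel_bo(obj) == &bo);
        return 0;
    }
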
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 87d953664cb0..aba72c489a2f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,7 +36,18 @@
36#include "i915_drm.h" 36#include "i915_drm.h"
37#include "i915_drv.h" 37#include "i915_drv.h"
38#include "intel_sdvo_regs.h" 38#include "intel_sdvo_regs.h"
39#include <linux/dmi.h> 39
40#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
41#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
42#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
43#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
44
45#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
46 SDVO_TV_MASK)
47
48#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
49#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
50
40 51
41static char *tv_format_names[] = { 52static char *tv_format_names[] = {
42 "NTSC_M" , "NTSC_J" , "NTSC_443", 53 "NTSC_M" , "NTSC_J" , "NTSC_443",
@@ -86,12 +97,6 @@ struct intel_sdvo_priv {
86 /* This is for current tv format name */ 97 /* This is for current tv format name */
87 char *tv_format_name; 98 char *tv_format_name;
88 99
89 /* This contains all current supported TV format */
90 char *tv_format_supported[TV_FORMAT_NUM];
91 int format_supported_num;
92 struct drm_property *tv_format_property;
93 struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
94
95 /** 100 /**
96 * This is set if we treat the device as HDMI, instead of DVI. 101 * This is set if we treat the device as HDMI, instead of DVI.
97 */ 102 */
@@ -112,12 +117,6 @@ struct intel_sdvo_priv {
112 */ 117 */
113 struct drm_display_mode *sdvo_lvds_fixed_mode; 118 struct drm_display_mode *sdvo_lvds_fixed_mode;
114 119
115 /**
116 * Returned SDTV resolutions allowed for the current format, if the
117 * device reported it.
118 */
119 struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
120
121 /* 120 /*
122 * supported encoding mode, used to determine whether HDMI is 121 * supported encoding mode, used to determine whether HDMI is
123 * supported 122 * supported
@@ -130,11 +129,24 @@ struct intel_sdvo_priv {
130 /* Mac mini hack -- use the same DDC as the analog connector */ 129 /* Mac mini hack -- use the same DDC as the analog connector */
131 struct i2c_adapter *analog_ddc_bus; 130 struct i2c_adapter *analog_ddc_bus;
132 131
133 int save_sdvo_mult; 132};
134 u16 save_active_outputs; 133
135 struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; 134struct intel_sdvo_connector {
136 struct intel_sdvo_dtd save_output_dtd[16]; 135 /* Mark the type of connector */
137 u32 save_SDVOX; 136 uint16_t output_flag;
137
138 /* This contains all current supported TV format */
139 char *tv_format_supported[TV_FORMAT_NUM];
140 int format_supported_num;
141 struct drm_property *tv_format_property;
142 struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
143
144 /**
145 * Returned SDTV resolutions allowed for the current format, if the
146 * device reported it.
147 */
148 struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
149
138 /* add the property for the SDVO-TV */ 150 /* add the property for the SDVO-TV */
139 struct drm_property *left_property; 151 struct drm_property *left_property;
140 struct drm_property *right_property; 152 struct drm_property *right_property;
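
The struct split above moves everything that can differ per output (output_flag, the TV format properties, the SDTV resolution reply) out of the shared intel_sdvo_priv into a new per-connector intel_sdvo_connector, since one multifunction SDVO encoder may expose several connectors. A small analogue of the shared-versus-per-connector layout (fields are illustrative):

    #include <stdio.h>

    struct sdvo_encoder  { int ddc_bus; };          /* shared state */
    struct sdvo_connector {
        struct sdvo_encoder *enc;                   /* points at shared */
        unsigned output_flag;                       /* per-connector */
    };

    int main(void)
    {
        struct sdvo_encoder enc = { .ddc_bus = 2 };
        struct sdvo_connector tmds = { &enc, 1u << 0 };
        struct sdvo_connector tv   = { &enc, 1u << 5 };

        /* Two connectors, one encoder: bus is shared, flags are not. */
        printf("shared bus=%d, tmds flag=%#x, tv flag=%#x\n",
               tmds.enc->ddc_bus, tmds.output_flag, tv.output_flag);
        return 0;
    }
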
@@ -162,7 +174,12 @@ struct intel_sdvo_priv {
162}; 174};
163 175
164static bool 176static bool
165intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags); 177intel_sdvo_output_setup(struct intel_encoder *intel_encoder,
178 uint16_t flags);
179static void
180intel_sdvo_tv_create_property(struct drm_connector *connector, int type);
181static void
182intel_sdvo_create_enhance_property(struct drm_connector *connector);
166 183
167/** 184/**
168 * Writes the SDVOB or SDVOC with the given value, but always writes both 185 * Writes the SDVOB or SDVOC with the given value, but always writes both
@@ -171,12 +188,18 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
171 */ 188 */
172static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) 189static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
173{ 190{
174 struct drm_device *dev = intel_encoder->base.dev; 191 struct drm_device *dev = intel_encoder->enc.dev;
175 struct drm_i915_private *dev_priv = dev->dev_private; 192 struct drm_i915_private *dev_priv = dev->dev_private;
176 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; 193 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
177 u32 bval = val, cval = val; 194 u32 bval = val, cval = val;
178 int i; 195 int i;
179 196
197 if (sdvo_priv->sdvo_reg == PCH_SDVOB) {
198 I915_WRITE(sdvo_priv->sdvo_reg, val);
199 I915_READ(sdvo_priv->sdvo_reg);
200 return;
201 }
202
180 if (sdvo_priv->sdvo_reg == SDVOB) { 203 if (sdvo_priv->sdvo_reg == SDVOB) {
181 cval = I915_READ(SDVOC); 204 cval = I915_READ(SDVOC);
182 } else { 205 } else {
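
The new PCH_SDVOB early return in intel_sdvo_write_sdvox() reflects that PCH platforms have a single SDVO port, so the legacy step of rewriting both SDVOB and SDVOC together is skipped in favor of one write plus a posting read. A standalone sketch of the control flow (register indices and the mmio array are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    enum reg { SDVOB, SDVOC, PCH_SDVOB };   /* illustrative indices */

    static uint32_t mmio[3];

    static void write_sdvox(enum reg r, uint32_t val)
    {
        if (r == PCH_SDVOB) {
            mmio[PCH_SDVOB] = val;   /* single write + posting read */
            return;
        }
        /* pre-PCH: read the sibling port and rewrite both together */
        uint32_t bval = (r == SDVOB) ? val : mmio[SDVOB];
        uint32_t cval = (r == SDVOC) ? val : mmio[SDVOC];
        mmio[SDVOB] = bval;
        mmio[SDVOC] = cval;
    }

    int main(void)
    {
        write_sdvox(PCH_SDVOB, 0x80000000u);
        printf("PCH_SDVOB=%#x\n", (unsigned)mmio[PCH_SDVOB]);
        return 0;
    }
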
@@ -353,7 +376,8 @@ static const struct _sdvo_cmd_name {
353 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), 376 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
354}; 377};
355 378
356#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC") 379#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
380#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC")
357#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv) 381#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
358 382
359static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, 383static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
@@ -563,17 +587,6 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b
563 return true; 587 return true;
564} 588}
565 589
566static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder,
567 u16 *outputs)
568{
569 u8 status;
570
571 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
572 status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs));
573
574 return (status == SDVO_CMD_STATUS_SUCCESS);
575}
576
577static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, 590static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
578 u16 outputs) 591 u16 outputs)
579{ 592{
@@ -646,40 +659,6 @@ static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
646 return (status == SDVO_CMD_STATUS_SUCCESS); 659 return (status == SDVO_CMD_STATUS_SUCCESS);
647} 660}
648 661
649static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd,
650 struct intel_sdvo_dtd *dtd)
651{
652 u8 status;
653
654 intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0);
655 status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
656 sizeof(dtd->part1));
657 if (status != SDVO_CMD_STATUS_SUCCESS)
658 return false;
659
660 intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0);
661 status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
662 sizeof(dtd->part2));
663 if (status != SDVO_CMD_STATUS_SUCCESS)
664 return false;
665
666 return true;
667}
668
669static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder,
670 struct intel_sdvo_dtd *dtd)
671{
672 return intel_sdvo_get_timing(intel_encoder,
673 SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
674}
675
676static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder,
677 struct intel_sdvo_dtd *dtd)
678{
679 return intel_sdvo_get_timing(intel_encoder,
680 SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
681}
682
683static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, 662static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
684 struct intel_sdvo_dtd *dtd) 663 struct intel_sdvo_dtd *dtd)
685{ 664{
@@ -767,23 +746,6 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_en
767 return false; 746 return false;
768} 747}
769 748
770static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder)
771{
772 u8 response, status;
773
774 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
775 status = intel_sdvo_read_response(intel_encoder, &response, 1);
776
777 if (status != SDVO_CMD_STATUS_SUCCESS) {
778 DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
779 return SDVO_CLOCK_RATE_MULT_1X;
780 } else {
781 DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response);
782 }
783
784 return response;
785}
786
787static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) 749static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
788{ 750{
789 u8 status; 751 u8 status;
@@ -1071,7 +1033,7 @@ static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
1071 memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? 1033 memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
1072 sizeof(format) : sizeof(format_map)); 1034 sizeof(format) : sizeof(format_map));
1073 1035
1074 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map, 1036 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format,
1075 sizeof(format)); 1037 sizeof(format));
1076 1038
1077 status = intel_sdvo_read_response(intel_encoder, NULL, 0); 1039 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
@@ -1101,7 +1063,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1101 /* Set output timings */ 1063 /* Set output timings */
1102 intel_sdvo_get_dtd_from_mode(&output_dtd, mode); 1064 intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
1103 intel_sdvo_set_target_output(intel_encoder, 1065 intel_sdvo_set_target_output(intel_encoder,
1104 dev_priv->controlled_output); 1066 dev_priv->attached_output);
1105 intel_sdvo_set_output_timing(intel_encoder, &output_dtd); 1067 intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
1106 1068
1107 /* Set the input timing to the screen. Assume always input 0. */ 1069 /* Set the input timing to the screen. Assume always input 0. */
@@ -1139,7 +1101,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1139 dev_priv->sdvo_lvds_fixed_mode); 1101 dev_priv->sdvo_lvds_fixed_mode);
1140 1102
1141 intel_sdvo_set_target_output(intel_encoder, 1103 intel_sdvo_set_target_output(intel_encoder,
1142 dev_priv->controlled_output); 1104 dev_priv->attached_output);
1143 intel_sdvo_set_output_timing(intel_encoder, &output_dtd); 1105 intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
1144 1106
1145 /* Set the input timing to the screen. Assume always input 0. */ 1107 /* Set the input timing to the screen. Assume always input 0. */
@@ -1204,7 +1166,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1204 * channel on the motherboard. In a two-input device, the first input 1166 * channel on the motherboard. In a two-input device, the first input
1205 * will be SDVOB and the second SDVOC. 1167 * will be SDVOB and the second SDVOC.
1206 */ 1168 */
1207 in_out.in0 = sdvo_priv->controlled_output; 1169 in_out.in0 = sdvo_priv->attached_output;
1208 in_out.in1 = 0; 1170 in_out.in1 = 0;
1209 1171
1210 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, 1172 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
@@ -1230,7 +1192,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1230 if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { 1192 if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
1231 /* Set the output timing to the screen */ 1193 /* Set the output timing to the screen */
1232 intel_sdvo_set_target_output(intel_encoder, 1194 intel_sdvo_set_target_output(intel_encoder,
1233 sdvo_priv->controlled_output); 1195 sdvo_priv->attached_output);
1234 intel_sdvo_set_output_timing(intel_encoder, &input_dtd); 1196 intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
1235 } 1197 }
1236 1198
@@ -1352,107 +1314,16 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1352 1314
1353 if (0) 1315 if (0)
1354 intel_sdvo_set_encoder_power_state(intel_encoder, mode); 1316 intel_sdvo_set_encoder_power_state(intel_encoder, mode);
1355 intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output); 1317 intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output);
1356 } 1318 }
1357 return; 1319 return;
1358} 1320}
1359 1321
1360static void intel_sdvo_save(struct drm_connector *connector)
1361{
1362 struct drm_device *dev = connector->dev;
1363 struct drm_i915_private *dev_priv = dev->dev_private;
1364 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1365 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1366 int o;
1367
1368 sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder);
1369 intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs);
1370
1371 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
1372 intel_sdvo_set_target_input(intel_encoder, true, false);
1373 intel_sdvo_get_input_timing(intel_encoder,
1374 &sdvo_priv->save_input_dtd_1);
1375 }
1376
1377 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
1378 intel_sdvo_set_target_input(intel_encoder, false, true);
1379 intel_sdvo_get_input_timing(intel_encoder,
1380 &sdvo_priv->save_input_dtd_2);
1381 }
1382
1383 for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
1384 {
1385 u16 this_output = (1 << o);
1386 if (sdvo_priv->caps.output_flags & this_output)
1387 {
1388 intel_sdvo_set_target_output(intel_encoder, this_output);
1389 intel_sdvo_get_output_timing(intel_encoder,
1390 &sdvo_priv->save_output_dtd[o]);
1391 }
1392 }
1393 if (sdvo_priv->is_tv) {
1394 /* XXX: Save TV format/enhancements. */
1395 }
1396
1397 sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg);
1398}
1399
1400static void intel_sdvo_restore(struct drm_connector *connector)
1401{
1402 struct drm_device *dev = connector->dev;
1403 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1404 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1405 int o;
1406 int i;
1407 bool input1, input2;
1408 u8 status;
1409
1410 intel_sdvo_set_active_outputs(intel_encoder, 0);
1411
1412 for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
1413 {
1414 u16 this_output = (1 << o);
1415 if (sdvo_priv->caps.output_flags & this_output) {
1416 intel_sdvo_set_target_output(intel_encoder, this_output);
1417 intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]);
1418 }
1419 }
1420
1421 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
1422 intel_sdvo_set_target_input(intel_encoder, true, false);
1423 intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1);
1424 }
1425
1426 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
1427 intel_sdvo_set_target_input(intel_encoder, false, true);
1428 intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2);
1429 }
1430
1431 intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult);
1432
1433 if (sdvo_priv->is_tv) {
1434 /* XXX: Restore TV format/enhancements. */
1435 }
1436
1437 intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX);
1438
1439 if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
1440 {
1441 for (i = 0; i < 2; i++)
1442 intel_wait_for_vblank(dev);
1443 status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2);
1444 if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
1445 DRM_DEBUG_KMS("First %s output reported failure to "
1446 "sync\n", SDVO_NAME(sdvo_priv));
1447 }
1448
1449 intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs);
1450}
1451
1452static int intel_sdvo_mode_valid(struct drm_connector *connector, 1322static int intel_sdvo_mode_valid(struct drm_connector *connector,
1453 struct drm_display_mode *mode) 1323 struct drm_display_mode *mode)
1454{ 1324{
1455 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 1325 struct drm_encoder *encoder = intel_attached_encoder(connector);
1326 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1456 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; 1327 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1457 1328
1458 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 1329 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1490,6 +1361,8 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, str
1490 return true; 1361 return true;
1491} 1362}
1492 1363
1364/* No use! */
1365#if 0
1493struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) 1366struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
1494{ 1367{
1495 struct drm_connector *connector = NULL; 1368 struct drm_connector *connector = NULL;
@@ -1560,6 +1433,7 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
 	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
 	intel_sdvo_read_response(intel_encoder, &response, 2);
 }
+#endif
 
 static bool
 intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
@@ -1598,12 +1472,17 @@ static struct drm_connector *
 intel_find_analog_connector(struct drm_device *dev)
 {
 	struct drm_connector *connector;
+	struct drm_encoder *encoder;
 	struct intel_encoder *intel_encoder;
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		intel_encoder = to_intel_encoder(connector);
-		if (intel_encoder->type == INTEL_OUTPUT_ANALOG)
-			return connector;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		intel_encoder = enc_to_intel_encoder(encoder);
+		if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
+			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+				if (connector && encoder == intel_attached_encoder(connector))
+					return connector;
+			}
+		}
 	}
 	return NULL;
 }
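The rewritten lookup walks the encoder list first and only then scans the connector list for the connector attached to the analog encoder. A minimal standalone sketch of the same two-level match, using hypothetical stand-in types rather than the real drm structures:

	#include <stddef.h>

	/* Hypothetical stand-ins for the drm encoder/connector lists. */
	struct encoder { int type; };
	struct connector { struct encoder *attached; };

	#define OUTPUT_ANALOG 1

	/* Return the first connector whose attached encoder is analog,
	 * mirroring the encoder-first walk above. */
	static struct connector *
	find_analog_connector(struct encoder **encoders, size_t n_enc,
			      struct connector **connectors, size_t n_con)
	{
		size_t i, j;

		for (i = 0; i < n_enc; i++) {
			if (encoders[i]->type != OUTPUT_ANALOG)
				continue;
			for (j = 0; j < n_con; j++)
				if (connectors[j]->attached == encoders[i])
					return connectors[j];
		}
		return NULL;
	}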
@@ -1625,15 +1504,17 @@ intel_analog_is_connected(struct drm_device *dev)
 }
 
 enum drm_connector_status
-intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 	enum drm_connector_status status = connector_status_connected;
 	struct edid *edid = NULL;
 
-	edid = drm_get_edid(&intel_encoder->base,
-			    intel_encoder->ddc_bus);
+	edid = drm_get_edid(connector, intel_encoder->ddc_bus);
 
 	/* This is only applied to SDVO cards with multiple outputs */
 	if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
@@ -1646,8 +1527,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
 	 */
 	while(temp_ddc > 1) {
 		sdvo_priv->ddc_bus = temp_ddc;
-		edid = drm_get_edid(&intel_encoder->base,
-				    intel_encoder->ddc_bus);
+		edid = drm_get_edid(connector, intel_encoder->ddc_bus);
 		if (edid) {
 			/*
 			 * When we can get the EDID, maybe it is the
@@ -1664,28 +1544,25 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
 	/* when there is no edid and no monitor is connected with VGA
 	 * port, try to use the CRT ddc to read the EDID for DVI-connector
 	 */
-	if (edid == NULL &&
-	    sdvo_priv->analog_ddc_bus &&
-	    !intel_analog_is_connected(intel_encoder->base.dev))
-		edid = drm_get_edid(&intel_encoder->base,
-				    sdvo_priv->analog_ddc_bus);
+	if (edid == NULL && sdvo_priv->analog_ddc_bus &&
+	    !intel_analog_is_connected(connector->dev))
+		edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus);
+
 	if (edid != NULL) {
-		/* Don't report the output as connected if it's a DVI-I
-		 * connector with a non-digital EDID coming out.
-		 */
-		if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
-			if (edid->input & DRM_EDID_INPUT_DIGITAL)
-				sdvo_priv->is_hdmi =
-					drm_detect_hdmi_monitor(edid);
-			else
-				status = connector_status_disconnected;
-		}
+		bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+		bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK);
 
-		kfree(edid);
-		intel_encoder->base.display_info.raw_edid = NULL;
+		/* DDC bus is shared, match EDID to connector type */
+		if (is_digital && need_digital)
+			sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+		else if (is_digital != need_digital)
+			status = connector_status_disconnected;
 
-	} else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
+		connector->display_info.raw_edid = NULL;
+	} else
 		status = connector_status_disconnected;
+
+	kfree(edid);
 
 	return status;
 }
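The DVI-I handling above boils down to comparing two booleans: whether the EDID claims a digital sink and whether this particular connector needs one. A small self-contained sketch of that test, with illustrative constants in place of DRM_EDID_INPUT_DIGITAL and SDVO_TMDS_MASK:

	#include <stdbool.h>

	/* Illustrative values; the real ones live in the drm/sdvo headers. */
	#define EDID_INPUT_DIGITAL 0x80
	#define TMDS_MASK          0x000C

	/* Modeled on drm_connector_status. */
	enum status { DISCONNECTED, CONNECTED };

	static enum status classify_edid(unsigned char edid_input,
					 unsigned short output_flag)
	{
		/* "!!" collapses any set bits to exactly 0 or 1 so the two
		 * flags can be compared as booleans. */
		bool is_digital   = !!(edid_input & EDID_INPUT_DIGITAL);
		bool need_digital = !!(output_flag & TMDS_MASK);

		/* A digital EDID seen on an analog connector (or vice versa)
		 * means the shared DDC bus answered for the other half of a
		 * DVI-I port. */
		return (is_digital == need_digital) ? CONNECTED : DISCONNECTED;
	}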
@@ -1694,8 +1571,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 {
 	uint16_t response;
 	u8 status;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+	enum drm_connector_status ret;
 
 	intel_sdvo_write_cmd(intel_encoder,
 			     SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
@@ -1713,24 +1594,41 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 	if (response == 0)
 		return connector_status_disconnected;
 
-	if (intel_sdvo_multifunc_encoder(intel_encoder) &&
-	    sdvo_priv->attached_output != response) {
-		if (sdvo_priv->controlled_output != response &&
-		    intel_sdvo_output_setup(intel_encoder, response) != true)
-			return connector_status_unknown;
-		sdvo_priv->attached_output = response;
+	sdvo_priv->attached_output = response;
+
+	if ((sdvo_connector->output_flag & response) == 0)
+		ret = connector_status_disconnected;
+	else if (response & SDVO_TMDS_MASK)
+		ret = intel_sdvo_hdmi_sink_detect(connector);
+	else
+		ret = connector_status_connected;
+
+	/* May update encoder flags, e.g. the TV clock, for SDVO TV etc. */
+	if (ret == connector_status_connected) {
+		sdvo_priv->is_tv = false;
+		sdvo_priv->is_lvds = false;
+		intel_encoder->needs_tv_clock = false;
+
+		if (response & SDVO_TV_MASK) {
+			sdvo_priv->is_tv = true;
+			intel_encoder->needs_tv_clock = true;
+		}
+		if (response & SDVO_LVDS_MASK)
+			sdvo_priv->is_lvds = true;
 	}
-	return intel_sdvo_hdmi_sink_detect(connector, response);
+
+	return ret;
 }
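After a successful probe, the detect path re-derives the per-output flags from the attached-display response word. A compilable sketch of that classification, with illustrative mask values standing in for the real SDVO_TV_MASK/SDVO_LVDS_MASK definitions:

	/* Illustrative masks; the real values live in intel_sdvo_regs.h. */
	#define TV_MASK   0x0030
	#define LVDS_MASK 0xC000

	struct probe_flags { int is_tv, is_lvds, needs_tv_clock; };

	/* Reset the flags, then set only those implied by the response,
	 * as the rewritten detect path does on a connected probe. */
	static void update_flags(unsigned short response, struct probe_flags *f)
	{
		f->is_tv = f->is_lvds = f->needs_tv_clock = 0;

		if (response & TV_MASK) {
			f->is_tv = 1;
			f->needs_tv_clock = 1;
		}
		if (response & LVDS_MASK)
			f->is_lvds = 1;
	}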
 
 static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	int num_modes;
 
 	/* set the bus switch and get the modes */
-	num_modes = intel_ddc_get_modes(intel_encoder);
+	num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
 
 	/*
 	 * Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1740,17 +1638,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 	 */
 	if (num_modes == 0 &&
 	    sdvo_priv->analog_ddc_bus &&
-	    !intel_analog_is_connected(intel_encoder->base.dev)) {
-		struct i2c_adapter *digital_ddc_bus;
-
+	    !intel_analog_is_connected(connector->dev)) {
 		/* Switch to the analog ddc bus and try that
 		 */
-		digital_ddc_bus = intel_encoder->ddc_bus;
-		intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus;
-
-		(void) intel_ddc_get_modes(intel_encoder);
-
-		intel_encoder->ddc_bus = digital_ddc_bus;
+		(void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus);
 	}
 }
 
@@ -1821,8 +1712,9 @@ struct drm_display_mode sdvo_tv_modes[] = {
 
 static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 {
-	struct intel_encoder *output = to_intel_encoder(connector);
-	struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	struct intel_sdvo_sdtv_resolution_request tv_res;
 	uint32_t reply = 0, format_map = 0;
 	int i;
@@ -1842,11 +1734,11 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 	       sizeof(format_map) ? sizeof(format_map) :
 	       sizeof(struct intel_sdvo_sdtv_resolution_request));
 
-	intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+	intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output);
 
-	intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
 			     &tv_res, sizeof(tv_res));
-	status = intel_sdvo_read_response(output, &reply, 3);
+	status = intel_sdvo_read_response(intel_encoder, &reply, 3);
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return;
 
@@ -1863,7 +1755,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	struct drm_display_mode *newmode;
@@ -1873,7 +1766,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 	 * Assume that the preferred modes are
 	 * arranged in priority order.
 	 */
-	intel_ddc_get_modes(intel_encoder);
+	intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
 	if (list_empty(&connector->probed_modes) == false)
 		goto end;
 
@@ -1902,12 +1795,12 @@ end:
 
 static int intel_sdvo_get_modes(struct drm_connector *connector)
 {
-	struct intel_encoder *output = to_intel_encoder(connector);
-	struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 
-	if (sdvo_priv->is_tv)
+	if (IS_TV(sdvo_connector))
 		intel_sdvo_get_tv_modes(connector);
-	else if (sdvo_priv->is_lvds == true)
+	else if (IS_LVDS(sdvo_connector))
 		intel_sdvo_get_lvds_modes(connector);
 	else
 		intel_sdvo_get_ddc_modes(connector);
@@ -1920,11 +1813,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
 static
 void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
 	struct drm_device *dev = connector->dev;
 
-	if (sdvo_priv->is_tv) {
+	if (IS_TV(sdvo_priv)) {
 		if (sdvo_priv->left_property)
 			drm_property_destroy(dev, sdvo_priv->left_property);
 		if (sdvo_priv->right_property)
@@ -1937,8 +1830,6 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
 			drm_property_destroy(dev, sdvo_priv->hpos_property);
 		if (sdvo_priv->vpos_property)
 			drm_property_destroy(dev, sdvo_priv->vpos_property);
-	}
-	if (sdvo_priv->is_tv) {
 		if (sdvo_priv->saturation_property)
 			drm_property_destroy(dev,
 					     sdvo_priv->saturation_property);
@@ -1948,7 +1839,7 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
 		if (sdvo_priv->hue_property)
 			drm_property_destroy(dev, sdvo_priv->hue_property);
 	}
-	if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+	if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
 		if (sdvo_priv->brightness_property)
 			drm_property_destroy(dev,
 					     sdvo_priv->brightness_property);
@@ -1958,31 +1849,17 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
 
 static void intel_sdvo_destroy(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-
-	if (intel_encoder->i2c_bus)
-		intel_i2c_destroy(intel_encoder->i2c_bus);
-	if (intel_encoder->ddc_bus)
-		intel_i2c_destroy(intel_encoder->ddc_bus);
-	if (sdvo_priv->analog_ddc_bus)
-		intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
-
-	if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
-		drm_mode_destroy(connector->dev,
-				 sdvo_priv->sdvo_lvds_fixed_mode);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 
-	if (sdvo_priv->tv_format_property)
+	if (sdvo_connector->tv_format_property)
 		drm_property_destroy(connector->dev,
-				     sdvo_priv->tv_format_property);
-
-	if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
-		intel_sdvo_destroy_enhance_property(connector);
+				     sdvo_connector->tv_format_property);
 
+	intel_sdvo_destroy_enhance_property(connector);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
-
-	kfree(intel_encoder);
+	kfree(connector);
 }
 
 static int
@@ -1990,9 +1867,11 @@ intel_sdvo_set_property(struct drm_connector *connector,
 			    struct drm_property *property,
 			    uint64_t val)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-	struct drm_encoder *encoder = &intel_encoder->enc;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 	struct drm_crtc *crtc = encoder->crtc;
 	int ret = 0;
 	bool changed = false;
@@ -2003,101 +1882,101 @@ intel_sdvo_set_property(struct drm_connector *connector,
 	if (ret < 0)
 		goto out;
 
-	if (property == sdvo_priv->tv_format_property) {
+	if (property == sdvo_connector->tv_format_property) {
 		if (val >= TV_FORMAT_NUM) {
 			ret = -EINVAL;
 			goto out;
 		}
 		if (sdvo_priv->tv_format_name ==
-		    sdvo_priv->tv_format_supported[val])
+		    sdvo_connector->tv_format_supported[val])
 			goto out;
 
-		sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
+		sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val];
 		changed = true;
 	}
 
-	if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+	if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) {
 		cmd = 0;
 		temp_value = val;
-		if (sdvo_priv->left_property == property) {
+		if (sdvo_connector->left_property == property) {
 			drm_connector_property_set_value(connector,
-				sdvo_priv->right_property, val);
-			if (sdvo_priv->left_margin == temp_value)
+				sdvo_connector->right_property, val);
+			if (sdvo_connector->left_margin == temp_value)
 				goto out;
 
-			sdvo_priv->left_margin = temp_value;
-			sdvo_priv->right_margin = temp_value;
-			temp_value = sdvo_priv->max_hscan -
-				sdvo_priv->left_margin;
+			sdvo_connector->left_margin = temp_value;
+			sdvo_connector->right_margin = temp_value;
+			temp_value = sdvo_connector->max_hscan -
+				sdvo_connector->left_margin;
 			cmd = SDVO_CMD_SET_OVERSCAN_H;
-		} else if (sdvo_priv->right_property == property) {
+		} else if (sdvo_connector->right_property == property) {
 			drm_connector_property_set_value(connector,
-				sdvo_priv->left_property, val);
-			if (sdvo_priv->right_margin == temp_value)
+				sdvo_connector->left_property, val);
+			if (sdvo_connector->right_margin == temp_value)
 				goto out;
 
-			sdvo_priv->left_margin = temp_value;
-			sdvo_priv->right_margin = temp_value;
-			temp_value = sdvo_priv->max_hscan -
-				sdvo_priv->left_margin;
+			sdvo_connector->left_margin = temp_value;
+			sdvo_connector->right_margin = temp_value;
+			temp_value = sdvo_connector->max_hscan -
+				sdvo_connector->left_margin;
 			cmd = SDVO_CMD_SET_OVERSCAN_H;
-		} else if (sdvo_priv->top_property == property) {
+		} else if (sdvo_connector->top_property == property) {
 			drm_connector_property_set_value(connector,
-				sdvo_priv->bottom_property, val);
-			if (sdvo_priv->top_margin == temp_value)
+				sdvo_connector->bottom_property, val);
+			if (sdvo_connector->top_margin == temp_value)
 				goto out;
 
-			sdvo_priv->top_margin = temp_value;
-			sdvo_priv->bottom_margin = temp_value;
-			temp_value = sdvo_priv->max_vscan -
-				sdvo_priv->top_margin;
+			sdvo_connector->top_margin = temp_value;
+			sdvo_connector->bottom_margin = temp_value;
+			temp_value = sdvo_connector->max_vscan -
+				sdvo_connector->top_margin;
 			cmd = SDVO_CMD_SET_OVERSCAN_V;
-		} else if (sdvo_priv->bottom_property == property) {
+		} else if (sdvo_connector->bottom_property == property) {
 			drm_connector_property_set_value(connector,
-				sdvo_priv->top_property, val);
-			if (sdvo_priv->bottom_margin == temp_value)
+				sdvo_connector->top_property, val);
+			if (sdvo_connector->bottom_margin == temp_value)
 				goto out;
-			sdvo_priv->top_margin = temp_value;
-			sdvo_priv->bottom_margin = temp_value;
-			temp_value = sdvo_priv->max_vscan -
-				sdvo_priv->top_margin;
+			sdvo_connector->top_margin = temp_value;
+			sdvo_connector->bottom_margin = temp_value;
+			temp_value = sdvo_connector->max_vscan -
+				sdvo_connector->top_margin;
 			cmd = SDVO_CMD_SET_OVERSCAN_V;
-		} else if (sdvo_priv->hpos_property == property) {
-			if (sdvo_priv->cur_hpos == temp_value)
+		} else if (sdvo_connector->hpos_property == property) {
+			if (sdvo_connector->cur_hpos == temp_value)
 				goto out;
 
 			cmd = SDVO_CMD_SET_POSITION_H;
-			sdvo_priv->cur_hpos = temp_value;
-		} else if (sdvo_priv->vpos_property == property) {
-			if (sdvo_priv->cur_vpos == temp_value)
+			sdvo_connector->cur_hpos = temp_value;
+		} else if (sdvo_connector->vpos_property == property) {
+			if (sdvo_connector->cur_vpos == temp_value)
 				goto out;
 
 			cmd = SDVO_CMD_SET_POSITION_V;
-			sdvo_priv->cur_vpos = temp_value;
-		} else if (sdvo_priv->saturation_property == property) {
-			if (sdvo_priv->cur_saturation == temp_value)
+			sdvo_connector->cur_vpos = temp_value;
+		} else if (sdvo_connector->saturation_property == property) {
+			if (sdvo_connector->cur_saturation == temp_value)
 				goto out;
 
 			cmd = SDVO_CMD_SET_SATURATION;
-			sdvo_priv->cur_saturation = temp_value;
-		} else if (sdvo_priv->contrast_property == property) {
-			if (sdvo_priv->cur_contrast == temp_value)
+			sdvo_connector->cur_saturation = temp_value;
+		} else if (sdvo_connector->contrast_property == property) {
+			if (sdvo_connector->cur_contrast == temp_value)
 				goto out;
 
 			cmd = SDVO_CMD_SET_CONTRAST;
-			sdvo_priv->cur_contrast = temp_value;
-		} else if (sdvo_priv->hue_property == property) {
-			if (sdvo_priv->cur_hue == temp_value)
+			sdvo_connector->cur_contrast = temp_value;
+		} else if (sdvo_connector->hue_property == property) {
+			if (sdvo_connector->cur_hue == temp_value)
 				goto out;
 
 			cmd = SDVO_CMD_SET_HUE;
-			sdvo_priv->cur_hue = temp_value;
-		} else if (sdvo_priv->brightness_property == property) {
-			if (sdvo_priv->cur_brightness == temp_value)
+			sdvo_connector->cur_hue = temp_value;
+		} else if (sdvo_connector->brightness_property == property) {
+			if (sdvo_connector->cur_brightness == temp_value)
 				goto out;
 
 			cmd = SDVO_CMD_SET_BRIGHTNESS;
-			sdvo_priv->cur_brightness = temp_value;
+			sdvo_connector->cur_brightness = temp_value;
 		}
 		if (cmd) {
 			intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
@@ -2127,8 +2006,6 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
 
 static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
-	.save = intel_sdvo_save,
-	.restore = intel_sdvo_restore,
 	.detect = intel_sdvo_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_sdvo_set_property,
@@ -2138,12 +2015,27 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
 	.get_modes = intel_sdvo_get_modes,
 	.mode_valid = intel_sdvo_mode_valid,
-	.best_encoder = intel_best_encoder,
+	.best_encoder = intel_attached_encoder,
 };
 
 static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
 {
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+
+	if (intel_encoder->i2c_bus)
+		intel_i2c_destroy(intel_encoder->i2c_bus);
+	if (intel_encoder->ddc_bus)
+		intel_i2c_destroy(intel_encoder->ddc_bus);
+	if (sdvo_priv->analog_ddc_bus)
+		intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+
+	if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
+		drm_mode_destroy(encoder->dev,
+				 sdvo_priv->sdvo_lvds_fixed_mode);
+
 	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
@@ -2159,49 +2051,29 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
  * outputs, then LVDS outputs.
  */
 static void
-intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+			  struct intel_sdvo_priv *sdvo, u32 reg)
 {
-	uint16_t mask = 0;
-	unsigned int num_bits;
-
-	/* Make a mask of outputs less than or equal to our own priority in the
-	 * list.
-	 */
-	switch (dev_priv->controlled_output) {
-	case SDVO_OUTPUT_LVDS1:
-		mask |= SDVO_OUTPUT_LVDS1;
-	case SDVO_OUTPUT_LVDS0:
-		mask |= SDVO_OUTPUT_LVDS0;
-	case SDVO_OUTPUT_TMDS1:
-		mask |= SDVO_OUTPUT_TMDS1;
-	case SDVO_OUTPUT_TMDS0:
-		mask |= SDVO_OUTPUT_TMDS0;
-	case SDVO_OUTPUT_RGB1:
-		mask |= SDVO_OUTPUT_RGB1;
-	case SDVO_OUTPUT_RGB0:
-		mask |= SDVO_OUTPUT_RGB0;
-		break;
-	}
+	struct sdvo_device_mapping *mapping;
 
-	/* Count bits to find what number we are in the priority list. */
-	mask &= dev_priv->caps.output_flags;
-	num_bits = hweight16(mask);
-	if (num_bits > 3) {
-		/* if more than 3 outputs, default to DDC bus 3 for now */
-		num_bits = 3;
-	}
+	if (IS_SDVOB(reg))
+		mapping = &(dev_priv->sdvo_mappings[0]);
+	else
+		mapping = &(dev_priv->sdvo_mappings[1]);
 
-	/* Corresponds to SDVO_CONTROL_BUS_DDCx */
-	dev_priv->ddc_bus = 1 << num_bits;
+	sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
 }
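The new bus selection trusts the BIOS device mapping instead of guessing from output priority: the DDC pin's high nibble is the SDVO_CONTROL_BUS_DDCx index, and the bus switch register wants it one-hot. A runnable sketch of just that decode (the pin values in the asserts are made up):

	#include <assert.h>

	/* Extract the DDC bus index from the high nibble of the VBT
	 * ddc_pin byte and convert it to a one-hot bus select value. */
	static unsigned char ddc_bus_from_pin(unsigned char ddc_pin)
	{
		return 1 << ((ddc_pin & 0xf0) >> 4);
	}

	int main(void)
	{
		/* pin 0x10 -> bus index 1 -> bit 1 */
		assert(ddc_bus_from_pin(0x10) == 0x02);
		/* pin 0x30 -> bus index 3 -> bit 3 */
		assert(ddc_bus_from_pin(0x30) == 0x08);
		return 0;
	}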
 
 static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output)
+intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device)
 {
 	struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
 	uint8_t status;
 
-	intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+	if (device == 0)
+		intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
+	else
+		intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
 
 	intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
 	status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
@@ -2214,15 +2086,13 @@ static struct intel_encoder *
 intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
 {
 	struct drm_device *dev = chan->drm_dev;
-	struct drm_connector *connector;
+	struct drm_encoder *encoder;
 	struct intel_encoder *intel_encoder = NULL;
 
-	list_for_each_entry(connector,
-			    &dev->mode_config.connector_list, head) {
-		if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) {
-			intel_encoder = to_intel_encoder(connector);
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		intel_encoder = enc_to_intel_encoder(encoder);
+		if (intel_encoder->ddc_bus == &chan->adapter)
 			break;
-		}
 	}
 	return intel_encoder;
 }
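The channel-to-encoder lookup recovers ownership purely by pointer comparison, since the i2c callback only sees the adapter. A minimal sketch of the idea with hypothetical miniature types:

	#include <stddef.h>

	/* Hypothetical miniature of the lookup above: each encoder owns
	 * an i2c adapter pointer, so ownership is recovered by comparing
	 * the adapter pointer handed to the callback. */
	struct i2c_adapter { int dummy; };
	struct encoder { struct i2c_adapter *ddc_bus; };

	static struct encoder *
	owner_of(struct encoder *encoders, size_t n, struct i2c_adapter *adap)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if (encoders[i].ddc_bus == adap)
				return &encoders[i];
		return NULL;
	}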
@@ -2259,7 +2129,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct sdvo_device_mapping *my_mapping, *other_mapping;
 
-	if (sdvo_reg == SDVOB) {
+	if (IS_SDVOB(sdvo_reg)) {
 		my_mapping = &dev_priv->sdvo_mappings[0];
 		other_mapping = &dev_priv->sdvo_mappings[1];
 	} else {
@@ -2284,120 +2154,237 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
 	/* No SDVO device info is found for another DVO port,
 	 * so use mapping assumption we had before BIOS parsing.
 	 */
-	if (sdvo_reg == SDVOB)
+	if (IS_SDVOB(sdvo_reg))
 		return 0x70;
 	else
 		return 0x72;
 }
 
-static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
+static bool
+intel_sdvo_connector_alloc (struct intel_connector **ret)
 {
-	DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
-	return 1;
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *sdvo_connector;
+
+	*ret = kzalloc(sizeof(*intel_connector) +
+		       sizeof(*sdvo_connector), GFP_KERNEL);
+	if (!*ret)
+		return false;
+
+	intel_connector = *ret;
+	sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
+	intel_connector->dev_priv = sdvo_connector;
+
+	return true;
 }
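intel_sdvo_connector_alloc carves both objects out of one allocation: the SDVO connector state lives immediately after the intel_connector, so `intel_connector + 1` points at it and a single kfree releases both. A userspace sketch of the same trick, with calloc standing in for kzalloc and hypothetical struct names:

	#include <stdlib.h>

	/* Hypothetical pair mirroring intel_connector + intel_sdvo_connector. */
	struct base_obj { void *dev_priv; };
	struct priv_obj { int output_flag; };

	/* One allocation serves both structures: the private part sits
	 * directly after the base, so "base + 1" points at it and a
	 * single free() releases both. */
	static struct base_obj *alloc_pair(void)
	{
		struct base_obj *base = calloc(1, sizeof(struct base_obj) +
						  sizeof(struct priv_obj));
		if (!base)
			return NULL;

		base->dev_priv = (struct priv_obj *)(base + 1);
		return base;
	}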
 
-static struct dmi_system_id intel_sdvo_bad_tv[] = {
-	{
-		.callback = intel_sdvo_bad_tv_callback,
-		.ident = "IntelG45/ICH10R/DME1737",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
-		},
-	},
-
-	{ }	/* terminating entry */
-};
+static void
+intel_sdvo_connector_create (struct drm_encoder *encoder,
+			     struct drm_connector *connector)
+{
+	drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
+			   connector->connector_type);
+
+	drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+	drm_sysfs_connector_add(connector);
+}
 
 static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device)
 {
-	struct drm_connector *connector = &intel_encoder->base;
 	struct drm_encoder *encoder = &intel_encoder->enc;
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-	bool ret = true, registered = false;
+	struct drm_connector *connector;
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *sdvo_connector;
+
+	if (!intel_sdvo_connector_alloc(&intel_connector))
+		return false;
+
+	sdvo_connector = intel_connector->dev_priv;
+
+	if (device == 0) {
+		sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0;
+		sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+	} else if (device == 1) {
+		sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1;
+		sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+	}
+
+	connector = &intel_connector->base;
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+	if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode)
+	    && intel_sdvo_get_digital_encoding_mode(intel_encoder, device)
+	    && sdvo_priv->is_hdmi) {
+		/* enable hdmi encoding mode if supported */
+		intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
+		intel_sdvo_set_colorimetry(intel_encoder,
+					   SDVO_COLORIMETRY_RGB256);
+		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+	}
+	intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+				    (1 << INTEL_ANALOG_CLONE_BIT);
+
+	intel_sdvo_connector_create(encoder, connector);
+
+	return true;
+}
+
+static bool
+intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type)
+{
+	struct drm_encoder *encoder = &intel_encoder->enc;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct drm_connector *connector;
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *sdvo_connector;
+
+	if (!intel_sdvo_connector_alloc(&intel_connector))
+		return false;
+
+	connector = &intel_connector->base;
+	encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+	connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+	sdvo_connector = intel_connector->dev_priv;
+
+	sdvo_priv->controlled_output |= type;
+	sdvo_connector->output_flag = type;
+
+	sdvo_priv->is_tv = true;
+	intel_encoder->needs_tv_clock = true;
+	intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+	intel_sdvo_connector_create(encoder, connector);
+
+	intel_sdvo_tv_create_property(connector, type);
+
+	intel_sdvo_create_enhance_property(connector);
+
+	return true;
+}
+
+static bool
+intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device)
+{
+	struct drm_encoder *encoder = &intel_encoder->enc;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct drm_connector *connector;
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *sdvo_connector;
+
+	if (!intel_sdvo_connector_alloc(&intel_connector))
+		return false;
+
+	connector = &intel_connector->base;
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+	connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+	sdvo_connector = intel_connector->dev_priv;
+
+	if (device == 0) {
+		sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0;
+		sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+	} else if (device == 1) {
+		sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1;
+		sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+	}
+
+	intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+				    (1 << INTEL_ANALOG_CLONE_BIT);
+
+	intel_sdvo_connector_create(encoder, connector);
+	return true;
+}
+
+static bool
+intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device)
+{
+	struct drm_encoder *encoder = &intel_encoder->enc;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct drm_connector *connector;
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *sdvo_connector;
+
+	if (!intel_sdvo_connector_alloc(&intel_connector))
+		return false;
+
+	connector = &intel_connector->base;
+	encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+	connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+	sdvo_connector = intel_connector->dev_priv;
+
+	sdvo_priv->is_lvds = true;
+
+	if (device == 0) {
+		sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0;
+		sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+	} else if (device == 1) {
+		sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1;
+		sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+	}
+
+	intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+				    (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+
+	intel_sdvo_connector_create(encoder, connector);
+	intel_sdvo_create_enhance_property(connector);
+	return true;
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+{
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 
 	sdvo_priv->is_tv = false;
 	intel_encoder->needs_tv_clock = false;
 	sdvo_priv->is_lvds = false;
 
-	if (device_is_registered(&connector->kdev)) {
-		drm_sysfs_connector_remove(connector);
-		registered = true;
-	}
+	/* An SDVO XXX1 function block may not exist unless the XXX0 block does. */
 
-	if (flags &
-	    (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
-		if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
-			sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
-		else
-			sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
-
-		encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
-		connector->connector_type = DRM_MODE_CONNECTOR_DVID;
-
-		if (intel_sdvo_get_supp_encode(intel_encoder,
-					       &sdvo_priv->encode) &&
-		    intel_sdvo_get_digital_encoding_mode(intel_encoder) &&
-		    sdvo_priv->is_hdmi) {
-			/* enable hdmi encoding mode if supported */
-			intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
-			intel_sdvo_set_colorimetry(intel_encoder,
-						   SDVO_COLORIMETRY_RGB256);
-			connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
-			intel_encoder->clone_mask =
-				(1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-				(1 << INTEL_ANALOG_CLONE_BIT);
-		}
-	} else if ((flags & SDVO_OUTPUT_SVID0) &&
-		   !dmi_check_system(intel_sdvo_bad_tv)) {
-
-		sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
-		encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
-		connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
-		sdvo_priv->is_tv = true;
-		intel_encoder->needs_tv_clock = true;
-		intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
-	} else if (flags & SDVO_OUTPUT_RGB0) {
-
-		sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
-		encoder->encoder_type = DRM_MODE_ENCODER_DAC;
-		connector->connector_type = DRM_MODE_CONNECTOR_VGA;
-		intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-					    (1 << INTEL_ANALOG_CLONE_BIT);
-	} else if (flags & SDVO_OUTPUT_RGB1) {
-
-		sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
-		encoder->encoder_type = DRM_MODE_ENCODER_DAC;
-		connector->connector_type = DRM_MODE_CONNECTOR_VGA;
-		intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-					    (1 << INTEL_ANALOG_CLONE_BIT);
-	} else if (flags & SDVO_OUTPUT_CVBS0) {
-
-		sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
-		encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
-		connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
-		sdvo_priv->is_tv = true;
-		intel_encoder->needs_tv_clock = true;
-		intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
-	} else if (flags & SDVO_OUTPUT_LVDS0) {
-
-		sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
-		encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
-		connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
-		sdvo_priv->is_lvds = true;
-		intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
-					    (1 << INTEL_SDVO_LVDS_CLONE_BIT);
-	} else if (flags & SDVO_OUTPUT_LVDS1) {
-
-		sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
-		encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
-		connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
-		sdvo_priv->is_lvds = true;
-		intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
-					    (1 << INTEL_SDVO_LVDS_CLONE_BIT);
-	} else {
+	if (flags & SDVO_OUTPUT_TMDS0)
+		if (!intel_sdvo_dvi_init(intel_encoder, 0))
+			return false;
+
+	if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+		if (!intel_sdvo_dvi_init(intel_encoder, 1))
+			return false;
+
+	/* TV has no XXX1 function block */
+	if (flags & SDVO_OUTPUT_SVID0)
+		if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0))
+			return false;
+
+	if (flags & SDVO_OUTPUT_CVBS0)
+		if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0))
+			return false;
+
+	if (flags & SDVO_OUTPUT_RGB0)
+		if (!intel_sdvo_analog_init(intel_encoder, 0))
+			return false;
+
+	if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+		if (!intel_sdvo_analog_init(intel_encoder, 1))
+			return false;
+
+	if (flags & SDVO_OUTPUT_LVDS0)
+		if (!intel_sdvo_lvds_init(intel_encoder, 0))
+			return false;
+
+	if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+		if (!intel_sdvo_lvds_init(intel_encoder, 1))
+			return false;
+
+	if ((flags & SDVO_OUTPUT_MASK) == 0) {
 		unsigned char bytes[2];
 
 		sdvo_priv->controlled_output = 0;
@@ -2405,28 +2392,25 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
 		DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
 			      SDVO_NAME(sdvo_priv),
 			      bytes[0], bytes[1]);
-		ret = false;
+		return false;
 	}
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 
-	if (ret && registered)
-		ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
-
-
-	return ret;
-
+	return true;
 }
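Each `(flags & XXX_MASK) == XXX_MASK` test above asks whether *both* function blocks of a pair are present before initialising the second one. A tiny runnable check of the idiom, with illustrative bit values:

	#include <assert.h>

	/* Illustrative two-bit mask in the spirit of SDVO_TMDS_MASK
	 * (TMDS0 | TMDS1); the real definitions are in the SDVO headers. */
	#define OUT_TMDS0 0x0004
	#define OUT_TMDS1 0x0008
	#define TMDS_MASK (OUT_TMDS0 | OUT_TMDS1)

	int main(void)
	{
		unsigned short only0 = OUT_TMDS0;
		unsigned short both  = OUT_TMDS0 | OUT_TMDS1;

		/* "(flags & MASK) == MASK" is true only when every bit of
		 * the mask is set, i.e. when the second device exists. */
		assert((only0 & TMDS_MASK) != TMDS_MASK);
		assert((both  & TMDS_MASK) == TMDS_MASK);
		return 0;
	}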
 
-static void intel_sdvo_tv_create_property(struct drm_connector *connector)
+static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 	struct intel_sdvo_tv_format format;
 	uint32_t format_map, i;
 	uint8_t status;
 
-	intel_sdvo_set_target_output(intel_encoder,
-				     sdvo_priv->controlled_output);
+	intel_sdvo_set_target_output(intel_encoder, type);
 
 	intel_sdvo_write_cmd(intel_encoder,
 			     SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
@@ -2441,35 +2425,37 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
 	if (format_map == 0)
 		return;
 
-	sdvo_priv->format_supported_num = 0;
+	sdvo_connector->format_supported_num = 0;
 	for (i = 0 ; i < TV_FORMAT_NUM; i++)
 		if (format_map & (1 << i)) {
-			sdvo_priv->tv_format_supported
-			[sdvo_priv->format_supported_num++] =
+			sdvo_connector->tv_format_supported
+			[sdvo_connector->format_supported_num++] =
 				tv_format_names[i];
 		}
 
 
-	sdvo_priv->tv_format_property =
+	sdvo_connector->tv_format_property =
 		drm_property_create(
 			connector->dev, DRM_MODE_PROP_ENUM,
-			"mode", sdvo_priv->format_supported_num);
+			"mode", sdvo_connector->format_supported_num);
 
-	for (i = 0; i < sdvo_priv->format_supported_num; i++)
+	for (i = 0; i < sdvo_connector->format_supported_num; i++)
 		drm_property_add_enum(
-			sdvo_priv->tv_format_property, i,
-			i, sdvo_priv->tv_format_supported[i]);
+			sdvo_connector->tv_format_property, i,
+			i, sdvo_connector->tv_format_supported[i]);
 
-	sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0];
+	sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0];
 	drm_connector_attach_property(
-		connector, sdvo_priv->tv_format_property, 0);
+		connector, sdvo_connector->tv_format_property, 0);
 
 }
 
 static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
 	struct intel_sdvo_enhancements_reply sdvo_data;
 	struct drm_device *dev = connector->dev;
 	uint8_t status;
@@ -2488,7 +2474,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
 		DRM_DEBUG_KMS("No enhancement is supported\n");
 		return;
 	}
-	if (sdvo_priv->is_tv) {
+	if (IS_TV(sdvo_priv)) {
 		/* when horizontal overscan is supported, Add the left/right
 		 * property
 		 */
@@ -2636,8 +2622,6 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2636 "default %d, current %d\n", 2622 "default %d, current %d\n",
2637 data_value[0], data_value[1], response); 2623 data_value[0], data_value[1], response);
2638 } 2624 }
2639 }
2640 if (sdvo_priv->is_tv) {
2641 if (sdvo_data.saturation) { 2625 if (sdvo_data.saturation) {
2642 intel_sdvo_write_cmd(intel_encoder, 2626 intel_sdvo_write_cmd(intel_encoder,
2643 SDVO_CMD_GET_MAX_SATURATION, NULL, 0); 2627 SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
@@ -2733,7 +2717,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
 				      data_value[0], data_value[1], response);
 		}
 	}
-	if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+	if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
 		if (sdvo_data.brightness) {
 			intel_sdvo_write_cmd(intel_encoder,
 					     SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
@@ -2773,12 +2757,11 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
 bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_connector *connector;
 	struct intel_encoder *intel_encoder;
 	struct intel_sdvo_priv *sdvo_priv;
-
 	u8 ch[0x40];
 	int i;
+	u32 i2c_reg, ddc_reg, analog_ddc_reg;
 
 	intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
 	if (!intel_encoder) {
@@ -2791,11 +2774,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 	intel_encoder->dev_priv = sdvo_priv;
 	intel_encoder->type = INTEL_OUTPUT_SDVO;
 
+	if (HAS_PCH_SPLIT(dev)) {
+		i2c_reg = PCH_GPIOE;
+		ddc_reg = PCH_GPIOE;
+		analog_ddc_reg = PCH_GPIOA;
+	} else {
+		i2c_reg = GPIOE;
+		ddc_reg = GPIOE;
+		analog_ddc_reg = GPIOA;
+	}
+
 	/* setup the DDC bus. */
-	if (sdvo_reg == SDVOB)
-		intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+	if (IS_SDVOB(sdvo_reg))
+		intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
 	else
-		intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+		intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
 
 	if (!intel_encoder->i2c_bus)
 		goto err_inteloutput;
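On PCH-split hardware the GPIO pins used for the SDVO control and DDC buses moved into the PCH, so init picks the register bank once up front and everything downstream stays unchanged. A compact sketch of that selection; the register offsets here are assumptions standing in for the i915_reg.h values:

	/* Assumed offsets, illustrative only. */
	#define GPIOA      0x5010
	#define GPIOE      0x5020
	#define PCH_GPIOA  0xc5010
	#define PCH_GPIOE  0xc5020

	struct gpio_regs { unsigned int i2c, ddc, analog_ddc; };

	/* On PCH-split parts every bus is created against the relocated
	 * register bank; otherwise the legacy offsets are used. */
	static struct gpio_regs pick_gpio_regs(int has_pch_split)
	{
		struct gpio_regs r;

		r.i2c        = has_pch_split ? PCH_GPIOE : GPIOE;
		r.ddc        = has_pch_split ? PCH_GPIOE : GPIOE;
		r.analog_ddc = has_pch_split ? PCH_GPIOA : GPIOA;
		return r;
	}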
@@ -2809,20 +2802,20 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 	for (i = 0; i < 0x40; i++) {
 		if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
 			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
-				      sdvo_reg == SDVOB ? 'B' : 'C');
+				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
 			goto err_i2c;
 		}
 	}
 
 	/* setup the DDC bus. */
-	if (sdvo_reg == SDVOB) {
-		intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
-		sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+	if (IS_SDVOB(sdvo_reg)) {
+		intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
+		sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
 							     "SDVOB/VGA DDC BUS");
 		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
 	} else {
-		intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
-		sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+		intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
+		sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
 							     "SDVOC/VGA DDC BUS");
 		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
 	}
@@ -2833,41 +2826,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 	/* Wrap with our custom algo which switches to DDC mode */
 	intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
 
+	/* encoder type will be decided later */
+	drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
+	drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
+
 	/* In default case sdvo lvds is false */
 	intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
 
 	if (intel_sdvo_output_setup(intel_encoder,
 				    sdvo_priv->caps.output_flags) != true) {
 		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
-			      sdvo_reg == SDVOB ? 'B' : 'C');
+			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
 		goto err_i2c;
 	}
 
-
-	connector = &intel_encoder->base;
-	drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
-			   connector->connector_type);
-
-	drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
-	connector->interlace_allowed = 0;
-	connector->doublescan_allowed = 0;
-	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-
-	drm_encoder_init(dev, &intel_encoder->enc,
-			 &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type);
-
-	drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
-
-	drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
-	if (sdvo_priv->is_tv)
-		intel_sdvo_tv_create_property(connector);
-
-	if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
-		intel_sdvo_create_enhance_property(connector);
-
-	drm_sysfs_connector_add(connector);
-
-	intel_sdvo_select_ddc_bus(sdvo_priv);
+	intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg);
 
 	/* Set the input timing to the screen. Assume always input 0. */
 	intel_sdvo_set_target_input(intel_encoder, true, false);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d7d39b2327df..6d553c29d106 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,143 +916,6 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
 	}
 }
 
-static void
-intel_tv_save(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-	int i;
-
-	tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
-	tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2);
-	tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3);
-	tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1);
-	tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2);
-	tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3);
-	tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4);
-	tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5);
-	tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6);
-	tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7);
-	tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1);
-	tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2);
-	tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3);
-
-	tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y);
-	tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2);
-	tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U);
-	tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2);
-	tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V);
-	tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2);
-	tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS);
-	tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL);
-	tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS);
-	tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE);
-	tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1);
-	tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2);
-	tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3);
-
-	for (i = 0; i < 60; i++)
-		tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2));
-	for (i = 0; i < 60; i++)
-		tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2));
-	for (i = 0; i < 43; i++)
-		tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2));
-	for (i = 0; i < 43; i++)
-		tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2));
-
-	tv_priv->save_TV_DAC = I915_READ(TV_DAC);
-	tv_priv->save_TV_CTL = I915_READ(TV_CTL);
-}
-
-static void
-intel_tv_restore(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-	struct drm_crtc *crtc = connector->encoder->crtc;
-	struct intel_crtc *intel_crtc;
-	int i;
-
-	/* FIXME: No CRTC? */
-	if (!crtc)
-		return;
-
-	intel_crtc = to_intel_crtc(crtc);
-	I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1);
-	I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2);
-	I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3);
-	I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1);
-	I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2);
-	I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3);
-	I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4);
-	I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5);
-	I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6);
-	I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7);
-	I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1);
-	I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2);
-	I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3);
-
-	I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y);
-	I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2);
-	I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U);
-	I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2);
-	I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V);
-	I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2);
-	I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS);
-	I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL);
-
-	{
-		int pipeconf_reg = (intel_crtc->pipe == 0) ?
-			PIPEACONF : PIPEBCONF;
-		int dspcntr_reg = (intel_crtc->plane == 0) ?
-			DSPACNTR : DSPBCNTR;
-		int pipeconf = I915_READ(pipeconf_reg);
-		int dspcntr = I915_READ(dspcntr_reg);
-		int dspbase_reg = (intel_crtc->plane == 0) ?
-			DSPAADDR : DSPBADDR;
-		/* Pipe must be off here */
-		I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
-		/* Flush the plane changes */
1020 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1021
1022 if (!IS_I9XX(dev)) {
1023 /* Wait for vblank for the disable to take effect */
1024 intel_wait_for_vblank(dev);
1025 }
1026
1027 I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
1028 /* Wait for vblank for the disable to take effect. */
1029 intel_wait_for_vblank(dev);
1030
1031 /* Filter ctl must be set before TV_WIN_SIZE */
1032 I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1);
1033 I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2);
1034 I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3);
1035 I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS);
1036 I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE);
1037 I915_WRITE(pipeconf_reg, pipeconf);
1038 I915_WRITE(dspcntr_reg, dspcntr);
1039 /* Flush the plane changes */
1040 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1041 }
1042
1043 for (i = 0; i < 60; i++)
1044 I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]);
1045 for (i = 0; i < 60; i++)
1046 I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]);
1047 for (i = 0; i < 43; i++)
1048 I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]);
1049 for (i = 0; i < 43; i++)
1050 I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]);
1051
1052 I915_WRITE(TV_DAC, tv_priv->save_TV_DAC);
1053 I915_WRITE(TV_CTL, tv_priv->save_TV_CTL);
1054}
1055
1056static const struct tv_mode * 919static const struct tv_mode *
1057intel_tv_mode_lookup (char *tv_format) 920intel_tv_mode_lookup (char *tv_format)
1058{ 921{
@@ -1078,7 +941,8 @@ intel_tv_mode_find (struct intel_encoder *intel_encoder)
1078static enum drm_mode_status 941static enum drm_mode_status
1079intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) 942intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
1080{ 943{
1081 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 944 struct drm_encoder *encoder = intel_attached_encoder(connector);
945 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); 946 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1083 947
1084 /* Ensure TV refresh is close to desired refresh */ 948 /* Ensure TV refresh is close to desired refresh */
@@ -1441,7 +1305,8 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
1441 */ 1305 */
1442static void intel_tv_find_better_format(struct drm_connector *connector) 1306static void intel_tv_find_better_format(struct drm_connector *connector)
1443{ 1307{
1444 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 1308 struct drm_encoder *encoder = intel_attached_encoder(connector);
1309 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1445 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; 1310 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1446 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); 1311 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1447 int i; 1312 int i;
@@ -1475,9 +1340,9 @@ intel_tv_detect(struct drm_connector *connector)
1475{ 1340{
1476 struct drm_crtc *crtc; 1341 struct drm_crtc *crtc;
1477 struct drm_display_mode mode; 1342 struct drm_display_mode mode;
1478 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 1343 struct drm_encoder *encoder = intel_attached_encoder(connector);
1344 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1479 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; 1345 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1480 struct drm_encoder *encoder = &intel_encoder->enc;
1481 int dpms_mode; 1346 int dpms_mode;
1482 int type = tv_priv->type; 1347 int type = tv_priv->type;
1483 1348
@@ -1487,10 +1352,12 @@ intel_tv_detect(struct drm_connector *connector)
1487 if (encoder->crtc && encoder->crtc->enabled) { 1352 if (encoder->crtc && encoder->crtc->enabled) {
1488 type = intel_tv_detect_type(encoder->crtc, intel_encoder); 1353 type = intel_tv_detect_type(encoder->crtc, intel_encoder);
1489 } else { 1354 } else {
1490 crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode); 1355 crtc = intel_get_load_detect_pipe(intel_encoder, connector,
1356 &mode, &dpms_mode);
1491 if (crtc) { 1357 if (crtc) {
1492 type = intel_tv_detect_type(crtc, intel_encoder); 1358 type = intel_tv_detect_type(crtc, intel_encoder);
1493 intel_release_load_detect_pipe(intel_encoder, dpms_mode); 1359 intel_release_load_detect_pipe(intel_encoder, connector,
1360 dpms_mode);
1494 } else 1361 } else
1495 type = -1; 1362 type = -1;
1496 } 1363 }
@@ -1525,7 +1392,8 @@ static void
1525intel_tv_chose_preferred_modes(struct drm_connector *connector, 1392intel_tv_chose_preferred_modes(struct drm_connector *connector,
1526 struct drm_display_mode *mode_ptr) 1393 struct drm_display_mode *mode_ptr)
1527{ 1394{
1528 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 1395 struct drm_encoder *encoder = intel_attached_encoder(connector);
1396 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1529 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); 1397 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1530 1398
1531 if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) 1399 if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1550,7 +1418,8 @@ static int
1550intel_tv_get_modes(struct drm_connector *connector) 1418intel_tv_get_modes(struct drm_connector *connector)
1551{ 1419{
1552 struct drm_display_mode *mode_ptr; 1420 struct drm_display_mode *mode_ptr;
1553 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 1421 struct drm_encoder *encoder = intel_attached_encoder(connector);
1422 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1554 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); 1423 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1555 int j, count = 0; 1424 int j, count = 0;
1556 u64 tmp; 1425 u64 tmp;
@@ -1604,11 +1473,9 @@ intel_tv_get_modes(struct drm_connector *connector)
1604static void 1473static void
1605intel_tv_destroy (struct drm_connector *connector) 1474intel_tv_destroy (struct drm_connector *connector)
1606{ 1475{
1607 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1608
1609 drm_sysfs_connector_remove(connector); 1476 drm_sysfs_connector_remove(connector);
1610 drm_connector_cleanup(connector); 1477 drm_connector_cleanup(connector);
1611 kfree(intel_encoder); 1478 kfree(connector);
1612} 1479}
1613 1480
1614 1481
@@ -1617,9 +1484,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
1617 uint64_t val) 1484 uint64_t val)
1618{ 1485{
1619 struct drm_device *dev = connector->dev; 1486 struct drm_device *dev = connector->dev;
1620 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 1487 struct drm_encoder *encoder = intel_attached_encoder(connector);
1488 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1621 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; 1489 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1622 struct drm_encoder *encoder = &intel_encoder->enc;
1623 struct drm_crtc *crtc = encoder->crtc; 1490 struct drm_crtc *crtc = encoder->crtc;
1624 int ret = 0; 1491 int ret = 0;
1625 bool changed = false; 1492 bool changed = false;
@@ -1676,8 +1543,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
1676 1543
1677static const struct drm_connector_funcs intel_tv_connector_funcs = { 1544static const struct drm_connector_funcs intel_tv_connector_funcs = {
1678 .dpms = drm_helper_connector_dpms, 1545 .dpms = drm_helper_connector_dpms,
1679 .save = intel_tv_save,
1680 .restore = intel_tv_restore,
1681 .detect = intel_tv_detect, 1546 .detect = intel_tv_detect,
1682 .destroy = intel_tv_destroy, 1547 .destroy = intel_tv_destroy,
1683 .set_property = intel_tv_set_property, 1548 .set_property = intel_tv_set_property,
@@ -1687,12 +1552,15 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
1687static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { 1552static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
1688 .mode_valid = intel_tv_mode_valid, 1553 .mode_valid = intel_tv_mode_valid,
1689 .get_modes = intel_tv_get_modes, 1554 .get_modes = intel_tv_get_modes,
1690 .best_encoder = intel_best_encoder, 1555 .best_encoder = intel_attached_encoder,
1691}; 1556};
1692 1557
1693static void intel_tv_enc_destroy(struct drm_encoder *encoder) 1558static void intel_tv_enc_destroy(struct drm_encoder *encoder)
1694{ 1559{
1560 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1561
1695 drm_encoder_cleanup(encoder); 1562 drm_encoder_cleanup(encoder);
1563 kfree(intel_encoder);
1696} 1564}
1697 1565
1698static const struct drm_encoder_funcs intel_tv_enc_funcs = { 1566static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1741,6 +1609,7 @@ intel_tv_init(struct drm_device *dev)
1741 struct drm_i915_private *dev_priv = dev->dev_private; 1609 struct drm_i915_private *dev_priv = dev->dev_private;
1742 struct drm_connector *connector; 1610 struct drm_connector *connector;
1743 struct intel_encoder *intel_encoder; 1611 struct intel_encoder *intel_encoder;
1612 struct intel_connector *intel_connector;
1744 struct intel_tv_priv *tv_priv; 1613 struct intel_tv_priv *tv_priv;
1745 u32 tv_dac_on, tv_dac_off, save_tv_dac; 1614 u32 tv_dac_on, tv_dac_off, save_tv_dac;
1746 char **tv_format_names; 1615 char **tv_format_names;
@@ -1786,7 +1655,13 @@ intel_tv_init(struct drm_device *dev)
1786 return; 1655 return;
1787 } 1656 }
1788 1657
1789 connector = &intel_encoder->base; 1658 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1659 if (!intel_connector) {
1660 kfree(intel_encoder);
1661 return;
1662 }
1663
1664 connector = &intel_connector->base;
1790 1665
1791 drm_connector_init(dev, connector, &intel_tv_connector_funcs, 1666 drm_connector_init(dev, connector, &intel_tv_connector_funcs,
1792 DRM_MODE_CONNECTOR_SVIDEO); 1667 DRM_MODE_CONNECTOR_SVIDEO);
@@ -1794,7 +1669,7 @@ intel_tv_init(struct drm_device *dev)
1794 drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs, 1669 drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
1795 DRM_MODE_ENCODER_TVDAC); 1670 DRM_MODE_ENCODER_TVDAC);
1796 1671
1797 drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); 1672 drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
1798 tv_priv = (struct intel_tv_priv *)(intel_encoder + 1); 1673 tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
1799 intel_encoder->type = INTEL_OUTPUT_TVOUT; 1674 intel_encoder->type = INTEL_OUTPUT_TVOUT;
1800 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 1675 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
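
Note on the intel_tv.c changes above: the driver moves to the split encoder/connector model — the connector is now a separately kzalloc()'d intel_connector freed by intel_tv_destroy() with kfree(connector), while the encoder (with its trailing tv_priv) is freed in intel_tv_enc_destroy(), and connector callbacks reach the encoder via intel_attached_encoder(). A self-contained toy of that two-object ownership pattern, using only libc:

	#include <stddef.h>
	#include <stdlib.h>

	/* Toy sketch: two independently allocated objects, each released
	 * by its own destroy hook, mirroring the connector/encoder split. */
	struct base { int id; };

	struct toy_connector { struct base base; };
	struct toy_encoder   { struct base base; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static void toy_connector_destroy(struct base *b)
	{
		free(container_of(b, struct toy_connector, base));
	}

	static void toy_encoder_destroy(struct base *b)
	{
		free(container_of(b, struct toy_encoder, base));
	}

	int main(void)
	{
		struct toy_connector *c = calloc(1, sizeof(*c));
		struct toy_encoder *e = calloc(1, sizeof(*e));

		if (c)
			toy_connector_destroy(&c->base);
		if (e)
			toy_encoder_destroy(&e->base);
		return 0;
	}
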
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 453df3f6053f..acd31ed861ef 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -22,7 +22,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
22 nv50_cursor.o nv50_display.o nv50_fbcon.o \ 22 nv50_cursor.o nv50_display.o nv50_fbcon.o \
23 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ 23 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
24 nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ 24 nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
25 nv17_gpio.o nv50_gpio.o 25 nv17_gpio.o nv50_gpio.o \
26 nv50_calc.o
26 27
27nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o 28nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
28nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o 29nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index abc382a9918b..e7e69ccce5c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -26,6 +26,7 @@
26#define NV_DEBUG_NOTRACE 26#define NV_DEBUG_NOTRACE
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_hw.h" 28#include "nouveau_hw.h"
29#include "nouveau_encoder.h"
29 30
30/* these defines are made up */ 31/* these defines are made up */
31#define NV_CIO_CRE_44_HEADA 0x0 32#define NV_CIO_CRE_44_HEADA 0x0
@@ -256,6 +257,11 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
256struct init_tbl_entry { 257struct init_tbl_entry {
257 char *name; 258 char *name;
258 uint8_t id; 259 uint8_t id;
260 /* Return:
261 * > 0: success, length of opcode
262 * 0: success, but abort further parsing of table (INIT_DONE etc)
263 * < 0: failure, table parsing will be aborted
264 */
259 int (*handler)(struct nvbios *, uint16_t, struct init_exec *); 265 int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
260}; 266};
261 267
@@ -709,6 +715,83 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
709 return dcb_entry; 715 return dcb_entry;
710} 716}
711 717
718static int
719read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
720{
721 uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
722 int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
723 int recordoffset = 0, rdofs = 1, wrofs = 0;
724 uint8_t port_type = 0;
725
726 if (!i2ctable)
727 return -EINVAL;
728
729 if (dcb_version >= 0x30) {
730 if (i2ctable[0] != dcb_version) /* necessary? */
731 NV_WARN(dev,
732 "DCB I2C table version mismatch (%02X vs %02X)\n",
733 i2ctable[0], dcb_version);
734 dcb_i2c_ver = i2ctable[0];
735 headerlen = i2ctable[1];
736 if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
737 i2c_entries = i2ctable[2];
738 else
739 NV_WARN(dev,
740 "DCB I2C table has more entries than indexable "
741 "(%d entries, max %d)\n", i2ctable[2],
742 DCB_MAX_NUM_I2C_ENTRIES);
743 entry_len = i2ctable[3];
744 /* [4] is i2c_default_indices, read in parse_dcb_table() */
745 }
746 /*
747 * It's your own fault if you call this function on a DCB 1.1 BIOS --
748 * the test below is for DCB 1.2
749 */
750 if (dcb_version < 0x14) {
751 recordoffset = 2;
752 rdofs = 0;
753 wrofs = 1;
754 }
755
756 if (index == 0xf)
757 return 0;
758 if (index >= i2c_entries) {
759 NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
760 index, i2ctable[2]);
761 return -ENOENT;
762 }
763 if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
764 NV_ERROR(dev, "DCB I2C entry invalid\n");
765 return -EINVAL;
766 }
767
768 if (dcb_i2c_ver >= 0x30) {
769 port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
770
771 /*
772 * Fixup for chips using same address offset for read and
773 * write.
774 */
775 if (port_type == 4) /* seen on C51 */
776 rdofs = wrofs = 1;
777 if (port_type >= 5) /* G80+ */
778 rdofs = wrofs = 0;
779 }
780
781 if (dcb_i2c_ver >= 0x40) {
782 if (port_type != 5 && port_type != 6)
783 NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
784
785 i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
786 }
787
788 i2c->port_type = port_type;
789 i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
790 i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
791
792 return 0;
793}
794
712static struct nouveau_i2c_chan * 795static struct nouveau_i2c_chan *
713init_i2c_device_find(struct drm_device *dev, int i2c_index) 796init_i2c_device_find(struct drm_device *dev, int i2c_index)
714{ 797{
@@ -727,6 +810,20 @@ init_i2c_device_find(struct drm_device *dev, int i2c_index)
727 } 810 }
728 if (i2c_index == 0x80) /* g80+ */ 811 if (i2c_index == 0x80) /* g80+ */
729 i2c_index = dcb->i2c_default_indices & 0xf; 812 i2c_index = dcb->i2c_default_indices & 0xf;
813 else
814 if (i2c_index == 0x81)
815 i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
816
817 if (i2c_index > DCB_MAX_NUM_I2C_ENTRIES) {
818 NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
819 return NULL;
820 }
821
822 /* Make sure i2c table entry has been parsed, it may not
823 * have been if this is a bus not referenced by a DCB encoder
824 */
825 read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
826 i2c_index, &dcb->i2c[i2c_index]);
730 827
731 return nouveau_i2c_find(dev, i2c_index); 828 return nouveau_i2c_find(dev, i2c_index);
732} 829}
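
Note on the hunk above: init_i2c_device_find() now also resolves the second "default bus" alias 0x81 (the high nibble of the DCB's i2c_default_indices byte, alongside 0x80 for the low nibble) and rejects out-of-range indices before the lookup. A compilable toy of the nibble decode — the sample byte is invented:

	#include <stdint.h>
	#include <stdio.h>

	/* Toy decode of the DCB default-index aliases handled above. */
	static int resolve_i2c_index(int i2c_index, uint8_t i2c_default_indices)
	{
		if (i2c_index == 0x80)
			return i2c_default_indices & 0xf;
		if (i2c_index == 0x81)
			return (i2c_default_indices & 0xf0) >> 4;
		return i2c_index;
	}

	int main(void)
	{
		uint8_t defaults = 0x52;	/* invented example value */

		printf("0x80 -> %d, 0x81 -> %d\n",
		       resolve_i2c_index(0x80, defaults),
		       resolve_i2c_index(0x81, defaults));
		return 0;
	}
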
@@ -818,7 +915,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
818 NV_ERROR(bios->dev, 915 NV_ERROR(bios->dev,
819 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", 916 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
820 offset, config, count); 917 offset, config, count);
821 return 0; 918 return -EINVAL;
822 } 919 }
823 920
824 configval = ROM32(bios->data[offset + 11 + config * 4]); 921 configval = ROM32(bios->data[offset + 11 + config * 4]);
@@ -920,7 +1017,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
920 NV_ERROR(bios->dev, 1017 NV_ERROR(bios->dev,
921 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", 1018 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
922 offset, config, count); 1019 offset, config, count);
923 return 0; 1020 return -EINVAL;
924 } 1021 }
925 1022
926 freq = ROM16(bios->data[offset + 12 + config * 2]); 1023 freq = ROM16(bios->data[offset + 12 + config * 2]);
@@ -1067,6 +1164,126 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset,
1067} 1164}
1068 1165
1069static int 1166static int
1167init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1168{
1169 /*
1170 * INIT_DP_CONDITION opcode: 0x3A ('')
1171 *
1172 * offset (8 bit): opcode
1173 * offset + 1 (8 bit): "sub" opcode
1174 * offset + 2 (8 bit): unknown
1175 *
1176 */
1177
1178 struct bit_displayport_encoder_table *dpe = NULL;
1179 struct dcb_entry *dcb = bios->display.output;
1180 struct drm_device *dev = bios->dev;
1181 uint8_t cond = bios->data[offset + 1];
1182 int dummy;
1183
1184 BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);
1185
1186 if (!iexec->execute)
1187 return 3;
1188
1189 dpe = nouveau_bios_dp_table(dev, dcb, &dummy);
1190 if (!dpe) {
1191 NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset);
1192 return -EINVAL;
1193 }
1194
1195 switch (cond) {
1196 case 0:
1197 {
1198 struct dcb_connector_table_entry *ent =
1199 &bios->dcb.connector.entry[dcb->connector];
1200
1201 if (ent->type != DCB_CONNECTOR_eDP)
1202 iexec->execute = false;
1203 }
1204 break;
1205 case 1:
1206 case 2:
1207 if (!(dpe->unknown & cond))
1208 iexec->execute = false;
1209 break;
1210 case 5:
1211 {
1212 struct nouveau_i2c_chan *auxch;
1213 int ret;
1214
1215 auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index);
1216 if (!auxch)
1217 return -ENODEV;
1218
1219 ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1);
1220 if (ret)
1221 return ret;
1222
1223 if (cond & 1)
1224 iexec->execute = false;
1225 }
1226 break;
1227 default:
1228 NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond);
1229 break;
1230 }
1231
1232 if (iexec->execute)
1233 BIOSLOG(bios, "0x%04X: continuing to execute\n", offset);
1234 else
1235 BIOSLOG(bios, "0x%04X: skipping following commands\n", offset);
1236
1237 return 3;
1238}
1239
1240static int
1241init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1242{
1243 /*
1244 * INIT_3B opcode: 0x3B ('')
1245 *
1246 * offset (8 bit): opcode
1247 * offset + 1 (8 bit): crtc index
1248 *
1249 */
1250
1251 uint8_t or = ffs(bios->display.output->or) - 1;
1252 uint8_t index = bios->data[offset + 1];
1253 uint8_t data;
1254
1255 if (!iexec->execute)
1256 return 2;
1257
1258 data = bios_idxprt_rd(bios, 0x3d4, index);
1259 bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or));
1260 return 2;
1261}
1262
1263static int
1264init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1265{
1266 /*
1267 * INIT_3C opcode: 0x3C ('')
1268 *
1269 * offset (8 bit): opcode
1270 * offset + 1 (8 bit): crtc index
1271 *
1272 */
1273
1274 uint8_t or = ffs(bios->display.output->or) - 1;
1275 uint8_t index = bios->data[offset + 1];
1276 uint8_t data;
1277
1278 if (!iexec->execute)
1279 return 2;
1280
1281 data = bios_idxprt_rd(bios, 0x3d4, index);
1282 bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or));
1283 return 2;
1284}
1285
1286static int
1070init_idx_addr_latched(struct nvbios *bios, uint16_t offset, 1287init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
1071 struct init_exec *iexec) 1288 struct init_exec *iexec)
1072{ 1289{
@@ -1170,7 +1387,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
1170 NV_ERROR(bios->dev, 1387 NV_ERROR(bios->dev,
1171 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", 1388 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
1172 offset, config, count); 1389 offset, config, count);
1173 return 0; 1390 return -EINVAL;
1174 } 1391 }
1175 1392
1176 freq = ROM32(bios->data[offset + 11 + config * 4]); 1393 freq = ROM32(bios->data[offset + 11 + config * 4]);
@@ -1231,12 +1448,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1231 */ 1448 */
1232 1449
1233 uint8_t i2c_index = bios->data[offset + 1]; 1450 uint8_t i2c_index = bios->data[offset + 1];
1234 uint8_t i2c_address = bios->data[offset + 2]; 1451 uint8_t i2c_address = bios->data[offset + 2] >> 1;
1235 uint8_t count = bios->data[offset + 3]; 1452 uint8_t count = bios->data[offset + 3];
1236 int len = 4 + count * 3;
1237 struct nouveau_i2c_chan *chan; 1453 struct nouveau_i2c_chan *chan;
1238 struct i2c_msg msg; 1454 int len = 4 + count * 3;
1239 int i; 1455 int ret, i;
1240 1456
1241 if (!iexec->execute) 1457 if (!iexec->execute)
1242 return len; 1458 return len;
@@ -1247,35 +1463,34 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1247 1463
1248 chan = init_i2c_device_find(bios->dev, i2c_index); 1464 chan = init_i2c_device_find(bios->dev, i2c_index);
1249 if (!chan) 1465 if (!chan)
1250 return 0; 1466 return -ENODEV;
1251 1467
1252 for (i = 0; i < count; i++) { 1468 for (i = 0; i < count; i++) {
1253 uint8_t i2c_reg = bios->data[offset + 4 + i * 3]; 1469 uint8_t reg = bios->data[offset + 4 + i * 3];
1254 uint8_t mask = bios->data[offset + 5 + i * 3]; 1470 uint8_t mask = bios->data[offset + 5 + i * 3];
1255 uint8_t data = bios->data[offset + 6 + i * 3]; 1471 uint8_t data = bios->data[offset + 6 + i * 3];
1256 uint8_t value; 1472 union i2c_smbus_data val;
1257 1473
1258 msg.addr = i2c_address; 1474 ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
1259 msg.flags = I2C_M_RD; 1475 I2C_SMBUS_READ, reg,
1260 msg.len = 1; 1476 I2C_SMBUS_BYTE_DATA, &val);
1261 msg.buf = &value; 1477 if (ret < 0)
1262 if (i2c_transfer(&chan->adapter, &msg, 1) != 1) 1478 return ret;
1263 return 0;
1264 1479
1265 BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, " 1480 BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
1266 "Mask: 0x%02X, Data: 0x%02X\n", 1481 "Mask: 0x%02X, Data: 0x%02X\n",
1267 offset, i2c_reg, value, mask, data); 1482 offset, reg, val.byte, mask, data);
1268 1483
1269 value = (value & mask) | data; 1484 if (!bios->execute)
1485 continue;
1270 1486
1271 if (bios->execute) { 1487 val.byte &= mask;
1272 msg.addr = i2c_address; 1488 val.byte |= data;
1273 msg.flags = 0; 1489 ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
1274 msg.len = 1; 1490 I2C_SMBUS_WRITE, reg,
1275 msg.buf = &value; 1491 I2C_SMBUS_BYTE_DATA, &val);
1276 if (i2c_transfer(&chan->adapter, &msg, 1) != 1) 1492 if (ret < 0)
1277 return 0; 1493 return ret;
1278 }
1279 } 1494 }
1280 1495
1281 return len; 1496 return len;
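
Note on the hunk above: the open-coded i2c_msg read/modify/write is replaced with i2c_smbus_xfer() byte-data transactions; the VBIOS stores the device address in 8-bit (shifted) form, so it is shifted right once to the 7-bit address the SMBus API expects, and failures now propagate as negative errnos instead of a silent 0. A user-space sketch of the address conversion and masking step, with the sample values invented:

	#include <stdint.h>
	#include <stdio.h>

	/* Toy model of the INIT_I2C_BYTE read-modify-write above. */
	static uint8_t rmw_byte(uint8_t current, uint8_t mask, uint8_t data)
	{
		return (current & mask) | data;
	}

	int main(void)
	{
		uint8_t vbios_addr = 0xa0;		/* 8-bit form in the table */
		uint8_t smbus_addr = vbios_addr >> 1;	/* 0x50, 7-bit form */

		printf("addr 0x%02x -> 0x%02x, reg value 0x%02x\n",
		       vbios_addr, smbus_addr, rmw_byte(0x3c, 0xf0, 0x05));
		return 0;
	}
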
@@ -1301,12 +1516,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1301 */ 1516 */
1302 1517
1303 uint8_t i2c_index = bios->data[offset + 1]; 1518 uint8_t i2c_index = bios->data[offset + 1];
1304 uint8_t i2c_address = bios->data[offset + 2]; 1519 uint8_t i2c_address = bios->data[offset + 2] >> 1;
1305 uint8_t count = bios->data[offset + 3]; 1520 uint8_t count = bios->data[offset + 3];
1306 int len = 4 + count * 2;
1307 struct nouveau_i2c_chan *chan; 1521 struct nouveau_i2c_chan *chan;
1308 struct i2c_msg msg; 1522 int len = 4 + count * 2;
1309 int i; 1523 int ret, i;
1310 1524
1311 if (!iexec->execute) 1525 if (!iexec->execute)
1312 return len; 1526 return len;
@@ -1317,23 +1531,25 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1317 1531
1318 chan = init_i2c_device_find(bios->dev, i2c_index); 1532 chan = init_i2c_device_find(bios->dev, i2c_index);
1319 if (!chan) 1533 if (!chan)
1320 return 0; 1534 return -ENODEV;
1321 1535
1322 for (i = 0; i < count; i++) { 1536 for (i = 0; i < count; i++) {
1323 uint8_t i2c_reg = bios->data[offset + 4 + i * 2]; 1537 uint8_t reg = bios->data[offset + 4 + i * 2];
1324 uint8_t data = bios->data[offset + 5 + i * 2]; 1538 union i2c_smbus_data val;
1539
1540 val.byte = bios->data[offset + 5 + i * 2];
1325 1541
1326 BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n", 1542 BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
1327 offset, i2c_reg, data); 1543 offset, reg, val.byte);
1328 1544
1329 if (bios->execute) { 1545 if (!bios->execute)
1330 msg.addr = i2c_address; 1546 continue;
1331 msg.flags = 0; 1547
1332 msg.len = 1; 1548 ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
1333 msg.buf = &data; 1549 I2C_SMBUS_WRITE, reg,
1334 if (i2c_transfer(&chan->adapter, &msg, 1) != 1) 1550 I2C_SMBUS_BYTE_DATA, &val);
1335 return 0; 1551 if (ret < 0)
1336 } 1552 return ret;
1337 } 1553 }
1338 1554
1339 return len; 1555 return len;
@@ -1357,7 +1573,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1357 */ 1573 */
1358 1574
1359 uint8_t i2c_index = bios->data[offset + 1]; 1575 uint8_t i2c_index = bios->data[offset + 1];
1360 uint8_t i2c_address = bios->data[offset + 2]; 1576 uint8_t i2c_address = bios->data[offset + 2] >> 1;
1361 uint8_t count = bios->data[offset + 3]; 1577 uint8_t count = bios->data[offset + 3];
1362 int len = 4 + count; 1578 int len = 4 + count;
1363 struct nouveau_i2c_chan *chan; 1579 struct nouveau_i2c_chan *chan;
@@ -1374,7 +1590,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1374 1590
1375 chan = init_i2c_device_find(bios->dev, i2c_index); 1591 chan = init_i2c_device_find(bios->dev, i2c_index);
1376 if (!chan) 1592 if (!chan)
1377 return 0; 1593 return -ENODEV;
1378 1594
1379 for (i = 0; i < count; i++) { 1595 for (i = 0; i < count; i++) {
1380 data[i] = bios->data[offset + 4 + i]; 1596 data[i] = bios->data[offset + 4 + i];
@@ -1388,7 +1604,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1388 msg.len = count; 1604 msg.len = count;
1389 msg.buf = data; 1605 msg.buf = data;
1390 if (i2c_transfer(&chan->adapter, &msg, 1) != 1) 1606 if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
1391 return 0; 1607 return -EIO;
1392 } 1608 }
1393 1609
1394 return len; 1610 return len;
@@ -1427,7 +1643,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1427 1643
1428 reg = get_tmds_index_reg(bios->dev, mlv); 1644 reg = get_tmds_index_reg(bios->dev, mlv);
1429 if (!reg) 1645 if (!reg)
1430 return 0; 1646 return -EINVAL;
1431 1647
1432 bios_wr32(bios, reg, 1648 bios_wr32(bios, reg,
1433 tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE); 1649 tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
@@ -1471,7 +1687,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
1471 1687
1472 reg = get_tmds_index_reg(bios->dev, mlv); 1688 reg = get_tmds_index_reg(bios->dev, mlv);
1473 if (!reg) 1689 if (!reg)
1474 return 0; 1690 return -EINVAL;
1475 1691
1476 for (i = 0; i < count; i++) { 1692 for (i = 0; i < count; i++) {
1477 uint8_t tmdsaddr = bios->data[offset + 3 + i * 2]; 1693 uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
@@ -1946,7 +2162,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
1946 uint32_t reg, data; 2162 uint32_t reg, data;
1947 2163
1948 if (bios->major_version > 2) 2164 if (bios->major_version > 2)
1949 return 0; 2165 return -ENODEV;
1950 2166
1951 bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd( 2167 bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
1952 bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20); 2168 bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
@@ -2001,7 +2217,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
2001 int clock; 2217 int clock;
2002 2218
2003 if (bios->major_version > 2) 2219 if (bios->major_version > 2)
2004 return 0; 2220 return -ENODEV;
2005 2221
2006 clock = ROM16(bios->data[meminitoffs + 4]) * 10; 2222 clock = ROM16(bios->data[meminitoffs + 4]) * 10;
2007 setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock); 2223 setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
@@ -2034,7 +2250,7 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset,
2034 uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6)); 2250 uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
2035 2251
2036 if (bios->major_version > 2) 2252 if (bios->major_version > 2)
2037 return 0; 2253 return -ENODEV;
2038 2254
2039 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, 2255 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
2040 NV_CIO_CRE_SCRATCH4__INDEX, cr3c); 2256 NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
@@ -2656,7 +2872,7 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
2656 NV_ERROR(bios->dev, 2872 NV_ERROR(bios->dev,
2657 "0x%04X: Zero block length - has the M table " 2873 "0x%04X: Zero block length - has the M table "
2658 "been parsed?\n", offset); 2874 "been parsed?\n", offset);
2659 return 0; 2875 return -EINVAL;
2660 } 2876 }
2661 2877
2662 strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf; 2878 strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
@@ -2840,14 +3056,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2840 3056
2841 if (!bios->display.output) { 3057 if (!bios->display.output) {
2842 NV_ERROR(dev, "INIT_AUXCH: no active output\n"); 3058 NV_ERROR(dev, "INIT_AUXCH: no active output\n");
2843 return 0; 3059 return -EINVAL;
2844 } 3060 }
2845 3061
2846 auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); 3062 auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
2847 if (!auxch) { 3063 if (!auxch) {
2848 NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n", 3064 NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
2849 bios->display.output->i2c_index); 3065 bios->display.output->i2c_index);
2850 return 0; 3066 return -ENODEV;
2851 } 3067 }
2852 3068
2853 if (!iexec->execute) 3069 if (!iexec->execute)
@@ -2860,7 +3076,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2860 ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1); 3076 ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
2861 if (ret) { 3077 if (ret) {
2862 NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret); 3078 NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
2863 return 0; 3079 return ret;
2864 } 3080 }
2865 3081
2866 data &= bios->data[offset + 0]; 3082 data &= bios->data[offset + 0];
@@ -2869,7 +3085,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2869 ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1); 3085 ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
2870 if (ret) { 3086 if (ret) {
2871 NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret); 3087 NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
2872 return 0; 3088 return ret;
2873 } 3089 }
2874 } 3090 }
2875 3091
@@ -2899,14 +3115,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2899 3115
2900 if (!bios->display.output) { 3116 if (!bios->display.output) {
2901 NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n"); 3117 NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
2902 return 0; 3118 return -EINVAL;
2903 } 3119 }
2904 3120
2905 auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); 3121 auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
2906 if (!auxch) { 3122 if (!auxch) {
2907 NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n", 3123 NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
2908 bios->display.output->i2c_index); 3124 bios->display.output->i2c_index);
2909 return 0; 3125 return -ENODEV;
2910 } 3126 }
2911 3127
2912 if (!iexec->execute) 3128 if (!iexec->execute)
@@ -2917,7 +3133,7 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2917 ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1); 3133 ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
2918 if (ret) { 3134 if (ret) {
2919 NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret); 3135 NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
2920 return 0; 3136 return ret;
2921 } 3137 }
2922 } 3138 }
2923 3139
@@ -2934,6 +3150,9 @@ static struct init_tbl_entry itbl_entry[] = {
2934 { "INIT_COPY" , 0x37, init_copy }, 3150 { "INIT_COPY" , 0x37, init_copy },
2935 { "INIT_NOT" , 0x38, init_not }, 3151 { "INIT_NOT" , 0x38, init_not },
2936 { "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition }, 3152 { "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition },
3153 { "INIT_DP_CONDITION" , 0x3A, init_dp_condition },
3154 { "INIT_OP_3B" , 0x3B, init_op_3b },
3155 { "INIT_OP_3C" , 0x3C, init_op_3c },
2937 { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched }, 3156 { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched },
2938 { "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 }, 3157 { "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 },
2939 { "INIT_PLL2" , 0x4B, init_pll2 }, 3158 { "INIT_PLL2" , 0x4B, init_pll2 },
@@ -3001,7 +3220,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
3001 * is changed back to EXECUTE. 3220 * is changed back to EXECUTE.
3002 */ 3221 */
3003 3222
3004 int count = 0, i, res; 3223 int count = 0, i, ret;
3005 uint8_t id; 3224 uint8_t id;
3006 3225
3007 /* 3226 /*
@@ -3016,26 +3235,33 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
3016 for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++) 3235 for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
3017 ; 3236 ;
3018 3237
3019 if (itbl_entry[i].name) { 3238 if (!itbl_entry[i].name) {
3020 BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
3021 offset, itbl_entry[i].id, itbl_entry[i].name);
3022
3023 /* execute eventual command handler */
3024 res = (*itbl_entry[i].handler)(bios, offset, iexec);
3025 if (!res)
3026 break;
3027 /*
3028 * Add the offset of the current command including all data
3029 * of that command. The offset will then be pointing on the
3030 * next op code.
3031 */
3032 offset += res;
3033 } else {
3034 NV_ERROR(bios->dev, 3239 NV_ERROR(bios->dev,
3035 "0x%04X: Init table command not found: " 3240 "0x%04X: Init table command not found: "
3036 "0x%02X\n", offset, id); 3241 "0x%02X\n", offset, id);
3037 return -ENOENT; 3242 return -ENOENT;
3038 } 3243 }
3244
3245 BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset,
3246 itbl_entry[i].id, itbl_entry[i].name);
3247
3248 /* execute eventual command handler */
3249 ret = (*itbl_entry[i].handler)(bios, offset, iexec);
3250 if (ret < 0) {
3251 NV_ERROR(bios->dev, "0x%04X: Failed parsing init "
3252 "table opcode: %s %d\n", offset,
3253 itbl_entry[i].name, ret);
3254 }
3255
3256 if (ret <= 0)
3257 break;
3258
3259 /*
3260 * Add the offset of the current command including all data
3261 * of that command. The offset will then be pointing on the
3262 * next op code.
3263 */
3264 offset += ret;
3039 } 3265 }
3040 3266
3041 if (offset >= bios->length) 3267 if (offset >= bios->length)
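
Note on the hunk above: parse_init_table() now acts on the three handler outcomes documented earlier in this patch — a positive return advances past the opcode, zero stops parsing cleanly (INIT_DONE and friends), and a negative value is logged and aborts the table. A stand-alone toy of that dispatch shape; the opcodes are illustrative except 0x71, which is nouveau's INIT_DONE:

	#include <stdint.h>
	#include <stdio.h>

	struct op { uint8_t id; int (*handler)(uint16_t offset); };

	static int op_nop(uint16_t offset)  { (void)offset; return 1; }
	static int op_done(uint16_t offset) { (void)offset; return 0; }

	static const struct op ops[] = {
		{ 0x71, op_done },	/* INIT_DONE */
		{ 0x90, op_nop },	/* invented 1-byte opcode */
		{ 0x00, NULL }
	};

	static int parse(const uint8_t *tbl, unsigned int offset, unsigned int len)
	{
		while (offset < len) {
			const struct op *o;
			int ret;

			for (o = ops; o->id && o->id != tbl[offset]; o++)
				;
			if (!o->id)
				return -1;	/* unknown opcode: abort */

			ret = o->handler(offset);
			if (ret < 0)
				fprintf(stderr, "0x%04x: opcode 0x%02x failed\n",
					offset, tbl[offset]);
			if (ret <= 0)
				break;		/* clean stop or error */
			offset += ret;		/* advance to next opcode */
		}
		return 0;
	}

	int main(void)
	{
		const uint8_t table[] = { 0x90, 0x90, 0x71 };

		return parse(table, 0, sizeof(table));
	}
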
@@ -4285,31 +4511,32 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
4285 break; 4511 break;
4286 } 4512 }
4287 4513
4288#if 0 /* for easy debugging */ 4514 NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
4289 ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq); 4515 NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
4290 ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq); 4516 NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
4291 ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq); 4517 NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
4292 ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq); 4518 NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
4293 4519 NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
4294 ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq); 4520 NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
4295 ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq); 4521 NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
4296 ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq); 4522 if (pll_lim->vco2.maxfreq) {
4297 ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq); 4523 NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
4298 4524 NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
4299 ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n); 4525 NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
4300 ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n); 4526 NV_DEBUG(dev, "pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
4301 ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m); 4527 NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
4302 ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m); 4528 NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
4303 ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n); 4529 NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
4304 ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n); 4530 NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
4305 ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m); 4531 }
4306 ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m); 4532 if (!pll_lim->max_p) {
4307 4533 NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p);
4308 ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p); 4534 NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias);
4309 ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias); 4535 } else {
4310 4536 NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p);
4311 ErrorF("pll.refclk: %d\n", pll_lim->refclk); 4537 NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p);
4312#endif 4538 }
4539 NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk);
4313 4540
4314 return 0; 4541 return 0;
4315} 4542}
@@ -4953,79 +5180,6 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
4953 return 0; 5180 return 0;
4954} 5181}
4955 5182
4956static int
4957read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
4958{
4959 uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
4960 int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
4961 int recordoffset = 0, rdofs = 1, wrofs = 0;
4962 uint8_t port_type = 0;
4963
4964 if (!i2ctable)
4965 return -EINVAL;
4966
4967 if (dcb_version >= 0x30) {
4968 if (i2ctable[0] != dcb_version) /* necessary? */
4969 NV_WARN(dev,
4970 "DCB I2C table version mismatch (%02X vs %02X)\n",
4971 i2ctable[0], dcb_version);
4972 dcb_i2c_ver = i2ctable[0];
4973 headerlen = i2ctable[1];
4974 if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
4975 i2c_entries = i2ctable[2];
4976 else
4977 NV_WARN(dev,
4978 "DCB I2C table has more entries than indexable "
4979 "(%d entries, max %d)\n", i2ctable[2],
4980 DCB_MAX_NUM_I2C_ENTRIES);
4981 entry_len = i2ctable[3];
4982 /* [4] is i2c_default_indices, read in parse_dcb_table() */
4983 }
4984 /*
4985 * It's your own fault if you call this function on a DCB 1.1 BIOS --
4986 * the test below is for DCB 1.2
4987 */
4988 if (dcb_version < 0x14) {
4989 recordoffset = 2;
4990 rdofs = 0;
4991 wrofs = 1;
4992 }
4993
4994 if (index == 0xf)
4995 return 0;
4996 if (index >= i2c_entries) {
4997 NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
4998 index, i2ctable[2]);
4999 return -ENOENT;
5000 }
5001 if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
5002 NV_ERROR(dev, "DCB I2C entry invalid\n");
5003 return -EINVAL;
5004 }
5005
5006 if (dcb_i2c_ver >= 0x30) {
5007 port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
5008
5009 /*
5010 * Fixup for chips using same address offset for read and
5011 * write.
5012 */
5013 if (port_type == 4) /* seen on C51 */
5014 rdofs = wrofs = 1;
5015 if (port_type >= 5) /* G80+ */
5016 rdofs = wrofs = 0;
5017 }
5018
5019 if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
5020 NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
5021
5022 i2c->port_type = port_type;
5023 i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
5024 i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
5025
5026 return 0;
5027}
5028
5029static struct dcb_gpio_entry * 5183static struct dcb_gpio_entry *
5030new_gpio_entry(struct nvbios *bios) 5184new_gpio_entry(struct nvbios *bios)
5031{ 5185{
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index c0d7b0a3ece0..adf4ec2d06c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -35,6 +35,7 @@
35#define DCB_LOC_ON_CHIP 0 35#define DCB_LOC_ON_CHIP 0
36 36
37struct dcb_i2c_entry { 37struct dcb_i2c_entry {
38 uint32_t entry;
38 uint8_t port_type; 39 uint8_t port_type;
39 uint8_t read, write; 40 uint8_t read, write;
40 struct nouveau_i2c_chan *chan; 41 struct nouveau_i2c_chan *chan;
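
Note on the nouveau_bios.h hunk above: the new 32-bit entry field lets read_dcb_i2c_entry() keep the whole raw DCB 4.0 i2c record (loaded with ROM32() earlier in this patch) so later nv50 code can reinterpret it. A sketch of a ROM32()-style little-endian load from a byte stream, with the sample record invented:

	#include <stdint.h>
	#include <stdio.h>

	/* VBIOS tables are little-endian byte streams; a 32-bit field is
	 * assembled from four consecutive bytes (the kernel macro does an
	 * unaligned LE read). */
	static uint32_t rom32(const uint8_t *p)
	{
		return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
		       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
	}

	int main(void)
	{
		const uint8_t raw[] = { 0x30, 0x00, 0x02, 0x40 }; /* invented */

		printf("entry = 0x%08x\n", (unsigned int)rom32(raw));
		return 0;
	}
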
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 957d17629840..6f3c19522377 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -160,11 +160,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
160 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, 160 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
161 ttm_bo_type_device, &nvbo->placement, align, 0, 161 ttm_bo_type_device, &nvbo->placement, align, 0,
162 false, NULL, size, nouveau_bo_del_ttm); 162 false, NULL, size, nouveau_bo_del_ttm);
163 nvbo->channel = NULL;
164 if (ret) { 163 if (ret) {
165 /* ttm will call nouveau_bo_del_ttm if it fails.. */ 164 /* ttm will call nouveau_bo_del_ttm if it fails.. */
166 return ret; 165 return ret;
167 } 166 }
167 nvbo->channel = NULL;
168 168
169 spin_lock(&dev_priv->ttm.bo_list_lock); 169 spin_lock(&dev_priv->ttm.bo_list_lock);
170 list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list); 170 list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
@@ -225,7 +225,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
225 225
226 nouveau_bo_placement_set(nvbo, memtype, 0); 226 nouveau_bo_placement_set(nvbo, memtype, 0);
227 227
228 ret = ttm_bo_validate(bo, &nvbo->placement, false, false); 228 ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
229 if (ret == 0) { 229 if (ret == 0) {
230 switch (bo->mem.mem_type) { 230 switch (bo->mem.mem_type) {
231 case TTM_PL_VRAM: 231 case TTM_PL_VRAM:
@@ -261,7 +261,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
261 261
262 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); 262 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
263 263
264 ret = ttm_bo_validate(bo, &nvbo->placement, false, false); 264 ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
265 if (ret == 0) { 265 if (ret == 0) {
266 switch (bo->mem.mem_type) { 266 switch (bo->mem.mem_type) {
267 case TTM_PL_VRAM: 267 case TTM_PL_VRAM:
@@ -391,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
391 break; 391 break;
392 case TTM_PL_VRAM: 392 case TTM_PL_VRAM:
393 man->flags = TTM_MEMTYPE_FLAG_FIXED | 393 man->flags = TTM_MEMTYPE_FLAG_FIXED |
394 TTM_MEMTYPE_FLAG_MAPPABLE | 394 TTM_MEMTYPE_FLAG_MAPPABLE;
395 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
396 man->available_caching = TTM_PL_FLAG_UNCACHED | 395 man->available_caching = TTM_PL_FLAG_UNCACHED |
397 TTM_PL_FLAG_WC; 396 TTM_PL_FLAG_WC;
398 man->default_caching = TTM_PL_FLAG_WC; 397 man->default_caching = TTM_PL_FLAG_WC;
399
400 man->io_addr = NULL;
401 man->io_offset = drm_get_resource_start(dev, 1);
402 man->io_size = drm_get_resource_len(dev, 1);
403 if (man->io_size > dev_priv->vram_size)
404 man->io_size = dev_priv->vram_size;
405
406 man->gpu_offset = dev_priv->vm_vram_base; 398 man->gpu_offset = dev_priv->vm_vram_base;
407 break; 399 break;
408 case TTM_PL_TT: 400 case TTM_PL_TT:
409 switch (dev_priv->gart_info.type) { 401 switch (dev_priv->gart_info.type) {
410 case NOUVEAU_GART_AGP: 402 case NOUVEAU_GART_AGP:
411 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | 403 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
412 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
413 man->available_caching = TTM_PL_FLAG_UNCACHED; 404 man->available_caching = TTM_PL_FLAG_UNCACHED;
414 man->default_caching = TTM_PL_FLAG_UNCACHED; 405 man->default_caching = TTM_PL_FLAG_UNCACHED;
415 break; 406 break;
@@ -424,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
424 dev_priv->gart_info.type); 415 dev_priv->gart_info.type);
425 return -EINVAL; 416 return -EINVAL;
426 } 417 }
427
428 man->io_offset = dev_priv->gart_info.aper_base;
429 man->io_size = dev_priv->gart_info.aper_size;
430 man->io_addr = NULL;
431 man->gpu_offset = dev_priv->vm_gart_base; 418 man->gpu_offset = dev_priv->vm_gart_base;
432 break; 419 break;
433 default: 420 default:
@@ -462,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
462 449
463static int 450static int
464nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, 451nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
465 struct nouveau_bo *nvbo, bool evict, bool no_wait, 452 struct nouveau_bo *nvbo, bool evict,
453 bool no_wait_reserve, bool no_wait_gpu,
466 struct ttm_mem_reg *new_mem) 454 struct ttm_mem_reg *new_mem)
467{ 455{
468 struct nouveau_fence *fence = NULL; 456 struct nouveau_fence *fence = NULL;
@@ -473,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
473 return ret; 461 return ret;
474 462
475 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, 463 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
476 evict, no_wait, new_mem); 464 evict, no_wait_reserve, no_wait_gpu, new_mem);
477 if (nvbo->channel && nvbo->channel != chan) 465 if (nvbo->channel && nvbo->channel != chan)
478 ret = nouveau_fence_wait(fence, NULL, false, false); 466 ret = nouveau_fence_wait(fence, NULL, false, false);
479 nouveau_fence_unref((void *)&fence); 467 nouveau_fence_unref((void *)&fence);
@@ -497,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
497 485
498static int 486static int
499nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, 487nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
500 int no_wait, struct ttm_mem_reg *new_mem) 488 bool no_wait_reserve, bool no_wait_gpu,
489 struct ttm_mem_reg *new_mem)
501{ 490{
502 struct nouveau_bo *nvbo = nouveau_bo(bo); 491 struct nouveau_bo *nvbo = nouveau_bo(bo);
503 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 492 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -575,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
575 dst_offset += (PAGE_SIZE * line_count); 564 dst_offset += (PAGE_SIZE * line_count);
576 } 565 }
577 566
578 return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem); 567 return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
579} 568}
580 569
581static int 570static int
582nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, 571nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
583 bool no_wait, struct ttm_mem_reg *new_mem) 572 bool no_wait_reserve, bool no_wait_gpu,
573 struct ttm_mem_reg *new_mem)
584{ 574{
585 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 575 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
586 struct ttm_placement placement; 576 struct ttm_placement placement;
@@ -593,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
593 583
594 tmp_mem = *new_mem; 584 tmp_mem = *new_mem;
595 tmp_mem.mm_node = NULL; 585 tmp_mem.mm_node = NULL;
596 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait); 586 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
597 if (ret) 587 if (ret)
598 return ret; 588 return ret;
599 589
@@ -601,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
601 if (ret) 591 if (ret)
602 goto out; 592 goto out;
603 593
604 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem); 594 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
605 if (ret) 595 if (ret)
606 goto out; 596 goto out;
607 597
608 ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem); 598 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
609out: 599out:
610 if (tmp_mem.mm_node) { 600 if (tmp_mem.mm_node) {
611 spin_lock(&bo->bdev->glob->lru_lock); 601 spin_lock(&bo->bdev->glob->lru_lock);
@@ -618,7 +608,8 @@ out:
618 608
619static int 609static int
620nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, 610nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
621 bool no_wait, struct ttm_mem_reg *new_mem) 611 bool no_wait_reserve, bool no_wait_gpu,
612 struct ttm_mem_reg *new_mem)
622{ 613{
623 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 614 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
624 struct ttm_placement placement; 615 struct ttm_placement placement;
@@ -631,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
631 622
632 tmp_mem = *new_mem; 623 tmp_mem = *new_mem;
633 tmp_mem.mm_node = NULL; 624 tmp_mem.mm_node = NULL;
634 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait); 625 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
635 if (ret) 626 if (ret)
636 return ret; 627 return ret;
637 628
638 ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem); 629 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
639 if (ret) 630 if (ret)
640 goto out; 631 goto out;
641 632
642 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem); 633 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
643 if (ret) 634 if (ret)
644 goto out; 635 goto out;
645 636
@@ -706,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
706 697
707static int 698static int
708nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, 699nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
709 bool no_wait, struct ttm_mem_reg *new_mem) 700 bool no_wait_reserve, bool no_wait_gpu,
701 struct ttm_mem_reg *new_mem)
710{ 702{
711 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 703 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
712 struct nouveau_bo *nvbo = nouveau_bo(bo); 704 struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -721,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
721 /* Software copy if the card isn't up and running yet. */ 713 /* Software copy if the card isn't up and running yet. */
722 if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE || 714 if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
723 !dev_priv->channel) { 715 !dev_priv->channel) {
724 ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); 716 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
725 goto out; 717 goto out;
726 } 718 }
727 719
@@ -735,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
735 727
736 /* Hardware assisted copy. */ 728 /* Hardware assisted copy. */
737 if (new_mem->mem_type == TTM_PL_SYSTEM) 729 if (new_mem->mem_type == TTM_PL_SYSTEM)
738 ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem); 730 ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
739 else if (old_mem->mem_type == TTM_PL_SYSTEM) 731 else if (old_mem->mem_type == TTM_PL_SYSTEM)
740 ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem); 732 ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
741 else 733 else
742 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem); 734 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
743 735
744 if (!ret) 736 if (!ret)
745 goto out; 737 goto out;
746 738
747 /* Fallback to software copy. */ 739 /* Fallback to software copy. */
748 ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); 740 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
749 741
750out: 742out:
751 if (ret) 743 if (ret)
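
The change running through these hunks is mechanical: TTM's single no_wait flag becomes two independent flags, no_wait_reserve (don't block while reserving the buffer) and no_wait_gpu (don't block waiting for the GPU), and every move path simply forwards both. A minimal compilable sketch of that threading, with hypothetical stand-in types rather than the real TTM structures:

    /* Sketch only: mem_reg and both helpers are hypothetical stand-ins. */
    #include <stdbool.h>
    #include <stddef.h>

    struct mem_reg { int mem_type; };

    /* software fallback; both flags are forwarded unchanged */
    static int move_memcpy(void *bo, bool evict, bool no_wait_reserve,
                           bool no_wait_gpu, struct mem_reg *new_mem)
    {
        (void)bo; (void)evict; (void)no_wait_reserve;
        (void)no_wait_gpu; (void)new_mem;
        return 0;
    }

    static int bo_move(void *bo, bool evict, bool intr,
                       bool no_wait_reserve, bool no_wait_gpu,
                       struct mem_reg *new_mem)
    {
        (void)intr;
        /* a real driver tries the hardware copy first and only then
         * falls back to the memcpy path, as nouveau_bo_move() does */
        return move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
    }

    int main(void)
    {
        struct mem_reg dst = { .mem_type = 0 };
        return bo_move(NULL, false, true, false, false, &dst);
    }
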
@@ -762,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
762 return 0; 754 return 0;
763} 755}
764 756
757static int
758nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
759{
760 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
761 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
762 struct drm_device *dev = dev_priv->dev;
763
764 mem->bus.addr = NULL;
765 mem->bus.offset = 0;
766 mem->bus.size = mem->num_pages << PAGE_SHIFT;
767 mem->bus.base = 0;
768 mem->bus.is_iomem = false;
769 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
770 return -EINVAL;
771 switch (mem->mem_type) {
772 case TTM_PL_SYSTEM:
773 /* System memory */
774 return 0;
775 case TTM_PL_TT:
776#if __OS_HAS_AGP
777 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
778 mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
779 mem->bus.base = dev_priv->gart_info.aper_base;
780 mem->bus.is_iomem = true;
781 }
782#endif
783 break;
784 case TTM_PL_VRAM:
785 mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
786 mem->bus.base = drm_get_resource_start(dev, 1);
787 mem->bus.is_iomem = true;
788 break;
789 default:
790 return -EINVAL;
791 }
792 return 0;
793}
794
795static void
796nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
797{
798}
799
800static int
801nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
802{
803 return 0;
804}
805
765struct ttm_bo_driver nouveau_bo_driver = { 806struct ttm_bo_driver nouveau_bo_driver = {
766 .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, 807 .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
767 .invalidate_caches = nouveau_bo_invalidate_caches, 808 .invalidate_caches = nouveau_bo_invalidate_caches,
@@ -774,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = {
774 .sync_obj_flush = nouveau_fence_flush, 815 .sync_obj_flush = nouveau_fence_flush,
775 .sync_obj_unref = nouveau_fence_unref, 816 .sync_obj_unref = nouveau_fence_unref,
776 .sync_obj_ref = nouveau_fence_ref, 817 .sync_obj_ref = nouveau_fence_ref,
818 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
819 .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
820 .io_mem_free = &nouveau_ttm_io_mem_free,
777}; 821};
778 822
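
The three new hooks above belong to a TTM interface change elsewhere in this series: rather than exposing static io_offset/io_size fields per memory type, the driver now answers io_mem_reserve() each time TTM needs to map a buffer, filling in a bus-address description. Below is a compilable userspace sketch of the decision nouveau_ttm_io_mem_reserve() encodes; describe_bus(), the PL_* enum and the base parameters are stand-ins (the real code reads dev_priv->gart_info.aper_base and PCI BAR1), and a 4 KiB page size is assumed:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum placement { PL_SYSTEM, PL_TT, PL_VRAM };

    struct bus_placement {
        uint64_t base;      /* aperture base on the PCI bus */
        uint64_t offset;    /* object offset within the aperture */
        bool     is_iomem;  /* true -> ioremap, false -> plain pages */
    };

    static int describe_bus(enum placement pl, uint64_t page_start,
                            uint64_t vram_base, uint64_t agp_base,
                            bool have_agp, struct bus_placement *out)
    {
        out->base = 0;
        out->offset = 0;
        out->is_iomem = false;

        switch (pl) {
        case PL_SYSTEM:                    /* plain RAM: nothing to map */
            return 0;
        case PL_TT:
            if (have_agp) {                /* AGP aperture is CPU-visible */
                out->base = agp_base;
                out->offset = page_start << 12;
                out->is_iomem = true;
            }
            return 0;
        case PL_VRAM:                      /* VRAM sits behind BAR1 */
            out->base = vram_base;
            out->offset = page_start << 12;
            out->is_iomem = true;
            return 0;
        }
        return -1;                         /* unknown placement */
    }

    int main(void)
    {
        struct bus_placement bp;
        describe_bus(PL_VRAM, 16, 0xd0000000u, 0, false, &bp);
        printf("map at %#llx, iomem=%d\n",
               (unsigned long long)(bp.base + bp.offset), bp.is_iomem);
        return 0;
    }
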
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 14afe1e47e57..7e663a79829f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -843,6 +843,7 @@ nouveau_connector_create(struct drm_device *dev,
843 843
844 switch (dcb->type) { 844 switch (dcb->type) {
845 case DCB_CONNECTOR_VGA: 845 case DCB_CONNECTOR_VGA:
846 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
846 if (dev_priv->card_type >= NV_50) { 847 if (dev_priv->card_type >= NV_50) {
847 drm_connector_attach_property(connector, 848 drm_connector_attach_property(connector,
848 dev->mode_config.scaling_mode_property, 849 dev->mode_config.scaling_mode_property,
@@ -854,6 +855,17 @@ nouveau_connector_create(struct drm_device *dev,
854 case DCB_CONNECTOR_TV_3: 855 case DCB_CONNECTOR_TV_3:
855 nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; 856 nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
856 break; 857 break;
858 case DCB_CONNECTOR_DP:
859 case DCB_CONNECTOR_eDP:
860 case DCB_CONNECTOR_HDMI_0:
861 case DCB_CONNECTOR_HDMI_1:
862 case DCB_CONNECTOR_DVI_I:
863 case DCB_CONNECTOR_DVI_D:
864 if (dev_priv->card_type >= NV_50)
865 connector->polled = DRM_CONNECTOR_POLL_HPD;
866 else
867 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
868 /* fall-through */
857 default: 869 default:
858 nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; 870 nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
859 871
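
The polling flags assigned above follow a simple policy: analog VGA has no hotplug pin, so it must be polled for connection; the digital outputs get a real hotplug interrupt on NV50 and newer and are only polled on older chips; everything else is left unpolled. A sketch of that policy as a standalone function, with hypothetical enum names in place of the DRM constants:

    #include <stdbool.h>

    /* Hypothetical stand-ins for the DRM connector types and poll flags. */
    enum conn_type { CONN_VGA, CONN_DVI_I, CONN_DVI_D, CONN_HDMI, CONN_DP,
                     CONN_TV };
    enum poll_mode { POLL_NONE, POLL_CONNECT, POLL_HPD };

    static enum poll_mode pick_poll_mode(enum conn_type type, bool has_hpd)
    {
        switch (type) {
        case CONN_VGA:          /* analog: no hotplug pin, poll for connect */
            return POLL_CONNECT;
        case CONN_DVI_I:
        case CONN_DVI_D:
        case CONN_HDMI:
        case CONN_DP:
            /* NV50+ has working hotplug IRQs for these; older chips
             * fall back to periodic polling */
            return has_hpd ? POLL_HPD : POLL_CONNECT;
        default:                /* e.g. TV-out: left unpolled */
            return POLL_NONE;
        }
    }

    int main(void)
    {
        return pick_poll_mode(CONN_DP, true) != POLL_HPD;
    }
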
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index a251886a0ce6..7933de4aff2e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -33,6 +33,8 @@
33#include "drmP.h" 33#include "drmP.h"
34#include "nouveau_drv.h" 34#include "nouveau_drv.h"
35 35
36#include <ttm/ttm_page_alloc.h>
37
36static int 38static int
37nouveau_debugfs_channel_info(struct seq_file *m, void *data) 39nouveau_debugfs_channel_info(struct seq_file *m, void *data)
38{ 40{
@@ -159,6 +161,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
159 { "chipset", nouveau_debugfs_chipset_info, 0, NULL }, 161 { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
160 { "memory", nouveau_debugfs_memory_info, 0, NULL }, 162 { "memory", nouveau_debugfs_memory_info, 0, NULL },
161 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, 163 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
164 { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
162}; 165};
163#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) 166#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
164 167
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cf1c5c0a0abe..74e6b4ed12c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -34,10 +34,6 @@ static void
34nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) 34nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
35{ 35{
36 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 36 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
37 struct drm_device *dev = drm_fb->dev;
38
39 if (drm_fb->fbdev)
40 nouveau_fbcon_remove(dev, drm_fb);
41 37
42 if (fb->nvbo) 38 if (fb->nvbo)
43 drm_gem_object_unreference_unlocked(fb->nvbo->gem); 39 drm_gem_object_unreference_unlocked(fb->nvbo->gem);
@@ -61,27 +57,20 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
61 .create_handle = nouveau_user_framebuffer_create_handle, 57 .create_handle = nouveau_user_framebuffer_create_handle,
62}; 58};
63 59
64struct drm_framebuffer * 60int
65nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo, 61nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
66 struct drm_mode_fb_cmd *mode_cmd) 62 struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
67{ 63{
68 struct nouveau_framebuffer *fb;
69 int ret; 64 int ret;
70 65
71 fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); 66 ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
72 if (!fb)
73 return NULL;
74
75 ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
76 if (ret) { 67 if (ret) {
77 kfree(fb); 68 return ret;
78 return NULL;
79 } 69 }
80 70
81 drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd); 71 drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
82 72 nouveau_fb->nvbo = nvbo;
83 fb->nvbo = nvbo; 73 return 0;
84 return &fb->base;
85} 74}
86 75
87static struct drm_framebuffer * 76static struct drm_framebuffer *
@@ -89,24 +78,29 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
89 struct drm_file *file_priv, 78 struct drm_file *file_priv,
90 struct drm_mode_fb_cmd *mode_cmd) 79 struct drm_mode_fb_cmd *mode_cmd)
91{ 80{
92 struct drm_framebuffer *fb; 81 struct nouveau_framebuffer *nouveau_fb;
93 struct drm_gem_object *gem; 82 struct drm_gem_object *gem;
83 int ret;
94 84
95 gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); 85 gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
96 if (!gem) 86 if (!gem)
97 return NULL; 87 return NULL;
98 88
99 fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd); 89 nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
100 if (!fb) { 90 if (!nouveau_fb)
91 return NULL;
92
93 ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
94 if (ret) {
101 drm_gem_object_unreference(gem); 95 drm_gem_object_unreference(gem);
102 return NULL; 96 return NULL;
103 } 97 }
104 98
105 return fb; 99 return &nouveau_fb->base;
106} 100}
107 101
108const struct drm_mode_config_funcs nouveau_mode_config_funcs = { 102const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
109 .fb_create = nouveau_user_framebuffer_create, 103 .fb_create = nouveau_user_framebuffer_create,
110 .fb_changed = nouveau_fbcon_probe, 104 .output_poll_changed = nouveau_fbcon_output_poll_changed,
111}; 105};
112 106
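
The shape of this refactor recurs across the series: an allocating nouveau_framebuffer_create() becomes a nouveau_framebuffer_init() that fills caller-provided storage, which is what lets the fbdev code embed the framebuffer inside struct nouveau_fbdev instead of chasing a pointer. A small before/after sketch, with hypothetical types:

    #include <stdlib.h>

    struct fb   { int width, height; };
    struct myfb { struct fb base; void *bo; };  /* embeds the base object */

    /* Before: constructor allocates and returns an opaque base pointer. */
    static struct fb *fb_create(int w, int h)
    {
        struct myfb *m = calloc(1, sizeof(*m));
        if (!m)
            return NULL;
        m->base.width = w;
        m->base.height = h;
        return &m->base;
    }

    /* After: init-only; the caller owns (and may embed) the storage. */
    static int fb_init(struct myfb *m, int w, int h, void *bo)
    {
        m->base.width = w;
        m->base.height = h;
        m->bo = bo;
        return 0;
    }

    int main(void)
    {
        struct myfb embedded;
        struct fb *heap = fb_create(640, 480);

        fb_init(&embedded, 640, 480, NULL);
        free(heap);   /* valid: base is the first member of struct myfb */
        return 0;
    }
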
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 1de974acbc65..c6079e36669d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -153,7 +153,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
153 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 153 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
154 struct nouveau_channel *chan; 154 struct nouveau_channel *chan;
155 struct drm_crtc *crtc; 155 struct drm_crtc *crtc;
156 uint32_t fbdev_flags;
157 int ret, i; 156 int ret, i;
158 157
159 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 158 if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -163,8 +162,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
163 return 0; 162 return 0;
164 163
165 NV_INFO(dev, "Disabling fbcon acceleration...\n"); 164 NV_INFO(dev, "Disabling fbcon acceleration...\n");
166 fbdev_flags = dev_priv->fbdev_info->flags; 165 nouveau_fbcon_save_disable_accel(dev);
167 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
168 166
169 NV_INFO(dev, "Unpinning framebuffer(s)...\n"); 167 NV_INFO(dev, "Unpinning framebuffer(s)...\n");
170 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 168 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -230,9 +228,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
230 } 228 }
231 229
232 acquire_console_sem(); 230 acquire_console_sem();
233 fb_set_suspend(dev_priv->fbdev_info, 1); 231 nouveau_fbcon_set_suspend(dev, 1);
234 release_console_sem(); 232 release_console_sem();
235 dev_priv->fbdev_info->flags = fbdev_flags; 233 nouveau_fbcon_restore_accel(dev);
236 return 0; 234 return 0;
237 235
238out_abort: 236out_abort:
@@ -250,14 +248,12 @@ nouveau_pci_resume(struct pci_dev *pdev)
250 struct drm_nouveau_private *dev_priv = dev->dev_private; 248 struct drm_nouveau_private *dev_priv = dev->dev_private;
251 struct nouveau_engine *engine = &dev_priv->engine; 249 struct nouveau_engine *engine = &dev_priv->engine;
252 struct drm_crtc *crtc; 250 struct drm_crtc *crtc;
253 uint32_t fbdev_flags;
254 int ret, i; 251 int ret, i;
255 252
256 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 253 if (!drm_core_check_feature(dev, DRIVER_MODESET))
257 return -ENODEV; 254 return -ENODEV;
258 255
259 fbdev_flags = dev_priv->fbdev_info->flags; 256 nouveau_fbcon_save_disable_accel(dev);
260 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
261 257
262 NV_INFO(dev, "We're back, enabling device...\n"); 258 NV_INFO(dev, "We're back, enabling device...\n");
263 pci_set_power_state(pdev, PCI_D0); 259 pci_set_power_state(pdev, PCI_D0);
@@ -332,13 +328,14 @@ nouveau_pci_resume(struct pci_dev *pdev)
332 } 328 }
333 329
334 acquire_console_sem(); 330 acquire_console_sem();
335 fb_set_suspend(dev_priv->fbdev_info, 0); 331 nouveau_fbcon_set_suspend(dev, 0);
336 release_console_sem(); 332 release_console_sem();
337 333
338 nouveau_fbcon_zfill(dev); 334 nouveau_fbcon_zfill_all(dev);
339 335
340 drm_helper_resume_force_mode(dev); 336 drm_helper_resume_force_mode(dev);
341 dev_priv->fbdev_info->flags = fbdev_flags; 337
338 nouveau_fbcon_restore_accel(dev);
342 return 0; 339 return 0;
343} 340}
344 341
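
The suspend and resume paths no longer keep a local copy of the fbdev flags; the saved value moves into the fbdev emulation state behind nouveau_fbcon_save_disable_accel() and nouveau_fbcon_restore_accel(). The pattern itself is small; a sketch, with a hypothetical fbdev_state in place of struct fb_info:

    #include <stdint.h>

    #define HWACCEL_DISABLED 0x1u  /* stand-in for FBINFO_HWACCEL_DISABLED */

    struct fbdev_state {
        uint32_t flags;
        uint32_t saved_flags;      /* lives with the device, not the stack */
    };

    static void save_disable_accel(struct fbdev_state *s)
    {
        s->saved_flags = s->flags; /* remember what the console had */
        s->flags |= HWACCEL_DISABLED;
    }

    static void restore_accel(struct fbdev_state *s)
    {
        s->flags = s->saved_flags; /* put it back exactly as found */
    }

    int main(void)
    {
        struct fbdev_state s = { .flags = 0x40 };
        save_disable_accel(&s);
        restore_accel(&s);
        return s.flags != 0x40;    /* 0: flags restored exactly */
    }

Keeping the saved value with the device also lets the IRQ handler drop its own flag juggling, as the nouveau_irq.c hunk further down does.
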
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index ace630aa89e1..5b134438effe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -535,6 +535,7 @@ struct drm_nouveau_private {
535 535
536 struct fb_info *fbdev_info; 536 struct fb_info *fbdev_info;
537 537
538 int fifo_alloc_count;
538 struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; 539 struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
539 540
540 struct nouveau_engine engine; 541 struct nouveau_engine engine;
@@ -621,6 +622,9 @@ struct drm_nouveau_private {
621 struct { 622 struct {
622 struct dentry *channel_root; 623 struct dentry *channel_root;
623 } debugfs; 624 } debugfs;
625
626 struct nouveau_fbdev *nfbdev;
627 struct apertures_struct *apertures;
624}; 628};
625 629
626static inline struct drm_nouveau_private * 630static inline struct drm_nouveau_private *
@@ -1166,6 +1170,12 @@ int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1166int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); 1170int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1167int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); 1171int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1168 1172
1173/* nv50_calc. */
1174int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
1175 int *N1, int *M1, int *N2, int *M2, int *P);
1176int nv50_calc_pll2(struct drm_device *, struct pll_lims *,
1177 int clk, int *N, int *fN, int *M, int *P);
1178
1169#ifndef ioread32_native 1179#ifndef ioread32_native
1170#ifdef __BIG_ENDIAN 1180#ifdef __BIG_ENDIAN
1171#define ioread16_native ioread16be 1181#define ioread16_native ioread16be
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 9f28b94e479b..e1df8209cd0f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -48,6 +48,8 @@ struct nouveau_encoder {
48 union { 48 union {
49 struct { 49 struct {
50 int mc_unknown; 50 int mc_unknown;
51 uint32_t unk0;
52 uint32_t unk1;
51 int dpcd_version; 53 int dpcd_version;
52 int link_nr; 54 int link_nr;
53 int link_bw; 55 int link_bw;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index 4a3f31aa1949..d432134b71e0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -40,8 +40,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
40 40
41extern const struct drm_mode_config_funcs nouveau_mode_config_funcs; 41extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
42 42
43struct drm_framebuffer * 43int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
44nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *, 44 struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
45 struct drm_mode_fb_cmd *);
46
47#endif /* __NOUVEAU_FB_H__ */ 45#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8e7dc1d4912a..fd4a2df715e9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -52,8 +52,8 @@
52static int 52static int
53nouveau_fbcon_sync(struct fb_info *info) 53nouveau_fbcon_sync(struct fb_info *info)
54{ 54{
55 struct nouveau_fbcon_par *par = info->par; 55 struct nouveau_fbdev *nfbdev = info->par;
56 struct drm_device *dev = par->dev; 56 struct drm_device *dev = nfbdev->dev;
57 struct drm_nouveau_private *dev_priv = dev->dev_private; 57 struct drm_nouveau_private *dev_priv = dev->dev_private;
58 struct nouveau_channel *chan = dev_priv->channel; 58 struct nouveau_channel *chan = dev_priv->channel;
59 int ret, i; 59 int ret, i;
@@ -97,7 +97,6 @@ static struct fb_ops nouveau_fbcon_ops = {
97 .owner = THIS_MODULE, 97 .owner = THIS_MODULE,
98 .fb_check_var = drm_fb_helper_check_var, 98 .fb_check_var = drm_fb_helper_check_var,
99 .fb_set_par = drm_fb_helper_set_par, 99 .fb_set_par = drm_fb_helper_set_par,
100 .fb_setcolreg = drm_fb_helper_setcolreg,
101 .fb_fillrect = cfb_fillrect, 100 .fb_fillrect = cfb_fillrect,
102 .fb_copyarea = cfb_copyarea, 101 .fb_copyarea = cfb_copyarea,
103 .fb_imageblit = cfb_imageblit, 102 .fb_imageblit = cfb_imageblit,
@@ -111,7 +110,6 @@ static struct fb_ops nv04_fbcon_ops = {
111 .owner = THIS_MODULE, 110 .owner = THIS_MODULE,
112 .fb_check_var = drm_fb_helper_check_var, 111 .fb_check_var = drm_fb_helper_check_var,
113 .fb_set_par = drm_fb_helper_set_par, 112 .fb_set_par = drm_fb_helper_set_par,
114 .fb_setcolreg = drm_fb_helper_setcolreg,
115 .fb_fillrect = nv04_fbcon_fillrect, 113 .fb_fillrect = nv04_fbcon_fillrect,
116 .fb_copyarea = nv04_fbcon_copyarea, 114 .fb_copyarea = nv04_fbcon_copyarea,
117 .fb_imageblit = nv04_fbcon_imageblit, 115 .fb_imageblit = nv04_fbcon_imageblit,
@@ -125,7 +123,6 @@ static struct fb_ops nv50_fbcon_ops = {
125 .owner = THIS_MODULE, 123 .owner = THIS_MODULE,
126 .fb_check_var = drm_fb_helper_check_var, 124 .fb_check_var = drm_fb_helper_check_var,
127 .fb_set_par = drm_fb_helper_set_par, 125 .fb_set_par = drm_fb_helper_set_par,
128 .fb_setcolreg = drm_fb_helper_setcolreg,
129 .fb_fillrect = nv50_fbcon_fillrect, 126 .fb_fillrect = nv50_fbcon_fillrect,
130 .fb_copyarea = nv50_fbcon_copyarea, 127 .fb_copyarea = nv50_fbcon_copyarea,
131 .fb_imageblit = nv50_fbcon_imageblit, 128 .fb_imageblit = nv50_fbcon_imageblit,
@@ -155,54 +152,10 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
155 *blue = nv_crtc->lut.b[regno]; 152 *blue = nv_crtc->lut.b[regno];
156} 153}
157 154
158static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { 155static void
159 .gamma_set = nouveau_fbcon_gamma_set, 156nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
160 .gamma_get = nouveau_fbcon_gamma_get
161};
162
163#if defined(__i386__) || defined(__x86_64__)
164static bool
165nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
166{
167 struct pci_dev *pdev = dev->pdev;
168 int ramin;
169
170 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
171 screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
172 return false;
173
174 if (screen_info.lfb_base < pci_resource_start(pdev, 1))
175 goto not_fb;
176
177 if (screen_info.lfb_base + screen_info.lfb_size >=
178 pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
179 goto not_fb;
180
181 return true;
182not_fb:
183 ramin = 2;
184 if (pci_resource_len(pdev, ramin) == 0) {
185 ramin = 3;
186 if (pci_resource_len(pdev, ramin) == 0)
187 return false;
188 }
189
190 if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
191 return false;
192
193 if (screen_info.lfb_base + screen_info.lfb_size >=
194 pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
195 return false;
196
197 return true;
198}
199#endif
200
201void
202nouveau_fbcon_zfill(struct drm_device *dev)
203{ 157{
204 struct drm_nouveau_private *dev_priv = dev->dev_private; 158 struct fb_info *info = nfbdev->helper.fbdev;
205 struct fb_info *info = dev_priv->fbdev_info;
206 struct fb_fillrect rect; 159 struct fb_fillrect rect;
207 160
208 /* Clear the entire fbcon. The drm will program every connector 161 /* Clear the entire fbcon. The drm will program every connector
@@ -218,28 +171,27 @@ nouveau_fbcon_zfill(struct drm_device *dev)
218} 171}
219 172
220static int 173static int
221nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, 174nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
222 uint32_t fb_height, uint32_t surface_width, 175 struct drm_fb_helper_surface_size *sizes)
223 uint32_t surface_height, uint32_t surface_depth,
224 uint32_t surface_bpp, struct drm_framebuffer **pfb)
225{ 176{
177 struct drm_device *dev = nfbdev->dev;
226 struct drm_nouveau_private *dev_priv = dev->dev_private; 178 struct drm_nouveau_private *dev_priv = dev->dev_private;
227 struct fb_info *info; 179 struct fb_info *info;
228 struct nouveau_fbcon_par *par;
229 struct drm_framebuffer *fb; 180 struct drm_framebuffer *fb;
230 struct nouveau_framebuffer *nouveau_fb; 181 struct nouveau_framebuffer *nouveau_fb;
231 struct nouveau_bo *nvbo; 182 struct nouveau_bo *nvbo;
232 struct drm_mode_fb_cmd mode_cmd; 183 struct drm_mode_fb_cmd mode_cmd;
233 struct device *device = &dev->pdev->dev; 184 struct pci_dev *pdev = dev->pdev;
185 struct device *device = &pdev->dev;
234 int size, ret; 186 int size, ret;
235 187
236 mode_cmd.width = surface_width; 188 mode_cmd.width = sizes->surface_width;
237 mode_cmd.height = surface_height; 189 mode_cmd.height = sizes->surface_height;
238 190
239 mode_cmd.bpp = surface_bpp; 191 mode_cmd.bpp = sizes->surface_bpp;
240 mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3); 192 mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
241 mode_cmd.pitch = roundup(mode_cmd.pitch, 256); 193 mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
242 mode_cmd.depth = surface_depth; 194 mode_cmd.depth = sizes->surface_depth;
243 195
244 size = mode_cmd.pitch * mode_cmd.height; 196 size = mode_cmd.pitch * mode_cmd.height;
245 size = roundup(size, PAGE_SIZE); 197 size = roundup(size, PAGE_SIZE);
@@ -268,31 +220,28 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
268 220
269 mutex_lock(&dev->struct_mutex); 221 mutex_lock(&dev->struct_mutex);
270 222
271 fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd); 223 info = framebuffer_alloc(0, device);
272 if (!fb) { 224 if (!info) {
273 ret = -ENOMEM; 225 ret = -ENOMEM;
274 NV_ERROR(dev, "failed to allocate fb.\n");
275 goto out_unref; 226 goto out_unref;
276 } 227 }
277 228
278 list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); 229 ret = fb_alloc_cmap(&info->cmap, 256, 0);
279 230 if (ret) {
280 nouveau_fb = nouveau_framebuffer(fb);
281 *pfb = fb;
282
283 info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
284 if (!info) {
285 ret = -ENOMEM; 231 ret = -ENOMEM;
286 goto out_unref; 232 goto out_unref;
287 } 233 }
288 234
289 par = info->par; 235 info->par = nfbdev;
290 par->helper.funcs = &nouveau_fbcon_helper_funcs; 236
291 par->helper.dev = dev; 237 nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);
292 ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4); 238
293 if (ret) 239 nouveau_fb = &nfbdev->nouveau_fb;
294 goto out_unref; 240 fb = &nouveau_fb->base;
295 dev_priv->fbdev_info = info; 241
242 /* setup helper */
243 nfbdev->helper.fb = fb;
244 nfbdev->helper.fbdev = info;
296 245
297 strcpy(info->fix.id, "nouveaufb"); 246 strcpy(info->fix.id, "nouveaufb");
298 if (nouveau_nofbaccel) 247 if (nouveau_nofbaccel)
@@ -310,31 +259,17 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
310 info->screen_size = size; 259 info->screen_size = size;
311 260
312 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); 261 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
313 drm_fb_helper_fill_var(info, fb, fb_width, fb_height); 262 drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
314 263
315 /* FIXME: we really shouldn't expose mmio space at all */ 264 /* FIXME: we really shouldn't expose mmio space at all */
316 info->fix.mmio_start = pci_resource_start(dev->pdev, 1); 265 info->fix.mmio_start = pci_resource_start(pdev, 1);
317 info->fix.mmio_len = pci_resource_len(dev->pdev, 1); 266 info->fix.mmio_len = pci_resource_len(pdev, 1);
318 267
319 /* Set aperture base/size for vesafb takeover */ 268 /* Set aperture base/size for vesafb takeover */
320#if defined(__i386__) || defined(__x86_64__) 269 info->apertures = dev_priv->apertures;
321 if (nouveau_fbcon_has_vesafb_or_efifb(dev)) { 270 if (!info->apertures) {
322 /* Some NVIDIA VBIOS' are stupid and decide to put the 271 ret = -ENOMEM;
323 * framebuffer in the middle of the PRAMIN BAR for 272 goto out_unref;
324 * whatever reason. We need to know the exact lfb_base
325 * to get vesafb kicked off, and the only reliable way
326 * we have left is to find out lfb_base the same way
327 * vesafb did.
328 */
329 info->aperture_base = screen_info.lfb_base;
330 info->aperture_size = screen_info.lfb_size;
331 if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
332 info->aperture_size *= 65536;
333 } else
334#endif
335 {
336 info->aperture_base = info->fix.mmio_start;
337 info->aperture_size = info->fix.mmio_len;
338 } 273 }
339 274
340 info->pixmap.size = 64*1024; 275 info->pixmap.size = 64*1024;
@@ -343,11 +278,6 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
343 info->pixmap.flags = FB_PIXMAP_SYSTEM; 278 info->pixmap.flags = FB_PIXMAP_SYSTEM;
344 info->pixmap.scan_align = 1; 279 info->pixmap.scan_align = 1;
345 280
346 fb->fbdev = info;
347
348 par->nouveau_fb = nouveau_fb;
349 par->dev = dev;
350
351 if (dev_priv->channel && !nouveau_nofbaccel) { 281 if (dev_priv->channel && !nouveau_nofbaccel) {
352 switch (dev_priv->card_type) { 282 switch (dev_priv->card_type) {
353 case NV_50: 283 case NV_50:
@@ -361,7 +291,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
361 }; 291 };
362 } 292 }
363 293
364 nouveau_fbcon_zfill(dev); 294 nouveau_fbcon_zfill(dev, nfbdev);
365 295
366 /* To allow resizing without swapping buffers */ 296
367 NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n", 297 NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
@@ -379,44 +309,123 @@ out:
379 return ret; 309 return ret;
380} 310}
381 311
382int 312static int
383nouveau_fbcon_probe(struct drm_device *dev) 313nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
314 struct drm_fb_helper_surface_size *sizes)
384{ 315{
385 NV_DEBUG_KMS(dev, "\n"); 316 struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper;
317 int new_fb = 0;
318 int ret;
319
320 if (!helper->fb) {
321 ret = nouveau_fbcon_create(nfbdev, sizes);
322 if (ret)
323 return ret;
324 new_fb = 1;
325 }
326 return new_fb;
327}
386 328
387 return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create); 329void
330nouveau_fbcon_output_poll_changed(struct drm_device *dev)
331{
332 struct drm_nouveau_private *dev_priv = dev->dev_private;
333 drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper);
388} 334}
389 335
390int 336int
391nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb) 337nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
392{ 338{
393 struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb); 339 struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
394 struct fb_info *info; 340 struct fb_info *info;
395 341
396 if (!fb) 342 if (nfbdev->helper.fbdev) {
397 return -EINVAL; 343 info = nfbdev->helper.fbdev;
398
399 info = fb->fbdev;
400 if (info) {
401 struct nouveau_fbcon_par *par = info->par;
402
403 unregister_framebuffer(info); 344 unregister_framebuffer(info);
345 if (info->cmap.len)
346 fb_dealloc_cmap(&info->cmap);
347 framebuffer_release(info);
348 }
349
350 if (nouveau_fb->nvbo) {
404 nouveau_bo_unmap(nouveau_fb->nvbo); 351 nouveau_bo_unmap(nouveau_fb->nvbo);
405 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 352 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
406 nouveau_fb->nvbo = NULL; 353 nouveau_fb->nvbo = NULL;
407 if (par)
408 drm_fb_helper_free(&par->helper);
409 framebuffer_release(info);
410 } 354 }
411 355 drm_fb_helper_fini(&nfbdev->helper);
356 drm_framebuffer_cleanup(&nouveau_fb->base);
412 return 0; 357 return 0;
413} 358}
414 359
415void nouveau_fbcon_gpu_lockup(struct fb_info *info) 360void nouveau_fbcon_gpu_lockup(struct fb_info *info)
416{ 361{
417 struct nouveau_fbcon_par *par = info->par; 362 struct nouveau_fbdev *nfbdev = info->par;
418 struct drm_device *dev = par->dev; 363 struct drm_device *dev = nfbdev->dev;
419 364
420 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 365 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
421 info->flags |= FBINFO_HWACCEL_DISABLED; 366 info->flags |= FBINFO_HWACCEL_DISABLED;
422} 367}
368
369static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
370 .gamma_set = nouveau_fbcon_gamma_set,
371 .gamma_get = nouveau_fbcon_gamma_get,
372 .fb_probe = nouveau_fbcon_find_or_create_single,
373};
374
375
376int nouveau_fbcon_init(struct drm_device *dev)
377{
378 struct drm_nouveau_private *dev_priv = dev->dev_private;
379 struct nouveau_fbdev *nfbdev;
380
381 nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
382 if (!nfbdev)
383 return -ENOMEM;
384
385 nfbdev->dev = dev;
386 dev_priv->nfbdev = nfbdev;
387 nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
388
389 drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
390 drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
391 drm_fb_helper_initial_config(&nfbdev->helper, 32);
392 return 0;
393}
394
395void nouveau_fbcon_fini(struct drm_device *dev)
396{
397 struct drm_nouveau_private *dev_priv = dev->dev_private;
398
399 if (!dev_priv->nfbdev)
400 return;
401
402 nouveau_fbcon_destroy(dev, dev_priv->nfbdev);
403 kfree(dev_priv->nfbdev);
404 dev_priv->nfbdev = NULL;
405}
406
407void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
408{
409 struct drm_nouveau_private *dev_priv = dev->dev_private;
410
411 dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags;
412 dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
413}
414
415void nouveau_fbcon_restore_accel(struct drm_device *dev)
416{
417 struct drm_nouveau_private *dev_priv = dev->dev_private;
418 dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags;
419}
420
421void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
422{
423 struct drm_nouveau_private *dev_priv = dev->dev_private;
424 fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
425}
426
427void nouveau_fbcon_zfill_all(struct drm_device *dev)
428{
429 struct drm_nouveau_private *dev_priv = dev->dev_private;
430 nouveau_fbcon_zfill(dev, dev_priv->nfbdev);
431}
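
One contract worth calling out in nouveau_fbcon_find_or_create_single() earlier in this file: as used by the drm_fb_helper core of this era, .fb_probe returns a negative errno on failure, 1 when it created a new framebuffer (telling the helper to register the fbdev), and 0 when the existing one can be reused. A stand-in sketch of that contract:

    struct helper_stub { void *fb; };

    /* returns <0 on error, 1 if a new fb was created, 0 if reused */
    static int find_or_create_single(struct helper_stub *h)
    {
        if (h->fb)
            return 0;          /* keep the framebuffer we already have */
        h->fb = &h->fb;        /* stand-in for nouveau_fbcon_create() */
        return 1;              /* ask the helper core to register it */
    }

    int main(void)
    {
        struct helper_stub h = { 0 };
        int first = find_or_create_single(&h);   /* 1: created */
        int again = find_or_create_single(&h);   /* 0: reused */
        return !(first == 1 && again == 0);
    }
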
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index f9c34e1a8c11..e7e12684c37e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -29,16 +29,16 @@
29 29
30#include "drm_fb_helper.h" 30#include "drm_fb_helper.h"
31 31
32struct nouveau_fbcon_par { 32#include "nouveau_fb.h"
33struct nouveau_fbdev {
33 struct drm_fb_helper helper; 34 struct drm_fb_helper helper;
35 struct nouveau_framebuffer nouveau_fb;
36 struct list_head fbdev_list;
34 struct drm_device *dev; 37 struct drm_device *dev;
35 struct nouveau_framebuffer *nouveau_fb; 38 unsigned int saved_flags;
36}; 39};
37 40
38int nouveau_fbcon_probe(struct drm_device *dev);
39int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
40void nouveau_fbcon_restore(void); 41void nouveau_fbcon_restore(void);
41void nouveau_fbcon_zfill(struct drm_device *dev);
42 42
43void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); 43void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
44void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); 44void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -50,5 +50,14 @@ void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
50int nv50_fbcon_accel_init(struct fb_info *info); 50int nv50_fbcon_accel_init(struct fb_info *info);
51 51
52void nouveau_fbcon_gpu_lockup(struct fb_info *info); 52void nouveau_fbcon_gpu_lockup(struct fb_info *info);
53
54int nouveau_fbcon_init(struct drm_device *dev);
55void nouveau_fbcon_fini(struct drm_device *dev);
56void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
57void nouveau_fbcon_zfill_all(struct drm_device *dev);
58void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
59void nouveau_fbcon_restore_accel(struct drm_device *dev);
60
61void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
53#endif /* __NV50_FBCON_H__ */ 62#endif /* __NV50_FBCON_H__ */
54 63
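
The renamed struct keeps the same recovery idiom used throughout the fbcon code: the driver stores its per-device pointer in info->par when the fb_info is allocated, and every fb_ops callback fetches it back from there. A self-contained sketch of the idiom, with stub types in place of fb_info and nouveau_fbdev:

    #include <stdio.h>

    struct fb_info_stub       { void *par; };
    struct nouveau_fbdev_stub { int dev_id; };

    static void fbcon_op(struct fb_info_stub *info)
    {
        /* was struct nouveau_fbcon_par before this patch */
        struct nouveau_fbdev_stub *nfbdev = info->par;

        printf("drawing on device %d\n", nfbdev->dev_id);
    }

    int main(void)
    {
        struct nouveau_fbdev_stub n = { .dev_id = 0 };
        struct fb_info_stub info = { .par = &n };

        fbcon_op(&info);
        return 0;
    }
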
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 1bc0b38a5167..69c76cf93407 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -57,6 +57,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
57 } 57 }
58 58
59 ttm_bo_unref(&bo); 59 ttm_bo_unref(&bo);
60
61 drm_gem_object_release(gem);
62 kfree(gem);
60} 63}
61 64
62int 65int
@@ -382,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
382 385
383 nvbo->channel = chan; 386 nvbo->channel = chan;
384 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, 387 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
385 false, false); 388 false, false, false);
386 nvbo->channel = NULL; 389 nvbo->channel = NULL;
387 if (unlikely(ret)) { 390 if (unlikely(ret)) {
388 NV_ERROR(dev, "fail ttm_validate\n"); 391 NV_ERROR(dev, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index 32f0e495464c..f731c5f60536 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -68,13 +68,12 @@ nouveau_grctx_prog_load(struct drm_device *dev)
68 return ret; 68 return ret;
69 } 69 }
70 70
71 pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL); 71 pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL);
72 if (!pgraph->ctxprog) { 72 if (!pgraph->ctxprog) {
73 NV_ERROR(dev, "OOM copying ctxprog\n"); 73 NV_ERROR(dev, "OOM copying ctxprog\n");
74 release_firmware(fw); 74 release_firmware(fw);
75 return -ENOMEM; 75 return -ENOMEM;
76 } 76 }
77 memcpy(pgraph->ctxprog, fw->data, fw->size);
78 77
79 cp = pgraph->ctxprog; 78 cp = pgraph->ctxprog;
80 if (le32_to_cpu(cp->signature) != 0x5043564e || 79 if (le32_to_cpu(cp->signature) != 0x5043564e ||
@@ -97,14 +96,13 @@ nouveau_grctx_prog_load(struct drm_device *dev)
97 return ret; 96 return ret;
98 } 97 }
99 98
100 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); 99 pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL);
101 if (!pgraph->ctxvals) { 100 if (!pgraph->ctxvals) {
102 NV_ERROR(dev, "OOM copying ctxvals\n"); 101 NV_ERROR(dev, "OOM copying ctxvals\n");
103 release_firmware(fw); 102 release_firmware(fw);
104 nouveau_grctx_fini(dev); 103 nouveau_grctx_fini(dev);
105 return -ENOMEM; 104 return -ENOMEM;
106 } 105 }
107 memcpy(pgraph->ctxvals, fw->data, fw->size);
108 106
109 cv = (void *)pgraph->ctxvals; 107 cv = (void *)pgraph->ctxvals;
110 if (le32_to_cpu(cv->signature) != 0x5643564e || 108 if (le32_to_cpu(cv->signature) != 0x5643564e ||
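
The two hunks above are a straight substitution of kmemdup() for the open-coded kmalloc()+memcpy() pair: one call, one failure path, and no window in which the buffer exists but still holds garbage. For reference, a userspace equivalent of what kmemdup() does:

    #include <stdlib.h>
    #include <string.h>

    /* userspace stand-in for the kernel's kmemdup() */
    static void *memdup(const void *src, size_t len)
    {
        void *p = malloc(len);

        if (p)
            memcpy(p, src, len);
        return p;   /* caller frees, just as kfree() pairs with kmemdup() */
    }

    int main(void)
    {
        char *copy = memdup("ctxprog", 8);

        free(copy);
        return 0;
    }
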
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 88583e7bf651..316a3c7e6eb4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -254,16 +254,27 @@ struct nouveau_i2c_chan *
254nouveau_i2c_find(struct drm_device *dev, int index) 254nouveau_i2c_find(struct drm_device *dev, int index)
255{ 255{
256 struct drm_nouveau_private *dev_priv = dev->dev_private; 256 struct drm_nouveau_private *dev_priv = dev->dev_private;
257 struct nvbios *bios = &dev_priv->vbios; 257 struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
258 258
259 if (index >= DCB_MAX_NUM_I2C_ENTRIES) 259 if (index >= DCB_MAX_NUM_I2C_ENTRIES)
260 return NULL; 260 return NULL;
261 261
262 if (!bios->dcb.i2c[index].chan) { 262 if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) {
263 if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index)) 263 uint32_t reg = 0xe500, val;
264 return NULL; 264
265 if (i2c->port_type == 6) {
266 reg += i2c->read * 0x50;
267 val = 0x2002;
268 } else {
269 reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
270 val = 0xe001;
271 }
272
273 nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val);
265 } 274 }
266 275
267 return bios->dcb.i2c[index].chan; 276 if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
277 return NULL;
278 return i2c->chan;
268} 279}
269 280
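
The new NV50+ branch in nouveau_i2c_find() performs a read-modify-write on a per-port register before handing the channel out: pick the register at 0xe500 + port * 0x50, clear the mode field (0xf003), then set the requested mode (0x2002 for the port_type 6 case, 0xe001 otherwise). A sketch of that poke over a fake register file; rd32()/wr32() are stand-ins for nv_rd32()/nv_wr32():

    #include <stdint.h>

    static uint32_t regs[0x10000];

    static uint32_t rd32(uint32_t r)             { return regs[r >> 2]; }
    static void     wr32(uint32_t r, uint32_t v) { regs[r >> 2] = v; }

    static void i2c_port_enable(uint32_t port, int is_aux)
    {
        uint32_t reg = 0xe500 + port * 0x50;
        uint32_t val = is_aux ? 0x2002 : 0xe001;  /* values from the hunk */

        /* clear the mode bits, then set the requested mode */
        wr32(reg, (rd32(reg) & ~0xf003u) | val);
    }

    int main(void)
    {
        i2c_port_enable(2, 0);
        return rd32(0xe500 + 2 * 0x50) != 0xe001;
    }
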
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 13e73cee4c44..53360f156063 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -1204,7 +1204,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
1204{ 1204{
1205 struct drm_device *dev = (struct drm_device *)arg; 1205 struct drm_device *dev = (struct drm_device *)arg;
1206 struct drm_nouveau_private *dev_priv = dev->dev_private; 1206 struct drm_nouveau_private *dev_priv = dev->dev_private;
1207 uint32_t status, fbdev_flags = 0; 1207 uint32_t status;
1208 unsigned long flags; 1208 unsigned long flags;
1209 1209
1210 status = nv_rd32(dev, NV03_PMC_INTR_0); 1210 status = nv_rd32(dev, NV03_PMC_INTR_0);
@@ -1213,11 +1213,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
1213 1213
1214 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 1214 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
1215 1215
1216 if (dev_priv->fbdev_info) {
1217 fbdev_flags = dev_priv->fbdev_info->flags;
1218 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
1219 }
1220
1221 if (status & NV_PMC_INTR_0_PFIFO_PENDING) { 1216 if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
1222 nouveau_fifo_irq_handler(dev); 1217 nouveau_fifo_irq_handler(dev);
1223 status &= ~NV_PMC_INTR_0_PFIFO_PENDING; 1218 status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
@@ -1247,9 +1242,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
1247 if (status) 1242 if (status)
1248 NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status); 1243 NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
1249 1244
1250 if (dev_priv->fbdev_info)
1251 dev_priv->fbdev_info->flags = fbdev_flags;
1252
1253 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 1245 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
1254 1246
1255 return IRQ_HANDLED; 1247 return IRQ_HANDLED;
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index aa9b310e41be..6ca80a3fe70d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -826,6 +826,7 @@
826#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000 826#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
827#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) 827#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
828#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) 828#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
829#define NV50_SOR_DP_UNK128(i,l) (0x0061c128 + (i) * 0x800 + (l) * 0x80)
829#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80) 830#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
830 831
831#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000) 832#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e1710640a278..e632339c323e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -34,6 +34,7 @@
34 34
35#include "nouveau_drv.h" 35#include "nouveau_drv.h"
36#include "nouveau_drm.h" 36#include "nouveau_drm.h"
37#include "nouveau_fbcon.h"
37#include "nv50_display.h" 38#include "nv50_display.h"
38 39
39static void nouveau_stub_takedown(struct drm_device *dev) {} 40static void nouveau_stub_takedown(struct drm_device *dev) {}
@@ -515,8 +516,10 @@ nouveau_card_init(struct drm_device *dev)
515 516
516 dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; 517 dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
517 518
518 if (drm_core_check_feature(dev, DRIVER_MODESET)) 519 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
519 drm_helper_initial_config(dev); 520 nouveau_fbcon_init(dev);
521 drm_kms_helper_poll_init(dev);
522 }
520 523
521 return 0; 524 return 0;
522 525
@@ -563,6 +566,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
563 NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); 566 NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
564 567
565 if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { 568 if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
569
566 nouveau_backlight_exit(dev); 570 nouveau_backlight_exit(dev);
567 571
568 if (dev_priv->channel) { 572 if (dev_priv->channel) {
@@ -637,6 +641,48 @@ static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
637#endif 641#endif
638} 642}
639 643
644static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev)
645{
646 struct pci_dev *pdev = dev->pdev;
647 struct apertures_struct *aper = alloc_apertures(3);
648 if (!aper)
649 return NULL;
650
651 aper->ranges[0].base = pci_resource_start(pdev, 1);
652 aper->ranges[0].size = pci_resource_len(pdev, 1);
653 aper->count = 1;
654
655 if (pci_resource_len(pdev, 2)) {
656 aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
657 aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
658 aper->count++;
659 }
660
661 if (pci_resource_len(pdev, 3)) {
662 aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
663 aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
664 aper->count++;
665 }
666
667 return aper;
668}
669
670static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
671{
672 struct drm_nouveau_private *dev_priv = dev->dev_private;
673 bool primary = false;
674 dev_priv->apertures = nouveau_get_apertures(dev);
675 if (!dev_priv->apertures)
676 return -ENOMEM;
677
678#ifdef CONFIG_X86
679 primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
680#endif
681
682 remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
683 return 0;
684}
685
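
nouveau_get_apertures() above builds the list handed to remove_conflicting_framebuffers(): BAR1 (VRAM) always, BAR2 and BAR3 (where some VBIOSes place the boot framebuffer) only when they exist, which is why alloc_apertures(3) is sized for three ranges. A compilable sketch of the same construction, with stand-ins for alloc_apertures() and the PCI resource accessors:

    #include <stdint.h>
    #include <stdlib.h>

    struct range     { uint64_t base, size; };
    struct apertures { unsigned int count; struct range ranges[3]; };

    /* bar[] stands in for pci_resource_start()/pci_resource_len() */
    static struct apertures *build_apertures(const struct range bar[4])
    {
        struct apertures *a = calloc(1, sizeof(*a));
        int i;

        if (!a)
            return NULL;

        a->ranges[a->count++] = bar[1];   /* BAR1: VRAM, always present */
        for (i = 2; i <= 3; i++)          /* BAR2/BAR3: only if sized */
            if (bar[i].size)
                a->ranges[a->count++] = bar[i];
        return a;
    }

    int main(void)
    {
        const struct range bar[4] = {
            { 0, 0 },
            { 0xd0000000u, 1u << 28 },    /* BAR1 */
            { 0xe0000000u, 1u << 24 },    /* BAR2 */
            { 0, 0 },                     /* BAR3 absent */
        };
        struct apertures *a = build_apertures(bar);

        free(a);
        return 0;
    }
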
640int nouveau_load(struct drm_device *dev, unsigned long flags) 686int nouveau_load(struct drm_device *dev, unsigned long flags)
641{ 687{
642 struct drm_nouveau_private *dev_priv; 688 struct drm_nouveau_private *dev_priv;
@@ -724,6 +770,12 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
724 NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", 770 NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
725 dev_priv->card_type, reg0); 771 dev_priv->card_type, reg0);
726 772
773 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
774 int ret = nouveau_remove_conflicting_drivers(dev);
775 if (ret)
776 return ret;
777 }
778
727 /* map larger RAMIN aperture on NV40 cards */ 779 /* map larger RAMIN aperture on NV40 cards */
728 dev_priv->ramin = NULL; 780 dev_priv->ramin = NULL;
729 if (dev_priv->card_type >= NV_40) { 781 if (dev_priv->card_type >= NV_40) {
@@ -794,6 +846,8 @@ int nouveau_unload(struct drm_device *dev)
794 struct drm_nouveau_private *dev_priv = dev->dev_private; 846 struct drm_nouveau_private *dev_priv = dev->dev_private;
795 847
796 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 848 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
849 drm_kms_helper_poll_fini(dev);
850 nouveau_fbcon_fini(dev);
797 if (dev_priv->card_type >= NV_50) 851 if (dev_priv->card_type >= NV_50)
798 nv50_display_destroy(dev); 852 nv50_display_destroy(dev);
799 else 853 else
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 813b25cec726..1eeac4fae73d 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -30,8 +30,8 @@
30void 30void
31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
32{ 32{
33 struct nouveau_fbcon_par *par = info->par; 33 struct nouveau_fbdev *nfbdev = info->par;
34 struct drm_device *dev = par->dev; 34 struct drm_device *dev = nfbdev->dev;
35 struct drm_nouveau_private *dev_priv = dev->dev_private; 35 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_channel *chan = dev_priv->channel; 36 struct nouveau_channel *chan = dev_priv->channel;
37 37
@@ -57,8 +57,8 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
57void 57void
58nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 58nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
59{ 59{
60 struct nouveau_fbcon_par *par = info->par; 60 struct nouveau_fbdev *nfbdev = info->par;
61 struct drm_device *dev = par->dev; 61 struct drm_device *dev = nfbdev->dev;
62 struct drm_nouveau_private *dev_priv = dev->dev_private; 62 struct drm_nouveau_private *dev_priv = dev->dev_private;
63 struct nouveau_channel *chan = dev_priv->channel; 63 struct nouveau_channel *chan = dev_priv->channel;
64 64
@@ -91,8 +91,8 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
91void 91void
92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
93{ 93{
94 struct nouveau_fbcon_par *par = info->par; 94 struct nouveau_fbdev *nfbdev = info->par;
95 struct drm_device *dev = par->dev; 95 struct drm_device *dev = nfbdev->dev;
96 struct drm_nouveau_private *dev_priv = dev->dev_private; 96 struct drm_nouveau_private *dev_priv = dev->dev_private;
97 struct nouveau_channel *chan = dev_priv->channel; 97 struct nouveau_channel *chan = dev_priv->channel;
98 uint32_t fg; 98 uint32_t fg;
@@ -179,8 +179,8 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
179int 179int
180nv04_fbcon_accel_init(struct fb_info *info) 180nv04_fbcon_accel_init(struct fb_info *info)
181{ 181{
182 struct nouveau_fbcon_par *par = info->par; 182 struct nouveau_fbdev *nfbdev = info->par;
183 struct drm_device *dev = par->dev; 183 struct drm_device *dev = nfbdev->dev;
184 struct drm_nouveau_private *dev_priv = dev->dev_private; 184 struct drm_nouveau_private *dev_priv = dev->dev_private;
185 struct nouveau_channel *chan = dev_priv->channel; 185 struct nouveau_channel *chan = dev_priv->channel;
186 const int sub = NvSubCtxSurf2D; 186 const int sub = NvSubCtxSurf2D;
@@ -236,7 +236,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
236 if (ret) 236 if (ret)
237 return ret; 237 return ret;
238 238
239 ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ? 239 ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
240 0x009f : 0x005f, NvImageBlit); 240 0x009f : 0x005f, NvImageBlit);
241 if (ret) 241 if (ret)
242 return ret; 242 return ret;
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index e260986ea65a..618355e9cdd5 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -532,9 +532,82 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
532 return 0; 532 return 0;
533} 533}
534 534
535static int 535/*
536nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, 536 * Software methods, why they are needed, and how they all work:
537 int mthd, uint32_t data) 537 *
538 * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
539 * 2d engine settings are kept inside the grobjs themselves. The grobjs are
540 * 3 words long on both. grobj format on NV04 is:
541 *
542 * word 0:
543 * - bits 0-7: class
544 * - bit 12: color key active
545 * - bit 13: clip rect active
546 * - bit 14: if set, destination surface is swizzled and taken from buffer 5
547 * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
548 * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
549 * NV03_CONTEXT_SURFACE_DST].
550 * - bits 15-17: 2d operation [aka patch config]
551 * - bit 24: patch valid [enables rendering using this object]
552 * - bit 25: surf3d valid [for tex_tri and multitex_tri only]
553 * word 1:
554 * - bits 0-1: mono format
555 * - bits 8-13: color format
556 * - bits 16-31: DMA_NOTIFY instance
557 * word 2:
558 * - bits 0-15: DMA_A instance
559 * - bits 16-31: DMA_B instance
560 *
561 * On NV05 it's:
562 *
563 * word 0:
564 * - bits 0-7: class
565 * - bit 12: color key active
566 * - bit 13: clip rect active
567 * - bit 14: if set, destination surface is swizzled and taken from buffer 5
568 * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
569 * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
570 * NV03_CONTEXT_SURFACE_DST].
571 * - bits 15-17: 2d operation [aka patch config]
572 * - bits 20-22: dither mode
573 * - bit 24: patch valid [enables rendering using this object]
574 * - bit 25: surface_dst/surface_color/surf2d/surf3d valid
575 * - bit 26: surface_src/surface_zeta valid
576 * - bit 27: pattern valid
577 * - bit 28: rop valid
578 * - bit 29: beta1 valid
579 * - bit 30: beta4 valid
580 * word 1:
581 * - bits 0-1: mono format
582 * - bits 8-13: color format
583 * - bits 16-31: DMA_NOTIFY instance
584 * word 2:
585 * - bits 0-15: DMA_A instance
586 * - bits 16-31: DMA_B instance
587 *
588 * NV05 will set/unset the relevant valid bits when you poke the relevant
589 * object-binding methods with object of the proper type, or with the NULL
590 * type. It'll only allow rendering using the grobj if all needed objects
591 * are bound. The needed set of objects depends on selected operation: for
592 * example rop object is needed by ROP_AND, but not by SRCCOPY_AND.
593 *
594 * NV04 doesn't have these methods implemented at all, and doesn't have the
595 * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
596 * is set. So we have to emulate them in software, internally keeping the
597 * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04,
598 * but the last word isn't actually used for anything, we abuse it for this
599 * purpose.
600 *
601 * Actually, NV05 can optionally check bit 24 too, but we disable this since
602 * there's no use for it.
603 *
604 * For unknown reasons, NV04 implements surf3d binding in hardware as an
605 * exception. Also for unknown reasons, NV04 doesn't implement the clipping
606 * methods on the surf3d object, so we have to emulate them too.
607 */
608
609static void
610nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
538{ 611{
539 struct drm_device *dev = chan->dev; 612 struct drm_device *dev = chan->dev;
540 uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; 613 uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
@@ -542,42 +615,509 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
542 uint32_t tmp; 615 uint32_t tmp;
543 616
544 tmp = nv_ri32(dev, instance); 617 tmp = nv_ri32(dev, instance);
545 tmp &= ~0x00038000; 618 tmp &= ~mask;
546 tmp |= ((data & 7) << 15); 619 tmp |= value;
547 620
548 nv_wi32(dev, instance, tmp); 621 nv_wi32(dev, instance, tmp);
549 nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp); 622 nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
550 nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp); 623 nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
624}
625
626static void
627nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
628{
629 struct drm_device *dev = chan->dev;
630 uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
631 uint32_t tmp, ctx1;
632 int class, op, valid = 1;
633
634 ctx1 = nv_ri32(dev, instance);
635 class = ctx1 & 0xff;
636 op = (ctx1 >> 15) & 7;
637 tmp = nv_ri32(dev, instance + 0xc);
638 tmp &= ~mask;
639 tmp |= value;
640 nv_wi32(dev, instance + 0xc, tmp);
641
642 /* check for valid surf2d/surf_dst/surf_color */
643 if (!(tmp & 0x02000000))
644 valid = 0;
645 /* check for valid surf_src/surf_zeta */
646 if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
647 valid = 0;
648
649 switch (op) {
650 /* SRCCOPY_AND, SRCCOPY: no extra objects required */
651 case 0:
652 case 3:
653 break;
654 /* ROP_AND: requires pattern and rop */
655 case 1:
656 if (!(tmp & 0x18000000))
657 valid = 0;
658 break;
659 /* BLEND_AND: requires beta1 */
660 case 2:
661 if (!(tmp & 0x20000000))
662 valid = 0;
663 break;
664 /* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
665 case 4:
666 case 5:
667 if (!(tmp & 0x40000000))
668 valid = 0;
669 break;
670 }
671
672 nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
673}
674
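
The rule nv04_graph_set_ctx_val() enforces can be read straight out of the switch above: a grobj is valid only when the destination surface is bound, the source surface too for the blit/sifm classes (0x1f, 0x48), and whatever extra objects the selected 2d operation demands. The same rule as a lookup table, with the bit values copied from the hunk (illustrative only):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SURF_DST  0x02000000
    #define SURF_SRC  0x04000000
    #define PATTERN   0x08000000
    #define ROP       0x10000000
    #define BETA1     0x20000000
    #define BETA4     0x40000000

    /* extra objects required per 2d operation (ops 0..5) */
    static const uint32_t op_needs[6] = {
        [0] = 0,                /* SRCCOPY_AND */
        [1] = PATTERN | ROP,    /* ROP_AND */
        [2] = BETA1,            /* BLEND_AND */
        [3] = 0,                /* SRCCOPY */
        [4] = BETA4,            /* SRCCOPY_PREMULT */
        [5] = BETA4,            /* BLEND_PREMULT */
    };

    static bool grobj_valid(uint32_t ctx_val, int op, bool needs_src)
    {
        uint32_t need = SURF_DST | op_needs[op];

        if (needs_src)          /* blit/sifm classes also need a source */
            need |= SURF_SRC;
        return (ctx_val & need) == need;
    }

    int main(void)
    {
        /* ROP_AND with only the destination bound: not valid */
        printf("%d\n", grobj_valid(SURF_DST, 1, false));
        /* ROP_AND with pattern and rop bound as well: valid */
        printf("%d\n", grobj_valid(SURF_DST | PATTERN | ROP, 1, false));
        return 0;
    }
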
675static int
676nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
677 int mthd, uint32_t data)
678{
679 if (data > 5)
680 return 1;
681 /* Old versions of the objects only accept the first three operations. */
682 if (data > 2 && grclass < 0x40)
683 return 1;
684 nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
685 /* changing operation changes set of objects needed for validation */
686 nv04_graph_set_ctx_val(chan, 0, 0);
687 return 0;
688}
689
690static int
691nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
692 int mthd, uint32_t data)
693{
694 uint32_t min = data & 0xffff, max;
695 uint32_t w = data >> 16;
696 if (min & 0x8000)
697 /* too large */
698 return 1;
699 if (w & 0x8000)
700 /* yes, it accepts negative for some reason. */
701 w |= 0xffff0000;
702 max = min + w;
703 max &= 0x3ffff;
704 nv_wr32(chan->dev, 0x40053c, min);
705 nv_wr32(chan->dev, 0x400544, max);
706 return 0;
707}
708
709static int
710nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
711 int mthd, uint32_t data)
712{
713 uint32_t min = data & 0xffff, max;
714 uint32_t w = data >> 16;
715 if (min & 0x8000)
716 /* too large */
717 return 1;
718 if (w & 0x8000)
719 /* yes, it accepts negative for some reason. */
720 w |= 0xffff0000;
721 max = min + w;
722 max &= 0x3ffff;
723 nv_wr32(chan->dev, 0x400540, min);
724 nv_wr32(chan->dev, 0x400548, max);
551 return 0; 725 return 0;
552} 726}
553 727
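
The two clip methods above share one subtlety: the high half of the method data is a signed 16-bit width, so a negative width must be sign-extended before it is added to the minimum, and the hardware keeps only 18 bits of the result. A compilable sketch of the math:

    #include <stdint.h>
    #include <stdio.h>

    static int clip_range(uint32_t data, uint32_t *min, uint32_t *max)
    {
        uint32_t lo = data & 0xffff;
        uint32_t w  = data >> 16;

        if (lo & 0x8000)            /* minimum out of range */
            return 1;
        if (w & 0x8000)             /* negative width: sign-extend */
            w |= 0xffff0000;
        *min = lo;
        *max = (lo + w) & 0x3ffff;  /* hardware keeps 18 bits */
        return 0;
    }

    int main(void)
    {
        uint32_t min, max;

        clip_range(0xfffe0010, &min, &max);  /* width -2 from x=16 */
        printf("min=%u max=%u\n", (unsigned)min, (unsigned)max);  /* 16, 14 */
        return 0;
    }
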
728static int
729nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
730 int mthd, uint32_t data)
731{
732 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
733 case 0x30:
734 nv04_graph_set_ctx1(chan, 0x00004000, 0);
735 nv04_graph_set_ctx_val(chan, 0x02000000, 0);
736 return 0;
737 case 0x42:
738 nv04_graph_set_ctx1(chan, 0x00004000, 0);
739 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
740 return 0;
741 }
742 return 1;
743}
744
745static int
746nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
747 int mthd, uint32_t data)
748{
749 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
750 case 0x30:
751 nv04_graph_set_ctx1(chan, 0x00004000, 0);
752 nv04_graph_set_ctx_val(chan, 0x02000000, 0);
753 return 0;
754 case 0x42:
755 nv04_graph_set_ctx1(chan, 0x00004000, 0);
756 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
757 return 0;
758 case 0x52:
759 nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
760 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
761 return 0;
762 }
763 return 1;
764}
765
766static int
767nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
768 int mthd, uint32_t data)
769{
770 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
771 case 0x30:
772 nv04_graph_set_ctx_val(chan, 0x08000000, 0);
773 return 0;
774 case 0x18:
775 nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
776 return 0;
777 }
778 return 1;
779}
780
781static int
782nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
783 int mthd, uint32_t data)
784{
785 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
786 case 0x30:
787 nv04_graph_set_ctx_val(chan, 0x08000000, 0);
788 return 0;
789 case 0x44:
790 nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
791 return 0;
792 }
793 return 1;
794}
795
796static int
797nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
798 int mthd, uint32_t data)
799{
800 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
801 case 0x30:
802 nv04_graph_set_ctx_val(chan, 0x10000000, 0);
803 return 0;
804 case 0x43:
805 nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
806 return 0;
807 }
808 return 1;
809}
810
811static int
812nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
813 int mthd, uint32_t data)
814{
815 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
816 case 0x30:
817 nv04_graph_set_ctx_val(chan, 0x20000000, 0);
818 return 0;
819 case 0x12:
820 nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
821 return 0;
822 }
823 return 1;
824}
825
826static int
827nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
828 int mthd, uint32_t data)
829{
830 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
831 case 0x30:
832 nv04_graph_set_ctx_val(chan, 0x40000000, 0);
833 return 0;
834 case 0x72:
835 nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
836 return 0;
837 }
838 return 1;
839}
840
841static int
842nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
843 int mthd, uint32_t data)
844{
845 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
846 case 0x30:
847 nv04_graph_set_ctx_val(chan, 0x02000000, 0);
848 return 0;
849 case 0x58:
850 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
851 return 0;
852 }
853 return 1;
854}
855
856static int
857nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
858 int mthd, uint32_t data)
859{
860 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
861 case 0x30:
862 nv04_graph_set_ctx_val(chan, 0x04000000, 0);
863 return 0;
864 case 0x59:
865 nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
866 return 0;
867 }
868 return 1;
869}
870
871static int
872nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
873 int mthd, uint32_t data)
874{
875 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
876 case 0x30:
877 nv04_graph_set_ctx_val(chan, 0x02000000, 0);
878 return 0;
879 case 0x5a:
880 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
881 return 0;
882 }
883 return 1;
884}
885
886static int
887nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
888 int mthd, uint32_t data)
889{
890 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
891 case 0x30:
892 nv04_graph_set_ctx_val(chan, 0x04000000, 0);
893 return 0;
894 case 0x5b:
895 nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
896 return 0;
897 }
898 return 1;
899}
900
901static int
902nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
903 int mthd, uint32_t data)
904{
905 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
906 case 0x30:
907 nv04_graph_set_ctx1(chan, 0x2000, 0);
908 return 0;
909 case 0x19:
910 nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
911 return 0;
912 }
913 return 1;
914}
915
916static int
917nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
918 int mthd, uint32_t data)
919{
920 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
921 case 0x30:
922 nv04_graph_set_ctx1(chan, 0x1000, 0);
923 return 0;
924 /* Yes, for some reason even the old versions of objects
925 * accept 0x57 and not 0x17. Consistency be damned.
926 */
927 case 0x57:
928 nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
929 return 0;
930 }
931 return 1;
932}
933
554static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = { 934static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
555 { 0x0150, nv04_graph_mthd_set_ref }, 935 { 0x0150, nv04_graph_mthd_set_ref },
556 {} 936 {}
557}; 937};
558 938
559static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = { 939static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
940 { 0x0184, nv04_graph_mthd_bind_nv01_patt },
941 { 0x0188, nv04_graph_mthd_bind_rop },
942 { 0x018c, nv04_graph_mthd_bind_beta1 },
943 { 0x0190, nv04_graph_mthd_bind_surf_dst },
944 { 0x02fc, nv04_graph_mthd_set_operation },
945 {},
946};
947
948static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
949 { 0x0188, nv04_graph_mthd_bind_nv04_patt },
950 { 0x018c, nv04_graph_mthd_bind_rop },
951 { 0x0190, nv04_graph_mthd_bind_beta1 },
952 { 0x0194, nv04_graph_mthd_bind_beta4 },
953 { 0x0198, nv04_graph_mthd_bind_surf2d },
954 { 0x02fc, nv04_graph_mthd_set_operation },
955 {},
956};
957
958static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
959 { 0x0184, nv04_graph_mthd_bind_chroma },
960 { 0x0188, nv04_graph_mthd_bind_clip },
961 { 0x018c, nv04_graph_mthd_bind_nv01_patt },
962 { 0x0190, nv04_graph_mthd_bind_rop },
963 { 0x0194, nv04_graph_mthd_bind_beta1 },
964 { 0x0198, nv04_graph_mthd_bind_surf_dst },
965 { 0x019c, nv04_graph_mthd_bind_surf_src },
966 { 0x02fc, nv04_graph_mthd_set_operation },
967 {},
968};
969
970static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
971 { 0x0184, nv04_graph_mthd_bind_chroma },
972 { 0x0188, nv04_graph_mthd_bind_clip },
973 { 0x018c, nv04_graph_mthd_bind_nv04_patt },
974 { 0x0190, nv04_graph_mthd_bind_rop },
975 { 0x0194, nv04_graph_mthd_bind_beta1 },
976 { 0x0198, nv04_graph_mthd_bind_beta4 },
977 { 0x019c, nv04_graph_mthd_bind_surf2d },
978 { 0x02fc, nv04_graph_mthd_set_operation },
979 {},
980};
981
982static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
983 { 0x0188, nv04_graph_mthd_bind_chroma },
984 { 0x018c, nv04_graph_mthd_bind_clip },
985 { 0x0190, nv04_graph_mthd_bind_nv04_patt },
986 { 0x0194, nv04_graph_mthd_bind_rop },
987 { 0x0198, nv04_graph_mthd_bind_beta1 },
988 { 0x019c, nv04_graph_mthd_bind_beta4 },
989 { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
990 { 0x03e4, nv04_graph_mthd_set_operation },
991 {},
992};
993
994static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
995 { 0x0184, nv04_graph_mthd_bind_chroma },
996 { 0x0188, nv04_graph_mthd_bind_clip },
997 { 0x018c, nv04_graph_mthd_bind_nv01_patt },
998 { 0x0190, nv04_graph_mthd_bind_rop },
999 { 0x0194, nv04_graph_mthd_bind_beta1 },
1000 { 0x0198, nv04_graph_mthd_bind_surf_dst },
1001 { 0x02fc, nv04_graph_mthd_set_operation },
1002 {},
1003};
1004
1005static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
1006 { 0x0184, nv04_graph_mthd_bind_chroma },
1007 { 0x0188, nv04_graph_mthd_bind_nv01_patt },
1008 { 0x018c, nv04_graph_mthd_bind_rop },
1009 { 0x0190, nv04_graph_mthd_bind_beta1 },
1010 { 0x0194, nv04_graph_mthd_bind_surf_dst },
560 { 0x02fc, nv04_graph_mthd_set_operation }, 1011 { 0x02fc, nv04_graph_mthd_set_operation },
561 {}, 1012 {},
562}; 1013};
563 1014
1015static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
1016 { 0x0184, nv04_graph_mthd_bind_chroma },
1017 { 0x0188, nv04_graph_mthd_bind_nv04_patt },
1018 { 0x018c, nv04_graph_mthd_bind_rop },
1019 { 0x0190, nv04_graph_mthd_bind_beta1 },
1020 { 0x0194, nv04_graph_mthd_bind_beta4 },
1021 { 0x0198, nv04_graph_mthd_bind_surf2d },
1022 { 0x02fc, nv04_graph_mthd_set_operation },
1023 {},
1024};
1025
1026static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
1027 { 0x0188, nv04_graph_mthd_bind_nv01_patt },
1028 { 0x018c, nv04_graph_mthd_bind_rop },
1029 { 0x0190, nv04_graph_mthd_bind_beta1 },
1030 { 0x0194, nv04_graph_mthd_bind_surf_dst },
1031 { 0x0304, nv04_graph_mthd_set_operation },
1032 {},
1033};
1034
1035static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
1036 { 0x0188, nv04_graph_mthd_bind_nv04_patt },
1037 { 0x018c, nv04_graph_mthd_bind_rop },
1038 { 0x0190, nv04_graph_mthd_bind_beta1 },
1039 { 0x0194, nv04_graph_mthd_bind_beta4 },
1040 { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
1041 { 0x0304, nv04_graph_mthd_set_operation },
1042 {},
1043};
1044
1045static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
1046 { 0x0184, nv04_graph_mthd_bind_clip },
1047 { 0x0188, nv04_graph_mthd_bind_nv01_patt },
1048 { 0x018c, nv04_graph_mthd_bind_rop },
1049 { 0x0190, nv04_graph_mthd_bind_beta1 },
1050 { 0x0194, nv04_graph_mthd_bind_surf_dst },
1051 { 0x02fc, nv04_graph_mthd_set_operation },
1052 {},
1053};
1054
1055static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
1056 { 0x0184, nv04_graph_mthd_bind_clip },
1057 { 0x0188, nv04_graph_mthd_bind_nv04_patt },
1058 { 0x018c, nv04_graph_mthd_bind_rop },
1059 { 0x0190, nv04_graph_mthd_bind_beta1 },
1060 { 0x0194, nv04_graph_mthd_bind_beta4 },
1061 { 0x0198, nv04_graph_mthd_bind_surf2d },
1062 { 0x02fc, nv04_graph_mthd_set_operation },
1063 {},
1064};
1065
1066static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
1067 { 0x0188, nv04_graph_mthd_bind_clip },
1068 { 0x018c, nv04_graph_mthd_bind_surf_color },
1069 { 0x0190, nv04_graph_mthd_bind_surf_zeta },
1070 {},
1071};
1072
1073static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
1074 { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
1075 { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
1076 {},
1077};
1078
564struct nouveau_pgraph_object_class nv04_graph_grclass[] = { 1079struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
565 { 0x0039, false, NULL }, 1080 { 0x0038, false, NULL }, /* dvd subpicture */
566 { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */ 1081 { 0x0039, false, NULL }, /* m2mf */
567 { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */ 1082 { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
568 { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */ 1083 { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
569 { 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */ 1084 { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
1085 { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
1086 { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
1087 { 0x0064, false, NULL }, /* nv05 iifc */
1088 { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
1089 { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
1090 { 0x0065, false, NULL }, /* nv05 ifc */
1091 { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
1092 { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
1093 { 0x0066, false, NULL }, /* nv05 sifc */
1094 { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
1095 { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
570 { 0x0030, false, NULL }, /* null */ 1096 { 0x0030, false, NULL }, /* null */
571 { 0x0042, false, NULL }, /* surf2d */ 1097 { 0x0042, false, NULL }, /* surf2d */
572 { 0x0043, false, NULL }, /* rop */ 1098 { 0x0043, false, NULL }, /* rop */
573 { 0x0012, false, NULL }, /* beta1 */ 1099 { 0x0012, false, NULL }, /* beta1 */
574 { 0x0072, false, NULL }, /* beta4 */ 1100 { 0x0072, false, NULL }, /* beta4 */
575 { 0x0019, false, NULL }, /* cliprect */ 1101 { 0x0019, false, NULL }, /* cliprect */
576 { 0x0044, false, NULL }, /* pattern */ 1102 { 0x0018, false, NULL }, /* nv01 pattern */
1103 { 0x0044, false, NULL }, /* nv04 pattern */
577 { 0x0052, false, NULL }, /* swzsurf */ 1104 { 0x0052, false, NULL }, /* swzsurf */
578 { 0x0053, false, NULL }, /* surf3d */ 1105 { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
1106 { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
579 { 0x0054, false, NULL }, /* tex_tri */ 1107 { 0x0054, false, NULL }, /* tex_tri */
580 { 0x0055, false, NULL }, /* multitex_tri */ 1108 { 0x0055, false, NULL }, /* multitex_tri */
1109 { 0x0017, false, NULL }, /* nv01 chroma */
1110 { 0x0057, false, NULL }, /* nv04 chroma */
1111 { 0x0058, false, NULL }, /* surf_dst */
1112 { 0x0059, false, NULL }, /* surf_src */
1113 { 0x005a, false, NULL }, /* surf_color */
1114 { 0x005b, false, NULL }, /* surf_zeta */
1115 { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
1116 { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
1117 { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
1118 { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
1119 { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
1120 { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
581 { 0x506e, true, nv04_graph_mthds_sw }, 1121 { 0x506e, true, nv04_graph_mthds_sw },
582 {} 1122 {}
583}; 1123};
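
The bind handlers above all share one shape: read the class of the object being bound out of instance memory (nv_ri32 at data << 4), flip fixed bits in the channel's ctx1/ctx_val words for the classes the method accepts, and return 1 to reject anything else. A minimal table-driven sketch of that shape, assuming a hypothetical struct bind_rule in place of the masks each real handler hard-codes:

	/* Sketch only: the real handlers hard-code these masks per method,
	 * as in nv04_graph_mthd_bind_rop() above. */
	struct bind_rule {
		uint8_t  oclass;		/* low byte of the bound object's class */
		uint32_t ctx1_mask, ctx1_val;	/* bits for nv04_graph_set_ctx1() */
		uint32_t ctxv_mask, ctxv_val;	/* bits for nv04_graph_set_ctx_val() */
	};

	static int
	bind_by_table(struct nouveau_channel *chan, uint32_t data,
		      const struct bind_rule *rule, int nr_rules)
	{
		uint8_t oclass = nv_ri32(chan->dev, data << 4) & 0xff;
		int i;

		for (i = 0; i < nr_rules; i++, rule++) {
			if (rule->oclass != oclass)
				continue;
			if (rule->ctx1_mask)
				nv04_graph_set_ctx1(chan, rule->ctx1_mask, rule->ctx1_val);
			if (rule->ctxv_mask)
				nv04_graph_set_ctx_val(chan, rule->ctxv_mask, rule->ctxv_val);
			return 0;
		}
		return 1;	/* unknown class: refuse the bind */
	}

The per-method copies in the actual patch trade this indirection for grep-ability, which matters in a driver built largely from reverse engineering.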
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index 11b11c31f543..9b5c97469588 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -115,11 +115,6 @@
115 115
116/* TODO: 116/* TODO:
117 * - get vs count from 0x1540 117 * - get vs count from 0x1540
118 * - document unimplemented bits compared to nvidia
119 * - nsource handling
120 * - R0 & 0x0200 handling
121 * - single-vs handling
122 * - 400314 bit 0
123 */ 118 */
124 119
125static int 120static int
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c
new file mode 100644
index 000000000000..2cdc2bfe7179
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_calc.c
@@ -0,0 +1,87 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "drm_fixed.h"
27#include "nouveau_drv.h"
28#include "nouveau_hw.h"
29
30int
31nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
32 int *N1, int *M1, int *N2, int *M2, int *P)
33{
34 struct nouveau_pll_vals pll_vals;
35 int ret;
36
37 ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals);
38 if (ret <= 0)
39 return ret;
40
41 *N1 = pll_vals.N1;
42 *M1 = pll_vals.M1;
43 *N2 = pll_vals.N2;
44 *M2 = pll_vals.M2;
45 *P = pll_vals.log2P;
46 return ret;
47}
48
49int
50nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk,
51 int *N, int *fN, int *M, int *P)
52{
53 fixed20_12 fb_div, a, b;
54
55 *P = pll->vco1.maxfreq / clk;
56 if (*P > pll->max_p)
57 *P = pll->max_p;
58 if (*P < pll->min_p)
59 *P = pll->min_p;
60
 61 /* *M = ceil(refclk / pll->vco1.max_inputfreq); */
62 a.full = dfixed_const(pll->refclk);
63 b.full = dfixed_const(pll->vco1.max_inputfreq);
64 a.full = dfixed_div(a, b);
65 a.full = dfixed_ceil(a);
66 *M = dfixed_trunc(a);
67
68 /* fb_div = (vco * *M) / refclk; */
69 fb_div.full = dfixed_const(clk * *P);
70 fb_div.full = dfixed_mul(fb_div, a);
71 a.full = dfixed_const(pll->refclk);
72 fb_div.full = dfixed_div(fb_div, a);
73
74 /* *N = floor(fb_div); */
75 a.full = dfixed_floor(fb_div);
76 *N = dfixed_trunc(fb_div);
77
78 /* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */
79 b.full = dfixed_const(8192);
80 a.full = dfixed_mul(a, b);
81 fb_div.full = dfixed_mul(fb_div, b);
82 fb_div.full = fb_div.full - a.full;
83 *fN = dfixed_trunc(fb_div) - 4096;
84 *fN &= 0xffff;
85
86 return clk;
87}
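
For readers untangling the dfixed_* arithmetic above, here is the same computation restated in plain floating point (illustration only; kernel code cannot use FP, which is exactly why nv50_calc_pll2() goes through the drm_fixed.h helpers):

	/* Floating-point restatement of nv50_calc_pll2(); a sketch, not
	 * usable in-kernel. Inputs mirror the pll_lims fields used above. */
	static int
	calc_pll2_float(int refclk, int vco1_max_inputfreq, int vco1_maxfreq,
			int min_p, int max_p, int clk,
			int *N, int *fN, int *M, int *P)
	{
		double fb_div, frac;

		/* post-divider: as large as the VCO ceiling allows, clamped */
		*P = vco1_maxfreq / clk;
		if (*P > max_p) *P = max_p;
		if (*P < min_p) *P = min_p;

		/* *M = ceil(refclk / vco1.max_inputfreq) */
		*M = (refclk + vco1_max_inputfreq - 1) / vco1_max_inputfreq;

		/* feedback divider for vco = clk * P */
		fb_div = (double)(clk * *P) * *M / refclk;

		*N = (int)fb_div;		/* floor(), since fb_div >= 0 */
		frac = fb_div - *N;
		*fN = ((int)(frac * 8192.0) - 4096) & 0xffff;
		return clk;
	}

The fractional part is scaled to 1/8192 steps and biased by -4096, matching the signed 13-bit field the hardware expects.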
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index cfabeb974a56..b4e4a3b05eae 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -264,32 +264,40 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
264int 264int
265nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) 265nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
266{ 266{
267 uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head); 267 uint32_t reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
268 struct nouveau_pll_vals pll; 268 struct pll_lims pll;
269 struct pll_lims limits;
270 uint32_t reg1, reg2; 269 uint32_t reg1, reg2;
271 int ret; 270 int ret, N1, M1, N2, M2, P;
272 271
273 ret = get_pll_limits(dev, pll_reg, &limits); 272 ret = get_pll_limits(dev, reg, &pll);
274 if (ret) 273 if (ret)
275 return ret; 274 return ret;
276 275
277 ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll); 276 if (pll.vco2.maxfreq) {
278 if (ret <= 0) 277 ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P);
279 return ret; 278 if (ret <= 0)
279 return 0;
280
281 NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
282 pclk, ret, N1, M1, N2, M2, P);
280 283
281 if (limits.vco2.maxfreq) { 284 reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00;
282 reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00; 285 reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00;
283 reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00; 286 nv_wr32(dev, reg, 0x10000611);
284 nv_wr32(dev, pll_reg, 0x10000611); 287 nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1);
285 nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1); 288 nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
286 nv_wr32(dev, pll_reg + 8,
287 reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
288 } else { 289 } else {
289 reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000; 290 ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
290 nv_wr32(dev, pll_reg, 0x50000610); 291 if (ret <= 0)
291 nv_wr32(dev, pll_reg + 4, reg1 | 292 return 0;
292 (pll.log2P << 16) | (pll.M1 << 8) | pll.N1); 293
294 NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
295 pclk, ret, N1, N2, M1, P);
296
297 reg1 = nv_rd32(dev, reg + 4) & 0xffc00000;
298 nv_wr32(dev, reg, 0x50000610);
299 nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
300 nv_wr32(dev, reg + 8, N2);
293 } 301 }
294 302
295 return 0; 303 return 0;
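
Condensed, the rewritten clock path reads: fetch the PLL limits once, then branch on whether a second VCO exists. A sketch with the masks and magic values copied verbatim from the hunk above (the helper name is hypothetical):

	/* Hypothetical condensation of nv50_crtc_set_clock()'s programming
	 * step; constants taken directly from the hunk above. */
	static void
	nv50_crtc_program_pll(struct drm_device *dev, uint32_t reg,
			      struct pll_lims *pll, int pclk)
	{
		int N1, M1, N2, M2, P;

		if (pll->vco2.maxfreq) {
			/* two-stage PLL */
			if (nv50_calc_pll(dev, pll, pclk, &N1, &M1, &N2, &M2, &P) <= 0)
				return;
			nv_wr32(dev, reg, 0x10000611);
			nv_wr32(dev, reg + 4, (nv_rd32(dev, reg + 4) & 0xff00ff00) |
					      (M1 << 16) | N1);
			nv_wr32(dev, reg + 8, (nv_rd32(dev, reg + 8) & 0x8000ff00) |
					      (P << 28) | (M2 << 16) | N2);
		} else {
			/* single-stage PLL with fractional feedback divider:
			 * here N1 holds N, N2 holds fN, M1 holds M */
			if (nv50_calc_pll2(dev, pll, pclk, &N1, &N2, &M1, &P) <= 0)
				return;
			nv_wr32(dev, reg, 0x50000610);
			nv_wr32(dev, reg + 4, (nv_rd32(dev, reg + 4) & 0xffc00000) |
					      (P << 16) | (M1 << 8) | N1);
			nv_wr32(dev, reg + 8, N2);
		}
	}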
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 649db4c1b690..580a5d10be93 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -29,6 +29,7 @@
29#include "nouveau_encoder.h" 29#include "nouveau_encoder.h"
30#include "nouveau_connector.h" 30#include "nouveau_connector.h"
31#include "nouveau_fb.h" 31#include "nouveau_fb.h"
32#include "nouveau_fbcon.h"
32#include "drm_crtc_helper.h" 33#include "drm_crtc_helper.h"
33 34
34static void 35static void
@@ -783,6 +784,37 @@ ack:
783} 784}
784 785
785static void 786static void
787nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
788{
789 int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
790 struct drm_encoder *encoder;
791 uint32_t tmp, unk0 = 0, unk1 = 0;
792
793 if (dcb->type != OUTPUT_DP)
794 return;
795
796 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
797 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
798
799 if (nv_encoder->dcb == dcb) {
800 unk0 = nv_encoder->dp.unk0;
801 unk1 = nv_encoder->dp.unk1;
802 break;
803 }
804 }
805
806 if (unk0 || unk1) {
807 tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
808 tmp &= 0xfffffe03;
809 nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0);
810
811 tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
812 tmp &= 0xfef080c0;
813 nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1);
814 }
815}
816
817static void
786nv50_display_unk20_handler(struct drm_device *dev) 818nv50_display_unk20_handler(struct drm_device *dev)
787{ 819{
788 struct dcb_entry *dcbent; 820 struct dcb_entry *dcbent;
@@ -805,6 +837,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
805 837
806 nouveau_bios_run_display_table(dev, dcbent, script, pclk); 838 nouveau_bios_run_display_table(dev, dcbent, script, pclk);
807 839
840 nv50_display_unk20_dp_hack(dev, dcbent);
841
808 tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head)); 842 tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
809 tmp &= ~0x000000f; 843 tmp &= ~0x000000f;
810 nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp); 844 nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
@@ -945,6 +979,8 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
945 nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054)); 979 nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
946 if (dev_priv->chipset >= 0x90) 980 if (dev_priv->chipset >= 0x90)
947 nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); 981 nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
982
983 drm_helper_hpd_irq_event(dev);
948} 984}
949 985
950void 986void
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index a8c70e7e9184..6bf025c6fc6f 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -6,8 +6,8 @@
6void 6void
7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
8{ 8{
9 struct nouveau_fbcon_par *par = info->par; 9 struct nouveau_fbdev *nfbdev = info->par;
10 struct drm_device *dev = par->dev; 10 struct drm_device *dev = nfbdev->dev;
11 struct drm_nouveau_private *dev_priv = dev->dev_private; 11 struct drm_nouveau_private *dev_priv = dev->dev_private;
12 struct nouveau_channel *chan = dev_priv->channel; 12 struct nouveau_channel *chan = dev_priv->channel;
13 13
@@ -49,8 +49,8 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
49void 49void
50nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 50nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
51{ 51{
52 struct nouveau_fbcon_par *par = info->par; 52 struct nouveau_fbdev *nfbdev = info->par;
53 struct drm_device *dev = par->dev; 53 struct drm_device *dev = nfbdev->dev;
54 struct drm_nouveau_private *dev_priv = dev->dev_private; 54 struct drm_nouveau_private *dev_priv = dev->dev_private;
55 struct nouveau_channel *chan = dev_priv->channel; 55 struct nouveau_channel *chan = dev_priv->channel;
56 56
@@ -84,8 +84,8 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
84void 84void
85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
86{ 86{
87 struct nouveau_fbcon_par *par = info->par; 87 struct nouveau_fbdev *nfbdev = info->par;
88 struct drm_device *dev = par->dev; 88 struct drm_device *dev = nfbdev->dev;
89 struct drm_nouveau_private *dev_priv = dev->dev_private; 89 struct drm_nouveau_private *dev_priv = dev->dev_private;
90 struct nouveau_channel *chan = dev_priv->channel; 90 struct nouveau_channel *chan = dev_priv->channel;
91 uint32_t width, dwords, *data = (uint32_t *)image->data; 91 uint32_t width, dwords, *data = (uint32_t *)image->data;
@@ -152,8 +152,8 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
152int 152int
153nv50_fbcon_accel_init(struct fb_info *info) 153nv50_fbcon_accel_init(struct fb_info *info)
154{ 154{
155 struct nouveau_fbcon_par *par = info->par; 155 struct nouveau_fbdev *nfbdev = info->par;
156 struct drm_device *dev = par->dev; 156 struct drm_device *dev = nfbdev->dev;
157 struct drm_nouveau_private *dev_priv = dev->dev_private; 157 struct drm_nouveau_private *dev_priv = dev->dev_private;
158 struct nouveau_channel *chan = dev_priv->channel; 158 struct nouveau_channel *chan = dev_priv->channel;
159 struct nouveau_gpuobj *eng2d = NULL; 159 struct nouveau_gpuobj *eng2d = NULL;
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 0c68698f23df..b11eaf9c5c7c 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -321,18 +321,23 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
321 encoder->possible_clones = 0; 321 encoder->possible_clones = 0;
322 322
323 if (nv_encoder->dcb->type == OUTPUT_DP) { 323 if (nv_encoder->dcb->type == OUTPUT_DP) {
324 uint32_t mc, or = nv_encoder->or; 324 int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
325 uint32_t tmp;
325 326
326 if (dev_priv->chipset < 0x90 || 327 if (dev_priv->chipset < 0x90 ||
327 dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) 328 dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
328 mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or)); 329 tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
329 else 330 else
330 mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or)); 331 tmp = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
331 332
332 switch ((mc & 0x00000f00) >> 8) { 333 switch ((tmp & 0x00000f00) >> 8) {
333 case 8: 334 case 8:
334 case 9: 335 case 9:
335 nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16; 336 nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16;
337 tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
338 nv_encoder->dp.unk0 = tmp & 0x000001fc;
339 tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
340 nv_encoder->dp.unk1 = tmp & 0x010f7f3f;
336 break; 341 break;
337 default: 342 default:
338 break; 343 break;
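
Read together with the nv50_display.c hunk earlier, this forms a save/restore pair: at SOR creation the board-programmed bits of two DP registers are stashed (masks 0x000001fc and 0x010f7f3f), and the unk20 handler later clears exactly the complementary bits (0xfffffe03 and 0xfef080c0) before OR-ing the stashed values back. A condensed sketch of the pair, with hypothetical helper names:

	/* Sketch; masks copied from the two hunks. Note each restore mask
	 * is the exact bitwise complement of its save mask. */
	static void
	nv50_dp_save_unk(struct drm_device *dev, int or, int link,
			 uint32_t *unk0, uint32_t *unk1)
	{
		*unk0 = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & 0x000001fc;
		*unk1 = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link)) & 0x010f7f3f;
	}

	static void
	nv50_dp_restore_unk(struct drm_device *dev, int or, int link,
			    uint32_t unk0, uint32_t unk1)
	{
		uint32_t tmp;

		tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & 0xfffffe03;
		nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0);

		tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link)) & 0xfef080c0;
		nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1);
	}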
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 5319d9e2f7ba..1bc72c3190a9 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -5742,6 +5742,9 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
5742#define ATOM_PP_THERMALCONTROLLER_RV6xx 7 5742#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
5743#define ATOM_PP_THERMALCONTROLLER_RV770 8 5743#define ATOM_PP_THERMALCONTROLLER_RV770 8
5744#define ATOM_PP_THERMALCONTROLLER_ADT7473 9 5744#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
5745#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
5746#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
5747#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
5745 5748
5746typedef struct _ATOM_PPLIB_STATE 5749typedef struct _ATOM_PPLIB_STATE
5747{ 5750{
@@ -5749,6 +5752,26 @@ typedef struct _ATOM_PPLIB_STATE
5749 UCHAR ucClockStateIndices[1]; // variable-sized 5752 UCHAR ucClockStateIndices[1]; // variable-sized
5750} ATOM_PPLIB_STATE; 5753} ATOM_PPLIB_STATE;
5751 5754
5755typedef struct _ATOM_PPLIB_FANTABLE
5756{
5758 UCHAR ucFanTableFormat; // Bump this whenever the table format or version changes such that the remaining fields differ.
5759 UCHAR ucTHyst; // Temperature hysteresis. Integer.
5760 USHORT usTMin; // The temperature, in units of 0.01 degrees C, below which we just run at a minimal PWM.
5760 USHORT usTMed; // The middle temperature where we change slopes.
5761 USHORT usTHigh; // The high point above TMed for adjusting the second slope.
5762 USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
5763 USHORT usPWMMed; // The PWM value (in percent) at TMed.
5764 USHORT usPWMHigh; // The PWM value at THigh.
5765} ATOM_PPLIB_FANTABLE;
5766
5767typedef struct _ATOM_PPLIB_EXTENDEDHEADER
5768{
5769 USHORT usSize;
5770 ULONG ulMaxEngineClock; // For Overdrive.
5771 ULONG ulMaxMemoryClock; // For Overdrive.
5772 // Add extra system parameters here, always adjust size to include all fields.
5773} ATOM_PPLIB_EXTENDEDHEADER;
5774
5752//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps 5775//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
5753#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1 5776#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
5754#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2 5777#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
@@ -5762,6 +5785,12 @@ typedef struct _ATOM_PPLIB_STATE
5762#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512 5785#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
5763#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024 5786#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
5764#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048 5787#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
5788#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
5789#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
5790#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
5791#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently of VDDC.
5792#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
5793#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver support the BACO state.
5765 5794
5766typedef struct _ATOM_PPLIB_POWERPLAYTABLE 5795typedef struct _ATOM_PPLIB_POWERPLAYTABLE
5767{ 5796{
@@ -5797,6 +5826,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
5797 5826
5798} ATOM_PPLIB_POWERPLAYTABLE; 5827} ATOM_PPLIB_POWERPLAYTABLE;
5799 5828
5829typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
5830{
5831 ATOM_PPLIB_POWERPLAYTABLE basicTable;
5832 UCHAR ucNumCustomThermalPolicy;
5833 USHORT usCustomThermalPolicyArrayOffset;
5834}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
5835
5836typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
5837{
5838 ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
5839 USHORT usFormatID; // To be used ONLY by PPGen.
5840 USHORT usFanTableOffset;
5841 USHORT usExtendendedHeaderOffset;
5842} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
5843
5800//// ATOM_PPLIB_NONCLOCK_INFO::usClassification 5844//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
5801#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 5845#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
5802#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 5846#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
@@ -5816,7 +5860,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
5816#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400 5860#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
5817#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800 5861#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
5818#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 5862#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
5819// remaining 3 bits are reserved 5863#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
5864#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
5865#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
5820 5866
5821//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings 5867//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
5822#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 5868#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
@@ -5840,9 +5886,15 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
5840 5886
5841#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000 5887#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
5842#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000 5888#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
5889#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
5843#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000 5890#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
5844 5891
5845#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 5892// memory-related flags
5893#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x00010000
5894
5895// M3 arbiter: 2 bits; currently 3 parameter sets in total
5896#define ATOM_PPLIB_M3ARB_MASK 0x00060000
5897#define ATOM_PPLIB_M3ARB_SHIFT 17
5846 5898
5847// Contained in an array starting at the offset 5899// Contained in an array starting at the offset
5848// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. 5900// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
@@ -5860,6 +5912,9 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO
5860// Contained in an array starting at the offset 5912// Contained in an array starting at the offset
5861// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. 5913// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
5862// referenced from ATOM_PPLIB_STATE::ucClockStateIndices 5914// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
5915#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
5916#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
5917
5863typedef struct _ATOM_PPLIB_R600_CLOCK_INFO 5918typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
5864{ 5919{
5865 USHORT usEngineClockLow; 5920 USHORT usEngineClockLow;
@@ -5882,6 +5937,23 @@ typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
5882#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4 5937#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
5883#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8 5938#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
5884#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16 5939#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
5940#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
5941
5942typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
5943{
5944 USHORT usEngineClockLow;
5945 UCHAR ucEngineClockHigh;
5946
5947 USHORT usMemoryClockLow;
5948 UCHAR ucMemoryClockHigh;
5949
5950 USHORT usVDDC;
5951 USHORT usVDDCI;
5952 USHORT usUnused;
5953
5954 ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
5955
5956} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
5885 5957
5886typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO 5958typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
5887 5959
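
The versioned tables nest by embedding (POWERPLAYTABLE3 wraps POWERPLAYTABLE2, which wraps the basic table), so a consumer checks the structure size before touching the newer fields. As a worked example of the fan table itself, a piecewise-linear PWM lookup might look like the sketch below. Whether usTHigh is absolute or an offset above usTMed is not spelled out here, so the sketch assumes absolute values, and the function name is hypothetical:

	/* Sketch of a fan curve built from ATOM_PPLIB_FANTABLE.
	 * Temperatures are in 0.01 C, PWM values in 0.01 %. */
	static int fan_pwm_for_temp(const ATOM_PPLIB_FANTABLE *f, int temp)
	{
		if (temp <= f->usTMin)
			return f->usPWMMin;
		if (temp <= f->usTMed)	/* first slope: TMin..TMed */
			return f->usPWMMin + (f->usPWMMed - f->usPWMMin) *
			       (temp - f->usTMin) / (f->usTMed - f->usTMin);
		if (temp <= f->usTHigh)	/* second slope: TMed..THigh */
			return f->usPWMMed + (f->usPWMHigh - f->usPWMMed) *
			       (temp - f->usTMed) / (f->usTHigh - f->usTMed);
		return f->usPWMHigh;
	}

ucTHyst would then be applied by the caller when deciding whether to step the fan back down.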
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a87990b3ae84..03dd6c41dc19 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -26,7 +26,7 @@
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h> 27#include <drm/drm_crtc_helper.h>
28#include <drm/radeon_drm.h> 28#include <drm/radeon_drm.h>
29#include "radeon_fixed.h" 29#include <drm/drm_fixed.h>
30#include "radeon.h" 30#include "radeon.h"
31#include "atom.h" 31#include "atom.h"
32#include "atom-bits.h" 32#include "atom-bits.h"
@@ -245,25 +245,27 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
245 245
246 switch (mode) { 246 switch (mode) {
247 case DRM_MODE_DPMS_ON: 247 case DRM_MODE_DPMS_ON:
248 radeon_crtc->enabled = true;
249 /* adjust pm to dpms changes BEFORE enabling crtcs */
250 radeon_pm_compute_clocks(rdev);
248 atombios_enable_crtc(crtc, ATOM_ENABLE); 251 atombios_enable_crtc(crtc, ATOM_ENABLE);
249 if (ASIC_IS_DCE3(rdev)) 252 if (ASIC_IS_DCE3(rdev))
250 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); 253 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
251 atombios_blank_crtc(crtc, ATOM_DISABLE); 254 atombios_blank_crtc(crtc, ATOM_DISABLE);
252 /* XXX re-enable when interrupt support is added */ 255 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
253 if (!ASIC_IS_DCE4(rdev))
254 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
255 radeon_crtc_load_lut(crtc); 256 radeon_crtc_load_lut(crtc);
256 break; 257 break;
257 case DRM_MODE_DPMS_STANDBY: 258 case DRM_MODE_DPMS_STANDBY:
258 case DRM_MODE_DPMS_SUSPEND: 259 case DRM_MODE_DPMS_SUSPEND:
259 case DRM_MODE_DPMS_OFF: 260 case DRM_MODE_DPMS_OFF:
260 /* XXX re-enable when interrupt support is added */ 261 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
261 if (!ASIC_IS_DCE4(rdev))
262 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
263 atombios_blank_crtc(crtc, ATOM_ENABLE); 262 atombios_blank_crtc(crtc, ATOM_ENABLE);
264 if (ASIC_IS_DCE3(rdev)) 263 if (ASIC_IS_DCE3(rdev))
265 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); 264 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
266 atombios_enable_crtc(crtc, ATOM_DISABLE); 265 atombios_enable_crtc(crtc, ATOM_DISABLE);
266 radeon_crtc->enabled = false;
267 /* adjust pm to dpms changes AFTER disabling crtcs */
268 radeon_pm_compute_clocks(rdev);
267 break; 269 break;
268 } 270 }
269} 271}
@@ -1160,6 +1162,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
1160 struct drm_display_mode *mode, 1162 struct drm_display_mode *mode,
1161 struct drm_display_mode *adjusted_mode) 1163 struct drm_display_mode *adjusted_mode)
1162{ 1164{
1165 struct drm_device *dev = crtc->dev;
1166 struct radeon_device *rdev = dev->dev_private;
1167
1168 /* adjust pm to upcoming mode change */
1169 radeon_pm_compute_clocks(rdev);
1170
1163 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 1171 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
1164 return false; 1172 return false;
1165 return true; 1173 return true;
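
The DPMS hunk encodes an ordering rule for the new power-management hooks: reclock before a CRTC lights up, and only after one goes dark, so radeon_pm_compute_clocks() always sees the post-transition CRTC count. Condensed into a sketch (ordering only; the blanking and memreq steps from the hunk are omitted, and it assumes struct radeon_crtc embeds its drm_crtc as ->base, per the driver's to_radeon_crtc()):

	/* Sketch of the enable/disable ordering from the hunk above. */
	static void
	radeon_crtc_dpms_order(struct radeon_device *rdev,
			       struct radeon_crtc *radeon_crtc, bool on)
	{
		if (on) {
			radeon_crtc->enabled = true;
			/* adjust pm BEFORE the crtc starts demanding bandwidth */
			radeon_pm_compute_clocks(rdev);
			atombios_enable_crtc(&radeon_crtc->base, ATOM_ENABLE);
		} else {
			atombios_enable_crtc(&radeon_crtc->base, ATOM_DISABLE);
			radeon_crtc->enabled = false;
			/* adjust pm AFTER the crtc has gone dark */
			radeon_pm_compute_clocks(rdev);
		}
	}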
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 28b31c64f48d..abffb1499e22 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -351,7 +351,7 @@ retry:
351 args.v1.ucChannelID = chan->rec.i2c_id; 351 args.v1.ucChannelID = chan->rec.i2c_id;
352 args.v1.ucDelay = delay / 10; 352 args.v1.ucDelay = delay / 10;
353 if (ASIC_IS_DCE4(rdev)) 353 if (ASIC_IS_DCE4(rdev))
354 args.v2.ucHPD_ID = chan->rec.hpd_id; 354 args.v2.ucHPD_ID = chan->rec.hpd;
355 355
356 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 356 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
357 357
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e8f447e20507..8c8e4d3cbaa3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -28,39 +28,235 @@
28#include "radeon.h" 28#include "radeon.h"
29#include "radeon_asic.h" 29#include "radeon_asic.h"
30#include "radeon_drm.h" 30#include "radeon_drm.h"
31#include "rv770d.h" 31#include "evergreend.h"
32#include "atom.h" 32#include "atom.h"
33#include "avivod.h" 33#include "avivod.h"
34#include "evergreen_reg.h" 34#include "evergreen_reg.h"
35 35
36#define EVERGREEN_PFP_UCODE_SIZE 1120
37#define EVERGREEN_PM4_UCODE_SIZE 1376
38
36static void evergreen_gpu_init(struct radeon_device *rdev); 39static void evergreen_gpu_init(struct radeon_device *rdev);
37void evergreen_fini(struct radeon_device *rdev); 40void evergreen_fini(struct radeon_device *rdev);
38 41
42void evergreen_pm_misc(struct radeon_device *rdev)
43{
44
45}
46
47void evergreen_pm_prepare(struct radeon_device *rdev)
48{
49 struct drm_device *ddev = rdev->ddev;
50 struct drm_crtc *crtc;
51 struct radeon_crtc *radeon_crtc;
52 u32 tmp;
53
54 /* disable any active CRTCs */
55 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
56 radeon_crtc = to_radeon_crtc(crtc);
57 if (radeon_crtc->enabled) {
58 tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
59 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
60 WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
61 }
62 }
63}
64
65void evergreen_pm_finish(struct radeon_device *rdev)
66{
67 struct drm_device *ddev = rdev->ddev;
68 struct drm_crtc *crtc;
69 struct radeon_crtc *radeon_crtc;
70 u32 tmp;
71
72 /* enable any active CRTCs */
73 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
74 radeon_crtc = to_radeon_crtc(crtc);
75 if (radeon_crtc->enabled) {
76 tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
77 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
78 WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
79 }
80 }
81}
82
39bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 83bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
40{ 84{
41 bool connected = false; 85 bool connected = false;
42 /* XXX */ 86
87 switch (hpd) {
88 case RADEON_HPD_1:
89 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
90 connected = true;
91 break;
92 case RADEON_HPD_2:
93 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
94 connected = true;
95 break;
96 case RADEON_HPD_3:
97 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
98 connected = true;
99 break;
100 case RADEON_HPD_4:
101 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
102 connected = true;
103 break;
104 case RADEON_HPD_5:
105 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
106 connected = true;
107 break;
108 case RADEON_HPD_6:
109 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
110 connected = true;
111 break;
112 default:
113 break;
114 }
115
43 return connected; 116 return connected;
44} 117}
45 118
46void evergreen_hpd_set_polarity(struct radeon_device *rdev, 119void evergreen_hpd_set_polarity(struct radeon_device *rdev,
47 enum radeon_hpd_id hpd) 120 enum radeon_hpd_id hpd)
48{ 121{
49 /* XXX */ 122 u32 tmp;
123 bool connected = evergreen_hpd_sense(rdev, hpd);
124
125 switch (hpd) {
126 case RADEON_HPD_1:
127 tmp = RREG32(DC_HPD1_INT_CONTROL);
128 if (connected)
129 tmp &= ~DC_HPDx_INT_POLARITY;
130 else
131 tmp |= DC_HPDx_INT_POLARITY;
132 WREG32(DC_HPD1_INT_CONTROL, tmp);
133 break;
134 case RADEON_HPD_2:
135 tmp = RREG32(DC_HPD2_INT_CONTROL);
136 if (connected)
137 tmp &= ~DC_HPDx_INT_POLARITY;
138 else
139 tmp |= DC_HPDx_INT_POLARITY;
140 WREG32(DC_HPD2_INT_CONTROL, tmp);
141 break;
142 case RADEON_HPD_3:
143 tmp = RREG32(DC_HPD3_INT_CONTROL);
144 if (connected)
145 tmp &= ~DC_HPDx_INT_POLARITY;
146 else
147 tmp |= DC_HPDx_INT_POLARITY;
148 WREG32(DC_HPD3_INT_CONTROL, tmp);
149 break;
150 case RADEON_HPD_4:
151 tmp = RREG32(DC_HPD4_INT_CONTROL);
152 if (connected)
153 tmp &= ~DC_HPDx_INT_POLARITY;
154 else
155 tmp |= DC_HPDx_INT_POLARITY;
156 WREG32(DC_HPD4_INT_CONTROL, tmp);
157 break;
158 case RADEON_HPD_5:
159 tmp = RREG32(DC_HPD5_INT_CONTROL);
160 if (connected)
161 tmp &= ~DC_HPDx_INT_POLARITY;
162 else
163 tmp |= DC_HPDx_INT_POLARITY;
164 WREG32(DC_HPD5_INT_CONTROL, tmp);
165 break;
166 case RADEON_HPD_6:
167 tmp = RREG32(DC_HPD6_INT_CONTROL);
168 if (connected)
169 tmp &= ~DC_HPDx_INT_POLARITY;
170 else
171 tmp |= DC_HPDx_INT_POLARITY;
172 WREG32(DC_HPD6_INT_CONTROL, tmp);
173 break;
174 default:
175 break;
176 }
50} 177}
51 178
52void evergreen_hpd_init(struct radeon_device *rdev) 179void evergreen_hpd_init(struct radeon_device *rdev)
53{ 180{
54 /* XXX */ 181 struct drm_device *dev = rdev->ddev;
182 struct drm_connector *connector;
183 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
184 DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
185
186 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
187 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
188 switch (radeon_connector->hpd.hpd) {
189 case RADEON_HPD_1:
190 WREG32(DC_HPD1_CONTROL, tmp);
191 rdev->irq.hpd[0] = true;
192 break;
193 case RADEON_HPD_2:
194 WREG32(DC_HPD2_CONTROL, tmp);
195 rdev->irq.hpd[1] = true;
196 break;
197 case RADEON_HPD_3:
198 WREG32(DC_HPD3_CONTROL, tmp);
199 rdev->irq.hpd[2] = true;
200 break;
201 case RADEON_HPD_4:
202 WREG32(DC_HPD4_CONTROL, tmp);
203 rdev->irq.hpd[3] = true;
204 break;
205 case RADEON_HPD_5:
206 WREG32(DC_HPD5_CONTROL, tmp);
207 rdev->irq.hpd[4] = true;
208 break;
209 case RADEON_HPD_6:
210 WREG32(DC_HPD6_CONTROL, tmp);
211 rdev->irq.hpd[5] = true;
212 break;
213 default:
214 break;
215 }
216 }
217 if (rdev->irq.installed)
218 evergreen_irq_set(rdev);
55} 219}
56 220
57 221void evergreen_hpd_fini(struct radeon_device *rdev)
58void evergreen_bandwidth_update(struct radeon_device *rdev)
59{ 222{
60 /* XXX */ 223 struct drm_device *dev = rdev->ddev;
224 struct drm_connector *connector;
225
226 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
227 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
228 switch (radeon_connector->hpd.hpd) {
229 case RADEON_HPD_1:
230 WREG32(DC_HPD1_CONTROL, 0);
231 rdev->irq.hpd[0] = false;
232 break;
233 case RADEON_HPD_2:
234 WREG32(DC_HPD2_CONTROL, 0);
235 rdev->irq.hpd[1] = false;
236 break;
237 case RADEON_HPD_3:
238 WREG32(DC_HPD3_CONTROL, 0);
239 rdev->irq.hpd[2] = false;
240 break;
241 case RADEON_HPD_4:
242 WREG32(DC_HPD4_CONTROL, 0);
243 rdev->irq.hpd[3] = false;
244 break;
245 case RADEON_HPD_5:
246 WREG32(DC_HPD5_CONTROL, 0);
247 rdev->irq.hpd[4] = false;
248 break;
249 case RADEON_HPD_6:
250 WREG32(DC_HPD6_CONTROL, 0);
251 rdev->irq.hpd[5] = false;
252 break;
253 default:
254 break;
255 }
256 }
61} 257}
62 258
63void evergreen_hpd_fini(struct radeon_device *rdev) 259void evergreen_bandwidth_update(struct radeon_device *rdev)
64{ 260{
65 /* XXX */ 261 /* XXX */
66} 262}
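
The six-way switches in the new HPD code differ only in which DC_HPDx register they touch; a table-driven restatement is sketched below. It assumes enum radeon_hpd_id starts at 0 for RADEON_HPD_1, as radeon.h defined it at the time:

	static const u32 hpd_int_status_reg[6] = {
		DC_HPD1_INT_STATUS, DC_HPD2_INT_STATUS, DC_HPD3_INT_STATUS,
		DC_HPD4_INT_STATUS, DC_HPD5_INT_STATUS, DC_HPD6_INT_STATUS,
	};

	/* Sketch: equivalent to evergreen_hpd_sense() above. */
	static bool evergreen_hpd_sense_tbl(struct radeon_device *rdev,
					    enum radeon_hpd_id hpd)
	{
		if (hpd >= ARRAY_SIZE(hpd_int_status_reg))
			return false;
		return !!(RREG32(hpd_int_status_reg[hpd]) & DC_HPDx_SENSE);
	}

The committed code keeps the explicit switches, which matches the style of the r600/rv770 HPD code it was derived from.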
@@ -83,10 +279,31 @@ static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
83/* 279/*
84 * GART 280 * GART
85 */ 281 */
282void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
283{
284 unsigned i;
285 u32 tmp;
286
287 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
288 for (i = 0; i < rdev->usec_timeout; i++) {
289 /* read MC_STATUS */
290 tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
291 tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
292 if (tmp == 2) {
293 printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
294 return;
295 }
296 if (tmp) {
297 return;
298 }
299 udelay(1);
300 }
301}
302
86int evergreen_pcie_gart_enable(struct radeon_device *rdev) 303int evergreen_pcie_gart_enable(struct radeon_device *rdev)
87{ 304{
88 u32 tmp; 305 u32 tmp;
89 int r, i; 306 int r;
90 307
91 if (rdev->gart.table.vram.robj == NULL) { 308 if (rdev->gart.table.vram.robj == NULL) {
92 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 309 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -121,10 +338,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
121 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 338 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
122 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 339 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
123 (u32)(rdev->dummy_page.addr >> 12)); 340 (u32)(rdev->dummy_page.addr >> 12));
124 for (i = 1; i < 7; i++) 341 WREG32(VM_CONTEXT1_CNTL, 0);
125 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
126 342
127 r600_pcie_gart_tlb_flush(rdev); 343 evergreen_pcie_gart_tlb_flush(rdev);
128 rdev->gart.ready = true; 344 rdev->gart.ready = true;
129 return 0; 345 return 0;
130} 346}
@@ -132,11 +348,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
132void evergreen_pcie_gart_disable(struct radeon_device *rdev) 348void evergreen_pcie_gart_disable(struct radeon_device *rdev)
133{ 349{
134 u32 tmp; 350 u32 tmp;
135 int i, r; 351 int r;
136 352
137 /* Disable all tables */ 353 /* Disable all tables */
138 for (i = 0; i < 7; i++) 354 WREG32(VM_CONTEXT0_CNTL, 0);
139 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); 355 WREG32(VM_CONTEXT1_CNTL, 0);
140 356
141 /* Setup L2 cache */ 357 /* Setup L2 cache */
142 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING | 358 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
@@ -173,7 +389,6 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev)
173void evergreen_agp_enable(struct radeon_device *rdev) 389void evergreen_agp_enable(struct radeon_device *rdev)
174{ 390{
175 u32 tmp; 391 u32 tmp;
176 int i;
177 392
178 /* Setup L2 cache */ 393 /* Setup L2 cache */
179 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 394 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -193,8 +408,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
193 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); 408 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
194 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 409 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
195 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 410 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
196 for (i = 0; i < 7; i++) 411 WREG32(VM_CONTEXT0_CNTL, 0);
197 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); 412 WREG32(VM_CONTEXT1_CNTL, 0);
198} 413}
199 414
200static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) 415static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -400,40 +615,656 @@ static void evergreen_mc_program(struct radeon_device *rdev)
400 rv515_vga_render_disable(rdev); 615 rv515_vga_render_disable(rdev);
401} 616}
402 617
403#if 0
404/* 618/*
405 * CP. 619 * CP.
406 */ 620 */
407static void evergreen_cp_stop(struct radeon_device *rdev)
408{
409 /* XXX */
410}
411
412 621
413static int evergreen_cp_load_microcode(struct radeon_device *rdev) 622static int evergreen_cp_load_microcode(struct radeon_device *rdev)
414{ 623{
415 /* XXX */ 624 const __be32 *fw_data;
625 int i;
416 626
627 if (!rdev->me_fw || !rdev->pfp_fw)
628 return -EINVAL;
629
630 r700_cp_stop(rdev);
631 WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
632
633 fw_data = (const __be32 *)rdev->pfp_fw->data;
634 WREG32(CP_PFP_UCODE_ADDR, 0);
635 for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
636 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
637 WREG32(CP_PFP_UCODE_ADDR, 0);
638
639 fw_data = (const __be32 *)rdev->me_fw->data;
640 WREG32(CP_ME_RAM_WADDR, 0);
641 for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
642 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
643
644 WREG32(CP_PFP_UCODE_ADDR, 0);
645 WREG32(CP_ME_RAM_WADDR, 0);
646 WREG32(CP_ME_RAM_RADDR, 0);
417 return 0; 647 return 0;
418} 648}
419 649
650int evergreen_cp_resume(struct radeon_device *rdev)
651{
652 u32 tmp;
653 u32 rb_bufsz;
654 int r;
655
656 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
657 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
658 SOFT_RESET_PA |
659 SOFT_RESET_SH |
660 SOFT_RESET_VGT |
661 SOFT_RESET_SX));
662 RREG32(GRBM_SOFT_RESET);
663 mdelay(15);
664 WREG32(GRBM_SOFT_RESET, 0);
665 RREG32(GRBM_SOFT_RESET);
666
667 /* Set ring buffer size */
668 rb_bufsz = drm_order(rdev->cp.ring_size / 8);
669 tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
670#ifdef __BIG_ENDIAN
671 tmp |= BUF_SWAP_32BIT;
672#endif
673 WREG32(CP_RB_CNTL, tmp);
674 WREG32(CP_SEM_WAIT_TIMER, 0x4);
675
676 /* Set the write pointer delay */
677 WREG32(CP_RB_WPTR_DELAY, 0);
678
679 /* Initialize the ring buffer's read and write pointers */
680 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
681 WREG32(CP_RB_RPTR_WR, 0);
682 WREG32(CP_RB_WPTR, 0);
683 WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
684 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
685 mdelay(1);
686 WREG32(CP_RB_CNTL, tmp);
687
688 WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
689 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
690
691 rdev->cp.rptr = RREG32(CP_RB_RPTR);
692 rdev->cp.wptr = RREG32(CP_RB_WPTR);
693
694 r600_cp_start(rdev);
695 rdev->cp.ready = true;
696 r = radeon_ring_test(rdev);
697 if (r) {
698 rdev->cp.ready = false;
699 return r;
700 }
701 return 0;
702}
420 703
421/* 704/*
422 * Core functions 705 * Core functions
423 */ 706 */
424static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes, 707static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
708 u32 num_tile_pipes,
425 u32 num_backends, 709 u32 num_backends,
426 u32 backend_disable_mask) 710 u32 backend_disable_mask)
427{ 711{
428 u32 backend_map = 0; 712 u32 backend_map = 0;
713 u32 enabled_backends_mask = 0;
714 u32 enabled_backends_count = 0;
715 u32 cur_pipe;
716 u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
717 u32 cur_backend = 0;
718 u32 i;
719 bool force_no_swizzle;
720
721 if (num_tile_pipes > EVERGREEN_MAX_PIPES)
722 num_tile_pipes = EVERGREEN_MAX_PIPES;
723 if (num_tile_pipes < 1)
724 num_tile_pipes = 1;
725 if (num_backends > EVERGREEN_MAX_BACKENDS)
726 num_backends = EVERGREEN_MAX_BACKENDS;
727 if (num_backends < 1)
728 num_backends = 1;
729
730 for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
731 if (((backend_disable_mask >> i) & 1) == 0) {
732 enabled_backends_mask |= (1 << i);
733 ++enabled_backends_count;
734 }
735 if (enabled_backends_count == num_backends)
736 break;
737 }
738
739 if (enabled_backends_count == 0) {
740 enabled_backends_mask = 1;
741 enabled_backends_count = 1;
742 }
743
744 if (enabled_backends_count != num_backends)
745 num_backends = enabled_backends_count;
746
747 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
748 switch (rdev->family) {
749 case CHIP_CEDAR:
750 case CHIP_REDWOOD:
751 force_no_swizzle = false;
752 break;
753 case CHIP_CYPRESS:
754 case CHIP_HEMLOCK:
755 case CHIP_JUNIPER:
756 default:
757 force_no_swizzle = true;
758 break;
759 }
760 if (force_no_swizzle) {
761 bool last_backend_enabled = false;
762
763 force_no_swizzle = false;
764 for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
765 if (((enabled_backends_mask >> i) & 1) == 1) {
766 if (last_backend_enabled)
767 force_no_swizzle = true;
768 last_backend_enabled = true;
769 } else
770 last_backend_enabled = false;
771 }
772 }
773
774 switch (num_tile_pipes) {
775 case 1:
776 case 3:
777 case 5:
778 case 7:
779 DRM_ERROR("odd number of pipes!\n");
780 break;
781 case 2:
782 swizzle_pipe[0] = 0;
783 swizzle_pipe[1] = 1;
784 break;
785 case 4:
786 if (force_no_swizzle) {
787 swizzle_pipe[0] = 0;
788 swizzle_pipe[1] = 1;
789 swizzle_pipe[2] = 2;
790 swizzle_pipe[3] = 3;
791 } else {
792 swizzle_pipe[0] = 0;
793 swizzle_pipe[1] = 2;
794 swizzle_pipe[2] = 1;
795 swizzle_pipe[3] = 3;
796 }
797 break;
798 case 6:
799 if (force_no_swizzle) {
800 swizzle_pipe[0] = 0;
801 swizzle_pipe[1] = 1;
802 swizzle_pipe[2] = 2;
803 swizzle_pipe[3] = 3;
804 swizzle_pipe[4] = 4;
805 swizzle_pipe[5] = 5;
806 } else {
807 swizzle_pipe[0] = 0;
808 swizzle_pipe[1] = 2;
809 swizzle_pipe[2] = 4;
810 swizzle_pipe[3] = 1;
811 swizzle_pipe[4] = 3;
812 swizzle_pipe[5] = 5;
813 }
814 break;
815 case 8:
816 if (force_no_swizzle) {
817 swizzle_pipe[0] = 0;
818 swizzle_pipe[1] = 1;
819 swizzle_pipe[2] = 2;
820 swizzle_pipe[3] = 3;
821 swizzle_pipe[4] = 4;
822 swizzle_pipe[5] = 5;
823 swizzle_pipe[6] = 6;
824 swizzle_pipe[7] = 7;
825 } else {
826 swizzle_pipe[0] = 0;
827 swizzle_pipe[1] = 2;
828 swizzle_pipe[2] = 4;
829 swizzle_pipe[3] = 6;
830 swizzle_pipe[4] = 1;
831 swizzle_pipe[5] = 3;
832 swizzle_pipe[6] = 5;
833 swizzle_pipe[7] = 7;
834 }
835 break;
836 }
837
838 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
839 while (((1 << cur_backend) & enabled_backends_mask) == 0)
840 cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
841
842 backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
843
844 cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
845 }
429 846
430 return backend_map; 847 return backend_map;
431} 848}
432#endif
433 849
434static void evergreen_gpu_init(struct radeon_device *rdev) 850static void evergreen_gpu_init(struct radeon_device *rdev)
435{ 851{
436 /* XXX */ 852 u32 cc_rb_backend_disable = 0;
853 u32 cc_gc_shader_pipe_config;
854 u32 gb_addr_config = 0;
855 u32 mc_shared_chmap, mc_arb_ramcfg;
856 u32 gb_backend_map;
857 u32 grbm_gfx_index;
858 u32 sx_debug_1;
859 u32 smx_dc_ctl0;
860 u32 sq_config;
861 u32 sq_lds_resource_mgmt;
862 u32 sq_gpr_resource_mgmt_1;
863 u32 sq_gpr_resource_mgmt_2;
864 u32 sq_gpr_resource_mgmt_3;
865 u32 sq_thread_resource_mgmt;
866 u32 sq_thread_resource_mgmt_2;
867 u32 sq_stack_resource_mgmt_1;
868 u32 sq_stack_resource_mgmt_2;
869 u32 sq_stack_resource_mgmt_3;
870 u32 vgt_cache_invalidation;
871 u32 hdp_host_path_cntl;
872 int i, j, num_shader_engines, ps_thread_count;
873
874 switch (rdev->family) {
875 case CHIP_CYPRESS:
876 case CHIP_HEMLOCK:
877 rdev->config.evergreen.num_ses = 2;
878 rdev->config.evergreen.max_pipes = 4;
879 rdev->config.evergreen.max_tile_pipes = 8;
880 rdev->config.evergreen.max_simds = 10;
881 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
882 rdev->config.evergreen.max_gprs = 256;
883 rdev->config.evergreen.max_threads = 248;
884 rdev->config.evergreen.max_gs_threads = 32;
885 rdev->config.evergreen.max_stack_entries = 512;
886 rdev->config.evergreen.sx_num_of_sets = 4;
887 rdev->config.evergreen.sx_max_export_size = 256;
888 rdev->config.evergreen.sx_max_export_pos_size = 64;
889 rdev->config.evergreen.sx_max_export_smx_size = 192;
890 rdev->config.evergreen.max_hw_contexts = 8;
891 rdev->config.evergreen.sq_num_cf_insts = 2;
892
893 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
894 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
895 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
896 break;
897 case CHIP_JUNIPER:
898 rdev->config.evergreen.num_ses = 1;
899 rdev->config.evergreen.max_pipes = 4;
900 rdev->config.evergreen.max_tile_pipes = 4;
901 rdev->config.evergreen.max_simds = 10;
902 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
903 rdev->config.evergreen.max_gprs = 256;
904 rdev->config.evergreen.max_threads = 248;
905 rdev->config.evergreen.max_gs_threads = 32;
906 rdev->config.evergreen.max_stack_entries = 512;
907 rdev->config.evergreen.sx_num_of_sets = 4;
908 rdev->config.evergreen.sx_max_export_size = 256;
909 rdev->config.evergreen.sx_max_export_pos_size = 64;
910 rdev->config.evergreen.sx_max_export_smx_size = 192;
911 rdev->config.evergreen.max_hw_contexts = 8;
912 rdev->config.evergreen.sq_num_cf_insts = 2;
913
914 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
915 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
916 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
917 break;
918 case CHIP_REDWOOD:
919 rdev->config.evergreen.num_ses = 1;
920 rdev->config.evergreen.max_pipes = 4;
921 rdev->config.evergreen.max_tile_pipes = 4;
922 rdev->config.evergreen.max_simds = 5;
923 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
924 rdev->config.evergreen.max_gprs = 256;
925 rdev->config.evergreen.max_threads = 248;
926 rdev->config.evergreen.max_gs_threads = 32;
927 rdev->config.evergreen.max_stack_entries = 256;
928 rdev->config.evergreen.sx_num_of_sets = 4;
929 rdev->config.evergreen.sx_max_export_size = 256;
930 rdev->config.evergreen.sx_max_export_pos_size = 64;
931 rdev->config.evergreen.sx_max_export_smx_size = 192;
932 rdev->config.evergreen.max_hw_contexts = 8;
933 rdev->config.evergreen.sq_num_cf_insts = 2;
934
935 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
936 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
937 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
938 break;
939 case CHIP_CEDAR:
940 default:
941 rdev->config.evergreen.num_ses = 1;
942 rdev->config.evergreen.max_pipes = 2;
943 rdev->config.evergreen.max_tile_pipes = 2;
944 rdev->config.evergreen.max_simds = 2;
945 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
946 rdev->config.evergreen.max_gprs = 256;
947 rdev->config.evergreen.max_threads = 192;
948 rdev->config.evergreen.max_gs_threads = 16;
949 rdev->config.evergreen.max_stack_entries = 256;
950 rdev->config.evergreen.sx_num_of_sets = 4;
951 rdev->config.evergreen.sx_max_export_size = 128;
952 rdev->config.evergreen.sx_max_export_pos_size = 32;
953 rdev->config.evergreen.sx_max_export_smx_size = 96;
954 rdev->config.evergreen.max_hw_contexts = 4;
955 rdev->config.evergreen.sq_num_cf_insts = 1;
956
957 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
958 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
959 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
960 break;
961 }
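/*
 * Editor's note: the per-ASIC cases above differ mainly in shader engine
 * count, SIMD count, GPR/thread/stack budgets and FIFO sizes; everything
 * downstream derives from rdev->config.evergreen, so supporting a new
 * Evergreen variant is largely a matter of adding a case here.
 */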
962
963 /* Initialize HDP */
964 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
965 WREG32((0x2c14 + j), 0x00000000);
966 WREG32((0x2c18 + j), 0x00000000);
967 WREG32((0x2c1c + j), 0x00000000);
968 WREG32((0x2c20 + j), 0x00000000);
969 WREG32((0x2c24 + j), 0x00000000);
970 }
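/*
 * Editor's note (arithmetic only, the per-instance layout is an
 * assumption): each iteration clears five consecutive dwords
 * (0x2c14..0x2c24) for one of 32 HDP instances; the 0x18-byte stride is
 * six dwords, so one dword per block is left untouched.
 */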
971
972 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
973
974 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
975
976 cc_gc_shader_pipe_config |=
977 INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
978 & EVERGREEN_MAX_PIPES_MASK);
979 cc_gc_shader_pipe_config |=
980 INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
981 & EVERGREEN_MAX_SIMDS_MASK);
982
983 cc_rb_backend_disable =
984 BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
985 & EVERGREEN_MAX_BACKENDS_MASK);
986
987
988 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
989 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
990
991 switch (rdev->config.evergreen.max_tile_pipes) {
992 case 1:
993 default:
994 gb_addr_config |= NUM_PIPES(0);
995 break;
996 case 2:
997 gb_addr_config |= NUM_PIPES(1);
998 break;
999 case 4:
1000 gb_addr_config |= NUM_PIPES(2);
1001 break;
1002 case 8:
1003 gb_addr_config |= NUM_PIPES(3);
1004 break;
1005 }
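/*
 * Editor's sketch (not part of the patch): the switch above encodes
 * NUM_PIPES as log2 of the tile pipe count.  Assuming the count is
 * always a power of two, ilog2() from <linux/log2.h> would give the
 * same result:
 *
 *	gb_addr_config |= NUM_PIPES(ilog2(rdev->config.evergreen.max_tile_pipes));
 */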
1006
1007 gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1008 gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
1009 gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
1010 gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
1011 gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
1012 gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
1013
1014 if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
1015 gb_addr_config |= ROW_SIZE(2);
1016 else
1017 gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
1018
1019 if (rdev->ddev->pdev->device == 0x689e) {
1020 u32 efuse_straps_4;
1021 u32 efuse_straps_3;
1022 u8 efuse_box_bit_131_124;
1023
1024 WREG32(RCU_IND_INDEX, 0x204);
1025 efuse_straps_4 = RREG32(RCU_IND_DATA);
1026 WREG32(RCU_IND_INDEX, 0x203);
1027 efuse_straps_3 = RREG32(RCU_IND_DATA);
1028 efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
1029
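/*
 * Editor's note (worked example, values hypothetical): bits 131:124 of
 * the efuse box straddle the two strap words read through the RCU
 * indirect index/data pair.  With efuse_straps_4 = 0x0000000a and
 * efuse_straps_3 = 0x50000000, the low nibble of straps_4 (0xa) supplies
 * bits 7:4 and the top nibble of straps_3 (0x5) bits 3:0, giving
 * efuse_box_bit_131_124 = 0xa5.
 */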
1030 switch (efuse_box_bit_131_124) {
1031 case 0x00:
1032 gb_backend_map = 0x76543210;
1033 break;
1034 case 0x55:
1035 gb_backend_map = 0x77553311;
1036 break;
1037 case 0x56:
1038 gb_backend_map = 0x77553300;
1039 break;
1040 case 0x59:
1041 gb_backend_map = 0x77552211;
1042 break;
1043 case 0x66:
1044 gb_backend_map = 0x77443300;
1045 break;
1046 case 0x99:
1047 gb_backend_map = 0x66552211;
1048 break;
1049 case 0x5a:
1050 gb_backend_map = 0x77552200;
1051 break;
1052 case 0xaa:
1053 gb_backend_map = 0x66442200;
1054 break;
1055 case 0x95:
1056 gb_backend_map = 0x66553311;
1057 break;
1058 default:
1059 DRM_ERROR("bad backend map, using default\n");
1060 gb_backend_map =
1061 evergreen_get_tile_pipe_to_backend_map(rdev,
1062 rdev->config.evergreen.max_tile_pipes,
1063 rdev->config.evergreen.max_backends,
1064 ((EVERGREEN_MAX_BACKENDS_MASK <<
1065 rdev->config.evergreen.max_backends) &
1066 EVERGREEN_MAX_BACKENDS_MASK));
1067 break;
1068 }
1069 } else if (rdev->ddev->pdev->device == 0x68b9) {
1070 u32 efuse_straps_3;
1071 u8 efuse_box_bit_127_124;
1072
1073 WREG32(RCU_IND_INDEX, 0x203);
1074 efuse_straps_3 = RREG32(RCU_IND_DATA);
1075 efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xf0000000) >> 28);
1076
1077 switch (efuse_box_bit_127_124) {
1078 case 0x0:
1079 gb_backend_map = 0x00003210;
1080 break;
1081 case 0x5:
1082 case 0x6:
1083 case 0x9:
1084 case 0xa:
1085 gb_backend_map = 0x00003311;
1086 break;
1087 default:
1088 DRM_ERROR("bad backend map, using default\n");
1089 gb_backend_map =
1090 evergreen_get_tile_pipe_to_backend_map(rdev,
1091 rdev->config.evergreen.max_tile_pipes,
1092 rdev->config.evergreen.max_backends,
1093 ((EVERGREEN_MAX_BACKENDS_MASK <<
1094 rdev->config.evergreen.max_backends) &
1095 EVERGREEN_MAX_BACKENDS_MASK));
1096 break;
1097 }
1098 } else
1099 gb_backend_map =
1100 evergreen_get_tile_pipe_to_backend_map(rdev,
1101 rdev->config.evergreen.max_tile_pipes,
1102 rdev->config.evergreen.max_backends,
1103 ((EVERGREEN_MAX_BACKENDS_MASK <<
1104 rdev->config.evergreen.max_backends) &
1105 EVERGREEN_MAX_BACKENDS_MASK));
1106
1107 WREG32(GB_BACKEND_MAP, gb_backend_map);
1108 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1109 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
1110 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1111
1112 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
1113 grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
1114
1115 for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
1116 u32 rb = cc_rb_backend_disable | (0xf0 << 16);
1117 u32 sp = cc_gc_shader_pipe_config;
1118 u32 gfx = grbm_gfx_index | SE_INDEX(i);
1119
1120 if (i == num_shader_engines) {
1121 rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
1122 sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
1123 }
1124
1125 WREG32(GRBM_GFX_INDEX, gfx);
1126 WREG32(RLC_GFX_INDEX, gfx);
1127
1128 WREG32(CC_RB_BACKEND_DISABLE, rb);
1129 WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
1130 WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
1131 WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
1132 }
1133
1134 grbm_gfx_index |= SE_BROADCAST_WRITES;
1135 WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
1136 WREG32(RLC_GFX_INDEX, grbm_gfx_index);
1137
1138 WREG32(CGTS_SYS_TCC_DISABLE, 0);
1139 WREG32(CGTS_TCC_DISABLE, 0);
1140 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
1141 WREG32(CGTS_USER_TCC_DISABLE, 0);
1142
1143 /* set HW defaults for 3D engine */
1144 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
1145 ROQ_IB2_START(0x2b)));
1146
1147 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
1148
1149 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
1150 SYNC_GRADIENT |
1151 SYNC_WALKER |
1152 SYNC_ALIGNER));
1153
1154 sx_debug_1 = RREG32(SX_DEBUG_1);
1155 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
1156 WREG32(SX_DEBUG_1, sx_debug_1);
1157
1158
1159 smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
1160 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
1161 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
1162 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
1163
1164 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
1165 POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
1166 SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
1167
1168 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
1169 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
1170 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
1171
1172 WREG32(VGT_NUM_INSTANCES, 1);
1173 WREG32(SPI_CONFIG_CNTL, 0);
1174 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
1175 WREG32(CP_PERFMON_CNTL, 0);
1176
1177 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
1178 FETCH_FIFO_HIWATER(0x4) |
1179 DONE_FIFO_HIWATER(0xe0) |
1180 ALU_UPDATE_FIFO_HIWATER(0x8)));
1181
1182 sq_config = RREG32(SQ_CONFIG);
1183 sq_config &= ~(PS_PRIO(3) |
1184 VS_PRIO(3) |
1185 GS_PRIO(3) |
1186 ES_PRIO(3));
1187 sq_config |= (VC_ENABLE |
1188 EXPORT_SRC_C |
1189 PS_PRIO(0) |
1190 VS_PRIO(1) |
1191 GS_PRIO(2) |
1192 ES_PRIO(3));
1193
1194 if (rdev->family == CHIP_CEDAR)
1195 /* no vertex cache */
1196 sq_config &= ~VC_ENABLE;
1197
1198 sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
1199
1200 sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
1201 sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
1202 sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
1203 sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
1204 sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
1205 sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
1206 sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
1207
1208 if (rdev->family == CHIP_CEDAR)
1209 ps_thread_count = 96;
1210 else
1211 ps_thread_count = 128;
1212
1213 sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
1214 sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
1215 sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
1216 sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
1217 sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
1218 sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
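/*
 * Editor's note (worked example): with max_threads = 248 and
 * ps_thread_count = 128, each of the five remaining stages gets
 * ((248 - 128) / 6) / 8 = 2, then * 8 = 16 threads, i.e. the per-stage
 * share rounded down to a multiple of 8.  Multiplying the already
 * shifted field by 8 is equivalent to shifting (x * 8), since
 * (x << s) * 8 == (x * 8) << s.
 */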
1219
1220 sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
1221 sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
1222 sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
1223 sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
1224 sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
1225 sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
1226
1227 WREG32(SQ_CONFIG, sq_config);
1228 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
1229 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
1230 WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
1231 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1232 WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
1233 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1234 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1235 WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
1236 WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
1237 WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
1238
1239 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
1240 FORCE_EOV_MAX_REZ_CNT(255)));
1241
1242 if (rdev->family == CHIP_CEDAR)
1243 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
1244 else
1245 vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
1246 vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
1247 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
1248
1249 WREG32(VGT_GS_VERTEX_REUSE, 16);
1250 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1251
1252 WREG32(CB_PERF_CTR0_SEL_0, 0);
1253 WREG32(CB_PERF_CTR0_SEL_1, 0);
1254 WREG32(CB_PERF_CTR1_SEL_0, 0);
1255 WREG32(CB_PERF_CTR1_SEL_1, 0);
1256 WREG32(CB_PERF_CTR2_SEL_0, 0);
1257 WREG32(CB_PERF_CTR2_SEL_1, 0);
1258 WREG32(CB_PERF_CTR3_SEL_0, 0);
1259 WREG32(CB_PERF_CTR3_SEL_1, 0);
1260
1261 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
1262 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
1263
1264 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
1265
1266 udelay(50);
1267
437} 1268}
438 1269
439int evergreen_mc_init(struct radeon_device *rdev) 1270int evergreen_mc_init(struct radeon_device *rdev)
@@ -476,26 +1307,627 @@ int evergreen_mc_init(struct radeon_device *rdev)
476 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 1307 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
477 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 1308 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
478 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1309 rdev->mc.visible_vram_size = rdev->mc.aper_size;
479 /* FIXME remove this once we support unmappable VRAM */
480 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
481 rdev->mc.mc_vram_size = rdev->mc.aper_size;
482 rdev->mc.real_vram_size = rdev->mc.aper_size;
483 }
484 r600_vram_gtt_location(rdev, &rdev->mc); 1310 r600_vram_gtt_location(rdev, &rdev->mc);
485 radeon_update_bandwidth_info(rdev); 1311 radeon_update_bandwidth_info(rdev);
486 1312
487 return 0; 1313 return 0;
488} 1314}
489 1315
490int evergreen_gpu_reset(struct radeon_device *rdev) 1316bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
491{ 1317{
492 /* FIXME: implement for evergreen */ 1318 /* FIXME: implement for evergreen */
1319 return false;
1320}
1321
1322static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
1323{
1324 struct evergreen_mc_save save;
1325 u32 srbm_reset = 0;
1326 u32 grbm_reset = 0;
1327
1328 dev_info(rdev->dev, "GPU softreset\n");
1329 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
1330 RREG32(GRBM_STATUS));
1331 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
1332 RREG32(GRBM_STATUS_SE0));
1333 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
1334 RREG32(GRBM_STATUS_SE1));
1335 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
1336 RREG32(SRBM_STATUS));
1337 evergreen_mc_stop(rdev, &save);
1338 if (evergreen_mc_wait_for_idle(rdev)) {
1339 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1340 }
1341 /* Disable CP parsing/prefetching */
1342 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
1343
1344 /* reset all the gfx blocks */
1345 grbm_reset = (SOFT_RESET_CP |
1346 SOFT_RESET_CB |
1347 SOFT_RESET_DB |
1348 SOFT_RESET_PA |
1349 SOFT_RESET_SC |
1350 SOFT_RESET_SPI |
1351 SOFT_RESET_SH |
1352 SOFT_RESET_SX |
1353 SOFT_RESET_TC |
1354 SOFT_RESET_TA |
1355 SOFT_RESET_VC |
1356 SOFT_RESET_VGT);
1357
1358 dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
1359 WREG32(GRBM_SOFT_RESET, grbm_reset);
1360 (void)RREG32(GRBM_SOFT_RESET);
1361 udelay(50);
1362 WREG32(GRBM_SOFT_RESET, 0);
1363 (void)RREG32(GRBM_SOFT_RESET);
1364
1365 /* reset all the system blocks */
1366 srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
1367
1368 dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
1369 WREG32(SRBM_SOFT_RESET, srbm_reset);
1370 (void)RREG32(SRBM_SOFT_RESET);
1371 udelay(50);
1372 WREG32(SRBM_SOFT_RESET, 0);
1373 (void)RREG32(SRBM_SOFT_RESET);
1374 /* Wait a little for things to settle down */
1375 udelay(50);
1376 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
1377 RREG32(GRBM_STATUS));
1378 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
1379 RREG32(GRBM_STATUS_SE0));
1380 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
1381 RREG32(GRBM_STATUS_SE1));
1382 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
1383 RREG32(SRBM_STATUS));
1384 /* After reset we need to reinit the asic as the GPU often ends up
1385 * in an incoherent state.
1386 */
1387 atom_asic_init(rdev->mode_info.atom_context);
1388 evergreen_mc_resume(rdev, &save);
1389 return 0;
1390}
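/*
 * Editor's note: the (void)RREG32(GRBM_SOFT_RESET) and
 * (void)RREG32(SRBM_SOFT_RESET) reads above are posting flushes; each
 * read forces the preceding MMIO write to reach the chip before the
 * udelay() starts timing the reset pulse.
 */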
1391
1392int evergreen_asic_reset(struct radeon_device *rdev)
1393{
1394 return evergreen_gpu_soft_reset(rdev);
1395}
1396
1397/* Interrupts */
1398
1399u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
1400{
1401 switch (crtc) {
1402 case 0:
1403 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
1404 case 1:
1405 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
1406 case 2:
1407 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
1408 case 3:
1409 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
1410 case 4:
1411 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
1412 case 5:
1413 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
1414 default:
1415 return 0;
1416 }
1417}
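/*
 * Editor's sketch (illustrative alternative, not part of the patch): the
 * per-CRTC register blocks sit at fixed offsets, so the switch above
 * could equally be table driven:
 */
#if 0
static const u32 crtc_offsets[6] = {
	EVERGREEN_CRTC0_REGISTER_OFFSET,
	EVERGREEN_CRTC1_REGISTER_OFFSET,
	EVERGREEN_CRTC2_REGISTER_OFFSET,
	EVERGREEN_CRTC3_REGISTER_OFFSET,
	EVERGREEN_CRTC4_REGISTER_OFFSET,
	EVERGREEN_CRTC5_REGISTER_OFFSET,
};

u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc < 0 || crtc > 5)
		return 0;
	return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}
#endif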
1418
1419void evergreen_disable_interrupt_state(struct radeon_device *rdev)
1420{
1421 u32 tmp;
1422
1423 WREG32(CP_INT_CNTL, 0);
1424 WREG32(GRBM_INT_CNTL, 0);
1425 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
1426 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
1427 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
1428 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
1429 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
1430 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1431
1432 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
1433 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
1434 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
1435 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
1436 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
1437 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1438
1439 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
1440 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
1441
1442 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
1443 WREG32(DC_HPD1_INT_CONTROL, tmp);
1444 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
1445 WREG32(DC_HPD2_INT_CONTROL, tmp);
1446 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
1447 WREG32(DC_HPD3_INT_CONTROL, tmp);
1448 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
1449 WREG32(DC_HPD4_INT_CONTROL, tmp);
1450 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
1451 WREG32(DC_HPD5_INT_CONTROL, tmp);
1452 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
1453 WREG32(DC_HPD6_INT_CONTROL, tmp);
1454
1455}
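/*
 * Editor's note: masking with DC_HPDx_INT_POLARITY above preserves only
 * the hot-plug sense polarity bit while dropping the enable and ack
 * bits, so re-enabling interrupts later in evergreen_irq_set() does not
 * disturb the configured detection edge.
 */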
1456
1457int evergreen_irq_set(struct radeon_device *rdev)
1458{
1459 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
1460 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
1461 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
1462 u32 grbm_int_cntl = 0;
1463
1464 if (!rdev->irq.installed) {
1465 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
1466 return -EINVAL;
1467 }
1468 /* don't enable anything if the ih is disabled */
1469 if (!rdev->ih.enabled) {
1470 r600_disable_interrupts(rdev);
1471 /* force the active interrupt state to all disabled */
1472 evergreen_disable_interrupt_state(rdev);
1473 return 0;
1474 }
1475
1476 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
1477 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
1478 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
1479 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
1480 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
1481 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
1482
1483 if (rdev->irq.sw_int) {
1484 DRM_DEBUG("evergreen_irq_set: sw int\n");
1485 cp_int_cntl |= RB_INT_ENABLE;
1486 }
1487 if (rdev->irq.crtc_vblank_int[0]) {
1488 DRM_DEBUG("evergreen_irq_set: vblank 0\n");
1489 crtc1 |= VBLANK_INT_MASK;
1490 }
1491 if (rdev->irq.crtc_vblank_int[1]) {
1492 DRM_DEBUG("evergreen_irq_set: vblank 1\n");
1493 crtc2 |= VBLANK_INT_MASK;
1494 }
1495 if (rdev->irq.crtc_vblank_int[2]) {
1496 DRM_DEBUG("evergreen_irq_set: vblank 2\n");
1497 crtc3 |= VBLANK_INT_MASK;
1498 }
1499 if (rdev->irq.crtc_vblank_int[3]) {
1500 DRM_DEBUG("evergreen_irq_set: vblank 3\n");
1501 crtc4 |= VBLANK_INT_MASK;
1502 }
1503 if (rdev->irq.crtc_vblank_int[4]) {
1504 DRM_DEBUG("evergreen_irq_set: vblank 4\n");
1505 crtc5 |= VBLANK_INT_MASK;
1506 }
1507 if (rdev->irq.crtc_vblank_int[5]) {
1508 DRM_DEBUG("evergreen_irq_set: vblank 5\n");
1509 crtc6 |= VBLANK_INT_MASK;
1510 }
1511 if (rdev->irq.hpd[0]) {
1512 DRM_DEBUG("evergreen_irq_set: hpd 1\n");
1513 hpd1 |= DC_HPDx_INT_EN;
1514 }
1515 if (rdev->irq.hpd[1]) {
1516 DRM_DEBUG("evergreen_irq_set: hpd 2\n");
1517 hpd2 |= DC_HPDx_INT_EN;
1518 }
1519 if (rdev->irq.hpd[2]) {
1520 DRM_DEBUG("evergreen_irq_set: hpd 3\n");
1521 hpd3 |= DC_HPDx_INT_EN;
1522 }
1523 if (rdev->irq.hpd[3]) {
1524 DRM_DEBUG("evergreen_irq_set: hpd 4\n");
1525 hpd4 |= DC_HPDx_INT_EN;
1526 }
1527 if (rdev->irq.hpd[4]) {
1528 DRM_DEBUG("evergreen_irq_set: hpd 5\n");
1529 hpd5 |= DC_HPDx_INT_EN;
1530 }
1531 if (rdev->irq.hpd[5]) {
1532 DRM_DEBUG("evergreen_irq_set: hpd 6\n");
1533 hpd6 |= DC_HPDx_INT_EN;
1534 }
1535 if (rdev->irq.gui_idle) {
1536 DRM_DEBUG("gui idle\n");
1537 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
1538 }
1539
1540 WREG32(CP_INT_CNTL, cp_int_cntl);
1541 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
1542
1543 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
1544 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
1545 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
1546 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
1547 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
1548 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
1549
1550 WREG32(DC_HPD1_INT_CONTROL, hpd1);
1551 WREG32(DC_HPD2_INT_CONTROL, hpd2);
1552 WREG32(DC_HPD3_INT_CONTROL, hpd3);
1553 WREG32(DC_HPD4_INT_CONTROL, hpd4);
1554 WREG32(DC_HPD5_INT_CONTROL, hpd5);
1555 WREG32(DC_HPD6_INT_CONTROL, hpd6);
1556
493 return 0; 1557 return 0;
494} 1558}
495 1559
1560static inline void evergreen_irq_ack(struct radeon_device *rdev,
1561 u32 *disp_int,
1562 u32 *disp_int_cont,
1563 u32 *disp_int_cont2,
1564 u32 *disp_int_cont3,
1565 u32 *disp_int_cont4,
1566 u32 *disp_int_cont5)
1567{
1568 u32 tmp;
1569
1570 *disp_int = RREG32(DISP_INTERRUPT_STATUS);
1571 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
1572 *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
1573 *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
1574 *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
1575 *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
1576
1577 if (*disp_int & LB_D1_VBLANK_INTERRUPT)
1578 WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
1579 if (*disp_int & LB_D1_VLINE_INTERRUPT)
1580 WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
1581
1582 if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
1583 WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
1584 if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
1585 WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
1586
1587 if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
1588 WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
1589 if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
1590 WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
1591
1592 if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
1593 WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
1594 if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
1595 WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
1596
1597 if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
1598 WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
1599 if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
1600 WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
1601
1602 if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
1603 WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
1604 if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
1605 WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
1606
1607 if (*disp_int & DC_HPD1_INTERRUPT) {
1608 tmp = RREG32(DC_HPD1_INT_CONTROL);
1609 tmp |= DC_HPDx_INT_ACK;
1610 WREG32(DC_HPD1_INT_CONTROL, tmp);
1611 }
1612 if (*disp_int_cont & DC_HPD2_INTERRUPT) {
1613 tmp = RREG32(DC_HPD2_INT_CONTROL);
1614 tmp |= DC_HPDx_INT_ACK;
1615 WREG32(DC_HPD2_INT_CONTROL, tmp);
1616 }
1617 if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
1618 tmp = RREG32(DC_HPD3_INT_CONTROL);
1619 tmp |= DC_HPDx_INT_ACK;
1620 WREG32(DC_HPD3_INT_CONTROL, tmp);
1621 }
1622 if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
1623 tmp = RREG32(DC_HPD4_INT_CONTROL);
1624 tmp |= DC_HPDx_INT_ACK;
1625 WREG32(DC_HPD4_INT_CONTROL, tmp);
1626 }
1627 if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
1628 tmp = RREG32(DC_HPD5_INT_CONTROL);
1629 tmp |= DC_HPDx_INT_ACK;
1630 WREG32(DC_HPD5_INT_CONTROL, tmp);
1631 }
1632 if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
1633 tmp = RREG32(DC_HPD6_INT_CONTROL);
1634 tmp |= DC_HPDx_INT_ACK;
1635 WREG32(DC_HPD6_INT_CONTROL, tmp);
1636 }
1637}
1638
1639void evergreen_irq_disable(struct radeon_device *rdev)
1640{
1641 u32 disp_int, disp_int_cont, disp_int_cont2;
1642 u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
1643
1644 r600_disable_interrupts(rdev);
1645 /* Wait and acknowledge irq */
1646 mdelay(1);
1647 evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
1648 &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
1649 evergreen_disable_interrupt_state(rdev);
1650}
1651
1652static void evergreen_irq_suspend(struct radeon_device *rdev)
1653{
1654 evergreen_irq_disable(rdev);
1655 r600_rlc_stop(rdev);
1656}
1657
1658static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
1659{
1660 u32 wptr, tmp;
1661
1662 /* XXX use writeback */
1663 wptr = RREG32(IH_RB_WPTR);
1664
1665 if (wptr & RB_OVERFLOW) {
1666 /* When a ring buffer overflow happens, start parsing interrupts
1667 * from the last vector that was not overwritten (wptr + 16).
1668 * Hopefully this allows us to catch up.
1669 */
1670 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
1671 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
1672 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
1673 tmp = RREG32(IH_RB_CNTL);
1674 tmp |= IH_WPTR_OVERFLOW_CLEAR;
1675 WREG32(IH_RB_CNTL, tmp);
1676 }
1677 return (wptr & rdev->ih.ptr_mask);
1678}
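/*
 * Editor's note (worked example, numbers hypothetical): rptr/wptr are
 * byte offsets and each IH vector is 16 bytes.  With a 16K ring,
 * ptr_mask = 0x3fff; if RB_OVERFLOW is set at wptr = 0x0120, reading
 * resumes at rptr = (0x0120 + 16) & 0x3fff = 0x0130, one vector past the
 * write pointer, which is the oldest entry not yet overwritten.
 */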
1679
1680int evergreen_irq_process(struct radeon_device *rdev)
1681{
1682 u32 wptr = evergreen_get_ih_wptr(rdev);
1683 u32 rptr = rdev->ih.rptr;
1684 u32 src_id, src_data;
1685 u32 ring_index;
1686 u32 disp_int, disp_int_cont, disp_int_cont2;
1687 u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
1688 unsigned long flags;
1689 bool queue_hotplug = false;
1690
1691 DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
1692 if (!rdev->ih.enabled)
1693 return IRQ_NONE;
1694
1695 spin_lock_irqsave(&rdev->ih.lock, flags);
1696
1697 if (rptr == wptr) {
1698 spin_unlock_irqrestore(&rdev->ih.lock, flags);
1699 return IRQ_NONE;
1700 }
1701 if (rdev->shutdown) {
1702 spin_unlock_irqrestore(&rdev->ih.lock, flags);
1703 return IRQ_NONE;
1704 }
1705
1706restart_ih:
1707 /* display interrupts */
1708 evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
1709 &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
1710
1711 rdev->ih.wptr = wptr;
1712 while (rptr != wptr) {
1713 /* wptr/rptr are in bytes! */
1714 ring_index = rptr / 4;
1715 src_id = rdev->ih.ring[ring_index] & 0xff;
1716 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
1717
1718 switch (src_id) {
1719 case 1: /* D1 vblank/vline */
1720 switch (src_data) {
1721 case 0: /* D1 vblank */
1722 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
1723 drm_handle_vblank(rdev->ddev, 0);
1724 wake_up(&rdev->irq.vblank_queue);
1725 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
1726 DRM_DEBUG("IH: D1 vblank\n");
1727 }
1728 break;
1729 case 1: /* D1 vline */
1730 if (disp_int & LB_D1_VLINE_INTERRUPT) {
1731 disp_int &= ~LB_D1_VLINE_INTERRUPT;
1732 DRM_DEBUG("IH: D1 vline\n");
1733 }
1734 break;
1735 default:
1736 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1737 break;
1738 }
1739 break;
1740 case 2: /* D2 vblank/vline */
1741 switch (src_data) {
1742 case 0: /* D2 vblank */
1743 if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
1744 drm_handle_vblank(rdev->ddev, 1);
1745 wake_up(&rdev->irq.vblank_queue);
1746 disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
1747 DRM_DEBUG("IH: D2 vblank\n");
1748 }
1749 break;
1750 case 1: /* D2 vline */
1751 if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
1752 disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
1753 DRM_DEBUG("IH: D2 vline\n");
1754 }
1755 break;
1756 default:
1757 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1758 break;
1759 }
1760 break;
1761 case 3: /* D3 vblank/vline */
1762 switch (src_data) {
1763 case 0: /* D3 vblank */
1764 if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
1765 drm_handle_vblank(rdev->ddev, 2);
1766 wake_up(&rdev->irq.vblank_queue);
1767 disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
1768 DRM_DEBUG("IH: D3 vblank\n");
1769 }
1770 break;
1771 case 1: /* D3 vline */
1772 if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
1773 disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
1774 DRM_DEBUG("IH: D3 vline\n");
1775 }
1776 break;
1777 default:
1778 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1779 break;
1780 }
1781 break;
1782 case 4: /* D4 vblank/vline */
1783 switch (src_data) {
1784 case 0: /* D4 vblank */
1785 if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
1786 drm_handle_vblank(rdev->ddev, 3);
1787 wake_up(&rdev->irq.vblank_queue);
1788 disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
1789 DRM_DEBUG("IH: D4 vblank\n");
1790 }
1791 break;
1792 case 1: /* D4 vline */
1793 if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
1794 disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
1795 DRM_DEBUG("IH: D4 vline\n");
1796 }
1797 break;
1798 default:
1799 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1800 break;
1801 }
1802 break;
1803 case 5: /* D5 vblank/vline */
1804 switch (src_data) {
1805 case 0: /* D5 vblank */
1806 if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
1807 drm_handle_vblank(rdev->ddev, 4);
1808 wake_up(&rdev->irq.vblank_queue);
1809 disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
1810 DRM_DEBUG("IH: D5 vblank\n");
1811 }
1812 break;
1813 case 1: /* D5 vline */
1814 if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
1815 disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
1816 DRM_DEBUG("IH: D5 vline\n");
1817 }
1818 break;
1819 default:
1820 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1821 break;
1822 }
1823 break;
1824 case 6: /* D6 vblank/vline */
1825 switch (src_data) {
1826 case 0: /* D6 vblank */
1827 if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
1828 drm_handle_vblank(rdev->ddev, 5);
1829 wake_up(&rdev->irq.vblank_queue);
1830 disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
1831 DRM_DEBUG("IH: D6 vblank\n");
1832 }
1833 break;
1834 case 1: /* D6 vline */
1835 if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
1836 disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
1837 DRM_DEBUG("IH: D6 vline\n");
1838 }
1839 break;
1840 default:
1841 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1842 break;
1843 }
1844 break;
1845 case 42: /* HPD hotplug */
1846 switch (src_data) {
1847 case 0:
1848 if (disp_int & DC_HPD1_INTERRUPT) {
1849 disp_int &= ~DC_HPD1_INTERRUPT;
1850 queue_hotplug = true;
1851 DRM_DEBUG("IH: HPD1\n");
1852 }
1853 break;
1854 case 1:
1855 if (disp_int_cont & DC_HPD2_INTERRUPT) {
1856 disp_int_cont &= ~DC_HPD2_INTERRUPT;
1857 queue_hotplug = true;
1858 DRM_DEBUG("IH: HPD2\n");
1859 }
1860 break;
1861 case 2:
1862 if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
1863 disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
1864 queue_hotplug = true;
1865 DRM_DEBUG("IH: HPD3\n");
1866 }
1867 break;
1868 case 3:
1869 if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
1870 disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
1871 queue_hotplug = true;
1872 DRM_DEBUG("IH: HPD4\n");
1873 }
1874 break;
1875 case 4:
1876 if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
1877 disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
1878 queue_hotplug = true;
1879 DRM_DEBUG("IH: HPD5\n");
1880 }
1881 break;
1882 case 5:
1883 if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
1884 disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
1885 queue_hotplug = true;
1886 DRM_DEBUG("IH: HPD6\n");
1887 }
1888 break;
1889 default:
1890 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1891 break;
1892 }
1893 break;
1894 case 176: /* CP_INT in ring buffer */
1895 case 177: /* CP_INT in IB1 */
1896 case 178: /* CP_INT in IB2 */
1897 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
1898 radeon_fence_process(rdev);
1899 break;
1900 case 181: /* CP EOP event */
1901 DRM_DEBUG("IH: CP EOP\n");
1902 break;
1903 case 233: /* GUI IDLE */
1904 DRM_DEBUG("IH: GUI idle\n");
1905 rdev->pm.gui_idle = true;
1906 wake_up(&rdev->irq.idle_queue);
1907 break;
1908 default:
1909 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1910 break;
1911 }
1912
1913 /* wptr/rptr are in bytes! */
1914 rptr += 16;
1915 rptr &= rdev->ih.ptr_mask;
1916 }
1917 /* make sure wptr hasn't changed while processing */
1918 wptr = evergreen_get_ih_wptr(rdev);
1919 if (wptr != rdev->ih.wptr)
1920 goto restart_ih;
1921 if (queue_hotplug)
1922 queue_work(rdev->wq, &rdev->hotplug_work);
1923 rdev->ih.rptr = rptr;
1924 WREG32(IH_RB_RPTR, rdev->ih.rptr);
1925 spin_unlock_irqrestore(&rdev->ih.lock, flags);
1926 return IRQ_HANDLED;
1927}
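/*
 * Editor's note: the restart_ih loop closes the race where the GPU posts
 * new vectors while the ring is being drained: after rptr catches up,
 * wptr is re-read and processing restarts if it moved, and only then is
 * IH_RB_RPTR written back and the lock dropped.
 */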
1928
496static int evergreen_startup(struct radeon_device *rdev) 1929static int evergreen_startup(struct radeon_device *rdev)
497{ 1930{
498#if 0
499 int r; 1931 int r;
500 1932
501 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 1933 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
@@ -505,17 +1937,15 @@ static int evergreen_startup(struct radeon_device *rdev)
505 return r; 1937 return r;
506 } 1938 }
507 } 1939 }
508#endif 1940
509 evergreen_mc_program(rdev); 1941 evergreen_mc_program(rdev);
510#if 0
511 if (rdev->flags & RADEON_IS_AGP) { 1942 if (rdev->flags & RADEON_IS_AGP) {
512 evergreem_agp_enable(rdev); 1943 evergreen_agp_enable(rdev);
513 } else { 1944 } else {
514 r = evergreen_pcie_gart_enable(rdev); 1945 r = evergreen_pcie_gart_enable(rdev);
515 if (r) 1946 if (r)
516 return r; 1947 return r;
517 } 1948 }
518#endif
519 evergreen_gpu_init(rdev); 1949 evergreen_gpu_init(rdev);
520#if 0 1950#if 0
521 if (!rdev->r600_blit.shader_obj) { 1951 if (!rdev->r600_blit.shader_obj) {
@@ -536,6 +1966,7 @@ static int evergreen_startup(struct radeon_device *rdev)
536 DRM_ERROR("failed to pin blit object %d\n", r); 1966 DRM_ERROR("failed to pin blit object %d\n", r);
537 return r; 1967 return r;
538 } 1968 }
1969#endif
539 1970
540 /* Enable IRQ */ 1971 /* Enable IRQ */
541 r = r600_irq_init(rdev); 1972 r = r600_irq_init(rdev);
@@ -544,7 +1975,7 @@ static int evergreen_startup(struct radeon_device *rdev)
544 radeon_irq_kms_fini(rdev); 1975 radeon_irq_kms_fini(rdev);
545 return r; 1976 return r;
546 } 1977 }
547 r600_irq_set(rdev); 1978 evergreen_irq_set(rdev);
548 1979
549 r = radeon_ring_init(rdev, rdev->cp.ring_size); 1980 r = radeon_ring_init(rdev, rdev->cp.ring_size);
550 if (r) 1981 if (r)
@@ -552,12 +1983,12 @@ static int evergreen_startup(struct radeon_device *rdev)
552 r = evergreen_cp_load_microcode(rdev); 1983 r = evergreen_cp_load_microcode(rdev);
553 if (r) 1984 if (r)
554 return r; 1985 return r;
555 r = r600_cp_resume(rdev); 1986 r = evergreen_cp_resume(rdev);
556 if (r) 1987 if (r)
557 return r; 1988 return r;
558 /* write back buffer are not vital so don't worry about failure */ 1989 /* write back buffer are not vital so don't worry about failure */
559 r600_wb_enable(rdev); 1990 r600_wb_enable(rdev);
560#endif 1991
561 return 0; 1992 return 0;
562} 1993}
563 1994
@@ -582,13 +2013,13 @@ int evergreen_resume(struct radeon_device *rdev)
582 DRM_ERROR("r600 startup failed on resume\n"); 2013 DRM_ERROR("r600 startup failed on resume\n");
583 return r; 2014 return r;
584 } 2015 }
585#if 0 2016
586 r = r600_ib_test(rdev); 2017 r = r600_ib_test(rdev);
587 if (r) { 2018 if (r) {
588 DRM_ERROR("radeon: failed testing IB (%d).\n", r); 2019 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
589 return r; 2020 return r;
590 } 2021 }
591#endif 2022
592 return r; 2023 return r;
593 2024
594} 2025}
@@ -597,12 +2028,14 @@ int evergreen_suspend(struct radeon_device *rdev)
597{ 2028{
598#if 0 2029#if 0
599 int r; 2030 int r;
600 2031#endif
601 /* FIXME: we should wait for ring to be empty */ 2032 /* FIXME: we should wait for ring to be empty */
602 r700_cp_stop(rdev); 2033 r700_cp_stop(rdev);
603 rdev->cp.ready = false; 2034 rdev->cp.ready = false;
2035 evergreen_irq_suspend(rdev);
604 r600_wb_disable(rdev); 2036 r600_wb_disable(rdev);
605 evergreen_pcie_gart_disable(rdev); 2037 evergreen_pcie_gart_disable(rdev);
2038#if 0
606 /* unpin shaders bo */ 2039 /* unpin shaders bo */
607 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 2040 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
608 if (likely(r == 0)) { 2041 if (likely(r == 0)) {
@@ -682,8 +2115,6 @@ int evergreen_init(struct radeon_device *rdev)
682 r = radeon_clocks_init(rdev); 2115 r = radeon_clocks_init(rdev);
683 if (r) 2116 if (r)
684 return r; 2117 return r;
685 /* Initialize power management */
686 radeon_pm_init(rdev);
687 /* Fence driver */ 2118 /* Fence driver */
688 r = radeon_fence_driver_init(rdev); 2119 r = radeon_fence_driver_init(rdev);
689 if (r) 2120 if (r)
@@ -702,7 +2133,7 @@ int evergreen_init(struct radeon_device *rdev)
702 r = radeon_bo_init(rdev); 2133 r = radeon_bo_init(rdev);
703 if (r) 2134 if (r)
704 return r; 2135 return r;
705#if 0 2136
706 r = radeon_irq_kms_init(rdev); 2137 r = radeon_irq_kms_init(rdev);
707 if (r) 2138 if (r)
708 return r; 2139 return r;
@@ -716,14 +2147,16 @@ int evergreen_init(struct radeon_device *rdev)
716 r = r600_pcie_gart_init(rdev); 2147 r = r600_pcie_gart_init(rdev);
717 if (r) 2148 if (r)
718 return r; 2149 return r;
719#endif 2150
720 rdev->accel_working = false; 2151 rdev->accel_working = false;
721 r = evergreen_startup(rdev); 2152 r = evergreen_startup(rdev);
722 if (r) { 2153 if (r) {
723 evergreen_suspend(rdev); 2154 dev_err(rdev->dev, "disabling GPU acceleration\n");
724 /*r600_wb_fini(rdev);*/ 2155 r700_cp_fini(rdev);
725 /*radeon_ring_fini(rdev);*/ 2156 r600_wb_fini(rdev);
726 /*evergreen_pcie_gart_fini(rdev);*/ 2157 r600_irq_fini(rdev);
2158 radeon_irq_kms_fini(rdev);
2159 evergreen_pcie_gart_fini(rdev);
727 rdev->accel_working = false; 2160 rdev->accel_working = false;
728 } 2161 }
729 if (rdev->accel_working) { 2162 if (rdev->accel_working) {
@@ -743,16 +2176,12 @@ int evergreen_init(struct radeon_device *rdev)
743 2176
744void evergreen_fini(struct radeon_device *rdev) 2177void evergreen_fini(struct radeon_device *rdev)
745{ 2178{
746 radeon_pm_fini(rdev); 2179 /*r600_blit_fini(rdev);*/
747 evergreen_suspend(rdev); 2180 r700_cp_fini(rdev);
748#if 0 2181 r600_wb_fini(rdev);
749 r600_blit_fini(rdev);
750 r600_irq_fini(rdev); 2182 r600_irq_fini(rdev);
751 radeon_irq_kms_fini(rdev); 2183 radeon_irq_kms_fini(rdev);
752 radeon_ring_fini(rdev);
753 r600_wb_fini(rdev);
754 evergreen_pcie_gart_fini(rdev); 2184 evergreen_pcie_gart_fini(rdev);
755#endif
756 radeon_gem_fini(rdev); 2185 radeon_gem_fini(rdev);
757 radeon_fence_driver_fini(rdev); 2186 radeon_fence_driver_fini(rdev);
758 radeon_clocks_fini(rdev); 2187 radeon_clocks_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index f7c7c9643433..af86af836f13 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -164,8 +164,12 @@
164#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0) 164#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0)
165 165
166/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */ 166/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
167#define EVERGREEN_CRTC_V_BLANK_START_END 0x6e34
167#define EVERGREEN_CRTC_CONTROL 0x6e70 168#define EVERGREEN_CRTC_CONTROL 0x6e70
168# define EVERGREEN_CRTC_MASTER_EN (1 << 0) 169# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
170# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
171#define EVERGREEN_CRTC_STATUS 0x6e8c
172#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
169#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 173#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
170 174
171#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 175#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
new file mode 100644
index 000000000000..93e9e17ad54a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -0,0 +1,556 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef EVERGREEND_H
25#define EVERGREEND_H
26
27#define EVERGREEN_MAX_SH_GPRS 256
28#define EVERGREEN_MAX_TEMP_GPRS 16
29#define EVERGREEN_MAX_SH_THREADS 256
30#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096
31#define EVERGREEN_MAX_FRC_EOV_CNT 16384
32#define EVERGREEN_MAX_BACKENDS 8
33#define EVERGREEN_MAX_BACKENDS_MASK 0xFF
34#define EVERGREEN_MAX_SIMDS 16
35#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF
36#define EVERGREEN_MAX_PIPES 8
37#define EVERGREEN_MAX_PIPES_MASK 0xFF
38#define EVERGREEN_MAX_LDS_NUM 0xFFFF
39
40/* Registers */
41
42#define RCU_IND_INDEX 0x100
43#define RCU_IND_DATA 0x104
44
45#define GRBM_GFX_INDEX 0x802C
46#define INSTANCE_INDEX(x) ((x) << 0)
47#define SE_INDEX(x) ((x) << 16)
48#define INSTANCE_BROADCAST_WRITES (1 << 30)
49#define SE_BROADCAST_WRITES (1 << 31)
50#define RLC_GFX_INDEX 0x3fC4
51#define CC_GC_SHADER_PIPE_CONFIG 0x8950
52#define WRITE_DIS (1 << 0)
53#define CC_RB_BACKEND_DISABLE 0x98F4
54#define BACKEND_DISABLE(x) ((x) << 16)
55#define GB_ADDR_CONFIG 0x98F8
56#define NUM_PIPES(x) ((x) << 0)
57#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
58#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
59#define NUM_SHADER_ENGINES(x) ((x) << 12)
60#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
61#define NUM_GPUS(x) ((x) << 20)
62#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
63#define ROW_SIZE(x) ((x) << 28)
64#define GB_BACKEND_MAP 0x98FC
65#define DMIF_ADDR_CONFIG 0xBD4
66#define HDP_ADDR_CONFIG 0x2F48
67
68#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
69#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
70
71#define CGTS_SYS_TCC_DISABLE 0x3F90
72#define CGTS_TCC_DISABLE 0x9148
73#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
74#define CGTS_USER_TCC_DISABLE 0x914C
75
76#define CONFIG_MEMSIZE 0x5428
77
78#define CP_ME_CNTL 0x86D8
79#define CP_ME_HALT (1 << 28)
80#define CP_PFP_HALT (1 << 26)
81#define CP_ME_RAM_DATA 0xC160
82#define CP_ME_RAM_RADDR 0xC158
83#define CP_ME_RAM_WADDR 0xC15C
84#define CP_MEQ_THRESHOLDS 0x8764
85#define STQ_SPLIT(x) ((x) << 0)
86#define CP_PERFMON_CNTL 0x87FC
87#define CP_PFP_UCODE_ADDR 0xC150
88#define CP_PFP_UCODE_DATA 0xC154
89#define CP_QUEUE_THRESHOLDS 0x8760
90#define ROQ_IB1_START(x) ((x) << 0)
91#define ROQ_IB2_START(x) ((x) << 8)
92#define CP_RB_BASE 0xC100
93#define CP_RB_CNTL 0xC104
94#define RB_BUFSZ(x) ((x) << 0)
95#define RB_BLKSZ(x) ((x) << 8)
96#define RB_NO_UPDATE (1 << 27)
97#define RB_RPTR_WR_ENA (1 << 31)
98#define BUF_SWAP_32BIT (2 << 16)
99#define CP_RB_RPTR 0x8700
100#define CP_RB_RPTR_ADDR 0xC10C
101#define CP_RB_RPTR_ADDR_HI 0xC110
102#define CP_RB_RPTR_WR 0xC108
103#define CP_RB_WPTR 0xC114
104#define CP_RB_WPTR_ADDR 0xC118
105#define CP_RB_WPTR_ADDR_HI 0xC11C
106#define CP_RB_WPTR_DELAY 0x8704
107#define CP_SEM_WAIT_TIMER 0x85BC
108#define CP_DEBUG 0xC1FC
109
110
111#define GC_USER_SHADER_PIPE_CONFIG 0x8954
112#define INACTIVE_QD_PIPES(x) ((x) << 8)
113#define INACTIVE_QD_PIPES_MASK 0x0000FF00
114#define INACTIVE_SIMDS(x) ((x) << 16)
115#define INACTIVE_SIMDS_MASK 0x00FF0000
116
117#define GRBM_CNTL 0x8000
118#define GRBM_READ_TIMEOUT(x) ((x) << 0)
119#define GRBM_SOFT_RESET 0x8020
120#define SOFT_RESET_CP (1 << 0)
121#define SOFT_RESET_CB (1 << 1)
122#define SOFT_RESET_DB (1 << 3)
123#define SOFT_RESET_PA (1 << 5)
124#define SOFT_RESET_SC (1 << 6)
125#define SOFT_RESET_SPI (1 << 8)
126#define SOFT_RESET_SH (1 << 9)
127#define SOFT_RESET_SX (1 << 10)
128#define SOFT_RESET_TC (1 << 11)
129#define SOFT_RESET_TA (1 << 12)
130#define SOFT_RESET_VC (1 << 13)
131#define SOFT_RESET_VGT (1 << 14)
132
133#define GRBM_STATUS 0x8010
134#define CMDFIFO_AVAIL_MASK 0x0000000F
135#define SRBM_RQ_PENDING (1 << 5)
136#define CF_RQ_PENDING (1 << 7)
137#define PF_RQ_PENDING (1 << 8)
138#define GRBM_EE_BUSY (1 << 10)
139#define SX_CLEAN (1 << 11)
140#define DB_CLEAN (1 << 12)
141#define CB_CLEAN (1 << 13)
142#define TA_BUSY (1 << 14)
143#define VGT_BUSY_NO_DMA (1 << 16)
144#define VGT_BUSY (1 << 17)
145#define SX_BUSY (1 << 20)
146#define SH_BUSY (1 << 21)
147#define SPI_BUSY (1 << 22)
148#define SC_BUSY (1 << 24)
149#define PA_BUSY (1 << 25)
150#define DB_BUSY (1 << 26)
151#define CP_COHERENCY_BUSY (1 << 28)
152#define CP_BUSY (1 << 29)
153#define CB_BUSY (1 << 30)
154#define GUI_ACTIVE (1 << 31)
155#define GRBM_STATUS_SE0 0x8014
156#define GRBM_STATUS_SE1 0x8018
157#define SE_SX_CLEAN (1 << 0)
158#define SE_DB_CLEAN (1 << 1)
159#define SE_CB_CLEAN (1 << 2)
160#define SE_TA_BUSY (1 << 25)
161#define SE_SX_BUSY (1 << 26)
162#define SE_SPI_BUSY (1 << 27)
163#define SE_SH_BUSY (1 << 28)
164#define SE_SC_BUSY (1 << 29)
165#define SE_DB_BUSY (1 << 30)
166#define SE_CB_BUSY (1 << 31)
167
168#define HDP_HOST_PATH_CNTL 0x2C00
169#define HDP_NONSURFACE_BASE 0x2C04
170#define HDP_NONSURFACE_INFO 0x2C08
171#define HDP_NONSURFACE_SIZE 0x2C0C
172#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
173#define HDP_TILING_CONFIG 0x2F3C
174
175#define MC_SHARED_CHMAP 0x2004
176#define NOOFCHAN_SHIFT 12
177#define NOOFCHAN_MASK 0x00003000
178
179#define MC_ARB_RAMCFG 0x2760
180#define NOOFBANK_SHIFT 0
181#define NOOFBANK_MASK 0x00000003
182#define NOOFRANK_SHIFT 2
183#define NOOFRANK_MASK 0x00000004
184#define NOOFROWS_SHIFT 3
185#define NOOFROWS_MASK 0x00000038
186#define NOOFCOLS_SHIFT 6
187#define NOOFCOLS_MASK 0x000000C0
188#define CHANSIZE_SHIFT 8
189#define CHANSIZE_MASK 0x00000100
190#define BURSTLENGTH_SHIFT 9
191#define BURSTLENGTH_MASK 0x00000200
192#define CHANSIZE_OVERRIDE (1 << 11)
193#define MC_VM_AGP_TOP 0x2028
194#define MC_VM_AGP_BOT 0x202C
195#define MC_VM_AGP_BASE 0x2030
196#define MC_VM_FB_LOCATION 0x2024
197#define MC_VM_MB_L1_TLB0_CNTL 0x2234
198#define MC_VM_MB_L1_TLB1_CNTL 0x2238
199#define MC_VM_MB_L1_TLB2_CNTL 0x223C
200#define MC_VM_MB_L1_TLB3_CNTL 0x2240
201#define ENABLE_L1_TLB (1 << 0)
202#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
203#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
204#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
205#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
206#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
207#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
208#define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15)
209#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18)
210#define MC_VM_MD_L1_TLB0_CNTL 0x2654
211#define MC_VM_MD_L1_TLB1_CNTL 0x2658
212#define MC_VM_MD_L1_TLB2_CNTL 0x265C
213#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
214#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
215#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
216
217#define PA_CL_ENHANCE 0x8A14
218#define CLIP_VTX_REORDER_ENA (1 << 0)
219#define NUM_CLIP_SEQ(x) ((x) << 1)
220#define PA_SC_AA_CONFIG 0x28C04
221#define PA_SC_CLIPRECT_RULE 0x2820C
222#define PA_SC_EDGERULE 0x28230
223#define PA_SC_FIFO_SIZE 0x8BCC
224#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
225#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
226#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
227#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
228#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
229#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
230#define PA_SC_LINE_STIPPLE 0x28A0C
231#define PA_SC_LINE_STIPPLE_STATE 0x8B10
232
233#define SCRATCH_REG0 0x8500
234#define SCRATCH_REG1 0x8504
235#define SCRATCH_REG2 0x8508
236#define SCRATCH_REG3 0x850C
237#define SCRATCH_REG4 0x8510
238#define SCRATCH_REG5 0x8514
239#define SCRATCH_REG6 0x8518
240#define SCRATCH_REG7 0x851C
241#define SCRATCH_UMSK 0x8540
242#define SCRATCH_ADDR 0x8544
243
244#define SMX_DC_CTL0 0xA020
245#define USE_HASH_FUNCTION (1 << 0)
246#define NUMBER_OF_SETS(x) ((x) << 1)
247#define FLUSH_ALL_ON_EVENT (1 << 10)
248#define STALL_ON_EVENT (1 << 11)
249#define SMX_EVENT_CTL 0xA02C
250#define ES_FLUSH_CTL(x) ((x) << 0)
251#define GS_FLUSH_CTL(x) ((x) << 3)
252#define ACK_FLUSH_CTL(x) ((x) << 6)
253#define SYNC_FLUSH_CTL (1 << 8)
254
255#define SPI_CONFIG_CNTL 0x9100
256#define GPR_WRITE_PRIORITY(x) ((x) << 0)
257#define SPI_CONFIG_CNTL_1 0x913C
258#define VTX_DONE_DELAY(x) ((x) << 0)
259#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
260#define SPI_INPUT_Z 0x286D8
261#define SPI_PS_IN_CONTROL_0 0x286CC
262#define NUM_INTERP(x) ((x)<<0)
263#define POSITION_ENA (1<<8)
264#define POSITION_CENTROID (1<<9)
265#define POSITION_ADDR(x) ((x)<<10)
266#define PARAM_GEN(x) ((x)<<15)
267#define PARAM_GEN_ADDR(x) ((x)<<19)
268#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
269#define PERSP_GRADIENT_ENA (1<<28)
270#define LINEAR_GRADIENT_ENA (1<<29)
271#define POSITION_SAMPLE (1<<30)
272#define BARYC_AT_SAMPLE_ENA (1<<31)
273
274#define SQ_CONFIG 0x8C00
275#define VC_ENABLE (1 << 0)
276#define EXPORT_SRC_C (1 << 1)
277#define CS_PRIO(x) ((x) << 18)
278#define LS_PRIO(x) ((x) << 20)
279#define HS_PRIO(x) ((x) << 22)
280#define PS_PRIO(x) ((x) << 24)
281#define VS_PRIO(x) ((x) << 26)
282#define GS_PRIO(x) ((x) << 28)
283#define ES_PRIO(x) ((x) << 30)
284#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
285#define NUM_PS_GPRS(x) ((x) << 0)
286#define NUM_VS_GPRS(x) ((x) << 16)
287#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
288#define SQ_GPR_RESOURCE_MGMT_2 0x8C08
289#define NUM_GS_GPRS(x) ((x) << 0)
290#define NUM_ES_GPRS(x) ((x) << 16)
291#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
292#define NUM_HS_GPRS(x) ((x) << 0)
293#define NUM_LS_GPRS(x) ((x) << 16)
294#define SQ_THREAD_RESOURCE_MGMT 0x8C18
295#define NUM_PS_THREADS(x) ((x) << 0)
296#define NUM_VS_THREADS(x) ((x) << 8)
297#define NUM_GS_THREADS(x) ((x) << 16)
298#define NUM_ES_THREADS(x) ((x) << 24)
299#define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C
300#define NUM_HS_THREADS(x) ((x) << 0)
301#define NUM_LS_THREADS(x) ((x) << 8)
302#define SQ_STACK_RESOURCE_MGMT_1 0x8C20
303#define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
304#define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
305#define SQ_STACK_RESOURCE_MGMT_2 0x8C24
306#define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
307#define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
308#define SQ_STACK_RESOURCE_MGMT_3 0x8C28
309#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
310#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
311#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
312#define SQ_LDS_RESOURCE_MGMT 0x8E2C
313
314#define SQ_MS_FIFO_SIZES 0x8CF0
315#define CACHE_FIFO_SIZE(x) ((x) << 0)
316#define FETCH_FIFO_HIWATER(x) ((x) << 8)
317#define DONE_FIFO_HIWATER(x) ((x) << 16)
318#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
319
320#define SX_DEBUG_1 0x9058
321#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
322#define SX_EXPORT_BUFFER_SIZES 0x900C
323#define COLOR_BUFFER_SIZE(x) ((x) << 0)
324#define POSITION_BUFFER_SIZE(x) ((x) << 8)
325#define SMX_BUFFER_SIZE(x) ((x) << 16)
326#define SX_MISC 0x28350
327
328#define CB_PERF_CTR0_SEL_0 0x9A20
329#define CB_PERF_CTR0_SEL_1 0x9A24
330#define CB_PERF_CTR1_SEL_0 0x9A28
331#define CB_PERF_CTR1_SEL_1 0x9A2C
332#define CB_PERF_CTR2_SEL_0 0x9A30
333#define CB_PERF_CTR2_SEL_1 0x9A34
334#define CB_PERF_CTR3_SEL_0 0x9A38
335#define CB_PERF_CTR3_SEL_1 0x9A3C
336
337#define TA_CNTL_AUX 0x9508
338#define DISABLE_CUBE_WRAP (1 << 0)
339#define DISABLE_CUBE_ANISO (1 << 1)
340#define SYNC_GRADIENT (1 << 24)
341#define SYNC_WALKER (1 << 25)
342#define SYNC_ALIGNER (1 << 26)
343
344#define VGT_CACHE_INVALIDATION 0x88C4
345#define CACHE_INVALIDATION(x) ((x) << 0)
346#define VC_ONLY 0
347#define TC_ONLY 1
348#define VC_AND_TC 2
349#define AUTO_INVLD_EN(x) ((x) << 6)
350#define NO_AUTO 0
351#define ES_AUTO 1
352#define GS_AUTO 2
353#define ES_AND_GS_AUTO 3
354#define VGT_GS_VERTEX_REUSE 0x88D4
355#define VGT_NUM_INSTANCES 0x8974
356#define VGT_OUT_DEALLOC_CNTL 0x28C5C
357#define DEALLOC_DIST_MASK 0x0000007F
358#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
359#define VTX_REUSE_DEPTH_MASK 0x000000FF
360
361#define VM_CONTEXT0_CNTL 0x1410
362#define ENABLE_CONTEXT (1 << 0)
363#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
364#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
365#define VM_CONTEXT1_CNTL 0x1414
366#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
367#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
368#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
369#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
370#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
371#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
372#define RESPONSE_TYPE_MASK 0x000000F0
373#define RESPONSE_TYPE_SHIFT 4
374#define VM_L2_CNTL 0x1400
375#define ENABLE_L2_CACHE (1 << 0)
376#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
377#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
378#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
379#define VM_L2_CNTL2 0x1404
380#define INVALIDATE_ALL_L1_TLBS (1 << 0)
381#define INVALIDATE_L2_CACHE (1 << 1)
382#define VM_L2_CNTL3 0x1408
383#define BANK_SELECT(x) ((x) << 0)
384#define CACHE_UPDATE_MODE(x) ((x) << 6)
385#define VM_L2_STATUS 0x140C
386#define L2_BUSY (1 << 0)
387
388#define WAIT_UNTIL 0x8040
389
390#define SRBM_STATUS 0x0E50
391#define SRBM_SOFT_RESET 0x0E60
392#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
393#define SOFT_RESET_BIF (1 << 1)
394#define SOFT_RESET_CG (1 << 2)
395#define SOFT_RESET_DC (1 << 5)
396#define SOFT_RESET_GRBM (1 << 8)
397#define SOFT_RESET_HDP (1 << 9)
398#define SOFT_RESET_IH (1 << 10)
399#define SOFT_RESET_MC (1 << 11)
400#define SOFT_RESET_RLC (1 << 13)
401#define SOFT_RESET_ROM (1 << 14)
402#define SOFT_RESET_SEM (1 << 15)
403#define SOFT_RESET_VMC (1 << 17)
404#define SOFT_RESET_TST (1 << 21)
405#define SOFT_RESET_REGBB (1 << 22)
406#define SOFT_RESET_ORB (1 << 23)
407
408#define IH_RB_CNTL 0x3e00
409# define IH_RB_ENABLE (1 << 0)
410# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
411# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
412# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
413# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
414# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
415# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
416#define IH_RB_BASE 0x3e04
417#define IH_RB_RPTR 0x3e08
418#define IH_RB_WPTR 0x3e0c
419# define RB_OVERFLOW (1 << 0)
420# define WPTR_OFFSET_MASK 0x3fffc
421#define IH_RB_WPTR_ADDR_HI 0x3e10
422#define IH_RB_WPTR_ADDR_LO 0x3e14
423#define IH_CNTL 0x3e18
424# define ENABLE_INTR (1 << 0)
425# define IH_MC_SWAP(x) ((x) << 2)
426# define IH_MC_SWAP_NONE 0
427# define IH_MC_SWAP_16BIT 1
428# define IH_MC_SWAP_32BIT 2
429# define IH_MC_SWAP_64BIT 3
430# define RPTR_REARM (1 << 4)
431# define MC_WRREQ_CREDIT(x) ((x) << 15)
432# define MC_WR_CLEAN_CNT(x) ((x) << 20)
433
434#define CP_INT_CNTL 0xc124
435# define CNTX_BUSY_INT_ENABLE (1 << 19)
436# define CNTX_EMPTY_INT_ENABLE (1 << 20)
437# define SCRATCH_INT_ENABLE (1 << 25)
438# define TIME_STAMP_INT_ENABLE (1 << 26)
439# define IB2_INT_ENABLE (1 << 29)
440# define IB1_INT_ENABLE (1 << 30)
441# define RB_INT_ENABLE (1 << 31)
442#define CP_INT_STATUS 0xc128
443# define SCRATCH_INT_STAT (1 << 25)
444# define TIME_STAMP_INT_STAT (1 << 26)
445# define IB2_INT_STAT (1 << 29)
446# define IB1_INT_STAT (1 << 30)
447# define RB_INT_STAT (1 << 31)
448
449#define GRBM_INT_CNTL 0x8060
450# define RDERR_INT_ENABLE (1 << 0)
451# define GUI_IDLE_INT_ENABLE (1 << 19)
452
453/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
454#define CRTC_STATUS_FRAME_COUNT 0x6e98
455
456/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
457#define VLINE_STATUS 0x6bb8
458# define VLINE_OCCURRED (1 << 0)
459# define VLINE_ACK (1 << 4)
460# define VLINE_STAT (1 << 12)
461# define VLINE_INTERRUPT (1 << 16)
462# define VLINE_INTERRUPT_TYPE (1 << 17)
463/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
464#define VBLANK_STATUS 0x6bbc
465# define VBLANK_OCCURRED (1 << 0)
466# define VBLANK_ACK (1 << 4)
467# define VBLANK_STAT (1 << 12)
468# define VBLANK_INTERRUPT (1 << 16)
469# define VBLANK_INTERRUPT_TYPE (1 << 17)
470
471/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
472#define INT_MASK 0x6b40
473# define VBLANK_INT_MASK (1 << 0)
474# define VLINE_INT_MASK (1 << 4)
475
476#define DISP_INTERRUPT_STATUS 0x60f4
477# define LB_D1_VLINE_INTERRUPT (1 << 2)
478# define LB_D1_VBLANK_INTERRUPT (1 << 3)
479# define DC_HPD1_INTERRUPT (1 << 17)
480# define DC_HPD1_RX_INTERRUPT (1 << 18)
481# define DACA_AUTODETECT_INTERRUPT (1 << 22)
482# define DACB_AUTODETECT_INTERRUPT (1 << 23)
483# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
484# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
485#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
486# define LB_D2_VLINE_INTERRUPT (1 << 2)
487# define LB_D2_VBLANK_INTERRUPT (1 << 3)
488# define DC_HPD2_INTERRUPT (1 << 17)
489# define DC_HPD2_RX_INTERRUPT (1 << 18)
490# define DISP_TIMER_INTERRUPT (1 << 24)
491#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
492# define LB_D3_VLINE_INTERRUPT (1 << 2)
493# define LB_D3_VBLANK_INTERRUPT (1 << 3)
494# define DC_HPD3_INTERRUPT (1 << 17)
495# define DC_HPD3_RX_INTERRUPT (1 << 18)
496#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
497# define LB_D4_VLINE_INTERRUPT (1 << 2)
498# define LB_D4_VBLANK_INTERRUPT (1 << 3)
499# define DC_HPD4_INTERRUPT (1 << 17)
500# define DC_HPD4_RX_INTERRUPT (1 << 18)
501#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
502# define LB_D5_VLINE_INTERRUPT (1 << 2)
503# define LB_D5_VBLANK_INTERRUPT (1 << 3)
504# define DC_HPD5_INTERRUPT (1 << 17)
505# define DC_HPD5_RX_INTERRUPT (1 << 18)
506#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6050
507# define LB_D6_VLINE_INTERRUPT (1 << 2)
508# define LB_D6_VBLANK_INTERRUPT (1 << 3)
509# define DC_HPD6_INTERRUPT (1 << 17)
510# define DC_HPD6_RX_INTERRUPT (1 << 18)
511
512/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
513#define GRPH_INT_STATUS 0x6858
514# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
515# define GRPH_PFLIP_INT_CLEAR (1 << 8)
516/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
517#define GRPH_INT_CONTROL 0x685c
518# define GRPH_PFLIP_INT_MASK (1 << 0)
519# define GRPH_PFLIP_INT_TYPE (1 << 8)
520
521#define DACA_AUTODETECT_INT_CONTROL 0x66c8
522#define DACB_AUTODETECT_INT_CONTROL 0x67c8
523
524#define DC_HPD1_INT_STATUS 0x601c
525#define DC_HPD2_INT_STATUS 0x6028
526#define DC_HPD3_INT_STATUS 0x6034
527#define DC_HPD4_INT_STATUS 0x6040
528#define DC_HPD5_INT_STATUS 0x604c
529#define DC_HPD6_INT_STATUS 0x6058
530# define DC_HPDx_INT_STATUS (1 << 0)
531# define DC_HPDx_SENSE (1 << 1)
532# define DC_HPDx_RX_INT_STATUS (1 << 8)
533
534#define DC_HPD1_INT_CONTROL 0x6020
535#define DC_HPD2_INT_CONTROL 0x602c
536#define DC_HPD3_INT_CONTROL 0x6038
537#define DC_HPD4_INT_CONTROL 0x6044
538#define DC_HPD5_INT_CONTROL 0x6050
539#define DC_HPD6_INT_CONTROL 0x605c
540# define DC_HPDx_INT_ACK (1 << 0)
541# define DC_HPDx_INT_POLARITY (1 << 8)
542# define DC_HPDx_INT_EN (1 << 16)
543# define DC_HPDx_RX_INT_ACK (1 << 20)
544# define DC_HPDx_RX_INT_EN (1 << 24)
545
546#define DC_HPD1_CONTROL 0x6024
547#define DC_HPD2_CONTROL 0x6030
548#define DC_HPD3_CONTROL 0x603c
549#define DC_HPD4_CONTROL 0x6048
550#define DC_HPD5_CONTROL 0x6054
551#define DC_HPD6_CONTROL 0x6060
552# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
553# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
554# define DC_HPDx_EN (1 << 28)
555
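/* Illustrative sketch (not part of the patch): enabling hot-plug detect
 * on pad 1 with the shared DC_HPDx_* field macros above; the connection
 * timer value 0x9c4 is an assumed example, not taken from this patch:
 *
 *	WREG32(DC_HPD1_CONTROL, DC_HPDx_CONNECTION_TIMER(0x9c4) |
 *				DC_HPDx_EN);
 *	WREG32(DC_HPD1_INT_CONTROL, DC_HPDx_INT_POLARITY |
 *				    DC_HPDx_INT_EN);
 */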
556#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cf60c0b3ef15..cc004b05d63e 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -37,6 +37,7 @@
37#include "rs100d.h" 37#include "rs100d.h"
38#include "rv200d.h" 38#include "rv200d.h"
39#include "rv250d.h" 39#include "rv250d.h"
40#include "atom.h"
40 41
41#include <linux/firmware.h> 42#include <linux/firmware.h>
42#include <linux/platform_device.h> 43#include <linux/platform_device.h>
@@ -67,6 +68,264 @@ MODULE_FIRMWARE(FIRMWARE_R520);
67 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 68 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
68 */ 69 */
69 70
71void r100_pm_get_dynpm_state(struct radeon_device *rdev)
72{
73 int i;
74 rdev->pm.dynpm_can_upclock = true;
75 rdev->pm.dynpm_can_downclock = true;
76
77 switch (rdev->pm.dynpm_planned_action) {
78 case DYNPM_ACTION_MINIMUM:
79 rdev->pm.requested_power_state_index = 0;
80 rdev->pm.dynpm_can_downclock = false;
81 break;
82 case DYNPM_ACTION_DOWNCLOCK:
83 if (rdev->pm.current_power_state_index == 0) {
84 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
85 rdev->pm.dynpm_can_downclock = false;
86 } else {
87 if (rdev->pm.active_crtc_count > 1) {
88 for (i = 0; i < rdev->pm.num_power_states; i++) {
89 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
90 continue;
91 else if (i >= rdev->pm.current_power_state_index) {
92 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
93 break;
94 } else {
95 rdev->pm.requested_power_state_index = i;
96 break;
97 }
98 }
99 } else
100 rdev->pm.requested_power_state_index =
101 rdev->pm.current_power_state_index - 1;
102 }
103 /* don't use the power state if crtcs are active and no display flag is set */
104 if ((rdev->pm.active_crtc_count > 0) &&
105 (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
106 RADEON_PM_MODE_NO_DISPLAY)) {
107 rdev->pm.requested_power_state_index++;
108 }
109 break;
110 case DYNPM_ACTION_UPCLOCK:
111 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
112 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
113 rdev->pm.dynpm_can_upclock = false;
114 } else {
115 if (rdev->pm.active_crtc_count > 1) {
116 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
117 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
118 continue;
119 else if (i <= rdev->pm.current_power_state_index) {
120 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
121 break;
122 } else {
123 rdev->pm.requested_power_state_index = i;
124 break;
125 }
126 }
127 } else
128 rdev->pm.requested_power_state_index =
129 rdev->pm.current_power_state_index + 1;
130 }
131 break;
132 case DYNPM_ACTION_DEFAULT:
133 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
134 rdev->pm.dynpm_can_upclock = false;
135 break;
136 case DYNPM_ACTION_NONE:
137 default:
138 DRM_ERROR("Requested mode for undefined action\n");
139 return;
140 }
141 /* only one clock mode per power state */
142 rdev->pm.requested_clock_mode_index = 0;
143
144 DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
145 rdev->pm.power_state[rdev->pm.requested_power_state_index].
146 clock_info[rdev->pm.requested_clock_mode_index].sclk,
147 rdev->pm.power_state[rdev->pm.requested_power_state_index].
148 clock_info[rdev->pm.requested_clock_mode_index].mclk,
149 rdev->pm.power_state[rdev->pm.requested_power_state_index].
150 pcie_lanes);
151}
152
153void r100_pm_init_profile(struct radeon_device *rdev)
154{
155 /* default */
156 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
157 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
158 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
159 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
160 /* low sh */
161 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
162 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
163 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
164 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
165 /* high sh */
166 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
167 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
168 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
169 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
170 /* low mh */
171 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
172 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
173 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
174 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
175 /* high mh */
176 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
177 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
178 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
179 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
180}
181
182void r100_pm_misc(struct radeon_device *rdev)
183{
184 int requested_index = rdev->pm.requested_power_state_index;
185 struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
186 struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
187 u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
188
189 if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
190 if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
191 tmp = RREG32(voltage->gpio.reg);
192 if (voltage->active_high)
193 tmp |= voltage->gpio.mask;
194 else
195 tmp &= ~(voltage->gpio.mask);
196 WREG32(voltage->gpio.reg, tmp);
197 if (voltage->delay)
198 udelay(voltage->delay);
199 } else {
200 tmp = RREG32(voltage->gpio.reg);
201 if (voltage->active_high)
202 tmp &= ~voltage->gpio.mask;
203 else
204 tmp |= voltage->gpio.mask;
205 WREG32(voltage->gpio.reg, tmp);
206 if (voltage->delay)
207 udelay(voltage->delay);
208 }
209 }
210
211 sclk_cntl = RREG32_PLL(SCLK_CNTL);
212 sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
213 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
214 sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
215 sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
216 if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
217 sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
218 if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
219 sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
220 else
221 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
222 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
223 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
224 else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
225 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
226 } else
227 sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
228
229 if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
230 sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
231 if (voltage->delay) {
232 sclk_more_cntl |= VOLTAGE_DROP_SYNC;
233 switch (voltage->delay) {
234 case 33:
235 sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
236 break;
237 case 66:
238 sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
239 break;
240 case 99:
241 sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
242 break;
243 case 132:
244 sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
245 break;
246 }
247 } else
248 sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
249 } else
250 sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
251
252 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
253 sclk_cntl &= ~FORCE_HDP;
254 else
255 sclk_cntl |= FORCE_HDP;
256
257 WREG32_PLL(SCLK_CNTL, sclk_cntl);
258 WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
259 WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
260
261 /* set pcie lanes */
262 if ((rdev->flags & RADEON_IS_PCIE) &&
263 !(rdev->flags & RADEON_IS_IGP) &&
264 rdev->asic->set_pcie_lanes &&
265 (ps->pcie_lanes !=
266 rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
267 radeon_set_pcie_lanes(rdev,
268 ps->pcie_lanes);
269 DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
270 }
271}
272
273void r100_pm_prepare(struct radeon_device *rdev)
274{
275 struct drm_device *ddev = rdev->ddev;
276 struct drm_crtc *crtc;
277 struct radeon_crtc *radeon_crtc;
278 u32 tmp;
279
280 /* disable any active CRTCs */
281 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
282 radeon_crtc = to_radeon_crtc(crtc);
283 if (radeon_crtc->enabled) {
284 if (radeon_crtc->crtc_id) {
285 tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
286 tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
287 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
288 } else {
289 tmp = RREG32(RADEON_CRTC_GEN_CNTL);
290 tmp |= RADEON_CRTC_DISP_REQ_EN_B;
291 WREG32(RADEON_CRTC_GEN_CNTL, tmp);
292 }
293 }
294 }
295}
296
297void r100_pm_finish(struct radeon_device *rdev)
298{
299 struct drm_device *ddev = rdev->ddev;
300 struct drm_crtc *crtc;
301 struct radeon_crtc *radeon_crtc;
302 u32 tmp;
303
304 /* enable any active CRTCs */
305 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
306 radeon_crtc = to_radeon_crtc(crtc);
307 if (radeon_crtc->enabled) {
308 if (radeon_crtc->crtc_id) {
309 tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
310 tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
311 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
312 } else {
313 tmp = RREG32(RADEON_CRTC_GEN_CNTL);
314 tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
315 WREG32(RADEON_CRTC_GEN_CNTL, tmp);
316 }
317 }
318 }
319}
320
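/* Illustrative sketch (not part of the patch): r100_pm_prepare() and
 * r100_pm_finish() are meant to bracket an actual reclock, gating CRTC
 * memory requests while the clocks change. A hypothetical caller,
 * assuming radeon_set_engine_clock() as the reclock helper:
 *
 *	r100_pm_prepare(rdev);
 *	radeon_set_engine_clock(rdev, sclk);
 *	r100_pm_finish(rdev);
 */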
321bool r100_gui_idle(struct radeon_device *rdev)
322{
323 if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
324 return false;
325 else
326 return true;
327}
328
70/* hpd for digital panel detect/disconnect */ 329/* hpd for digital panel detect/disconnect */
71bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 330bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
72{ 331{
@@ -254,6 +513,9 @@ int r100_irq_set(struct radeon_device *rdev)
254 if (rdev->irq.sw_int) { 513 if (rdev->irq.sw_int) {
255 tmp |= RADEON_SW_INT_ENABLE; 514 tmp |= RADEON_SW_INT_ENABLE;
256 } 515 }
516 if (rdev->irq.gui_idle) {
517 tmp |= RADEON_GUI_IDLE_MASK;
518 }
257 if (rdev->irq.crtc_vblank_int[0]) { 519 if (rdev->irq.crtc_vblank_int[0]) {
258 tmp |= RADEON_CRTC_VBLANK_MASK; 520 tmp |= RADEON_CRTC_VBLANK_MASK;
259 } 521 }
@@ -288,6 +550,12 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
288 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | 550 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
289 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; 551 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
290 552
553 /* the interrupt works, but the status bit is permanently asserted */
554 if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
555 if (!rdev->irq.gui_idle_acked)
556 irq_mask |= RADEON_GUI_IDLE_STAT;
557 }
558
291 if (irqs) { 559 if (irqs) {
292 WREG32(RADEON_GEN_INT_STATUS, irqs); 560 WREG32(RADEON_GEN_INT_STATUS, irqs);
293 } 561 }
@@ -299,6 +567,9 @@ int r100_irq_process(struct radeon_device *rdev)
299 uint32_t status, msi_rearm; 567 uint32_t status, msi_rearm;
300 bool queue_hotplug = false; 568 bool queue_hotplug = false;
301 569
570 /* reset gui idle ack. the status bit is broken */
571 rdev->irq.gui_idle_acked = false;
572
302 status = r100_irq_ack(rdev); 573 status = r100_irq_ack(rdev);
303 if (!status) { 574 if (!status) {
304 return IRQ_NONE; 575 return IRQ_NONE;
@@ -311,6 +582,12 @@ int r100_irq_process(struct radeon_device *rdev)
311 if (status & RADEON_SW_INT_TEST) { 582 if (status & RADEON_SW_INT_TEST) {
312 radeon_fence_process(rdev); 583 radeon_fence_process(rdev);
313 } 584 }
585 /* gui idle interrupt */
586 if (status & RADEON_GUI_IDLE_STAT) {
587 rdev->irq.gui_idle_acked = true;
588 rdev->pm.gui_idle = true;
589 wake_up(&rdev->irq.idle_queue);
590 }
314 /* Vertical blank interrupts */ 591 /* Vertical blank interrupts */
315 if (status & RADEON_CRTC_VBLANK_STAT) { 592 if (status & RADEON_CRTC_VBLANK_STAT) {
316 drm_handle_vblank(rdev->ddev, 0); 593 drm_handle_vblank(rdev->ddev, 0);
@@ -332,6 +609,8 @@ int r100_irq_process(struct radeon_device *rdev)
332 } 609 }
333 status = r100_irq_ack(rdev); 610 status = r100_irq_ack(rdev);
334 } 611 }
612 /* reset gui idle ack. the status bit is broken */
613 rdev->irq.gui_idle_acked = false;
335 if (queue_hotplug) 614 if (queue_hotplug)
336 queue_work(rdev->wq, &rdev->hotplug_work); 615 queue_work(rdev->wq, &rdev->hotplug_work);
337 if (rdev->msi_enabled) { 616 if (rdev->msi_enabled) {
@@ -663,26 +942,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
663 if (r100_debugfs_cp_init(rdev)) { 942 if (r100_debugfs_cp_init(rdev)) {
664 DRM_ERROR("Failed to register debugfs file for CP !\n"); 943 DRM_ERROR("Failed to register debugfs file for CP !\n");
665 } 944 }
666 /* Reset CP */
667 tmp = RREG32(RADEON_CP_CSQ_STAT);
668 if ((tmp & (1 << 31))) {
669 DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
670 WREG32(RADEON_CP_CSQ_MODE, 0);
671 WREG32(RADEON_CP_CSQ_CNTL, 0);
672 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
673 tmp = RREG32(RADEON_RBBM_SOFT_RESET);
674 mdelay(2);
675 WREG32(RADEON_RBBM_SOFT_RESET, 0);
676 tmp = RREG32(RADEON_RBBM_SOFT_RESET);
677 mdelay(2);
678 tmp = RREG32(RADEON_CP_CSQ_STAT);
679 if ((tmp & (1 << 31))) {
680 DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
681 }
682 } else {
683 DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
684 }
685
686 if (!rdev->me_fw) { 945 if (!rdev->me_fw) {
687 r = r100_cp_init_microcode(rdev); 946 r = r100_cp_init_microcode(rdev);
688 if (r) { 947 if (r) {
@@ -787,39 +1046,6 @@ void r100_cp_disable(struct radeon_device *rdev)
787 } 1046 }
788} 1047}
789 1048
790int r100_cp_reset(struct radeon_device *rdev)
791{
792 uint32_t tmp;
793 bool reinit_cp;
794 int i;
795
796 reinit_cp = rdev->cp.ready;
797 rdev->cp.ready = false;
798 WREG32(RADEON_CP_CSQ_MODE, 0);
799 WREG32(RADEON_CP_CSQ_CNTL, 0);
800 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
801 (void)RREG32(RADEON_RBBM_SOFT_RESET);
802 udelay(200);
803 WREG32(RADEON_RBBM_SOFT_RESET, 0);
804 /* Wait to prevent race in RBBM_STATUS */
805 mdelay(1);
806 for (i = 0; i < rdev->usec_timeout; i++) {
807 tmp = RREG32(RADEON_RBBM_STATUS);
808 if (!(tmp & (1 << 16))) {
809 DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
810 tmp);
811 if (reinit_cp) {
812 return r100_cp_init(rdev, rdev->cp.ring_size);
813 }
814 return 0;
815 }
816 DRM_UDELAY(1);
817 }
818 tmp = RREG32(RADEON_RBBM_STATUS);
819 DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
820 return -1;
821}
822
823void r100_cp_commit(struct radeon_device *rdev) 1049void r100_cp_commit(struct radeon_device *rdev)
824{ 1050{
825 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); 1051 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
@@ -1733,76 +1959,163 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
1733 return -1; 1959 return -1;
1734} 1960}
1735 1961
1736void r100_gpu_init(struct radeon_device *rdev) 1962void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
1737{ 1963{
1738 /* TODO: anythings to do here ? pipes ? */ 1964 lockup->last_cp_rptr = cp->rptr;
1739 r100_hdp_reset(rdev); 1965 lockup->last_jiffies = jiffies;
1966}
1967
1968/**
1969 * r100_gpu_cp_is_lockup() - check whether the CP is locked up by tracking its rptr
1970 * @rdev: radeon device structure
1971 * @lockup: r100_gpu_lockup structure holding CP lockup tracking information
1972 * @cp: radeon_cp structure holding CP information
1973 *
1974 * The lockup tracking information does not need explicit initialization: either
1975 * the CP rptr will differ from the recorded value, or a jiffies wrap around
1976 * will be detected, and both paths re-initialize the tracking state.
1977 *
1978 * A false positive is possible if we are called after a long gap and
1979 * last_cp_rptr happens to equal the current CP rptr. Unlikely as it is, this
1980 * can happen; to guard against it, if the time elapsed since the last call is
1981 * longer than 3 seconds we return false and merely update the tracking
1982 * information. As a consequence, the caller must invoke r100_gpu_cp_is_lockup
1983 * several times within that window for a lockup to be reported; the fencing
1984 * code should be cautious about that.
1985 *
1986 * The caller should also write to the ring to force the CP to do something, so
1987 * that we don't get a false positive when the CP simply has nothing to do.
1988 **/
1989bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
1990{
1991 unsigned long cjiffies, elapsed;
1992
1993 cjiffies = jiffies;
1994 if (!time_after(cjiffies, lockup->last_jiffies)) {
1995 /* likely a wrap around */
1996 lockup->last_cp_rptr = cp->rptr;
1997 lockup->last_jiffies = jiffies;
1998 return false;
1999 }
2000 if (cp->rptr != lockup->last_cp_rptr) {
2001 /* CP is still working no lockup */
2002 lockup->last_cp_rptr = cp->rptr;
2003 lockup->last_jiffies = jiffies;
2004 return false;
2005 }
2006 elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
2007 if (elapsed >= 3000) {
2008 /* most likely the improbable case where the current
2009 * rptr happens to equal an rptr recorded a long while
2010 * ago; treat this as a false positive, update the
2011 * tracking information, and force a re-check at a
2012 * later point
2013 */
2014 lockup->last_cp_rptr = cp->rptr;
2015 lockup->last_jiffies = jiffies;
2016 return false;
2017 }
2018 if (elapsed >= 1000) {
2019 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
2020 return true;
2021 }
2022 /* give a chance to the GPU ... */
2023 return false;
1740} 2024}
1741 2025
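/* Illustrative sketch (not part of the patch): a hypothetical fence-wait
 * fragment honouring the contract documented above - poll several times
 * inside the 3s window and keep the ring fed so the CP has work; the
 * radeon_fence_signaled() helper and 10ms poll interval are assumptions:
 *
 *	while (!radeon_fence_signaled(fence)) {
 *		if (r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup,
 *					  &rdev->cp)) {
 *			radeon_asic_reset(rdev);
 *			break;
 *		}
 *		msleep(10);
 *	}
 */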
1742void r100_hdp_reset(struct radeon_device *rdev) 2026bool r100_gpu_is_lockup(struct radeon_device *rdev)
1743{ 2027{
1744 uint32_t tmp; 2028 u32 rbbm_status;
2029 int r;
1745 2030
1746 tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; 2031 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
1747 tmp |= (7 << 28); 2032 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
1748 WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); 2033 r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
1749 (void)RREG32(RADEON_HOST_PATH_CNTL); 2034 return false;
1750 udelay(200); 2035 }
1751 WREG32(RADEON_RBBM_SOFT_RESET, 0); 2036 /* force CP activities */
1752 WREG32(RADEON_HOST_PATH_CNTL, tmp); 2037 r = radeon_ring_lock(rdev, 2);
1753 (void)RREG32(RADEON_HOST_PATH_CNTL); 2038 if (!r) {
2039 /* PACKET2 NOP */
2040 radeon_ring_write(rdev, 0x80000000);
2041 radeon_ring_write(rdev, 0x80000000);
2042 radeon_ring_unlock_commit(rdev);
2043 }
2044 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
2045 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
1754} 2046}
1755 2047
1756int r100_rb2d_reset(struct radeon_device *rdev) 2048void r100_bm_disable(struct radeon_device *rdev)
1757{ 2049{
1758 uint32_t tmp; 2050 u32 tmp;
1759 int i;
1760 2051
1761 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2); 2052 /* disable bus mastering */
1762 (void)RREG32(RADEON_RBBM_SOFT_RESET); 2053 tmp = RREG32(R_000030_BUS_CNTL);
1763 udelay(200); 2054 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
1764 WREG32(RADEON_RBBM_SOFT_RESET, 0); 2055 mdelay(1);
1765 /* Wait to prevent race in RBBM_STATUS */ 2056 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
2057 mdelay(1);
2058 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
2059 tmp = RREG32(RADEON_BUS_CNTL);
2060 mdelay(1);
2061 pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
2062 pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
1766 mdelay(1); 2063 mdelay(1);
1767 for (i = 0; i < rdev->usec_timeout; i++) {
1768 tmp = RREG32(RADEON_RBBM_STATUS);
1769 if (!(tmp & (1 << 26))) {
1770 DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
1771 tmp);
1772 return 0;
1773 }
1774 DRM_UDELAY(1);
1775 }
1776 tmp = RREG32(RADEON_RBBM_STATUS);
1777 DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
1778 return -1;
1779} 2064}
1780 2065
1781int r100_gpu_reset(struct radeon_device *rdev) 2066int r100_asic_reset(struct radeon_device *rdev)
1782{ 2067{
1783 uint32_t status; 2068 struct r100_mc_save save;
2069 u32 status, tmp;
1784 2070
1785 /* reset order likely matter */ 2071 r100_mc_stop(rdev, &save);
1786 status = RREG32(RADEON_RBBM_STATUS); 2072 status = RREG32(R_000E40_RBBM_STATUS);
1787 /* reset HDP */ 2073 if (!G_000E40_GUI_ACTIVE(status)) {
1788 r100_hdp_reset(rdev); 2074 return 0;
1789 /* reset rb2d */
1790 if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
1791 r100_rb2d_reset(rdev);
1792 } 2075 }
1793 /* TODO: reset 3D engine */ 2076 status = RREG32(R_000E40_RBBM_STATUS);
2077 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2078 /* stop CP */
2079 WREG32(RADEON_CP_CSQ_CNTL, 0);
2080 tmp = RREG32(RADEON_CP_RB_CNTL);
2081 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
2082 WREG32(RADEON_CP_RB_RPTR_WR, 0);
2083 WREG32(RADEON_CP_RB_WPTR, 0);
2084 WREG32(RADEON_CP_RB_CNTL, tmp);
2085 /* save PCI state */
2086 pci_save_state(rdev->pdev);
2087 /* disable bus mastering */
2088 r100_bm_disable(rdev);
2089 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
2090 S_0000F0_SOFT_RESET_RE(1) |
2091 S_0000F0_SOFT_RESET_PP(1) |
2092 S_0000F0_SOFT_RESET_RB(1));
2093 RREG32(R_0000F0_RBBM_SOFT_RESET);
2094 mdelay(500);
2095 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2096 mdelay(1);
2097 status = RREG32(R_000E40_RBBM_STATUS);
2098 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
1794 /* reset CP */ 2099 /* reset CP */
1795 status = RREG32(RADEON_RBBM_STATUS); 2100 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
1796 if (status & (1 << 16)) { 2101 RREG32(R_0000F0_RBBM_SOFT_RESET);
1797 r100_cp_reset(rdev); 2102 mdelay(500);
1798 } 2103 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2104 mdelay(1);
2105 status = RREG32(R_000E40_RBBM_STATUS);
2106 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2107 /* restore PCI & busmastering */
2108 pci_restore_state(rdev->pdev);
2109 r100_enable_bm(rdev);
1799 /* Check if GPU is idle */ 2110 /* Check if GPU is idle */
1800 status = RREG32(RADEON_RBBM_STATUS); 2111 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
1801 if (status & RADEON_RBBM_ACTIVE) { 2112 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
1802 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); 2113 dev_err(rdev->dev, "failed to reset GPU\n");
2114 rdev->gpu_lockup = true;
1803 return -1; 2115 return -1;
1804 } 2116 }
1805 DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); 2117 r100_mc_resume(rdev, &save);
2118 dev_info(rdev->dev, "GPU reset succeed\n");
1806 return 0; 2119 return 0;
1807} 2120}
1808 2121
@@ -2002,11 +2315,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
2002 else 2315 else
2003 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2316 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2004 } 2317 }
2005 /* FIXME remove this once we support unmappable VRAM */
2006 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
2007 rdev->mc.mc_vram_size = rdev->mc.aper_size;
2008 rdev->mc.real_vram_size = rdev->mc.aper_size;
2009 }
2010} 2318}
2011 2319
2012void r100_vga_set_state(struct radeon_device *rdev, bool state) 2320void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@ -2335,53 +2643,53 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2335 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; 2643 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
2336 uint32_t temp, data, mem_trcd, mem_trp, mem_tras; 2644 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
2337 fixed20_12 memtcas_ff[8] = { 2645 fixed20_12 memtcas_ff[8] = {
2338 fixed_init(1), 2646 dfixed_init(1),
2339 fixed_init(2), 2647 dfixed_init(2),
2340 fixed_init(3), 2648 dfixed_init(3),
2341 fixed_init(0), 2649 dfixed_init(0),
2342 fixed_init_half(1), 2650 dfixed_init_half(1),
2343 fixed_init_half(2), 2651 dfixed_init_half(2),
2344 fixed_init(0), 2652 dfixed_init(0),
2345 }; 2653 };
2346 fixed20_12 memtcas_rs480_ff[8] = { 2654 fixed20_12 memtcas_rs480_ff[8] = {
2347 fixed_init(0), 2655 dfixed_init(0),
2348 fixed_init(1), 2656 dfixed_init(1),
2349 fixed_init(2), 2657 dfixed_init(2),
2350 fixed_init(3), 2658 dfixed_init(3),
2351 fixed_init(0), 2659 dfixed_init(0),
2352 fixed_init_half(1), 2660 dfixed_init_half(1),
2353 fixed_init_half(2), 2661 dfixed_init_half(2),
2354 fixed_init_half(3), 2662 dfixed_init_half(3),
2355 }; 2663 };
2356 fixed20_12 memtcas2_ff[8] = { 2664 fixed20_12 memtcas2_ff[8] = {
2357 fixed_init(0), 2665 dfixed_init(0),
2358 fixed_init(1), 2666 dfixed_init(1),
2359 fixed_init(2), 2667 dfixed_init(2),
2360 fixed_init(3), 2668 dfixed_init(3),
2361 fixed_init(4), 2669 dfixed_init(4),
2362 fixed_init(5), 2670 dfixed_init(5),
2363 fixed_init(6), 2671 dfixed_init(6),
2364 fixed_init(7), 2672 dfixed_init(7),
2365 }; 2673 };
2366 fixed20_12 memtrbs[8] = { 2674 fixed20_12 memtrbs[8] = {
2367 fixed_init(1), 2675 dfixed_init(1),
2368 fixed_init_half(1), 2676 dfixed_init_half(1),
2369 fixed_init(2), 2677 dfixed_init(2),
2370 fixed_init_half(2), 2678 dfixed_init_half(2),
2371 fixed_init(3), 2679 dfixed_init(3),
2372 fixed_init_half(3), 2680 dfixed_init_half(3),
2373 fixed_init(4), 2681 dfixed_init(4),
2374 fixed_init_half(4) 2682 dfixed_init_half(4)
2375 }; 2683 };
2376 fixed20_12 memtrbs_r4xx[8] = { 2684 fixed20_12 memtrbs_r4xx[8] = {
2377 fixed_init(4), 2685 dfixed_init(4),
2378 fixed_init(5), 2686 dfixed_init(5),
2379 fixed_init(6), 2687 dfixed_init(6),
2380 fixed_init(7), 2688 dfixed_init(7),
2381 fixed_init(8), 2689 dfixed_init(8),
2382 fixed_init(9), 2690 dfixed_init(9),
2383 fixed_init(10), 2691 dfixed_init(10),
2384 fixed_init(11) 2692 dfixed_init(11)
2385 }; 2693 };
2386 fixed20_12 min_mem_eff; 2694 fixed20_12 min_mem_eff;
2387 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; 2695 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
@@ -2412,7 +2720,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2412 } 2720 }
2413 } 2721 }
2414 2722
2415 min_mem_eff.full = rfixed_const_8(0); 2723 min_mem_eff.full = dfixed_const_8(0);
2416 /* get modes */ 2724 /* get modes */
2417 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { 2725 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
2418 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); 2726 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
@@ -2433,28 +2741,28 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2433 mclk_ff = rdev->pm.mclk; 2741 mclk_ff = rdev->pm.mclk;
2434 2742
2435 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); 2743 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
2436 temp_ff.full = rfixed_const(temp); 2744 temp_ff.full = dfixed_const(temp);
2437 mem_bw.full = rfixed_mul(mclk_ff, temp_ff); 2745 mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
2438 2746
2439 pix_clk.full = 0; 2747 pix_clk.full = 0;
2440 pix_clk2.full = 0; 2748 pix_clk2.full = 0;
2441 peak_disp_bw.full = 0; 2749 peak_disp_bw.full = 0;
2442 if (mode1) { 2750 if (mode1) {
2443 temp_ff.full = rfixed_const(1000); 2751 temp_ff.full = dfixed_const(1000);
2444 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ 2752 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
2445 pix_clk.full = rfixed_div(pix_clk, temp_ff); 2753 pix_clk.full = dfixed_div(pix_clk, temp_ff);
2446 temp_ff.full = rfixed_const(pixel_bytes1); 2754 temp_ff.full = dfixed_const(pixel_bytes1);
2447 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); 2755 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
2448 } 2756 }
2449 if (mode2) { 2757 if (mode2) {
2450 temp_ff.full = rfixed_const(1000); 2758 temp_ff.full = dfixed_const(1000);
2451 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ 2759 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
2452 pix_clk2.full = rfixed_div(pix_clk2, temp_ff); 2760 pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
2453 temp_ff.full = rfixed_const(pixel_bytes2); 2761 temp_ff.full = dfixed_const(pixel_bytes2);
2454 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); 2762 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
2455 } 2763 }
2456 2764
2457 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); 2765 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
2458 if (peak_disp_bw.full >= mem_bw.full) { 2766 if (peak_disp_bw.full >= mem_bw.full) {
2459 DRM_ERROR("You may not have enough display bandwidth for current mode\n" 2767 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
2460 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); 2768 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
@@ -2496,9 +2804,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2496 mem_tras = ((temp >> 12) & 0xf) + 4; 2804 mem_tras = ((temp >> 12) & 0xf) + 4;
2497 } 2805 }
2498 /* convert to FF */ 2806 /* convert to FF */
2499 trcd_ff.full = rfixed_const(mem_trcd); 2807 trcd_ff.full = dfixed_const(mem_trcd);
2500 trp_ff.full = rfixed_const(mem_trp); 2808 trp_ff.full = dfixed_const(mem_trp);
2501 tras_ff.full = rfixed_const(mem_tras); 2809 tras_ff.full = dfixed_const(mem_tras);
2502 2810
2503 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ 2811 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */
2504 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); 2812 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
@@ -2516,7 +2824,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2516 /* extra cas latency stored in bits 23-25 0-4 clocks */ 2824 /* extra cas latency stored in bits 23-25 0-4 clocks */
2517 data = (temp >> 23) & 0x7; 2825 data = (temp >> 23) & 0x7;
2518 if (data < 5) 2826 if (data < 5)
2519 tcas_ff.full += rfixed_const(data); 2827 tcas_ff.full += dfixed_const(data);
2520 } 2828 }
2521 2829
2522 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { 2830 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
@@ -2553,72 +2861,72 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2553 2861
2554 if (rdev->flags & RADEON_IS_AGP) { 2862 if (rdev->flags & RADEON_IS_AGP) {
2555 fixed20_12 agpmode_ff; 2863 fixed20_12 agpmode_ff;
2556 agpmode_ff.full = rfixed_const(radeon_agpmode); 2864 agpmode_ff.full = dfixed_const(radeon_agpmode);
2557 temp_ff.full = rfixed_const_666(16); 2865 temp_ff.full = dfixed_const_666(16);
2558 sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); 2866 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
2559 } 2867 }
2560 /* TODO PCIE lanes may affect this - agpmode == 16?? */ 2868 /* TODO PCIE lanes may affect this - agpmode == 16?? */
2561 2869
2562 if (ASIC_IS_R300(rdev)) { 2870 if (ASIC_IS_R300(rdev)) {
2563 sclk_delay_ff.full = rfixed_const(250); 2871 sclk_delay_ff.full = dfixed_const(250);
2564 } else { 2872 } else {
2565 if ((rdev->family == CHIP_RV100) || 2873 if ((rdev->family == CHIP_RV100) ||
2566 rdev->flags & RADEON_IS_IGP) { 2874 rdev->flags & RADEON_IS_IGP) {
2567 if (rdev->mc.vram_is_ddr) 2875 if (rdev->mc.vram_is_ddr)
2568 sclk_delay_ff.full = rfixed_const(41); 2876 sclk_delay_ff.full = dfixed_const(41);
2569 else 2877 else
2570 sclk_delay_ff.full = rfixed_const(33); 2878 sclk_delay_ff.full = dfixed_const(33);
2571 } else { 2879 } else {
2572 if (rdev->mc.vram_width == 128) 2880 if (rdev->mc.vram_width == 128)
2573 sclk_delay_ff.full = rfixed_const(57); 2881 sclk_delay_ff.full = dfixed_const(57);
2574 else 2882 else
2575 sclk_delay_ff.full = rfixed_const(41); 2883 sclk_delay_ff.full = dfixed_const(41);
2576 } 2884 }
2577 } 2885 }
2578 2886
2579 mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); 2887 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
2580 2888
2581 if (rdev->mc.vram_is_ddr) { 2889 if (rdev->mc.vram_is_ddr) {
2582 if (rdev->mc.vram_width == 32) { 2890 if (rdev->mc.vram_width == 32) {
2583 k1.full = rfixed_const(40); 2891 k1.full = dfixed_const(40);
2584 c = 3; 2892 c = 3;
2585 } else { 2893 } else {
2586 k1.full = rfixed_const(20); 2894 k1.full = dfixed_const(20);
2587 c = 1; 2895 c = 1;
2588 } 2896 }
2589 } else { 2897 } else {
2590 k1.full = rfixed_const(40); 2898 k1.full = dfixed_const(40);
2591 c = 3; 2899 c = 3;
2592 } 2900 }
2593 2901
2594 temp_ff.full = rfixed_const(2); 2902 temp_ff.full = dfixed_const(2);
2595 mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); 2903 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
2596 temp_ff.full = rfixed_const(c); 2904 temp_ff.full = dfixed_const(c);
2597 mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); 2905 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
2598 temp_ff.full = rfixed_const(4); 2906 temp_ff.full = dfixed_const(4);
2599 mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); 2907 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
2600 mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); 2908 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
2601 mc_latency_mclk.full += k1.full; 2909 mc_latency_mclk.full += k1.full;
2602 2910
2603 mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); 2911 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
2604 mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); 2912 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
2605 2913
2606 /* 2914 /*
2607 HW cursor time assuming worst case of full size colour cursor. 2915 HW cursor time assuming worst case of full size colour cursor.
2608 */ 2916 */
2609 temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); 2917 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
2610 temp_ff.full += trcd_ff.full; 2918 temp_ff.full += trcd_ff.full;
2611 if (temp_ff.full < tras_ff.full) 2919 if (temp_ff.full < tras_ff.full)
2612 temp_ff.full = tras_ff.full; 2920 temp_ff.full = tras_ff.full;
2613 cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); 2921 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
2614 2922
2615 temp_ff.full = rfixed_const(cur_size); 2923 temp_ff.full = dfixed_const(cur_size);
2616 cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); 2924 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
2617 /* 2925 /*
2618 Find the total latency for the display data. 2926 Find the total latency for the display data.
2619 */ 2927 */
2620 disp_latency_overhead.full = rfixed_const(8); 2928 disp_latency_overhead.full = dfixed_const(8);
2621 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); 2929 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
2622 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; 2930 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
2623 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; 2931 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
2624 2932
@@ -2646,16 +2954,16 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2646 /* 2954 /*
2647 Find the drain rate of the display buffer. 2955 Find the drain rate of the display buffer.
2648 */ 2956 */
2649 temp_ff.full = rfixed_const((16/pixel_bytes1)); 2957 temp_ff.full = dfixed_const((16/pixel_bytes1));
2650 disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); 2958 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
2651 2959
2652 /* 2960 /*
2653 Find the critical point of the display buffer. 2961 Find the critical point of the display buffer.
2654 */ 2962 */
2655 crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); 2963 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
2656 crit_point_ff.full += rfixed_const_half(0); 2964 crit_point_ff.full += dfixed_const_half(0);
2657 2965
2658 critical_point = rfixed_trunc(crit_point_ff); 2966 critical_point = dfixed_trunc(crit_point_ff);
2659 2967
2660 if (rdev->disp_priority == 2) { 2968 if (rdev->disp_priority == 2) {
2661 critical_point = 0; 2969 critical_point = 0;
@@ -2726,8 +3034,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2726 /* 3034 /*
2727 Find the drain rate of the display buffer. 3035 Find the drain rate of the display buffer.
2728 */ 3036 */
2729 temp_ff.full = rfixed_const((16/pixel_bytes2)); 3037 temp_ff.full = dfixed_const((16/pixel_bytes2));
2730 disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); 3038 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
2731 3039
2732 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); 3040 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
2733 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); 3041 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
@@ -2748,8 +3056,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2748 critical_point2 = 0; 3056 critical_point2 = 0;
2749 else { 3057 else {
2750 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; 3058 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
2751 temp_ff.full = rfixed_const(temp); 3059 temp_ff.full = dfixed_const(temp);
2752 temp_ff.full = rfixed_mul(mclk_ff, temp_ff); 3060 temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
2753 if (sclk_ff.full < temp_ff.full) 3061 if (sclk_ff.full < temp_ff.full)
2754 temp_ff.full = sclk_ff.full; 3062 temp_ff.full = sclk_ff.full;
2755 3063
@@ -2757,15 +3065,15 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2757 3065
2758 if (mode1) { 3066 if (mode1) {
2759 temp_ff.full = read_return_rate.full - disp_drain_rate.full; 3067 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
2760 time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); 3068 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
2761 } else { 3069 } else {
2762 time_disp1_drop_priority.full = 0; 3070 time_disp1_drop_priority.full = 0;
2763 } 3071 }
2764 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; 3072 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
2765 crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); 3073 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
2766 crit_point_ff.full += rfixed_const_half(0); 3074 crit_point_ff.full += dfixed_const_half(0);
2767 3075
2768 critical_point2 = rfixed_trunc(crit_point_ff); 3076 critical_point2 = dfixed_trunc(crit_point_ff);
2769 3077
2770 if (rdev->disp_priority == 2) { 3078 if (rdev->disp_priority == 2) {
2771 critical_point2 = 0; 3079 critical_point2 = 0;
@@ -3399,7 +3707,7 @@ static int r100_startup(struct radeon_device *rdev)
3399 /* Resume clock */ 3707 /* Resume clock */
3400 r100_clock_startup(rdev); 3708 r100_clock_startup(rdev);
3401 /* Initialize GPU configuration (# pipes, ...) */ 3709 /* Initialize GPU configuration (# pipes, ...) */
3402 r100_gpu_init(rdev); 3710// r100_gpu_init(rdev);
3403 /* Initialize GART (initialize after TTM so we can allocate 3711 /* Initialize GART (initialize after TTM so we can allocate
3404 * memory through TTM but finalize after TTM) */ 3712 * memory through TTM but finalize after TTM) */
3405 r100_enable_bm(rdev); 3713 r100_enable_bm(rdev);
@@ -3436,7 +3744,7 @@ int r100_resume(struct radeon_device *rdev)
3436 /* Resume clock before doing reset */ 3744 /* Resume clock before doing reset */
3437 r100_clock_startup(rdev); 3745 r100_clock_startup(rdev);
3438 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 3746 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3439 if (radeon_gpu_reset(rdev)) { 3747 if (radeon_asic_reset(rdev)) {
3440 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 3748 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3441 RREG32(R_000E40_RBBM_STATUS), 3749 RREG32(R_000E40_RBBM_STATUS),
3442 RREG32(R_0007C0_CP_STAT)); 3750 RREG32(R_0007C0_CP_STAT));
@@ -3462,7 +3770,6 @@ int r100_suspend(struct radeon_device *rdev)
3462 3770
3463void r100_fini(struct radeon_device *rdev) 3771void r100_fini(struct radeon_device *rdev)
3464{ 3772{
3465 radeon_pm_fini(rdev);
3466 r100_cp_fini(rdev); 3773 r100_cp_fini(rdev);
3467 r100_wb_fini(rdev); 3774 r100_wb_fini(rdev);
3468 r100_ib_fini(rdev); 3775 r100_ib_fini(rdev);
@@ -3505,7 +3812,7 @@ int r100_init(struct radeon_device *rdev)
3505 return r; 3812 return r;
3506 } 3813 }
3507 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 3814 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3508 if (radeon_gpu_reset(rdev)) { 3815 if (radeon_asic_reset(rdev)) {
3509 dev_warn(rdev->dev, 3816 dev_warn(rdev->dev,
3510 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 3817 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3511 RREG32(R_000E40_RBBM_STATUS), 3818 RREG32(R_000E40_RBBM_STATUS),
@@ -3518,8 +3825,6 @@ int r100_init(struct radeon_device *rdev)
3518 r100_errata(rdev); 3825 r100_errata(rdev);
3519 /* Initialize clocks */ 3826 /* Initialize clocks */
3520 radeon_get_clock_info(rdev->ddev); 3827 radeon_get_clock_info(rdev->ddev);
3521 /* Initialize power management */
3522 radeon_pm_init(rdev);
3523 /* initialize AGP */ 3828 /* initialize AGP */
3524 if (rdev->flags & RADEON_IS_AGP) { 3829 if (rdev->flags & RADEON_IS_AGP) {
3525 r = radeon_agp_init(rdev); 3830 r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index df29a630c466..d016b16fa116 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -74,6 +74,134 @@
74#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) 74#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
75 75
76/* Registers */ 76/* Registers */
77#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
78#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
79#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
80#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
81#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
82#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
83#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
84#define S_0000F0_SOFT_RESET_SE(x) (((x) & 0x1) << 2)
85#define G_0000F0_SOFT_RESET_SE(x) (((x) >> 2) & 0x1)
86#define C_0000F0_SOFT_RESET_SE 0xFFFFFFFB
87#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
88#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
89#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
90#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
91#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
92#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
93#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
94#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
95#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
96#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
97#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
98#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
99#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
100#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
101#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
102#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
103#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
104#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
105#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
106#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
107#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
108#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
109#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
110#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
111#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
112#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
113#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
114#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
115#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
116#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
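/* Illustrative note (not part of the patch): in this generated-header
 * style, S_<reg>_<field>(x) shifts a value into the field, G_<reg>_<field>(v)
 * extracts it, and C_<reg>_<field> is the AND mask that clears it. A
 * hypothetical read-modify-write of the CP reset bit:
 *
 *	u32 v = RREG32(R_0000F0_RBBM_SOFT_RESET);
 *	v = (v & C_0000F0_SOFT_RESET_CP) | S_0000F0_SOFT_RESET_CP(1);
 *	WREG32(R_0000F0_RBBM_SOFT_RESET, v);
 */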
117#define R_000030_BUS_CNTL 0x000030
118#define S_000030_BUS_DBL_RESYNC(x) (((x) & 0x1) << 0)
119#define G_000030_BUS_DBL_RESYNC(x) (((x) >> 0) & 0x1)
120#define C_000030_BUS_DBL_RESYNC 0xFFFFFFFE
121#define S_000030_BUS_MSTR_RESET(x) (((x) & 0x1) << 1)
122#define G_000030_BUS_MSTR_RESET(x) (((x) >> 1) & 0x1)
123#define C_000030_BUS_MSTR_RESET 0xFFFFFFFD
124#define S_000030_BUS_FLUSH_BUF(x) (((x) & 0x1) << 2)
125#define G_000030_BUS_FLUSH_BUF(x) (((x) >> 2) & 0x1)
126#define C_000030_BUS_FLUSH_BUF 0xFFFFFFFB
127#define S_000030_BUS_STOP_REQ_DIS(x) (((x) & 0x1) << 3)
128#define G_000030_BUS_STOP_REQ_DIS(x) (((x) >> 3) & 0x1)
129#define C_000030_BUS_STOP_REQ_DIS 0xFFFFFFF7
130#define S_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 4)
131#define G_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) >> 4) & 0x1)
132#define C_000030_BUS_PM4_READ_COMBINE_EN 0xFFFFFFEF
133#define S_000030_BUS_WRT_COMBINE_EN(x) (((x) & 0x1) << 5)
134#define G_000030_BUS_WRT_COMBINE_EN(x) (((x) >> 5) & 0x1)
135#define C_000030_BUS_WRT_COMBINE_EN 0xFFFFFFDF
136#define S_000030_BUS_MASTER_DIS(x) (((x) & 0x1) << 6)
137#define G_000030_BUS_MASTER_DIS(x) (((x) >> 6) & 0x1)
138#define C_000030_BUS_MASTER_DIS 0xFFFFFFBF
139#define S_000030_BIOS_ROM_WRT_EN(x) (((x) & 0x1) << 7)
140#define G_000030_BIOS_ROM_WRT_EN(x) (((x) >> 7) & 0x1)
141#define C_000030_BIOS_ROM_WRT_EN 0xFFFFFF7F
142#define S_000030_BM_DAC_CRIPPLE(x) (((x) & 0x1) << 8)
143#define G_000030_BM_DAC_CRIPPLE(x) (((x) >> 8) & 0x1)
144#define C_000030_BM_DAC_CRIPPLE 0xFFFFFEFF
145#define S_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 9)
146#define G_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) >> 9) & 0x1)
147#define C_000030_BUS_NON_PM4_READ_COMBINE_EN 0xFFFFFDFF
148#define S_000030_BUS_XFERD_DISCARD_EN(x) (((x) & 0x1) << 10)
149#define G_000030_BUS_XFERD_DISCARD_EN(x) (((x) >> 10) & 0x1)
150#define C_000030_BUS_XFERD_DISCARD_EN 0xFFFFFBFF
151#define S_000030_BUS_SGL_READ_DISABLE(x) (((x) & 0x1) << 11)
152#define G_000030_BUS_SGL_READ_DISABLE(x) (((x) >> 11) & 0x1)
153#define C_000030_BUS_SGL_READ_DISABLE 0xFFFFF7FF
154#define S_000030_BIOS_DIS_ROM(x) (((x) & 0x1) << 12)
155#define G_000030_BIOS_DIS_ROM(x) (((x) >> 12) & 0x1)
156#define C_000030_BIOS_DIS_ROM 0xFFFFEFFF
157#define S_000030_BUS_PCI_READ_RETRY_EN(x) (((x) & 0x1) << 13)
158#define G_000030_BUS_PCI_READ_RETRY_EN(x) (((x) >> 13) & 0x1)
159#define C_000030_BUS_PCI_READ_RETRY_EN 0xFFFFDFFF
160#define S_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) & 0x1) << 14)
161#define G_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) >> 14) & 0x1)
162#define C_000030_BUS_AGP_AD_STEPPING_EN 0xFFFFBFFF
163#define S_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) & 0x1) << 15)
164#define G_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) >> 15) & 0x1)
165#define C_000030_BUS_PCI_WRT_RETRY_EN 0xFFFF7FFF
166#define S_000030_BUS_RETRY_WS(x) (((x) & 0xF) << 16)
167#define G_000030_BUS_RETRY_WS(x) (((x) >> 16) & 0xF)
168#define C_000030_BUS_RETRY_WS 0xFFF0FFFF
169#define S_000030_BUS_MSTR_RD_MULT(x) (((x) & 0x1) << 20)
170#define G_000030_BUS_MSTR_RD_MULT(x) (((x) >> 20) & 0x1)
171#define C_000030_BUS_MSTR_RD_MULT 0xFFEFFFFF
172#define S_000030_BUS_MSTR_RD_LINE(x) (((x) & 0x1) << 21)
173#define G_000030_BUS_MSTR_RD_LINE(x) (((x) >> 21) & 0x1)
174#define C_000030_BUS_MSTR_RD_LINE 0xFFDFFFFF
175#define S_000030_BUS_SUSPEND(x) (((x) & 0x1) << 22)
176#define G_000030_BUS_SUSPEND(x) (((x) >> 22) & 0x1)
177#define C_000030_BUS_SUSPEND 0xFFBFFFFF
178#define S_000030_LAT_16X(x) (((x) & 0x1) << 23)
179#define G_000030_LAT_16X(x) (((x) >> 23) & 0x1)
180#define C_000030_LAT_16X 0xFF7FFFFF
181#define S_000030_BUS_RD_DISCARD_EN(x) (((x) & 0x1) << 24)
182#define G_000030_BUS_RD_DISCARD_EN(x) (((x) >> 24) & 0x1)
183#define C_000030_BUS_RD_DISCARD_EN 0xFEFFFFFF
184#define S_000030_ENFRCWRDY(x) (((x) & 0x1) << 25)
185#define G_000030_ENFRCWRDY(x) (((x) >> 25) & 0x1)
186#define C_000030_ENFRCWRDY 0xFDFFFFFF
187#define S_000030_BUS_MSTR_WS(x) (((x) & 0x1) << 26)
188#define G_000030_BUS_MSTR_WS(x) (((x) >> 26) & 0x1)
189#define C_000030_BUS_MSTR_WS 0xFBFFFFFF
190#define S_000030_BUS_PARKING_DIS(x) (((x) & 0x1) << 27)
191#define G_000030_BUS_PARKING_DIS(x) (((x) >> 27) & 0x1)
192#define C_000030_BUS_PARKING_DIS 0xF7FFFFFF
193#define S_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) & 0x1) << 28)
194#define G_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) >> 28) & 0x1)
195#define C_000030_BUS_MSTR_DISCONNECT_EN 0xEFFFFFFF
196#define S_000030_SERR_EN(x) (((x) & 0x1) << 29)
197#define G_000030_SERR_EN(x) (((x) >> 29) & 0x1)
198#define C_000030_SERR_EN 0xDFFFFFFF
199#define S_000030_BUS_READ_BURST(x) (((x) & 0x1) << 30)
200#define G_000030_BUS_READ_BURST(x) (((x) >> 30) & 0x1)
201#define C_000030_BUS_READ_BURST 0xBFFFFFFF
202#define S_000030_BUS_RDY_READ_DLY(x) (((x) & 0x1) << 31)
203#define G_000030_BUS_RDY_READ_DLY(x) (((x) >> 31) & 0x1)
204#define C_000030_BUS_RDY_READ_DLY 0x7FFFFFFF
77#define R_000040_GEN_INT_CNTL 0x000040 205#define R_000040_GEN_INT_CNTL 0x000040
78#define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0) 206#define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0)
79#define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1) 207#define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1)
@@ -710,5 +838,41 @@
710#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) 838#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
711#define C_00000D_FORCE_RB 0xEFFFFFFF 839#define C_00000D_FORCE_RB 0xEFFFFFFF
712 840
841/* PLL regs */
842#define SCLK_CNTL 0xd
843#define FORCE_HDP (1 << 17)
844#define CLK_PWRMGT_CNTL 0x14
845#define GLOBAL_PMAN_EN (1 << 10)
846#define DISP_PM (1 << 20)
847#define PLL_PWRMGT_CNTL 0x15
848#define MPLL_TURNOFF (1 << 0)
849#define SPLL_TURNOFF (1 << 1)
850#define PPLL_TURNOFF (1 << 2)
851#define P2PLL_TURNOFF (1 << 3)
852#define TVPLL_TURNOFF (1 << 4)
853#define MOBILE_SU (1 << 16)
854#define SU_SCLK_USE_BCLK (1 << 17)
855#define SCLK_CNTL2 0x1e
856#define REDUCED_SPEED_SCLK_MODE (1 << 16)
857#define REDUCED_SPEED_SCLK_SEL(x) ((x) << 17)
858#define MCLK_MISC 0x1f
859#define EN_MCLK_TRISTATE_IN_SUSPEND (1 << 18)
860#define SCLK_MORE_CNTL 0x35
861#define REDUCED_SPEED_SCLK_EN (1 << 16)
862#define IO_CG_VOLTAGE_DROP (1 << 17)
863#define VOLTAGE_DELAY_SEL(x) ((x) << 20)
864#define VOLTAGE_DROP_SYNC (1 << 19)
865
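/* Illustrative sketch (not part of the patch): the PLL registers above are
 * indirect and are reached through the RREG32_PLL()/WREG32_PLL() accessors
 * used by r100_pm_misc(); for example, forcing HDP clocking on:
 *
 *	u32 sclk_cntl = RREG32_PLL(SCLK_CNTL);
 *	WREG32_PLL(SCLK_CNTL, sclk_cntl | FORCE_HDP);
 */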
866/* mmreg */
867#define DISP_PWR_MAN 0xd08
868#define DISP_D3_GRPH_RST (1 << 18)
869#define DISP_D3_SUBPIC_RST (1 << 19)
870#define DISP_D3_OV0_RST (1 << 20)
871#define DISP_D1D2_GRPH_RST (1 << 21)
872#define DISP_D1D2_SUBPIC_RST (1 << 22)
873#define DISP_D1D2_OV0_RST (1 << 23)
874#define DISP_DVO_ENABLE_RST (1 << 24)
875#define TV_ENABLE_RST (1 << 25)
876#define AUTO_PWRUP_EN (1 << 26)
713 877
714#endif 878#endif
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index a5ff8076b423..b2f9efe2897c 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -27,8 +27,9 @@
27 */ 27 */
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include "drmP.h" 30#include <drm/drmP.h>
31#include "drm.h" 31#include <drm/drm.h>
32#include <drm/drm_crtc_helper.h>
32#include "radeon_reg.h" 33#include "radeon_reg.h"
33#include "radeon.h" 34#include "radeon.h"
34#include "radeon_asic.h" 35#include "radeon_asic.h"
@@ -151,6 +152,10 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
151 u32 tmp; 152 u32 tmp;
152 int r; 153 int r;
153 154
155 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
156 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
157 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
158 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
154 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); 159 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
155 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; 160 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
156 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); 161 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
@@ -323,7 +328,6 @@ void r300_gpu_init(struct radeon_device *rdev)
323{ 328{
324 uint32_t gb_tile_config, tmp; 329 uint32_t gb_tile_config, tmp;
325 330
326 r100_hdp_reset(rdev);
327 if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) || 331 if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
328 (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) { 332 (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
329 /* r300,r350 */ 333 /* r300,r350 */
@@ -375,89 +379,85 @@ void r300_gpu_init(struct radeon_device *rdev)
375 rdev->num_gb_pipes, rdev->num_z_pipes); 379 rdev->num_gb_pipes, rdev->num_z_pipes);
376} 380}
377 381
378int r300_ga_reset(struct radeon_device *rdev) 382bool r300_gpu_is_lockup(struct radeon_device *rdev)
379{ 383{
380 uint32_t tmp; 384 u32 rbbm_status;
381 bool reinit_cp; 385 int r;
382 int i;
383 386
384 reinit_cp = rdev->cp.ready; 387 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
385 rdev->cp.ready = false; 388 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
386 for (i = 0; i < rdev->usec_timeout; i++) { 389 r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
387 WREG32(RADEON_CP_CSQ_MODE, 0); 390 return false;
388 WREG32(RADEON_CP_CSQ_CNTL, 0);
389 WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
390 (void)RREG32(RADEON_RBBM_SOFT_RESET);
391 udelay(200);
392 WREG32(RADEON_RBBM_SOFT_RESET, 0);
393 /* Wait to prevent race in RBBM_STATUS */
394 mdelay(1);
395 tmp = RREG32(RADEON_RBBM_STATUS);
396 if (tmp & ((1 << 20) | (1 << 26))) {
397 DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
398 /* GA still busy soft reset it */
399 WREG32(0x429C, 0x200);
400 WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
401 WREG32(R300_RE_SCISSORS_TL, 0);
402 WREG32(R300_RE_SCISSORS_BR, 0);
403 WREG32(0x24AC, 0);
404 }
405 /* Wait to prevent race in RBBM_STATUS */
406 mdelay(1);
407 tmp = RREG32(RADEON_RBBM_STATUS);
408 if (!(tmp & ((1 << 20) | (1 << 26)))) {
409 break;
410 }
411 } 391 }
412 for (i = 0; i < rdev->usec_timeout; i++) { 392 /* force CP activities */
413 tmp = RREG32(RADEON_RBBM_STATUS); 393 r = radeon_ring_lock(rdev, 2);
414 if (!(tmp & ((1 << 20) | (1 << 26)))) { 394 if (!r) {
415 DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", 395 /* PACKET2 NOP */
416 tmp); 396 radeon_ring_write(rdev, 0x80000000);
417 if (reinit_cp) { 397 radeon_ring_write(rdev, 0x80000000);
418 return r100_cp_init(rdev, rdev->cp.ring_size); 398 radeon_ring_unlock_commit(rdev);
419 }
420 return 0;
421 }
422 DRM_UDELAY(1);
423 } 399 }
424 tmp = RREG32(RADEON_RBBM_STATUS); 400 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
425 DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); 401 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
426 return -1;
427} 402}
428 403
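
r300_gpu_is_lockup() replaces the old blocking r300_ga_reset() with passive lockup detection: when the GUI is idle, the per-ring baseline is refreshed; when it is busy, two PACKET2 NOPs are committed so a healthy CP visibly advances, and a shared r100 helper decides whether the read pointer has stalled for too long. The shape of the check, condensed from the hunk above:

    static bool example_gpu_is_lockup(struct radeon_device *rdev)
    {
            u32 status = RREG32(R_000E40_RBBM_STATUS);

            if (!G_000E40_GUI_ACTIVE(status)) {
                    /* GPU idle: remember rptr + timestamp, not a lockup */
                    r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
                    return false;
            }
            /* busy: queue two PACKET2 NOPs so a live CP makes progress */
            if (!radeon_ring_lock(rdev, 2)) {
                    radeon_ring_write(rdev, 0x80000000);
                    radeon_ring_write(rdev, 0x80000000);
                    radeon_ring_unlock_commit(rdev);
            }
            rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
            /* the helper reports a lockup only if rptr stalls long enough */
            return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
    }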
429int r300_gpu_reset(struct radeon_device *rdev) 404int r300_asic_reset(struct radeon_device *rdev)
430{ 405{
431 uint32_t status; 406 struct r100_mc_save save;
432 407 u32 status, tmp;
433 /* reset order likely matter */ 408
434 status = RREG32(RADEON_RBBM_STATUS); 409 r100_mc_stop(rdev, &save);
435 /* reset HDP */ 410 status = RREG32(R_000E40_RBBM_STATUS);
436 r100_hdp_reset(rdev); 411 if (!G_000E40_GUI_ACTIVE(status)) {
437 /* reset rb2d */ 412 return 0;
438 if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
439 r100_rb2d_reset(rdev);
440 }
441 /* reset GA */
442 if (status & ((1 << 20) | (1 << 26))) {
443 r300_ga_reset(rdev);
444 }
445 /* reset CP */
446 status = RREG32(RADEON_RBBM_STATUS);
447 if (status & (1 << 16)) {
448 r100_cp_reset(rdev);
449 } 413 }
414 status = RREG32(R_000E40_RBBM_STATUS);
415 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
416 /* stop CP */
417 WREG32(RADEON_CP_CSQ_CNTL, 0);
418 tmp = RREG32(RADEON_CP_RB_CNTL);
419 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
420 WREG32(RADEON_CP_RB_RPTR_WR, 0);
421 WREG32(RADEON_CP_RB_WPTR, 0);
422 WREG32(RADEON_CP_RB_CNTL, tmp);
423 /* save PCI state */
424 pci_save_state(rdev->pdev);
425 /* disable bus mastering */
426 r100_bm_disable(rdev);
427 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
428 S_0000F0_SOFT_RESET_GA(1));
429 RREG32(R_0000F0_RBBM_SOFT_RESET);
430 mdelay(500);
431 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
432 mdelay(1);
433 status = RREG32(R_000E40_RBBM_STATUS);
434 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
 435 /* resetting the CP seems to be problematic; sometimes it ends up
 436 * hard locking the computer, but it is necessary for a successful
 437 * reset. More testing & playing is needed on R3XX/R4XX to find a
 438 * reliable solution (if any exists)
 439 */
440 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
441 RREG32(R_0000F0_RBBM_SOFT_RESET);
442 mdelay(500);
443 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
444 mdelay(1);
445 status = RREG32(R_000E40_RBBM_STATUS);
446 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
447 /* restore PCI & busmastering */
448 pci_restore_state(rdev->pdev);
449 r100_enable_bm(rdev);
450 /* Check if GPU is idle */ 450 /* Check if GPU is idle */
451 status = RREG32(RADEON_RBBM_STATUS); 451 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
452 if (status & RADEON_RBBM_ACTIVE) { 452 dev_err(rdev->dev, "failed to reset GPU\n");
453 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); 453 rdev->gpu_lockup = true;
454 return -1; 454 return -1;
455 } 455 }
456 DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); 456 r100_mc_resume(rdev, &save);
 457 dev_info(rdev->dev, "GPU reset succeeded\n");
457 return 0; 458 return 0;
458} 459}
459 460
460
461/* 461/*
462 * r300,r350,rv350,rv380 VRAM info 462 * r300,r350,rv350,rv380 VRAM info
463 */ 463 */
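
The new r300_asic_reset() above is a fixed sequence: stop the memory controller, quiesce the CP ring pointers, save PCI state and disable bus mastering, pulse VAP|GA soft reset, pulse CP soft reset, restore, then verify against RBBM_STATUS. Each pulse follows the same write / read-back / settle rhythm; a hypothetical helper condensing it:

    static void example_soft_reset_pulse(struct radeon_device *rdev, u32 mask)
    {
            WREG32(R_0000F0_RBBM_SOFT_RESET, mask);
            RREG32(R_0000F0_RBBM_SOFT_RESET);   /* read back to post the write */
            mdelay(500);
            WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
            mdelay(1);
    }

    /* usage, mirroring the order in r300_asic_reset() */
    example_soft_reset_pulse(rdev, S_0000F0_SOFT_RESET_VAP(1) |
                                   S_0000F0_SOFT_RESET_GA(1));
    example_soft_reset_pulse(rdev, S_0000F0_SOFT_RESET_CP(1));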
@@ -1316,7 +1316,7 @@ int r300_resume(struct radeon_device *rdev)
1316 /* Resume clock before doing reset */ 1316 /* Resume clock before doing reset */
1317 r300_clock_startup(rdev); 1317 r300_clock_startup(rdev);
1318 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 1318 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
1319 if (radeon_gpu_reset(rdev)) { 1319 if (radeon_asic_reset(rdev)) {
1320 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 1320 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1321 RREG32(R_000E40_RBBM_STATUS), 1321 RREG32(R_000E40_RBBM_STATUS),
1322 RREG32(R_0007C0_CP_STAT)); 1322 RREG32(R_0007C0_CP_STAT));
@@ -1344,7 +1344,6 @@ int r300_suspend(struct radeon_device *rdev)
1344 1344
1345void r300_fini(struct radeon_device *rdev) 1345void r300_fini(struct radeon_device *rdev)
1346{ 1346{
1347 radeon_pm_fini(rdev);
1348 r100_cp_fini(rdev); 1347 r100_cp_fini(rdev);
1349 r100_wb_fini(rdev); 1348 r100_wb_fini(rdev);
1350 r100_ib_fini(rdev); 1349 r100_ib_fini(rdev);
@@ -1387,7 +1386,7 @@ int r300_init(struct radeon_device *rdev)
1387 return r; 1386 return r;
1388 } 1387 }
1389 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 1388 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
1390 if (radeon_gpu_reset(rdev)) { 1389 if (radeon_asic_reset(rdev)) {
1391 dev_warn(rdev->dev, 1390 dev_warn(rdev->dev,
1392 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 1391 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1393 RREG32(R_000E40_RBBM_STATUS), 1392 RREG32(R_000E40_RBBM_STATUS),
@@ -1400,8 +1399,6 @@ int r300_init(struct radeon_device *rdev)
1400 r300_errata(rdev); 1399 r300_errata(rdev);
1401 /* Initialize clocks */ 1400 /* Initialize clocks */
1402 radeon_get_clock_info(rdev->ddev); 1401 radeon_get_clock_info(rdev->ddev);
1403 /* Initialize power management */
1404 radeon_pm_init(rdev);
1405 /* initialize AGP */ 1402 /* initialize AGP */
1406 if (rdev->flags & RADEON_IS_AGP) { 1403 if (rdev->flags & RADEON_IS_AGP) {
1407 r = radeon_agp_init(rdev); 1404 r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 4c73114f0de9..968a33317fbf 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -209,7 +209,52 @@
209#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) 209#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
210#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) 210#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
211#define C_000E40_GUI_ACTIVE 0x7FFFFFFF 211#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
212 212#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
213#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
214#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
215#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
216#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
217#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
218#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
219#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
220#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
221#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
222#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
223#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
224#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
225#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
226#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
227#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
228#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
229#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
230#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
231#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
232#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
233#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
234#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
235#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
236#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
237#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
238#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
239#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
240#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
241#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
242#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
243#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
244#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
245#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
246#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
247#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
248#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
249#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
250#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
251#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
252#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
253#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
254#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
255#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
256#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
257#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
213 258
214#define R_00000D_SCLK_CNTL 0x00000D 259#define R_00000D_SCLK_CNTL 0x00000D
215#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) 260#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
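
The r300d.h additions follow the driver's register-header convention: for every field there is an S_ macro that shifts a value into place, a G_ macro that extracts it, and a C_ constant that clears it. A read-modify-write on the new GA field, as a usage sketch:

    u32 v = RREG32(R_0000F0_RBBM_SOFT_RESET);

    v &= C_0000F0_SOFT_RESET_GA;       /* clear the GA reset bit */
    v |= S_0000F0_SOFT_RESET_GA(1);    /* assert it again        */
    WREG32(R_0000F0_RBBM_SOFT_RESET, v);
    if (G_0000F0_SOFT_RESET_GA(RREG32(R_0000F0_RBBM_SOFT_RESET)))
            pr_debug("GA reset asserted\n");   /* field reads back set */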
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c2bda4ad62e7..4415a5ee5871 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -36,6 +36,35 @@
36#include "r420d.h" 36#include "r420d.h"
37#include "r420_reg_safe.h" 37#include "r420_reg_safe.h"
38 38
39void r420_pm_init_profile(struct radeon_device *rdev)
40{
41 /* default */
42 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
43 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
44 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
45 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
46 /* low sh */
47 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
48 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
49 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
50 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
51 /* high sh */
52 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
53 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
54 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
55 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
56 /* low mh */
57 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
58 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
59 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
60 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
61 /* high mh */
62 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
63 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
64 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
65 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
66}
67
39static void r420_set_reg_safe(struct radeon_device *rdev) 68static void r420_set_reg_safe(struct radeon_device *rdev)
40{ 69{
41 rdev->config.r300.reg_safe_bm = r420_reg_safe_bm; 70 rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
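
r420_pm_init_profile() fills one row per PM profile with four indices: the power state and clock mode to use with displays off (dpms_off_*) and on (dpms_on_*). The PM core then resolves a profile into concrete clocks by indexing the power-state table. A sketch of that consumption, assuming a struct radeon_pm_profile carrying exactly the four index fields assigned above (the helper name is hypothetical):

    static void example_apply_profile(struct radeon_device *rdev,
                                      int profile, bool dpms_on)
    {
            struct radeon_pm_profile *p = &rdev->pm.profiles[profile];

            rdev->pm.requested_power_state_index =
                    dpms_on ? p->dpms_on_ps_idx : p->dpms_off_ps_idx;
            rdev->pm.requested_clock_mode_index =
                    dpms_on ? p->dpms_on_cm_idx : p->dpms_off_cm_idx;
    }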
@@ -241,7 +270,7 @@ int r420_resume(struct radeon_device *rdev)
241 /* Resume clock before doing reset */ 270 /* Resume clock before doing reset */
242 r420_clock_resume(rdev); 271 r420_clock_resume(rdev);
243 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 272 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
244 if (radeon_gpu_reset(rdev)) { 273 if (radeon_asic_reset(rdev)) {
245 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 274 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
246 RREG32(R_000E40_RBBM_STATUS), 275 RREG32(R_000E40_RBBM_STATUS),
247 RREG32(R_0007C0_CP_STAT)); 276 RREG32(R_0007C0_CP_STAT));
@@ -274,7 +303,6 @@ int r420_suspend(struct radeon_device *rdev)
274 303
275void r420_fini(struct radeon_device *rdev) 304void r420_fini(struct radeon_device *rdev)
276{ 305{
277 radeon_pm_fini(rdev);
278 r100_cp_fini(rdev); 306 r100_cp_fini(rdev);
279 r100_wb_fini(rdev); 307 r100_wb_fini(rdev);
280 r100_ib_fini(rdev); 308 r100_ib_fini(rdev);
@@ -322,7 +350,7 @@ int r420_init(struct radeon_device *rdev)
322 } 350 }
323 } 351 }
324 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 352 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
325 if (radeon_gpu_reset(rdev)) { 353 if (radeon_asic_reset(rdev)) {
326 dev_warn(rdev->dev, 354 dev_warn(rdev->dev,
327 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 355 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
328 RREG32(R_000E40_RBBM_STATUS), 356 RREG32(R_000E40_RBBM_STATUS),
@@ -334,8 +362,6 @@ int r420_init(struct radeon_device *rdev)
334 362
335 /* Initialize clocks */ 363 /* Initialize clocks */
336 radeon_get_clock_info(rdev->ddev); 364 radeon_get_clock_info(rdev->ddev);
337 /* Initialize power management */
338 radeon_pm_init(rdev);
339 /* initialize AGP */ 365 /* initialize AGP */
340 if (rdev->flags & RADEON_IS_AGP) { 366 if (rdev->flags & RADEON_IS_AGP) {
341 r = radeon_agp_init(rdev); 367 r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 0cf2ad2a5585..93c9a2bbccf8 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -347,9 +347,11 @@
347 347
348#define AVIVO_D1CRTC_CONTROL 0x6080 348#define AVIVO_D1CRTC_CONTROL 0x6080
349# define AVIVO_CRTC_EN (1 << 0) 349# define AVIVO_CRTC_EN (1 << 0)
350# define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
350#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 351#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
351#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 352#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
352#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c 353#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
354#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
353#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 355#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
354#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 356#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
355 357
@@ -488,6 +490,7 @@
488#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884 490#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884
489#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888 491#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888
490#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c 492#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c
493#define AVIVO_D2CRTC_STATUS_POSITION 0x68a0
491#define AVIVO_D2CRTC_FRAME_COUNT 0x68a4 494#define AVIVO_D2CRTC_FRAME_COUNT 0x68a4
492#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4 495#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4
493 496
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3c44b8d39318..34330df28483 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -53,7 +53,6 @@ static void r520_gpu_init(struct radeon_device *rdev)
53{ 53{
54 unsigned pipe_select_current, gb_pipe_select, tmp; 54 unsigned pipe_select_current, gb_pipe_select, tmp;
55 55
56 r100_hdp_reset(rdev);
57 rv515_vga_render_disable(rdev); 56 rv515_vga_render_disable(rdev);
58 /* 57 /*
59 * DST_PIPE_CONFIG 0x170C 58 * DST_PIPE_CONFIG 0x170C
@@ -209,7 +208,7 @@ int r520_resume(struct radeon_device *rdev)
209 /* Resume clock before doing reset */ 208 /* Resume clock before doing reset */
210 rv515_clock_startup(rdev); 209 rv515_clock_startup(rdev);
211 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 210 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
212 if (radeon_gpu_reset(rdev)) { 211 if (radeon_asic_reset(rdev)) {
213 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 212 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
214 RREG32(R_000E40_RBBM_STATUS), 213 RREG32(R_000E40_RBBM_STATUS),
215 RREG32(R_0007C0_CP_STAT)); 214 RREG32(R_0007C0_CP_STAT));
@@ -246,7 +245,7 @@ int r520_init(struct radeon_device *rdev)
246 return -EINVAL; 245 return -EINVAL;
247 } 246 }
248 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 247 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
249 if (radeon_gpu_reset(rdev)) { 248 if (radeon_asic_reset(rdev)) {
250 dev_warn(rdev->dev, 249 dev_warn(rdev->dev,
251 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 250 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
252 RREG32(R_000E40_RBBM_STATUS), 251 RREG32(R_000E40_RBBM_STATUS),
@@ -262,8 +261,6 @@ int r520_init(struct radeon_device *rdev)
262 } 261 }
263 /* Initialize clocks */ 262 /* Initialize clocks */
264 radeon_get_clock_info(rdev->ddev); 263 radeon_get_clock_info(rdev->ddev);
265 /* Initialize power management */
266 radeon_pm_init(rdev);
267 /* initialize AGP */ 264 /* initialize AGP */
268 if (rdev->flags & RADEON_IS_AGP) { 265 if (rdev->flags & RADEON_IS_AGP) {
269 r = radeon_agp_init(rdev); 266 r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 8f3454e2056a..44e96a2ae25a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -44,6 +44,9 @@
44#define R700_PFP_UCODE_SIZE 848 44#define R700_PFP_UCODE_SIZE 848
45#define R700_PM4_UCODE_SIZE 1360 45#define R700_PM4_UCODE_SIZE 1360
46#define R700_RLC_UCODE_SIZE 1024 46#define R700_RLC_UCODE_SIZE 1024
47#define EVERGREEN_PFP_UCODE_SIZE 1120
48#define EVERGREEN_PM4_UCODE_SIZE 1376
49#define EVERGREEN_RLC_UCODE_SIZE 768
47 50
48/* Firmware Names */ 51/* Firmware Names */
49MODULE_FIRMWARE("radeon/R600_pfp.bin"); 52MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -68,6 +71,18 @@ MODULE_FIRMWARE("radeon/RV710_pfp.bin");
68MODULE_FIRMWARE("radeon/RV710_me.bin"); 71MODULE_FIRMWARE("radeon/RV710_me.bin");
69MODULE_FIRMWARE("radeon/R600_rlc.bin"); 72MODULE_FIRMWARE("radeon/R600_rlc.bin");
70MODULE_FIRMWARE("radeon/R700_rlc.bin"); 73MODULE_FIRMWARE("radeon/R700_rlc.bin");
74MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
75MODULE_FIRMWARE("radeon/CEDAR_me.bin");
76MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
77MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
78MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
79MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
80MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
81MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
82MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
83MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
84MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
85MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
71 86
72int r600_debugfs_mc_info_init(struct radeon_device *rdev); 87int r600_debugfs_mc_info_init(struct radeon_device *rdev);
73 88
@@ -75,6 +90,401 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
75int r600_mc_wait_for_idle(struct radeon_device *rdev); 90int r600_mc_wait_for_idle(struct radeon_device *rdev);
76void r600_gpu_init(struct radeon_device *rdev); 91void r600_gpu_init(struct radeon_device *rdev);
77void r600_fini(struct radeon_device *rdev); 92void r600_fini(struct radeon_device *rdev);
93void r600_irq_disable(struct radeon_device *rdev);
94
95void r600_pm_get_dynpm_state(struct radeon_device *rdev)
96{
97 int i;
98
99 rdev->pm.dynpm_can_upclock = true;
100 rdev->pm.dynpm_can_downclock = true;
101
102 /* power state array is low to high, default is first */
103 if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
104 int min_power_state_index = 0;
105
106 if (rdev->pm.num_power_states > 2)
107 min_power_state_index = 1;
108
109 switch (rdev->pm.dynpm_planned_action) {
110 case DYNPM_ACTION_MINIMUM:
111 rdev->pm.requested_power_state_index = min_power_state_index;
112 rdev->pm.requested_clock_mode_index = 0;
113 rdev->pm.dynpm_can_downclock = false;
114 break;
115 case DYNPM_ACTION_DOWNCLOCK:
116 if (rdev->pm.current_power_state_index == min_power_state_index) {
117 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
118 rdev->pm.dynpm_can_downclock = false;
119 } else {
120 if (rdev->pm.active_crtc_count > 1) {
121 for (i = 0; i < rdev->pm.num_power_states; i++) {
122 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
123 continue;
124 else if (i >= rdev->pm.current_power_state_index) {
125 rdev->pm.requested_power_state_index =
126 rdev->pm.current_power_state_index;
127 break;
128 } else {
129 rdev->pm.requested_power_state_index = i;
130 break;
131 }
132 }
133 } else
134 rdev->pm.requested_power_state_index =
135 rdev->pm.current_power_state_index - 1;
136 }
137 rdev->pm.requested_clock_mode_index = 0;
138 /* don't use the power state if crtcs are active and no display flag is set */
139 if ((rdev->pm.active_crtc_count > 0) &&
140 (rdev->pm.power_state[rdev->pm.requested_power_state_index].
141 clock_info[rdev->pm.requested_clock_mode_index].flags &
142 RADEON_PM_MODE_NO_DISPLAY)) {
143 rdev->pm.requested_power_state_index++;
144 }
145 break;
146 case DYNPM_ACTION_UPCLOCK:
147 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
148 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
149 rdev->pm.dynpm_can_upclock = false;
150 } else {
151 if (rdev->pm.active_crtc_count > 1) {
152 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
153 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
154 continue;
155 else if (i <= rdev->pm.current_power_state_index) {
156 rdev->pm.requested_power_state_index =
157 rdev->pm.current_power_state_index;
158 break;
159 } else {
160 rdev->pm.requested_power_state_index = i;
161 break;
162 }
163 }
164 } else
165 rdev->pm.requested_power_state_index =
166 rdev->pm.current_power_state_index + 1;
167 }
168 rdev->pm.requested_clock_mode_index = 0;
169 break;
170 case DYNPM_ACTION_DEFAULT:
171 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
172 rdev->pm.requested_clock_mode_index = 0;
173 rdev->pm.dynpm_can_upclock = false;
174 break;
175 case DYNPM_ACTION_NONE:
176 default:
 177 DRM_ERROR("Requested mode for undefined action\n");
178 return;
179 }
180 } else {
181 /* XXX select a power state based on AC/DC, single/dualhead, etc. */
182 /* for now just select the first power state and switch between clock modes */
183 /* power state array is low to high, default is first (0) */
184 if (rdev->pm.active_crtc_count > 1) {
185 rdev->pm.requested_power_state_index = -1;
186 /* start at 1 as we don't want the default mode */
187 for (i = 1; i < rdev->pm.num_power_states; i++) {
188 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
189 continue;
190 else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
191 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
192 rdev->pm.requested_power_state_index = i;
193 break;
194 }
195 }
196 /* if nothing selected, grab the default state. */
197 if (rdev->pm.requested_power_state_index == -1)
198 rdev->pm.requested_power_state_index = 0;
199 } else
200 rdev->pm.requested_power_state_index = 1;
201
202 switch (rdev->pm.dynpm_planned_action) {
203 case DYNPM_ACTION_MINIMUM:
204 rdev->pm.requested_clock_mode_index = 0;
205 rdev->pm.dynpm_can_downclock = false;
206 break;
207 case DYNPM_ACTION_DOWNCLOCK:
208 if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
209 if (rdev->pm.current_clock_mode_index == 0) {
210 rdev->pm.requested_clock_mode_index = 0;
211 rdev->pm.dynpm_can_downclock = false;
212 } else
213 rdev->pm.requested_clock_mode_index =
214 rdev->pm.current_clock_mode_index - 1;
215 } else {
216 rdev->pm.requested_clock_mode_index = 0;
217 rdev->pm.dynpm_can_downclock = false;
218 }
219 /* don't use the power state if crtcs are active and no display flag is set */
220 if ((rdev->pm.active_crtc_count > 0) &&
221 (rdev->pm.power_state[rdev->pm.requested_power_state_index].
222 clock_info[rdev->pm.requested_clock_mode_index].flags &
223 RADEON_PM_MODE_NO_DISPLAY)) {
224 rdev->pm.requested_clock_mode_index++;
225 }
226 break;
227 case DYNPM_ACTION_UPCLOCK:
228 if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
229 if (rdev->pm.current_clock_mode_index ==
230 (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
231 rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
232 rdev->pm.dynpm_can_upclock = false;
233 } else
234 rdev->pm.requested_clock_mode_index =
235 rdev->pm.current_clock_mode_index + 1;
236 } else {
237 rdev->pm.requested_clock_mode_index =
238 rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
239 rdev->pm.dynpm_can_upclock = false;
240 }
241 break;
242 case DYNPM_ACTION_DEFAULT:
243 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
244 rdev->pm.requested_clock_mode_index = 0;
245 rdev->pm.dynpm_can_upclock = false;
246 break;
247 case DYNPM_ACTION_NONE:
248 default:
 249 DRM_ERROR("Requested mode for undefined action\n");
250 return;
251 }
252 }
253
254 DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
255 rdev->pm.power_state[rdev->pm.requested_power_state_index].
256 clock_info[rdev->pm.requested_clock_mode_index].sclk,
257 rdev->pm.power_state[rdev->pm.requested_power_state_index].
258 clock_info[rdev->pm.requested_clock_mode_index].mclk,
259 rdev->pm.power_state[rdev->pm.requested_power_state_index].
260 pcie_lanes);
261}
262
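
The heart of r600_pm_get_dynpm_state() is an index walk clamped to the ends of the (low-to-high sorted) power-state array, with dynpm_can_upclock/dynpm_can_downclock recording when a boundary was hit. Stripped of the multi-head and NO_DISPLAY special cases, the UPCLOCK/DOWNCLOCK branches reduce to the following sketch (`action` stands in for rdev->pm.dynpm_planned_action):

    int cur = rdev->pm.current_power_state_index;
    int top = rdev->pm.num_power_states - 1;

    switch (action) {
    case DYNPM_ACTION_DOWNCLOCK:
            rdev->pm.requested_power_state_index = cur > 0 ? cur - 1 : cur;
            if (cur == 0)
                    rdev->pm.dynpm_can_downclock = false;  /* at the floor */
            break;
    case DYNPM_ACTION_UPCLOCK:
            rdev->pm.requested_power_state_index = cur < top ? cur + 1 : cur;
            if (cur == top)
                    rdev->pm.dynpm_can_upclock = false;    /* at the ceiling */
            break;
    default:
            break;
    }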
263static int r600_pm_get_type_index(struct radeon_device *rdev,
264 enum radeon_pm_state_type ps_type,
265 int instance)
266{
267 int i;
268 int found_instance = -1;
269
270 for (i = 0; i < rdev->pm.num_power_states; i++) {
271 if (rdev->pm.power_state[i].type == ps_type) {
272 found_instance++;
273 if (found_instance == instance)
274 return i;
275 }
276 }
277 /* return default if no match */
278 return rdev->pm.default_power_state_index;
279}
280
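
r600_pm_get_type_index() returns the array index of the Nth power state of a given type, so the profile tables below stay correct regardless of the order in which the vbios listed the states. A worked example:

    /* With rdev->pm.power_state laid out as
     *   [0] DEFAULT  [1] BATTERY  [2] PERFORMANCE  [3] PERFORMANCE
     * the lookup
     *   r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1)
     * returns 3 (the second PERFORMANCE entry); an instance that does
     * not exist falls back to rdev->pm.default_power_state_index. */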
281void rs780_pm_init_profile(struct radeon_device *rdev)
282{
283 if (rdev->pm.num_power_states == 2) {
284 /* default */
285 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
286 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
287 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
288 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
289 /* low sh */
290 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
291 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
292 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
293 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
294 /* high sh */
295 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
296 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
297 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
298 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
299 /* low mh */
300 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
301 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
302 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
303 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
304 /* high mh */
305 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
306 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
307 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
308 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
309 } else if (rdev->pm.num_power_states == 3) {
310 /* default */
311 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
312 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
313 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
314 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
315 /* low sh */
316 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
317 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
318 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
319 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
320 /* high sh */
321 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
322 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
323 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
324 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
325 /* low mh */
326 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
327 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
328 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
329 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
330 /* high mh */
331 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
332 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
333 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
334 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
335 } else {
336 /* default */
337 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
338 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
339 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
340 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
341 /* low sh */
342 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
343 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
344 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
345 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
346 /* high sh */
347 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
348 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
349 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
350 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
351 /* low mh */
352 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
353 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
354 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
355 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
356 /* high mh */
357 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
358 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
359 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
360 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
361 }
362}
363
364void r600_pm_init_profile(struct radeon_device *rdev)
365{
366 if (rdev->family == CHIP_R600) {
367 /* XXX */
368 /* default */
369 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
370 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
371 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
372 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
373 /* low sh */
374 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
375 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
376 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
377 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
378 /* high sh */
379 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
380 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
381 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
382 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
383 /* low mh */
384 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
385 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
386 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
387 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
388 /* high mh */
389 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
390 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
391 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
392 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
393 } else {
394 if (rdev->pm.num_power_states < 4) {
395 /* default */
396 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
397 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
398 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
399 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
400 /* low sh */
401 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
402 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
403 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
404 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
405 /* high sh */
406 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
407 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
408 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
409 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
410 /* low mh */
411 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
412 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
413 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
414 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
415 /* high mh */
416 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
417 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
418 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
419 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
420 } else {
421 /* default */
422 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
423 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
424 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
425 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
426 /* low sh */
427 if (rdev->flags & RADEON_IS_MOBILITY) {
428 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
429 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
430 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
431 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
432 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
433 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
434 } else {
435 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
436 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
437 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
438 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
439 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
440 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
441 }
442 /* high sh */
443 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
444 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
445 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
446 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
447 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
448 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
449 /* low mh */
450 if (rdev->flags & RADEON_IS_MOBILITY) {
451 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
452 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
453 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
454 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
455 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
456 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2;
457 } else {
458 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
459 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
460 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
461 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
462 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
463 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
464 }
465 /* high mh */
466 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
467 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
468 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
469 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
470 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
471 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
472 }
473 }
474}
475
476void r600_pm_misc(struct radeon_device *rdev)
477{
478
479}
480
481bool r600_gui_idle(struct radeon_device *rdev)
482{
483 if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
484 return false;
485 else
486 return true;
487}
78 488
79/* hpd for digital panel detect/disconnect */ 489/* hpd for digital panel detect/disconnect */
80bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 490bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
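
The r600_gui_idle() helper added above gives the PM code a cheap poll of GRBM_STATUS; it pairs with the GUI_IDLE interrupt wired up later in this patch (the GRBM_INT_CNTL enable in r600_irq_set() and IH source 233), which sets rdev->pm.gui_idle and wakes rdev->irq.idle_queue. A hedged sketch of a waiter built on the two (helper name and timeout are illustrative, not from the patch):

    static void example_wait_for_gui_idle(struct radeon_device *rdev)
    {
            if (r600_gui_idle(rdev))
                    return;                 /* already idle, nothing to wait for */
            rdev->pm.gui_idle = false;
            /* IH source 233 sets pm.gui_idle and wakes idle_queue */
            wait_event_timeout(rdev->irq.idle_queue, rdev->pm.gui_idle,
                               msecs_to_jiffies(500));
    }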
@@ -714,11 +1124,6 @@ int r600_mc_init(struct radeon_device *rdev)
714 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1124 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
715 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1125 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
716 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1126 rdev->mc.visible_vram_size = rdev->mc.aper_size;
717 /* FIXME remove this once we support unmappable VRAM */
718 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
719 rdev->mc.mc_vram_size = rdev->mc.aper_size;
720 rdev->mc.real_vram_size = rdev->mc.aper_size;
721 }
722 r600_vram_gtt_location(rdev, &rdev->mc); 1127 r600_vram_gtt_location(rdev, &rdev->mc);
723 1128
724 if (rdev->flags & RADEON_IS_IGP) 1129 if (rdev->flags & RADEON_IS_IGP)
@@ -750,7 +1155,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
750 S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) | 1155 S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
751 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | 1156 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
752 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); 1157 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
753 u32 srbm_reset = 0;
754 u32 tmp; 1158 u32 tmp;
755 1159
 756 dev_info(rdev->dev, "GPU soft reset\n"); 1160
@@ -765,7 +1169,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
765 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 1169 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
766 } 1170 }
767 /* Disable CP parsing/prefetching */ 1171 /* Disable CP parsing/prefetching */
768 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); 1172 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
769 /* Check if any of the rendering block is busy and reset it */ 1173 /* Check if any of the rendering block is busy and reset it */
770 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || 1174 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
771 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { 1175 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
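
The CP halt write above changes from S_0086D8_CP_ME_HALT(0xff) to S_0086D8_CP_ME_HALT(1). Assuming the usual r600d.h shape for a one-bit field (the exact bit position here is an assumption), the macro masks its argument first, so both spellings program the same bit and the change is about no longer leaning on that truncation:

    /* assumed definition -- a 1-bit field: */
    #define S_0086D8_CP_ME_HALT(x)    (((x) & 0x1) << 28)
    /* hence S_0086D8_CP_ME_HALT(0xff) == S_0086D8_CP_ME_HALT(1) */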
@@ -784,72 +1188,56 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
784 S_008020_SOFT_RESET_VGT(1); 1188 S_008020_SOFT_RESET_VGT(1);
785 dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); 1189 dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
786 WREG32(R_008020_GRBM_SOFT_RESET, tmp); 1190 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
787 (void)RREG32(R_008020_GRBM_SOFT_RESET); 1191 RREG32(R_008020_GRBM_SOFT_RESET);
788 udelay(50); 1192 mdelay(15);
789 WREG32(R_008020_GRBM_SOFT_RESET, 0); 1193 WREG32(R_008020_GRBM_SOFT_RESET, 0);
790 (void)RREG32(R_008020_GRBM_SOFT_RESET);
791 } 1194 }
792 /* Reset CP (we always reset CP) */ 1195 /* Reset CP (we always reset CP) */
793 tmp = S_008020_SOFT_RESET_CP(1); 1196 tmp = S_008020_SOFT_RESET_CP(1);
794 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); 1197 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
795 WREG32(R_008020_GRBM_SOFT_RESET, tmp); 1198 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
796 (void)RREG32(R_008020_GRBM_SOFT_RESET); 1199 RREG32(R_008020_GRBM_SOFT_RESET);
797 udelay(50); 1200 mdelay(15);
798 WREG32(R_008020_GRBM_SOFT_RESET, 0); 1201 WREG32(R_008020_GRBM_SOFT_RESET, 0);
799 (void)RREG32(R_008020_GRBM_SOFT_RESET);
800 /* Reset others GPU block if necessary */
801 if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
802 srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
803 if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
804 srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
805 if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
806 srbm_reset |= S_000E60_SOFT_RESET_IH(1);
807 if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
808 srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
809 if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
810 srbm_reset |= S_000E60_SOFT_RESET_MC(1);
811 if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
812 srbm_reset |= S_000E60_SOFT_RESET_MC(1);
813 if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
814 srbm_reset |= S_000E60_SOFT_RESET_MC(1);
815 if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
816 srbm_reset |= S_000E60_SOFT_RESET_MC(1);
817 if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
818 srbm_reset |= S_000E60_SOFT_RESET_MC(1);
819 if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
820 srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
821 if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
822 srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
823 if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
824 srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
825 dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
826 WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
827 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
828 udelay(50);
829 WREG32(R_000E60_SRBM_SOFT_RESET, 0);
830 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
831 WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
832 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
833 udelay(50);
834 WREG32(R_000E60_SRBM_SOFT_RESET, 0);
835 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
836 /* Wait a little for things to settle down */ 1202 /* Wait a little for things to settle down */
837 udelay(50); 1203 mdelay(1);
838 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", 1204 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
839 RREG32(R_008010_GRBM_STATUS)); 1205 RREG32(R_008010_GRBM_STATUS));
840 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", 1206 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
841 RREG32(R_008014_GRBM_STATUS2)); 1207 RREG32(R_008014_GRBM_STATUS2));
842 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", 1208 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
843 RREG32(R_000E50_SRBM_STATUS)); 1209 RREG32(R_000E50_SRBM_STATUS));
844 /* After reset we need to reinit the asic as GPU often endup in an
845 * incoherent state.
846 */
847 atom_asic_init(rdev->mode_info.atom_context);
848 rv515_mc_resume(rdev, &save); 1210 rv515_mc_resume(rdev, &save);
849 return 0; 1211 return 0;
850} 1212}
851 1213
852int r600_gpu_reset(struct radeon_device *rdev) 1214bool r600_gpu_is_lockup(struct radeon_device *rdev)
1215{
1216 u32 srbm_status;
1217 u32 grbm_status;
1218 u32 grbm_status2;
1219 int r;
1220
1221 srbm_status = RREG32(R_000E50_SRBM_STATUS);
1222 grbm_status = RREG32(R_008010_GRBM_STATUS);
1223 grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
1224 if (!G_008010_GUI_ACTIVE(grbm_status)) {
1225 r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
1226 return false;
1227 }
1228 /* force CP activities */
1229 r = radeon_ring_lock(rdev, 2);
1230 if (!r) {
1231 /* PACKET2 NOP */
1232 radeon_ring_write(rdev, 0x80000000);
1233 radeon_ring_write(rdev, 0x80000000);
1234 radeon_ring_unlock_commit(rdev);
1235 }
1236 rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
1237 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
1238}
1239
1240int r600_asic_reset(struct radeon_device *rdev)
853{ 1241{
854 return r600_gpu_soft_reset(rdev); 1242 return r600_gpu_soft_reset(rdev);
855} 1243}
@@ -1467,10 +1855,31 @@ int r600_init_microcode(struct radeon_device *rdev)
1467 chip_name = "RV710"; 1855 chip_name = "RV710";
1468 rlc_chip_name = "R700"; 1856 rlc_chip_name = "R700";
1469 break; 1857 break;
1858 case CHIP_CEDAR:
1859 chip_name = "CEDAR";
1860 rlc_chip_name = "CEDAR";
1861 break;
1862 case CHIP_REDWOOD:
1863 chip_name = "REDWOOD";
1864 rlc_chip_name = "REDWOOD";
1865 break;
1866 case CHIP_JUNIPER:
1867 chip_name = "JUNIPER";
1868 rlc_chip_name = "JUNIPER";
1869 break;
1870 case CHIP_CYPRESS:
1871 case CHIP_HEMLOCK:
1872 chip_name = "CYPRESS";
1873 rlc_chip_name = "CYPRESS";
1874 break;
1470 default: BUG(); 1875 default: BUG();
1471 } 1876 }
1472 1877
1473 if (rdev->family >= CHIP_RV770) { 1878 if (rdev->family >= CHIP_CEDAR) {
1879 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
1880 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
1881 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
1882 } else if (rdev->family >= CHIP_RV770) {
1474 pfp_req_size = R700_PFP_UCODE_SIZE * 4; 1883 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1475 me_req_size = R700_PM4_UCODE_SIZE * 4; 1884 me_req_size = R700_PM4_UCODE_SIZE * 4;
1476 rlc_req_size = R700_RLC_UCODE_SIZE * 4; 1885 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
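
The new CEDAR/REDWOOD/JUNIPER/CYPRESS cases extend the usual pattern: pick a chip_name, derive the expected blob sizes from the family tier, then fetch radeon/<chip>_pfp.bin and friends and reject any blob whose size disagrees with the table. A condensed sketch of that fetch-and-check step, with error paths trimmed:

    char fw_name[30];
    const struct firmware *fw;
    int err;

    snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
    err = request_firmware(&fw, fw_name, &rdev->pdev->dev);
    if (!err && fw->size != pfp_req_size)
            err = -EINVAL;    /* blob does not match the size table */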
@@ -1584,12 +1993,15 @@ int r600_cp_start(struct radeon_device *rdev)
1584 } 1993 }
1585 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); 1994 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
1586 radeon_ring_write(rdev, 0x1); 1995 radeon_ring_write(rdev, 0x1);
1587 if (rdev->family < CHIP_RV770) { 1996 if (rdev->family >= CHIP_CEDAR) {
1588 radeon_ring_write(rdev, 0x3); 1997 radeon_ring_write(rdev, 0x0);
1589 radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1); 1998 radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
1590 } else { 1999 } else if (rdev->family >= CHIP_RV770) {
1591 radeon_ring_write(rdev, 0x0); 2000 radeon_ring_write(rdev, 0x0);
1592 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); 2001 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
2002 } else {
2003 radeon_ring_write(rdev, 0x3);
2004 radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
1593 } 2005 }
1594 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 2006 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1595 radeon_ring_write(rdev, 0); 2007 radeon_ring_write(rdev, 0);
@@ -2051,8 +2463,6 @@ int r600_init(struct radeon_device *rdev)
2051 r = radeon_clocks_init(rdev); 2463 r = radeon_clocks_init(rdev);
2052 if (r) 2464 if (r)
2053 return r; 2465 return r;
2054 /* Initialize power management */
2055 radeon_pm_init(rdev);
2056 /* Fence driver */ 2466 /* Fence driver */
2057 r = radeon_fence_driver_init(rdev); 2467 r = radeon_fence_driver_init(rdev);
2058 if (r) 2468 if (r)
@@ -2117,7 +2527,6 @@ int r600_init(struct radeon_device *rdev)
2117 2527
2118void r600_fini(struct radeon_device *rdev) 2528void r600_fini(struct radeon_device *rdev)
2119{ 2529{
2120 radeon_pm_fini(rdev);
2121 r600_audio_fini(rdev); 2530 r600_audio_fini(rdev);
2122 r600_blit_fini(rdev); 2531 r600_blit_fini(rdev);
2123 r600_cp_fini(rdev); 2532 r600_cp_fini(rdev);
@@ -2290,10 +2699,11 @@ static void r600_ih_ring_fini(struct radeon_device *rdev)
2290 } 2699 }
2291} 2700}
2292 2701
2293static void r600_rlc_stop(struct radeon_device *rdev) 2702void r600_rlc_stop(struct radeon_device *rdev)
2294{ 2703{
2295 2704
2296 if (rdev->family >= CHIP_RV770) { 2705 if ((rdev->family >= CHIP_RV770) &&
2706 (rdev->family <= CHIP_RV740)) {
2297 /* r7xx asics need to soft reset RLC before halting */ 2707 /* r7xx asics need to soft reset RLC before halting */
2298 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); 2708 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2299 RREG32(SRBM_SOFT_RESET); 2709 RREG32(SRBM_SOFT_RESET);
@@ -2330,7 +2740,12 @@ static int r600_rlc_init(struct radeon_device *rdev)
2330 WREG32(RLC_UCODE_CNTL, 0); 2740 WREG32(RLC_UCODE_CNTL, 0);
2331 2741
2332 fw_data = (const __be32 *)rdev->rlc_fw->data; 2742 fw_data = (const __be32 *)rdev->rlc_fw->data;
2333 if (rdev->family >= CHIP_RV770) { 2743 if (rdev->family >= CHIP_CEDAR) {
2744 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2745 WREG32(RLC_UCODE_ADDR, i);
2746 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2747 }
2748 } else if (rdev->family >= CHIP_RV770) {
2334 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { 2749 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2335 WREG32(RLC_UCODE_ADDR, i); 2750 WREG32(RLC_UCODE_ADDR, i);
2336 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 2751 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
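
All three family branches of the RLC upload share one mechanism: an address/data register pair that the driver streams big-endian microcode words through; only the word count (EVERGREEN_/R700_/R600_RLC_UCODE_SIZE) differs. Factored out as a sketch:

    static void example_rlc_load(struct radeon_device *rdev,
                                 const __be32 *fw_data, unsigned words)
    {
            unsigned i;

            for (i = 0; i < words; i++) {
                    WREG32(RLC_UCODE_ADDR, i);                       /* select slot  */
                    WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); /* write a word */
            }
    }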
@@ -2360,7 +2775,7 @@ static void r600_enable_interrupts(struct radeon_device *rdev)
2360 rdev->ih.enabled = true; 2775 rdev->ih.enabled = true;
2361} 2776}
2362 2777
2363static void r600_disable_interrupts(struct radeon_device *rdev) 2778void r600_disable_interrupts(struct radeon_device *rdev)
2364{ 2779{
2365 u32 ih_rb_cntl = RREG32(IH_RB_CNTL); 2780 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2366 u32 ih_cntl = RREG32(IH_CNTL); 2781 u32 ih_cntl = RREG32(IH_CNTL);
@@ -2475,7 +2890,10 @@ int r600_irq_init(struct radeon_device *rdev)
2475 WREG32(IH_CNTL, ih_cntl); 2890 WREG32(IH_CNTL, ih_cntl);
2476 2891
2477 /* force the active interrupt state to all disabled */ 2892 /* force the active interrupt state to all disabled */
2478 r600_disable_interrupt_state(rdev); 2893 if (rdev->family >= CHIP_CEDAR)
2894 evergreen_disable_interrupt_state(rdev);
2895 else
2896 r600_disable_interrupt_state(rdev);
2479 2897
2480 /* enable irqs */ 2898 /* enable irqs */
2481 r600_enable_interrupts(rdev); 2899 r600_enable_interrupts(rdev);
@@ -2485,7 +2903,7 @@ int r600_irq_init(struct radeon_device *rdev)
2485 2903
2486void r600_irq_suspend(struct radeon_device *rdev) 2904void r600_irq_suspend(struct radeon_device *rdev)
2487{ 2905{
2488 r600_disable_interrupts(rdev); 2906 r600_irq_disable(rdev);
2489 r600_rlc_stop(rdev); 2907 r600_rlc_stop(rdev);
2490} 2908}
2491 2909
@@ -2500,6 +2918,8 @@ int r600_irq_set(struct radeon_device *rdev)
2500 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 2918 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2501 u32 mode_int = 0; 2919 u32 mode_int = 0;
2502 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; 2920 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
2921 u32 grbm_int_cntl = 0;
2922 u32 hdmi1, hdmi2;
2503 2923
2504 if (!rdev->irq.installed) { 2924 if (!rdev->irq.installed) {
2505 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 2925 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
@@ -2513,7 +2933,9 @@ int r600_irq_set(struct radeon_device *rdev)
2513 return 0; 2933 return 0;
2514 } 2934 }
2515 2935
2936 hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2516 if (ASIC_IS_DCE3(rdev)) { 2937 if (ASIC_IS_DCE3(rdev)) {
2938 hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2517 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 2939 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2518 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 2940 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2519 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; 2941 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2523,6 +2945,7 @@ int r600_irq_set(struct radeon_device *rdev)
2523 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 2945 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2524 } 2946 }
2525 } else { 2947 } else {
2948 hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2526 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN; 2949 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2527 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN; 2950 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2528 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN; 2951 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2564,10 +2987,25 @@ int r600_irq_set(struct radeon_device *rdev)
2564 DRM_DEBUG("r600_irq_set: hpd 6\n"); 2987 DRM_DEBUG("r600_irq_set: hpd 6\n");
2565 hpd6 |= DC_HPDx_INT_EN; 2988 hpd6 |= DC_HPDx_INT_EN;
2566 } 2989 }
2990 if (rdev->irq.hdmi[0]) {
2991 DRM_DEBUG("r600_irq_set: hdmi 1\n");
2992 hdmi1 |= R600_HDMI_INT_EN;
2993 }
2994 if (rdev->irq.hdmi[1]) {
2995 DRM_DEBUG("r600_irq_set: hdmi 2\n");
2996 hdmi2 |= R600_HDMI_INT_EN;
2997 }
2998 if (rdev->irq.gui_idle) {
2999 DRM_DEBUG("gui idle\n");
3000 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3001 }
2567 3002
2568 WREG32(CP_INT_CNTL, cp_int_cntl); 3003 WREG32(CP_INT_CNTL, cp_int_cntl);
2569 WREG32(DxMODE_INT_MASK, mode_int); 3004 WREG32(DxMODE_INT_MASK, mode_int);
3005 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3006 WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
2570 if (ASIC_IS_DCE3(rdev)) { 3007 if (ASIC_IS_DCE3(rdev)) {
3008 WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
2571 WREG32(DC_HPD1_INT_CONTROL, hpd1); 3009 WREG32(DC_HPD1_INT_CONTROL, hpd1);
2572 WREG32(DC_HPD2_INT_CONTROL, hpd2); 3010 WREG32(DC_HPD2_INT_CONTROL, hpd2);
2573 WREG32(DC_HPD3_INT_CONTROL, hpd3); 3011 WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -2577,6 +3015,7 @@ int r600_irq_set(struct radeon_device *rdev)
2577 WREG32(DC_HPD6_INT_CONTROL, hpd6); 3015 WREG32(DC_HPD6_INT_CONTROL, hpd6);
2578 } 3016 }
2579 } else { 3017 } else {
3018 WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
2580 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); 3019 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
2581 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); 3020 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
2582 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3); 3021 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
@@ -2660,6 +3099,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
2660 WREG32(DC_HPD6_INT_CONTROL, tmp); 3099 WREG32(DC_HPD6_INT_CONTROL, tmp);
2661 } 3100 }
2662 } 3101 }
3102 if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3103 WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3104 }
3105 if (ASIC_IS_DCE3(rdev)) {
3106 if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3107 WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3108 }
3109 } else {
3110 if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3111 WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3112 }
3113 }
2663} 3114}
2664 3115
2665void r600_irq_disable(struct radeon_device *rdev) 3116void r600_irq_disable(struct radeon_device *rdev)
@@ -2713,6 +3164,8 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2713 * 19 1 FP Hot plug detection B 3164 * 19 1 FP Hot plug detection B
2714 * 19 2 DAC A auto-detection 3165 * 19 2 DAC A auto-detection
2715 * 19 3 DAC B auto-detection 3166 * 19 3 DAC B auto-detection
3167 * 21 4 HDMI block A
3168 * 21 5 HDMI block B
2716 * 176 - CP_INT RB 3169 * 176 - CP_INT RB
2717 * 177 - CP_INT IB1 3170 * 177 - CP_INT IB1
2718 * 178 - CP_INT IB2 3171 * 178 - CP_INT IB2
@@ -2852,6 +3305,10 @@ restart_ih:
2852 break; 3305 break;
2853 } 3306 }
2854 break; 3307 break;
3308 case 21: /* HDMI */
3309 DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
3310 r600_audio_schedule_polling(rdev);
3311 break;
2855 case 176: /* CP_INT in ring buffer */ 3312 case 176: /* CP_INT in ring buffer */
2856 case 177: /* CP_INT in IB1 */ 3313 case 177: /* CP_INT in IB1 */
2857 case 178: /* CP_INT in IB2 */ 3314 case 178: /* CP_INT in IB2 */
@@ -2861,6 +3318,11 @@ restart_ih:
2861 case 181: /* CP EOP event */ 3318 case 181: /* CP EOP event */
2862 DRM_DEBUG("IH: CP EOP\n"); 3319 DRM_DEBUG("IH: CP EOP\n");
2863 break; 3320 break;
3321 case 233: /* GUI IDLE */
 3322 DRM_DEBUG("IH: GUI idle\n");
3323 rdev->pm.gui_idle = true;
3324 wake_up(&rdev->irq.idle_queue);
3325 break;
2864 default: 3326 default:
2865 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3327 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2866 break; 3328 break;
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 1d898051c631..2b26553c352c 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -44,7 +44,7 @@ static int r600_audio_chipset_supported(struct radeon_device *rdev)
44/* 44/*
45 * current number of channels 45 * current number of channels
46 */ 46 */
47static int r600_audio_channels(struct radeon_device *rdev) 47int r600_audio_channels(struct radeon_device *rdev)
48{ 48{
49 return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1; 49 return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
50} 50}
@@ -52,7 +52,7 @@ static int r600_audio_channels(struct radeon_device *rdev)
52/* 52/*
53 * current bits per sample 53 * current bits per sample
54 */ 54 */
55static int r600_audio_bits_per_sample(struct radeon_device *rdev) 55int r600_audio_bits_per_sample(struct radeon_device *rdev)
56{ 56{
57 uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4; 57 uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
58 switch (value) { 58 switch (value) {
@@ -71,7 +71,7 @@ static int r600_audio_bits_per_sample(struct radeon_device *rdev)
71/* 71/*
72 * current sampling rate in HZ 72 * current sampling rate in HZ
73 */ 73 */
74static int r600_audio_rate(struct radeon_device *rdev) 74int r600_audio_rate(struct radeon_device *rdev)
75{ 75{
76 uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL); 76 uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
77 uint32_t result; 77 uint32_t result;
@@ -90,7 +90,7 @@ static int r600_audio_rate(struct radeon_device *rdev)
90/* 90/*
91 * iec 60958 status bits 91 * iec 60958 status bits
92 */ 92 */
93static uint8_t r600_audio_status_bits(struct radeon_device *rdev) 93uint8_t r600_audio_status_bits(struct radeon_device *rdev)
94{ 94{
95 return RREG32(R600_AUDIO_STATUS_BITS) & 0xff; 95 return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
96} 96}
@@ -98,12 +98,21 @@ static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
98/* 98/*
99 * iec 60958 category code 99 * iec 60958 category code
100 */ 100 */
101static uint8_t r600_audio_category_code(struct radeon_device *rdev) 101uint8_t r600_audio_category_code(struct radeon_device *rdev)
102{ 102{
103 return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff; 103 return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
104} 104}
105 105
106/* 106/*
107 * schedule next audio update event
108 */
109void r600_audio_schedule_polling(struct radeon_device *rdev)
110{
111 mod_timer(&rdev->audio_timer,
112 jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
113}
114
115/*
107 * update all hdmi interfaces with current audio parameters 116 * update all hdmi interfaces with current audio parameters
108 */ 117 */
109static void r600_audio_update_hdmi(unsigned long param) 118static void r600_audio_update_hdmi(unsigned long param)
@@ -118,7 +127,7 @@ static void r600_audio_update_hdmi(unsigned long param)
118 uint8_t category_code = r600_audio_category_code(rdev); 127 uint8_t category_code = r600_audio_category_code(rdev);
119 128
120 struct drm_encoder *encoder; 129 struct drm_encoder *encoder;
121 int changes = 0; 130 int changes = 0, still_going = 0;
122 131
123 changes |= channels != rdev->audio_channels; 132 changes |= channels != rdev->audio_channels;
124 changes |= rate != rdev->audio_rate; 133 changes |= rate != rdev->audio_rate;
@@ -135,15 +144,13 @@ static void r600_audio_update_hdmi(unsigned long param)
 	}
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+		still_going |= radeon_encoder->audio_polling_active;
 		if (changes || r600_hdmi_buffer_status_changed(encoder))
-			r600_hdmi_update_audio_settings(
-				encoder, channels,
-				rate, bps, status_bits,
-				category_code);
+			r600_hdmi_update_audio_settings(encoder);
 	}
 
-	mod_timer(&rdev->audio_timer,
-		jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+	if(still_going) r600_audio_schedule_polling(rdev);
 }
 
 /*
@@ -176,9 +183,34 @@ int r600_audio_init(struct radeon_device *rdev)
 			r600_audio_update_hdmi,
 			(unsigned long)rdev);
 
+	return 0;
+}
+
+/*
+ * enable the polling timer, to check for status changes
+ */
+void r600_audio_enable_polling(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	DRM_DEBUG("r600_audio_enable_polling: %d", radeon_encoder->audio_polling_active);
+	if (radeon_encoder->audio_polling_active)
+		return;
+
+	radeon_encoder->audio_polling_active = 1;
 	mod_timer(&rdev->audio_timer, jiffies + 1);
+}
 
-	return 0;
+/*
+ * disable the polling timer, so we get no more status updates
+ */
+void r600_audio_disable_polling(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	DRM_DEBUG("r600_audio_disable_polling: %d", radeon_encoder->audio_polling_active);
+	radeon_encoder->audio_polling_active = 0;
 }
183 215
184/* 216/*
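
The polling path is now self-limiting: the timer callback keeps itself alive only while at least one encoder still wants updates. A condensed sketch of the lifecycle (control flow simplified; r600_audio_update_hdmi() is the audio_timer callback installed by r600_audio_init()):

/* Sketch of the polling lifecycle introduced above */
r600_audio_enable_polling(encoder);	/* set audio_polling_active, arm timer */
/* timer fires: r600_audio_update_hdmi() pushes current settings to each
 * encoder, then re-arms via r600_audio_schedule_polling() only while some
 * encoder still has audio_polling_active set */
r600_audio_disable_polling(encoder);	/* clear the flag; the callback
					 * quietly stops re-arming itself */
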
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index f6c6c77db7e0..d13622ae74e9 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -447,6 +447,9 @@ int r600_blit_init(struct radeon_device *rdev)
447 u32 packet2s[16]; 447 u32 packet2s[16];
448 int num_packet2s = 0; 448 int num_packet2s = 0;
449 449
450 /* don't reinitialize blit */
451 if (rdev->r600_blit.shader_obj)
452 return 0;
450 mutex_init(&rdev->r600_blit.mutex); 453 mutex_init(&rdev->r600_blit.mutex);
451 rdev->r600_blit.state_offset = 0; 454 rdev->r600_blit.state_offset = 0;
452 455
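
The early return makes r600_blit_init() safe to call again on resume without re-creating the shader object. The shape of the guard, as a generic sketch (the ctx/resource names and allocator are hypothetical, not radeon code):

/* Generic sketch of the guard added above: a non-NULL object
 * means "already initialized", so init becomes idempotent. */
int init_once_sketch(struct ctx *c)
{
	if (c->resource)			/* don't reinitialize */
		return 0;
	c->resource = allocate_resource();	/* hypothetical allocator */
	return c->resource ? 0 : -ENOMEM;
}
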
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 2616b822ba68..26b4bc9d89a5 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -290,17 +290,15 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
 	if (!offset)
 		return;
 
-	if (r600_hdmi_is_audio_buffer_filled(encoder)) {
-		/* disable audio workaround and start delivering of audio frames */
-		WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
+	if (!radeon_encoder->hdmi_audio_workaround ||
+	    r600_hdmi_is_audio_buffer_filled(encoder)) {
 
-	} else if (radeon_encoder->hdmi_audio_workaround) {
-		/* enable audio workaround and start delivering of audio frames */
-		WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
+		/* disable audio workaround */
+		WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
 
 	} else {
-		/* disable audio workaround and stop delivering of audio frames */
-		WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
+		/* enable audio workaround */
+		WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
 	}
 }
 
@@ -345,25 +343,23 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
 
 	/* audio packets per line, does anyone know how to calc this ? */
 	WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
-
-	/* update? reset? don't realy know */
-	WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
 }
 
 /*
  * update settings with current parameters from audio engine
  */
-void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
-				     int channels,
-				     int rate,
-				     int bps,
-				     uint8_t status_bits,
-				     uint8_t category_code)
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
 
+	int channels = r600_audio_channels(rdev);
+	int rate = r600_audio_rate(rdev);
+	int bps = r600_audio_bits_per_sample(rdev);
+	uint8_t status_bits = r600_audio_status_bits(rdev);
+	uint8_t category_code = r600_audio_category_code(rdev);
+
 	uint32_t iec;
 
369 if (!offset) 365 if (!offset)
@@ -415,9 +411,6 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
415 r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0); 411 r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
416 412
417 r600_hdmi_audio_workaround(encoder); 413 r600_hdmi_audio_workaround(encoder);
418
419 /* update? reset? don't realy know */
420 WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
421} 414}
422 415
423static int r600_hdmi_find_free_block(struct drm_device *dev) 416static int r600_hdmi_find_free_block(struct drm_device *dev)
@@ -486,6 +479,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
486 struct drm_device *dev = encoder->dev; 479 struct drm_device *dev = encoder->dev;
487 struct radeon_device *rdev = dev->dev_private; 480 struct radeon_device *rdev = dev->dev_private;
488 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 481 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
482 uint32_t offset;
489 483
490 if (ASIC_IS_DCE4(rdev)) 484 if (ASIC_IS_DCE4(rdev))
491 return; 485 return;
@@ -499,10 +493,10 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
499 } 493 }
500 } 494 }
501 495
496 offset = radeon_encoder->hdmi_offset;
502 if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { 497 if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
503 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1); 498 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
504 } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { 499 } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
505 int offset = radeon_encoder->hdmi_offset;
506 switch (radeon_encoder->encoder_id) { 500 switch (radeon_encoder->encoder_id) {
507 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 501 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
508 WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4); 502 WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
@@ -518,6 +512,21 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
518 } 512 }
519 } 513 }
520 514
515 if (rdev->irq.installed
516 && rdev->family != CHIP_RS600
517 && rdev->family != CHIP_RS690
518 && rdev->family != CHIP_RS740) {
519
520 /* if irq is available use it */
521 rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
522 radeon_irq_set(rdev);
523
524 r600_audio_disable_polling(encoder);
525 } else {
526 /* if not fallback to polling */
527 r600_audio_enable_polling(encoder);
528 }
529
521 DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n", 530 DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
522 radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); 531 radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
523} 532}
@@ -530,22 +539,30 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t offset;
 
 	if (ASIC_IS_DCE4(rdev))
 		return;
 
-	if (!radeon_encoder->hdmi_offset) {
+	offset = radeon_encoder->hdmi_offset;
+	if (!offset) {
 		dev_err(rdev->dev, "Disabling not enabled HDMI\n");
 		return;
 	}
 
 	DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
-		radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+		offset, radeon_encoder->encoder_id);
+
+	/* disable irq */
+	rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false;
+	radeon_irq_set(rdev);
+
+	/* disable polling */
+	r600_audio_disable_polling(encoder);
 
 	if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
 		WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
 	} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
-		int offset = radeon_encoder->hdmi_offset;
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
 			WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
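
r600_hdmi_enable() now prefers the interrupt path and keeps the timer as a fallback; RS600/RS690/RS740 are excluded, presumably because those IGPs do not go through the r600 interrupt handler that acks the HDMI blocks. A condensed sketch of the choice, reusing the block-to-index mapping from the code above (the second irq slot covers BLOCK3 on DCE3 chips and BLOCK2 before that):

/* Sketch of the irq-vs-polling decision in r600_hdmi_enable() */
if (rdev->irq.installed && chip_has_r600_ih /* assumption: not RS600/RS690/RS740 */) {
	rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
	radeon_irq_set(rdev);			/* push enable bits to hardware */
	r600_audio_disable_polling(encoder);	/* the irq replaces the timer */
} else {
	r600_audio_enable_polling(encoder);	/* no usable irq: fall back */
}
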
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 7b1d22370f6e..d84612ae47e0 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -157,33 +157,36 @@
 #define R600_HDMI_BLOCK3 0x7800
 
 /* HDMI registers */
 #define R600_HDMI_ENABLE 0x00
 #define R600_HDMI_STATUS 0x04
+#       define R600_HDMI_INT_PENDING (1 << 29)
 #define R600_HDMI_CNTL 0x08
+#       define R600_HDMI_INT_EN (1 << 28)
+#       define R600_HDMI_INT_ACK (1 << 29)
 #define R600_HDMI_UNKNOWN_0 0x0C
 #define R600_HDMI_AUDIOCNTL 0x10
 #define R600_HDMI_VIDEOCNTL 0x14
 #define R600_HDMI_VERSION 0x18
 #define R600_HDMI_UNKNOWN_1 0x28
 #define R600_HDMI_VIDEOINFOFRAME_0 0x54
 #define R600_HDMI_VIDEOINFOFRAME_1 0x58
 #define R600_HDMI_VIDEOINFOFRAME_2 0x5c
 #define R600_HDMI_VIDEOINFOFRAME_3 0x60
 #define R600_HDMI_32kHz_CTS 0xac
 #define R600_HDMI_32kHz_N 0xb0
 #define R600_HDMI_44_1kHz_CTS 0xb4
 #define R600_HDMI_44_1kHz_N 0xb8
 #define R600_HDMI_48kHz_CTS 0xbc
 #define R600_HDMI_48kHz_N 0xc0
 #define R600_HDMI_AUDIOINFOFRAME_0 0xcc
 #define R600_HDMI_AUDIOINFOFRAME_1 0xd0
 #define R600_HDMI_IEC60958_1 0xd4
 #define R600_HDMI_IEC60958_2 0xd8
 #define R600_HDMI_UNKNOWN_2 0xdc
 #define R600_HDMI_AUDIO_DEBUG_0 0xe0
 #define R600_HDMI_AUDIO_DEBUG_1 0xe4
 #define R600_HDMI_AUDIO_DEBUG_2 0xe8
 #define R600_HDMI_AUDIO_DEBUG_3 0xec
 
188/* HDMI additional config base register addresses */ 191/* HDMI additional config base register addresses */
189#define R600_HDMI_CONFIG1 0x7600 192#define R600_HDMI_CONFIG1 0x7600
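
The interleaved bit defines follow the file's existing convention: the block macros (R600_HDMI_BLOCK1..3) are base addresses and the register macros are byte offsets within a block, so the pending check in r600_irq_ack() composes them directly:

/* Example, taken from the r600.c hunk above: probe block 1 and ack */
if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING)
	WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL,
		 R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);

Note that R600_HDMI_INT_PENDING in STATUS and R600_HDMI_INT_ACK in CNTL happen to share bit 29; they live in different registers.
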
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 034218c3dbbb..66a37fb75839 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -89,7 +89,6 @@ extern int radeon_testing;
89extern int radeon_connector_table; 89extern int radeon_connector_table;
90extern int radeon_tv; 90extern int radeon_tv;
91extern int radeon_new_pll; 91extern int radeon_new_pll;
92extern int radeon_dynpm;
93extern int radeon_audio; 92extern int radeon_audio;
94extern int radeon_disp_priority; 93extern int radeon_disp_priority;
95extern int radeon_hw_i2c; 94extern int radeon_hw_i2c;
@@ -99,6 +98,7 @@ extern int radeon_hw_i2c;
99 * symbol; 98 * symbol;
100 */ 99 */
101#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 100#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
101#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
102/* RADEON_IB_POOL_SIZE must be a power of 2 */ 102/* RADEON_IB_POOL_SIZE must be a power of 2 */
103#define RADEON_IB_POOL_SIZE 16 103#define RADEON_IB_POOL_SIZE 16
104#define RADEON_DEBUGFS_MAX_NUM_FILES 32 104#define RADEON_DEBUGFS_MAX_NUM_FILES 32
@@ -172,6 +172,8 @@ struct radeon_clock {
172int radeon_pm_init(struct radeon_device *rdev); 172int radeon_pm_init(struct radeon_device *rdev);
173void radeon_pm_fini(struct radeon_device *rdev); 173void radeon_pm_fini(struct radeon_device *rdev);
174void radeon_pm_compute_clocks(struct radeon_device *rdev); 174void radeon_pm_compute_clocks(struct radeon_device *rdev);
175void radeon_pm_suspend(struct radeon_device *rdev);
176void radeon_pm_resume(struct radeon_device *rdev);
175void radeon_combios_get_power_modes(struct radeon_device *rdev); 177void radeon_combios_get_power_modes(struct radeon_device *rdev);
176void radeon_atombios_get_power_modes(struct radeon_device *rdev); 178void radeon_atombios_get_power_modes(struct radeon_device *rdev);
177 179
@@ -182,7 +184,8 @@ struct radeon_fence_driver {
182 uint32_t scratch_reg; 184 uint32_t scratch_reg;
183 atomic_t seq; 185 atomic_t seq;
184 uint32_t last_seq; 186 uint32_t last_seq;
185 unsigned long count_timeout; 187 unsigned long last_jiffies;
188 unsigned long last_timeout;
186 wait_queue_head_t queue; 189 wait_queue_head_t queue;
187 rwlock_t lock; 190 rwlock_t lock;
188 struct list_head created; 191 struct list_head created;
@@ -197,7 +200,6 @@ struct radeon_fence {
197 struct list_head list; 200 struct list_head list;
198 /* protected by radeon_fence.lock */ 201 /* protected by radeon_fence.lock */
199 uint32_t seq; 202 uint32_t seq;
200 unsigned long timeout;
201 bool emited; 203 bool emited;
202 bool signaled; 204 bool signaled;
203}; 205};
@@ -259,6 +261,7 @@ struct radeon_bo_list {
259 unsigned rdomain; 261 unsigned rdomain;
260 unsigned wdomain; 262 unsigned wdomain;
261 u32 tiling_flags; 263 u32 tiling_flags;
264 bool reserved;
262}; 265};
263 266
264/* 267/*
@@ -371,10 +374,15 @@ struct radeon_irq {
371 bool installed; 374 bool installed;
372 bool sw_int; 375 bool sw_int;
373 /* FIXME: use a define max crtc rather than hardcode it */ 376 /* FIXME: use a define max crtc rather than hardcode it */
374 bool crtc_vblank_int[2]; 377 bool crtc_vblank_int[6];
375 wait_queue_head_t vblank_queue; 378 wait_queue_head_t vblank_queue;
376 /* FIXME: use defines for max hpd/dacs */ 379 /* FIXME: use defines for max hpd/dacs */
377 bool hpd[6]; 380 bool hpd[6];
381 bool gui_idle;
382 bool gui_idle_acked;
383 wait_queue_head_t idle_queue;
384 /* FIXME: use defines for max HDMI blocks */
385 bool hdmi[2];
378 spinlock_t sw_lock; 386 spinlock_t sw_lock;
379 int sw_refcount; 387 int sw_refcount;
380}; 388};
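
The gui_idle flag and idle_queue pair with the IH handler's src_id 233 case above, which sets rdev->pm.gui_idle and wakes the queue. A hedged sketch of the waiting side; the interrupt-arming step and the 100 ms timeout are assumptions:

/* Hypothetical waiter for the GUI-idle interrupt (sketch only) */
rdev->pm.gui_idle = false;
/* ... arm the GUI-idle interrupt, e.g. via radeon_irq_set(), then: */
wait_event_timeout(rdev->irq.idle_queue, rdev->pm.gui_idle,
		   msecs_to_jiffies(100));
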
@@ -462,7 +470,9 @@ int radeon_ib_test(struct radeon_device *rdev);
462extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib); 470extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
463/* Ring access between begin & end cannot sleep */ 471/* Ring access between begin & end cannot sleep */
464void radeon_ring_free_size(struct radeon_device *rdev); 472void radeon_ring_free_size(struct radeon_device *rdev);
473int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
465int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw); 474int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
475void radeon_ring_commit(struct radeon_device *rdev);
466void radeon_ring_unlock_commit(struct radeon_device *rdev); 476void radeon_ring_unlock_commit(struct radeon_device *rdev);
467void radeon_ring_unlock_undo(struct radeon_device *rdev); 477void radeon_ring_unlock_undo(struct radeon_device *rdev);
468int radeon_ring_test(struct radeon_device *rdev); 478int radeon_ring_test(struct radeon_device *rdev);
@@ -597,17 +607,24 @@ struct radeon_wb {
597 * Equation between gpu/memory clock and available bandwidth is hw dependent 607 * Equation between gpu/memory clock and available bandwidth is hw dependent
598 * (type of memory, bus size, efficiency, ...) 608 * (type of memory, bus size, efficiency, ...)
599 */ 609 */
-enum radeon_pm_state {
-	PM_STATE_DISABLED,
-	PM_STATE_MINIMUM,
-	PM_STATE_PAUSED,
-	PM_STATE_ACTIVE
+
+enum radeon_pm_method {
+	PM_METHOD_PROFILE,
+	PM_METHOD_DYNPM,
+};
+
+enum radeon_dynpm_state {
+	DYNPM_STATE_DISABLED,
+	DYNPM_STATE_MINIMUM,
+	DYNPM_STATE_PAUSED,
+	DYNPM_STATE_ACTIVE
 };
-enum radeon_pm_action {
-	PM_ACTION_NONE,
-	PM_ACTION_MINIMUM,
-	PM_ACTION_DOWNCLOCK,
-	PM_ACTION_UPCLOCK
+enum radeon_dynpm_action {
+	DYNPM_ACTION_NONE,
+	DYNPM_ACTION_MINIMUM,
+	DYNPM_ACTION_DOWNCLOCK,
+	DYNPM_ACTION_UPCLOCK,
+	DYNPM_ACTION_DEFAULT
 };
 
 enum radeon_voltage_type {
@@ -625,11 +642,25 @@ enum radeon_pm_state_type {
625 POWER_STATE_TYPE_PERFORMANCE, 642 POWER_STATE_TYPE_PERFORMANCE,
626}; 643};
627 644
628enum radeon_pm_clock_mode_type { 645enum radeon_pm_profile_type {
629 POWER_MODE_TYPE_DEFAULT, 646 PM_PROFILE_DEFAULT,
630 POWER_MODE_TYPE_LOW, 647 PM_PROFILE_AUTO,
631 POWER_MODE_TYPE_MID, 648 PM_PROFILE_LOW,
632 POWER_MODE_TYPE_HIGH, 649 PM_PROFILE_HIGH,
650};
651
652#define PM_PROFILE_DEFAULT_IDX 0
653#define PM_PROFILE_LOW_SH_IDX 1
654#define PM_PROFILE_HIGH_SH_IDX 2
655#define PM_PROFILE_LOW_MH_IDX 3
656#define PM_PROFILE_HIGH_MH_IDX 4
657#define PM_PROFILE_MAX 5
658
659struct radeon_pm_profile {
660 int dpms_off_ps_idx;
661 int dpms_on_ps_idx;
662 int dpms_off_cm_idx;
663 int dpms_on_cm_idx;
633}; 664};
634 665
635struct radeon_voltage { 666struct radeon_voltage {
@@ -646,12 +677,8 @@ struct radeon_voltage {
646 u32 voltage; 677 u32 voltage;
647}; 678};
648 679
649struct radeon_pm_non_clock_info { 680/* clock mode flags */
650 /* pcie lanes */ 681#define RADEON_PM_MODE_NO_DISPLAY (1 << 0)
651 int pcie_lanes;
652 /* standardized non-clock flags */
653 u32 flags;
654};
655 682
656struct radeon_pm_clock_info { 683struct radeon_pm_clock_info {
657 /* memory clock */ 684 /* memory clock */
@@ -660,10 +687,13 @@ struct radeon_pm_clock_info {
660 u32 sclk; 687 u32 sclk;
661 /* voltage info */ 688 /* voltage info */
662 struct radeon_voltage voltage; 689 struct radeon_voltage voltage;
663 /* standardized clock flags - not sure we'll need these */ 690 /* standardized clock flags */
664 u32 flags; 691 u32 flags;
665}; 692};
666 693
694/* state flags */
695#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
696
667struct radeon_power_state { 697struct radeon_power_state {
668 enum radeon_pm_state_type type; 698 enum radeon_pm_state_type type;
669 /* XXX: use a define for num clock modes */ 699 /* XXX: use a define for num clock modes */
@@ -671,9 +701,11 @@ struct radeon_power_state {
671 /* number of valid clock modes in this power state */ 701 /* number of valid clock modes in this power state */
672 int num_clock_modes; 702 int num_clock_modes;
673 struct radeon_pm_clock_info *default_clock_mode; 703 struct radeon_pm_clock_info *default_clock_mode;
674 /* non clock info about this state */ 704 /* standardized state flags */
675 struct radeon_pm_non_clock_info non_clock_info; 705 u32 flags;
676 bool voltage_drop_active; 706 u32 misc; /* vbios specific flags */
707 u32 misc2; /* vbios specific flags */
708 int pcie_lanes; /* pcie lanes */
677}; 709};
678 710
679/* 711/*
@@ -683,14 +715,11 @@ struct radeon_power_state {
683 715
684struct radeon_pm { 716struct radeon_pm {
685 struct mutex mutex; 717 struct mutex mutex;
686 struct delayed_work idle_work; 718 u32 active_crtcs;
687 enum radeon_pm_state state; 719 int active_crtc_count;
688 enum radeon_pm_action planned_action;
689 unsigned long action_timeout;
690 bool downclocked;
691 int active_crtcs;
692 int req_vblank; 720 int req_vblank;
693 bool vblank_sync; 721 bool vblank_sync;
722 bool gui_idle;
694 fixed20_12 max_bandwidth; 723 fixed20_12 max_bandwidth;
695 fixed20_12 igp_sideport_mclk; 724 fixed20_12 igp_sideport_mclk;
696 fixed20_12 igp_system_mclk; 725 fixed20_12 igp_system_mclk;
@@ -707,12 +736,27 @@ struct radeon_pm {
707 struct radeon_power_state power_state[8]; 736 struct radeon_power_state power_state[8];
708 /* number of valid power states */ 737 /* number of valid power states */
709 int num_power_states; 738 int num_power_states;
710 struct radeon_power_state *current_power_state; 739 int current_power_state_index;
711 struct radeon_pm_clock_info *current_clock_mode; 740 int current_clock_mode_index;
712 struct radeon_power_state *requested_power_state; 741 int requested_power_state_index;
713 struct radeon_pm_clock_info *requested_clock_mode; 742 int requested_clock_mode_index;
714 struct radeon_power_state *default_power_state; 743 int default_power_state_index;
744 u32 current_sclk;
745 u32 current_mclk;
715 struct radeon_i2c_chan *i2c_bus; 746 struct radeon_i2c_chan *i2c_bus;
747 /* selected pm method */
748 enum radeon_pm_method pm_method;
749 /* dynpm power management */
750 struct delayed_work dynpm_idle_work;
751 enum radeon_dynpm_state dynpm_state;
752 enum radeon_dynpm_action dynpm_planned_action;
753 unsigned long dynpm_action_timeout;
754 bool dynpm_can_upclock;
755 bool dynpm_can_downclock;
756 /* profile-based power management */
757 enum radeon_pm_profile_type profile;
758 int profile_index;
759 struct radeon_pm_profile profiles[PM_PROFILE_MAX];
716}; 760};
717 761
718 762
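
A hedged sketch of how the new profile table is meant to be indexed; the pairing of the PM_PROFILE_*_IDX defines with profiles[] is implied rather than spelled out in this hunk, so treat the details as assumptions:

/* Sketch: a DPMS transition picks the requested power state and clock
 * mode indices from the currently selected profile. */
static void pm_profile_pick_sketch(struct radeon_device *rdev, bool dpms_on)
{
	struct radeon_pm_profile *p = &rdev->pm.profiles[rdev->pm.profile_index];

	rdev->pm.requested_power_state_index =
		dpms_on ? p->dpms_on_ps_idx : p->dpms_off_ps_idx;
	rdev->pm.requested_clock_mode_index =
		dpms_on ? p->dpms_on_cm_idx : p->dpms_off_cm_idx;
}
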
@@ -746,7 +790,8 @@ struct radeon_asic {
746 int (*resume)(struct radeon_device *rdev); 790 int (*resume)(struct radeon_device *rdev);
747 int (*suspend)(struct radeon_device *rdev); 791 int (*suspend)(struct radeon_device *rdev);
748 void (*vga_set_state)(struct radeon_device *rdev, bool state); 792 void (*vga_set_state)(struct radeon_device *rdev, bool state);
749 int (*gpu_reset)(struct radeon_device *rdev); 793 bool (*gpu_is_lockup)(struct radeon_device *rdev);
794 int (*asic_reset)(struct radeon_device *rdev);
750 void (*gart_tlb_flush)(struct radeon_device *rdev); 795 void (*gart_tlb_flush)(struct radeon_device *rdev);
751 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); 796 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
752 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); 797 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
@@ -799,44 +844,84 @@ struct radeon_asic {
799 * through ring. 844 * through ring.
800 */ 845 */
801 void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); 846 void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
847 bool (*gui_idle)(struct radeon_device *rdev);
848 /* power management */
849 void (*pm_misc)(struct radeon_device *rdev);
850 void (*pm_prepare)(struct radeon_device *rdev);
851 void (*pm_finish)(struct radeon_device *rdev);
852 void (*pm_init_profile)(struct radeon_device *rdev);
853 void (*pm_get_dynpm_state)(struct radeon_device *rdev);
802}; 854};
803 855
804/* 856/*
805 * Asic structures 857 * Asic structures
806 */ 858 */
859struct r100_gpu_lockup {
860 unsigned long last_jiffies;
861 u32 last_cp_rptr;
862};
863
807struct r100_asic { 864struct r100_asic {
808 const unsigned *reg_safe_bm; 865 const unsigned *reg_safe_bm;
809 unsigned reg_safe_bm_size; 866 unsigned reg_safe_bm_size;
810 u32 hdp_cntl; 867 u32 hdp_cntl;
868 struct r100_gpu_lockup lockup;
811}; 869};
812 870
813struct r300_asic { 871struct r300_asic {
814 const unsigned *reg_safe_bm; 872 const unsigned *reg_safe_bm;
815 unsigned reg_safe_bm_size; 873 unsigned reg_safe_bm_size;
816 u32 resync_scratch; 874 u32 resync_scratch;
817 u32 hdp_cntl; 875 u32 hdp_cntl;
876 struct r100_gpu_lockup lockup;
818}; 877};
819 878
820struct r600_asic { 879struct r600_asic {
821 unsigned max_pipes; 880 unsigned max_pipes;
822 unsigned max_tile_pipes; 881 unsigned max_tile_pipes;
823 unsigned max_simds; 882 unsigned max_simds;
824 unsigned max_backends; 883 unsigned max_backends;
825 unsigned max_gprs; 884 unsigned max_gprs;
826 unsigned max_threads; 885 unsigned max_threads;
827 unsigned max_stack_entries; 886 unsigned max_stack_entries;
828 unsigned max_hw_contexts; 887 unsigned max_hw_contexts;
829 unsigned max_gs_threads; 888 unsigned max_gs_threads;
830 unsigned sx_max_export_size; 889 unsigned sx_max_export_size;
831 unsigned sx_max_export_pos_size; 890 unsigned sx_max_export_pos_size;
832 unsigned sx_max_export_smx_size; 891 unsigned sx_max_export_smx_size;
833 unsigned sq_num_cf_insts; 892 unsigned sq_num_cf_insts;
834 unsigned tiling_nbanks; 893 unsigned tiling_nbanks;
835 unsigned tiling_npipes; 894 unsigned tiling_npipes;
836 unsigned tiling_group_size; 895 unsigned tiling_group_size;
896 struct r100_gpu_lockup lockup;
837}; 897};
838 898
839struct rv770_asic { 899struct rv770_asic {
900 unsigned max_pipes;
901 unsigned max_tile_pipes;
902 unsigned max_simds;
903 unsigned max_backends;
904 unsigned max_gprs;
905 unsigned max_threads;
906 unsigned max_stack_entries;
907 unsigned max_hw_contexts;
908 unsigned max_gs_threads;
909 unsigned sx_max_export_size;
910 unsigned sx_max_export_pos_size;
911 unsigned sx_max_export_smx_size;
912 unsigned sq_num_cf_insts;
913 unsigned sx_num_of_sets;
914 unsigned sc_prim_fifo_size;
915 unsigned sc_hiz_tile_fifo_size;
916 unsigned sc_earlyz_tile_fifo_fize;
917 unsigned tiling_nbanks;
918 unsigned tiling_npipes;
919 unsigned tiling_group_size;
920 struct r100_gpu_lockup lockup;
921};
922
923struct evergreen_asic {
924 unsigned num_ses;
840 unsigned max_pipes; 925 unsigned max_pipes;
841 unsigned max_tile_pipes; 926 unsigned max_tile_pipes;
842 unsigned max_simds; 927 unsigned max_simds;
@@ -853,7 +938,7 @@ struct rv770_asic {
853 unsigned sx_num_of_sets; 938 unsigned sx_num_of_sets;
854 unsigned sc_prim_fifo_size; 939 unsigned sc_prim_fifo_size;
855 unsigned sc_hiz_tile_fifo_size; 940 unsigned sc_hiz_tile_fifo_size;
856 unsigned sc_earlyz_tile_fifo_fize; 941 unsigned sc_earlyz_tile_fifo_size;
857 unsigned tiling_nbanks; 942 unsigned tiling_nbanks;
858 unsigned tiling_npipes; 943 unsigned tiling_npipes;
859 unsigned tiling_group_size; 944 unsigned tiling_group_size;
@@ -864,6 +949,7 @@ union radeon_asic_config {
864 struct r100_asic r100; 949 struct r100_asic r100;
865 struct r600_asic r600; 950 struct r600_asic r600;
866 struct rv770_asic rv770; 951 struct rv770_asic rv770;
952 struct evergreen_asic evergreen;
867}; 953};
868 954
869/* 955/*
@@ -927,9 +1013,6 @@ struct radeon_device {
927 bool is_atom_bios; 1013 bool is_atom_bios;
928 uint16_t bios_header_start; 1014 uint16_t bios_header_start;
929 struct radeon_bo *stollen_vga_memory; 1015 struct radeon_bo *stollen_vga_memory;
930 struct fb_info *fbdev_info;
931 struct radeon_bo *fbdev_rbo;
932 struct radeon_framebuffer *fbdev_rfb;
933 /* Register mmio */ 1016 /* Register mmio */
934 resource_size_t rmmio_base; 1017 resource_size_t rmmio_base;
935 resource_size_t rmmio_size; 1018 resource_size_t rmmio_size;
@@ -974,6 +1057,7 @@ struct radeon_device {
974 struct work_struct hotplug_work; 1057 struct work_struct hotplug_work;
975 int num_crtc; /* number of crtcs */ 1058 int num_crtc; /* number of crtcs */
976 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ 1059 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
1060 struct mutex vram_mutex;
977 1061
978 /* audio stuff */ 1062 /* audio stuff */
979 struct timer_list audio_timer; 1063 struct timer_list audio_timer;
@@ -984,6 +1068,7 @@ struct radeon_device {
984 uint8_t audio_category_code; 1068 uint8_t audio_category_code;
985 1069
986 bool powered_down; 1070 bool powered_down;
1071 struct notifier_block acpi_nb;
987}; 1072};
988 1073
989int radeon_device_init(struct radeon_device *rdev, 1074int radeon_device_init(struct radeon_device *rdev,
@@ -1145,7 +1230,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1145#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) 1230#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
1146#define radeon_cs_parse(p) rdev->asic->cs_parse((p)) 1231#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
1147#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) 1232#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
1148#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) 1233#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
1234#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
1149#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) 1235#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
1150#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) 1236#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
1151#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) 1237#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
@@ -1173,9 +1259,16 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1173#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) 1259#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
1174#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) 1260#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
1175#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) 1261#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
1262#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
1263#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev))
1264#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev))
1265#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
1266#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
1267#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
1176 1268
1177/* Common functions */ 1269/* Common functions */
1178/* AGP */ 1270/* AGP */
1271extern int radeon_gpu_reset(struct radeon_device *rdev);
1179extern void radeon_agp_disable(struct radeon_device *rdev); 1272extern void radeon_agp_disable(struct radeon_device *rdev);
1180extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); 1273extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
1181extern void radeon_gart_restore(struct radeon_device *rdev); 1274extern void radeon_gart_restore(struct radeon_device *rdev);
@@ -1200,6 +1293,8 @@ extern int radeon_resume_kms(struct drm_device *dev);
1200extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 1293extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1201 1294
1202/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ 1295/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
1296extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
1297extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
1203 1298
1204/* rv200,rv250,rv280 */ 1299/* rv200,rv250,rv280 */
1205extern void r200_set_safe_registers(struct radeon_device *rdev); 1300extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1260,6 +1355,7 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
1260extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 1355extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
1261extern bool r600_card_posted(struct radeon_device *rdev); 1356extern bool r600_card_posted(struct radeon_device *rdev);
1262extern void r600_cp_stop(struct radeon_device *rdev); 1357extern void r600_cp_stop(struct radeon_device *rdev);
1358extern int r600_cp_start(struct radeon_device *rdev);
1263extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); 1359extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
1264extern int r600_cp_resume(struct radeon_device *rdev); 1360extern int r600_cp_resume(struct radeon_device *rdev);
1265extern void r600_cp_fini(struct radeon_device *rdev); 1361extern void r600_cp_fini(struct radeon_device *rdev);
@@ -1276,29 +1372,39 @@ extern void r600_scratch_init(struct radeon_device *rdev);
1276extern int r600_blit_init(struct radeon_device *rdev); 1372extern int r600_blit_init(struct radeon_device *rdev);
1277extern void r600_blit_fini(struct radeon_device *rdev); 1373extern void r600_blit_fini(struct radeon_device *rdev);
1278extern int r600_init_microcode(struct radeon_device *rdev); 1374extern int r600_init_microcode(struct radeon_device *rdev);
1279extern int r600_gpu_reset(struct radeon_device *rdev); 1375extern int r600_asic_reset(struct radeon_device *rdev);
1280/* r600 irq */ 1376/* r600 irq */
1281extern int r600_irq_init(struct radeon_device *rdev); 1377extern int r600_irq_init(struct radeon_device *rdev);
1282extern void r600_irq_fini(struct radeon_device *rdev); 1378extern void r600_irq_fini(struct radeon_device *rdev);
1283extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); 1379extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
1284extern int r600_irq_set(struct radeon_device *rdev); 1380extern int r600_irq_set(struct radeon_device *rdev);
1285extern void r600_irq_suspend(struct radeon_device *rdev); 1381extern void r600_irq_suspend(struct radeon_device *rdev);
1382extern void r600_disable_interrupts(struct radeon_device *rdev);
1383extern void r600_rlc_stop(struct radeon_device *rdev);
1286/* r600 audio */ 1384/* r600 audio */
1287extern int r600_audio_init(struct radeon_device *rdev); 1385extern int r600_audio_init(struct radeon_device *rdev);
1288extern int r600_audio_tmds_index(struct drm_encoder *encoder); 1386extern int r600_audio_tmds_index(struct drm_encoder *encoder);
1289extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); 1387extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
1388extern int r600_audio_channels(struct radeon_device *rdev);
1389extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
1390extern int r600_audio_rate(struct radeon_device *rdev);
1391extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
1392extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
1393extern void r600_audio_schedule_polling(struct radeon_device *rdev);
1394extern void r600_audio_enable_polling(struct drm_encoder *encoder);
1395extern void r600_audio_disable_polling(struct drm_encoder *encoder);
1290extern void r600_audio_fini(struct radeon_device *rdev); 1396extern void r600_audio_fini(struct radeon_device *rdev);
1291extern void r600_hdmi_init(struct drm_encoder *encoder); 1397extern void r600_hdmi_init(struct drm_encoder *encoder);
1292extern void r600_hdmi_enable(struct drm_encoder *encoder); 1398extern void r600_hdmi_enable(struct drm_encoder *encoder);
1293extern void r600_hdmi_disable(struct drm_encoder *encoder); 1399extern void r600_hdmi_disable(struct drm_encoder *encoder);
1294extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); 1400extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1295extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); 1401extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
1296extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, 1402extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
1297 int channels, 1403
1298 int rate, 1404extern void r700_cp_stop(struct radeon_device *rdev);
1299 int bps, 1405extern void r700_cp_fini(struct radeon_device *rdev);
1300 uint8_t status_bits, 1406extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
1301 uint8_t category_code); 1407extern int evergreen_irq_set(struct radeon_device *rdev);
1302 1408
1303/* evergreen */ 1409/* evergreen */
1304struct evergreen_mc_save { 1410struct evergreen_mc_save {
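
The old gpu_reset hook is split into gpu_is_lockup and asic_reset, with a common radeon_gpu_reset() entry point declared above. A minimal sketch of the detect-then-reset flow this split suggests (simplified; not the driver's actual recovery path):

/* Sketch: check for forward progress before forcing a reset */
static int radeon_gpu_recover_sketch(struct radeon_device *rdev)
{
	if (!radeon_gpu_is_lockup(rdev))
		return 0;			/* GPU is still making progress */
	return radeon_asic_reset(rdev);		/* confirmed lockup: reset */
}
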
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a4b4bc9fa322..e57df08d4aeb 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -134,7 +134,8 @@ static struct radeon_asic r100_asic = {
134 .suspend = &r100_suspend, 134 .suspend = &r100_suspend,
135 .resume = &r100_resume, 135 .resume = &r100_resume,
136 .vga_set_state = &r100_vga_set_state, 136 .vga_set_state = &r100_vga_set_state,
137 .gpu_reset = &r100_gpu_reset, 137 .gpu_is_lockup = &r100_gpu_is_lockup,
138 .asic_reset = &r100_asic_reset,
138 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 139 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
139 .gart_set_page = &r100_pci_gart_set_page, 140 .gart_set_page = &r100_pci_gart_set_page,
140 .cp_commit = &r100_cp_commit, 141 .cp_commit = &r100_cp_commit,
@@ -164,6 +165,12 @@ static struct radeon_asic r100_asic = {
164 .hpd_sense = &r100_hpd_sense, 165 .hpd_sense = &r100_hpd_sense,
165 .hpd_set_polarity = &r100_hpd_set_polarity, 166 .hpd_set_polarity = &r100_hpd_set_polarity,
166 .ioctl_wait_idle = NULL, 167 .ioctl_wait_idle = NULL,
168 .gui_idle = &r100_gui_idle,
169 .pm_misc = &r100_pm_misc,
170 .pm_prepare = &r100_pm_prepare,
171 .pm_finish = &r100_pm_finish,
172 .pm_init_profile = &r100_pm_init_profile,
173 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
167}; 174};
168 175
169static struct radeon_asic r200_asic = { 176static struct radeon_asic r200_asic = {
@@ -172,7 +179,8 @@ static struct radeon_asic r200_asic = {
172 .suspend = &r100_suspend, 179 .suspend = &r100_suspend,
173 .resume = &r100_resume, 180 .resume = &r100_resume,
174 .vga_set_state = &r100_vga_set_state, 181 .vga_set_state = &r100_vga_set_state,
175 .gpu_reset = &r100_gpu_reset, 182 .gpu_is_lockup = &r100_gpu_is_lockup,
183 .asic_reset = &r100_asic_reset,
176 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 184 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
177 .gart_set_page = &r100_pci_gart_set_page, 185 .gart_set_page = &r100_pci_gart_set_page,
178 .cp_commit = &r100_cp_commit, 186 .cp_commit = &r100_cp_commit,
@@ -201,6 +209,12 @@ static struct radeon_asic r200_asic = {
201 .hpd_sense = &r100_hpd_sense, 209 .hpd_sense = &r100_hpd_sense,
202 .hpd_set_polarity = &r100_hpd_set_polarity, 210 .hpd_set_polarity = &r100_hpd_set_polarity,
203 .ioctl_wait_idle = NULL, 211 .ioctl_wait_idle = NULL,
212 .gui_idle = &r100_gui_idle,
213 .pm_misc = &r100_pm_misc,
214 .pm_prepare = &r100_pm_prepare,
215 .pm_finish = &r100_pm_finish,
216 .pm_init_profile = &r100_pm_init_profile,
217 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
204}; 218};
205 219
206static struct radeon_asic r300_asic = { 220static struct radeon_asic r300_asic = {
@@ -209,7 +223,8 @@ static struct radeon_asic r300_asic = {
209 .suspend = &r300_suspend, 223 .suspend = &r300_suspend,
210 .resume = &r300_resume, 224 .resume = &r300_resume,
211 .vga_set_state = &r100_vga_set_state, 225 .vga_set_state = &r100_vga_set_state,
212 .gpu_reset = &r300_gpu_reset, 226 .gpu_is_lockup = &r300_gpu_is_lockup,
227 .asic_reset = &r300_asic_reset,
213 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 228 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
214 .gart_set_page = &r100_pci_gart_set_page, 229 .gart_set_page = &r100_pci_gart_set_page,
215 .cp_commit = &r100_cp_commit, 230 .cp_commit = &r100_cp_commit,
@@ -239,6 +254,12 @@ static struct radeon_asic r300_asic = {
239 .hpd_sense = &r100_hpd_sense, 254 .hpd_sense = &r100_hpd_sense,
240 .hpd_set_polarity = &r100_hpd_set_polarity, 255 .hpd_set_polarity = &r100_hpd_set_polarity,
241 .ioctl_wait_idle = NULL, 256 .ioctl_wait_idle = NULL,
257 .gui_idle = &r100_gui_idle,
258 .pm_misc = &r100_pm_misc,
259 .pm_prepare = &r100_pm_prepare,
260 .pm_finish = &r100_pm_finish,
261 .pm_init_profile = &r100_pm_init_profile,
262 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
242}; 263};
243 264
244static struct radeon_asic r300_asic_pcie = { 265static struct radeon_asic r300_asic_pcie = {
@@ -247,7 +268,8 @@ static struct radeon_asic r300_asic_pcie = {
247 .suspend = &r300_suspend, 268 .suspend = &r300_suspend,
248 .resume = &r300_resume, 269 .resume = &r300_resume,
249 .vga_set_state = &r100_vga_set_state, 270 .vga_set_state = &r100_vga_set_state,
250 .gpu_reset = &r300_gpu_reset, 271 .gpu_is_lockup = &r300_gpu_is_lockup,
272 .asic_reset = &r300_asic_reset,
251 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 273 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
252 .gart_set_page = &rv370_pcie_gart_set_page, 274 .gart_set_page = &rv370_pcie_gart_set_page,
253 .cp_commit = &r100_cp_commit, 275 .cp_commit = &r100_cp_commit,
@@ -276,6 +298,12 @@ static struct radeon_asic r300_asic_pcie = {
276 .hpd_sense = &r100_hpd_sense, 298 .hpd_sense = &r100_hpd_sense,
277 .hpd_set_polarity = &r100_hpd_set_polarity, 299 .hpd_set_polarity = &r100_hpd_set_polarity,
278 .ioctl_wait_idle = NULL, 300 .ioctl_wait_idle = NULL,
301 .gui_idle = &r100_gui_idle,
302 .pm_misc = &r100_pm_misc,
303 .pm_prepare = &r100_pm_prepare,
304 .pm_finish = &r100_pm_finish,
305 .pm_init_profile = &r100_pm_init_profile,
306 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
279}; 307};
280 308
281static struct radeon_asic r420_asic = { 309static struct radeon_asic r420_asic = {
@@ -284,7 +312,8 @@ static struct radeon_asic r420_asic = {
284 .suspend = &r420_suspend, 312 .suspend = &r420_suspend,
285 .resume = &r420_resume, 313 .resume = &r420_resume,
286 .vga_set_state = &r100_vga_set_state, 314 .vga_set_state = &r100_vga_set_state,
287 .gpu_reset = &r300_gpu_reset, 315 .gpu_is_lockup = &r300_gpu_is_lockup,
316 .asic_reset = &r300_asic_reset,
288 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 317 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
289 .gart_set_page = &rv370_pcie_gart_set_page, 318 .gart_set_page = &rv370_pcie_gart_set_page,
290 .cp_commit = &r100_cp_commit, 319 .cp_commit = &r100_cp_commit,
@@ -314,6 +343,12 @@ static struct radeon_asic r420_asic = {
314 .hpd_sense = &r100_hpd_sense, 343 .hpd_sense = &r100_hpd_sense,
315 .hpd_set_polarity = &r100_hpd_set_polarity, 344 .hpd_set_polarity = &r100_hpd_set_polarity,
316 .ioctl_wait_idle = NULL, 345 .ioctl_wait_idle = NULL,
346 .gui_idle = &r100_gui_idle,
347 .pm_misc = &r100_pm_misc,
348 .pm_prepare = &r100_pm_prepare,
349 .pm_finish = &r100_pm_finish,
350 .pm_init_profile = &r420_pm_init_profile,
351 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
317}; 352};
318 353
319static struct radeon_asic rs400_asic = { 354static struct radeon_asic rs400_asic = {
@@ -322,7 +357,8 @@ static struct radeon_asic rs400_asic = {
322 .suspend = &rs400_suspend, 357 .suspend = &rs400_suspend,
323 .resume = &rs400_resume, 358 .resume = &rs400_resume,
324 .vga_set_state = &r100_vga_set_state, 359 .vga_set_state = &r100_vga_set_state,
325 .gpu_reset = &r300_gpu_reset, 360 .gpu_is_lockup = &r300_gpu_is_lockup,
361 .asic_reset = &r300_asic_reset,
326 .gart_tlb_flush = &rs400_gart_tlb_flush, 362 .gart_tlb_flush = &rs400_gart_tlb_flush,
327 .gart_set_page = &rs400_gart_set_page, 363 .gart_set_page = &rs400_gart_set_page,
328 .cp_commit = &r100_cp_commit, 364 .cp_commit = &r100_cp_commit,
@@ -352,6 +388,12 @@ static struct radeon_asic rs400_asic = {
352 .hpd_sense = &r100_hpd_sense, 388 .hpd_sense = &r100_hpd_sense,
353 .hpd_set_polarity = &r100_hpd_set_polarity, 389 .hpd_set_polarity = &r100_hpd_set_polarity,
354 .ioctl_wait_idle = NULL, 390 .ioctl_wait_idle = NULL,
391 .gui_idle = &r100_gui_idle,
392 .pm_misc = &r100_pm_misc,
393 .pm_prepare = &r100_pm_prepare,
394 .pm_finish = &r100_pm_finish,
395 .pm_init_profile = &r100_pm_init_profile,
396 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
355}; 397};
356 398
357static struct radeon_asic rs600_asic = { 399static struct radeon_asic rs600_asic = {
@@ -360,7 +402,8 @@ static struct radeon_asic rs600_asic = {
360 .suspend = &rs600_suspend, 402 .suspend = &rs600_suspend,
361 .resume = &rs600_resume, 403 .resume = &rs600_resume,
362 .vga_set_state = &r100_vga_set_state, 404 .vga_set_state = &r100_vga_set_state,
363 .gpu_reset = &r300_gpu_reset, 405 .gpu_is_lockup = &r300_gpu_is_lockup,
406 .asic_reset = &rs600_asic_reset,
364 .gart_tlb_flush = &rs600_gart_tlb_flush, 407 .gart_tlb_flush = &rs600_gart_tlb_flush,
365 .gart_set_page = &rs600_gart_set_page, 408 .gart_set_page = &rs600_gart_set_page,
366 .cp_commit = &r100_cp_commit, 409 .cp_commit = &r100_cp_commit,
@@ -390,6 +433,12 @@ static struct radeon_asic rs600_asic = {
390 .hpd_sense = &rs600_hpd_sense, 433 .hpd_sense = &rs600_hpd_sense,
391 .hpd_set_polarity = &rs600_hpd_set_polarity, 434 .hpd_set_polarity = &rs600_hpd_set_polarity,
392 .ioctl_wait_idle = NULL, 435 .ioctl_wait_idle = NULL,
436 .gui_idle = &r100_gui_idle,
437 .pm_misc = &rs600_pm_misc,
438 .pm_prepare = &rs600_pm_prepare,
439 .pm_finish = &rs600_pm_finish,
440 .pm_init_profile = &r420_pm_init_profile,
441 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
393}; 442};
394 443
395static struct radeon_asic rs690_asic = { 444static struct radeon_asic rs690_asic = {
@@ -398,7 +447,8 @@ static struct radeon_asic rs690_asic = {
398 .suspend = &rs690_suspend, 447 .suspend = &rs690_suspend,
399 .resume = &rs690_resume, 448 .resume = &rs690_resume,
400 .vga_set_state = &r100_vga_set_state, 449 .vga_set_state = &r100_vga_set_state,
401 .gpu_reset = &r300_gpu_reset, 450 .gpu_is_lockup = &r300_gpu_is_lockup,
451 .asic_reset = &rs600_asic_reset,
402 .gart_tlb_flush = &rs400_gart_tlb_flush, 452 .gart_tlb_flush = &rs400_gart_tlb_flush,
403 .gart_set_page = &rs400_gart_set_page, 453 .gart_set_page = &rs400_gart_set_page,
404 .cp_commit = &r100_cp_commit, 454 .cp_commit = &r100_cp_commit,
@@ -428,6 +478,12 @@ static struct radeon_asic rs690_asic = {
428 .hpd_sense = &rs600_hpd_sense, 478 .hpd_sense = &rs600_hpd_sense,
429 .hpd_set_polarity = &rs600_hpd_set_polarity, 479 .hpd_set_polarity = &rs600_hpd_set_polarity,
430 .ioctl_wait_idle = NULL, 480 .ioctl_wait_idle = NULL,
481 .gui_idle = &r100_gui_idle,
482 .pm_misc = &rs600_pm_misc,
483 .pm_prepare = &rs600_pm_prepare,
484 .pm_finish = &rs600_pm_finish,
485 .pm_init_profile = &r420_pm_init_profile,
486 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
431}; 487};
432 488
433static struct radeon_asic rv515_asic = { 489static struct radeon_asic rv515_asic = {
@@ -436,7 +492,8 @@ static struct radeon_asic rv515_asic = {
436 .suspend = &rv515_suspend, 492 .suspend = &rv515_suspend,
437 .resume = &rv515_resume, 493 .resume = &rv515_resume,
438 .vga_set_state = &r100_vga_set_state, 494 .vga_set_state = &r100_vga_set_state,
439 .gpu_reset = &rv515_gpu_reset, 495 .gpu_is_lockup = &r300_gpu_is_lockup,
496 .asic_reset = &rs600_asic_reset,
440 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 497 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
441 .gart_set_page = &rv370_pcie_gart_set_page, 498 .gart_set_page = &rv370_pcie_gart_set_page,
442 .cp_commit = &r100_cp_commit, 499 .cp_commit = &r100_cp_commit,
@@ -466,6 +523,12 @@ static struct radeon_asic rv515_asic = {
466 .hpd_sense = &rs600_hpd_sense, 523 .hpd_sense = &rs600_hpd_sense,
467 .hpd_set_polarity = &rs600_hpd_set_polarity, 524 .hpd_set_polarity = &rs600_hpd_set_polarity,
468 .ioctl_wait_idle = NULL, 525 .ioctl_wait_idle = NULL,
526 .gui_idle = &r100_gui_idle,
527 .pm_misc = &rs600_pm_misc,
528 .pm_prepare = &rs600_pm_prepare,
529 .pm_finish = &rs600_pm_finish,
530 .pm_init_profile = &r420_pm_init_profile,
531 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
469}; 532};
470 533
471static struct radeon_asic r520_asic = { 534static struct radeon_asic r520_asic = {
@@ -474,7 +537,8 @@ static struct radeon_asic r520_asic = {
474 .suspend = &rv515_suspend, 537 .suspend = &rv515_suspend,
475 .resume = &r520_resume, 538 .resume = &r520_resume,
476 .vga_set_state = &r100_vga_set_state, 539 .vga_set_state = &r100_vga_set_state,
477 .gpu_reset = &rv515_gpu_reset, 540 .gpu_is_lockup = &r300_gpu_is_lockup,
541 .asic_reset = &rs600_asic_reset,
478 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 542 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
479 .gart_set_page = &rv370_pcie_gart_set_page, 543 .gart_set_page = &rv370_pcie_gart_set_page,
480 .cp_commit = &r100_cp_commit, 544 .cp_commit = &r100_cp_commit,
@@ -504,6 +568,12 @@ static struct radeon_asic r520_asic = {
504 .hpd_sense = &rs600_hpd_sense, 568 .hpd_sense = &rs600_hpd_sense,
505 .hpd_set_polarity = &rs600_hpd_set_polarity, 569 .hpd_set_polarity = &rs600_hpd_set_polarity,
506 .ioctl_wait_idle = NULL, 570 .ioctl_wait_idle = NULL,
571 .gui_idle = &r100_gui_idle,
572 .pm_misc = &rs600_pm_misc,
573 .pm_prepare = &rs600_pm_prepare,
574 .pm_finish = &rs600_pm_finish,
575 .pm_init_profile = &r420_pm_init_profile,
576 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
507}; 577};
508 578
509static struct radeon_asic r600_asic = { 579static struct radeon_asic r600_asic = {
@@ -513,7 +583,8 @@ static struct radeon_asic r600_asic = {
513 .resume = &r600_resume, 583 .resume = &r600_resume,
514 .cp_commit = &r600_cp_commit, 584 .cp_commit = &r600_cp_commit,
515 .vga_set_state = &r600_vga_set_state, 585 .vga_set_state = &r600_vga_set_state,
516 .gpu_reset = &r600_gpu_reset, 586 .gpu_is_lockup = &r600_gpu_is_lockup,
587 .asic_reset = &r600_asic_reset,
517 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 588 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
518 .gart_set_page = &rs600_gart_set_page, 589 .gart_set_page = &rs600_gart_set_page,
519 .ring_test = &r600_ring_test, 590 .ring_test = &r600_ring_test,
@@ -541,6 +612,12 @@ static struct radeon_asic r600_asic = {
541 .hpd_sense = &r600_hpd_sense, 612 .hpd_sense = &r600_hpd_sense,
542 .hpd_set_polarity = &r600_hpd_set_polarity, 613 .hpd_set_polarity = &r600_hpd_set_polarity,
543 .ioctl_wait_idle = r600_ioctl_wait_idle, 614 .ioctl_wait_idle = r600_ioctl_wait_idle,
615 .gui_idle = &r600_gui_idle,
616 .pm_misc = &r600_pm_misc,
617 .pm_prepare = &rs600_pm_prepare,
618 .pm_finish = &rs600_pm_finish,
619 .pm_init_profile = &r600_pm_init_profile,
620 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
544}; 621};
545 622
546static struct radeon_asic rs780_asic = { 623static struct radeon_asic rs780_asic = {
@@ -549,8 +626,9 @@ static struct radeon_asic rs780_asic = {
549 .suspend = &r600_suspend, 626 .suspend = &r600_suspend,
550 .resume = &r600_resume, 627 .resume = &r600_resume,
551 .cp_commit = &r600_cp_commit, 628 .cp_commit = &r600_cp_commit,
629 .gpu_is_lockup = &r600_gpu_is_lockup,
552 .vga_set_state = &r600_vga_set_state, 630 .vga_set_state = &r600_vga_set_state,
553 .gpu_reset = &r600_gpu_reset, 631 .asic_reset = &r600_asic_reset,
554 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 632 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
555 .gart_set_page = &rs600_gart_set_page, 633 .gart_set_page = &rs600_gart_set_page,
556 .ring_test = &r600_ring_test, 634 .ring_test = &r600_ring_test,
@@ -578,6 +656,12 @@ static struct radeon_asic rs780_asic = {
578 .hpd_sense = &r600_hpd_sense, 656 .hpd_sense = &r600_hpd_sense,
579 .hpd_set_polarity = &r600_hpd_set_polarity, 657 .hpd_set_polarity = &r600_hpd_set_polarity,
580 .ioctl_wait_idle = r600_ioctl_wait_idle, 658 .ioctl_wait_idle = r600_ioctl_wait_idle,
659 .gui_idle = &r600_gui_idle,
660 .pm_misc = &r600_pm_misc,
661 .pm_prepare = &rs600_pm_prepare,
662 .pm_finish = &rs600_pm_finish,
663 .pm_init_profile = &rs780_pm_init_profile,
664 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
581}; 665};
582 666
583static struct radeon_asic rv770_asic = { 667static struct radeon_asic rv770_asic = {
@@ -586,7 +670,8 @@ static struct radeon_asic rv770_asic = {
586 .suspend = &rv770_suspend, 670 .suspend = &rv770_suspend,
587 .resume = &rv770_resume, 671 .resume = &rv770_resume,
588 .cp_commit = &r600_cp_commit, 672 .cp_commit = &r600_cp_commit,
589 .gpu_reset = &rv770_gpu_reset, 673 .asic_reset = &r600_asic_reset,
674 .gpu_is_lockup = &r600_gpu_is_lockup,
590 .vga_set_state = &r600_vga_set_state, 675 .vga_set_state = &r600_vga_set_state,
591 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 676 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
592 .gart_set_page = &rs600_gart_set_page, 677 .gart_set_page = &rs600_gart_set_page,
@@ -615,6 +700,12 @@ static struct radeon_asic rv770_asic = {
615 .hpd_sense = &r600_hpd_sense, 700 .hpd_sense = &r600_hpd_sense,
616 .hpd_set_polarity = &r600_hpd_set_polarity, 701 .hpd_set_polarity = &r600_hpd_set_polarity,
617 .ioctl_wait_idle = r600_ioctl_wait_idle, 702 .ioctl_wait_idle = r600_ioctl_wait_idle,
703 .gui_idle = &r600_gui_idle,
704 .pm_misc = &rv770_pm_misc,
705 .pm_prepare = &rs600_pm_prepare,
706 .pm_finish = &rs600_pm_finish,
707 .pm_init_profile = &r600_pm_init_profile,
708 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
618}; 709};
619 710
620static struct radeon_asic evergreen_asic = { 711static struct radeon_asic evergreen_asic = {
@@ -622,16 +713,17 @@ static struct radeon_asic evergreen_asic = {
 	.fini = &evergreen_fini,
 	.suspend = &evergreen_suspend,
 	.resume = &evergreen_resume,
-	.cp_commit = NULL,
-	.gpu_reset = &evergreen_gpu_reset,
+	.cp_commit = &r600_cp_commit,
+	.gpu_is_lockup = &evergreen_gpu_is_lockup,
+	.asic_reset = &evergreen_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
-	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
-	.ring_test = NULL,
-	.ring_ib_execute = NULL,
-	.irq_set = NULL,
-	.irq_process = NULL,
-	.get_vblank_counter = NULL,
+	.ring_test = &r600_ring_test,
+	.ring_ib_execute = &r600_ring_ib_execute,
+	.irq_set = &evergreen_irq_set,
+	.irq_process = &evergreen_irq_process,
+	.get_vblank_counter = &evergreen_get_vblank_counter,
 	.fence_ring_emit = NULL,
 	.cs_parse = NULL,
 	.copy_blit = NULL,
@@ -650,6 +742,12 @@ static struct radeon_asic evergreen_asic = {
 	.hpd_fini = &evergreen_hpd_fini,
 	.hpd_sense = &evergreen_hpd_sense,
 	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+	.gui_idle = &r600_gui_idle,
+	.pm_misc = &evergreen_pm_misc,
+	.pm_prepare = &evergreen_pm_prepare,
+	.pm_finish = &evergreen_pm_finish,
+	.pm_init_profile = &r600_pm_init_profile,
+	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
 };
 
 int radeon_asic_init(struct radeon_device *rdev)
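
The tables above follow the driver's per-ASIC vtable pattern: each chip family fills one struct radeon_asic with function pointers, and generic code dispatches through the table instead of testing chip IDs. This merge also splits the old gpu_reset hook into gpu_is_lockup (detection) and asic_reset (recovery). A minimal sketch of the consuming side — the structs are trimmed to two hooks and radeon_check_and_reset is a hypothetical caller, not code from this commit:

/* Sketch of the callback-table pattern above; trimmed structs,
 * hypothetical caller. */
struct radeon_device;

struct radeon_asic {
	bool (*gpu_is_lockup)(struct radeon_device *rdev);
	int (*asic_reset)(struct radeon_device *rdev);
};

struct radeon_device {
	const struct radeon_asic *asic;
};

/* If the GPU looks locked up, attempt a full ASIC reset. */
static int radeon_check_and_reset(struct radeon_device *rdev)
{
	if (rdev->asic->gpu_is_lockup(rdev))
		return rdev->asic->asic_reset(rdev);
	return 0;
}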
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a0b8280663d1..5c40a3dfaca2 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -60,7 +60,8 @@ int r100_resume(struct radeon_device *rdev);
 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void r100_vga_set_state(struct radeon_device *rdev, bool state);
-int r100_gpu_reset(struct radeon_device *rdev);
+bool r100_gpu_is_lockup(struct radeon_device *rdev);
+int r100_asic_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
 int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -110,8 +111,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev);
 void r100_wb_disable(struct radeon_device *rdev);
 void r100_wb_fini(struct radeon_device *rdev);
 int r100_wb_init(struct radeon_device *rdev);
-void r100_hdp_reset(struct radeon_device *rdev);
-int r100_rb2d_reset(struct radeon_device *rdev);
 int r100_cp_reset(struct radeon_device *rdev);
 void r100_vga_render_disable(struct radeon_device *rdev);
 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
@@ -126,6 +125,13 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
 			 unsigned idx);
 void r100_enable_bm(struct radeon_device *rdev);
 void r100_set_common_regs(struct radeon_device *rdev);
+void r100_bm_disable(struct radeon_device *rdev);
+extern bool r100_gui_idle(struct radeon_device *rdev);
+extern void r100_pm_misc(struct radeon_device *rdev);
+extern void r100_pm_prepare(struct radeon_device *rdev);
+extern void r100_pm_finish(struct radeon_device *rdev);
+extern void r100_pm_init_profile(struct radeon_device *rdev);
+extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
 
 /*
  * r200,rv250,rs300,rv280
@@ -134,7 +140,7 @@ extern int r200_copy_dma(struct radeon_device *rdev,
 			  uint64_t src_offset,
 			  uint64_t dst_offset,
 			  unsigned num_pages,
-			  struct radeon_fence *fence);
+			 struct radeon_fence *fence);
 
 /*
  * r300,r350,rv350,rv380
@@ -143,7 +149,8 @@ extern int r300_init(struct radeon_device *rdev);
 extern void r300_fini(struct radeon_device *rdev);
 extern int r300_suspend(struct radeon_device *rdev);
 extern int r300_resume(struct radeon_device *rdev);
-extern int r300_gpu_reset(struct radeon_device *rdev);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
+extern int r300_asic_reset(struct radeon_device *rdev);
 extern void r300_ring_start(struct radeon_device *rdev);
 extern void r300_fence_ring_emit(struct radeon_device *rdev,
 				 struct radeon_fence *fence);
@@ -162,6 +169,7 @@ extern int r420_init(struct radeon_device *rdev);
 extern void r420_fini(struct radeon_device *rdev);
 extern int r420_suspend(struct radeon_device *rdev);
 extern int r420_resume(struct radeon_device *rdev);
+extern void r420_pm_init_profile(struct radeon_device *rdev);
 
 /*
  * rs400,rs480
@@ -178,6 +186,7 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 /*
  * rs600.
  */
+extern int rs600_asic_reset(struct radeon_device *rdev);
 extern int rs600_init(struct radeon_device *rdev);
 extern void rs600_fini(struct radeon_device *rdev);
 extern int rs600_suspend(struct radeon_device *rdev);
@@ -195,6 +204,9 @@ void rs600_hpd_fini(struct radeon_device *rdev);
 bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void rs600_hpd_set_polarity(struct radeon_device *rdev,
 			    enum radeon_hpd_id hpd);
+extern void rs600_pm_misc(struct radeon_device *rdev);
+extern void rs600_pm_prepare(struct radeon_device *rdev);
+extern void rs600_pm_finish(struct radeon_device *rdev);
 
 /*
  * rs690,rs740
@@ -212,7 +224,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev);
  */
 int rv515_init(struct radeon_device *rdev);
 void rv515_fini(struct radeon_device *rdev);
-int rv515_gpu_reset(struct radeon_device *rdev);
 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rv515_ring_start(struct radeon_device *rdev);
@@ -252,7 +263,8 @@ int r600_copy_dma(struct radeon_device *rdev,
 		  struct radeon_fence *fence);
 int r600_irq_process(struct radeon_device *rdev);
 int r600_irq_set(struct radeon_device *rdev);
-int r600_gpu_reset(struct radeon_device *rdev);
+bool r600_gpu_is_lockup(struct radeon_device *rdev);
+int r600_asic_reset(struct radeon_device *rdev);
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
 			 uint32_t offset, uint32_t obj_size);
@@ -268,6 +280,11 @@ bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void r600_hpd_set_polarity(struct radeon_device *rdev,
 			   enum radeon_hpd_id hpd);
 extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
+extern bool r600_gui_idle(struct radeon_device *rdev);
+extern void r600_pm_misc(struct radeon_device *rdev);
+extern void r600_pm_init_profile(struct radeon_device *rdev);
+extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
 
 /*
  * rv770,rv730,rv710,rv740
@@ -276,20 +293,29 @@ int rv770_init(struct radeon_device *rdev);
 void rv770_fini(struct radeon_device *rdev);
 int rv770_suspend(struct radeon_device *rdev);
 int rv770_resume(struct radeon_device *rdev);
-int rv770_gpu_reset(struct radeon_device *rdev);
+extern void rv770_pm_misc(struct radeon_device *rdev);
 
 /*
  * evergreen
  */
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
 int evergreen_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 int evergreen_suspend(struct radeon_device *rdev);
 int evergreen_resume(struct radeon_device *rdev);
-int evergreen_gpu_reset(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
+int evergreen_asic_reset(struct radeon_device *rdev);
 void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_hpd_init(struct radeon_device *rdev);
 void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
 				enum radeon_hpd_id hpd);
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int evergreen_irq_set(struct radeon_device *rdev);
+int evergreen_irq_process(struct radeon_device *rdev);
+extern void evergreen_pm_misc(struct radeon_device *rdev);
+extern void evergreen_pm_prepare(struct radeon_device *rdev);
+extern void evergreen_pm_finish(struct radeon_device *rdev);
+
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 9916d825401c..6e733fdc3349 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -530,6 +530,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 		}
 
 		/* look up gpio for ddc, hpd */
+		ddc_bus.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
 		if ((le16_to_cpu(path->usDeviceTag) &
 		     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
 			for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
@@ -547,7 +549,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 					ATOM_I2C_RECORD *i2c_record;
 					ATOM_HPD_INT_RECORD *hpd_record;
 					ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
-					hpd.hpd = RADEON_HPD_NONE;
 
 					while (record->ucRecordType > 0
 					       && record->
@@ -585,13 +586,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 						break;
 					}
 				}
-			} else {
-				hpd.hpd = RADEON_HPD_NONE;
-				ddc_bus.valid = false;
 			}
 
 			/* needed for aux chan transactions */
-			ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0;
+			ddc_bus.hpd = hpd.hpd;
 
 			conn_id = le16_to_cpu(path->usConnObjectId);
 
@@ -1174,7 +1172,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
 		lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
 			le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
 		lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
-			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
 		lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
 			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
 		lvds->panel_pwr_delay =
@@ -1442,26 +1440,30 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
 
 static const char *thermal_controller_names[] = {
 	"NONE",
-	"LM63",
-	"ADM1032",
-	"ADM1030",
-	"MUA6649",
-	"LM64",
-	"F75375",
-	"ASC7512",
+	"lm63",
+	"adm1032",
+	"adm1030",
+	"max6649",
+	"lm64",
+	"f75375",
+	"asc7xxx",
 };
 
 static const char *pp_lib_thermal_controller_names[] = {
 	"NONE",
-	"LM63",
-	"ADM1032",
-	"ADM1030",
-	"MUA6649",
-	"LM64",
-	"F75375",
+	"lm63",
+	"adm1032",
+	"adm1030",
+	"max6649",
+	"lm64",
+	"f75375",
 	"RV6xx",
 	"RV770",
-	"ADT7473",
+	"adt7473",
+	"External GPIO",
+	"Evergreen",
+	"adt7473 with internal",
+
 };
 
 union power_info {
@@ -1485,7 +1487,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 	int state_index = 0, mode_index = 0;
 	struct radeon_i2c_bus_rec i2c_bus;
 
-	rdev->pm.default_power_state = NULL;
+	rdev->pm.default_power_state_index = -1;
 
 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
 				   &frev, &crev, &data_offset)) {
@@ -1498,10 +1500,19 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 				 power_info->info.ucOverdriveControllerAddress >> 1);
 			i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
 			rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+			if (rdev->pm.i2c_bus) {
+				struct i2c_board_info info = { };
+				const char *name = thermal_controller_names[power_info->info.
+									    ucOverdriveThermalController];
+				info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+				strlcpy(info.type, name, sizeof(info.type));
+				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+			}
 		}
 		num_modes = power_info->info.ucNumOfPowerModeEntries;
 		if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
 			num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+		/* last mode is usually default, array is low to high */
 		for (i = 0; i < num_modes; i++) {
 			rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
 			switch (frev) {
@@ -1515,13 +1526,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 				if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
 				    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
 					continue;
-				/* skip overclock modes for now */
-				if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
-				     rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-				    (rdev->pm.power_state[state_index].clock_info[0].sclk >
-				     rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-					continue;
-				rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+				rdev->pm.power_state[state_index].pcie_lanes =
 					power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
 				misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
 				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
@@ -1542,6 +1547,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 					rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
 						power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
 				}
+				rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+				rdev->pm.power_state[state_index].misc = misc;
 				/* order matters! */
 				if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
 					rdev->pm.power_state[state_index].type =
@@ -1555,15 +1562,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 				if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
 					rdev->pm.power_state[state_index].type =
 						POWER_STATE_TYPE_BALANCED;
-				if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+				if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
 					rdev->pm.power_state[state_index].type =
 						POWER_STATE_TYPE_PERFORMANCE;
+					rdev->pm.power_state[state_index].flags &=
+						~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+				}
 				if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
 					rdev->pm.power_state[state_index].type =
 						POWER_STATE_TYPE_DEFAULT;
-					rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+					rdev->pm.default_power_state_index = state_index;
 					rdev->pm.power_state[state_index].default_clock_mode =
 						&rdev->pm.power_state[state_index].clock_info[0];
+					rdev->pm.power_state[state_index].flags &=
+						~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+				} else if (state_index == 0) {
+					rdev->pm.power_state[state_index].clock_info[0].flags |=
+						RADEON_PM_MODE_NO_DISPLAY;
 				}
 				state_index++;
 				break;
@@ -1577,13 +1592,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 				if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
 				    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
 					continue;
-				/* skip overclock modes for now */
-				if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
-				     rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-				    (rdev->pm.power_state[state_index].clock_info[0].sclk >
-				     rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-					continue;
-				rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+				rdev->pm.power_state[state_index].pcie_lanes =
 					power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
 				misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
 				misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
@@ -1605,6 +1614,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 					rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
 						power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
 				}
+				rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+				rdev->pm.power_state[state_index].misc = misc;
+				rdev->pm.power_state[state_index].misc2 = misc2;
 				/* order matters! */
 				if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
 					rdev->pm.power_state[state_index].type =
@@ -1618,18 +1630,29 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 				if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
 					rdev->pm.power_state[state_index].type =
 						POWER_STATE_TYPE_BALANCED;
-				if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+				if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
 					rdev->pm.power_state[state_index].type =
 						POWER_STATE_TYPE_PERFORMANCE;
+					rdev->pm.power_state[state_index].flags &=
+						~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+				}
 				if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
 					rdev->pm.power_state[state_index].type =
 						POWER_STATE_TYPE_BALANCED;
+				if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT)
+					rdev->pm.power_state[state_index].flags &=
+						~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
 				if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
 					rdev->pm.power_state[state_index].type =
 						POWER_STATE_TYPE_DEFAULT;
-					rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+					rdev->pm.default_power_state_index = state_index;
 					rdev->pm.power_state[state_index].default_clock_mode =
 						&rdev->pm.power_state[state_index].clock_info[0];
+					rdev->pm.power_state[state_index].flags &=
+						~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+				} else if (state_index == 0) {
+					rdev->pm.power_state[state_index].clock_info[0].flags |=
+						RADEON_PM_MODE_NO_DISPLAY;
 				}
 				state_index++;
 				break;
@@ -1643,13 +1666,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 				if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
 				    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
 					continue;
-				/* skip overclock modes for now */
-				if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
-				     rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-				    (rdev->pm.power_state[state_index].clock_info[0].sclk >
-				     rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-					continue;
-				rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+				rdev->pm.power_state[state_index].pcie_lanes =
 					power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
 				misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
 				misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
@@ -1677,6 +1694,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 						power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
 					}
 				}
+				rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+				rdev->pm.power_state[state_index].misc = misc;
+				rdev->pm.power_state[state_index].misc2 = misc2;
 				/* order matters! */
 				if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
 					rdev->pm.power_state[state_index].type =
@@ -1690,42 +1710,76 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 				if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
 					rdev->pm.power_state[state_index].type =
 						POWER_STATE_TYPE_BALANCED;
-				if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+				if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
 					rdev->pm.power_state[state_index].type =
 						POWER_STATE_TYPE_PERFORMANCE;
+					rdev->pm.power_state[state_index].flags &=
+						~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+				}
 				if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
 					rdev->pm.power_state[state_index].type =
 						POWER_STATE_TYPE_BALANCED;
 				if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
 					rdev->pm.power_state[state_index].type =
 						POWER_STATE_TYPE_DEFAULT;
-					rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+					rdev->pm.default_power_state_index = state_index;
 					rdev->pm.power_state[state_index].default_clock_mode =
 						&rdev->pm.power_state[state_index].clock_info[0];
+				} else if (state_index == 0) {
+					rdev->pm.power_state[state_index].clock_info[0].flags |=
+						RADEON_PM_MODE_NO_DISPLAY;
 				}
 				state_index++;
 				break;
 			}
 		}
-	} else if (frev == 4) {
+		/* last mode is usually default */
+		if (rdev->pm.default_power_state_index == -1) {
+			rdev->pm.power_state[state_index - 1].type =
+				POWER_STATE_TYPE_DEFAULT;
+			rdev->pm.default_power_state_index = state_index - 1;
+			rdev->pm.power_state[state_index - 1].default_clock_mode =
+				&rdev->pm.power_state[state_index - 1].clock_info[0];
+			rdev->pm.power_state[state_index].flags &=
+				~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			rdev->pm.power_state[state_index].misc = 0;
+			rdev->pm.power_state[state_index].misc2 = 0;
+		}
+	} else {
 		/* add the i2c bus for thermal/fan chip */
 		/* no support for internal controller yet */
-		if (power_info->info_4.sThermalController.ucType > 0) {
-			if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
-			    (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) {
+		ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
+		if (controller->ucType > 0) {
+			if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
+			    (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
+			    (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) {
 				DRM_INFO("Internal thermal controller %s fan control\n",
-					 (power_info->info_4.sThermalController.ucFanParameters &
+					 (controller->ucFanParameters &
 					  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			} else if ((controller->ucType ==
+				    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+				   (controller->ucType ==
+				    ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
+				DRM_INFO("Special thermal controller config\n");
 			} else {
 				DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
-					 pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType],
-					 power_info->info_4.sThermalController.ucI2cAddress >> 1,
-					 (power_info->info_4.sThermalController.ucFanParameters &
+					 pp_lib_thermal_controller_names[controller->ucType],
+					 controller->ucI2cAddress >> 1,
+					 (controller->ucFanParameters &
 					  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-				i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine);
+				i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
 				rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+				if (rdev->pm.i2c_bus) {
+					struct i2c_board_info info = { };
+					const char *name = pp_lib_thermal_controller_names[controller->ucType];
+					info.addr = controller->ucI2cAddress >> 1;
+					strlcpy(info.type, name, sizeof(info.type));
+					i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+				}
+
 			}
 		}
+		/* first mode is usually default, followed by low to high */
 		for (i = 0; i < power_info->info_4.ucNumStates; i++) {
 			mode_index = 0;
 			power_state = (struct _ATOM_PPLIB_STATE *)
@@ -1754,14 +1808,34 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 					/* skip invalid modes */
 					if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
 						continue;
-					/* skip overclock modes for now */
-					if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
-					    rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)
+					rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+						VOLTAGE_SW;
+					rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+						clock_info->usVDDC;
+					mode_index++;
+				} else if (ASIC_IS_DCE4(rdev)) {
+					struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
+						(struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *)
+						(mode_info->atom_context->bios +
+						 data_offset +
+						 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+						 (power_state->ucClockStateIndices[j] *
+						  power_info->info_4.ucClockInfoSize));
+					sclk = le16_to_cpu(clock_info->usEngineClockLow);
+					sclk |= clock_info->ucEngineClockHigh << 16;
+					mclk = le16_to_cpu(clock_info->usMemoryClockLow);
+					mclk |= clock_info->ucMemoryClockHigh << 16;
+					rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+					rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+					/* skip invalid modes */
+					if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+					    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
 						continue;
 					rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
 						VOLTAGE_SW;
 					rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
 						clock_info->usVDDC;
+					/* XXX usVDDCI */
 					mode_index++;
 				} else {
 					struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
@@ -1781,12 +1855,6 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 					if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
 					    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
 						continue;
-					/* skip overclock modes for now */
-					if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
-					     rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-					    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
-					     rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-						continue;
 					rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
 						VOLTAGE_SW;
 					rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
@@ -1798,7 +1866,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 			if (mode_index) {
 				misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
 				misc2 = le16_to_cpu(non_clock_info->usClassification);
-				rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+				rdev->pm.power_state[state_index].misc = misc;
+				rdev->pm.power_state[state_index].misc2 = misc2;
+				rdev->pm.power_state[state_index].pcie_lanes =
 					((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
 					 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
 				switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
@@ -1815,22 +1885,36 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 						POWER_STATE_TYPE_PERFORMANCE;
 					break;
 				}
+				rdev->pm.power_state[state_index].flags = 0;
+				if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+					rdev->pm.power_state[state_index].flags |=
+						RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
 				if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
 					rdev->pm.power_state[state_index].type =
 						POWER_STATE_TYPE_DEFAULT;
-					rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+					rdev->pm.default_power_state_index = state_index;
 					rdev->pm.power_state[state_index].default_clock_mode =
 						&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
 				}
 				state_index++;
 			}
 		}
+		/* if multiple clock modes, mark the lowest as no display */
+		for (i = 0; i < state_index; i++) {
+			if (rdev->pm.power_state[i].num_clock_modes > 1)
+				rdev->pm.power_state[i].clock_info[0].flags |=
+					RADEON_PM_MODE_NO_DISPLAY;
+		}
+		/* first mode is usually default */
+		if (rdev->pm.default_power_state_index == -1) {
+			rdev->pm.power_state[0].type =
+				POWER_STATE_TYPE_DEFAULT;
+			rdev->pm.default_power_state_index = 0;
+			rdev->pm.power_state[0].default_clock_mode =
+				&rdev->pm.power_state[0].clock_info[0];
+		}
 	}
 } else {
-	/* XXX figure out some good default low power mode for cards w/out power tables */
-}
-
-if (rdev->pm.default_power_state == NULL) {
-	/* add the default mode */
+	/* add the default mode */
 	rdev->pm.power_state[state_index].type =
 		POWER_STATE_TYPE_DEFAULT;
@@ -1840,18 +1924,16 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 	rdev->pm.power_state[state_index].default_clock_mode =
 		&rdev->pm.power_state[state_index].clock_info[0];
 	rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
-	if (rdev->asic->get_pcie_lanes)
-		rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
-	else
-		rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
-	rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+	rdev->pm.power_state[state_index].pcie_lanes = 16;
+	rdev->pm.default_power_state_index = state_index;
+	rdev->pm.power_state[state_index].flags = 0;
 	state_index++;
 	}
+
 	rdev->pm.num_power_states = state_index;
 
-	rdev->pm.current_power_state = rdev->pm.default_power_state;
-	rdev->pm.current_clock_mode =
-		rdev->pm.default_power_state->default_clock_mode;
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
 }
 
 void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
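
Two related changes above work together: the controller name tables are lowercased to match Linux i2c/hwmon device IDs (lm63, adm1032, max6649, ...), and when a thermal bus is found the parser now instantiates the sensor as an i2c client so the matching hwmon driver can bind to it. A self-contained sketch of that registration idiom — the adapter pointer and the 0x4c address are placeholders, and "lm63" stands in for whatever the table names:

#include <linux/i2c.h>
#include <linux/string.h>

/* Sketch: instantiate an i2c client for a thermal sensor so the matching
 * hwmon driver ("lm63" here) can bind.  The adapter and address are
 * placeholders; radeon takes them from the BIOS power tables. */
static struct i2c_client *register_thermal_sensor(struct i2c_adapter *adapter)
{
	struct i2c_board_info info = { };

	info.addr = 0x4c;	/* 7-bit slave address from the BIOS table */
	strlcpy(info.type, "lm63", sizeof(info.type));

	/* i2c_new_device() (the API of this era) returns NULL on failure */
	return i2c_new_device(adapter, &info);
}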
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 8ad71f701316..fbba938f8048 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -85,12 +85,11 @@ static bool radeon_read_bios(struct radeon_device *rdev)
 		pci_unmap_rom(rdev->pdev, bios);
 		return false;
 	}
-	rdev->bios = kmalloc(size, GFP_KERNEL);
+	rdev->bios = kmemdup(bios, size, GFP_KERNEL);
 	if (rdev->bios == NULL) {
 		pci_unmap_rom(rdev->pdev, bios);
 		return false;
 	}
-	memcpy(rdev->bios, bios, size);
 	pci_unmap_rom(rdev->pdev, bios);
 	return true;
 }
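
The hunk above is a standard kernel cleanup: a kmalloc() immediately followed by a memcpy() of the same size collapses into kmemdup(), which allocates and copies in one step and fails the same way (NULL on allocation failure). A sketch of the equivalence, with illustrative names:

#include <linux/slab.h>
#include <linux/string.h>

/* Before: allocate, check, then copy -- two steps to keep in sync. */
static void *copy_rom_old(const void *rom, size_t size)
{
	void *buf = kmalloc(size, GFP_KERNEL);

	if (!buf)
		return NULL;
	memcpy(buf, rom, size);
	return buf;
}

/* After: kmemdup() does the allocation and the copy in one call. */
static void *copy_rom_new(const void *rom, size_t size)
{
	return kmemdup(rom, size, GFP_KERNEL);
}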
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 37db8adb2748..7b5e10d3e9c9 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -450,17 +450,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
 {
 	int edid_info;
 	struct edid *edid;
+	unsigned char *raw;
 	edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
 	if (!edid_info)
 		return false;
 
-	edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
-		       GFP_KERNEL);
+	raw = rdev->bios + edid_info;
+	edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL);
 	if (edid == NULL)
 		return false;
 
-	memcpy((unsigned char *)edid,
-	       (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);
+	memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1));
 
 	if (!drm_edid_is_valid(edid)) {
 		kfree(edid);
@@ -600,7 +600,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
 	}
 	i2c.mm_i2c = false;
 	i2c.i2c_id = 0;
-	i2c.hpd_id = 0;
+	i2c.hpd = RADEON_HPD_NONE;
 
 	if (ddc_line)
 		i2c.valid = true;
@@ -1113,18 +1113,20 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
 			break;
 
 		if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
-		    (RBIOS16(tmp + 2) ==
-		     lvds->native_mode.vdisplay)) {
-			lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8;
-			lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8;
-			lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) +
-						       RBIOS16(tmp + 21)) * 8;
-
-			lvds->native_mode.vtotal = RBIOS16(tmp + 24);
-			lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff;
-			lvds->native_mode.vsync_end =
-				((RBIOS16(tmp + 28) & 0xf800) >> 11) +
-				(RBIOS16(tmp + 28) & 0x7ff);
+		    (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+			lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+				(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+			lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+				(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+			lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+				(RBIOS8(tmp + 23) * 8);
+
+			lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+				(RBIOS16(tmp + 24) - RBIOS16(tmp + 26));
+			lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+				((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26));
+			lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+				((RBIOS16(tmp + 28) & 0xf800) >> 11);
 
 			lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
 			lvds->native_mode.flags = 0;
@@ -2196,7 +2198,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 					    ATOM_DEVICE_DFP1_SUPPORT);
 
 			ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
-			hpd.hpd = RADEON_HPD_NONE;
+			hpd.hpd = RADEON_HPD_1;
 			radeon_add_legacy_connector(dev,
 						    0,
 						    ATOM_DEVICE_CRT1_SUPPORT |
@@ -2366,7 +2368,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
 	u8 rev, blocks, tmp;
 	int state_index = 0;
 
-	rdev->pm.default_power_state = NULL;
+	rdev->pm.default_power_state_index = -1;
 
 	if (rdev->flags & RADEON_IS_MOBILITY) {
 		offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
@@ -2380,17 +2382,13 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
 			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
 			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
 				goto default_mode;
-			/* skip overclock modes for now */
-			if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
-			     rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-			    (rdev->pm.power_state[state_index].clock_info[0].sclk >
-			     rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-				goto default_mode;
 			rdev->pm.power_state[state_index].type =
 				POWER_STATE_TYPE_BATTERY;
 			misc = RBIOS16(offset + 0x5 + 0x0);
 			if (rev > 4)
 				misc2 = RBIOS16(offset + 0x5 + 0xe);
+			rdev->pm.power_state[state_index].misc = misc;
+			rdev->pm.power_state[state_index].misc2 = misc2;
 			if (misc & 0x4) {
 				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
 				if (misc & 0x8)
@@ -2437,8 +2435,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
 			} else
 				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
 			if (rev > 6)
-				rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+				rdev->pm.power_state[state_index].pcie_lanes =
 					RBIOS8(offset + 0x5 + 0x10);
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
 			state_index++;
 		} else {
 			/* XXX figure out some good default low power mode for mobility cards w/out power tables */
@@ -2456,16 +2455,13 @@ default_mode:
 	rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
 	rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
 	rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
-	if (rdev->asic->get_pcie_lanes)
-		rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
-	else
-		rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
-	rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+	rdev->pm.power_state[state_index].pcie_lanes = 16;
+	rdev->pm.power_state[state_index].flags = 0;
+	rdev->pm.default_power_state_index = state_index;
 	rdev->pm.num_power_states = state_index + 1;
 
-	rdev->pm.current_power_state = rdev->pm.default_power_state;
-	rdev->pm.current_clock_mode =
-		rdev->pm.default_power_state->default_clock_mode;
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
 }
 
 void radeon_external_tmds_setup(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4559a53d5e57..0c7ccc6961a3 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1041,7 +1041,6 @@ radeon_add_atom_connector(struct drm_device *dev,
 	struct radeon_connector_atom_dig *radeon_dig_connector;
 	uint32_t subpixel_order = SubPixelNone;
 	bool shared_ddc = false;
-	int ret;
 
 	/* fixme - tv/cv/din */
 	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1076,9 +1075,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 	switch (connector_type) {
 	case DRM_MODE_CONNECTOR_VGA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
 			if (!radeon_connector->ddc_bus)
@@ -1088,12 +1085,11 @@ radeon_add_atom_connector(struct drm_device *dev,
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.load_detect_property,
 						      1);
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 		break;
 	case DRM_MODE_CONNECTOR_DVIA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
 			if (!radeon_connector->ddc_bus)
@@ -1113,9 +1109,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
 		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
 			if (!radeon_connector->ddc_bus)
@@ -1141,9 +1135,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
 		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
 			if (!radeon_connector->ddc_bus)
@@ -1163,9 +1155,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
 		drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			/* add DP i2c bus */
 			if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1191,9 +1181,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 	case DRM_MODE_CONNECTOR_9PinDIN:
 		if (radeon_tv == 1) {
 			drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-			ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
-			if (ret)
-				goto failed;
+			drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
 			radeon_connector->dac_load_detect = true;
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.load_detect_property,
@@ -1211,9 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
 		drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
-		if (ret)
-			goto failed;
+		drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
 			if (!radeon_connector->ddc_bus)
@@ -1226,6 +1212,12 @@ radeon_add_atom_connector(struct drm_device *dev,
1226 break; 1212 break;
1227 } 1213 }
1228 1214
1215 if (hpd->hpd == RADEON_HPD_NONE) {
1216 if (i2c_bus->valid)
1217 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1218 } else
1219 connector->polled = DRM_CONNECTOR_POLL_HPD;
1220
1229 connector->display_info.subpixel_order = subpixel_order; 1221 connector->display_info.subpixel_order = subpixel_order;
1230 drm_sysfs_connector_add(connector); 1222 drm_sysfs_connector_add(connector);
1231 return; 1223 return;
@@ -1250,7 +1242,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
1250 struct drm_connector *connector; 1242 struct drm_connector *connector;
1251 struct radeon_connector *radeon_connector; 1243 struct radeon_connector *radeon_connector;
1252 uint32_t subpixel_order = SubPixelNone; 1244 uint32_t subpixel_order = SubPixelNone;
1253 int ret;
1254 1245
1255 /* fixme - tv/cv/din */ 1246 /* fixme - tv/cv/din */
1256 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 1247 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1278,9 +1269,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1278 switch (connector_type) { 1269 switch (connector_type) {
1279 case DRM_MODE_CONNECTOR_VGA: 1270 case DRM_MODE_CONNECTOR_VGA:
1280 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1271 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
1281 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 1272 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
1282 if (ret)
1283 goto failed;
1284 if (i2c_bus->valid) { 1273 if (i2c_bus->valid) {
1285 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); 1274 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
1286 if (!radeon_connector->ddc_bus) 1275 if (!radeon_connector->ddc_bus)
@@ -1290,12 +1279,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
1290 drm_connector_attach_property(&radeon_connector->base, 1279 drm_connector_attach_property(&radeon_connector->base,
1291 rdev->mode_info.load_detect_property, 1280 rdev->mode_info.load_detect_property,
1292 1); 1281 1);
1282 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1293 break; 1283 break;
1294 case DRM_MODE_CONNECTOR_DVIA: 1284 case DRM_MODE_CONNECTOR_DVIA:
1295 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1285 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
1296 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 1286 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
1297 if (ret)
1298 goto failed;
1299 if (i2c_bus->valid) { 1287 if (i2c_bus->valid) {
1300 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 1288 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
1301 if (!radeon_connector->ddc_bus) 1289 if (!radeon_connector->ddc_bus)
@@ -1309,9 +1297,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1309 case DRM_MODE_CONNECTOR_DVII: 1297 case DRM_MODE_CONNECTOR_DVII:
1310 case DRM_MODE_CONNECTOR_DVID: 1298 case DRM_MODE_CONNECTOR_DVID:
1311 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1299 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
1312 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 1300 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
1313 if (ret)
1314 goto failed;
1315 if (i2c_bus->valid) { 1301 if (i2c_bus->valid) {
1316 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 1302 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
1317 if (!radeon_connector->ddc_bus) 1303 if (!radeon_connector->ddc_bus)
@@ -1330,9 +1316,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1330 case DRM_MODE_CONNECTOR_9PinDIN: 1316 case DRM_MODE_CONNECTOR_9PinDIN:
1331 if (radeon_tv == 1) { 1317 if (radeon_tv == 1) {
1332 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1318 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1333 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1319 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1334 if (ret)
1335 goto failed;
1336 radeon_connector->dac_load_detect = true; 1320 radeon_connector->dac_load_detect = true;
1337	/* RS400,RC410,RS480 chipsets seem to report a lot	1321	/* RS400,RC410,RS480 chipsets seem to report a lot
1338	* of false positives on load detect; we haven't yet	1322	* of false positives on load detect; we haven't yet
@@ -1351,9 +1335,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1351 break; 1335 break;
1352 case DRM_MODE_CONNECTOR_LVDS: 1336 case DRM_MODE_CONNECTOR_LVDS:
1353 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1337 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
1354 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); 1338 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
1355 if (ret)
1356 goto failed;
1357 if (i2c_bus->valid) { 1339 if (i2c_bus->valid) {
1358 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); 1340 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
1359 if (!radeon_connector->ddc_bus) 1341 if (!radeon_connector->ddc_bus)
@@ -1366,6 +1348,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
1366 break; 1348 break;
1367 } 1349 }
1368 1350
1351 if (hpd->hpd == RADEON_HPD_NONE) {
1352 if (i2c_bus->valid)
1353 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1354 } else
1355 connector->polled = DRM_CONNECTOR_POLL_HPD;
1369 connector->display_info.subpixel_order = subpixel_order; 1356 connector->display_info.subpixel_order = subpixel_order;
1370 drm_sysfs_connector_add(connector); 1357 drm_sysfs_connector_add(connector);
1371 return; 1358 return;
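
Both connector-setup paths above end with the same new policy: drm_connector_helper_add() no longer returns a value (hence the dropped ret/goto failed pairs), and every connector is classified for the new KMS output-polling code. A minimal sketch of that classification, pulled out of the hunks (the wrapper function is hypothetical; the DRM flags and the RADEON_HPD_NONE check are the real ones):

    /* Hypothetical helper restating the policy added above: connectors
     * with a hotplug-detect pin rely on HPD interrupts, connectors with
     * only a DDC bus fall back to periodic connect polling, and
     * connectors with neither are never polled. */
    static void radeon_pick_polling(struct drm_connector *connector,
                                    bool has_hpd, bool has_ddc)
    {
            if (has_hpd)
                    connector->polled = DRM_CONNECTOR_POLL_HPD;
            else if (has_ddc)
                    connector->polled = DRM_CONNECTOR_POLL_CONNECT;
            /* else leave connector->polled == 0 */
    }
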
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index f9b0fe002c0a..ae0fb7356e62 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -220,10 +220,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
220 int r; 220 int r;
221 221
222 mutex_lock(&rdev->cs_mutex); 222 mutex_lock(&rdev->cs_mutex);
223 if (rdev->gpu_lockup) {
224 mutex_unlock(&rdev->cs_mutex);
225 return -EINVAL;
226 }
227 /* initialize parser */ 223 /* initialize parser */
228 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 224 memset(&parser, 0, sizeof(struct radeon_cs_parser));
229 parser.filp = filp; 225 parser.filp = filp;
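
The dropped gpu_lockup bail-out is not lost functionality: with this series, lockup detection moves into the fence wait path (see the radeon_fence.c hunks below), which can reset the GPU in place rather than having the CS ioctl refuse all submissions. A condensed sketch of the replacement check, mirroring that later hunk (a fragment, not a complete function):

    /* From the fence-wait rework further down: if the fence sequence
     * number has not moved and the ASIC reports a lockup, reset the
     * GPU instead of failing new command submissions up front. */
    if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
            rdev->gpu_lockup = true;
            r = radeon_gpu_reset(rdev);     /* suspend, reset, resume */
            if (r)
                    return r;               /* reset failed */
            WREG32(rdev->fence_drv.scratch_reg, fence->seq);
            rdev->gpu_lockup = false;
    }
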
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7b629e305560..a20b612ffe75 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -299,24 +299,24 @@ void radeon_update_bandwidth_info(struct radeon_device *rdev)
299 sclk = radeon_get_engine_clock(rdev); 299 sclk = radeon_get_engine_clock(rdev);
300 mclk = rdev->clock.default_mclk; 300 mclk = rdev->clock.default_mclk;
301 301
302 a.full = rfixed_const(100); 302 a.full = dfixed_const(100);
303 rdev->pm.sclk.full = rfixed_const(sclk); 303 rdev->pm.sclk.full = dfixed_const(sclk);
304 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 304 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
305 rdev->pm.mclk.full = rfixed_const(mclk); 305 rdev->pm.mclk.full = dfixed_const(mclk);
306 rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a); 306 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
307 307
308 a.full = rfixed_const(16); 308 a.full = dfixed_const(16);
309	/* core_bandwidth = sclk(MHz) * 16 */	309	/* core_bandwidth = sclk(MHz) * 16 */
310 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); 310 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
311 } else { 311 } else {
312 sclk = radeon_get_engine_clock(rdev); 312 sclk = radeon_get_engine_clock(rdev);
313 mclk = radeon_get_memory_clock(rdev); 313 mclk = radeon_get_memory_clock(rdev);
314 314
315 a.full = rfixed_const(100); 315 a.full = dfixed_const(100);
316 rdev->pm.sclk.full = rfixed_const(sclk); 316 rdev->pm.sclk.full = dfixed_const(sclk);
317 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 317 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
318 rdev->pm.mclk.full = rfixed_const(mclk); 318 rdev->pm.mclk.full = dfixed_const(mclk);
319 rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a); 319 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
320 } 320 }
321} 321}
322 322
@@ -599,9 +599,11 @@ int radeon_device_init(struct radeon_device *rdev,
599 spin_lock_init(&rdev->ih.lock); 599 spin_lock_init(&rdev->ih.lock);
600 mutex_init(&rdev->gem.mutex); 600 mutex_init(&rdev->gem.mutex);
601 mutex_init(&rdev->pm.mutex); 601 mutex_init(&rdev->pm.mutex);
602 mutex_init(&rdev->vram_mutex);
602 rwlock_init(&rdev->fence_drv.lock); 603 rwlock_init(&rdev->fence_drv.lock);
603 INIT_LIST_HEAD(&rdev->gem.objects); 604 INIT_LIST_HEAD(&rdev->gem.objects);
604 init_waitqueue_head(&rdev->irq.vblank_queue); 605 init_waitqueue_head(&rdev->irq.vblank_queue);
606 init_waitqueue_head(&rdev->irq.idle_queue);
605 607
606 /* setup workqueue */ 608 /* setup workqueue */
607 rdev->wq = create_workqueue("radeon"); 609 rdev->wq = create_workqueue("radeon");
@@ -671,7 +673,7 @@ int radeon_device_init(struct radeon_device *rdev,
671	/* Acceleration not working on AGP card, try again	673	/* Acceleration not working on AGP card, try again
672 * with fallback to PCI or PCIE GART 674 * with fallback to PCI or PCIE GART
673 */ 675 */
674 radeon_gpu_reset(rdev); 676 radeon_asic_reset(rdev);
675 radeon_fini(rdev); 677 radeon_fini(rdev);
676 radeon_agp_disable(rdev); 678 radeon_agp_disable(rdev);
677 r = radeon_init(rdev); 679 r = radeon_init(rdev);
@@ -691,6 +693,8 @@ void radeon_device_fini(struct radeon_device *rdev)
691{ 693{
692 DRM_INFO("radeon: finishing device.\n"); 694 DRM_INFO("radeon: finishing device.\n");
693 rdev->shutdown = true; 695 rdev->shutdown = true;
696 /* evict vram memory */
697 radeon_bo_evict_vram(rdev);
694 radeon_fini(rdev); 698 radeon_fini(rdev);
695 destroy_workqueue(rdev->wq); 699 destroy_workqueue(rdev->wq);
696 vga_switcheroo_unregister_client(rdev->pdev); 700 vga_switcheroo_unregister_client(rdev->pdev);
@@ -728,9 +732,10 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
728 continue; 732 continue;
729 } 733 }
730 robj = rfb->obj->driver_private; 734 robj = rfb->obj->driver_private;
731 if (robj != rdev->fbdev_rbo) { 735 /* don't unpin kernel fb objects */
736 if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
732 r = radeon_bo_reserve(robj, false); 737 r = radeon_bo_reserve(robj, false);
733 if (unlikely(r == 0)) { 738 if (r == 0) {
734 radeon_bo_unpin(robj); 739 radeon_bo_unpin(robj);
735 radeon_bo_unreserve(robj); 740 radeon_bo_unreserve(robj);
736 } 741 }
@@ -743,6 +748,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
743 748
744 radeon_save_bios_scratch_regs(rdev); 749 radeon_save_bios_scratch_regs(rdev);
745 750
751 radeon_pm_suspend(rdev);
746 radeon_suspend(rdev); 752 radeon_suspend(rdev);
747 radeon_hpd_fini(rdev); 753 radeon_hpd_fini(rdev);
748 /* evict remaining vram memory */ 754 /* evict remaining vram memory */
@@ -755,7 +761,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
755 pci_set_power_state(dev->pdev, PCI_D3hot); 761 pci_set_power_state(dev->pdev, PCI_D3hot);
756 } 762 }
757 acquire_console_sem(); 763 acquire_console_sem();
758 fb_set_suspend(rdev->fbdev_info, 1); 764 radeon_fbdev_set_suspend(rdev, 1);
759 release_console_sem(); 765 release_console_sem();
760 return 0; 766 return 0;
761} 767}
@@ -778,8 +784,9 @@ int radeon_resume_kms(struct drm_device *dev)
778 /* resume AGP if in use */ 784 /* resume AGP if in use */
779 radeon_agp_resume(rdev); 785 radeon_agp_resume(rdev);
780 radeon_resume(rdev); 786 radeon_resume(rdev);
787 radeon_pm_resume(rdev);
781 radeon_restore_bios_scratch_regs(rdev); 788 radeon_restore_bios_scratch_regs(rdev);
782 fb_set_suspend(rdev->fbdev_info, 0); 789 radeon_fbdev_set_suspend(rdev, 0);
783 release_console_sem(); 790 release_console_sem();
784 791
785 /* reset hpd state */ 792 /* reset hpd state */
@@ -789,6 +796,26 @@ int radeon_resume_kms(struct drm_device *dev)
789 return 0; 796 return 0;
790} 797}
791 798
799int radeon_gpu_reset(struct radeon_device *rdev)
800{
801 int r;
802
803 radeon_save_bios_scratch_regs(rdev);
804 radeon_suspend(rdev);
805
806 r = radeon_asic_reset(rdev);
807 if (!r) {
808	dev_info(rdev->dev, "GPU reset succeeded\n");
809 radeon_resume(rdev);
810 radeon_restore_bios_scratch_regs(rdev);
811 drm_helper_resume_force_mode(rdev->ddev);
812 return 0;
813 }
814	/* bad news, how do we tell userspace? */
815 dev_info(rdev->dev, "GPU reset failed\n");
816 return r;
817}
818
792 819
793/* 820/*
794 * Debugfs 821 * Debugfs
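
The new radeon_gpu_reset() bundles the whole recovery bracket: save the BIOS scratch registers, quiesce the device, run the ASIC-specific reset, and only on success resume, restore, and force a modeset. From a caller's point of view that reduces recovery to one call (the wrapper below is illustrative, not from the patch):

    /* Illustrative caller: everything between suspend and the forced
     * modeset is handled inside radeon_gpu_reset() itself. */
    static void example_try_recovery(struct radeon_device *rdev)
    {
            if (radeon_gpu_reset(rdev))
                    dev_err(rdev->dev, "GPU reset failed, device is wedged\n");
            /* on success the helper has already resumed the chip and
             * re-applied the display configuration */
    }
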
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bb1c122cad21..1006549d1570 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -633,37 +633,37 @@ calc_fb_div(struct radeon_pll *pll,
633 633
634 vco_freq = freq * post_div; 634 vco_freq = freq * post_div;
635 /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */ 635 /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
636 a.full = rfixed_const(pll->reference_freq); 636 a.full = dfixed_const(pll->reference_freq);
637 feedback_divider.full = rfixed_const(vco_freq); 637 feedback_divider.full = dfixed_const(vco_freq);
638 feedback_divider.full = rfixed_div(feedback_divider, a); 638 feedback_divider.full = dfixed_div(feedback_divider, a);
639 a.full = rfixed_const(ref_div); 639 a.full = dfixed_const(ref_div);
640 feedback_divider.full = rfixed_mul(feedback_divider, a); 640 feedback_divider.full = dfixed_mul(feedback_divider, a);
641 641
642 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { 642 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
643 /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */ 643 /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
644 a.full = rfixed_const(10); 644 a.full = dfixed_const(10);
645 feedback_divider.full = rfixed_mul(feedback_divider, a); 645 feedback_divider.full = dfixed_mul(feedback_divider, a);
646 feedback_divider.full += rfixed_const_half(0); 646 feedback_divider.full += dfixed_const_half(0);
647 feedback_divider.full = rfixed_floor(feedback_divider); 647 feedback_divider.full = dfixed_floor(feedback_divider);
648 feedback_divider.full = rfixed_div(feedback_divider, a); 648 feedback_divider.full = dfixed_div(feedback_divider, a);
649 649
650 /* *fb_div = floor(feedback_divider); */ 650 /* *fb_div = floor(feedback_divider); */
651 a.full = rfixed_floor(feedback_divider); 651 a.full = dfixed_floor(feedback_divider);
652 *fb_div = rfixed_trunc(a); 652 *fb_div = dfixed_trunc(a);
653 /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */ 653 /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
654 a.full = rfixed_const(10); 654 a.full = dfixed_const(10);
655 b.full = rfixed_mul(feedback_divider, a); 655 b.full = dfixed_mul(feedback_divider, a);
656 656
657 feedback_divider.full = rfixed_floor(feedback_divider); 657 feedback_divider.full = dfixed_floor(feedback_divider);
658 feedback_divider.full = rfixed_mul(feedback_divider, a); 658 feedback_divider.full = dfixed_mul(feedback_divider, a);
659 feedback_divider.full = b.full - feedback_divider.full; 659 feedback_divider.full = b.full - feedback_divider.full;
660 *fb_div_frac = rfixed_trunc(feedback_divider); 660 *fb_div_frac = dfixed_trunc(feedback_divider);
661 } else { 661 } else {
662 /* *fb_div = floor(feedback_divider + 0.5); */ 662 /* *fb_div = floor(feedback_divider + 0.5); */
663 feedback_divider.full += rfixed_const_half(0); 663 feedback_divider.full += dfixed_const_half(0);
664 feedback_divider.full = rfixed_floor(feedback_divider); 664 feedback_divider.full = dfixed_floor(feedback_divider);
665 665
666 *fb_div = rfixed_trunc(feedback_divider); 666 *fb_div = dfixed_trunc(feedback_divider);
667 *fb_div_frac = 0; 667 *fb_div_frac = 0;
668 } 668 }
669 669
@@ -693,10 +693,10 @@ calc_fb_ref_div(struct radeon_pll *pll,
693 pll_out_max = pll->pll_out_max; 693 pll_out_max = pll->pll_out_max;
694 } 694 }
695 695
696 ffreq.full = rfixed_const(freq); 696 ffreq.full = dfixed_const(freq);
697 /* max_error = ffreq * 0.0025; */ 697 /* max_error = ffreq * 0.0025; */
698 a.full = rfixed_const(400); 698 a.full = dfixed_const(400);
699 max_error.full = rfixed_div(ffreq, a); 699 max_error.full = dfixed_div(ffreq, a);
700 700
701 for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) { 701 for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
702 if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) { 702 if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
@@ -707,9 +707,9 @@ calc_fb_ref_div(struct radeon_pll *pll,
707 continue; 707 continue;
708 708
709 /* pll_out = vco / post_div; */ 709 /* pll_out = vco / post_div; */
710 a.full = rfixed_const(post_div); 710 a.full = dfixed_const(post_div);
711 pll_out.full = rfixed_const(vco); 711 pll_out.full = dfixed_const(vco);
712 pll_out.full = rfixed_div(pll_out, a); 712 pll_out.full = dfixed_div(pll_out, a);
713 713
714 if (pll_out.full >= ffreq.full) { 714 if (pll_out.full >= ffreq.full) {
715 error.full = pll_out.full - ffreq.full; 715 error.full = pll_out.full - ffreq.full;
@@ -831,10 +831,6 @@ void radeon_compute_pll(struct radeon_pll *pll,
831static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) 831static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
832{ 832{
833 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); 833 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
834 struct drm_device *dev = fb->dev;
835
836 if (fb->fbdev)
837 radeonfb_remove(dev, fb);
838 834
839 if (radeon_fb->obj) 835 if (radeon_fb->obj)
840 drm_gem_object_unreference_unlocked(radeon_fb->obj); 836 drm_gem_object_unreference_unlocked(radeon_fb->obj);
@@ -856,21 +852,15 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
856 .create_handle = radeon_user_framebuffer_create_handle, 852 .create_handle = radeon_user_framebuffer_create_handle,
857}; 853};
858 854
859struct drm_framebuffer * 855void
860radeon_framebuffer_create(struct drm_device *dev, 856radeon_framebuffer_init(struct drm_device *dev,
861 struct drm_mode_fb_cmd *mode_cmd, 857 struct radeon_framebuffer *rfb,
862 struct drm_gem_object *obj) 858 struct drm_mode_fb_cmd *mode_cmd,
859 struct drm_gem_object *obj)
863{ 860{
864 struct radeon_framebuffer *radeon_fb; 861 rfb->obj = obj;
865 862 drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
866 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); 863 drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
867 if (radeon_fb == NULL) {
868 return NULL;
869 }
870 drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
871 drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
872 radeon_fb->obj = obj;
873 return &radeon_fb->base;
874} 864}
875 865
876static struct drm_framebuffer * 866static struct drm_framebuffer *
@@ -879,6 +869,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
879 struct drm_mode_fb_cmd *mode_cmd) 869 struct drm_mode_fb_cmd *mode_cmd)
880{ 870{
881 struct drm_gem_object *obj; 871 struct drm_gem_object *obj;
872 struct radeon_framebuffer *radeon_fb;
882 873
883 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); 874 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
884 if (obj == NULL) { 875 if (obj == NULL) {
@@ -886,12 +877,26 @@ radeon_user_framebuffer_create(struct drm_device *dev,
886 "can't create framebuffer\n", mode_cmd->handle); 877 "can't create framebuffer\n", mode_cmd->handle);
887 return NULL; 878 return NULL;
888 } 879 }
889 return radeon_framebuffer_create(dev, mode_cmd, obj); 880
881 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
882 if (radeon_fb == NULL) {
883 return NULL;
884 }
885
886 radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
887
888 return &radeon_fb->base;
889}
890
891static void radeon_output_poll_changed(struct drm_device *dev)
892{
893 struct radeon_device *rdev = dev->dev_private;
894 radeon_fb_output_poll_changed(rdev);
890} 895}
891 896
892static const struct drm_mode_config_funcs radeon_mode_funcs = { 897static const struct drm_mode_config_funcs radeon_mode_funcs = {
893 .fb_create = radeon_user_framebuffer_create, 898 .fb_create = radeon_user_framebuffer_create,
894 .fb_changed = radeonfb_probe, 899 .output_poll_changed = radeon_output_poll_changed
895}; 900};
896 901
897struct drm_prop_enum_list { 902struct drm_prop_enum_list {
@@ -978,8 +983,11 @@ void radeon_update_display_priority(struct radeon_device *rdev)
978 /* set display priority to high for r3xx, rv515 chips 983 /* set display priority to high for r3xx, rv515 chips
979 * this avoids flickering due to underflow to the 984 * this avoids flickering due to underflow to the
980 * display controllers during heavy acceleration. 985 * display controllers during heavy acceleration.
986 * Don't force high on rs4xx igp chips as it seems to
987 * affect the sound card. See kernel bug 15982.
981 */ 988 */
982 if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) 989 if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
990 !(rdev->flags & RADEON_IS_IGP))
983 rdev->disp_priority = 2; 991 rdev->disp_priority = 2;
984 else 992 else
985 rdev->disp_priority = 0; 993 rdev->disp_priority = 0;
@@ -1031,15 +1039,24 @@ int radeon_modeset_init(struct radeon_device *rdev)
1031 } 1039 }
1032 /* initialize hpd */ 1040 /* initialize hpd */
1033 radeon_hpd_init(rdev); 1041 radeon_hpd_init(rdev);
1034 drm_helper_initial_config(rdev->ddev); 1042
1043 /* Initialize power management */
1044 radeon_pm_init(rdev);
1045
1046 radeon_fbdev_init(rdev);
1047 drm_kms_helper_poll_init(rdev->ddev);
1048
1035 return 0; 1049 return 0;
1036} 1050}
1037 1051
1038void radeon_modeset_fini(struct radeon_device *rdev) 1052void radeon_modeset_fini(struct radeon_device *rdev)
1039{ 1053{
1054 radeon_fbdev_fini(rdev);
1040 kfree(rdev->mode_info.bios_hardcoded_edid); 1055 kfree(rdev->mode_info.bios_hardcoded_edid);
1056 radeon_pm_fini(rdev);
1041 1057
1042 if (rdev->mode_info.mode_config_initialized) { 1058 if (rdev->mode_info.mode_config_initialized) {
1059 drm_kms_helper_poll_fini(rdev->ddev);
1043 radeon_hpd_fini(rdev); 1060 radeon_hpd_fini(rdev);
1044 drm_mode_config_cleanup(rdev->ddev); 1061 drm_mode_config_cleanup(rdev->ddev);
1045 rdev->mode_info.mode_config_initialized = false; 1062 rdev->mode_info.mode_config_initialized = false;
@@ -1089,15 +1106,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1089 } 1106 }
1090 if (radeon_crtc->rmx_type != RMX_OFF) { 1107 if (radeon_crtc->rmx_type != RMX_OFF) {
1091 fixed20_12 a, b; 1108 fixed20_12 a, b;
1092 a.full = rfixed_const(crtc->mode.vdisplay); 1109 a.full = dfixed_const(crtc->mode.vdisplay);
1093 b.full = rfixed_const(radeon_crtc->native_mode.hdisplay); 1110 b.full = dfixed_const(radeon_crtc->native_mode.hdisplay);
1094 radeon_crtc->vsc.full = rfixed_div(a, b); 1111 radeon_crtc->vsc.full = dfixed_div(a, b);
1095 a.full = rfixed_const(crtc->mode.hdisplay); 1112 a.full = dfixed_const(crtc->mode.hdisplay);
1096 b.full = rfixed_const(radeon_crtc->native_mode.vdisplay); 1113 b.full = dfixed_const(radeon_crtc->native_mode.vdisplay);
1097 radeon_crtc->hsc.full = rfixed_div(a, b); 1114 radeon_crtc->hsc.full = dfixed_div(a, b);
1098 } else { 1115 } else {
1099 radeon_crtc->vsc.full = rfixed_const(1); 1116 radeon_crtc->vsc.full = dfixed_const(1);
1100 radeon_crtc->hsc.full = rfixed_const(1); 1117 radeon_crtc->hsc.full = dfixed_const(1);
1101 } 1118 }
1102 return true; 1119 return true;
1103} 1120}
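
Worth spelling out, since calc_fb_div() does all of its rounding in 20.12 fixed point: the fractional-divider branch computes floor(x*10 + 0.5)/10, i.e. round-to-nearest at one decimal place. A standalone fragment of that rounding with a made-up value (dfixed_* are the renamed rfixed_* macros; see the deleted radeon_fixed.h further down for their definitions):

    /* Round a 20.12 fixed-point divider to one decimal place, e.g.
     * 35.7372 -> 35.7 (fb_div = 35, fb_div_frac = 7).
     * fb is assumed to hold the raw divider computed earlier. */
    fixed20_12 fb, ten;

    ten.full = dfixed_const(10);
    fb.full = dfixed_mul(fb, ten);          /* x *= 10      */
    fb.full += dfixed_const_half(0);        /* x += 0.5     */
    fb.full = dfixed_floor(fb);             /* x = floor(x) */
    fb.full = dfixed_div(fb, ten);          /* x /= 10      */
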
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b3749d47be7b..902d1731a652 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -44,9 +44,10 @@
44 * - 2.1.0 - add square tiling interface 44 * - 2.1.0 - add square tiling interface
45 * - 2.2.0 - add r6xx/r7xx const buffer support 45 * - 2.2.0 - add r6xx/r7xx const buffer support
46 * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs 46 * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
47 * - 2.4.0 - add crtc id query
47 */ 48 */
48#define KMS_DRIVER_MAJOR 2 49#define KMS_DRIVER_MAJOR 2
49#define KMS_DRIVER_MINOR 3 50#define KMS_DRIVER_MINOR 4
50#define KMS_DRIVER_PATCHLEVEL 0 51#define KMS_DRIVER_PATCHLEVEL 0
51int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 52int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
52int radeon_driver_unload_kms(struct drm_device *dev); 53int radeon_driver_unload_kms(struct drm_device *dev);
@@ -91,7 +92,6 @@ int radeon_testing = 0;
91int radeon_connector_table = 0; 92int radeon_connector_table = 0;
92int radeon_tv = 1; 93int radeon_tv = 1;
93int radeon_new_pll = -1; 94int radeon_new_pll = -1;
94int radeon_dynpm = -1;
95int radeon_audio = 1; 95int radeon_audio = 1;
96int radeon_disp_priority = 0; 96int radeon_disp_priority = 0;
97int radeon_hw_i2c = 0; 97int radeon_hw_i2c = 0;
@@ -132,9 +132,6 @@ module_param_named(tv, radeon_tv, int, 0444);
132MODULE_PARM_DESC(new_pll, "Select new PLL code"); 132MODULE_PARM_DESC(new_pll, "Select new PLL code");
133module_param_named(new_pll, radeon_new_pll, int, 0444); 133module_param_named(new_pll, radeon_new_pll, int, 0444);
134 134
135MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");
136module_param_named(dynpm, radeon_dynpm, int, 0444);
137
138MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); 135MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
139module_param_named(audio, radeon_audio, int, 0444); 136module_param_named(audio, radeon_audio, int, 0444);
140 137
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index c5ddaf58563a..1ebb100015b7 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -309,9 +309,6 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
309 struct drm_device *dev = encoder->dev; 309 struct drm_device *dev = encoder->dev;
310 struct radeon_device *rdev = dev->dev_private; 310 struct radeon_device *rdev = dev->dev_private;
311 311
312 /* adjust pm to upcoming mode change */
313 radeon_pm_compute_clocks(rdev);
314
315 /* set the active encoder to connector routing */ 312 /* set the active encoder to connector routing */
316 radeon_encoder_set_active_device(encoder); 313 radeon_encoder_set_active_device(encoder);
317 drm_mode_set_crtcinfo(adjusted_mode, 0); 314 drm_mode_set_crtcinfo(adjusted_mode, 0);
@@ -1111,8 +1108,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1111 } 1108 }
1112 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 1109 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
1113 1110
1114 /* adjust pm to dpms change */
1115 radeon_pm_compute_clocks(rdev);
1116} 1111}
1117 1112
1118union crtc_source_param { 1113union crtc_source_param {
@@ -1546,10 +1541,49 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
1546 1541
1547static void radeon_atom_encoder_disable(struct drm_encoder *encoder) 1542static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1548{ 1543{
1544 struct drm_device *dev = encoder->dev;
1545 struct radeon_device *rdev = dev->dev_private;
1549 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1546 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1550 struct radeon_encoder_atom_dig *dig; 1547 struct radeon_encoder_atom_dig *dig;
1551 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1548 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1552 1549
1550 switch (radeon_encoder->encoder_id) {
1551 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
1552 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
1553 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
1554 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
1555 atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
1556 break;
1557 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1558 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1559 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1560 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1561 if (ASIC_IS_DCE4(rdev))
1562 /* disable the transmitter */
1563 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1564 else {
1565 /* disable the encoder and transmitter */
1566 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1567 atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
1568 }
1569 break;
1570 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1571 atombios_ddia_setup(encoder, ATOM_DISABLE);
1572 break;
1573 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1574 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1575 atombios_external_tmds_setup(encoder, ATOM_DISABLE);
1576 break;
1577 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1578 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
1579 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
1580 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
1581 atombios_dac_setup(encoder, ATOM_DISABLE);
1582 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
1583 atombios_tv_setup(encoder, ATOM_DISABLE);
1584 break;
1585 }
1586
1553 if (radeon_encoder_is_digital(encoder)) { 1587 if (radeon_encoder_is_digital(encoder)) {
1554 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) 1588 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
1555 r600_hdmi_disable(encoder); 1589 r600_hdmi_disable(encoder);
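
The rewritten radeon_atom_encoder_disable() makes the power-down explicit per encoder family instead of relying on DPMS alone. The one subtlety is the DIG branch: on DCE4 parts the encoder block is shared between outputs, so only the transmitter is torn down there, while older parts disable both. Condensed from the hunk above:

    /* DIG disable, condensed: DCE4 shares encoder blocks, so keep the
     * encoder programmed and only shut the transmitter down. */
    atombios_dig_transmitter_setup(encoder,
                                   ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
    if (!ASIC_IS_DCE4(rdev))
            atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
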
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 9ac57a09784b..e192acfbf0cd 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -23,10 +23,6 @@
23 * Authors: 23 * Authors:
24 * David Airlie 24 * David Airlie
25 */ 25 */
26 /*
27 * Modularization
28 */
29
30#include <linux/module.h> 26#include <linux/module.h>
31#include <linux/slab.h> 27#include <linux/slab.h>
32#include <linux/fb.h> 28#include <linux/fb.h>
@@ -42,17 +38,21 @@
42 38
43#include <linux/vga_switcheroo.h> 39#include <linux/vga_switcheroo.h>
44 40
45struct radeon_fb_device { 41/* object hierarchy -
42	this contains a helper plus a radeon fb;
43	the helper contains a pointer to the radeon framebuffer base class.
44*/
45struct radeon_fbdev {
46 struct drm_fb_helper helper; 46 struct drm_fb_helper helper;
47 struct radeon_framebuffer *rfb; 47 struct radeon_framebuffer rfb;
48 struct radeon_device *rdev; 48 struct list_head fbdev_list;
49 struct radeon_device *rdev;
49}; 50};
50 51
51static struct fb_ops radeonfb_ops = { 52static struct fb_ops radeonfb_ops = {
52 .owner = THIS_MODULE, 53 .owner = THIS_MODULE,
53 .fb_check_var = drm_fb_helper_check_var, 54 .fb_check_var = drm_fb_helper_check_var,
54 .fb_set_par = drm_fb_helper_set_par, 55 .fb_set_par = drm_fb_helper_set_par,
55 .fb_setcolreg = drm_fb_helper_setcolreg,
56 .fb_fillrect = cfb_fillrect, 56 .fb_fillrect = cfb_fillrect,
57 .fb_copyarea = cfb_copyarea, 57 .fb_copyarea = cfb_copyarea,
58 .fb_imageblit = cfb_imageblit, 58 .fb_imageblit = cfb_imageblit,
@@ -61,45 +61,6 @@ static struct fb_ops radeonfb_ops = {
61 .fb_setcmap = drm_fb_helper_setcmap, 61 .fb_setcmap = drm_fb_helper_setcmap,
62}; 62};
63 63
64/**
65 * Currently it is assumed that the old framebuffer is reused.
66 *
67 * LOCKING
68 * caller should hold the mode config lock.
69 *
70 */
71int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
72{
73 struct fb_info *info;
74 struct drm_framebuffer *fb;
75 struct drm_display_mode *mode = crtc->desired_mode;
76
77 fb = crtc->fb;
78 if (fb == NULL) {
79 return 1;
80 }
81 info = fb->fbdev;
82 if (info == NULL) {
83 return 1;
84 }
85 if (mode == NULL) {
86 return 1;
87 }
88 info->var.xres = mode->hdisplay;
89 info->var.right_margin = mode->hsync_start - mode->hdisplay;
90 info->var.hsync_len = mode->hsync_end - mode->hsync_start;
91 info->var.left_margin = mode->htotal - mode->hsync_end;
92 info->var.yres = mode->vdisplay;
93 info->var.lower_margin = mode->vsync_start - mode->vdisplay;
94 info->var.vsync_len = mode->vsync_end - mode->vsync_start;
95 info->var.upper_margin = mode->vtotal - mode->vsync_end;
96 info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
97 /* avoid overflow */
98 info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
99
100 return 0;
101}
102EXPORT_SYMBOL(radeonfb_resize);
103 64
104static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) 65static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
105{ 66{
@@ -125,57 +86,44 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
125 return aligned; 86 return aligned;
126} 87}
127 88
128static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { 89static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
129 .gamma_set = radeon_crtc_fb_gamma_set, 90{
130 .gamma_get = radeon_crtc_fb_gamma_get, 91 struct radeon_bo *rbo = gobj->driver_private;
131}; 92 int ret;
93
94 ret = radeon_bo_reserve(rbo, false);
95 if (likely(ret == 0)) {
96 radeon_bo_kunmap(rbo);
97 radeon_bo_unreserve(rbo);
98 }
99 drm_gem_object_unreference_unlocked(gobj);
100}
132 101
133int radeonfb_create(struct drm_device *dev, 102static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
134 uint32_t fb_width, uint32_t fb_height, 103 struct drm_mode_fb_cmd *mode_cmd,
135 uint32_t surface_width, uint32_t surface_height, 104 struct drm_gem_object **gobj_p)
136 uint32_t surface_depth, uint32_t surface_bpp,
137 struct drm_framebuffer **fb_p)
138{ 105{
139 struct radeon_device *rdev = dev->dev_private; 106 struct radeon_device *rdev = rfbdev->rdev;
140 struct fb_info *info;
141 struct radeon_fb_device *rfbdev;
142 struct drm_framebuffer *fb = NULL;
143 struct radeon_framebuffer *rfb;
144 struct drm_mode_fb_cmd mode_cmd;
145 struct drm_gem_object *gobj = NULL; 107 struct drm_gem_object *gobj = NULL;
146 struct radeon_bo *rbo = NULL; 108 struct radeon_bo *rbo = NULL;
147 struct device *device = &rdev->pdev->dev;
148 int size, aligned_size, ret;
149 u64 fb_gpuaddr;
150 void *fbptr = NULL;
151 unsigned long tmp;
152 bool fb_tiled = false; /* useful for testing */ 109 bool fb_tiled = false; /* useful for testing */
153 u32 tiling_flags = 0; 110 u32 tiling_flags = 0;
111 int ret;
112 int aligned_size, size;
154 113
155 mode_cmd.width = surface_width;
156 mode_cmd.height = surface_height;
157
158 /* avivo can't scanout real 24bpp */
159 if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
160 surface_bpp = 32;
161
162 mode_cmd.bpp = surface_bpp;
163 /* need to align pitch with crtc limits */ 114 /* need to align pitch with crtc limits */
164 mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); 115 mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
165 mode_cmd.depth = surface_depth;
166 116
167 size = mode_cmd.pitch * mode_cmd.height; 117 size = mode_cmd->pitch * mode_cmd->height;
168 aligned_size = ALIGN(size, PAGE_SIZE); 118 aligned_size = ALIGN(size, PAGE_SIZE);
169
170 ret = radeon_gem_object_create(rdev, aligned_size, 0, 119 ret = radeon_gem_object_create(rdev, aligned_size, 0,
171 RADEON_GEM_DOMAIN_VRAM, 120 RADEON_GEM_DOMAIN_VRAM,
172 false, ttm_bo_type_kernel, 121 false, ttm_bo_type_kernel,
173 &gobj); 122 &gobj);
174 if (ret) { 123 if (ret) {
175 printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", 124 printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
176 surface_width, surface_height); 125 aligned_size);
177 ret = -ENOMEM; 126 return -ENOMEM;
178 goto out;
179 } 127 }
180 rbo = gobj->driver_private; 128 rbo = gobj->driver_private;
181 129
@@ -183,7 +131,7 @@ int radeonfb_create(struct drm_device *dev,
183 tiling_flags = RADEON_TILING_MACRO; 131 tiling_flags = RADEON_TILING_MACRO;
184 132
185#ifdef __BIG_ENDIAN 133#ifdef __BIG_ENDIAN
186 switch (mode_cmd.bpp) { 134 switch (mode_cmd->bpp) {
187 case 32: 135 case 32:
188 tiling_flags |= RADEON_TILING_SWAP_32BIT; 136 tiling_flags |= RADEON_TILING_SWAP_32BIT;
189 break; 137 break;
@@ -196,57 +144,81 @@ int radeonfb_create(struct drm_device *dev,
196 144
197 if (tiling_flags) { 145 if (tiling_flags) {
198 ret = radeon_bo_set_tiling_flags(rbo, 146 ret = radeon_bo_set_tiling_flags(rbo,
199 tiling_flags | RADEON_TILING_SURFACE, 147 tiling_flags | RADEON_TILING_SURFACE,
200 mode_cmd.pitch); 148 mode_cmd->pitch);
201 if (ret) 149 if (ret)
202 dev_err(rdev->dev, "FB failed to set tiling flags\n"); 150 dev_err(rdev->dev, "FB failed to set tiling flags\n");
203 } 151 }
204 mutex_lock(&rdev->ddev->struct_mutex); 152
205 fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); 153
206 if (fb == NULL) {
207 DRM_ERROR("failed to allocate fb.\n");
208 ret = -ENOMEM;
209 goto out_unref;
210 }
211 ret = radeon_bo_reserve(rbo, false); 154 ret = radeon_bo_reserve(rbo, false);
212 if (unlikely(ret != 0)) 155 if (unlikely(ret != 0))
213 goto out_unref; 156 goto out_unref;
214 ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); 157 ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
215 if (ret) { 158 if (ret) {
216 radeon_bo_unreserve(rbo); 159 radeon_bo_unreserve(rbo);
217 goto out_unref; 160 goto out_unref;
218 } 161 }
219 if (fb_tiled) 162 if (fb_tiled)
220 radeon_bo_check_tiling(rbo, 0, 0); 163 radeon_bo_check_tiling(rbo, 0, 0);
221 ret = radeon_bo_kmap(rbo, &fbptr); 164 ret = radeon_bo_kmap(rbo, NULL);
222 radeon_bo_unreserve(rbo); 165 radeon_bo_unreserve(rbo);
223 if (ret) { 166 if (ret) {
224 goto out_unref; 167 goto out_unref;
225 } 168 }
226 169
227 list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); 170 *gobj_p = gobj;
171 return 0;
172out_unref:
173 radeonfb_destroy_pinned_object(gobj);
174 *gobj_p = NULL;
175 return ret;
176}
177
178static int radeonfb_create(struct radeon_fbdev *rfbdev,
179 struct drm_fb_helper_surface_size *sizes)
180{
181 struct radeon_device *rdev = rfbdev->rdev;
182 struct fb_info *info;
183 struct drm_framebuffer *fb = NULL;
184 struct drm_mode_fb_cmd mode_cmd;
185 struct drm_gem_object *gobj = NULL;
186 struct radeon_bo *rbo = NULL;
187 struct device *device = &rdev->pdev->dev;
188 int ret;
189 unsigned long tmp;
190
191 mode_cmd.width = sizes->surface_width;
192 mode_cmd.height = sizes->surface_height;
193
194 /* avivo can't scanout real 24bpp */
195 if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
196 sizes->surface_bpp = 32;
197
198 mode_cmd.bpp = sizes->surface_bpp;
199 mode_cmd.depth = sizes->surface_depth;
228 200
229 *fb_p = fb; 201 ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
230 rfb = to_radeon_framebuffer(fb); 202 rbo = gobj->driver_private;
231 rdev->fbdev_rfb = rfb;
232 rdev->fbdev_rbo = rbo;
233 203
234	info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);	204	/* okay, we have an object; now allocate the framebuffer */
205 info = framebuffer_alloc(0, device);
235 if (info == NULL) { 206 if (info == NULL) {
236 ret = -ENOMEM; 207 ret = -ENOMEM;
237 goto out_unref; 208 goto out_unref;
238 } 209 }
239 210
240 rdev->fbdev_info = info; 211 info->par = rfbdev;
241 rfbdev = info->par; 212
242 rfbdev->helper.funcs = &radeon_fb_helper_funcs; 213 radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
243 rfbdev->helper.dev = dev;
244 ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
245 RADEONFB_CONN_LIMIT);
246 if (ret)
247 goto out_unref;
248 214
249 memset_io(fbptr, 0x0, aligned_size); 215 fb = &rfbdev->rfb.base;
216
217 /* setup helper */
218 rfbdev->helper.fb = fb;
219 rfbdev->helper.fbdev = info;
220
221 memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
250 222
251 strcpy(info->fix.id, "radeondrmfb"); 223 strcpy(info->fix.id, "radeondrmfb");
252 224
@@ -255,17 +227,22 @@ int radeonfb_create(struct drm_device *dev,
255 info->flags = FBINFO_DEFAULT; 227 info->flags = FBINFO_DEFAULT;
256 info->fbops = &radeonfb_ops; 228 info->fbops = &radeonfb_ops;
257 229
258 tmp = fb_gpuaddr - rdev->mc.vram_start; 230 tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
259 info->fix.smem_start = rdev->mc.aper_base + tmp; 231 info->fix.smem_start = rdev->mc.aper_base + tmp;
260 info->fix.smem_len = size; 232 info->fix.smem_len = radeon_bo_size(rbo);
261 info->screen_base = fbptr; 233 info->screen_base = rbo->kptr;
262 info->screen_size = size; 234 info->screen_size = radeon_bo_size(rbo);
263 235
264 drm_fb_helper_fill_var(info, fb, fb_width, fb_height); 236 drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
265 237
266 /* setup aperture base/size for vesafb takeover */ 238 /* setup aperture base/size for vesafb takeover */
267 info->aperture_base = rdev->ddev->mode_config.fb_base; 239 info->apertures = alloc_apertures(1);
268 info->aperture_size = rdev->mc.real_vram_size; 240 if (!info->apertures) {
241 ret = -ENOMEM;
242 goto out_unref;
243 }
244 info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
245 info->apertures->ranges[0].size = rdev->mc.real_vram_size;
269 246
270 info->fix.mmio_start = 0; 247 info->fix.mmio_start = 0;
271 info->fix.mmio_len = 0; 248 info->fix.mmio_len = 0;
@@ -274,44 +251,55 @@ int radeonfb_create(struct drm_device *dev,
274 info->pixmap.access_align = 32; 251 info->pixmap.access_align = 32;
275 info->pixmap.flags = FB_PIXMAP_SYSTEM; 252 info->pixmap.flags = FB_PIXMAP_SYSTEM;
276 info->pixmap.scan_align = 1; 253 info->pixmap.scan_align = 1;
254
277 if (info->screen_base == NULL) { 255 if (info->screen_base == NULL) {
278 ret = -ENOSPC; 256 ret = -ENOSPC;
279 goto out_unref; 257 goto out_unref;
280 } 258 }
259
260 ret = fb_alloc_cmap(&info->cmap, 256, 0);
261 if (ret) {
262 ret = -ENOMEM;
263 goto out_unref;
264 }
265
281 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); 266 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
282 DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); 267 DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
283 DRM_INFO("size %lu\n", (unsigned long)size); 268 DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
284 DRM_INFO("fb depth is %d\n", fb->depth); 269 DRM_INFO("fb depth is %d\n", fb->depth);
285 DRM_INFO(" pitch is %d\n", fb->pitch); 270 DRM_INFO(" pitch is %d\n", fb->pitch);
286 271
287 fb->fbdev = info;
288 rfbdev->rfb = rfb;
289 rfbdev->rdev = rdev;
290
291 mutex_unlock(&rdev->ddev->struct_mutex);
292 vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); 272 vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
293 return 0; 273 return 0;
294 274
295out_unref: 275out_unref:
296 if (rbo) { 276 if (rbo) {
297 ret = radeon_bo_reserve(rbo, false); 277
298 if (likely(ret == 0)) {
299 radeon_bo_kunmap(rbo);
300 radeon_bo_unreserve(rbo);
301 }
302 } 278 }
303 if (fb && ret) { 279 if (fb && ret) {
304 list_del(&fb->filp_head);
305 drm_gem_object_unreference(gobj); 280 drm_gem_object_unreference(gobj);
306 drm_framebuffer_cleanup(fb); 281 drm_framebuffer_cleanup(fb);
307 kfree(fb); 282 kfree(fb);
308 } 283 }
309 drm_gem_object_unreference(gobj);
310 mutex_unlock(&rdev->ddev->struct_mutex);
311out:
312 return ret; 284 return ret;
313} 285}
314 286
287static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
288 struct drm_fb_helper_surface_size *sizes)
289{
290 struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
291 int new_fb = 0;
292 int ret;
293
294 if (!helper->fb) {
295 ret = radeonfb_create(rfbdev, sizes);
296 if (ret)
297 return ret;
298 new_fb = 1;
299 }
300 return new_fb;
301}
302
315static char *mode_option; 303static char *mode_option;
316int radeon_parse_options(char *options) 304int radeon_parse_options(char *options)
317{ 305{
@@ -328,46 +316,102 @@ int radeon_parse_options(char *options)
328 return 0; 316 return 0;
329} 317}
330 318
331int radeonfb_probe(struct drm_device *dev) 319void radeon_fb_output_poll_changed(struct radeon_device *rdev)
332{ 320{
333 struct radeon_device *rdev = dev->dev_private; 321 drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
334 int bpp_sel = 32;
335
336 /* select 8 bpp console on RN50 or 16MB cards */
337 if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
338 bpp_sel = 8;
339
340 return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
341} 322}
342 323
343int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) 324static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
344{ 325{
345 struct fb_info *info; 326 struct fb_info *info;
346 struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); 327 struct radeon_framebuffer *rfb = &rfbdev->rfb;
347 struct radeon_bo *rbo; 328 struct radeon_bo *rbo;
348 int r; 329 int r;
349 330
350 if (!fb) { 331 if (rfbdev->helper.fbdev) {
351 return -EINVAL; 332 info = rfbdev->helper.fbdev;
333
334 unregister_framebuffer(info);
335 if (info->cmap.len)
336 fb_dealloc_cmap(&info->cmap);
337 framebuffer_release(info);
352 } 338 }
353 info = fb->fbdev; 339
354 if (info) { 340 if (rfb->obj) {
355 struct radeon_fb_device *rfbdev = info->par;
356 rbo = rfb->obj->driver_private; 341 rbo = rfb->obj->driver_private;
357 unregister_framebuffer(info);
358 r = radeon_bo_reserve(rbo, false); 342 r = radeon_bo_reserve(rbo, false);
359 if (likely(r == 0)) { 343 if (likely(r == 0)) {
360 radeon_bo_kunmap(rbo); 344 radeon_bo_kunmap(rbo);
361 radeon_bo_unpin(rbo); 345 radeon_bo_unpin(rbo);
362 radeon_bo_unreserve(rbo); 346 radeon_bo_unreserve(rbo);
363 } 347 }
364 drm_fb_helper_free(&rfbdev->helper); 348 drm_gem_object_unreference_unlocked(rfb->obj);
365 framebuffer_release(info);
366 } 349 }
350 drm_fb_helper_fini(&rfbdev->helper);
351 drm_framebuffer_cleanup(&rfb->base);
367 352
368 printk(KERN_INFO "unregistered panic notifier\n"); 353 return 0;
354}
355
356static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
357 .gamma_set = radeon_crtc_fb_gamma_set,
358 .gamma_get = radeon_crtc_fb_gamma_get,
359 .fb_probe = radeon_fb_find_or_create_single,
360};
361
362int radeon_fbdev_init(struct radeon_device *rdev)
363{
364 struct radeon_fbdev *rfbdev;
365 int bpp_sel = 32;
366
367 /* select 8 bpp console on RN50 or 16MB cards */
368 if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
369 bpp_sel = 8;
370
371 rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
372 if (!rfbdev)
373 return -ENOMEM;
374
375 rfbdev->rdev = rdev;
376 rdev->mode_info.rfbdev = rfbdev;
377 rfbdev->helper.funcs = &radeon_fb_helper_funcs;
369 378
379 drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
380 rdev->num_crtc,
381 RADEONFB_CONN_LIMIT);
382 drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
383 drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
370 return 0; 384 return 0;
371} 385}
372EXPORT_SYMBOL(radeonfb_remove); 386
373MODULE_LICENSE("GPL"); 387void radeon_fbdev_fini(struct radeon_device *rdev)
388{
389 if (!rdev->mode_info.rfbdev)
390 return;
391
392 radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
393 kfree(rdev->mode_info.rfbdev);
394 rdev->mode_info.rfbdev = NULL;
395}
396
397void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
398{
399 fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
400}
401
402int radeon_fbdev_total_size(struct radeon_device *rdev)
403{
404 struct radeon_bo *robj;
405 int size = 0;
406
407 robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
408 size += radeon_bo_size(robj);
409 return size;
410}
411
412bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
413{
414 if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
415 return true;
416 return false;
417}
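
One consequence of the fbdev rework that is easy to miss in the noise: hotplug no longer flows through the removed fb_changed/radeonfb_probe hook, but from the generic KMS poll helper into the fbdev layer. A sketch of the new chain using only functions that appear in this diff (the wrapper framing is illustrative):

    /* Hotplug path after this series: the KMS poll worker notices a
     * connector change and calls the mode_config hook, which forwards
     * to the fbdev helper so the console reconfigures itself. */
    static void example_output_changed(struct drm_device *dev)
    {
            struct radeon_device *rdev = dev->dev_private;

            /* mode_config_funcs.output_poll_changed: */
            radeon_fb_output_poll_changed(rdev);
            /* ...which is just:
             * drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
             */
    }
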
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d90f95b405c5..b1f9a81b5d1d 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -58,7 +58,6 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
58 radeon_fence_ring_emit(rdev, fence); 58 radeon_fence_ring_emit(rdev, fence);
59 59
60 fence->emited = true; 60 fence->emited = true;
61 fence->timeout = jiffies + ((2000 * HZ) / 1000);
62 list_del(&fence->list); 61 list_del(&fence->list);
63 list_add_tail(&fence->list, &rdev->fence_drv.emited); 62 list_add_tail(&fence->list, &rdev->fence_drv.emited);
64 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); 63 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@@ -71,15 +70,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
71 struct list_head *i, *n; 70 struct list_head *i, *n;
72 uint32_t seq; 71 uint32_t seq;
73 bool wake = false; 72 bool wake = false;
73 unsigned long cjiffies;
74 74
75 if (rdev == NULL) {
76 return true;
77 }
78 if (rdev->shutdown) {
79 return true;
80 }
81 seq = RREG32(rdev->fence_drv.scratch_reg); 75 seq = RREG32(rdev->fence_drv.scratch_reg);
82 rdev->fence_drv.last_seq = seq; 76 if (seq != rdev->fence_drv.last_seq) {
77 rdev->fence_drv.last_seq = seq;
78 rdev->fence_drv.last_jiffies = jiffies;
79 rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
80 } else {
81 cjiffies = jiffies;
82 if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
83 cjiffies -= rdev->fence_drv.last_jiffies;
84 if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
85 /* update the timeout */
86 rdev->fence_drv.last_timeout -= cjiffies;
87 } else {
88	/* the 500ms timeout has elapsed, we should test
89 * for GPU lockup
90 */
91 rdev->fence_drv.last_timeout = 1;
92 }
93 } else {
94	/* wrap around; update last_jiffies, we will just wait
95 * a little longer
96 */
97 rdev->fence_drv.last_jiffies = cjiffies;
98 }
99 return false;
100 }
83 n = NULL; 101 n = NULL;
84 list_for_each(i, &rdev->fence_drv.emited) { 102 list_for_each(i, &rdev->fence_drv.emited) {
85 fence = list_entry(i, struct radeon_fence, list); 103 fence = list_entry(i, struct radeon_fence, list);
@@ -171,9 +189,8 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
171int radeon_fence_wait(struct radeon_fence *fence, bool intr) 189int radeon_fence_wait(struct radeon_fence *fence, bool intr)
172{ 190{
173 struct radeon_device *rdev; 191 struct radeon_device *rdev;
174 unsigned long cur_jiffies; 192 unsigned long irq_flags, timeout;
175 unsigned long timeout; 193 u32 seq;
176 bool expired = false;
177 int r; 194 int r;
178 195
179 if (fence == NULL) { 196 if (fence == NULL) {
@@ -184,21 +201,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
184 if (radeon_fence_signaled(fence)) { 201 if (radeon_fence_signaled(fence)) {
185 return 0; 202 return 0;
186 } 203 }
187 204 timeout = rdev->fence_drv.last_timeout;
188retry: 205retry:
189 cur_jiffies = jiffies; 206 /* save current sequence used to check for GPU lockup */
190 timeout = HZ / 100; 207 seq = rdev->fence_drv.last_seq;
191 if (time_after(fence->timeout, cur_jiffies)) {
192 timeout = fence->timeout - cur_jiffies;
193 }
194
195 if (intr) { 208 if (intr) {
196 radeon_irq_kms_sw_irq_get(rdev); 209 radeon_irq_kms_sw_irq_get(rdev);
197 r = wait_event_interruptible_timeout(rdev->fence_drv.queue, 210 r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
198 radeon_fence_signaled(fence), timeout); 211 radeon_fence_signaled(fence), timeout);
199 radeon_irq_kms_sw_irq_put(rdev); 212 radeon_irq_kms_sw_irq_put(rdev);
200 if (unlikely(r < 0)) 213 if (unlikely(r < 0)) {
201 return r; 214 return r;
215 }
202 } else { 216 } else {
203 radeon_irq_kms_sw_irq_get(rdev); 217 radeon_irq_kms_sw_irq_get(rdev);
204 r = wait_event_timeout(rdev->fence_drv.queue, 218 r = wait_event_timeout(rdev->fence_drv.queue,
@@ -206,38 +220,36 @@ retry:
206 radeon_irq_kms_sw_irq_put(rdev); 220 radeon_irq_kms_sw_irq_put(rdev);
207 } 221 }
208 if (unlikely(!radeon_fence_signaled(fence))) { 222 if (unlikely(!radeon_fence_signaled(fence))) {
209 if (unlikely(r == 0)) { 223 /* we were interrupted for some reason and fence isn't
210 expired = true; 224 * isn't signaled yet, resume wait
225 */
226 if (r) {
227 timeout = r;
228 goto retry;
211 } 229 }
212 if (unlikely(expired)) { 230 /* don't protect read access to rdev->fence_drv.last_seq
213	timeout = 1;	231	* if we're experiencing a lockup the value doesn't change
214 if (time_after(cur_jiffies, fence->timeout)) { 232 */
215 timeout = cur_jiffies - fence->timeout; 233 if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
216 } 234 /* good news we believe it's a lockup */
217	timeout = jiffies_to_msecs(timeout);	234	/* good news: we believe it's a lockup */
218 if (timeout > 500) { 236 /* FIXME: what should we do ? marking everyone
219 DRM_ERROR("fence(%p:0x%08X) %lums timeout " 237 * as signaled for now
220 "going to reset GPU\n", 238 */
221 fence, fence->seq, timeout); 239 rdev->gpu_lockup = true;
222 radeon_gpu_reset(rdev); 240 r = radeon_gpu_reset(rdev);
223 WREG32(rdev->fence_drv.scratch_reg, fence->seq); 241 if (r)
224 } 242 return r;
243 WREG32(rdev->fence_drv.scratch_reg, fence->seq);
244 rdev->gpu_lockup = false;
225 } 245 }
246 timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
247 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
248 rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
249 rdev->fence_drv.last_jiffies = jiffies;
250 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
226 goto retry; 251 goto retry;
227 } 252 }
228 if (unlikely(expired)) {
229 rdev->fence_drv.count_timeout++;
230 cur_jiffies = jiffies;
231 timeout = 1;
232 if (time_after(cur_jiffies, fence->timeout)) {
233 timeout = cur_jiffies - fence->timeout;
234 }
235 timeout = jiffies_to_msecs(timeout);
236 DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
237 fence, fence->seq, timeout);
238 DRM_ERROR("last signaled fence(0x%08X)\n",
239 rdev->fence_drv.last_seq);
240 }
241 return 0; 253 return 0;
242} 254}
243 255
@@ -333,7 +345,6 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
333 INIT_LIST_HEAD(&rdev->fence_drv.created); 345 INIT_LIST_HEAD(&rdev->fence_drv.created);
334 INIT_LIST_HEAD(&rdev->fence_drv.emited); 346 INIT_LIST_HEAD(&rdev->fence_drv.emited);
335 INIT_LIST_HEAD(&rdev->fence_drv.signaled); 347 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
336 rdev->fence_drv.count_timeout = 0;
337 init_waitqueue_head(&rdev->fence_drv.queue); 348 init_waitqueue_head(&rdev->fence_drv.queue);
338 rdev->fence_drv.initialized = true; 349 rdev->fence_drv.initialized = true;
339 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); 350 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
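
Taken together, the fence changes replace the old per-fence jiffies deadline with one driver-wide watchdog: last_seq/last_jiffies advance whenever the scratch register moves, and last_timeout burns down toward the lockup test. A condensed restatement of the state machine from radeon_fence_poll_locked() (field names are the real ones; the elapsed local is introduced here for clarity):

    seq = RREG32(rdev->fence_drv.scratch_reg);
    if (seq != rdev->fence_drv.last_seq) {
            /* progress: re-arm the watchdog budget */
            rdev->fence_drv.last_seq = seq;
            rdev->fence_drv.last_jiffies = jiffies;
            rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
    } else if (time_after(jiffies, rdev->fence_drv.last_jiffies)) {
            elapsed = jiffies - rdev->fence_drv.last_jiffies;
            if (time_after(rdev->fence_drv.last_timeout, elapsed))
                    rdev->fence_drv.last_timeout -= elapsed; /* keep waiting */
            else
                    rdev->fence_drv.last_timeout = 1; /* budget spent: suspect a lockup */
    }
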
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
deleted file mode 100644
index 3d4d84e078ac..000000000000
--- a/drivers/gpu/drm/radeon/radeon_fixed.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 */
24#ifndef RADEON_FIXED_H
25#define RADEON_FIXED_H
26
27typedef union rfixed {
28 u32 full;
29} fixed20_12;
30
31
32#define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */
33#define rfixed_const_half(A) (u32)(((A) << 12) + 2048)
34#define rfixed_const_666(A) (u32)(((A) << 12) + 2731)
35#define rfixed_const_8(A) (u32)(((A) << 12) + 3277)
36#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
37#define fixed_init(A) { .full = rfixed_const((A)) }
38#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
39#define rfixed_trunc(A) ((A).full >> 12)
40
41static inline u32 rfixed_floor(fixed20_12 A)
42{
43 u32 non_frac = rfixed_trunc(A);
44
45 return rfixed_const(non_frac);
46}
47
48static inline u32 rfixed_ceil(fixed20_12 A)
49{
50 u32 non_frac = rfixed_trunc(A);
51
52 if (A.full > rfixed_const(non_frac))
53 return rfixed_const(non_frac + 1);
54 else
55 return rfixed_const(non_frac);
56}
57
58static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
59{
60 u64 tmp = ((u64)A.full << 13);
61
62 do_div(tmp, B.full);
63 tmp += 1;
64 tmp /= 2;
65 return lower_32_bits(tmp);
66}
67#endif
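
The deleted header (its users now include drm_fixed.h instead) implements unsigned 20.12 fixed point: the low 12 bits hold the fraction, so rfixed_const(3) is 3 << 12 = 12288, and rfixed_div shifts the dividend by 13 rather than 12 to keep one guard bit, then adds 1 and halves to round to nearest. A standalone sketch of the same arithmetic — the names only mirror the header above, this is not the drm_fixed.h code:

#include <stdint.h>
#include <stdio.h>

/* 20.12 fixed point, as in the helpers above. */
static uint32_t fx_const(uint32_t a) { return a << 12; }
static uint32_t fx_trunc(uint32_t a) { return a >> 12; }

/* Divide with round-to-nearest: one guard bit, then +1 and halve. */
static uint32_t fx_div(uint32_t a, uint32_t b)
{
        uint64_t tmp = (uint64_t)a << 13;

        tmp /= b;
        tmp += 1;
        tmp /= 2;
        return (uint32_t)tmp;
}

int main(void)
{
        uint32_t ten = fx_const(10), three = fx_const(3);

        /* 10/3 = 3.333... -> 3.333 * 4096 ~= 13653 (0x3555) */
        printf("10/3 = 0x%x (integer part %u)\n",
               fx_div(ten, three), fx_trunc(fx_div(ten, three)));
        return 0;
}
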
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 1770d3c07fd0..e65b90317fab 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -173,7 +173,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
173 int i, j; 173 int i, j;
174 174
175 if (!rdev->gart.ready) { 175 if (!rdev->gart.ready) {
176 DRM_ERROR("trying to bind memory to unitialized GART !\n"); 176 WARN(1, "trying to bind memory to unitialized GART !\n");
177 return -EINVAL; 177 return -EINVAL;
178 } 178 }
179 t = offset / RADEON_GPU_PAGE_SIZE; 179 t = offset / RADEON_GPU_PAGE_SIZE;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ef92d147d8f0..a72a3ee5d69b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -44,6 +44,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
44 if (robj) { 44 if (robj) {
45 radeon_bo_unref(&robj); 45 radeon_bo_unref(&robj);
46 } 46 }
47
48 drm_gem_object_release(gobj);
49 kfree(gobj);
47} 50}
48 51
49int radeon_gem_object_create(struct radeon_device *rdev, int size, 52int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -158,8 +161,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
158 args->vram_visible = rdev->mc.real_vram_size; 161 args->vram_visible = rdev->mc.real_vram_size;
159 if (rdev->stollen_vga_memory) 162 if (rdev->stollen_vga_memory)
160 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); 163 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
161 if (rdev->fbdev_rbo) 164 args->vram_visible -= radeon_fbdev_total_size(rdev);
162 args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
163 args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 - 165 args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
164 RADEON_IB_POOL_SIZE*64*1024; 166 RADEON_IB_POOL_SIZE*64*1024;
165 return 0; 167 return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a212041e8b0b..059bfa4098d7 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -26,6 +26,7 @@
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include "drmP.h" 28#include "drmP.h"
29#include "drm_crtc_helper.h"
29#include "radeon_drm.h" 30#include "radeon_drm.h"
30#include "radeon_reg.h" 31#include "radeon_reg.h"
31#include "radeon.h" 32#include "radeon.h"
@@ -55,7 +56,7 @@ static void radeon_hotplug_work_func(struct work_struct *work)
55 radeon_connector_hotplug(connector); 56 radeon_connector_hotplug(connector);
56 } 57 }
57 /* Just fire off a uevent and let userspace tell us what to do */ 58 /* Just fire off a uevent and let userspace tell us what to do */
58 drm_sysfs_hotplug_event(dev); 59 drm_helper_hpd_irq_event(dev);
59} 60}
60 61
61void radeon_driver_irq_preinstall_kms(struct drm_device *dev) 62void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
@@ -67,6 +68,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
67 68
68 /* Disable *all* interrupts */ 69 /* Disable *all* interrupts */
69 rdev->irq.sw_int = false; 70 rdev->irq.sw_int = false;
71 rdev->irq.gui_idle = false;
70 for (i = 0; i < rdev->num_crtc; i++) 72 for (i = 0; i < rdev->num_crtc; i++)
71 rdev->irq.crtc_vblank_int[i] = false; 73 rdev->irq.crtc_vblank_int[i] = false;
72 for (i = 0; i < 6; i++) 74 for (i = 0; i < 6; i++)
@@ -96,6 +98,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
96 } 98 }
97 /* Disable *all* interrupts */ 99 /* Disable *all* interrupts */
98 rdev->irq.sw_int = false; 100 rdev->irq.sw_int = false;
101 rdev->irq.gui_idle = false;
99 for (i = 0; i < rdev->num_crtc; i++) 102 for (i = 0; i < rdev->num_crtc; i++)
100 rdev->irq.crtc_vblank_int[i] = false; 103 rdev->irq.crtc_vblank_int[i] = false;
101 for (i = 0; i < 6; i++) 104 for (i = 0; i < 6; i++)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index c633319f98ed..04068352ccd2 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -98,11 +98,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
98{ 98{
99 struct radeon_device *rdev = dev->dev_private; 99 struct radeon_device *rdev = dev->dev_private;
100 struct drm_radeon_info *info; 100 struct drm_radeon_info *info;
101 struct radeon_mode_info *minfo = &rdev->mode_info;
101 uint32_t *value_ptr; 102 uint32_t *value_ptr;
102 uint32_t value; 103 uint32_t value;
104 struct drm_crtc *crtc;
105 int i, found;
103 106
104 info = data; 107 info = data;
105 value_ptr = (uint32_t *)((unsigned long)info->value); 108 value_ptr = (uint32_t *)((unsigned long)info->value);
109 value = *value_ptr;
106 switch (info->request) { 110 switch (info->request) {
107 case RADEON_INFO_DEVICE_ID: 111 case RADEON_INFO_DEVICE_ID:
108 value = dev->pci_device; 112 value = dev->pci_device;
@@ -116,6 +120,20 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
116 case RADEON_INFO_ACCEL_WORKING: 120 case RADEON_INFO_ACCEL_WORKING:
117 value = rdev->accel_working; 121 value = rdev->accel_working;
118 break; 122 break;
123 case RADEON_INFO_CRTC_FROM_ID:
124 for (i = 0, found = 0; i < rdev->num_crtc; i++) {
125 crtc = (struct drm_crtc *)minfo->crtcs[i];
126 if (crtc && crtc->base.id == value) {
127 value = i;
128 found = 1;
129 break;
130 }
131 }
132 if (!found) {
133 DRM_DEBUG("unknown crtc id %d\n", value);
134 return -EINVAL;
135 }
136 break;
119 default: 137 default:
120 DRM_DEBUG("Invalid request %d\n", info->request); 138 DRM_DEBUG("Invalid request %d\n", info->request);
121 return -EINVAL; 139 return -EINVAL;
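
The new RADEON_INFO_CRTC_FROM_ID request lets userspace map a DRM CRTC object id to the driver's internal crtc index, which is what per-crtc queries want. A hedged libdrm-style usage sketch: the drmCommandWriteRead plumbing is standard libdrm, but treat the struct layout and the copy-back of the resolved index (which happens at the end of radeon_info_ioctl, below this hunk) as per the radeon_drm.h of this series.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>   /* via the pkg-config libdrm include path */

/* Resolve a DRM crtc object id to radeon's internal crtc index.
 * Returns the index on success, a negative errno on failure.
 */
static int radeon_crtc_index_from_id(int fd, uint32_t crtc_id)
{
        struct drm_radeon_info info;
        uint32_t value = crtc_id;
        int r;

        memset(&info, 0, sizeof(info));
        info.request = RADEON_INFO_CRTC_FROM_ID;
        info.value = (uintptr_t)&value;   /* kernel reads and rewrites it */

        r = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
        if (r)
                return r;
        return (int)value;                /* crtc index written back */
}
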
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 88865e38fe30..e1e5255396ac 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -26,7 +26,7 @@
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h> 27#include <drm/drm_crtc_helper.h>
28#include <drm/radeon_drm.h> 28#include <drm/radeon_drm.h>
29#include "radeon_fixed.h" 29#include <drm/drm_fixed.h>
30#include "radeon.h" 30#include "radeon.h"
31#include "atom.h" 31#include "atom.h"
32 32
@@ -314,6 +314,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
314 314
315 switch (mode) { 315 switch (mode) {
316 case DRM_MODE_DPMS_ON: 316 case DRM_MODE_DPMS_ON:
317 radeon_crtc->enabled = true;
318 /* adjust pm to dpms changes BEFORE enabling crtcs */
319 radeon_pm_compute_clocks(rdev);
317 if (radeon_crtc->crtc_id) 320 if (radeon_crtc->crtc_id)
318 WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask)); 321 WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
319 else { 322 else {
@@ -335,6 +338,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
335 RADEON_CRTC_DISP_REQ_EN_B)); 338 RADEON_CRTC_DISP_REQ_EN_B));
336 WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask); 339 WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
337 } 340 }
341 radeon_crtc->enabled = false;
342 /* adjust pm to dpms changes AFTER disabling crtcs */
343 radeon_pm_compute_clocks(rdev);
338 break; 344 break;
339 } 345 }
340} 346}
@@ -966,6 +972,12 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
966 struct drm_display_mode *mode, 972 struct drm_display_mode *mode,
967 struct drm_display_mode *adjusted_mode) 973 struct drm_display_mode *adjusted_mode)
968{ 974{
975 struct drm_device *dev = crtc->dev;
976 struct radeon_device *rdev = dev->dev_private;
977
978 /* adjust pm to upcoming mode change */
979 radeon_pm_compute_clocks(rdev);
980
969 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 981 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
970 return false; 982 return false;
971 return true; 983 return true;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 0274abe17ad9..5a13b3eeef19 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -116,8 +116,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
116 else 116 else
117 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 117 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
118 118
119 /* adjust pm to dpms change */
120 radeon_pm_compute_clocks(rdev);
121} 119}
122 120
123static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) 121static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
@@ -217,11 +215,6 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
217 struct drm_display_mode *adjusted_mode) 215 struct drm_display_mode *adjusted_mode)
218{ 216{
219 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 217 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
220 struct drm_device *dev = encoder->dev;
221 struct radeon_device *rdev = dev->dev_private;
222
223 /* adjust pm to upcoming mode change */
224 radeon_pm_compute_clocks(rdev);
225 218
226 /* set the active encoder to connector routing */ 219 /* set the active encoder to connector routing */
227 radeon_encoder_set_active_device(encoder); 220 radeon_encoder_set_active_device(encoder);
@@ -286,8 +279,6 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
286 else 279 else
287 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 280 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
288 281
289 /* adjust pm to dpms change */
290 radeon_pm_compute_clocks(rdev);
291} 282}
292 283
293static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) 284static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
@@ -474,8 +465,6 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
474 else 465 else
475 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 466 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
476 467
477 /* adjust pm to dpms change */
478 radeon_pm_compute_clocks(rdev);
479} 468}
480 469
481static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) 470static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
@@ -642,8 +631,6 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
642 else 631 else
643 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 632 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
644 633
645 /* adjust pm to dpms change */
646 radeon_pm_compute_clocks(rdev);
647} 634}
648 635
649static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) 636static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
@@ -852,8 +839,6 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
852 else 839 else
853 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 840 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
854 841
855 /* adjust pm to dpms change */
856 radeon_pm_compute_clocks(rdev);
857} 842}
858 843
859static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) 844static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5413fcd63086..67358baf28b2 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -34,11 +34,12 @@
34#include <drm_mode.h> 34#include <drm_mode.h>
35#include <drm_edid.h> 35#include <drm_edid.h>
36#include <drm_dp_helper.h> 36#include <drm_dp_helper.h>
37#include <drm_fixed.h>
37#include <linux/i2c.h> 38#include <linux/i2c.h>
38#include <linux/i2c-id.h> 39#include <linux/i2c-id.h>
39#include <linux/i2c-algo-bit.h> 40#include <linux/i2c-algo-bit.h>
40#include "radeon_fixed.h"
41 41
42struct radeon_bo;
42struct radeon_device; 43struct radeon_device;
43 44
44#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) 45#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
@@ -65,6 +66,16 @@ enum radeon_tv_std {
65 TV_STD_PAL_N, 66 TV_STD_PAL_N,
66}; 67};
67 68
69enum radeon_hpd_id {
70 RADEON_HPD_1 = 0,
71 RADEON_HPD_2,
72 RADEON_HPD_3,
73 RADEON_HPD_4,
74 RADEON_HPD_5,
75 RADEON_HPD_6,
76 RADEON_HPD_NONE = 0xff,
77};
78
68/* radeon gpio-based i2c 79/* radeon gpio-based i2c
69 * 1. "mask" reg and bits 80 * 1. "mask" reg and bits
70 * grabs the gpio pins for software use 81 * grabs the gpio pins for software use
@@ -84,7 +95,7 @@ struct radeon_i2c_bus_rec {
84 /* id used by atom */ 95 /* id used by atom */
85 uint8_t i2c_id; 96 uint8_t i2c_id;
86 /* id used by atom */ 97 /* id used by atom */
87 uint8_t hpd_id; 98 enum radeon_hpd_id hpd;
88 /* can be used with hw i2c engine */ 99 /* can be used with hw i2c engine */
89 bool hw_capable; 100 bool hw_capable;
90 /* uses multi-media i2c engine */ 101 /* uses multi-media i2c engine */
@@ -202,6 +213,8 @@ enum radeon_dvo_chip {
202 DVO_SIL1178, 213 DVO_SIL1178,
203}; 214};
204 215
216struct radeon_fbdev;
217
205struct radeon_mode_info { 218struct radeon_mode_info {
206 struct atom_context *atom_context; 219 struct atom_context *atom_context;
207 struct card_info *atom_card_info; 220 struct card_info *atom_card_info;
@@ -218,6 +231,9 @@ struct radeon_mode_info {
218 struct drm_property *tmds_pll_property; 231 struct drm_property *tmds_pll_property;
219 /* hardcoded DFP edid from BIOS */ 232 /* hardcoded DFP edid from BIOS */
220 struct edid *bios_hardcoded_edid; 233 struct edid *bios_hardcoded_edid;
234
235 /* pointer to fbdev info structure */
236 struct radeon_fbdev *rfbdev;
221}; 237};
222 238
223#define MAX_H_CODE_TIMING_LEN 32 239#define MAX_H_CODE_TIMING_LEN 32
@@ -339,6 +355,7 @@ struct radeon_encoder {
339 enum radeon_rmx_type rmx_type; 355 enum radeon_rmx_type rmx_type;
340 struct drm_display_mode native_mode; 356 struct drm_display_mode native_mode;
341 void *enc_priv; 357 void *enc_priv;
358 int audio_polling_active;
342 int hdmi_offset; 359 int hdmi_offset;
343 int hdmi_config_offset; 360 int hdmi_config_offset;
344 int hdmi_audio_workaround; 361 int hdmi_audio_workaround;
@@ -363,16 +380,6 @@ struct radeon_gpio_rec {
363 u32 mask; 380 u32 mask;
364}; 381};
365 382
366enum radeon_hpd_id {
367 RADEON_HPD_NONE = 0,
368 RADEON_HPD_1,
369 RADEON_HPD_2,
370 RADEON_HPD_3,
371 RADEON_HPD_4,
372 RADEON_HPD_5,
373 RADEON_HPD_6,
374};
375
376struct radeon_hpd { 383struct radeon_hpd {
377 enum radeon_hpd_id hpd; 384 enum radeon_hpd_id hpd;
378 u8 plugged_state; 385 u8 plugged_state;
@@ -532,11 +539,10 @@ extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
532 u16 blue, int regno); 539 u16 blue, int regno);
533extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 540extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
534 u16 *blue, int regno); 541 u16 *blue, int regno);
535struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, 542void radeon_framebuffer_init(struct drm_device *dev,
536 struct drm_mode_fb_cmd *mode_cmd, 543 struct radeon_framebuffer *rfb,
537 struct drm_gem_object *obj); 544 struct drm_mode_fb_cmd *mode_cmd,
538 545 struct drm_gem_object *obj);
539int radeonfb_probe(struct drm_device *dev);
540 546
541int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); 547int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
542bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev); 548bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
@@ -575,4 +581,13 @@ void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
575void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, 581void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
576 struct drm_display_mode *mode, 582 struct drm_display_mode *mode,
577 struct drm_display_mode *adjusted_mode); 583 struct drm_display_mode *adjusted_mode);
584
585/* fbdev layer */
586int radeon_fbdev_init(struct radeon_device *rdev);
587void radeon_fbdev_fini(struct radeon_device *rdev);
588void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
589int radeon_fbdev_total_size(struct radeon_device *rdev);
590bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
591
592void radeon_fb_output_poll_changed(struct radeon_device *rdev);
578#endif 593#endif
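
Note that moving the radeon_hpd_id enum earlier in the header also renumbers it: RADEON_HPD_1 is now 0 so valid ids can index per-pin arrays directly, while RADEON_HPD_NONE is pushed out to 0xff as an explicit sentinel outside the index range. A trivial sketch of why the sentinel has to live outside that range (hypothetical table, not driver code):

#include <stdio.h>

enum hpd_id { HPD_1 = 0, HPD_2, HPD_3, HPD_4, HPD_5, HPD_6, HPD_NONE = 0xff };

int main(void)
{
        /* valid ids index the table; HPD_NONE deliberately cannot */
        static const char *pins[6] = { "1", "2", "3", "4", "5", "6" };
        enum hpd_id id = HPD_2;

        if (id != HPD_NONE)
                printf("hpd pin %s\n", pins[id]);
        return 0;
}
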
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 122774742bd5..d5b9373ce06c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -112,9 +112,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
112 112
113 radeon_ttm_placement_from_domain(bo, domain); 113 radeon_ttm_placement_from_domain(bo, domain);
114 /* Kernel allocations are uninterruptible */ 114 /* Kernel allocations are uninterruptible */
115 mutex_lock(&rdev->vram_mutex);
115 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, 116 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
116 &bo->placement, 0, 0, !kernel, NULL, size, 117 &bo->placement, 0, 0, !kernel, NULL, size,
117 &radeon_ttm_bo_destroy); 118 &radeon_ttm_bo_destroy);
119 mutex_unlock(&rdev->vram_mutex);
118 if (unlikely(r != 0)) { 120 if (unlikely(r != 0)) {
119 if (r != -ERESTARTSYS) 121 if (r != -ERESTARTSYS)
120 dev_err(rdev->dev, 122 dev_err(rdev->dev,
@@ -166,11 +168,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo)
166void radeon_bo_unref(struct radeon_bo **bo) 168void radeon_bo_unref(struct radeon_bo **bo)
167{ 169{
168 struct ttm_buffer_object *tbo; 170 struct ttm_buffer_object *tbo;
171 struct radeon_device *rdev;
169 172
170 if ((*bo) == NULL) 173 if ((*bo) == NULL)
171 return; 174 return;
175 rdev = (*bo)->rdev;
172 tbo = &((*bo)->tbo); 176 tbo = &((*bo)->tbo);
177 mutex_lock(&rdev->vram_mutex);
173 ttm_bo_unref(&tbo); 178 ttm_bo_unref(&tbo);
179 mutex_unlock(&rdev->vram_mutex);
174 if (tbo == NULL) 180 if (tbo == NULL)
175 *bo = NULL; 181 *bo = NULL;
176} 182}
@@ -192,7 +198,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
192 } 198 }
193 for (i = 0; i < bo->placement.num_placement; i++) 199 for (i = 0; i < bo->placement.num_placement; i++)
194 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 200 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
195 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 201 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
196 if (likely(r == 0)) { 202 if (likely(r == 0)) {
197 bo->pin_count = 1; 203 bo->pin_count = 1;
198 if (gpu_addr != NULL) 204 if (gpu_addr != NULL)
@@ -216,7 +222,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
216 return 0; 222 return 0;
217 for (i = 0; i < bo->placement.num_placement; i++) 223 for (i = 0; i < bo->placement.num_placement; i++)
218 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 224 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
219 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 225 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
220 if (unlikely(r != 0)) 226 if (unlikely(r != 0))
221 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); 227 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
222 return r; 228 return r;
@@ -295,6 +301,7 @@ int radeon_bo_list_reserve(struct list_head *head)
295 r = radeon_bo_reserve(lobj->bo, false); 301 r = radeon_bo_reserve(lobj->bo, false);
296 if (unlikely(r != 0)) 302 if (unlikely(r != 0))
297 return r; 303 return r;
304 lobj->reserved = true;
298 } 305 }
299 return 0; 306 return 0;
300} 307}
@@ -305,7 +312,7 @@ void radeon_bo_list_unreserve(struct list_head *head)
305 312
306 list_for_each_entry(lobj, head, list) { 313 list_for_each_entry(lobj, head, list) {
307 /* only unreserve object we successfully reserved */ 314 /* only unreserve object we successfully reserved */
308 if (radeon_bo_is_reserved(lobj->bo)) 315 if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
309 radeon_bo_unreserve(lobj->bo); 316 radeon_bo_unreserve(lobj->bo);
310 } 317 }
311} 318}
@@ -316,6 +323,9 @@ int radeon_bo_list_validate(struct list_head *head)
316 struct radeon_bo *bo; 323 struct radeon_bo *bo;
317 int r; 324 int r;
318 325
326 list_for_each_entry(lobj, head, list) {
327 lobj->reserved = false;
328 }
319 r = radeon_bo_list_reserve(head); 329 r = radeon_bo_list_reserve(head);
320 if (unlikely(r != 0)) { 330 if (unlikely(r != 0)) {
321 return r; 331 return r;
@@ -331,7 +341,7 @@ int radeon_bo_list_validate(struct list_head *head)
331 lobj->rdomain); 341 lobj->rdomain);
332 } 342 }
333 r = ttm_bo_validate(&bo->tbo, &bo->placement, 343 r = ttm_bo_validate(&bo->tbo, &bo->placement,
334 true, false); 344 true, false, false);
335 if (unlikely(r)) 345 if (unlikely(r))
336 return r; 346 return r;
337 } 347 }
@@ -499,11 +509,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
499 radeon_bo_check_tiling(rbo, 0, 1); 509 radeon_bo_check_tiling(rbo, 0, 1);
500} 510}
501 511
502void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) 512int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
503{ 513{
514 struct radeon_device *rdev;
504 struct radeon_bo *rbo; 515 struct radeon_bo *rbo;
516 unsigned long offset, size;
517 int r;
518
505 if (!radeon_ttm_bo_is_radeon_bo(bo)) 519 if (!radeon_ttm_bo_is_radeon_bo(bo))
506 return; 520 return 0;
507 rbo = container_of(bo, struct radeon_bo, tbo); 521 rbo = container_of(bo, struct radeon_bo, tbo);
508 radeon_bo_check_tiling(rbo, 0, 0); 522 radeon_bo_check_tiling(rbo, 0, 0);
523 rdev = rbo->rdev;
524 if (bo->mem.mem_type == TTM_PL_VRAM) {
525 size = bo->mem.num_pages << PAGE_SHIFT;
526 offset = bo->mem.mm_node->start << PAGE_SHIFT;
527 if ((offset + size) > rdev->mc.visible_vram_size) {
528 /* hurrah, the memory is not visible! */
529 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
530 rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
531 r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
532 if (unlikely(r != 0))
533 return r;
534 offset = bo->mem.mm_node->start << PAGE_SHIFT;
535 /* this should not happen */
536 if ((offset + size) > rdev->mc.visible_vram_size)
537 return -EINVAL;
538 }
539 }
540 return 0;
509} 541}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 7ab43de1e244..353998dc2c03 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
168 bool force_drop); 168 bool force_drop);
169extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, 169extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
170 struct ttm_mem_reg *mem); 170 struct ttm_mem_reg *mem);
171extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); 171extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
172extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); 172extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
173#endif 173#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index a4b57493aa78..a8d162c6f829 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -23,164 +23,122 @@
23#include "drmP.h" 23#include "drmP.h"
24#include "radeon.h" 24#include "radeon.h"
25#include "avivod.h" 25#include "avivod.h"
26#ifdef CONFIG_ACPI
27#include <linux/acpi.h>
28#endif
29#include <linux/power_supply.h>
26 30
27#define RADEON_IDLE_LOOP_MS 100 31#define RADEON_IDLE_LOOP_MS 100
28#define RADEON_RECLOCK_DELAY_MS 200 32#define RADEON_RECLOCK_DELAY_MS 200
29#define RADEON_WAIT_VBLANK_TIMEOUT 200 33#define RADEON_WAIT_VBLANK_TIMEOUT 200
34#define RADEON_WAIT_IDLE_TIMEOUT 200
30 35
36static void radeon_dynpm_idle_work_handler(struct work_struct *work);
37static int radeon_debugfs_pm_init(struct radeon_device *rdev);
38static bool radeon_pm_in_vbl(struct radeon_device *rdev);
31static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); 39static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
32static void radeon_pm_set_clocks_locked(struct radeon_device *rdev); 40static void radeon_pm_update_profile(struct radeon_device *rdev);
33static void radeon_pm_set_clocks(struct radeon_device *rdev); 41static void radeon_pm_set_clocks(struct radeon_device *rdev);
34static void radeon_pm_idle_work_handler(struct work_struct *work);
35static int radeon_debugfs_pm_init(struct radeon_device *rdev);
36
37static const char *pm_state_names[4] = {
38 "PM_STATE_DISABLED",
39 "PM_STATE_MINIMUM",
40 "PM_STATE_PAUSED",
41 "PM_STATE_ACTIVE"
42};
43 42
44static const char *pm_state_types[5] = { 43#define ACPI_AC_CLASS "ac_adapter"
45 "Default",
46 "Powersave",
47 "Battery",
48 "Balanced",
49 "Performance",
50};
51 44
52static void radeon_print_power_mode_info(struct radeon_device *rdev) 45#ifdef CONFIG_ACPI
46static int radeon_acpi_event(struct notifier_block *nb,
47 unsigned long val,
48 void *data)
53{ 49{
54 int i, j; 50 struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
55 bool is_default; 51 struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
56 52
57 DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states); 53 if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
58 for (i = 0; i < rdev->pm.num_power_states; i++) { 54 if (power_supply_is_system_supplied() > 0)
59 if (rdev->pm.default_power_state == &rdev->pm.power_state[i]) 55 DRM_DEBUG("pm: AC\n");
60 is_default = true;
61 else 56 else
62 is_default = false; 57 DRM_DEBUG("pm: DC\n");
63 DRM_INFO("State %d %s %s\n", i, 58
64 pm_state_types[rdev->pm.power_state[i].type], 59 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
65 is_default ? "(default)" : ""); 60 if (rdev->pm.profile == PM_PROFILE_AUTO) {
66 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) 61 mutex_lock(&rdev->pm.mutex);
67 DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes); 62 radeon_pm_update_profile(rdev);
68 DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes); 63 radeon_pm_set_clocks(rdev);
69 for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) { 64 mutex_unlock(&rdev->pm.mutex);
70 if (rdev->flags & RADEON_IS_IGP) 65 }
71 DRM_INFO("\t\t%d engine: %d\n",
72 j,
73 rdev->pm.power_state[i].clock_info[j].sclk * 10);
74 else
75 DRM_INFO("\t\t%d engine/memory: %d/%d\n",
76 j,
77 rdev->pm.power_state[i].clock_info[j].sclk * 10,
78 rdev->pm.power_state[i].clock_info[j].mclk * 10);
79 } 66 }
80 } 67 }
68
69 return NOTIFY_OK;
81} 70}
71#endif
82 72
83static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev, 73static void radeon_pm_update_profile(struct radeon_device *rdev)
84 enum radeon_pm_state_type type)
85{ 74{
86 int i, j; 75 switch (rdev->pm.profile) {
87 enum radeon_pm_state_type wanted_types[2]; 76 case PM_PROFILE_DEFAULT:
88 int wanted_count; 77 rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
89
90 switch (type) {
91 case POWER_STATE_TYPE_DEFAULT:
92 default:
93 return rdev->pm.default_power_state;
94 case POWER_STATE_TYPE_POWERSAVE:
95 if (rdev->flags & RADEON_IS_MOBILITY) {
96 wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
97 wanted_types[1] = POWER_STATE_TYPE_BATTERY;
98 wanted_count = 2;
99 } else {
100 wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
101 wanted_count = 1;
102 }
103 break; 78 break;
104 case POWER_STATE_TYPE_BATTERY: 79 case PM_PROFILE_AUTO:
105 if (rdev->flags & RADEON_IS_MOBILITY) { 80 if (power_supply_is_system_supplied() > 0) {
106 wanted_types[0] = POWER_STATE_TYPE_BATTERY; 81 if (rdev->pm.active_crtc_count > 1)
107 wanted_types[1] = POWER_STATE_TYPE_POWERSAVE; 82 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
108 wanted_count = 2; 83 else
84 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
109 } else { 85 } else {
110 wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE; 86 if (rdev->pm.active_crtc_count > 1)
111 wanted_count = 1; 87 rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
88 else
89 rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
112 } 90 }
113 break; 91 break;
114 case POWER_STATE_TYPE_BALANCED: 92 case PM_PROFILE_LOW:
115 case POWER_STATE_TYPE_PERFORMANCE: 93 if (rdev->pm.active_crtc_count > 1)
116 wanted_types[0] = type; 94 rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
117 wanted_count = 1; 95 else
96 rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
118 break; 97 break;
119 } 98 case PM_PROFILE_HIGH:
120 99 if (rdev->pm.active_crtc_count > 1)
121 for (i = 0; i < wanted_count; i++) { 100 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
122 for (j = 0; j < rdev->pm.num_power_states; j++) {
123 if (rdev->pm.power_state[j].type == wanted_types[i])
124 return &rdev->pm.power_state[j];
125 }
126 }
127
128 return rdev->pm.default_power_state;
129}
130
131static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
132 struct radeon_power_state *power_state,
133 enum radeon_pm_clock_mode_type type)
134{
135 switch (type) {
136 case POWER_MODE_TYPE_DEFAULT:
137 default:
138 return power_state->default_clock_mode;
139 case POWER_MODE_TYPE_LOW:
140 return &power_state->clock_info[0];
141 case POWER_MODE_TYPE_MID:
142 if (power_state->num_clock_modes > 2)
143 return &power_state->clock_info[1];
144 else 101 else
145 return &power_state->clock_info[0]; 102 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
146 break; 103 break;
147 case POWER_MODE_TYPE_HIGH:
148 return &power_state->clock_info[power_state->num_clock_modes - 1];
149 } 104 }
150 105
106 if (rdev->pm.active_crtc_count == 0) {
107 rdev->pm.requested_power_state_index =
108 rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
109 rdev->pm.requested_clock_mode_index =
110 rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
111 } else {
112 rdev->pm.requested_power_state_index =
113 rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
114 rdev->pm.requested_clock_mode_index =
115 rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
116 }
151} 117}
152 118
153static void radeon_get_power_state(struct radeon_device *rdev, 119static void radeon_unmap_vram_bos(struct radeon_device *rdev)
154 enum radeon_pm_action action)
155{ 120{
156 switch (action) { 121 struct radeon_bo *bo, *n;
157 case PM_ACTION_MINIMUM: 122
158 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY); 123 if (list_empty(&rdev->gem.objects))
159 rdev->pm.requested_clock_mode =
160 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
161 break;
162 case PM_ACTION_DOWNCLOCK:
163 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
164 rdev->pm.requested_clock_mode =
165 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
166 break;
167 case PM_ACTION_UPCLOCK:
168 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
169 rdev->pm.requested_clock_mode =
170 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
171 break;
172 case PM_ACTION_NONE:
173 default:
174 DRM_ERROR("Requested mode for not defined action\n");
175 return; 124 return;
125
126 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
127 if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
128 ttm_bo_unmap_virtual(&bo->tbo);
176 } 129 }
177 DRM_INFO("Requested: e: %d m: %d p: %d\n", 130
178 rdev->pm.requested_clock_mode->sclk, 131 if (rdev->gart.table.vram.robj)
179 rdev->pm.requested_clock_mode->mclk, 132 ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo);
180 rdev->pm.requested_power_state->non_clock_info.pcie_lanes); 133
134 if (rdev->stollen_vga_memory)
135 ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo);
136
137 if (rdev->r600_blit.shader_obj)
138 ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo);
181} 139}
182 140
183static inline void radeon_sync_with_vblank(struct radeon_device *rdev) 141static void radeon_sync_with_vblank(struct radeon_device *rdev)
184{ 142{
185 if (rdev->pm.active_crtcs) { 143 if (rdev->pm.active_crtcs) {
186 rdev->pm.vblank_sync = false; 144 rdev->pm.vblank_sync = false;
@@ -192,73 +150,332 @@ static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
192 150
193static void radeon_set_power_state(struct radeon_device *rdev) 151static void radeon_set_power_state(struct radeon_device *rdev)
194{ 152{
195 /* if *_clock_mode are the same, *_power_state are as well */ 153 u32 sclk, mclk;
196 if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode) 154
155 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
156 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
197 return; 157 return;
198 158
199 DRM_INFO("Setting: e: %d m: %d p: %d\n", 159 if (radeon_gui_idle(rdev)) {
200 rdev->pm.requested_clock_mode->sclk, 160 sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
201 rdev->pm.requested_clock_mode->mclk, 161 clock_info[rdev->pm.requested_clock_mode_index].sclk;
202 rdev->pm.requested_power_state->non_clock_info.pcie_lanes); 162 if (sclk > rdev->clock.default_sclk)
203 163 sclk = rdev->clock.default_sclk;
204 /* set pcie lanes */ 164
205 /* TODO */ 165 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
206 166 clock_info[rdev->pm.requested_clock_mode_index].mclk;
207 /* set voltage */ 167 if (mclk > rdev->clock.default_mclk)
208 /* TODO */ 168 mclk = rdev->clock.default_mclk;
209 169
210 /* set engine clock */ 170 /* voltage, pcie lanes, etc.*/
211 radeon_sync_with_vblank(rdev); 171 radeon_pm_misc(rdev);
212 radeon_pm_debug_check_in_vbl(rdev, false); 172
213 radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk); 173 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
214 radeon_pm_debug_check_in_vbl(rdev, true); 174 radeon_sync_with_vblank(rdev);
215 175
216#if 0 176 if (!radeon_pm_in_vbl(rdev))
217 /* set memory clock */ 177 return;
218 if (rdev->asic->set_memory_clock) { 178
219 radeon_sync_with_vblank(rdev); 179 radeon_pm_prepare(rdev);
220 radeon_pm_debug_check_in_vbl(rdev, false); 180 /* set engine clock */
221 radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk); 181 if (sclk != rdev->pm.current_sclk) {
222 radeon_pm_debug_check_in_vbl(rdev, true); 182 radeon_pm_debug_check_in_vbl(rdev, false);
183 radeon_set_engine_clock(rdev, sclk);
184 radeon_pm_debug_check_in_vbl(rdev, true);
185 rdev->pm.current_sclk = sclk;
186 DRM_DEBUG("Setting: e: %d\n", sclk);
187 }
188
189 /* set memory clock */
190 if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
191 radeon_pm_debug_check_in_vbl(rdev, false);
192 radeon_set_memory_clock(rdev, mclk);
193 radeon_pm_debug_check_in_vbl(rdev, true);
194 rdev->pm.current_mclk = mclk;
195 DRM_DEBUG("Setting: m: %d\n", mclk);
196 }
197 radeon_pm_finish(rdev);
198 } else {
199 /* set engine clock */
200 if (sclk != rdev->pm.current_sclk) {
201 radeon_sync_with_vblank(rdev);
202 radeon_pm_prepare(rdev);
203 radeon_set_engine_clock(rdev, sclk);
204 radeon_pm_finish(rdev);
205 rdev->pm.current_sclk = sclk;
206 DRM_DEBUG("Setting: e: %d\n", sclk);
207 }
208 /* set memory clock */
209 if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
210 radeon_sync_with_vblank(rdev);
211 radeon_pm_prepare(rdev);
212 radeon_set_memory_clock(rdev, mclk);
213 radeon_pm_finish(rdev);
214 rdev->pm.current_mclk = mclk;
215 DRM_DEBUG("Setting: m: %d\n", mclk);
216 }
217 }
218
219 rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
220 rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
221 } else
222 DRM_DEBUG("pm: GUI not idle!!!\n");
223}
224
225static void radeon_pm_set_clocks(struct radeon_device *rdev)
226{
227 int i;
228
229 mutex_lock(&rdev->ddev->struct_mutex);
230 mutex_lock(&rdev->vram_mutex);
231 mutex_lock(&rdev->cp.mutex);
232
233 /* the gui idle int seems to have issues on older chips */
234 if (rdev->family >= CHIP_R600) {
235 if (rdev->irq.installed) {
236 /* wait for GPU idle */
237 rdev->pm.gui_idle = false;
238 rdev->irq.gui_idle = true;
239 radeon_irq_set(rdev);
240 wait_event_interruptible_timeout(
241 rdev->irq.idle_queue, rdev->pm.gui_idle,
242 msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
243 rdev->irq.gui_idle = false;
244 radeon_irq_set(rdev);
245 }
246 } else {
247 if (rdev->cp.ready) {
248 struct radeon_fence *fence;
249 radeon_ring_alloc(rdev, 64);
250 radeon_fence_create(rdev, &fence);
251 radeon_fence_emit(rdev, fence);
252 radeon_ring_commit(rdev);
253 radeon_fence_wait(fence, false);
254 radeon_fence_unref(&fence);
255 }
223 } 256 }
224#endif 257 radeon_unmap_vram_bos(rdev);
258
259 if (rdev->irq.installed) {
260 for (i = 0; i < rdev->num_crtc; i++) {
261 if (rdev->pm.active_crtcs & (1 << i)) {
262 rdev->pm.req_vblank |= (1 << i);
263 drm_vblank_get(rdev->ddev, i);
264 }
265 }
266 }
267
268 radeon_set_power_state(rdev);
269
270 if (rdev->irq.installed) {
271 for (i = 0; i < rdev->num_crtc; i++) {
272 if (rdev->pm.req_vblank & (1 << i)) {
273 rdev->pm.req_vblank &= ~(1 << i);
274 drm_vblank_put(rdev->ddev, i);
275 }
276 }
277 }
278
279 /* update display watermarks based on new power state */
280 radeon_update_bandwidth_info(rdev);
281 if (rdev->pm.active_crtc_count)
282 radeon_bandwidth_update(rdev);
283
284 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
285
286 mutex_unlock(&rdev->cp.mutex);
287 mutex_unlock(&rdev->vram_mutex);
288 mutex_unlock(&rdev->ddev->struct_mutex);
289}
290
291static ssize_t radeon_get_pm_profile(struct device *dev,
292 struct device_attribute *attr,
293 char *buf)
294{
295 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
296 struct radeon_device *rdev = ddev->dev_private;
297 int cp = rdev->pm.profile;
298
299 return snprintf(buf, PAGE_SIZE, "%s\n",
300 (cp == PM_PROFILE_AUTO) ? "auto" :
301 (cp == PM_PROFILE_LOW) ? "low" :
302 (cp == PM_PROFILE_HIGH) ? "high" : "default");
303}
304
305static ssize_t radeon_set_pm_profile(struct device *dev,
306 struct device_attribute *attr,
307 const char *buf,
308 size_t count)
309{
310 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
311 struct radeon_device *rdev = ddev->dev_private;
312
313 mutex_lock(&rdev->pm.mutex);
314 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
315 if (strncmp("default", buf, strlen("default")) == 0)
316 rdev->pm.profile = PM_PROFILE_DEFAULT;
317 else if (strncmp("auto", buf, strlen("auto")) == 0)
318 rdev->pm.profile = PM_PROFILE_AUTO;
319 else if (strncmp("low", buf, strlen("low")) == 0)
320 rdev->pm.profile = PM_PROFILE_LOW;
321 else if (strncmp("high", buf, strlen("high")) == 0)
322 rdev->pm.profile = PM_PROFILE_HIGH;
323 else {
324 DRM_ERROR("invalid power profile!\n");
325 goto fail;
326 }
327 radeon_pm_update_profile(rdev);
328 radeon_pm_set_clocks(rdev);
329 }
330fail:
331 mutex_unlock(&rdev->pm.mutex);
332
333 return count;
334}
335
336static ssize_t radeon_get_pm_method(struct device *dev,
337 struct device_attribute *attr,
338 char *buf)
339{
340 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
341 struct radeon_device *rdev = ddev->dev_private;
342 int pm = rdev->pm.pm_method;
343
344 return snprintf(buf, PAGE_SIZE, "%s\n",
345 (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
346}
347
348static ssize_t radeon_set_pm_method(struct device *dev,
349 struct device_attribute *attr,
350 const char *buf,
351 size_t count)
352{
353 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
354 struct radeon_device *rdev = ddev->dev_private;
355
356
357 if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
358 mutex_lock(&rdev->pm.mutex);
359 rdev->pm.pm_method = PM_METHOD_DYNPM;
360 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
361 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
362 mutex_unlock(&rdev->pm.mutex);
363 } else if (strncmp("profile", buf, strlen("profile")) == 0) {
364 mutex_lock(&rdev->pm.mutex);
365 rdev->pm.pm_method = PM_METHOD_PROFILE;
366 /* disable dynpm */
367 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
368 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
369 cancel_delayed_work(&rdev->pm.dynpm_idle_work);
370 mutex_unlock(&rdev->pm.mutex);
371 } else {
372 DRM_ERROR("invalid power method!\n");
373 goto fail;
374 }
375 radeon_pm_compute_clocks(rdev);
376fail:
377 return count;
378}
379
380static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
381static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
225 382
226 rdev->pm.current_power_state = rdev->pm.requested_power_state; 383void radeon_pm_suspend(struct radeon_device *rdev)
227 rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode; 384{
385 mutex_lock(&rdev->pm.mutex);
386 cancel_delayed_work(&rdev->pm.dynpm_idle_work);
387 rdev->pm.current_power_state_index = -1;
388 rdev->pm.current_clock_mode_index = -1;
389 rdev->pm.current_sclk = 0;
390 rdev->pm.current_mclk = 0;
391 mutex_unlock(&rdev->pm.mutex);
392}
393
394void radeon_pm_resume(struct radeon_device *rdev)
395{
396 radeon_pm_compute_clocks(rdev);
228} 397}
229 398
230int radeon_pm_init(struct radeon_device *rdev) 399int radeon_pm_init(struct radeon_device *rdev)
231{ 400{
232 rdev->pm.state = PM_STATE_DISABLED; 401 int ret;
233 rdev->pm.planned_action = PM_ACTION_NONE; 402 /* default to profile method */
234 rdev->pm.downclocked = false; 403 rdev->pm.pm_method = PM_METHOD_PROFILE;
404 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
405 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
406 rdev->pm.dynpm_can_upclock = true;
407 rdev->pm.dynpm_can_downclock = true;
408 rdev->pm.current_sclk = 0;
409 rdev->pm.current_mclk = 0;
235 410
236 if (rdev->bios) { 411 if (rdev->bios) {
237 if (rdev->is_atom_bios) 412 if (rdev->is_atom_bios)
238 radeon_atombios_get_power_modes(rdev); 413 radeon_atombios_get_power_modes(rdev);
239 else 414 else
240 radeon_combios_get_power_modes(rdev); 415 radeon_combios_get_power_modes(rdev);
241 radeon_print_power_mode_info(rdev); 416 radeon_pm_init_profile(rdev);
417 rdev->pm.current_power_state_index = -1;
418 rdev->pm.current_clock_mode_index = -1;
242 } 419 }
243 420
244 if (radeon_debugfs_pm_init(rdev)) { 421 if (rdev->pm.num_power_states > 1) {
245 DRM_ERROR("Failed to register debugfs file for PM!\n"); 422 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
246 } 423 mutex_lock(&rdev->pm.mutex);
424 rdev->pm.profile = PM_PROFILE_DEFAULT;
425 radeon_pm_update_profile(rdev);
426 radeon_pm_set_clocks(rdev);
427 mutex_unlock(&rdev->pm.mutex);
428 }
429
430 /* where's the best place to put these? */
431 ret = device_create_file(rdev->dev, &dev_attr_power_profile);
432 if (ret)
433 DRM_ERROR("failed to create device file for power profile\n");
434 ret = device_create_file(rdev->dev, &dev_attr_power_method);
435 if (ret)
436 DRM_ERROR("failed to create device file for power method\n");
437
438#ifdef CONFIG_ACPI
439 rdev->acpi_nb.notifier_call = radeon_acpi_event;
440 register_acpi_notifier(&rdev->acpi_nb);
441#endif
442 INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
247 443
248 INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler); 444 if (radeon_debugfs_pm_init(rdev)) {
445 DRM_ERROR("Failed to register debugfs file for PM!\n");
446 }
249 447
250 if (radeon_dynpm != -1 && radeon_dynpm) { 448 DRM_INFO("radeon: power management initialized\n");
251 rdev->pm.state = PM_STATE_PAUSED;
252 DRM_INFO("radeon: dynamic power management enabled\n");
253 } 449 }
254 450
255 DRM_INFO("radeon: power management initialized\n");
256
257 return 0; 451 return 0;
258} 452}
259 453
260void radeon_pm_fini(struct radeon_device *rdev) 454void radeon_pm_fini(struct radeon_device *rdev)
261{ 455{
456 if (rdev->pm.num_power_states > 1) {
457 mutex_lock(&rdev->pm.mutex);
458 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
459 rdev->pm.profile = PM_PROFILE_DEFAULT;
460 radeon_pm_update_profile(rdev);
461 radeon_pm_set_clocks(rdev);
462 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
463 /* cancel work */
464 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
465 /* reset default clocks */
466 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
467 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
468 radeon_pm_set_clocks(rdev);
469 }
470 mutex_unlock(&rdev->pm.mutex);
471
472 device_remove_file(rdev->dev, &dev_attr_power_profile);
473 device_remove_file(rdev->dev, &dev_attr_power_method);
474#ifdef CONFIG_ACPI
475 unregister_acpi_notifier(&rdev->acpi_nb);
476#endif
477 }
478
262 if (rdev->pm.i2c_bus) 479 if (rdev->pm.i2c_bus)
263 radeon_i2c_destroy(rdev->pm.i2c_bus); 480 radeon_i2c_destroy(rdev->pm.i2c_bus);
264} 481}
@@ -266,146 +483,167 @@ void radeon_pm_fini(struct radeon_device *rdev)
266void radeon_pm_compute_clocks(struct radeon_device *rdev) 483void radeon_pm_compute_clocks(struct radeon_device *rdev)
267{ 484{
268 struct drm_device *ddev = rdev->ddev; 485 struct drm_device *ddev = rdev->ddev;
269 struct drm_connector *connector; 486 struct drm_crtc *crtc;
270 struct radeon_crtc *radeon_crtc; 487 struct radeon_crtc *radeon_crtc;
271 int count = 0;
272 488
273 if (rdev->pm.state == PM_STATE_DISABLED) 489 if (rdev->pm.num_power_states < 2)
274 return; 490 return;
275 491
276 mutex_lock(&rdev->pm.mutex); 492 mutex_lock(&rdev->pm.mutex);
277 493
278 rdev->pm.active_crtcs = 0; 494 rdev->pm.active_crtcs = 0;
279 list_for_each_entry(connector, 495 rdev->pm.active_crtc_count = 0;
280 &ddev->mode_config.connector_list, head) { 496 list_for_each_entry(crtc,
281 if (connector->encoder && 497 &ddev->mode_config.crtc_list, head) {
282 connector->encoder->crtc && 498 radeon_crtc = to_radeon_crtc(crtc);
283 connector->dpms != DRM_MODE_DPMS_OFF) { 499 if (radeon_crtc->enabled) {
284 radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
285 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); 500 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
286 ++count; 501 rdev->pm.active_crtc_count++;
287 } 502 }
288 } 503 }
289 504
290 if (count > 1) { 505 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
291 if (rdev->pm.state == PM_STATE_ACTIVE) { 506 radeon_pm_update_profile(rdev);
292 cancel_delayed_work(&rdev->pm.idle_work); 507 radeon_pm_set_clocks(rdev);
293 508 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
294 rdev->pm.state = PM_STATE_PAUSED; 509 if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
295 rdev->pm.planned_action = PM_ACTION_UPCLOCK; 510 if (rdev->pm.active_crtc_count > 1) {
296 if (rdev->pm.downclocked) 511 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
297 radeon_pm_set_clocks(rdev); 512 cancel_delayed_work(&rdev->pm.dynpm_idle_work);
298 513
299 DRM_DEBUG("radeon: dynamic power management deactivated\n"); 514 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
300 } 515 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
301 } else if (count == 1) { 516 radeon_pm_get_dynpm_state(rdev);
302 /* TODO: Increase clocks if needed for current mode */ 517 radeon_pm_set_clocks(rdev);
303 518
304 if (rdev->pm.state == PM_STATE_MINIMUM) { 519 DRM_DEBUG("radeon: dynamic power management deactivated\n");
305 rdev->pm.state = PM_STATE_ACTIVE; 520 }
306 rdev->pm.planned_action = PM_ACTION_UPCLOCK; 521 } else if (rdev->pm.active_crtc_count == 1) {
307 radeon_pm_set_clocks(rdev); 522 /* TODO: Increase clocks if needed for current mode */
308 523
309 queue_delayed_work(rdev->wq, &rdev->pm.idle_work, 524 if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
310 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 525 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
311 } 526 rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
312 else if (rdev->pm.state == PM_STATE_PAUSED) { 527 radeon_pm_get_dynpm_state(rdev);
313 rdev->pm.state = PM_STATE_ACTIVE; 528 radeon_pm_set_clocks(rdev);
314 queue_delayed_work(rdev->wq, &rdev->pm.idle_work, 529
315 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 530 queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
316 DRM_DEBUG("radeon: dynamic power management activated\n"); 531 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
317 } 532 } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
318 } 533 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
319 else { /* count == 0 */ 534 queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
320 if (rdev->pm.state != PM_STATE_MINIMUM) { 535 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
321 cancel_delayed_work(&rdev->pm.idle_work); 536 DRM_DEBUG("radeon: dynamic power management activated\n");
322 537 }
323 rdev->pm.state = PM_STATE_MINIMUM; 538 } else { /* count == 0 */
324 rdev->pm.planned_action = PM_ACTION_MINIMUM; 539 if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
325 radeon_pm_set_clocks(rdev); 540 cancel_delayed_work(&rdev->pm.dynpm_idle_work);
541
542 rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
543 rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
544 radeon_pm_get_dynpm_state(rdev);
545 radeon_pm_set_clocks(rdev);
546 }
547 }
326 } 548 }
327 } 549 }
328 550
329 mutex_unlock(&rdev->pm.mutex); 551 mutex_unlock(&rdev->pm.mutex);
330} 552}
331 553
332static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) 554static bool radeon_pm_in_vbl(struct radeon_device *rdev)
333{ 555{
334 u32 stat_crtc1 = 0, stat_crtc2 = 0; 556 u32 stat_crtc = 0, vbl = 0, position = 0;
335 bool in_vbl = true; 557 bool in_vbl = true;
336 558
337 if (ASIC_IS_AVIVO(rdev)) { 559 if (ASIC_IS_DCE4(rdev)) {
560 if (rdev->pm.active_crtcs & (1 << 0)) {
561 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
562 EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
563 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
564 EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
565 }
566 if (rdev->pm.active_crtcs & (1 << 1)) {
567 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
568 EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
569 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
570 EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
571 }
572 if (rdev->pm.active_crtcs & (1 << 2)) {
573 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
574 EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
575 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
576 EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
577 }
578 if (rdev->pm.active_crtcs & (1 << 3)) {
579 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
580 EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
581 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
582 EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
583 }
584 if (rdev->pm.active_crtcs & (1 << 4)) {
585 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
586 EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
587 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
588 EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
589 }
590 if (rdev->pm.active_crtcs & (1 << 5)) {
591 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
592 EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
593 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
594 EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
595 }
596 } else if (ASIC_IS_AVIVO(rdev)) {
597 if (rdev->pm.active_crtcs & (1 << 0)) {
598 vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
599 position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
600 }
601 if (rdev->pm.active_crtcs & (1 << 1)) {
602 vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
603 position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
604 }
605 if (position < vbl && position > 1)
606 in_vbl = false;
607 } else {
338 if (rdev->pm.active_crtcs & (1 << 0)) { 608 if (rdev->pm.active_crtcs & (1 << 0)) {
339 stat_crtc1 = RREG32(D1CRTC_STATUS); 609 stat_crtc = RREG32(RADEON_CRTC_STATUS);
340 if (!(stat_crtc1 & 1)) 610 if (!(stat_crtc & 1))
341 in_vbl = false; 611 in_vbl = false;
342 } 612 }
343 if (rdev->pm.active_crtcs & (1 << 1)) { 613 if (rdev->pm.active_crtcs & (1 << 1)) {
344 stat_crtc2 = RREG32(D2CRTC_STATUS); 614 stat_crtc = RREG32(RADEON_CRTC2_STATUS);
345 if (!(stat_crtc2 & 1)) 615 if (!(stat_crtc & 1))
346 in_vbl = false; 616 in_vbl = false;
347 } 617 }
348 } 618 }
349 if (in_vbl == false)
350 DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
351 stat_crtc2, finish ? "exit" : "entry");
352 return in_vbl;
353}
354static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
355{
356 /*radeon_fence_wait_last(rdev);*/
357 switch (rdev->pm.planned_action) {
358 case PM_ACTION_UPCLOCK:
359 rdev->pm.downclocked = false;
360 break;
361 case PM_ACTION_DOWNCLOCK:
362 rdev->pm.downclocked = true;
363 break;
364 case PM_ACTION_MINIMUM:
365 break;
366 case PM_ACTION_NONE:
367 DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
368 break;
369 }
370 619
371 radeon_set_power_state(rdev); 620 if (position < vbl && position > 1)
372 rdev->pm.planned_action = PM_ACTION_NONE; 621 in_vbl = false;
622
623 return in_vbl;
373} 624}
374 625
375static void radeon_pm_set_clocks(struct radeon_device *rdev) 626static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
376{ 627{
377 radeon_get_power_state(rdev, rdev->pm.planned_action); 628 u32 stat_crtc = 0;
378 mutex_lock(&rdev->cp.mutex); 629 bool in_vbl = radeon_pm_in_vbl(rdev);
379 630
380 if (rdev->pm.active_crtcs & (1 << 0)) { 631 if (in_vbl == false)
381 rdev->pm.req_vblank |= (1 << 0); 632 DRM_DEBUG("not in vbl for pm change %08x at %s\n", stat_crtc,
382 drm_vblank_get(rdev->ddev, 0); 633 finish ? "exit" : "entry");
383 } 634 return in_vbl;
384 if (rdev->pm.active_crtcs & (1 << 1)) {
385 rdev->pm.req_vblank |= (1 << 1);
386 drm_vblank_get(rdev->ddev, 1);
387 }
388 radeon_pm_set_clocks_locked(rdev);
389 if (rdev->pm.req_vblank & (1 << 0)) {
390 rdev->pm.req_vblank &= ~(1 << 0);
391 drm_vblank_put(rdev->ddev, 0);
392 }
393 if (rdev->pm.req_vblank & (1 << 1)) {
394 rdev->pm.req_vblank &= ~(1 << 1);
395 drm_vblank_put(rdev->ddev, 1);
396 }
397
398 mutex_unlock(&rdev->cp.mutex);
399} 635}
400 636
401static void radeon_pm_idle_work_handler(struct work_struct *work) 637static void radeon_dynpm_idle_work_handler(struct work_struct *work)
402{ 638{
403 struct radeon_device *rdev; 639 struct radeon_device *rdev;
640 int resched;
404 rdev = container_of(work, struct radeon_device, 641 rdev = container_of(work, struct radeon_device,
405 pm.idle_work.work); 642 pm.dynpm_idle_work.work);
406 643
644 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
407 mutex_lock(&rdev->pm.mutex); 645 mutex_lock(&rdev->pm.mutex);
408 if (rdev->pm.state == PM_STATE_ACTIVE) { 646 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
409 unsigned long irq_flags; 647 unsigned long irq_flags;
410 int not_processed = 0; 648 int not_processed = 0;
411 649
@@ -421,35 +659,40 @@ static void radeon_pm_idle_work_handler(struct work_struct *work)
421 read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); 659 read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
422 660
423 if (not_processed >= 3) { /* should upclock */ 661 if (not_processed >= 3) { /* should upclock */
424 if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) { 662 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
425 rdev->pm.planned_action = PM_ACTION_NONE; 663 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
426 } else if (rdev->pm.planned_action == PM_ACTION_NONE && 664 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
427 rdev->pm.downclocked) { 665 rdev->pm.dynpm_can_upclock) {
428 rdev->pm.planned_action = 666 rdev->pm.dynpm_planned_action =
429 PM_ACTION_UPCLOCK; 667 DYNPM_ACTION_UPCLOCK;
430 rdev->pm.action_timeout = jiffies + 668 rdev->pm.dynpm_action_timeout = jiffies +
431 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 669 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
432 } 670 }
433 } else if (not_processed == 0) { /* should downclock */ 671 } else if (not_processed == 0) { /* should downclock */
434 if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) { 672 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
435 rdev->pm.planned_action = PM_ACTION_NONE; 673 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
436 } else if (rdev->pm.planned_action == PM_ACTION_NONE && 674 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
437 !rdev->pm.downclocked) { 675 rdev->pm.dynpm_can_downclock) {
438 rdev->pm.planned_action = 676 rdev->pm.dynpm_planned_action =
439 PM_ACTION_DOWNCLOCK; 677 DYNPM_ACTION_DOWNCLOCK;
440 rdev->pm.action_timeout = jiffies + 678 rdev->pm.dynpm_action_timeout = jiffies +
441 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 679 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
442 } 680 }
443 } 681 }
444 682
445 if (rdev->pm.planned_action != PM_ACTION_NONE && 683 /* Note, radeon_pm_set_clocks is called with static_switch set
446 jiffies > rdev->pm.action_timeout) { 684 * to false since we want to wait for vbl to avoid flicker.
685 */
686 if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
687 jiffies > rdev->pm.dynpm_action_timeout) {
688 radeon_pm_get_dynpm_state(rdev);
447 radeon_pm_set_clocks(rdev); 689 radeon_pm_set_clocks(rdev);
448 } 690 }
449 } 691 }
450 mutex_unlock(&rdev->pm.mutex); 692 mutex_unlock(&rdev->pm.mutex);
693 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
451 694
452 queue_delayed_work(rdev->wq, &rdev->pm.idle_work, 695 queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
453 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 696 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
454} 697}
455 698
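The renamed dynpm idle handler keeps the same hysteresis: three or more unprocessed fences arm an upclock, zero arm a downclock, an opposite pending action is simply cancelled, and the action only fires once its timeout expires. A compact model of just that decision (the threshold of 3 comes from the hunk; everything else is illustrative):

#include <stdio.h>

enum dynpm_action { ACTION_NONE, ACTION_UPCLOCK, ACTION_DOWNCLOCK };

/* Toy model of the idle-work decision above: derive the next reclock
 * action from the number of fences not yet processed. */
static enum dynpm_action decide(int not_processed, enum dynpm_action planned,
				int can_upclock, int can_downclock)
{
	if (not_processed >= 3) {		/* GPU busy: should upclock */
		if (planned == ACTION_DOWNCLOCK)
			return ACTION_NONE;	/* cancel pending downclock */
		if (planned == ACTION_NONE && can_upclock)
			return ACTION_UPCLOCK;	/* arm upclock + timeout */
	} else if (not_processed == 0) {	/* GPU idle: should downclock */
		if (planned == ACTION_UPCLOCK)
			return ACTION_NONE;	/* cancel pending upclock */
		if (planned == ACTION_NONE && can_downclock)
			return ACTION_DOWNCLOCK;
	}
	return planned;				/* 1-2 pending: keep state */
}

int main(void)
{
	printf("%d\n", decide(4, ACTION_NONE, 1, 1));	 /* 1 = ACTION_UPCLOCK */
	printf("%d\n", decide(0, ACTION_UPCLOCK, 1, 1)); /* 0 = ACTION_NONE */
	return 0;
}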
@@ -464,7 +707,6 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
464 struct drm_device *dev = node->minor->dev; 707 struct drm_device *dev = node->minor->dev;
465 struct radeon_device *rdev = dev->dev_private; 708 struct radeon_device *rdev = dev->dev_private;
466 709
467 seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
468 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); 710 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
469 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 711 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
470 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); 712 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index eabbc9cf30a7..c332f46340d5 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -553,7 +553,6 @@
553# define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16) 553# define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16)
554#define RADEON_CRTC2_CRNT_FRAME 0x0314 554#define RADEON_CRTC2_CRNT_FRAME 0x0314
555#define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318 555#define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318
556#define RADEON_CRTC2_STATUS 0x03fc
557#define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310 556#define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310
558#define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */ 557#define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */
559#define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */ 558#define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */
@@ -995,6 +994,7 @@
995# define RADEON_FP_DETECT_MASK (1 << 4) 994# define RADEON_FP_DETECT_MASK (1 << 4)
996# define RADEON_CRTC2_VBLANK_MASK (1 << 9) 995# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
997# define RADEON_FP2_DETECT_MASK (1 << 10) 996# define RADEON_FP2_DETECT_MASK (1 << 10)
997# define RADEON_GUI_IDLE_MASK (1 << 19)
998# define RADEON_SW_INT_ENABLE (1 << 25) 998# define RADEON_SW_INT_ENABLE (1 << 25)
999#define RADEON_GEN_INT_STATUS 0x0044 999#define RADEON_GEN_INT_STATUS 0x0044
1000# define AVIVO_DISPLAY_INT_STATUS (1 << 0) 1000# define AVIVO_DISPLAY_INT_STATUS (1 << 0)
@@ -1006,6 +1006,8 @@
1006# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) 1006# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
1007# define RADEON_FP2_DETECT_STAT (1 << 10) 1007# define RADEON_FP2_DETECT_STAT (1 << 10)
1008# define RADEON_FP2_DETECT_STAT_ACK (1 << 10) 1008# define RADEON_FP2_DETECT_STAT_ACK (1 << 10)
1009# define RADEON_GUI_IDLE_STAT (1 << 19)
1010# define RADEON_GUI_IDLE_STAT_ACK (1 << 19)
1009# define RADEON_SW_INT_FIRE (1 << 26) 1011# define RADEON_SW_INT_FIRE (1 << 26)
1010# define RADEON_SW_INT_TEST (1 << 25) 1012# define RADEON_SW_INT_TEST (1 << 25)
1011# define RADEON_SW_INT_TEST_ACK (1 << 25) 1013# define RADEON_SW_INT_TEST_ACK (1 << 25)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index f6e1e8d4d986..261e98a276db 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -219,24 +219,26 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
219void radeon_ib_pool_fini(struct radeon_device *rdev) 219void radeon_ib_pool_fini(struct radeon_device *rdev)
220{ 220{
221 int r; 221 int r;
222 struct radeon_bo *robj;
222 223
223 if (!rdev->ib_pool.ready) { 224 if (!rdev->ib_pool.ready) {
224 return; 225 return;
225 } 226 }
226 mutex_lock(&rdev->ib_pool.mutex); 227 mutex_lock(&rdev->ib_pool.mutex);
227 radeon_ib_bogus_cleanup(rdev); 228 radeon_ib_bogus_cleanup(rdev);
229 robj = rdev->ib_pool.robj;
230 rdev->ib_pool.robj = NULL;
231 mutex_unlock(&rdev->ib_pool.mutex);
228 232
229 if (rdev->ib_pool.robj) { 233 if (robj) {
230 r = radeon_bo_reserve(rdev->ib_pool.robj, false); 234 r = radeon_bo_reserve(robj, false);
231 if (likely(r == 0)) { 235 if (likely(r == 0)) {
232 radeon_bo_kunmap(rdev->ib_pool.robj); 236 radeon_bo_kunmap(robj);
233 radeon_bo_unpin(rdev->ib_pool.robj); 237 radeon_bo_unpin(robj);
234 radeon_bo_unreserve(rdev->ib_pool.robj); 238 radeon_bo_unreserve(robj);
235 } 239 }
236 radeon_bo_unref(&rdev->ib_pool.robj); 240 radeon_bo_unref(&robj);
237 rdev->ib_pool.robj = NULL;
238 } 241 }
239 mutex_unlock(&rdev->ib_pool.mutex);
240} 242}
241 243
242 244
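radeon_ib_pool_fini() (and radeon_ring_fini() further down) now detaches the buffer object from the pool while holding the mutex and tears it down only after dropping it, so the sleeping reserve/kunmap/unpin/unref path runs without the lock. A small sketch of that detach-under-lock idiom, with pthread primitives standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pool {
	pthread_mutex_t lock;
	char *obj;		/* stand-in for rdev->ib_pool.robj */
};

/* Steal the pointer while holding the mutex, then do the (potentially
 * sleeping) teardown without it, as in the hunk above. */
static void pool_fini(struct pool *p)
{
	char *obj;

	pthread_mutex_lock(&p->lock);
	obj = p->obj;
	p->obj = NULL;		/* other threads now see an empty pool */
	pthread_mutex_unlock(&p->lock);

	if (obj) {
		/* reserve/kunmap/unpin/unref would happen here */
		free(obj);
		printf("object torn down outside the lock\n");
	}
}

int main(void)
{
	struct pool p = { PTHREAD_MUTEX_INITIALIZER, malloc(16) };

	pool_fini(&p);
	return 0;
}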
@@ -258,31 +260,41 @@ void radeon_ring_free_size(struct radeon_device *rdev)
258 } 260 }
259} 261}
260 262
261int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw) 263int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
262{ 264{
263 int r; 265 int r;
264 266
265 /* Align requested size with padding so unlock_commit can 267 /* Align requested size with padding so unlock_commit can
266 * pad safely */ 268 * pad safely */
267 ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask; 269 ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
268 mutex_lock(&rdev->cp.mutex);
269 while (ndw > (rdev->cp.ring_free_dw - 1)) { 270 while (ndw > (rdev->cp.ring_free_dw - 1)) {
270 radeon_ring_free_size(rdev); 271 radeon_ring_free_size(rdev);
271 if (ndw < rdev->cp.ring_free_dw) { 272 if (ndw < rdev->cp.ring_free_dw) {
272 break; 273 break;
273 } 274 }
274 r = radeon_fence_wait_next(rdev); 275 r = radeon_fence_wait_next(rdev);
275 if (r) { 276 if (r)
276 mutex_unlock(&rdev->cp.mutex);
277 return r; 277 return r;
278 }
279 } 278 }
280 rdev->cp.count_dw = ndw; 279 rdev->cp.count_dw = ndw;
281 rdev->cp.wptr_old = rdev->cp.wptr; 280 rdev->cp.wptr_old = rdev->cp.wptr;
282 return 0; 281 return 0;
283} 282}
284 283
285void radeon_ring_unlock_commit(struct radeon_device *rdev) 284int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
285{
286 int r;
287
288 mutex_lock(&rdev->cp.mutex);
289 r = radeon_ring_alloc(rdev, ndw);
290 if (r) {
291 mutex_unlock(&rdev->cp.mutex);
292 return r;
293 }
294 return 0;
295}
296
297void radeon_ring_commit(struct radeon_device *rdev)
286{ 298{
287 unsigned count_dw_pad; 299 unsigned count_dw_pad;
288 unsigned i; 300 unsigned i;
@@ -295,6 +307,11 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev)
295 } 307 }
296 DRM_MEMORYBARRIER(); 308 DRM_MEMORYBARRIER();
297 radeon_cp_commit(rdev); 309 radeon_cp_commit(rdev);
310}
311
312void radeon_ring_unlock_commit(struct radeon_device *rdev)
313{
314 radeon_ring_commit(rdev);
298 mutex_unlock(&rdev->cp.mutex); 315 mutex_unlock(&rdev->cp.mutex);
299} 316}
300 317
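The ring code is split so that radeon_ring_lock()/radeon_ring_unlock_commit() become thin locking wrappers around radeon_ring_alloc()/radeon_ring_commit(), letting callers that already hold cp.mutex use the bare pair. A toy mirror of that layering (the names and the dword bookkeeping are illustrative, not the driver's internals):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ring_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned wptr, count_dw;

/* *_alloc()/*_commit() assume the caller already holds the lock;
 * *_lock()/*_unlock_commit() are the locking wrappers. */
static int ring_alloc(unsigned ndw)
{
	count_dw = ndw;			/* wait-for-space logic elided */
	return 0;
}

static void ring_commit(void)
{
	printf("commit, wptr=%u count_dw=%u\n", wptr, count_dw);
}

static int ring_lock(unsigned ndw)
{
	int r;

	pthread_mutex_lock(&ring_mutex);
	r = ring_alloc(ndw);
	if (r)
		pthread_mutex_unlock(&ring_mutex);	/* drop on failure */
	return r;
}

static void ring_unlock_commit(void)
{
	ring_commit();
	pthread_mutex_unlock(&ring_mutex);
}

static void ring_write(unsigned v)
{
	(void)v;
	wptr++;
	count_dw--;
}

int main(void)
{
	if (ring_lock(2) == 0) {
		ring_write(0x1234);	/* illustrative payload dwords */
		ring_write(0);
		ring_unlock_commit();
	}
	return 0;
}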
@@ -344,20 +361,23 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
344void radeon_ring_fini(struct radeon_device *rdev) 361void radeon_ring_fini(struct radeon_device *rdev)
345{ 362{
346 int r; 363 int r;
364 struct radeon_bo *ring_obj;
347 365
348 mutex_lock(&rdev->cp.mutex); 366 mutex_lock(&rdev->cp.mutex);
349 if (rdev->cp.ring_obj) { 367 ring_obj = rdev->cp.ring_obj;
350 r = radeon_bo_reserve(rdev->cp.ring_obj, false); 368 rdev->cp.ring = NULL;
369 rdev->cp.ring_obj = NULL;
370 mutex_unlock(&rdev->cp.mutex);
371
372 if (ring_obj) {
373 r = radeon_bo_reserve(ring_obj, false);
351 if (likely(r == 0)) { 374 if (likely(r == 0)) {
352 radeon_bo_kunmap(rdev->cp.ring_obj); 375 radeon_bo_kunmap(ring_obj);
353 radeon_bo_unpin(rdev->cp.ring_obj); 376 radeon_bo_unpin(ring_obj);
354 radeon_bo_unreserve(rdev->cp.ring_obj); 377 radeon_bo_unreserve(ring_obj);
355 } 378 }
356 radeon_bo_unref(&rdev->cp.ring_obj); 379 radeon_bo_unref(&ring_obj);
357 rdev->cp.ring = NULL;
358 rdev->cp.ring_obj = NULL;
359 } 380 }
360 mutex_unlock(&rdev->cp.mutex);
361} 381}
362 382
363 383
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d031b6863082..e9918d88f5b0 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -33,6 +33,7 @@
33#include <ttm/ttm_bo_driver.h> 33#include <ttm/ttm_bo_driver.h>
34#include <ttm/ttm_placement.h> 34#include <ttm/ttm_placement.h>
35#include <ttm/ttm_module.h> 35#include <ttm/ttm_module.h>
36#include <ttm/ttm_page_alloc.h>
36#include <drm/drmP.h> 37#include <drm/drmP.h>
37#include <drm/radeon_drm.h> 38#include <drm/radeon_drm.h>
38#include <linux/seq_file.h> 39#include <linux/seq_file.h>
@@ -162,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
162 (unsigned)type); 163 (unsigned)type);
163 return -EINVAL; 164 return -EINVAL;
164 } 165 }
165 man->io_offset = rdev->mc.agp_base;
166 man->io_size = rdev->mc.gtt_size;
167 man->io_addr = NULL;
168 if (!rdev->ddev->agp->cant_use_aperture) 166 if (!rdev->ddev->agp->cant_use_aperture)
169 man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | 167 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
170 TTM_MEMTYPE_FLAG_MAPPABLE;
171 man->available_caching = TTM_PL_FLAG_UNCACHED | 168 man->available_caching = TTM_PL_FLAG_UNCACHED |
172 TTM_PL_FLAG_WC; 169 TTM_PL_FLAG_WC;
173 man->default_caching = TTM_PL_FLAG_WC; 170 man->default_caching = TTM_PL_FLAG_WC;
174 } else
175#endif
176 {
177 man->io_offset = 0;
178 man->io_size = 0;
179 man->io_addr = NULL;
180 } 171 }
172#endif
181 break; 173 break;
182 case TTM_PL_VRAM: 174 case TTM_PL_VRAM:
183 /* "On-card" video ram */ 175 /* "On-card" video ram */
184 man->gpu_offset = rdev->mc.vram_start; 176 man->gpu_offset = rdev->mc.vram_start;
185 man->flags = TTM_MEMTYPE_FLAG_FIXED | 177 man->flags = TTM_MEMTYPE_FLAG_FIXED |
186 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
187 TTM_MEMTYPE_FLAG_MAPPABLE; 178 TTM_MEMTYPE_FLAG_MAPPABLE;
188 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; 179 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
189 man->default_caching = TTM_PL_FLAG_WC; 180 man->default_caching = TTM_PL_FLAG_WC;
190 man->io_addr = NULL;
191 man->io_offset = rdev->mc.aper_base;
192 man->io_size = rdev->mc.aper_size;
193 break; 181 break;
194 default: 182 default:
195 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); 183 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -244,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
244} 232}
245 233
246static int radeon_move_blit(struct ttm_buffer_object *bo, 234static int radeon_move_blit(struct ttm_buffer_object *bo,
247 bool evict, int no_wait, 235 bool evict, int no_wait_reserve, bool no_wait_gpu,
248 struct ttm_mem_reg *new_mem, 236 struct ttm_mem_reg *new_mem,
249 struct ttm_mem_reg *old_mem) 237 struct ttm_mem_reg *old_mem)
250{ 238{
251 struct radeon_device *rdev; 239 struct radeon_device *rdev;
252 uint64_t old_start, new_start; 240 uint64_t old_start, new_start;
@@ -290,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
290 r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); 278 r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
291 /* FIXME: handle copy error */ 279 /* FIXME: handle copy error */
292 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, 280 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
293 evict, no_wait, new_mem); 281 evict, no_wait_reserve, no_wait_gpu, new_mem);
294 radeon_fence_unref(&fence); 282 radeon_fence_unref(&fence);
295 return r; 283 return r;
296} 284}
297 285
298static int radeon_move_vram_ram(struct ttm_buffer_object *bo, 286static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
299 bool evict, bool interruptible, bool no_wait, 287 bool evict, bool interruptible,
288 bool no_wait_reserve, bool no_wait_gpu,
300 struct ttm_mem_reg *new_mem) 289 struct ttm_mem_reg *new_mem)
301{ 290{
302 struct radeon_device *rdev; 291 struct radeon_device *rdev;
@@ -317,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
317 placement.busy_placement = &placements; 306 placement.busy_placement = &placements;
318 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 307 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
319 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, 308 r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
320 interruptible, no_wait); 309 interruptible, no_wait_reserve, no_wait_gpu);
321 if (unlikely(r)) { 310 if (unlikely(r)) {
322 return r; 311 return r;
323 } 312 }
@@ -331,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
331 if (unlikely(r)) { 320 if (unlikely(r)) {
332 goto out_cleanup; 321 goto out_cleanup;
333 } 322 }
334 r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem); 323 r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
335 if (unlikely(r)) { 324 if (unlikely(r)) {
336 goto out_cleanup; 325 goto out_cleanup;
337 } 326 }
338 r = ttm_bo_move_ttm(bo, true, no_wait, new_mem); 327 r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
339out_cleanup: 328out_cleanup:
340 if (tmp_mem.mm_node) { 329 if (tmp_mem.mm_node) {
341 struct ttm_bo_global *glob = rdev->mman.bdev.glob; 330 struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -349,7 +338,8 @@ out_cleanup:
349} 338}
350 339
351static int radeon_move_ram_vram(struct ttm_buffer_object *bo, 340static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
352 bool evict, bool interruptible, bool no_wait, 341 bool evict, bool interruptible,
342 bool no_wait_reserve, bool no_wait_gpu,
353 struct ttm_mem_reg *new_mem) 343 struct ttm_mem_reg *new_mem)
354{ 344{
355 struct radeon_device *rdev; 345 struct radeon_device *rdev;
@@ -369,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
369 placement.num_busy_placement = 1; 359 placement.num_busy_placement = 1;
370 placement.busy_placement = &placements; 360 placement.busy_placement = &placements;
371 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 361 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
372 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait); 362 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
373 if (unlikely(r)) { 363 if (unlikely(r)) {
374 return r; 364 return r;
375 } 365 }
376 r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem); 366 r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
377 if (unlikely(r)) { 367 if (unlikely(r)) {
378 goto out_cleanup; 368 goto out_cleanup;
379 } 369 }
380 r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem); 370 r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
381 if (unlikely(r)) { 371 if (unlikely(r)) {
382 goto out_cleanup; 372 goto out_cleanup;
383 } 373 }
@@ -394,8 +384,9 @@ out_cleanup:
394} 384}
395 385
396static int radeon_bo_move(struct ttm_buffer_object *bo, 386static int radeon_bo_move(struct ttm_buffer_object *bo,
397 bool evict, bool interruptible, bool no_wait, 387 bool evict, bool interruptible,
398 struct ttm_mem_reg *new_mem) 388 bool no_wait_reserve, bool no_wait_gpu,
389 struct ttm_mem_reg *new_mem)
399{ 390{
400 struct radeon_device *rdev; 391 struct radeon_device *rdev;
401 struct ttm_mem_reg *old_mem = &bo->mem; 392 struct ttm_mem_reg *old_mem = &bo->mem;
@@ -422,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
422 if (old_mem->mem_type == TTM_PL_VRAM && 413 if (old_mem->mem_type == TTM_PL_VRAM &&
423 new_mem->mem_type == TTM_PL_SYSTEM) { 414 new_mem->mem_type == TTM_PL_SYSTEM) {
424 r = radeon_move_vram_ram(bo, evict, interruptible, 415 r = radeon_move_vram_ram(bo, evict, interruptible,
425 no_wait, new_mem); 416 no_wait_reserve, no_wait_gpu, new_mem);
426 } else if (old_mem->mem_type == TTM_PL_SYSTEM && 417 } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
427 new_mem->mem_type == TTM_PL_VRAM) { 418 new_mem->mem_type == TTM_PL_VRAM) {
428 r = radeon_move_ram_vram(bo, evict, interruptible, 419 r = radeon_move_ram_vram(bo, evict, interruptible,
429 no_wait, new_mem); 420 no_wait_reserve, no_wait_gpu, new_mem);
430 } else { 421 } else {
431 r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); 422 r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
432 } 423 }
433 424
434 if (r) { 425 if (r) {
435memcpy: 426memcpy:
436 r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); 427 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
437 } 428 }
438
439 return r; 429 return r;
440} 430}
441 431
432static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
433{
434 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
435 struct radeon_device *rdev = radeon_get_rdev(bdev);
436
437 mem->bus.addr = NULL;
438 mem->bus.offset = 0;
439 mem->bus.size = mem->num_pages << PAGE_SHIFT;
440 mem->bus.base = 0;
441 mem->bus.is_iomem = false;
442 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
443 return -EINVAL;
444 switch (mem->mem_type) {
445 case TTM_PL_SYSTEM:
446 /* system memory */
447 return 0;
448 case TTM_PL_TT:
449#if __OS_HAS_AGP
450 if (rdev->flags & RADEON_IS_AGP) {
451 /* RADEON_IS_AGP is set only if AGP is active */
452 mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
453 mem->bus.base = rdev->mc.agp_base;
454 mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
455 }
456#endif
457 break;
458 case TTM_PL_VRAM:
459 mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
460 /* check if it's visible */
461 if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
462 return -EINVAL;
463 mem->bus.base = rdev->mc.aper_base;
464 mem->bus.is_iomem = true;
465 break;
466 default:
467 return -EINVAL;
468 }
469 return 0;
470}
471
472static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
473{
474}
475
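radeon_ttm_io_mem_reserve() above hands TTM everything it needs to map a BO: when bus.is_iomem is set, the CPU address range is bus.base + bus.offset for bus.size bytes, and VRAM placements past the CPU-visible aperture are rejected. A stand-alone rerun of the VRAM arithmetic (all numbers illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t aper_base = 0xd0000000;	/* PCI aperture base */
	uint64_t visible_vram = 256ULL << 20;	/* CPU-visible VRAM */
	uint64_t first_page = 0x100;		/* mm_node->start */
	uint64_t num_pages = 16;

	uint64_t offset = first_page << PAGE_SHIFT;
	uint64_t size = num_pages << PAGE_SHIFT;

	/* Same check as the hunk: reject BOs past the visible aperture. */
	if (offset + size > visible_vram) {
		fprintf(stderr, "BO not CPU-visible\n");
		return 1;
	}
	printf("map %#llx..%#llx\n",
	       (unsigned long long)(aper_base + offset),
	       (unsigned long long)(aper_base + offset + size));
	return 0;
}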
442static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg, 476static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
443 bool lazy, bool interruptible) 477 bool lazy, bool interruptible)
444{ 478{
@@ -479,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
479 .sync_obj_ref = &radeon_sync_obj_ref, 513 .sync_obj_ref = &radeon_sync_obj_ref,
480 .move_notify = &radeon_bo_move_notify, 514 .move_notify = &radeon_bo_move_notify,
481 .fault_reserve_notify = &radeon_bo_fault_reserve_notify, 515 .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
516 .io_mem_reserve = &radeon_ttm_io_mem_reserve,
517 .io_mem_free = &radeon_ttm_io_mem_free,
482}; 518};
483 519
484int radeon_ttm_init(struct radeon_device *rdev) 520int radeon_ttm_init(struct radeon_device *rdev)
@@ -571,13 +607,17 @@ static const struct vm_operations_struct *ttm_vm_ops = NULL;
571static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 607static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
572{ 608{
573 struct ttm_buffer_object *bo; 609 struct ttm_buffer_object *bo;
610 struct radeon_device *rdev;
574 int r; 611 int r;
575 612
576 bo = (struct ttm_buffer_object *)vma->vm_private_data; 613 bo = (struct ttm_buffer_object *)vma->vm_private_data;
577 if (bo == NULL) { 614 if (bo == NULL) {
578 return VM_FAULT_NOPAGE; 615 return VM_FAULT_NOPAGE;
579 } 616 }
617 rdev = radeon_get_rdev(bo->bdev);
618 mutex_lock(&rdev->vram_mutex);
580 r = ttm_vm_ops->fault(vma, vmf); 619 r = ttm_vm_ops->fault(vma, vmf);
620 mutex_unlock(&rdev->vram_mutex);
581 return r; 621 return r;
582} 622}
583 623
@@ -745,8 +785,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
745static int radeon_ttm_debugfs_init(struct radeon_device *rdev) 785static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
746{ 786{
747#if defined(CONFIG_DEBUG_FS) 787#if defined(CONFIG_DEBUG_FS)
748 static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES]; 788 static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
749 static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32]; 789 static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
750 unsigned i; 790 unsigned i;
751 791
752 for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { 792 for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -763,7 +803,13 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
763 radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager; 803 radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
764 804
765 } 805 }
766 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES); 806 /* Add ttm page pool to debugfs */
807 sprintf(radeon_mem_types_names[i], "ttm_page_pool");
808 radeon_mem_types_list[i].name = radeon_mem_types_names[i];
809 radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
810 radeon_mem_types_list[i].driver_features = 0;
811 radeon_mem_types_list[i].data = NULL;
812 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
767 813
768#endif 814#endif
769 return 0; 815 return 0;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 1a41cb268b72..9e4240b3bf0b 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -243,8 +243,6 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
243 243
244void rs400_gpu_init(struct radeon_device *rdev) 244void rs400_gpu_init(struct radeon_device *rdev)
245{ 245{
246 /* FIXME: HDP same place on rs400 ? */
247 r100_hdp_reset(rdev);
248 /* FIXME: is this correct ? */ 246 /* FIXME: is this correct ? */
249 r420_pipes_init(rdev); 247 r420_pipes_init(rdev);
250 if (rs400_mc_wait_for_idle(rdev)) { 248 if (rs400_mc_wait_for_idle(rdev)) {
@@ -433,7 +431,7 @@ int rs400_resume(struct radeon_device *rdev)
433 /* setup MC before calling post tables */ 431 /* setup MC before calling post tables */
434 rs400_mc_program(rdev); 432 rs400_mc_program(rdev);
435 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 433 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
436 if (radeon_gpu_reset(rdev)) { 434 if (radeon_asic_reset(rdev)) {
437 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 435 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
438 RREG32(R_000E40_RBBM_STATUS), 436 RREG32(R_000E40_RBBM_STATUS),
439 RREG32(R_0007C0_CP_STAT)); 437 RREG32(R_0007C0_CP_STAT));
@@ -458,7 +456,6 @@ int rs400_suspend(struct radeon_device *rdev)
458 456
459void rs400_fini(struct radeon_device *rdev) 457void rs400_fini(struct radeon_device *rdev)
460{ 458{
461 radeon_pm_fini(rdev);
462 r100_cp_fini(rdev); 459 r100_cp_fini(rdev);
463 r100_wb_fini(rdev); 460 r100_wb_fini(rdev);
464 r100_ib_fini(rdev); 461 r100_ib_fini(rdev);
@@ -497,7 +494,7 @@ int rs400_init(struct radeon_device *rdev)
497 return r; 494 return r;
498 } 495 }
499 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 496 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
500 if (radeon_gpu_reset(rdev)) { 497 if (radeon_asic_reset(rdev)) {
501 dev_warn(rdev->dev, 498 dev_warn(rdev->dev,
502 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 499 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
503 RREG32(R_000E40_RBBM_STATUS), 500 RREG32(R_000E40_RBBM_STATUS),
@@ -509,8 +506,6 @@ int rs400_init(struct radeon_device *rdev)
509 506
510 /* Initialize clocks */ 507 /* Initialize clocks */
511 radeon_get_clock_info(rdev->ddev); 508 radeon_get_clock_info(rdev->ddev);
512 /* Initialize power management */
513 radeon_pm_init(rdev);
514 /* initialize memory controller */ 509 /* initialize memory controller */
515 rs400_mc_init(rdev); 510 rs400_mc_init(rdev);
516 /* Fence driver */ 511 /* Fence driver */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index a81bc7a21e14..79887cac5b54 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,135 @@
46void rs600_gpu_init(struct radeon_device *rdev); 46void rs600_gpu_init(struct radeon_device *rdev);
47int rs600_mc_wait_for_idle(struct radeon_device *rdev); 47int rs600_mc_wait_for_idle(struct radeon_device *rdev);
48 48
49void rs600_pm_misc(struct radeon_device *rdev)
50{
51 int requested_index = rdev->pm.requested_power_state_index;
52 struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
53 struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
54 u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
55 u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;
56
57 if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
58 if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
59 tmp = RREG32(voltage->gpio.reg);
60 if (voltage->active_high)
61 tmp |= voltage->gpio.mask;
62 else
63 tmp &= ~(voltage->gpio.mask);
64 WREG32(voltage->gpio.reg, tmp);
65 if (voltage->delay)
66 udelay(voltage->delay);
67 } else {
68 tmp = RREG32(voltage->gpio.reg);
69 if (voltage->active_high)
70 tmp &= ~voltage->gpio.mask;
71 else
72 tmp |= voltage->gpio.mask;
73 WREG32(voltage->gpio.reg, tmp);
74 if (voltage->delay)
75 udelay(voltage->delay);
76 }
77 }
78
79 dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
80 dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
81 dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
82 if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
83 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
84 dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
85 dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
86 } else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
87 dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
88 dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
89 }
90 } else {
91 dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
92 dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
93 }
94 WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);
95
96 dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
97 if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
98 dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
99 if (voltage->delay) {
100 dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
101 dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
102 } else
103 dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
104 } else
105 dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
106 WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);
107
108 hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
109 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
110 hdp_dyn_cntl &= ~HDP_FORCEON;
111 else
112 hdp_dyn_cntl |= HDP_FORCEON;
113 WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
114#if 0
115 /* mc_host_dyn seems to cause hangs from time to time */
116 mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
117 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
118 mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
119 else
120 mc_host_dyn_cntl |= MC_HOST_FORCEON;
121 WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
122#endif
123 dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
124 if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
125 dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
126 else
127 dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
128 WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);
129
130 /* set pcie lanes */
131 if ((rdev->flags & RADEON_IS_PCIE) &&
132 !(rdev->flags & RADEON_IS_IGP) &&
133 rdev->asic->set_pcie_lanes &&
134 (ps->pcie_lanes !=
135 rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
136 radeon_set_pcie_lanes(rdev,
137 ps->pcie_lanes);
138 DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
139 }
140}
141
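The GPIO voltage switch at the top of rs600_pm_misc() is a plain read-modify-write whose polarity depends on active_high: on an active-high GPIO the mask bit is set to drop the voltage, on an active-low one it is set to raise it. A toy version of that truth table (the mask value and register are stand-ins, not hardware data):

#include <stdint.h>
#include <stdio.h>

static uint32_t gpio_reg;	/* stand-in for the voltage GPIO register */

/* Same read-modify-write as the hunk: whether setting the mask bit
 * raises or drops the voltage depends on active_high. */
static void set_voltage(uint32_t mask, int active_high, int drop)
{
	uint32_t tmp = gpio_reg;	/* RREG32(voltage->gpio.reg) */

	if (drop == active_high)	/* drop on active-high, raise on active-low */
		tmp |= mask;
	else
		tmp &= ~mask;
	gpio_reg = tmp;			/* WREG32(voltage->gpio.reg, tmp) */
}

int main(void)
{
	set_voltage(0x4, 1, 1);		/* active-high GPIO, drop voltage */
	printf("gpio reg now %#x\n", (unsigned)gpio_reg);
	return 0;
}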
142void rs600_pm_prepare(struct radeon_device *rdev)
143{
144 struct drm_device *ddev = rdev->ddev;
145 struct drm_crtc *crtc;
146 struct radeon_crtc *radeon_crtc;
147 u32 tmp;
148
149 /* disable any active CRTCs */
150 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
151 radeon_crtc = to_radeon_crtc(crtc);
152 if (radeon_crtc->enabled) {
153 tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
154 tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
155 WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
156 }
157 }
158}
159
160void rs600_pm_finish(struct radeon_device *rdev)
161{
162 struct drm_device *ddev = rdev->ddev;
163 struct drm_crtc *crtc;
164 struct radeon_crtc *radeon_crtc;
165 u32 tmp;
166
167 /* enable any active CRTCs */
168 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
169 radeon_crtc = to_radeon_crtc(crtc);
170 if (radeon_crtc->enabled) {
171 tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
172 tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
173 WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
174 }
175 }
176}
177
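rs600_pm_prepare()/rs600_pm_finish() bracket a reclock by setting and then clearing AVIVO_CRTC_DISP_READ_REQUEST_DISABLE on every enabled CRTC, pausing display memory requests while the clocks change. A minimal model of the bracket (two fake CRTC control registers; the bit position is illustrative):

#include <stdint.h>
#include <stdio.h>

#define DISP_READ_REQUEST_DISABLE (1u << 24)	/* illustrative bit position */

static uint32_t crtc_control[2];

/* prepare sets the bit on every enabled CRTC, finish clears it again,
 * so display memory requests are paused across the reclock. */
static void pm_bracket(unsigned enabled_mask, int prepare)
{
	int i;

	for (i = 0; i < 2; i++) {
		if (!(enabled_mask & (1u << i)))
			continue;
		if (prepare)
			crtc_control[i] |= DISP_READ_REQUEST_DISABLE;
		else
			crtc_control[i] &= ~DISP_READ_REQUEST_DISABLE;
	}
}

int main(void)
{
	pm_bracket(0x1, 1);	/* prepare: pause CRTC0 display requests */
	printf("reclock here\n");
	pm_bracket(0x1, 0);	/* finish: resume */
	return 0;
}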
49/* hpd for digital panel detect/disconnect */ 178/* hpd for digital panel detect/disconnect */
50bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 179bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
51{ 180{
@@ -147,6 +276,78 @@ void rs600_hpd_fini(struct radeon_device *rdev)
147 } 276 }
148} 277}
149 278
279void rs600_bm_disable(struct radeon_device *rdev)
280{
281 u32 tmp;
282
283 /* disable bus mastering */
284 pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
285 pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
286 mdelay(1);
287}
288
289int rs600_asic_reset(struct radeon_device *rdev)
290{
291 u32 status, tmp;
292
293 struct rv515_mc_save save;
294
295 /* Stops all mc clients */
296 rv515_mc_stop(rdev, &save);
297 status = RREG32(R_000E40_RBBM_STATUS);
298 if (!G_000E40_GUI_ACTIVE(status)) {
299 return 0;
300 }
301 status = RREG32(R_000E40_RBBM_STATUS);
302 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
303 /* stop CP */
304 WREG32(RADEON_CP_CSQ_CNTL, 0);
305 tmp = RREG32(RADEON_CP_RB_CNTL);
306 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
307 WREG32(RADEON_CP_RB_RPTR_WR, 0);
308 WREG32(RADEON_CP_RB_WPTR, 0);
309 WREG32(RADEON_CP_RB_CNTL, tmp);
310 pci_save_state(rdev->pdev);
311 /* disable bus mastering */
312 rs600_bm_disable(rdev);
313 /* reset GA+VAP */
314 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
315 S_0000F0_SOFT_RESET_GA(1));
316 RREG32(R_0000F0_RBBM_SOFT_RESET);
317 mdelay(500);
318 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
319 mdelay(1);
320 status = RREG32(R_000E40_RBBM_STATUS);
321 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
322 /* reset CP */
323 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
324 RREG32(R_0000F0_RBBM_SOFT_RESET);
325 mdelay(500);
326 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
327 mdelay(1);
328 status = RREG32(R_000E40_RBBM_STATUS);
329 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
330 /* reset MC */
331 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
332 RREG32(R_0000F0_RBBM_SOFT_RESET);
333 mdelay(500);
334 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
335 mdelay(1);
336 status = RREG32(R_000E40_RBBM_STATUS);
337 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
338 /* restore PCI & busmastering */
339 pci_restore_state(rdev->pdev);
340 /* Check if GPU is idle */
341 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
342 dev_err(rdev->dev, "failed to reset GPU\n");
343 rdev->gpu_lockup = true;
344 return -1;
345 }
346 rv515_mc_resume(rdev, &save);
 347 dev_info(rdev->dev, "GPU reset succeeded\n");
348 return 0;
349}
350
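rs600_asic_reset() pulses each block the same way: write the soft-reset bits, read the register back to post the write, hold the reset, release it, then re-read RBBM_STATUS. A sketch of that pulse helper with the MMIO stubbed out (delays are no-ops here; the bit positions match the rs600d.h macros added below):

#include <stdint.h>
#include <stdio.h>

static uint32_t soft_reset;	/* stand-in for RBBM_SOFT_RESET */

static uint32_t rreg(void)   { return soft_reset; }
static void wreg(uint32_t v) { soft_reset = v; }
static void delay_ms(int ms) { (void)ms; }	/* mdelay() stand-in */

/* Write the reset bits, read back to post the write, hold, release. */
static void reset_pulse(uint32_t bits)
{
	wreg(bits);
	(void)rreg();		/* post the write */
	delay_ms(500);		/* mdelay(500) in the hunk */
	wreg(0);
	delay_ms(1);
	printf("pulsed %#x, status would be re-read here\n", bits);
}

int main(void)
{
	reset_pulse((1u << 2) | (1u << 13));	/* VAP | GA, per the hunk */
	reset_pulse(1u << 0);			/* CP */
	reset_pulse(1u << 8);			/* MC */
	return 0;
}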
150/* 351/*
151 * GART. 352 * GART.
152 */ 353 */
@@ -310,6 +511,9 @@ int rs600_irq_set(struct radeon_device *rdev)
310 if (rdev->irq.sw_int) { 511 if (rdev->irq.sw_int) {
311 tmp |= S_000040_SW_INT_EN(1); 512 tmp |= S_000040_SW_INT_EN(1);
312 } 513 }
514 if (rdev->irq.gui_idle) {
515 tmp |= S_000040_GUI_IDLE(1);
516 }
313 if (rdev->irq.crtc_vblank_int[0]) { 517 if (rdev->irq.crtc_vblank_int[0]) {
314 mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); 518 mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
315 } 519 }
@@ -332,9 +536,15 @@ int rs600_irq_set(struct radeon_device *rdev)
332static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) 536static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
333{ 537{
334 uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); 538 uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
335 uint32_t irq_mask = ~C_000044_SW_INT; 539 uint32_t irq_mask = S_000044_SW_INT(1);
336 u32 tmp; 540 u32 tmp;
337 541
542 /* the interrupt works, but the status bit is permanently asserted */
543 if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
544 if (!rdev->irq.gui_idle_acked)
545 irq_mask |= S_000044_GUI_IDLE_STAT(1);
546 }
547
338 if (G_000044_DISPLAY_INT_STAT(irqs)) { 548 if (G_000044_DISPLAY_INT_STAT(irqs)) {
339 *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); 549 *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
340 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { 550 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
@@ -382,6 +592,9 @@ int rs600_irq_process(struct radeon_device *rdev)
382 uint32_t r500_disp_int; 592 uint32_t r500_disp_int;
383 bool queue_hotplug = false; 593 bool queue_hotplug = false;
384 594
595 /* reset gui idle ack. the status bit is broken */
596 rdev->irq.gui_idle_acked = false;
597
385 status = rs600_irq_ack(rdev, &r500_disp_int); 598 status = rs600_irq_ack(rdev, &r500_disp_int);
386 if (!status && !r500_disp_int) { 599 if (!status && !r500_disp_int) {
387 return IRQ_NONE; 600 return IRQ_NONE;
@@ -390,6 +603,12 @@ int rs600_irq_process(struct radeon_device *rdev)
390 /* SW interrupt */ 603 /* SW interrupt */
391 if (G_000044_SW_INT(status)) 604 if (G_000044_SW_INT(status))
392 radeon_fence_process(rdev); 605 radeon_fence_process(rdev);
606 /* GUI idle */
607 if (G_000040_GUI_IDLE(status)) {
608 rdev->irq.gui_idle_acked = true;
609 rdev->pm.gui_idle = true;
610 wake_up(&rdev->irq.idle_queue);
611 }
393 /* Vertical blank interrupts */ 612 /* Vertical blank interrupts */
394 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { 613 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
395 drm_handle_vblank(rdev->ddev, 0); 614 drm_handle_vblank(rdev->ddev, 0);
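The new GUI-idle plumbing turns a busy-poll into a sleep: rs600_irq_set() unmasks the interrupt, and the handler above sets pm.gui_idle and wakes irq.idle_queue so the PM code can block until the GUI goes idle. A user-space analogue of the two halves (a condition variable standing in for the kernel wait queue):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t idle_queue = PTHREAD_COND_INITIALIZER;
static bool gui_idle;

/* Interrupt side: what the hunk does with wake_up(&rdev->irq.idle_queue). */
static void irq_gui_idle(void)
{
	pthread_mutex_lock(&lock);
	gui_idle = true;
	pthread_cond_broadcast(&idle_queue);
	pthread_mutex_unlock(&lock);
}

/* PM side: sleep until the GUI reports idle instead of busy-polling. */
static void wait_for_gui_idle(void)
{
	pthread_mutex_lock(&lock);
	while (!gui_idle)
		pthread_cond_wait(&idle_queue, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	irq_gui_idle();		/* pretend the interrupt fired */
	wait_for_gui_idle();
	printf("GUI idle, safe to reclock\n");
	return 0;
}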
@@ -411,6 +630,8 @@ int rs600_irq_process(struct radeon_device *rdev)
411 } 630 }
412 status = rs600_irq_ack(rdev, &r500_disp_int); 631 status = rs600_irq_ack(rdev, &r500_disp_int);
413 } 632 }
633 /* reset gui idle ack. the status bit is broken */
634 rdev->irq.gui_idle_acked = false;
414 if (queue_hotplug) 635 if (queue_hotplug)
415 queue_work(rdev->wq, &rdev->hotplug_work); 636 queue_work(rdev->wq, &rdev->hotplug_work);
416 if (rdev->msi_enabled) { 637 if (rdev->msi_enabled) {
@@ -454,7 +675,6 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
454 675
455void rs600_gpu_init(struct radeon_device *rdev) 676void rs600_gpu_init(struct radeon_device *rdev)
456{ 677{
457 r100_hdp_reset(rdev);
458 r420_pipes_init(rdev); 678 r420_pipes_init(rdev);
459 /* Wait for mc idle */ 679 /* Wait for mc idle */
460 if (rs600_mc_wait_for_idle(rdev)) 680 if (rs600_mc_wait_for_idle(rdev))
@@ -601,7 +821,7 @@ int rs600_resume(struct radeon_device *rdev)
601 /* Resume clock before doing reset */ 821 /* Resume clock before doing reset */
602 rv515_clock_startup(rdev); 822 rv515_clock_startup(rdev);
603 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 823 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
604 if (radeon_gpu_reset(rdev)) { 824 if (radeon_asic_reset(rdev)) {
605 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 825 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
606 RREG32(R_000E40_RBBM_STATUS), 826 RREG32(R_000E40_RBBM_STATUS),
607 RREG32(R_0007C0_CP_STAT)); 827 RREG32(R_0007C0_CP_STAT));
@@ -626,7 +846,6 @@ int rs600_suspend(struct radeon_device *rdev)
626 846
627void rs600_fini(struct radeon_device *rdev) 847void rs600_fini(struct radeon_device *rdev)
628{ 848{
629 radeon_pm_fini(rdev);
630 r100_cp_fini(rdev); 849 r100_cp_fini(rdev);
631 r100_wb_fini(rdev); 850 r100_wb_fini(rdev);
632 r100_ib_fini(rdev); 851 r100_ib_fini(rdev);
@@ -664,7 +883,7 @@ int rs600_init(struct radeon_device *rdev)
664 return -EINVAL; 883 return -EINVAL;
665 } 884 }
666 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 885 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
667 if (radeon_gpu_reset(rdev)) { 886 if (radeon_asic_reset(rdev)) {
668 dev_warn(rdev->dev, 887 dev_warn(rdev->dev,
669 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 888 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
670 RREG32(R_000E40_RBBM_STATUS), 889 RREG32(R_000E40_RBBM_STATUS),
@@ -676,8 +895,6 @@ int rs600_init(struct radeon_device *rdev)
676 895
677 /* Initialize clocks */ 896 /* Initialize clocks */
678 radeon_get_clock_info(rdev->ddev); 897 radeon_get_clock_info(rdev->ddev);
679 /* Initialize power management */
680 radeon_pm_init(rdev);
681 /* initialize memory controller */ 898 /* initialize memory controller */
682 rs600_mc_init(rdev); 899 rs600_mc_init(rdev);
683 rs600_debugfs(rdev); 900 rs600_debugfs(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index e52d2695510b..a27c13ac47c3 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -178,6 +178,52 @@
178#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0) 178#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
179#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF) 179#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
180#define C_000074_MC_IND_DATA 0x00000000 180#define C_000074_MC_IND_DATA 0x00000000
181#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
182#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
183#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
184#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
185#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
186#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
187#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
188#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
189#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
190#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
191#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
192#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
193#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
194#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
195#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
196#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
197#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
198#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
199#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
200#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
201#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
202#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
203#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
204#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
205#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
206#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
207#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
208#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
209#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
210#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
211#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
212#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
213#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
214#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
215#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
216#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
217#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
218#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
219#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
220#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
221#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
222#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
223#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
224#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
225#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
226#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
181#define R_000134_HDP_FB_LOCATION 0x000134 227#define R_000134_HDP_FB_LOCATION 0x000134
182#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) 228#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
183#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) 229#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
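The rs600d.h additions follow the usual radeon register-header convention: S_reg_FIELD(x) shifts a value into the field, G_reg_FIELD(x) extracts it, and C_reg_FIELD is the AND-mask that clears it, so a field update is clear-then-set. A two-line demo using the SOFT_RESET_GA macros copied from the hunk:

#include <stdint.h>
#include <stdio.h>

/* Copied from the hunk above. */
#define S_0000F0_SOFT_RESET_GA(x)  (((x) & 0x1) << 13)
#define G_0000F0_SOFT_RESET_GA(x)  (((x) >> 13) & 0x1)
#define C_0000F0_SOFT_RESET_GA     0xFFFFDFFF

int main(void)
{
	uint32_t reg = 0xdeadbeef;

	/* Standard field update: clear with C_, set with S_, read with G_. */
	reg = (reg & C_0000F0_SOFT_RESET_GA) | S_0000F0_SOFT_RESET_GA(1);
	printf("GA reset bit: %u\n", (unsigned)G_0000F0_SOFT_RESET_GA(reg));
	return 0;
}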
@@ -588,4 +634,38 @@
588#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) 634#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
589#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF 635#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
590 636
637/* PLL regs */
638#define GENERAL_PWRMGT 0x8
639#define GLOBAL_PWRMGT_EN (1 << 0)
640#define MOBILE_SU (1 << 2)
641#define DYN_PWRMGT_SCLK_LENGTH 0xc
642#define NORMAL_POWER_SCLK_HILEN(x) ((x) << 0)
643#define NORMAL_POWER_SCLK_LOLEN(x) ((x) << 4)
644#define REDUCED_POWER_SCLK_HILEN(x) ((x) << 8)
645#define REDUCED_POWER_SCLK_LOLEN(x) ((x) << 12)
646#define POWER_D1_SCLK_HILEN(x) ((x) << 16)
647#define POWER_D1_SCLK_LOLEN(x) ((x) << 20)
648#define STATIC_SCREEN_HILEN(x) ((x) << 24)
649#define STATIC_SCREEN_LOLEN(x) ((x) << 28)
650#define DYN_SCLK_VOL_CNTL 0xe
651#define IO_CG_VOLTAGE_DROP (1 << 0)
652#define VOLTAGE_DROP_SYNC (1 << 2)
653#define VOLTAGE_DELAY_SEL(x) ((x) << 3)
654#define HDP_DYN_CNTL 0x10
655#define HDP_FORCEON (1 << 0)
656#define MC_HOST_DYN_CNTL 0x1e
657#define MC_HOST_FORCEON (1 << 0)
658#define DYN_BACKBIAS_CNTL 0x29
659#define IO_CG_BACKBIAS_EN (1 << 0)
660
661/* mmreg */
662#define DOUT_POWER_MANAGEMENT_CNTL 0x7ee0
663#define PWRDN_WAIT_BUSY_OFF (1 << 0)
664#define PWRDN_WAIT_PWRSEQ_OFF (1 << 4)
665#define PWRDN_WAIT_PPLL_OFF (1 << 8)
666#define PWRUP_WAIT_PPLL_ON (1 << 12)
667#define PWRUP_WAIT_MEM_INIT_DONE (1 << 16)
668#define PM_ASSERT_RESET (1 << 20)
669#define PM_PWRDN_PPLL (1 << 24)
670
591#endif 671#endif
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index bbf3da790fd5..bcc33195ebc2 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -48,8 +48,6 @@ static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
48 48
49static void rs690_gpu_init(struct radeon_device *rdev) 49static void rs690_gpu_init(struct radeon_device *rdev)
50{ 50{
51 /* FIXME: HDP same place on rs690 ? */
52 r100_hdp_reset(rdev);
53 /* FIXME: is this correct ? */ 51 /* FIXME: is this correct ? */
54 r420_pipes_init(rdev); 52 r420_pipes_init(rdev);
55 if (rs690_mc_wait_for_idle(rdev)) { 53 if (rs690_mc_wait_for_idle(rdev)) {
@@ -78,59 +76,59 @@ void rs690_pm_info(struct radeon_device *rdev)
 78 /* Get various system information from the bios */ 76
79 switch (crev) { 77 switch (crev) {
80 case 1: 78 case 1:
81 tmp.full = rfixed_const(100); 79 tmp.full = dfixed_const(100);
82 rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock); 80 rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
83 rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); 81 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
84 rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); 82 rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
85 rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock)); 83 rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
86 rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth); 84 rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
87 break; 85 break;
88 case 2: 86 case 2:
89 tmp.full = rfixed_const(100); 87 tmp.full = dfixed_const(100);
90 rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock); 88 rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
91 rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); 89 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
92 rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock); 90 rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
93 rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); 91 rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
94 rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq); 92 rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
95 rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); 93 rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
96 rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); 94 rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
97 break; 95 break;
98 default: 96 default:
99 tmp.full = rfixed_const(100); 97 tmp.full = dfixed_const(100);
 100 /* We assume the slowest possible clock, i.e. worst case */ 98
 101 /* DDR 333MHz */ 99
102 rdev->pm.igp_sideport_mclk.full = rfixed_const(333); 100 rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
103 /* FIXME: system clock ? */ 101 /* FIXME: system clock ? */
104 rdev->pm.igp_system_mclk.full = rfixed_const(100); 102 rdev->pm.igp_system_mclk.full = dfixed_const(100);
105 rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); 103 rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
106 rdev->pm.igp_ht_link_clk.full = rfixed_const(200); 104 rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
107 rdev->pm.igp_ht_link_width.full = rfixed_const(8); 105 rdev->pm.igp_ht_link_width.full = dfixed_const(8);
108 DRM_ERROR("No integrated system info for your GPU, using safe default\n"); 106 DRM_ERROR("No integrated system info for your GPU, using safe default\n");
109 break; 107 break;
110 } 108 }
111 } else { 109 } else {
112 tmp.full = rfixed_const(100); 110 tmp.full = dfixed_const(100);
 113 /* We assume the slowest possible clock, i.e. worst case */ 111
 114 /* DDR 333MHz */ 112
115 rdev->pm.igp_sideport_mclk.full = rfixed_const(333); 113 rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
116 /* FIXME: system clock ? */ 114 /* FIXME: system clock ? */
117 rdev->pm.igp_system_mclk.full = rfixed_const(100); 115 rdev->pm.igp_system_mclk.full = dfixed_const(100);
118 rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); 116 rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
119 rdev->pm.igp_ht_link_clk.full = rfixed_const(200); 117 rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
120 rdev->pm.igp_ht_link_width.full = rfixed_const(8); 118 rdev->pm.igp_ht_link_width.full = dfixed_const(8);
121 DRM_ERROR("No integrated system info for your GPU, using safe default\n"); 119 DRM_ERROR("No integrated system info for your GPU, using safe default\n");
122 } 120 }
123 /* Compute various bandwidth */ 121 /* Compute various bandwidth */
124 /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ 122 /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
125 tmp.full = rfixed_const(4); 123 tmp.full = dfixed_const(4);
126 rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp); 124 rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
127 /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 125 /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
128 * = ht_clk * ht_width / 5 126 * = ht_clk * ht_width / 5
129 */ 127 */
130 tmp.full = rfixed_const(5); 128 tmp.full = dfixed_const(5);
131 rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk, 129 rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
132 rdev->pm.igp_ht_link_width); 130 rdev->pm.igp_ht_link_width);
133 rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp); 131 rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
134 if (tmp.full < rdev->pm.max_bandwidth.full) { 132 if (tmp.full < rdev->pm.max_bandwidth.full) {
135 /* HT link is a limiting factor */ 133 /* HT link is a limiting factor */
136 rdev->pm.max_bandwidth.full = tmp.full; 134 rdev->pm.max_bandwidth.full = tmp.full;
@@ -138,10 +136,10 @@ void rs690_pm_info(struct radeon_device *rdev)
138 /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 136 /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
139 * = (sideport_clk * 14) / 10 137 * = (sideport_clk * 14) / 10
140 */ 138 */
141 tmp.full = rfixed_const(14); 139 tmp.full = dfixed_const(14);
142 rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp); 140 rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
143 tmp.full = rfixed_const(10); 141 tmp.full = dfixed_const(10);
144 rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); 142 rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
145} 143}
146 144
147void rs690_mc_init(struct radeon_device *rdev) 145void rs690_mc_init(struct radeon_device *rdev)
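Beyond the rfixed_ -> dfixed_ rename (the helpers now come from the shared drm fixed-point code), the bandwidth formulas in rs690_pm_info() reduce to simple products once the constant factors in the comments are folded in. The same arithmetic with doubles standing in for the 20.12 fixed-point type (the input clocks are illustrative):

#include <stdio.h>

int main(void)
{
	double system_mclk = 400.0, ht_clk = 1000.0, ht_width = 16.0;
	double sideport_mclk = 333.0;

	/* k8_bandwidth = (mclk / 2) * 2 * 8 * 0.5 = mclk * 4 */
	double k8_bw = system_mclk * 4.0;
	/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 = ht_clk * width / 5 */
	double ht_bw = ht_clk * ht_width / 5.0;
	/* sideport_bandwidth = (clk / 2) * 2 * 2 * 0.7 = clk * 14 / 10 */
	double sp_bw = sideport_mclk * 14.0 / 10.0;

	printf("k8 %.0f  ht %.0f  sideport %.0f\n", k8_bw, ht_bw, sp_bw);
	return 0;
}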
@@ -241,20 +239,20 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
241 return; 239 return;
242 } 240 }
243 241
244 if (crtc->vsc.full > rfixed_const(2)) 242 if (crtc->vsc.full > dfixed_const(2))
245 wm->num_line_pair.full = rfixed_const(2); 243 wm->num_line_pair.full = dfixed_const(2);
246 else 244 else
247 wm->num_line_pair.full = rfixed_const(1); 245 wm->num_line_pair.full = dfixed_const(1);
248 246
249 b.full = rfixed_const(mode->crtc_hdisplay); 247 b.full = dfixed_const(mode->crtc_hdisplay);
250 c.full = rfixed_const(256); 248 c.full = dfixed_const(256);
251 a.full = rfixed_div(b, c); 249 a.full = dfixed_div(b, c);
252 request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair); 250 request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
253 request_fifo_depth.full = rfixed_ceil(request_fifo_depth); 251 request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
254 if (a.full < rfixed_const(4)) { 252 if (a.full < dfixed_const(4)) {
255 wm->lb_request_fifo_depth = 4; 253 wm->lb_request_fifo_depth = 4;
256 } else { 254 } else {
257 wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); 255 wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
258 } 256 }
259 257
260 /* Determine consumption rate 258 /* Determine consumption rate
@@ -263,23 +261,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
263 * vsc = vertical scaling ratio, defined as source/destination 261 * vsc = vertical scaling ratio, defined as source/destination
 264 * hsc = horizontal scaling ratio, defined as source/destination 262
265 */ 263 */
266 a.full = rfixed_const(mode->clock); 264 a.full = dfixed_const(mode->clock);
267 b.full = rfixed_const(1000); 265 b.full = dfixed_const(1000);
268 a.full = rfixed_div(a, b); 266 a.full = dfixed_div(a, b);
269 pclk.full = rfixed_div(b, a); 267 pclk.full = dfixed_div(b, a);
270 if (crtc->rmx_type != RMX_OFF) { 268 if (crtc->rmx_type != RMX_OFF) {
271 b.full = rfixed_const(2); 269 b.full = dfixed_const(2);
272 if (crtc->vsc.full > b.full) 270 if (crtc->vsc.full > b.full)
273 b.full = crtc->vsc.full; 271 b.full = crtc->vsc.full;
274 b.full = rfixed_mul(b, crtc->hsc); 272 b.full = dfixed_mul(b, crtc->hsc);
275 c.full = rfixed_const(2); 273 c.full = dfixed_const(2);
276 b.full = rfixed_div(b, c); 274 b.full = dfixed_div(b, c);
277 consumption_time.full = rfixed_div(pclk, b); 275 consumption_time.full = dfixed_div(pclk, b);
278 } else { 276 } else {
279 consumption_time.full = pclk.full; 277 consumption_time.full = pclk.full;
280 } 278 }
281 a.full = rfixed_const(1); 279 a.full = dfixed_const(1);
282 wm->consumption_rate.full = rfixed_div(a, consumption_time); 280 wm->consumption_rate.full = dfixed_div(a, consumption_time);
283 281
284 282
285 /* Determine line time 283 /* Determine line time
@@ -287,18 +285,18 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
287 * LineTime = total number of horizontal pixels 285 * LineTime = total number of horizontal pixels
288 * pclk = pixel clock period(ns) 286 * pclk = pixel clock period(ns)
289 */ 287 */
290 a.full = rfixed_const(crtc->base.mode.crtc_htotal); 288 a.full = dfixed_const(crtc->base.mode.crtc_htotal);
291 line_time.full = rfixed_mul(a, pclk); 289 line_time.full = dfixed_mul(a, pclk);
292 290
293 /* Determine active time 291 /* Determine active time
294 * ActiveTime = time of active region of display within one line, 292 * ActiveTime = time of active region of display within one line,
295 * hactive = total number of horizontal active pixels 293 * hactive = total number of horizontal active pixels
296 * htotal = total number of horizontal pixels 294 * htotal = total number of horizontal pixels
297 */ 295 */
298 a.full = rfixed_const(crtc->base.mode.crtc_htotal); 296 a.full = dfixed_const(crtc->base.mode.crtc_htotal);
299 b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); 297 b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
300 wm->active_time.full = rfixed_mul(line_time, b); 298 wm->active_time.full = dfixed_mul(line_time, b);
301 wm->active_time.full = rfixed_div(wm->active_time, a); 299 wm->active_time.full = dfixed_div(wm->active_time, a);
302 300
 303 /* Maximum bandwidth is the minimum bandwidth of all components */ 301
304 rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; 302 rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
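The watermark math in the two hunks above works in nanoseconds: mode->clock is in kHz, so clock/1000 is the pixel rate in MHz and its reciprocal (times 1000) is the pixel period; line time is htotal pixel periods and active time is its hdisplay fraction. Worked numbers for the unscaled case (the mode values are illustrative):

#include <stdio.h>

int main(void)
{
	double clock_khz = 108000.0;	/* illustrative pixel clock, kHz */
	double htotal = 1688.0, hdisplay = 1280.0;

	double pclk = 1000.0 / (clock_khz / 1000.0);	/* ns per pixel */
	double consumption_rate = 1.0 / pclk;		/* pixels per ns */
	double line_time = htotal * pclk;		/* ns per line */
	double active_time = line_time * hdisplay / htotal;

	printf("pclk %.2f ns, line %.0f ns, active %.0f ns, rate %.3f px/ns\n",
	       pclk, line_time, active_time, consumption_rate);
	return 0;
}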
@@ -306,8 +304,8 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
         if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
             rdev->pm.sideport_bandwidth.full)
             rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
-        read_delay_latency.full = rfixed_const(370 * 800 * 1000);
+        read_delay_latency.full = dfixed_const(370 * 800 * 1000);
-        read_delay_latency.full = rfixed_div(read_delay_latency,
+        read_delay_latency.full = dfixed_div(read_delay_latency,
                                              rdev->pm.igp_sideport_mclk);
     } else {
         if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
@@ -316,23 +314,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
         if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
             rdev->pm.ht_bandwidth.full)
             rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
-        read_delay_latency.full = rfixed_const(5000);
+        read_delay_latency.full = dfixed_const(5000);
     }

     /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
-    a.full = rfixed_const(16);
+    a.full = dfixed_const(16);
-    rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
+    rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
-    a.full = rfixed_const(1000);
+    a.full = dfixed_const(1000);
-    rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
+    rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
     /* Determine chunk time
      * ChunkTime = the time it takes the DCP to send one chunk of data
      * to the LB which consists of pipeline delay and inter chunk gap
      * sclk = system clock(ns)
      */
-    a.full = rfixed_const(256 * 13);
+    a.full = dfixed_const(256 * 13);
-    chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
+    chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
-    a.full = rfixed_const(10);
+    a.full = dfixed_const(10);
-    chunk_time.full = rfixed_div(chunk_time, a);
+    chunk_time.full = dfixed_div(chunk_time, a);

     /* Determine the worst case latency
      * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -342,13 +340,13 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
      * ChunkTime = time it takes the DCP to send one chunk of data to the LB
      * which consists of pipeline delay and inter chunk gap
      */
-    if (rfixed_trunc(wm->num_line_pair) > 1) {
+    if (dfixed_trunc(wm->num_line_pair) > 1) {
-        a.full = rfixed_const(3);
+        a.full = dfixed_const(3);
-        wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+        wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
         wm->worst_case_latency.full += read_delay_latency.full;
     } else {
-        a.full = rfixed_const(2);
+        a.full = dfixed_const(2);
-        wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+        wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
         wm->worst_case_latency.full += read_delay_latency.full;
     }

@@ -362,34 +360,34 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
      * of data to the LB which consists of
      * pipeline delay and inter chunk gap
      */
-    if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+    if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
         tolerable_latency.full = line_time.full;
     } else {
-        tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+        tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
         tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
-        tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+        tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
         tolerable_latency.full = line_time.full - tolerable_latency.full;
     }
     /* We assume worst case 32bits (4 bytes) */
-    wm->dbpp.full = rfixed_const(4 * 8);
+    wm->dbpp.full = dfixed_const(4 * 8);

     /* Determine the maximum priority mark
      * width = viewport width in pixels
      */
-    a.full = rfixed_const(16);
+    a.full = dfixed_const(16);
-    wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
+    wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
-    wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+    wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
-    wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+    wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);

     /* Determine estimated width */
     estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
-    estimated_width.full = rfixed_div(estimated_width, consumption_time);
+    estimated_width.full = dfixed_div(estimated_width, consumption_time);
-    if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+    if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
-        wm->priority_mark.full = rfixed_const(10);
+        wm->priority_mark.full = dfixed_const(10);
     } else {
-        a.full = rfixed_const(16);
+        a.full = dfixed_const(16);
-        wm->priority_mark.full = rfixed_div(estimated_width, a);
+        wm->priority_mark.full = dfixed_div(estimated_width, a);
-        wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+        wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
         wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
     }
 }
@@ -441,58 +439,58 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
     WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);

     if (mode0 && mode1) {
-        if (rfixed_trunc(wm0.dbpp) > 64)
+        if (dfixed_trunc(wm0.dbpp) > 64)
-            a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
+            a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
         else
             a.full = wm0.num_line_pair.full;
-        if (rfixed_trunc(wm1.dbpp) > 64)
+        if (dfixed_trunc(wm1.dbpp) > 64)
-            b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
+            b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
         else
             b.full = wm1.num_line_pair.full;
         a.full += b.full;
-        fill_rate.full = rfixed_div(wm0.sclk, a);
+        fill_rate.full = dfixed_div(wm0.sclk, a);
         if (wm0.consumption_rate.full > fill_rate.full) {
             b.full = wm0.consumption_rate.full - fill_rate.full;
-            b.full = rfixed_mul(b, wm0.active_time);
+            b.full = dfixed_mul(b, wm0.active_time);
-            a.full = rfixed_mul(wm0.worst_case_latency,
+            a.full = dfixed_mul(wm0.worst_case_latency,
                                 wm0.consumption_rate);
             a.full = a.full + b.full;
-            b.full = rfixed_const(16 * 1000);
+            b.full = dfixed_const(16 * 1000);
-            priority_mark02.full = rfixed_div(a, b);
+            priority_mark02.full = dfixed_div(a, b);
         } else {
-            a.full = rfixed_mul(wm0.worst_case_latency,
+            a.full = dfixed_mul(wm0.worst_case_latency,
                                 wm0.consumption_rate);
-            b.full = rfixed_const(16 * 1000);
+            b.full = dfixed_const(16 * 1000);
-            priority_mark02.full = rfixed_div(a, b);
+            priority_mark02.full = dfixed_div(a, b);
         }
         if (wm1.consumption_rate.full > fill_rate.full) {
             b.full = wm1.consumption_rate.full - fill_rate.full;
-            b.full = rfixed_mul(b, wm1.active_time);
+            b.full = dfixed_mul(b, wm1.active_time);
-            a.full = rfixed_mul(wm1.worst_case_latency,
+            a.full = dfixed_mul(wm1.worst_case_latency,
                                 wm1.consumption_rate);
             a.full = a.full + b.full;
-            b.full = rfixed_const(16 * 1000);
+            b.full = dfixed_const(16 * 1000);
-            priority_mark12.full = rfixed_div(a, b);
+            priority_mark12.full = dfixed_div(a, b);
         } else {
-            a.full = rfixed_mul(wm1.worst_case_latency,
+            a.full = dfixed_mul(wm1.worst_case_latency,
                                 wm1.consumption_rate);
-            b.full = rfixed_const(16 * 1000);
+            b.full = dfixed_const(16 * 1000);
-            priority_mark12.full = rfixed_div(a, b);
+            priority_mark12.full = dfixed_div(a, b);
         }
         if (wm0.priority_mark.full > priority_mark02.full)
             priority_mark02.full = wm0.priority_mark.full;
-        if (rfixed_trunc(priority_mark02) < 0)
+        if (dfixed_trunc(priority_mark02) < 0)
             priority_mark02.full = 0;
         if (wm0.priority_mark_max.full > priority_mark02.full)
             priority_mark02.full = wm0.priority_mark_max.full;
         if (wm1.priority_mark.full > priority_mark12.full)
             priority_mark12.full = wm1.priority_mark.full;
-        if (rfixed_trunc(priority_mark12) < 0)
+        if (dfixed_trunc(priority_mark12) < 0)
             priority_mark12.full = 0;
         if (wm1.priority_mark_max.full > priority_mark12.full)
             priority_mark12.full = wm1.priority_mark_max.full;
-        d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+        d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
-        d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+        d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
         if (rdev->disp_priority == 2) {
             d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
             d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
@@ -502,32 +500,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
         WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
         WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
     } else if (mode0) {
-        if (rfixed_trunc(wm0.dbpp) > 64)
+        if (dfixed_trunc(wm0.dbpp) > 64)
-            a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
+            a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
         else
             a.full = wm0.num_line_pair.full;
-        fill_rate.full = rfixed_div(wm0.sclk, a);
+        fill_rate.full = dfixed_div(wm0.sclk, a);
         if (wm0.consumption_rate.full > fill_rate.full) {
             b.full = wm0.consumption_rate.full - fill_rate.full;
-            b.full = rfixed_mul(b, wm0.active_time);
+            b.full = dfixed_mul(b, wm0.active_time);
-            a.full = rfixed_mul(wm0.worst_case_latency,
+            a.full = dfixed_mul(wm0.worst_case_latency,
                                 wm0.consumption_rate);
             a.full = a.full + b.full;
-            b.full = rfixed_const(16 * 1000);
+            b.full = dfixed_const(16 * 1000);
-            priority_mark02.full = rfixed_div(a, b);
+            priority_mark02.full = dfixed_div(a, b);
         } else {
-            a.full = rfixed_mul(wm0.worst_case_latency,
+            a.full = dfixed_mul(wm0.worst_case_latency,
                                 wm0.consumption_rate);
-            b.full = rfixed_const(16 * 1000);
+            b.full = dfixed_const(16 * 1000);
-            priority_mark02.full = rfixed_div(a, b);
+            priority_mark02.full = dfixed_div(a, b);
         }
         if (wm0.priority_mark.full > priority_mark02.full)
             priority_mark02.full = wm0.priority_mark.full;
-        if (rfixed_trunc(priority_mark02) < 0)
+        if (dfixed_trunc(priority_mark02) < 0)
             priority_mark02.full = 0;
         if (wm0.priority_mark_max.full > priority_mark02.full)
             priority_mark02.full = wm0.priority_mark_max.full;
-        d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+        d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
         if (rdev->disp_priority == 2)
             d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
         WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
@@ -537,32 +535,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
         WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
                S_006D4C_D2MODE_PRIORITY_B_OFF(1));
     } else {
-        if (rfixed_trunc(wm1.dbpp) > 64)
+        if (dfixed_trunc(wm1.dbpp) > 64)
-            a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
+            a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
         else
             a.full = wm1.num_line_pair.full;
-        fill_rate.full = rfixed_div(wm1.sclk, a);
+        fill_rate.full = dfixed_div(wm1.sclk, a);
         if (wm1.consumption_rate.full > fill_rate.full) {
             b.full = wm1.consumption_rate.full - fill_rate.full;
-            b.full = rfixed_mul(b, wm1.active_time);
+            b.full = dfixed_mul(b, wm1.active_time);
-            a.full = rfixed_mul(wm1.worst_case_latency,
+            a.full = dfixed_mul(wm1.worst_case_latency,
                                 wm1.consumption_rate);
             a.full = a.full + b.full;
-            b.full = rfixed_const(16 * 1000);
+            b.full = dfixed_const(16 * 1000);
-            priority_mark12.full = rfixed_div(a, b);
+            priority_mark12.full = dfixed_div(a, b);
         } else {
-            a.full = rfixed_mul(wm1.worst_case_latency,
+            a.full = dfixed_mul(wm1.worst_case_latency,
                                 wm1.consumption_rate);
-            b.full = rfixed_const(16 * 1000);
+            b.full = dfixed_const(16 * 1000);
-            priority_mark12.full = rfixed_div(a, b);
+            priority_mark12.full = dfixed_div(a, b);
         }
         if (wm1.priority_mark.full > priority_mark12.full)
             priority_mark12.full = wm1.priority_mark.full;
-        if (rfixed_trunc(priority_mark12) < 0)
+        if (dfixed_trunc(priority_mark12) < 0)
             priority_mark12.full = 0;
         if (wm1.priority_mark_max.full > priority_mark12.full)
             priority_mark12.full = wm1.priority_mark_max.full;
-        d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+        d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
         if (rdev->disp_priority == 2)
             d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
         WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
@@ -653,7 +651,7 @@ int rs690_resume(struct radeon_device *rdev)
     /* Resume clock before doing reset */
     rv515_clock_startup(rdev);
     /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-    if (radeon_gpu_reset(rdev)) {
+    if (radeon_asic_reset(rdev)) {
         dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                  RREG32(R_000E40_RBBM_STATUS),
                  RREG32(R_0007C0_CP_STAT));
@@ -678,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)

 void rs690_fini(struct radeon_device *rdev)
 {
-    radeon_pm_fini(rdev);
     r100_cp_fini(rdev);
     r100_wb_fini(rdev);
     r100_ib_fini(rdev);
@@ -717,7 +714,7 @@ int rs690_init(struct radeon_device *rdev)
         return -EINVAL;
     }
     /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-    if (radeon_gpu_reset(rdev)) {
+    if (radeon_asic_reset(rdev)) {
         dev_warn(rdev->dev,
                  "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                  RREG32(R_000E40_RBBM_STATUS),
@@ -729,8 +726,6 @@ int rs690_init(struct radeon_device *rdev)

     /* Initialize clocks */
     radeon_get_clock_info(rdev->ddev);
-    /* Initialize power management */
-    radeon_pm_init(rdev);
     /* initialize memory controller */
     rs690_mc_init(rdev);
     rv515_debugfs(rdev);
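
[Editor's note: the bulk of the radeon churn above is a mechanical rename of the driver-private rfixed_* fixed-point helpers to the shared dfixed_* names, which this merge moves into a common DRM header. The helpers implement 20.12 fixed-point arithmetic on a 32-bit value. The self-contained sketch below illustrates the convention only; the macro and function bodies are reconstructed from context and may differ in detail (e.g. rounding) from the in-tree header.]

    /* 20.12 fixed-point sketch in the style of the dfixed_* helpers.
     * Reconstructed for illustration, not copied from the kernel. */
    #include <stdint.h>
    #include <stdio.h>

    typedef union { uint32_t full; } fixed20_12;

    #define dfixed_const(A) ((uint32_t)((A) << 12))  /* int -> 20.12 */
    #define dfixed_trunc(A) ((A).full >> 12)         /* 20.12 -> int */

    static uint32_t dfixed_mul(fixed20_12 a, fixed20_12 b)
    {
        /* widen to 64 bits so the product does not overflow */
        return (uint32_t)(((uint64_t)a.full * b.full) >> 12);
    }

    static uint32_t dfixed_div(fixed20_12 a, fixed20_12 b)
    {
        /* pre-shift the dividend to keep 12 fractional bits */
        return (uint32_t)(((uint64_t)a.full << 12) / b.full);
    }

    int main(void)
    {
        /* pixel clock period in ns, as in the watermark code above:
         * a = mode->clock / 1000 (MHz), pclk = 1000 / a (ns) */
        fixed20_12 a, b, pclk;
        a.full = dfixed_const(135000);   /* 135000 kHz pixel clock */
        b.full = dfixed_const(1000);
        a.full = dfixed_div(a, b);       /* 135 MHz */
        pclk.full = dfixed_div(b, a);    /* ~7.4 ns */
        printf("pclk ~= %u ns (truncated)\n", dfixed_trunc(pclk));
        return 0;
    }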
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 9035121f4b58..7d9a7b0a180a 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -147,16 +147,11 @@ void rv515_gpu_init(struct radeon_device *rdev)
 {
     unsigned pipe_select_current, gb_pipe_select, tmp;

-    r100_hdp_reset(rdev);
-    r100_rb2d_reset(rdev);
-
     if (r100_gui_wait_for_idle(rdev)) {
         printk(KERN_WARNING "Failed to wait GUI idle while "
                "reseting GPU. Bad things might happen.\n");
     }
-
     rv515_vga_render_disable(rdev);
-
     r420_pipes_init(rdev);
     gb_pipe_select = RREG32(0x402C);
     tmp = RREG32(0x170C);
@@ -174,91 +169,6 @@ void rv515_gpu_init(struct radeon_device *rdev)
     }
 }

-int rv515_ga_reset(struct radeon_device *rdev)
-{
-    uint32_t tmp;
-    bool reinit_cp;
-    int i;
-
-    reinit_cp = rdev->cp.ready;
-    rdev->cp.ready = false;
-    for (i = 0; i < rdev->usec_timeout; i++) {
-        WREG32(CP_CSQ_MODE, 0);
-        WREG32(CP_CSQ_CNTL, 0);
-        WREG32(RBBM_SOFT_RESET, 0x32005);
-        (void)RREG32(RBBM_SOFT_RESET);
-        udelay(200);
-        WREG32(RBBM_SOFT_RESET, 0);
-        /* Wait to prevent race in RBBM_STATUS */
-        mdelay(1);
-        tmp = RREG32(RBBM_STATUS);
-        if (tmp & ((1 << 20) | (1 << 26))) {
-            DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
-            /* GA still busy soft reset it */
-            WREG32(0x429C, 0x200);
-            WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
-            WREG32(0x43E0, 0);
-            WREG32(0x43E4, 0);
-            WREG32(0x24AC, 0);
-        }
-        /* Wait to prevent race in RBBM_STATUS */
-        mdelay(1);
-        tmp = RREG32(RBBM_STATUS);
-        if (!(tmp & ((1 << 20) | (1 << 26)))) {
-            break;
-        }
-    }
-    for (i = 0; i < rdev->usec_timeout; i++) {
-        tmp = RREG32(RBBM_STATUS);
-        if (!(tmp & ((1 << 20) | (1 << 26)))) {
-            DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
-                     tmp);
-            DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
-            DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
-            DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
-            if (reinit_cp) {
-                return r100_cp_init(rdev, rdev->cp.ring_size);
-            }
-            return 0;
-        }
-        DRM_UDELAY(1);
-    }
-    tmp = RREG32(RBBM_STATUS);
-    DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
-    return -1;
-}
-
-int rv515_gpu_reset(struct radeon_device *rdev)
-{
-    uint32_t status;
-
-    /* reset order likely matter */
-    status = RREG32(RBBM_STATUS);
-    /* reset HDP */
-    r100_hdp_reset(rdev);
-    /* reset rb2d */
-    if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
-        r100_rb2d_reset(rdev);
-    }
-    /* reset GA */
-    if (status & ((1 << 20) | (1 << 26))) {
-        rv515_ga_reset(rdev);
-    }
-    /* reset CP */
-    status = RREG32(RBBM_STATUS);
-    if (status & (1 << 16)) {
-        r100_cp_reset(rdev);
-    }
-    /* Check if GPU is idle */
-    status = RREG32(RBBM_STATUS);
-    if (status & (1 << 31)) {
-        DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
-        return -1;
-    }
-    DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
-    return 0;
-}
-
 static void rv515_vram_get_type(struct radeon_device *rdev)
 {
     uint32_t tmp;
@@ -335,7 +245,7 @@ static int rv515_debugfs_ga_info(struct seq_file *m, void *data)

     tmp = RREG32(0x2140);
     seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
-    radeon_gpu_reset(rdev);
+    radeon_asic_reset(rdev);
     tmp = RREG32(0x425C);
     seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
     return 0;
@@ -503,7 +413,7 @@ int rv515_resume(struct radeon_device *rdev)
     /* Resume clock before doing reset */
     rv515_clock_startup(rdev);
     /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-    if (radeon_gpu_reset(rdev)) {
+    if (radeon_asic_reset(rdev)) {
         dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                  RREG32(R_000E40_RBBM_STATUS),
                  RREG32(R_0007C0_CP_STAT));
@@ -535,7 +445,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)

 void rv515_fini(struct radeon_device *rdev)
 {
-    radeon_pm_fini(rdev);
     r100_cp_fini(rdev);
     r100_wb_fini(rdev);
     r100_ib_fini(rdev);
@@ -573,7 +482,7 @@ int rv515_init(struct radeon_device *rdev)
         return -EINVAL;
     }
     /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-    if (radeon_gpu_reset(rdev)) {
+    if (radeon_asic_reset(rdev)) {
         dev_warn(rdev->dev,
                  "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                  RREG32(R_000E40_RBBM_STATUS),
@@ -584,8 +493,6 @@ int rv515_init(struct radeon_device *rdev)
         return -EINVAL;
     /* Initialize clocks */
     radeon_get_clock_info(rdev->ddev);
-    /* Initialize power management */
-    radeon_pm_init(rdev);
     /* initialize AGP */
     if (rdev->flags & RADEON_IS_AGP) {
         r = radeon_agp_init(rdev);
@@ -885,20 +792,20 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
         return;
     }

-    if (crtc->vsc.full > rfixed_const(2))
+    if (crtc->vsc.full > dfixed_const(2))
-        wm->num_line_pair.full = rfixed_const(2);
+        wm->num_line_pair.full = dfixed_const(2);
     else
-        wm->num_line_pair.full = rfixed_const(1);
+        wm->num_line_pair.full = dfixed_const(1);

-    b.full = rfixed_const(mode->crtc_hdisplay);
+    b.full = dfixed_const(mode->crtc_hdisplay);
-    c.full = rfixed_const(256);
+    c.full = dfixed_const(256);
-    a.full = rfixed_div(b, c);
+    a.full = dfixed_div(b, c);
-    request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+    request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
-    request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
+    request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
-    if (a.full < rfixed_const(4)) {
+    if (a.full < dfixed_const(4)) {
         wm->lb_request_fifo_depth = 4;
     } else {
-        wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
+        wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
     }

     /* Determine consumption rate
@@ -907,23 +814,23 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
      * vsc = vertical scaling ratio, defined as source/destination
      * hsc = horizontal scaling ration, defined as source/destination
      */
-    a.full = rfixed_const(mode->clock);
+    a.full = dfixed_const(mode->clock);
-    b.full = rfixed_const(1000);
+    b.full = dfixed_const(1000);
-    a.full = rfixed_div(a, b);
+    a.full = dfixed_div(a, b);
-    pclk.full = rfixed_div(b, a);
+    pclk.full = dfixed_div(b, a);
     if (crtc->rmx_type != RMX_OFF) {
-        b.full = rfixed_const(2);
+        b.full = dfixed_const(2);
         if (crtc->vsc.full > b.full)
             b.full = crtc->vsc.full;
-        b.full = rfixed_mul(b, crtc->hsc);
+        b.full = dfixed_mul(b, crtc->hsc);
-        c.full = rfixed_const(2);
+        c.full = dfixed_const(2);
-        b.full = rfixed_div(b, c);
+        b.full = dfixed_div(b, c);
-        consumption_time.full = rfixed_div(pclk, b);
+        consumption_time.full = dfixed_div(pclk, b);
     } else {
         consumption_time.full = pclk.full;
     }
-    a.full = rfixed_const(1);
+    a.full = dfixed_const(1);
-    wm->consumption_rate.full = rfixed_div(a, consumption_time);
+    wm->consumption_rate.full = dfixed_div(a, consumption_time);


     /* Determine line time
@@ -931,27 +838,27 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
      * LineTime = total number of horizontal pixels
      * pclk = pixel clock period(ns)
      */
-    a.full = rfixed_const(crtc->base.mode.crtc_htotal);
+    a.full = dfixed_const(crtc->base.mode.crtc_htotal);
-    line_time.full = rfixed_mul(a, pclk);
+    line_time.full = dfixed_mul(a, pclk);

     /* Determine active time
      * ActiveTime = time of active region of display within one line,
      * hactive = total number of horizontal active pixels
      * htotal = total number of horizontal pixels
      */
-    a.full = rfixed_const(crtc->base.mode.crtc_htotal);
+    a.full = dfixed_const(crtc->base.mode.crtc_htotal);
-    b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
+    b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
-    wm->active_time.full = rfixed_mul(line_time, b);
+    wm->active_time.full = dfixed_mul(line_time, b);
-    wm->active_time.full = rfixed_div(wm->active_time, a);
+    wm->active_time.full = dfixed_div(wm->active_time, a);

     /* Determine chunk time
      * ChunkTime = the time it takes the DCP to send one chunk of data
      * to the LB which consists of pipeline delay and inter chunk gap
      * sclk = system clock(Mhz)
      */
-    a.full = rfixed_const(600 * 1000);
+    a.full = dfixed_const(600 * 1000);
-    chunk_time.full = rfixed_div(a, rdev->pm.sclk);
+    chunk_time.full = dfixed_div(a, rdev->pm.sclk);
-    read_delay_latency.full = rfixed_const(1000);
+    read_delay_latency.full = dfixed_const(1000);

     /* Determine the worst case latency
      * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -961,9 +868,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
      * ChunkTime = time it takes the DCP to send one chunk of data to the LB
      * which consists of pipeline delay and inter chunk gap
      */
-    if (rfixed_trunc(wm->num_line_pair) > 1) {
+    if (dfixed_trunc(wm->num_line_pair) > 1) {
-        a.full = rfixed_const(3);
+        a.full = dfixed_const(3);
-        wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+        wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
         wm->worst_case_latency.full += read_delay_latency.full;
     } else {
         wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
@@ -979,34 +886,34 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
      * of data to the LB which consists of
      * pipeline delay and inter chunk gap
      */
-    if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+    if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
         tolerable_latency.full = line_time.full;
     } else {
-        tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+        tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
         tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
-        tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+        tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
         tolerable_latency.full = line_time.full - tolerable_latency.full;
     }
     /* We assume worst case 32bits (4 bytes) */
-    wm->dbpp.full = rfixed_const(2 * 16);
+    wm->dbpp.full = dfixed_const(2 * 16);

     /* Determine the maximum priority mark
      * width = viewport width in pixels
      */
-    a.full = rfixed_const(16);
+    a.full = dfixed_const(16);
-    wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
+    wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
-    wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+    wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
-    wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+    wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);

     /* Determine estimated width */
     estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
-    estimated_width.full = rfixed_div(estimated_width, consumption_time);
+    estimated_width.full = dfixed_div(estimated_width, consumption_time);
-    if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+    if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
         wm->priority_mark.full = wm->priority_mark_max.full;
     } else {
-        a.full = rfixed_const(16);
+        a.full = dfixed_const(16);
-        wm->priority_mark.full = rfixed_div(estimated_width, a);
+        wm->priority_mark.full = dfixed_div(estimated_width, a);
-        wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+        wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
         wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
     }
 }
@@ -1035,58 +942,58 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
     WREG32(LB_MAX_REQ_OUTSTANDING, tmp);

     if (mode0 && mode1) {
-        if (rfixed_trunc(wm0.dbpp) > 64)
+        if (dfixed_trunc(wm0.dbpp) > 64)
-            a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+            a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
         else
             a.full = wm0.num_line_pair.full;
-        if (rfixed_trunc(wm1.dbpp) > 64)
+        if (dfixed_trunc(wm1.dbpp) > 64)
-            b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+            b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
         else
             b.full = wm1.num_line_pair.full;
         a.full += b.full;
-        fill_rate.full = rfixed_div(wm0.sclk, a);
+        fill_rate.full = dfixed_div(wm0.sclk, a);
         if (wm0.consumption_rate.full > fill_rate.full) {
             b.full = wm0.consumption_rate.full - fill_rate.full;
-            b.full = rfixed_mul(b, wm0.active_time);
+            b.full = dfixed_mul(b, wm0.active_time);
-            a.full = rfixed_const(16);
+            a.full = dfixed_const(16);
-            b.full = rfixed_div(b, a);
+            b.full = dfixed_div(b, a);
-            a.full = rfixed_mul(wm0.worst_case_latency,
+            a.full = dfixed_mul(wm0.worst_case_latency,
                                 wm0.consumption_rate);
             priority_mark02.full = a.full + b.full;
         } else {
-            a.full = rfixed_mul(wm0.worst_case_latency,
+            a.full = dfixed_mul(wm0.worst_case_latency,
                                 wm0.consumption_rate);
-            b.full = rfixed_const(16 * 1000);
+            b.full = dfixed_const(16 * 1000);
-            priority_mark02.full = rfixed_div(a, b);
+            priority_mark02.full = dfixed_div(a, b);
         }
         if (wm1.consumption_rate.full > fill_rate.full) {
             b.full = wm1.consumption_rate.full - fill_rate.full;
-            b.full = rfixed_mul(b, wm1.active_time);
+            b.full = dfixed_mul(b, wm1.active_time);
-            a.full = rfixed_const(16);
+            a.full = dfixed_const(16);
-            b.full = rfixed_div(b, a);
+            b.full = dfixed_div(b, a);
-            a.full = rfixed_mul(wm1.worst_case_latency,
+            a.full = dfixed_mul(wm1.worst_case_latency,
                                 wm1.consumption_rate);
             priority_mark12.full = a.full + b.full;
         } else {
-            a.full = rfixed_mul(wm1.worst_case_latency,
+            a.full = dfixed_mul(wm1.worst_case_latency,
                                 wm1.consumption_rate);
-            b.full = rfixed_const(16 * 1000);
+            b.full = dfixed_const(16 * 1000);
-            priority_mark12.full = rfixed_div(a, b);
+            priority_mark12.full = dfixed_div(a, b);
         }
         if (wm0.priority_mark.full > priority_mark02.full)
             priority_mark02.full = wm0.priority_mark.full;
-        if (rfixed_trunc(priority_mark02) < 0)
+        if (dfixed_trunc(priority_mark02) < 0)
             priority_mark02.full = 0;
         if (wm0.priority_mark_max.full > priority_mark02.full)
             priority_mark02.full = wm0.priority_mark_max.full;
         if (wm1.priority_mark.full > priority_mark12.full)
             priority_mark12.full = wm1.priority_mark.full;
-        if (rfixed_trunc(priority_mark12) < 0)
+        if (dfixed_trunc(priority_mark12) < 0)
             priority_mark12.full = 0;
         if (wm1.priority_mark_max.full > priority_mark12.full)
             priority_mark12.full = wm1.priority_mark_max.full;
-        d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+        d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
-        d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+        d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
         if (rdev->disp_priority == 2) {
             d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
             d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
@@ -1096,32 +1003,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
         WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
         WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
     } else if (mode0) {
-        if (rfixed_trunc(wm0.dbpp) > 64)
+        if (dfixed_trunc(wm0.dbpp) > 64)
-            a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+            a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
         else
             a.full = wm0.num_line_pair.full;
-        fill_rate.full = rfixed_div(wm0.sclk, a);
+        fill_rate.full = dfixed_div(wm0.sclk, a);
         if (wm0.consumption_rate.full > fill_rate.full) {
             b.full = wm0.consumption_rate.full - fill_rate.full;
-            b.full = rfixed_mul(b, wm0.active_time);
+            b.full = dfixed_mul(b, wm0.active_time);
-            a.full = rfixed_const(16);
+            a.full = dfixed_const(16);
-            b.full = rfixed_div(b, a);
+            b.full = dfixed_div(b, a);
-            a.full = rfixed_mul(wm0.worst_case_latency,
+            a.full = dfixed_mul(wm0.worst_case_latency,
                                 wm0.consumption_rate);
             priority_mark02.full = a.full + b.full;
         } else {
-            a.full = rfixed_mul(wm0.worst_case_latency,
+            a.full = dfixed_mul(wm0.worst_case_latency,
                                 wm0.consumption_rate);
-            b.full = rfixed_const(16);
+            b.full = dfixed_const(16);
-            priority_mark02.full = rfixed_div(a, b);
+            priority_mark02.full = dfixed_div(a, b);
         }
         if (wm0.priority_mark.full > priority_mark02.full)
             priority_mark02.full = wm0.priority_mark.full;
-        if (rfixed_trunc(priority_mark02) < 0)
+        if (dfixed_trunc(priority_mark02) < 0)
             priority_mark02.full = 0;
         if (wm0.priority_mark_max.full > priority_mark02.full)
             priority_mark02.full = wm0.priority_mark_max.full;
-        d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+        d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
         if (rdev->disp_priority == 2)
             d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
         WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
@@ -1129,32 +1036,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
         WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
         WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
     } else {
-        if (rfixed_trunc(wm1.dbpp) > 64)
+        if (dfixed_trunc(wm1.dbpp) > 64)
-            a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+            a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
         else
             a.full = wm1.num_line_pair.full;
-        fill_rate.full = rfixed_div(wm1.sclk, a);
+        fill_rate.full = dfixed_div(wm1.sclk, a);
         if (wm1.consumption_rate.full > fill_rate.full) {
             b.full = wm1.consumption_rate.full - fill_rate.full;
-            b.full = rfixed_mul(b, wm1.active_time);
+            b.full = dfixed_mul(b, wm1.active_time);
-            a.full = rfixed_const(16);
+            a.full = dfixed_const(16);
-            b.full = rfixed_div(b, a);
+            b.full = dfixed_div(b, a);
-            a.full = rfixed_mul(wm1.worst_case_latency,
+            a.full = dfixed_mul(wm1.worst_case_latency,
                                 wm1.consumption_rate);
             priority_mark12.full = a.full + b.full;
         } else {
-            a.full = rfixed_mul(wm1.worst_case_latency,
+            a.full = dfixed_mul(wm1.worst_case_latency,
                                 wm1.consumption_rate);
-            b.full = rfixed_const(16 * 1000);
+            b.full = dfixed_const(16 * 1000);
-            priority_mark12.full = rfixed_div(a, b);
+            priority_mark12.full = dfixed_div(a, b);
         }
         if (wm1.priority_mark.full > priority_mark12.full)
             priority_mark12.full = wm1.priority_mark.full;
-        if (rfixed_trunc(priority_mark12) < 0)
+        if (dfixed_trunc(priority_mark12) < 0)
             priority_mark12.full = 0;
         if (wm1.priority_mark_max.full > priority_mark12.full)
             priority_mark12.full = wm1.priority_mark_max.full;
-        d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+        d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
         if (rdev->disp_priority == 2)
             d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
         WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
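
[Editor's note: both bandwidth_update paths above end in the same clamp sequence: the candidate mark computed from worst-case latency and consumption rate is raised to at least the per-watermark priority_mark, negative values are clamped to zero, and — despite its name — priority_mark_max is also applied as a floor, so the largest of the three values wins before the result is written to the D1MODE/D2MODE priority registers. A condensed restatement with plain ints, purely for illustration:]

    /* Condensed restatement of the clamp used above (plain ints for
     * clarity; the driver does this in 20.12 fixed point). */
    static int pick_priority_mark(int computed, int wm_mark, int wm_mark_max)
    {
        int mark = computed;

        if (wm_mark > mark)      /* never report less than the wm's mark */
            mark = wm_mark;
        if (mark < 0)            /* clamp negative marks to zero */
            mark = 0;
        if (wm_mark_max > mark)  /* priority_mark_max acts as a floor here */
            mark = wm_mark_max;
        return mark;
    }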
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index fc216e49384d..590309a710b1 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -217,6 +217,52 @@
 #define CP_PACKET3_GET_OPCODE(h)                     (((h) >> 8) & 0xFF)

 /* Registers */
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                  (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                  (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                     0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                  (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                  (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                     0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                 (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                 (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                    0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                  (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                  (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                     0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                  (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                  (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                     0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                  (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                  (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                     0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                  (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                  (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                     0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                 (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                 (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                    0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                  (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                  (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                     0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                 (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                 (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                    0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                 (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                 (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                    0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                   0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                  (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                  (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                     0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                  (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                  (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                     0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                   0xFFFFBFFF
 #define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
 #define   S_0000F8_CONFIG_MEMSIZE(x)                 (((x) & 0xFFFFFFFF) << 0)
 #define   G_0000F8_CONFIG_MEMSIZE(x)                 (((x) >> 0) & 0xFFFFFFFF)
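
[Editor's note: the new RBBM_SOFT_RESET definitions follow the register-header convention used throughout these radeon *d.h files: S_*(x) shifts a field value into place, G_*(x) extracts it, and C_* is the complement mask for clearing the field. Asserting the GA and VAP soft-reset bits while preserving the rest of the register would look roughly like this, using RREG32/WREG32 as elsewhere in the driver:]

    u32 tmp = RREG32(R_0000F0_RBBM_SOFT_RESET);

    /* clear the two fields, then OR in the new values */
    tmp &= C_0000F0_SOFT_RESET_GA & C_0000F0_SOFT_RESET_VAP;
    tmp |= S_0000F0_SOFT_RESET_GA(1) | S_0000F0_SOFT_RESET_VAP(1);
    WREG32(R_0000F0_RBBM_SOFT_RESET, tmp);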
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 97958a64df1a..253f24aec031 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -42,6 +42,10 @@
 static void rv770_gpu_init(struct radeon_device *rdev);
 void rv770_fini(struct radeon_device *rdev);

+void rv770_pm_misc(struct radeon_device *rdev)
+{
+
+}

 /*
  * GART
@@ -237,7 +241,6 @@ void r700_cp_stop(struct radeon_device *rdev)
     WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
 }

-
 static int rv770_cp_load_microcode(struct radeon_device *rdev)
 {
     const __be32 *fw_data;
243 const __be32 *fw_data; 246 const __be32 *fw_data;
@@ -272,6 +275,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
     return 0;
 }

+void r700_cp_fini(struct radeon_device *rdev)
+{
+    r700_cp_stop(rdev);
+    radeon_ring_fini(rdev);
+}

 /*
  * Core functions
@@ -906,23 +914,12 @@ int rv770_mc_init(struct radeon_device *rdev)
     rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
     rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
     rdev->mc.visible_vram_size = rdev->mc.aper_size;
-    /* FIXME remove this once we support unmappable VRAM */
-    if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-        rdev->mc.mc_vram_size = rdev->mc.aper_size;
-        rdev->mc.real_vram_size = rdev->mc.aper_size;
-    }
     r600_vram_gtt_location(rdev, &rdev->mc);
     radeon_update_bandwidth_info(rdev);

     return 0;
 }

-int rv770_gpu_reset(struct radeon_device *rdev)
-{
-    /* FIXME: implement any rv770 specific bits */
-    return r600_gpu_reset(rdev);
-}
-
 static int rv770_startup(struct radeon_device *rdev)
 {
     int r;
@@ -1094,8 +1091,6 @@ int rv770_init(struct radeon_device *rdev)
     r = radeon_clocks_init(rdev);
     if (r)
         return r;
-    /* Initialize power management */
-    radeon_pm_init(rdev);
     /* Fence driver */
     r = radeon_fence_driver_init(rdev);
     if (r)
@@ -1132,7 +1127,7 @@ int rv770_init(struct radeon_device *rdev)
     r = rv770_startup(rdev);
     if (r) {
         dev_err(rdev->dev, "disabling GPU acceleration\n");
-        r600_cp_fini(rdev);
+        r700_cp_fini(rdev);
         r600_wb_fini(rdev);
         r600_irq_fini(rdev);
         radeon_irq_kms_fini(rdev);
@@ -1164,9 +1159,8 @@ int rv770_init(struct radeon_device *rdev)

 void rv770_fini(struct radeon_device *rdev)
 {
-    radeon_pm_fini(rdev);
     r600_blit_fini(rdev);
-    r600_cp_fini(rdev);
+    r700_cp_fini(rdev);
     r600_wb_fini(rdev);
     r600_irq_fini(rdev);
     radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bff6fc2524c8..2d0c9ca484c5 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -539,11 +539,10 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
 {
     drm_savage_private_t *dev_priv;

-    dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
+    dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
     if (dev_priv == NULL)
         return -ENOMEM;

-    memset(dev_priv, 0, sizeof(drm_savage_private_t));
     dev->dev_private = (void *)dev_priv;

     dev_priv->chipset = (enum savage_family)chipset;
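
[Editor's note: the savage change is the standard kernel cleanup of replacing a kmalloc() followed by memset() with a single kzalloc(), which returns already-zeroed memory. The general shape of the idiom:]

    /* before: allocate, then zero in a separate step */
    ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
    if (ptr == NULL)
        return -ENOMEM;
    memset(ptr, 0, sizeof(*ptr));

    /* after: one call, memory comes back zeroed */
    ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
    if (ptr == NULL)
        return -ENOMEM;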
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 1e138f5bae09..4256e2006476 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,6 @@
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
     ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
-    ttm_object.o ttm_lock.o ttm_execbuf_util.o
+    ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o

 obj-$(CONFIG_DRM_TTM) += ttm.o
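
[Editor's note: the TTM changes that follow do two things: the Makefile picks up the new ttm_page_alloc.o page-pool allocator, and the ttm_bo.c hunks split the old catch-all no_wait flag into no_wait_reserve (do not block waiting to reserve a contended buffer) and no_wait_gpu (do not block waiting for the GPU to finish with it), threading both flags through the eviction and move paths. A sketch of the resulting call shape at a hypothetical ttm_bo.c call site:]

    /* Hypothetical call site: evict synchronously, but give up rather
     * than sleep on a contended reservation. */
    ret = ttm_bo_evict(bo, true /* interruptible */,
                       true /* no_wait_reserve */,
                       false /* no_wait_gpu */);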
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0e3754a3a303..555ebb12ace8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
     printk(KERN_ERR TTM_PFX "  use_type: %d\n", man->use_type);
     printk(KERN_ERR TTM_PFX "  flags: 0x%08X\n", man->flags);
     printk(KERN_ERR TTM_PFX "  gpu_offset: 0x%08lX\n", man->gpu_offset);
-    printk(KERN_ERR TTM_PFX "  io_offset: 0x%08lX\n", man->io_offset);
-    printk(KERN_ERR TTM_PFX "  io_size: %ld\n", man->io_size);
     printk(KERN_ERR TTM_PFX "  size: %llu\n", man->size);
     printk(KERN_ERR TTM_PFX "  available_caching: 0x%08X\n",
            man->available_caching);
@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)

 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                   struct ttm_mem_reg *mem,
-                                  bool evict, bool interruptible, bool no_wait)
+                                  bool evict, bool interruptible,
+                                  bool no_wait_reserve, bool no_wait_gpu)
 {
     struct ttm_bo_device *bdev = bo->bdev;
     bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,

     if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
         !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-        ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
     else if (bdev->driver->move)
         ret = bdev->driver->move(bo, evict, interruptible,
-                                 no_wait, mem);
+                                 no_wait_reserve, no_wait_gpu, mem);
     else
-        ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+        ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);

     if (ret)
         goto out_err;
@@ -605,8 +604,22 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 }
 EXPORT_SYMBOL(ttm_bo_unref);

+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+{
+    return cancel_delayed_work_sync(&bdev->wq);
+}
+EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
+
+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+{
+    if (resched)
+        schedule_delayed_work(&bdev->wq,
+                              ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
+
 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-                        bool no_wait)
+                        bool no_wait_reserve, bool no_wait_gpu)
 {
     struct ttm_bo_device *bdev = bo->bdev;
     struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +628,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
     int ret = 0;

     spin_lock(&bo->lock);
-    ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+    ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
     spin_unlock(&bo->lock);

     if (unlikely(ret != 0)) {
@@ -631,6 +644,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
631 644
632 evict_mem = bo->mem; 645 evict_mem = bo->mem;
633 evict_mem.mm_node = NULL; 646 evict_mem.mm_node = NULL;
647 evict_mem.bus.io_reserved = false;
634 648
635 placement.fpfn = 0; 649 placement.fpfn = 0;
636 placement.lpfn = 0; 650 placement.lpfn = 0;
@@ -638,7 +652,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
638 placement.num_busy_placement = 0; 652 placement.num_busy_placement = 0;
639 bdev->driver->evict_flags(bo, &placement); 653 bdev->driver->evict_flags(bo, &placement);
640 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, 654 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
641 no_wait); 655 no_wait_reserve, no_wait_gpu);
642 if (ret) { 656 if (ret) {
643 if (ret != -ERESTARTSYS) { 657 if (ret != -ERESTARTSYS) {
644 printk(KERN_ERR TTM_PFX 658 printk(KERN_ERR TTM_PFX
@@ -650,7 +664,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
650 } 664 }
651 665
652 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, 666 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
653 no_wait); 667 no_wait_reserve, no_wait_gpu);
654 if (ret) { 668 if (ret) {
655 if (ret != -ERESTARTSYS) 669 if (ret != -ERESTARTSYS)
656 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); 670 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +684,8 @@ out:
670 684
671static int ttm_mem_evict_first(struct ttm_bo_device *bdev, 685static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
672 uint32_t mem_type, 686 uint32_t mem_type,
673 bool interruptible, bool no_wait) 687 bool interruptible, bool no_wait_reserve,
688 bool no_wait_gpu)
674{ 689{
675 struct ttm_bo_global *glob = bdev->glob; 690 struct ttm_bo_global *glob = bdev->glob;
676 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 691 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +702,11 @@ retry:
687 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); 702 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
688 kref_get(&bo->list_kref); 703 kref_get(&bo->list_kref);
689 704
690 ret = ttm_bo_reserve_locked(bo, false, true, false, 0); 705 ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
691 706
692 if (unlikely(ret == -EBUSY)) { 707 if (unlikely(ret == -EBUSY)) {
693 spin_unlock(&glob->lru_lock); 708 spin_unlock(&glob->lru_lock);
694 if (likely(!no_wait)) 709 if (likely(!no_wait_gpu))
695 ret = ttm_bo_wait_unreserved(bo, interruptible); 710 ret = ttm_bo_wait_unreserved(bo, interruptible);
696 711
697 kref_put(&bo->list_kref, ttm_bo_release_list); 712 kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +728,7 @@ retry:
713 while (put_count--) 728 while (put_count--)
714 kref_put(&bo->list_kref, ttm_bo_ref_bug); 729 kref_put(&bo->list_kref, ttm_bo_ref_bug);
715 730
716 ret = ttm_bo_evict(bo, interruptible, no_wait); 731 ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
717 ttm_bo_unreserve(bo); 732 ttm_bo_unreserve(bo);
718 733
719 kref_put(&bo->list_kref, ttm_bo_release_list); 734 kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +779,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
764 uint32_t mem_type, 779 uint32_t mem_type,
765 struct ttm_placement *placement, 780 struct ttm_placement *placement,
766 struct ttm_mem_reg *mem, 781 struct ttm_mem_reg *mem,
767 bool interruptible, bool no_wait) 782 bool interruptible,
783 bool no_wait_reserve,
784 bool no_wait_gpu)
768{ 785{
769 struct ttm_bo_device *bdev = bo->bdev; 786 struct ttm_bo_device *bdev = bo->bdev;
770 struct ttm_bo_global *glob = bdev->glob; 787 struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +802,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
785 } 802 }
786 spin_unlock(&glob->lru_lock); 803 spin_unlock(&glob->lru_lock);
787 ret = ttm_mem_evict_first(bdev, mem_type, interruptible, 804 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
788 no_wait); 805 no_wait_reserve, no_wait_gpu);
789 if (unlikely(ret != 0)) 806 if (unlikely(ret != 0))
790 return ret; 807 return ret;
791 } while (1); 808 } while (1);
@@ -855,7 +872,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
855int ttm_bo_mem_space(struct ttm_buffer_object *bo, 872int ttm_bo_mem_space(struct ttm_buffer_object *bo,
856 struct ttm_placement *placement, 873 struct ttm_placement *placement,
857 struct ttm_mem_reg *mem, 874 struct ttm_mem_reg *mem,
858 bool interruptible, bool no_wait) 875 bool interruptible, bool no_wait_reserve,
876 bool no_wait_gpu)
859{ 877{
860 struct ttm_bo_device *bdev = bo->bdev; 878 struct ttm_bo_device *bdev = bo->bdev;
861 struct ttm_mem_type_manager *man; 879 struct ttm_mem_type_manager *man;
@@ -952,7 +970,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
952 } 970 }
953 971
954 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 972 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
955 interruptible, no_wait); 973 interruptible, no_wait_reserve, no_wait_gpu);
956 if (ret == 0 && mem->mm_node) { 974 if (ret == 0 && mem->mm_node) {
957 mem->placement = cur_flags; 975 mem->placement = cur_flags;
958 mem->mm_node->private = bo; 976 mem->mm_node->private = bo;
@@ -978,7 +996,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
978 996
979int ttm_bo_move_buffer(struct ttm_buffer_object *bo, 997int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
980 struct ttm_placement *placement, 998 struct ttm_placement *placement,
981 bool interruptible, bool no_wait) 999 bool interruptible, bool no_wait_reserve,
1000 bool no_wait_gpu)
982{ 1001{
983 struct ttm_bo_global *glob = bo->glob; 1002 struct ttm_bo_global *glob = bo->glob;
984 int ret = 0; 1003 int ret = 0;
@@ -992,20 +1011,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
992 * instead of doing it here. 1011 * instead of doing it here.
993 */ 1012 */
994 spin_lock(&bo->lock); 1013 spin_lock(&bo->lock);
995 ret = ttm_bo_wait(bo, false, interruptible, no_wait); 1014 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
996 spin_unlock(&bo->lock); 1015 spin_unlock(&bo->lock);
997 if (ret) 1016 if (ret)
998 return ret; 1017 return ret;
999 mem.num_pages = bo->num_pages; 1018 mem.num_pages = bo->num_pages;
1000 mem.size = mem.num_pages << PAGE_SHIFT; 1019 mem.size = mem.num_pages << PAGE_SHIFT;
1001 mem.page_alignment = bo->mem.page_alignment; 1020 mem.page_alignment = bo->mem.page_alignment;
1021 mem.bus.io_reserved = false;
1002 /* 1022 /*
1003 * Determine where to move the buffer. 1023 * Determine where to move the buffer.
1004 */ 1024 */
1005 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait); 1025 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
1006 if (ret) 1026 if (ret)
1007 goto out_unlock; 1027 goto out_unlock;
1008 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); 1028 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
1009out_unlock: 1029out_unlock:
1010 if (ret && mem.mm_node) { 1030 if (ret && mem.mm_node) {
1011 spin_lock(&glob->lru_lock); 1031 spin_lock(&glob->lru_lock);
@@ -1039,7 +1059,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
1039 1059
1040int ttm_bo_validate(struct ttm_buffer_object *bo, 1060int ttm_bo_validate(struct ttm_buffer_object *bo,
1041 struct ttm_placement *placement, 1061 struct ttm_placement *placement,
1042 bool interruptible, bool no_wait) 1062 bool interruptible, bool no_wait_reserve,
1063 bool no_wait_gpu)
1043{ 1064{
1044 int ret; 1065 int ret;
1045 1066
@@ -1054,7 +1075,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1054 */ 1075 */
1055 ret = ttm_bo_mem_compat(placement, &bo->mem); 1076 ret = ttm_bo_mem_compat(placement, &bo->mem);
1056 if (ret < 0) { 1077 if (ret < 0) {
1057 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait); 1078 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
1058 if (ret) 1079 if (ret)
1059 return ret; 1080 return ret;
1060 } else { 1081 } else {
@@ -1153,6 +1174,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1153 bo->mem.num_pages = bo->num_pages; 1174 bo->mem.num_pages = bo->num_pages;
1154 bo->mem.mm_node = NULL; 1175 bo->mem.mm_node = NULL;
1155 bo->mem.page_alignment = page_alignment; 1176 bo->mem.page_alignment = page_alignment;
1177 bo->mem.bus.io_reserved = false;
1156 bo->buffer_start = buffer_start & PAGE_MASK; 1178 bo->buffer_start = buffer_start & PAGE_MASK;
1157 bo->priv_flags = 0; 1179 bo->priv_flags = 0;
1158 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); 1180 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1175,7 +1197,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1175 goto out_err; 1197 goto out_err;
1176 } 1198 }
1177 1199
1178 ret = ttm_bo_validate(bo, placement, interruptible, false); 1200 ret = ttm_bo_validate(bo, placement, interruptible, false, false);
1179 if (ret) 1201 if (ret)
1180 goto out_err; 1202 goto out_err;
1181 1203
@@ -1249,7 +1271,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1249 spin_lock(&glob->lru_lock); 1271 spin_lock(&glob->lru_lock);
1250 while (!list_empty(&man->lru)) { 1272 while (!list_empty(&man->lru)) {
1251 spin_unlock(&glob->lru_lock); 1273 spin_unlock(&glob->lru_lock);
1252 ret = ttm_mem_evict_first(bdev, mem_type, false, false); 1274 ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
1253 if (ret) { 1275 if (ret) {
1254 if (allow_errors) { 1276 if (allow_errors) {
1255 return ret; 1277 return ret;
@@ -1553,26 +1575,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1553 return true; 1575 return true;
1554} 1576}
1555 1577
1556int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
1557 struct ttm_mem_reg *mem,
1558 unsigned long *bus_base,
1559 unsigned long *bus_offset, unsigned long *bus_size)
1560{
1561 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1562
1563 *bus_size = 0;
1564 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1565 return -EINVAL;
1566
1567 if (ttm_mem_reg_is_pci(bdev, mem)) {
1568 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
1569 *bus_size = mem->num_pages << PAGE_SHIFT;
1570 *bus_base = man->io_offset;
1571 }
1572
1573 return 0;
1574}
1575
1576void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) 1578void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1577{ 1579{
1578 struct ttm_bo_device *bdev = bo->bdev; 1580 struct ttm_bo_device *bdev = bo->bdev;
@@ -1581,8 +1583,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1581 1583
1582 if (!bdev->dev_mapping) 1584 if (!bdev->dev_mapping)
1583 return; 1585 return;
1584
1585 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); 1586 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1587 ttm_mem_io_free(bdev, &bo->mem);
1586} 1588}
1587EXPORT_SYMBOL(ttm_bo_unmap_virtual); 1589EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1588 1590
@@ -1811,7 +1813,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1811 evict_mem.mem_type = TTM_PL_SYSTEM; 1813 evict_mem.mem_type = TTM_PL_SYSTEM;
1812 1814
1813 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, 1815 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1814 false, false); 1816 false, false, false);
1815 if (unlikely(ret != 0)) 1817 if (unlikely(ret != 0))
1816 goto out; 1818 goto out;
1817 } 1819 }
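Throughout this file the single no_wait flag is split into no_wait_reserve and no_wait_gpu, so a caller can refuse to block on buffer reservation independently of blocking on GPU idleness. A sketch of an updated caller, assuming bo and placement already exist:

    ret = ttm_bo_validate(bo, &placement,
                          true,    /* interruptible */
                          false,   /* no_wait_reserve: ok to block on reservation */
                          false);  /* no_wait_gpu: ok to block on the GPU */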
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d764e82e799b..13012a1f1486 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -50,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
50} 50}
51 51
52int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 52int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
53 bool evict, bool no_wait, struct ttm_mem_reg *new_mem) 53 bool evict, bool no_wait_reserve,
54 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
54{ 55{
55 struct ttm_tt *ttm = bo->ttm; 56 struct ttm_tt *ttm = bo->ttm;
56 struct ttm_mem_reg *old_mem = &bo->mem; 57 struct ttm_mem_reg *old_mem = &bo->mem;
@@ -81,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
81} 82}
82EXPORT_SYMBOL(ttm_bo_move_ttm); 83EXPORT_SYMBOL(ttm_bo_move_ttm);
83 84
85int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
86{
87 int ret;
88
89 if (!mem->bus.io_reserved) {
90 mem->bus.io_reserved = true;
91 ret = bdev->driver->io_mem_reserve(bdev, mem);
92 if (unlikely(ret != 0))
93 return ret;
94 }
95 return 0;
96}
97
98void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
99{
100 if (bdev->driver->io_mem_reserve) {
101 if (mem->bus.io_reserved) {
102 mem->bus.io_reserved = false;
103 bdev->driver->io_mem_free(bdev, mem);
104 }
105 }
106}
107
84int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, 108int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
85 void **virtual) 109 void **virtual)
86{ 110{
87 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
88 unsigned long bus_offset;
89 unsigned long bus_size;
90 unsigned long bus_base;
91 int ret; 111 int ret;
92 void *addr; 112 void *addr;
93 113
94 *virtual = NULL; 114 *virtual = NULL;
95 ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size); 115 ret = ttm_mem_io_reserve(bdev, mem);
96 if (ret || bus_size == 0) 116 if (ret || !mem->bus.is_iomem)
97 return ret; 117 return ret;
98 118
99 if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) 119 if (mem->bus.addr) {
100 addr = (void *)(((u8 *) man->io_addr) + bus_offset); 120 addr = mem->bus.addr;
101 else { 121 } else {
102 if (mem->placement & TTM_PL_FLAG_WC) 122 if (mem->placement & TTM_PL_FLAG_WC)
103 addr = ioremap_wc(bus_base + bus_offset, bus_size); 123 addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
104 else 124 else
105 addr = ioremap_nocache(bus_base + bus_offset, bus_size); 125 addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
106 if (!addr) 126 if (!addr) {
127 ttm_mem_io_free(bdev, mem);
107 return -ENOMEM; 128 return -ENOMEM;
129 }
108 } 130 }
109 *virtual = addr; 131 *virtual = addr;
110 return 0; 132 return 0;
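The ioremap path now goes through ttm_mem_io_reserve()/ttm_mem_io_free() and a per-region bus descriptor instead of ttm_bo_pci_offset(). For reference, these are the bus fields the code above relies on; the struct itself is declared in the TTM headers of this series, and the field comments here are inferred from its use in this diff:

    struct ttm_bus_placement {
            void            *addr;        /* CPU address if already mapped, else NULL */
            unsigned long   base;         /* bus base address */
            unsigned long   size;         /* size in bytes */
            unsigned long   offset;       /* offset from the base */
            bool            is_iomem;     /* region is I/O memory, not system RAM */
            bool            io_reserved;  /* driver io_mem_reserve() has been called */
    };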
@@ -117,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
117 139
118 man = &bdev->man[mem->mem_type]; 140 man = &bdev->man[mem->mem_type];
119 141
120 if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) 142 if (virtual && mem->bus.addr == NULL)
121 iounmap(virtual); 143 iounmap(virtual);
144 ttm_mem_io_free(bdev, mem);
122} 145}
123 146
124static int ttm_copy_io_page(void *dst, void *src, unsigned long page) 147static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -208,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
208} 231}
209 232
210int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, 233int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
211 bool evict, bool no_wait, struct ttm_mem_reg *new_mem) 234 bool evict, bool no_wait_reserve, bool no_wait_gpu,
235 struct ttm_mem_reg *new_mem)
212{ 236{
213 struct ttm_bo_device *bdev = bo->bdev; 237 struct ttm_bo_device *bdev = bo->bdev;
214 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; 238 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -369,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
369EXPORT_SYMBOL(ttm_io_prot); 393EXPORT_SYMBOL(ttm_io_prot);
370 394
371static int ttm_bo_ioremap(struct ttm_buffer_object *bo, 395static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
372 unsigned long bus_base, 396 unsigned long offset,
373 unsigned long bus_offset, 397 unsigned long size,
374 unsigned long bus_size,
375 struct ttm_bo_kmap_obj *map) 398 struct ttm_bo_kmap_obj *map)
376{ 399{
377 struct ttm_bo_device *bdev = bo->bdev;
378 struct ttm_mem_reg *mem = &bo->mem; 400 struct ttm_mem_reg *mem = &bo->mem;
379 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
380 401
381 if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) { 402 if (bo->mem.bus.addr) {
382 map->bo_kmap_type = ttm_bo_map_premapped; 403 map->bo_kmap_type = ttm_bo_map_premapped;
383 map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset); 404 map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
384 } else { 405 } else {
385 map->bo_kmap_type = ttm_bo_map_iomap; 406 map->bo_kmap_type = ttm_bo_map_iomap;
386 if (mem->placement & TTM_PL_FLAG_WC) 407 if (mem->placement & TTM_PL_FLAG_WC)
387 map->virtual = ioremap_wc(bus_base + bus_offset, 408 map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
388 bus_size); 409 size);
389 else 410 else
390 map->virtual = ioremap_nocache(bus_base + bus_offset, 411 map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
391 bus_size); 412 size);
392 } 413 }
393 return (!map->virtual) ? -ENOMEM : 0; 414 return (!map->virtual) ? -ENOMEM : 0;
394} 415}
@@ -441,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
441 unsigned long start_page, unsigned long num_pages, 462 unsigned long start_page, unsigned long num_pages,
442 struct ttm_bo_kmap_obj *map) 463 struct ttm_bo_kmap_obj *map)
443{ 464{
465 unsigned long offset, size;
444 int ret; 466 int ret;
445 unsigned long bus_base;
446 unsigned long bus_offset;
447 unsigned long bus_size;
448 467
449 BUG_ON(!list_empty(&bo->swap)); 468 BUG_ON(!list_empty(&bo->swap));
450 map->virtual = NULL; 469 map->virtual = NULL;
470 map->bo = bo;
451 if (num_pages > bo->num_pages) 471 if (num_pages > bo->num_pages)
452 return -EINVAL; 472 return -EINVAL;
453 if (start_page > bo->num_pages) 473 if (start_page > bo->num_pages)
@@ -456,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
456 if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) 476 if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
457 return -EPERM; 477 return -EPERM;
458#endif 478#endif
459 ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base, 479 ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
460 &bus_offset, &bus_size);
461 if (ret) 480 if (ret)
462 return ret; 481 return ret;
463 if (bus_size == 0) { 482 if (!bo->mem.bus.is_iomem) {
464 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map); 483 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
465 } else { 484 } else {
466 bus_offset += start_page << PAGE_SHIFT; 485 offset = start_page << PAGE_SHIFT;
467 bus_size = num_pages << PAGE_SHIFT; 486 size = num_pages << PAGE_SHIFT;
468 return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map); 487 return ttm_bo_ioremap(bo, offset, size, map);
469 } 488 }
470} 489}
471EXPORT_SYMBOL(ttm_bo_kmap); 490EXPORT_SYMBOL(ttm_bo_kmap);
@@ -477,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
477 switch (map->bo_kmap_type) { 496 switch (map->bo_kmap_type) {
478 case ttm_bo_map_iomap: 497 case ttm_bo_map_iomap:
479 iounmap(map->virtual); 498 iounmap(map->virtual);
499 ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
480 break; 500 break;
481 case ttm_bo_map_vmap: 501 case ttm_bo_map_vmap:
482 vunmap(map->virtual); 502 vunmap(map->virtual);
@@ -494,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
494} 514}
495EXPORT_SYMBOL(ttm_bo_kunmap); 515EXPORT_SYMBOL(ttm_bo_kunmap);
496 516
497int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
498 unsigned long dst_offset,
499 unsigned long *pfn, pgprot_t *prot)
500{
501 struct ttm_mem_reg *mem = &bo->mem;
502 struct ttm_bo_device *bdev = bo->bdev;
503 unsigned long bus_offset;
504 unsigned long bus_size;
505 unsigned long bus_base;
506 int ret;
507 ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
508 &bus_size);
509 if (ret)
510 return -EINVAL;
511 if (bus_size != 0)
512 *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
513 else
514 if (!bo->ttm)
515 return -EINVAL;
516 else
517 *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
518 dst_offset >>
519 PAGE_SHIFT));
520 *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
521 PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
522
523 return 0;
524}
525
526int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 517int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
527 void *sync_obj, 518 void *sync_obj,
528 void *sync_obj_arg, 519 void *sync_obj_arg,
529 bool evict, bool no_wait, 520 bool evict, bool no_wait_reserve,
521 bool no_wait_gpu,
530 struct ttm_mem_reg *new_mem) 522 struct ttm_mem_reg *new_mem)
531{ 523{
532 struct ttm_bo_device *bdev = bo->bdev; 524 struct ttm_bo_device *bdev = bo->bdev;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8b8dd3..fe6cb77899f4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
74 struct ttm_buffer_object *bo = (struct ttm_buffer_object *) 74 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
75 vma->vm_private_data; 75 vma->vm_private_data;
76 struct ttm_bo_device *bdev = bo->bdev; 76 struct ttm_bo_device *bdev = bo->bdev;
77 unsigned long bus_base;
78 unsigned long bus_offset;
79 unsigned long bus_size;
80 unsigned long page_offset; 77 unsigned long page_offset;
81 unsigned long page_last; 78 unsigned long page_last;
82 unsigned long pfn; 79 unsigned long pfn;
@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
84 struct page *page; 81 struct page *page;
85 int ret; 82 int ret;
86 int i; 83 int i;
87 bool is_iomem;
88 unsigned long address = (unsigned long)vmf->virtual_address; 84 unsigned long address = (unsigned long)vmf->virtual_address;
89 int retval = VM_FAULT_NOPAGE; 85 int retval = VM_FAULT_NOPAGE;
90 86
@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
101 return VM_FAULT_NOPAGE; 97 return VM_FAULT_NOPAGE;
102 } 98 }
103 99
104 if (bdev->driver->fault_reserve_notify) 100 if (bdev->driver->fault_reserve_notify) {
105 bdev->driver->fault_reserve_notify(bo); 101 ret = bdev->driver->fault_reserve_notify(bo);
102 switch (ret) {
103 case 0:
104 break;
105 case -EBUSY:
106 set_need_resched();
107 case -ERESTARTSYS:
108 retval = VM_FAULT_NOPAGE;
109 goto out_unlock;
110 default:
111 retval = VM_FAULT_SIGBUS;
112 goto out_unlock;
113 }
114 }
106 115
107 /* 116 /*
108 * Wait for buffer data in transit, due to a pipelined 117 * Wait for buffer data in transit, due to a pipelined
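fault_reserve_notify() now returns a status instead of void: 0 proceeds with the fault, -EBUSY asks for a resched-and-retry (note the intentional fall-through into the -ERESTARTSYS case; both end in VM_FAULT_NOPAGE), and anything else raises SIGBUS. A hypothetical driver callback, with foo_bo_busy() standing in for whatever condition the driver actually checks:

    static int foo_fault_reserve_notify(struct ttm_buffer_object *bo)
    {
            if (foo_bo_busy(bo))    /* illustrative helper, not a real API */
                    return -EBUSY;  /* fault handler backs off and retries */
            return 0;               /* safe to service the fault */
    }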
@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
122 spin_unlock(&bo->lock); 131 spin_unlock(&bo->lock);
123 132
124 133
125 ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset, 134 ret = ttm_mem_io_reserve(bdev, &bo->mem);
126 &bus_size); 135 if (ret) {
127 if (unlikely(ret != 0)) {
128 retval = VM_FAULT_SIGBUS; 136 retval = VM_FAULT_SIGBUS;
129 goto out_unlock; 137 goto out_unlock;
130 } 138 }
131 139
132 is_iomem = (bus_size != 0);
133
134 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + 140 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
135 bo->vm_node->start - vma->vm_pgoff; 141 bo->vm_node->start - vma->vm_pgoff;
136 page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + 142 page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
154 * vma->vm_page_prot when the object changes caching policy, with 160 * vma->vm_page_prot when the object changes caching policy, with
155 * the correct locks held. 161 * the correct locks held.
156 */ 162 */
157 163 if (bo->mem.bus.is_iomem) {
158 if (is_iomem) {
159 vma->vm_page_prot = ttm_io_prot(bo->mem.placement, 164 vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
160 vma->vm_page_prot); 165 vma->vm_page_prot);
161 } else { 166 } else {
@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
171 */ 176 */
172 177
173 for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { 178 for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
174 179 if (bo->mem.bus.is_iomem)
175 if (is_iomem) 180 pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
176 pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
177 page_offset;
178 else { 181 else {
179 page = ttm_tt_get_page(ttm, page_offset); 182 page = ttm_tt_get_page(ttm, page_offset);
180 if (unlikely(!page && i == 0)) { 183 if (unlikely(!page && i == 0)) {
@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
198 retval = 201 retval =
199 (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; 202 (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
200 goto out_unlock; 203 goto out_unlock;
201
202 } 204 }
203 205
204 address += PAGE_SIZE; 206 address += PAGE_SIZE;
@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
221 223
222static void ttm_bo_vm_close(struct vm_area_struct *vma) 224static void ttm_bo_vm_close(struct vm_area_struct *vma)
223{ 225{
224 struct ttm_buffer_object *bo = 226 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
225 (struct ttm_buffer_object *)vma->vm_private_data;
226 227
227 ttm_bo_unref(&bo); 228 ttm_bo_unref(&bo);
228 vma->vm_private_data = NULL; 229 vma->vm_private_data = NULL;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 801b702566e6..e70ddd82dc02 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -27,6 +27,7 @@
27 27
28#include "ttm/ttm_memory.h" 28#include "ttm/ttm_memory.h"
29#include "ttm/ttm_module.h" 29#include "ttm/ttm_module.h"
30#include "ttm/ttm_page_alloc.h"
30#include <linux/spinlock.h> 31#include <linux/spinlock.h>
31#include <linux/sched.h> 32#include <linux/sched.h>
32#include <linux/wait.h> 33#include <linux/wait.h>
@@ -393,6 +394,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
393 "Zone %7s: Available graphics memory: %llu kiB.\n", 394 "Zone %7s: Available graphics memory: %llu kiB.\n",
394 zone->name, (unsigned long long) zone->max_mem >> 10); 395 zone->name, (unsigned long long) zone->max_mem >> 10);
395 } 396 }
397 ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
396 return 0; 398 return 0;
397out_no_zone: 399out_no_zone:
398 ttm_mem_global_release(glob); 400 ttm_mem_global_release(glob);
@@ -405,6 +407,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
405 unsigned int i; 407 unsigned int i;
406 struct ttm_mem_zone *zone; 408 struct ttm_mem_zone *zone;
407 409
410 /* let the page allocator first stop the shrink work. */
411 ttm_page_alloc_fini();
412
408 flush_workqueue(glob->swap_queue); 413 flush_workqueue(glob->swap_queue);
409 destroy_workqueue(glob->swap_queue); 414 destroy_workqueue(glob->swap_queue);
410 glob->swap_queue = NULL; 415 glob->swap_queue = NULL;
@@ -412,7 +417,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
412 zone = glob->zones[i]; 417 zone = glob->zones[i];
413 kobject_del(&zone->kobj); 418 kobject_del(&zone->kobj);
414 kobject_put(&zone->kobj); 419 kobject_put(&zone->kobj);
415 } 420 }
416 kobject_del(&glob->kobj); 421 kobject_del(&glob->kobj);
417 kobject_put(&glob->kobj); 422 kobject_put(&glob->kobj);
418} 423}
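The pool allocator is capped at half of the kernel zone's graphics memory, since max_pages is max_mem/(2*PAGE_SIZE). A worked example assuming 4 KiB pages:

    /* zone_kernel->max_mem = 2 GiB:
     *   max_pages = (2UL << 30) / (2 * 4096) = 262144 pages = 1 GiB,
     * i.e. the pools together may cache at most half the zone. */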
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
new file mode 100644
index 000000000000..0d9a42c2394f
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -0,0 +1,845 @@
1/*
2 * Copyright (c) Red Hat Inc.
3
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie <airlied@redhat.com>
24 * Jerome Glisse <jglisse@redhat.com>
25 * Pauli Nieminen <suokkos@gmail.com>
26 */
27
28/* simple list based uncached page pool
29 * - Pool collects recently freed pages for reuse
30 * - Use page->lru to keep a free list
31 * - doesn't track pages currently in use
32 */
33#include <linux/list.h>
34#include <linux/spinlock.h>
35#include <linux/highmem.h>
36#include <linux/mm_types.h>
37#include <linux/module.h>
38#include <linux/mm.h>
39#include <linux/seq_file.h> /* for seq_printf */
40#include <linux/slab.h>
41
42#include <asm/atomic.h>
43#include <asm/agp.h>
44
45#include "ttm/ttm_bo_driver.h"
46#include "ttm/ttm_page_alloc.h"
47
48
49#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
50#define SMALL_ALLOCATION 16
51#define FREE_ALL_PAGES (~0U)
52/* times are in msecs */
53#define PAGE_FREE_INTERVAL 1000
54
55/**
56 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
57 *
58 * @lock: Protects the shared pool from concurrent access. Must be used with
59 * irqsave/irqrestore variants because the pool allocator may be called from
60 * delayed work.
61 * @fill_lock: Prevent concurrent calls to fill.
62 * @list: Pool of free uc/wc pages for fast reuse.
63 * @gfp_flags: Flags to pass for alloc_page.
64 * @npages: Number of pages in pool.
65 */
66struct ttm_page_pool {
67 spinlock_t lock;
68 bool fill_lock;
69 struct list_head list;
70 int gfp_flags;
71 unsigned npages;
72 char *name;
73 unsigned long nfrees;
74 unsigned long nrefills;
75};
76
77/**
78 * Limits for the pool. They are handled without locks because the only place
79 * where they may change is the sysfs store. They won't have an immediate
80 * effect anyway, so forcing serialization to access them is pointless.
81 */
82
83struct ttm_pool_opts {
84 unsigned alloc_size;
85 unsigned max_size;
86 unsigned small;
87};
88
89#define NUM_POOLS 4
90
91/**
92 * struct ttm_pool_manager - Holds memory pools for fast allocation
93 *
94 * The manager is a read-only object for the pool code, so it doesn't need locking.
95 *
96 * @free_interval: minimum number of jiffies between freeing pages from pool.
97 * @page_alloc_inited: reference counting for pool allocation.
98 * @work: Work that is used to shrink the pool. The work is only run when there
99 * are some pages to free.
100 * @small_allocation: Limit, in pages, below which an allocation counts as small.
101 *
102 * @pools: All pool objects in use.
103 **/
104struct ttm_pool_manager {
105 struct kobject kobj;
106 struct shrinker mm_shrink;
107 atomic_t page_alloc_inited;
108 struct ttm_pool_opts options;
109
110 union {
111 struct ttm_page_pool pools[NUM_POOLS];
112 struct {
113 struct ttm_page_pool wc_pool;
114 struct ttm_page_pool uc_pool;
115 struct ttm_page_pool wc_pool_dma32;
116 struct ttm_page_pool uc_pool_dma32;
117 } ;
118 };
119};
120
121static struct attribute ttm_page_pool_max = {
122 .name = "pool_max_size",
123 .mode = S_IRUGO | S_IWUSR
124};
125static struct attribute ttm_page_pool_small = {
126 .name = "pool_small_allocation",
127 .mode = S_IRUGO | S_IWUSR
128};
129static struct attribute ttm_page_pool_alloc_size = {
130 .name = "pool_allocation_size",
131 .mode = S_IRUGO | S_IWUSR
132};
133
134static struct attribute *ttm_pool_attrs[] = {
135 &ttm_page_pool_max,
136 &ttm_page_pool_small,
137 &ttm_page_pool_alloc_size,
138 NULL
139};
140
141static void ttm_pool_kobj_release(struct kobject *kobj)
142{
143 struct ttm_pool_manager *m =
144 container_of(kobj, struct ttm_pool_manager, kobj);
145 (void)m;
146}
147
148static ssize_t ttm_pool_store(struct kobject *kobj,
149 struct attribute *attr, const char *buffer, size_t size)
150{
151 struct ttm_pool_manager *m =
152 container_of(kobj, struct ttm_pool_manager, kobj);
153 int chars;
154 unsigned val;
155 chars = sscanf(buffer, "%u", &val);
156 if (chars == 0)
157 return size;
158
159 /* Convert kb to number of pages */
160 val = val / (PAGE_SIZE >> 10);
161
162 if (attr == &ttm_page_pool_max)
163 m->options.max_size = val;
164 else if (attr == &ttm_page_pool_small)
165 m->options.small = val;
166 else if (attr == &ttm_page_pool_alloc_size) {
167 if (val > NUM_PAGES_TO_ALLOC*8) {
168 printk(KERN_ERR "[ttm] Setting allocation size to %lu "
169 "is not allowed. Recomended size is "
170 "%lu\n",
171 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
172 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
173 return size;
174 } else if (val > NUM_PAGES_TO_ALLOC) {
175 printk(KERN_WARNING "[ttm] Setting allocation size to "
176 "larger than %lu is not recomended.\n",
177 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
178 }
179 m->options.alloc_size = val;
180 }
181
182 return size;
183}
184
185static ssize_t ttm_pool_show(struct kobject *kobj,
186 struct attribute *attr, char *buffer)
187{
188 struct ttm_pool_manager *m =
189 container_of(kobj, struct ttm_pool_manager, kobj);
190 unsigned val = 0;
191
192 if (attr == &ttm_page_pool_max)
193 val = m->options.max_size;
194 else if (attr == &ttm_page_pool_small)
195 val = m->options.small;
196 else if (attr == &ttm_page_pool_alloc_size)
197 val = m->options.alloc_size;
198
199 val = val * (PAGE_SIZE >> 10);
200
201 return snprintf(buffer, PAGE_SIZE, "%u\n", val);
202}
203
204static const struct sysfs_ops ttm_pool_sysfs_ops = {
205 .show = &ttm_pool_show,
206 .store = &ttm_pool_store,
207};
208
209static struct kobj_type ttm_pool_kobj_type = {
210 .release = &ttm_pool_kobj_release,
211 .sysfs_ops = &ttm_pool_sysfs_ops,
212 .default_attrs = ttm_pool_attrs,
213};
214
215static struct ttm_pool_manager _manager = {
216 .page_alloc_inited = ATOMIC_INIT(0)
217};
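The sysfs files exchange kilobyte values while the pool bookkeeping is in pages; ttm_pool_store()/ttm_pool_show() convert with PAGE_SIZE >> 10. A worked example, assuming 4 KiB pages (so PAGE_SIZE >> 10 == 4):

    /* echo 16384 > pool_max_size   ->  16384 / 4 = 4096 pages stored
     * cat pool_max_size            ->  4096 * 4  = 16384 (kB) shown  */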
218
219#ifndef CONFIG_X86
220static int set_pages_array_wb(struct page **pages, int addrinarray)
221{
222#ifdef TTM_HAS_AGP
223 int i;
224
225 for (i = 0; i < addrinarray; i++)
226 unmap_page_from_agp(pages[i]);
227#endif
228 return 0;
229}
230
231static int set_pages_array_wc(struct page **pages, int addrinarray)
232{
233#ifdef TTM_HAS_AGP
234 int i;
235
236 for (i = 0; i < addrinarray; i++)
237 map_page_into_agp(pages[i]);
238#endif
239 return 0;
240}
241
242static int set_pages_array_uc(struct page **pages, int addrinarray)
243{
244#ifdef TTM_HAS_AGP
245 int i;
246
247 for (i = 0; i < addrinarray; i++)
248 map_page_into_agp(pages[i]);
249#endif
250 return 0;
251}
252#endif
253
254/**
255 * Select the right pool for the requested caching state and ttm flags. */
256static struct ttm_page_pool *ttm_get_pool(int flags,
257 enum ttm_caching_state cstate)
258{
259 int pool_index;
260
261 if (cstate == tt_cached)
262 return NULL;
263
264 if (cstate == tt_wc)
265 pool_index = 0x0;
266 else
267 pool_index = 0x1;
268
269 if (flags & TTM_PAGE_FLAG_DMA32)
270 pool_index |= 0x2;
271
272 return &_manager.pools[pool_index];
273}
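ttm_get_pool() encodes the pool choice as a two-bit index; cached allocations return NULL because they never go through a pool. The layout, spelled out:

    /* bit 0: caching  (0 = write-combined, 1 = uncached)
     * bit 1: DMA32    (set when TTM_PAGE_FLAG_DMA32 is given)
     *
     * e.g. tt_uncached + TTM_PAGE_FLAG_DMA32 -> 0x1 | 0x2 = 0x3,
     * which selects _manager.uc_pool_dma32. */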
274
275/* set memory back to wb and free the pages. */
276static void ttm_pages_put(struct page *pages[], unsigned npages)
277{
278 unsigned i;
279 if (set_pages_array_wb(pages, npages))
280 printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
281 npages);
282 for (i = 0; i < npages; ++i)
283 __free_page(pages[i]);
284}
285
286static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
287 unsigned freed_pages)
288{
289 pool->npages -= freed_pages;
290 pool->nfrees += freed_pages;
291}
292
293/**
294 * Free pages from pool.
295 *
296 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
297 * pages at a time while holding the pool lock.
298 *
299 * @pool: pool to free the pages from
300 * @nr_free: number of pages to free; FREE_ALL_PAGES frees everything
301 **/
302static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
303{
304 unsigned long irq_flags;
305 struct page *p;
306 struct page **pages_to_free;
307 unsigned freed_pages = 0,
308 npages_to_free = nr_free;
309
310 if (NUM_PAGES_TO_ALLOC < nr_free)
311 npages_to_free = NUM_PAGES_TO_ALLOC;
312
313 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
314 GFP_KERNEL);
315 if (!pages_to_free) {
316 printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
317 return 0;
318 }
319
320restart:
321 spin_lock_irqsave(&pool->lock, irq_flags);
322
323 list_for_each_entry_reverse(p, &pool->list, lru) {
324 if (freed_pages >= npages_to_free)
325 break;
326
327 pages_to_free[freed_pages++] = p;
328 /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
329 if (freed_pages >= NUM_PAGES_TO_ALLOC) {
330 /* remove range of pages from the pool */
331 __list_del(p->lru.prev, &pool->list);
332
333 ttm_pool_update_free_locked(pool, freed_pages);
334 /**
335 * Because changing page caching is costly
336 * we unlock the pool to prevent stalling.
337 */
338 spin_unlock_irqrestore(&pool->lock, irq_flags);
339
340 ttm_pages_put(pages_to_free, freed_pages);
341 if (likely(nr_free != FREE_ALL_PAGES))
342 nr_free -= freed_pages;
343
344 if (NUM_PAGES_TO_ALLOC >= nr_free)
345 npages_to_free = nr_free;
346 else
347 npages_to_free = NUM_PAGES_TO_ALLOC;
348
349 freed_pages = 0;
350
351 /* free all so restart the processing */
352 if (nr_free)
353 goto restart;
354
355 /* Not allowed to fall through or break, because the
356 * following context is inside the spinlock while we are
357 * outside it here.
358 */
359 goto out;
360
361 }
362 }
363
364 /* remove range of pages from the pool */
365 if (freed_pages) {
366 __list_del(&p->lru, &pool->list);
367
368 ttm_pool_update_free_locked(pool, freed_pages);
369 nr_free -= freed_pages;
370 }
371
372 spin_unlock_irqrestore(&pool->lock, irq_flags);
373
374 if (freed_pages)
375 ttm_pages_put(pages_to_free, freed_pages);
376out:
377 kfree(pages_to_free);
378 return nr_free;
379}
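The return value of ttm_page_pool_free() is the unfreed remainder of nr_free, which lets the shrinker carry its quota from one pool to the next:

    /* inside the shrink loop (see ttm_pool_mm_shrink below):
     *   shrink_pages = ttm_page_pool_free(pool, nr_free);
     * a nonzero result means this pool ran out of pages first,
     * so the leftover quota is applied to the next pool. */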
380
381/* Get a good estimate of how many pages are free in the pools */
382static int ttm_pool_get_num_unused_pages(void)
383{
384 unsigned i;
385 int total = 0;
386 for (i = 0; i < NUM_POOLS; ++i)
387 total += _manager.pools[i].npages;
388
389 return total;
390}
391
392/**
393 * Callback for mm to request that the pool reduce the number of pages held.
394 */
395static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
396{
397 static atomic_t start_pool = ATOMIC_INIT(0);
398 unsigned i;
399 unsigned pool_offset = atomic_add_return(1, &start_pool);
400 struct ttm_page_pool *pool;
401
402 pool_offset = pool_offset % NUM_POOLS;
403 /* select start pool in round robin fashion */
404 for (i = 0; i < NUM_POOLS; ++i) {
405 unsigned nr_free = shrink_pages;
406 if (shrink_pages == 0)
407 break;
408 pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
409 shrink_pages = ttm_page_pool_free(pool, nr_free);
410 }
411 /* return estimated number of unused pages in pool */
412 return ttm_pool_get_num_unused_pages();
413}
414
415static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
416{
417 manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
418 manager->mm_shrink.seeks = 1;
419 register_shrinker(&manager->mm_shrink);
420}
421
422static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
423{
424 unregister_shrinker(&manager->mm_shrink);
425}
426
427static int ttm_set_pages_caching(struct page **pages,
428 enum ttm_caching_state cstate, unsigned cpages)
429{
430 int r = 0;
431 /* Set page caching */
432 switch (cstate) {
433 case tt_uncached:
434 r = set_pages_array_uc(pages, cpages);
435 if (r)
436 printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
437 cpages);
438 break;
439 case tt_wc:
440 r = set_pages_array_wc(pages, cpages);
441 if (r)
442 printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
443 cpages);
444 break;
445 default:
446 break;
447 }
448 return r;
449}
450
451/**
452 * Free the pages that failed to change the caching state. If any pages
453 * have already changed their caching state, put them back in the
454 * pool.
455 */
456static void ttm_handle_caching_state_failure(struct list_head *pages,
457 int ttm_flags, enum ttm_caching_state cstate,
458 struct page **failed_pages, unsigned cpages)
459{
460 unsigned i;
461 /* Failed pages have to be freed */
462 for (i = 0; i < cpages; ++i) {
463 list_del(&failed_pages[i]->lru);
464 __free_page(failed_pages[i]);
465 }
466}
467
468/**
469 * Allocate new pages with correct caching.
470 *
471 * This function is reentrant if the caller updates count depending on the
472 * number of pages returned in the pages array.
473 */
474static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
475 int ttm_flags, enum ttm_caching_state cstate, unsigned count)
476{
477 struct page **caching_array;
478 struct page *p;
479 int r = 0;
480 unsigned i, cpages;
481 unsigned max_cpages = min(count,
482 (unsigned)(PAGE_SIZE/sizeof(struct page *)));
483
484 /* allocate array for page caching change */
485 caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
486
487 if (!caching_array) {
488 printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
489 return -ENOMEM;
490 }
491
492 for (i = 0, cpages = 0; i < count; ++i) {
493 p = alloc_page(gfp_flags);
494
495 if (!p) {
496 printk(KERN_ERR "[ttm] unable to get page %u\n", i);
497
498 /* store already allocated pages in the pool after
499 * setting the caching state */
500 if (cpages) {
501 r = ttm_set_pages_caching(caching_array, cstate, cpages);
502 if (r)
503 ttm_handle_caching_state_failure(pages,
504 ttm_flags, cstate,
505 caching_array, cpages);
506 }
507 r = -ENOMEM;
508 goto out;
509 }
510
511#ifdef CONFIG_HIGHMEM
512 /* gfp flags of a highmem page should never include dma32, so
513 * we should be fine in that case
514 */
515 if (!PageHighMem(p))
516#endif
517 {
518 caching_array[cpages++] = p;
519 if (cpages == max_cpages) {
520
521 r = ttm_set_pages_caching(caching_array,
522 cstate, cpages);
523 if (r) {
524 ttm_handle_caching_state_failure(pages,
525 ttm_flags, cstate,
526 caching_array, cpages);
527 goto out;
528 }
529 cpages = 0;
530 }
531 }
532
533 list_add(&p->lru, pages);
534 }
535
536 if (cpages) {
537 r = ttm_set_pages_caching(caching_array, cstate, cpages);
538 if (r)
539 ttm_handle_caching_state_failure(pages,
540 ttm_flags, cstate,
541 caching_array, cpages);
542 }
543out:
544 kfree(caching_array);
545
546 return r;
547}
548
549/**
550 * Fill the given pool if there aren't enough pages and the requested number
551 * of pages is small.
552 */
553static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
554 int ttm_flags, enum ttm_caching_state cstate, unsigned count,
555 unsigned long *irq_flags)
556{
557 struct page *p;
558 int r;
559 unsigned cpages = 0;
560 /**
561 * Only allow one pool fill operation at a time.
562 * If the pool doesn't have enough pages for the allocation, new pages
563 * are allocated from outside the pool.
564 */
565 if (pool->fill_lock)
566 return;
567
568 pool->fill_lock = true;
569
570 /* If the allocation request is small and there are not enough
571 * pages in the pool, we fill the pool first */
572 if (count < _manager.options.small
573 && count > pool->npages) {
574 struct list_head new_pages;
575 unsigned alloc_size = _manager.options.alloc_size;
576
577 /**
578 * Can't change page caching if in irqsave context. We have to
579 * drop the pool->lock.
580 */
581 spin_unlock_irqrestore(&pool->lock, *irq_flags);
582
583 INIT_LIST_HEAD(&new_pages);
584 r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
585 cstate, alloc_size);
586 spin_lock_irqsave(&pool->lock, *irq_flags);
587
588 if (!r) {
589 list_splice(&new_pages, &pool->list);
590 ++pool->nrefills;
591 pool->npages += alloc_size;
592 } else {
593 printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
594 /* If we have any pages left put them to the pool. */
595 list_for_each_entry(p, &pool->list, lru) {
596 ++cpages;
597 }
598 list_splice(&new_pages, &pool->list);
599 pool->npages += cpages;
600 }
601
602 }
603 pool->fill_lock = false;
604}
605
606/**
607 * Cut count number of pages from the pool and put them on the return list.
608 *
609 * @return number of pages still to allocate to fill the request.
610 */
611static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
612 struct list_head *pages, int ttm_flags,
613 enum ttm_caching_state cstate, unsigned count)
614{
615 unsigned long irq_flags;
616 struct list_head *p;
617 unsigned i;
618
619 spin_lock_irqsave(&pool->lock, irq_flags);
620 ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
621
622 if (count >= pool->npages) {
623 /* take all pages from the pool */
624 list_splice_init(&pool->list, pages);
625 count -= pool->npages;
626 pool->npages = 0;
627 goto out;
628 }
629 /* Find the last page to include for the requested number of pages.
630 * Walk the list from whichever end is nearer to halve the search space. */
631 if (count <= pool->npages/2) {
632 i = 0;
633 list_for_each(p, &pool->list) {
634 if (++i == count)
635 break;
636 }
637 } else {
638 i = pool->npages + 1;
639 list_for_each_prev(p, &pool->list) {
640 if (--i == count)
641 break;
642 }
643 }
644 /* Cut count number of pages from pool */
645 list_cut_position(pages, &pool->list, p);
646 pool->npages -= count;
647 count = 0;
648out:
649 spin_unlock_irqrestore(&pool->lock, irq_flags);
650 return count;
651}
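The bidirectional search above walks at most npages/2 links by starting from whichever end of the list is nearer. For example, with pool->npages == 100:

    /* count == 30: walk 30 links forward from the head.
     * count == 80: i starts at 101 and walks backward; --i reaches
     *              80 after 21 steps, instead of 80 forward steps. */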
652
653/*
654 * On success the pages list will hold count number of correctly
655 * cached pages.
656 */
657int ttm_get_pages(struct list_head *pages, int flags,
658 enum ttm_caching_state cstate, unsigned count)
659{
660 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
661 struct page *p = NULL;
662 int gfp_flags = 0;
663 int r;
664
665 /* set zero flag for page allocation if required */
666 if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
667 gfp_flags |= __GFP_ZERO;
668
669 /* No pool for cached pages */
670 if (pool == NULL) {
671 if (flags & TTM_PAGE_FLAG_DMA32)
672 gfp_flags |= GFP_DMA32;
673 else
674 gfp_flags |= __GFP_HIGHMEM;
675
676 for (r = 0; r < count; ++r) {
677 p = alloc_page(gfp_flags);
678 if (!p) {
679
680 printk(KERN_ERR "[ttm] unable to allocate page.");
681 return -ENOMEM;
682 }
683
684 list_add(&p->lru, pages);
685 }
686 return 0;
687 }
688
689
690 /* combine zero flag to pool flags */
691 gfp_flags |= pool->gfp_flags;
692
693 /* First we take pages from the pool */
694 count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
695
696 /* clear the pages coming from the pool if requested */
697 if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
698 list_for_each_entry(p, pages, lru) {
699 clear_page(page_address(p));
700 }
701 }
702
703 /* If the pool didn't have enough pages, allocate new ones. */
704 if (count > 0) {
705 /* ttm_alloc_new_pages doesn't reference the pool, so we can run
706 * multiple requests in parallel.
707 */
708 r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
709 if (r) {
710 /* If there are any pages in the list, put them back in
711 * the pool. */
712 printk(KERN_ERR "[ttm] Failed to allocate extra pages "
713 "for large request.");
714 ttm_put_pages(pages, 0, flags, cstate);
715 return r;
716 }
717 }
718
719
720 return 0;
721}
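A minimal usage sketch of the allocator entry point (error handling elided); on success the caller owns count pages linked on the list and later returns them with ttm_put_pages():

    LIST_HEAD(pages);
    int ret;

    ret = ttm_get_pages(&pages, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc, 16);
    if (ret == 0)
            ttm_put_pages(&pages, 16, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc);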
722
723/* Put all pages in the pages list into the correct pool to wait for reuse */
724void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
725 enum ttm_caching_state cstate)
726{
727 unsigned long irq_flags;
728 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
729 struct page *p, *tmp;
730
731 if (pool == NULL) {
732 /* No pool for this memory type so free the pages */
733
734 list_for_each_entry_safe(p, tmp, pages, lru) {
735 __free_page(p);
736 }
737 /* Make the pages list empty */
738 INIT_LIST_HEAD(pages);
739 return;
740 }
741 if (page_count == 0) {
742 list_for_each_entry_safe(p, tmp, pages, lru) {
743 ++page_count;
744 }
745 }
746
747 spin_lock_irqsave(&pool->lock, irq_flags);
748 list_splice_init(pages, &pool->list);
749 pool->npages += page_count;
750 /* Check that we don't go over the pool limit */
751 page_count = 0;
752 if (pool->npages > _manager.options.max_size) {
753 page_count = pool->npages - _manager.options.max_size;
754 /* free at least NUM_PAGES_TO_ALLOC number of pages
755 * to reduce calls to set_memory_wb */
756 if (page_count < NUM_PAGES_TO_ALLOC)
757 page_count = NUM_PAGES_TO_ALLOC;
758 }
759 spin_unlock_irqrestore(&pool->lock, irq_flags);
760 if (page_count)
761 ttm_page_pool_free(pool, page_count);
762}
763
764static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
765 char *name)
766{
767 spin_lock_init(&pool->lock);
768 pool->fill_lock = false;
769 INIT_LIST_HEAD(&pool->list);
770 pool->npages = pool->nfrees = 0;
771 pool->gfp_flags = flags;
772 pool->name = name;
773}
774
775int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
776{
777 int ret;
778 if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
779 return 0;
780
781 printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
782
783 ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
784
785 ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
786
787 ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
788 "wc dma");
789
790 ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
791 "uc dma");
792
793 _manager.options.max_size = max_pages;
794 _manager.options.small = SMALL_ALLOCATION;
795 _manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
796
797 kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
798 ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
799 if (unlikely(ret != 0)) {
800 kobject_put(&_manager.kobj);
801 return ret;
802 }
803
804 ttm_pool_mm_shrink_init(&_manager);
805
806 return 0;
807}
808
809void ttm_page_alloc_fini()
810{
811 int i;
812
813 if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
814 return;
815
816 printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
817 ttm_pool_mm_shrink_fini(&_manager);
818
819 for (i = 0; i < NUM_POOLS; ++i)
820 ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
821
822 kobject_put(&_manager.kobj);
823}
824
825int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
826{
827 struct ttm_page_pool *p;
828 unsigned i;
829 char *h[] = {"pool", "refills", "pages freed", "size"};
830 if (atomic_read(&_manager.page_alloc_inited) == 0) {
831 seq_printf(m, "No pool allocator running.\n");
832 return 0;
833 }
834 seq_printf(m, "%6s %12s %13s %8s\n",
835 h[0], h[1], h[2], h[3]);
836 for (i = 0; i < NUM_POOLS; ++i) {
837 p = &_manager.pools[i];
838
839 seq_printf(m, "%6s %12ld %13ld %8d\n",
840 p->name, p->nrefills,
841 p->nfrees, p->npages);
842 }
843 return 0;
844}
845EXPORT_SYMBOL(ttm_page_alloc_debugfs);
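Given the seq_printf format strings above ("%6s %12s %13s %8s" and the matching row format), the debugfs file reads roughly as follows; the numbers here are purely illustrative:

      pool      refills   pages freed     size
        wc           12          3072      512
        uc            4           768      128
    wc dma            0             0        0
    uc dma            0             0        0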
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d5fd5b8faeb3..a7bab87a548b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -39,6 +39,7 @@
39#include "ttm/ttm_module.h" 39#include "ttm/ttm_module.h"
40#include "ttm/ttm_bo_driver.h" 40#include "ttm/ttm_bo_driver.h"
41#include "ttm/ttm_placement.h" 41#include "ttm/ttm_placement.h"
42#include "ttm/ttm_page_alloc.h"
42 43
43static int ttm_tt_swapin(struct ttm_tt *ttm); 44static int ttm_tt_swapin(struct ttm_tt *ttm);
44 45
@@ -56,21 +57,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
56 ttm->pages = NULL; 57 ttm->pages = NULL;
57} 58}
58 59
59static struct page *ttm_tt_alloc_page(unsigned page_flags)
60{
61 gfp_t gfp_flags = GFP_USER;
62
63 if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
64 gfp_flags |= __GFP_ZERO;
65
66 if (page_flags & TTM_PAGE_FLAG_DMA32)
67 gfp_flags |= __GFP_DMA32;
68 else
69 gfp_flags |= __GFP_HIGHMEM;
70
71 return alloc_page(gfp_flags);
72}
73
74static void ttm_tt_free_user_pages(struct ttm_tt *ttm) 60static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
75{ 61{
76 int write; 62 int write;
@@ -111,15 +97,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
111static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index) 97static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
112{ 98{
113 struct page *p; 99 struct page *p;
100 struct list_head h;
114 struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; 101 struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
115 int ret; 102 int ret;
116 103
117 while (NULL == (p = ttm->pages[index])) { 104 while (NULL == (p = ttm->pages[index])) {
118 p = ttm_tt_alloc_page(ttm->page_flags);
119 105
120 if (!p) 106 INIT_LIST_HEAD(&h);
107
108 ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
109
110 if (ret != 0)
121 return NULL; 111 return NULL;
122 112
113 p = list_first_entry(&h, struct page, lru);
114
123 ret = ttm_mem_global_alloc_page(mem_glob, p, false, false); 115 ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
124 if (unlikely(ret != 0)) 116 if (unlikely(ret != 0))
125 goto out_err; 117 goto out_err;
@@ -228,10 +220,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
228 if (ttm->caching_state == c_state) 220 if (ttm->caching_state == c_state)
229 return 0; 221 return 0;
230 222
231 if (c_state != tt_cached) { 223 if (ttm->state == tt_unpopulated) {
232 ret = ttm_tt_populate(ttm); 224 /* Change caching but don't populate */
233 if (unlikely(ret != 0)) 225 ttm->caching_state = c_state;
234 return ret; 226 return 0;
235 } 227 }
236 228
237 if (ttm->caching_state == tt_cached) 229 if (ttm->caching_state == tt_cached)
@@ -282,13 +274,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
282static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) 274static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
283{ 275{
284 int i; 276 int i;
277 unsigned count = 0;
278 struct list_head h;
285 struct page *cur_page; 279 struct page *cur_page;
286 struct ttm_backend *be = ttm->be; 280 struct ttm_backend *be = ttm->be;
287 281
282 INIT_LIST_HEAD(&h);
283
288 if (be) 284 if (be)
289 be->func->clear(be); 285 be->func->clear(be);
290 (void)ttm_tt_set_caching(ttm, tt_cached);
291 for (i = 0; i < ttm->num_pages; ++i) { 286 for (i = 0; i < ttm->num_pages; ++i) {
287
292 cur_page = ttm->pages[i]; 288 cur_page = ttm->pages[i];
293 ttm->pages[i] = NULL; 289 ttm->pages[i] = NULL;
294 if (cur_page) { 290 if (cur_page) {
@@ -298,9 +294,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
298 "Leaking pages.\n"); 294 "Leaking pages.\n");
299 ttm_mem_global_free_page(ttm->glob->mem_glob, 295 ttm_mem_global_free_page(ttm->glob->mem_glob,
300 cur_page); 296 cur_page);
301 __free_page(cur_page); 297 list_add(&cur_page->lru, &h);
298 count++;
302 } 299 }
303 } 300 }
301 ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
304 ttm->state = tt_unpopulated; 302 ttm->state = tt_unpopulated;
305 ttm->first_himem_page = ttm->num_pages; 303 ttm->first_himem_page = ttm->num_pages;
306 ttm->last_lomem_page = -1; 304 ttm->last_lomem_page = -1;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 825ebe3d89d5..c4f5114aee7c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -137,9 +137,6 @@ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		      struct ttm_mem_type_manager *man)
 {
-	struct vmw_private *dev_priv =
-		container_of(bdev, struct vmw_private, bdev);
-
 	switch (type) {
 	case TTM_PL_SYSTEM:
 		/* System memory */
@@ -151,11 +148,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
 		man->gpu_offset = 0;
-		man->io_offset = dev_priv->vram_start;
-		man->io_size = dev_priv->vram_size;
-		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-			TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->io_addr = NULL;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_WC;
 		break;
@@ -193,6 +186,42 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
 	vmw_dmabuf_gmr_unbind(bo);
 }
 
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.is_iomem = false;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.base = dev_priv->vram_start;
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	return 0;
+}
+
 /**
  * FIXME: We're using the old vmware polling method to sync.
  * Do this with fences instead.
@@ -248,5 +277,8 @@ struct ttm_bo_driver vmw_bo_driver = {
 	.sync_obj_unref = vmw_sync_obj_unref,
 	.sync_obj_ref = vmw_sync_obj_ref,
 	.move_notify = vmw_move_notify,
-	.swap_notify = vmw_swap_notify
+	.swap_notify = vmw_swap_notify,
+	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
+	.io_mem_free = &vmw_ttm_io_mem_free,
 };
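
The io_offset/io_size/io_addr fields deleted from vmw_init_mem_type() above are replaced by the per-mapping io_mem_reserve hook: instead of static aperture fields on the memory-type manager, TTM now asks the driver to fill a bus placement each time it needs to map a buffer. A sketch of how the consumer side presumably uses the fields vmw_ttm_io_mem_reserve() fills in, taking the CPU-visible physical address as bus.base + bus.offset:

	/* Sketch (error handling omitted): mapping a bus placement that
	 * io_mem_reserve() marked as I/O memory. */
	if (mem->bus.is_iomem) {
		unsigned long phys = mem->bus.base + mem->bus.offset;
		void __iomem *map = ioremap_wc(phys, mem->bus.size);
		/* ... access VRAM through map, then iounmap(map) and let
		 * the driver's io_mem_free() undo any reservation ... */
	}
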
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 0897359b3e4e..dbd36b8910cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * Put BO in VRAM, only if there is space.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
 	if (unlikely(ret == -ERESTARTSYS))
 		return ret;
 
@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * previous contents.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
 	return ret;
 }
596 596
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index a93367041cdc..7421aaad8d09 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,8 +559,13 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	info->pixmap.scan_align = 1;
 #endif
 
-	info->aperture_base = vmw_priv->vram_start;
-	info->aperture_size = vmw_priv->vram_size;
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto err_aper;
+	}
+	info->apertures->ranges[0].base = vmw_priv->vram_start;
+	info->apertures->ranges[0].size = vmw_priv->vram_size;
 
 	/*
 	 * Dirty & Deferred IO
@@ -580,6 +585,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 
 err_defio:
 	fb_deferred_io_cleanup(info);
+err_aper:
 	ttm_bo_kunmap(&par->map);
 err_unref:
 	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
@@ -628,7 +634,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
 	ttm_bo_unreserve(bo);
 
 	return ret;
@@ -652,7 +658,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
-	ret = ttm_bo_validate(bo, &ne_placement, false, false);
+	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
 	ttm_bo_unreserve(bo);
 err_unlock:
 	ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 31f9afed0a63..bbc7c4c30bc7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -752,14 +752,8 @@ err_not_scanout:
 	return NULL;
 }
 
-static int vmw_kms_fb_changed(struct drm_device *dev)
-{
-	return 0;
-}
-
 static struct drm_mode_config_funcs vmw_kms_funcs = {
 	.fb_create = vmw_kms_fb_create,
-	.fb_changed = vmw_kms_fb_changed,
 };
 
 int vmw_kms_init(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 5b6eabeb7f51..ad566c85b075 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
 	if (pin)
 		overlay_placement = &vmw_vram_ne_placement;
 
-	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
 
 	ttm_bo_unreserve(bo);
 
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 61ab4daf0bbb..8d0e31a22027 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -18,12 +18,12 @@ config VGA_ARB_MAX_GPUS
 	  multiple GPUS. The overhead for each GPU is very small.
 
 config VGA_SWITCHEROO
-	bool "Laptop Hybrid Grapics - GPU switching support"
+	bool "Laptop Hybrid Graphics - GPU switching support"
 	depends on X86
 	depends on ACPI
 	help
-	  Many laptops released in 2008/9/10 have two gpus with a multiplxer
+	  Many laptops released in 2008/9/10 have two GPUs with a multiplexer
 	  to switch between them. This adds support for dynamic switching when
 	  X isn't running and delayed switching until the next logoff. This
-	  features is called hybrid graphics, ATI PowerXpress, and Nvidia
+	  feature is called hybrid graphics, ATI PowerXpress, and Nvidia
 	  HybridPower.
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index ecf405562f5c..4a56f46af40a 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -168,7 +168,7 @@ static void efifb_destroy(struct fb_info *info)
 {
 	if (info->screen_base)
 		iounmap(info->screen_base);
-	release_mem_region(info->aperture_base, info->aperture_size);
+	release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
 	framebuffer_release(info);
 }
 
@@ -292,8 +292,13 @@ static int __devinit efifb_probe(struct platform_device *dev)
 	info->pseudo_palette = info->par;
 	info->par = NULL;
 
-	info->aperture_base = efifb_fix.smem_start;
-	info->aperture_size = size_remap;
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		err = -ENOMEM;
+		goto err_release_fb;
+	}
+	info->apertures->ranges[0].base = efifb_fix.smem_start;
+	info->apertures->ranges[0].size = size_remap;
 
 	info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
 	if (!info->screen_base) {
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index a15b44e9c003..e08b7b5cb326 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1468,16 +1468,67 @@ static int fb_check_foreignness(struct fb_info *fi)
 	return 0;
 }
 
-static bool fb_do_apertures_overlap(struct fb_info *gen, struct fb_info *hw)
+static bool apertures_overlap(struct aperture *gen, struct aperture *hw)
 {
 	/* is the generic aperture base the same as the HW one */
-	if (gen->aperture_base == hw->aperture_base)
+	if (gen->base == hw->base)
 		return true;
 	/* is the generic aperture base inside the hw base->hw base+size */
-	if (gen->aperture_base > hw->aperture_base && gen->aperture_base <= hw->aperture_base + hw->aperture_size)
+	if (gen->base > hw->base && gen->base <= hw->base + hw->size)
 		return true;
 	return false;
 }
+
+static bool fb_do_apertures_overlap(struct apertures_struct *gena,
+				    struct apertures_struct *hwa)
+{
+	int i, j;
+	if (!hwa || !gena)
+		return false;
+
+	for (i = 0; i < hwa->count; ++i) {
+		struct aperture *h = &hwa->ranges[i];
+		for (j = 0; j < gena->count; ++j) {
+			struct aperture *g = &gena->ranges[j];
+			printk(KERN_DEBUG "checking generic (%llx %llx) vs hw (%llx %llx)\n",
+				g->base, g->size, h->base, h->size);
+			if (apertures_overlap(g, h))
+				return true;
+		}
+	}
+
+	return false;
+}
+
+#define VGA_FB_PHYS 0xA0000
+void remove_conflicting_framebuffers(struct apertures_struct *a,
+				     const char *name, bool primary)
+{
+	int i;
+
+	/* check all firmware fbs and kick off if the base addr overlaps */
+	for (i = 0 ; i < FB_MAX; i++) {
+		struct apertures_struct *gen_aper;
+		if (!registered_fb[i])
+			continue;
+
+		if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
+			continue;
+
+		gen_aper = registered_fb[i]->apertures;
+		if (fb_do_apertures_overlap(gen_aper, a) ||
+		    (primary && gen_aper && gen_aper->count &&
+		     gen_aper->ranges[0].base == VGA_FB_PHYS)) {
+
+			printk(KERN_ERR "fb: conflicting fb hw usage "
+			       "%s vs %s - removing generic driver\n",
+			       name, registered_fb[i]->fix.id);
+			unregister_framebuffer(registered_fb[i]);
+		}
+	}
+}
+EXPORT_SYMBOL(remove_conflicting_framebuffers);
+
 /**
  * register_framebuffer - registers a frame buffer device
  * @fb_info: frame buffer info structure
@@ -1501,21 +1552,8 @@ register_framebuffer(struct fb_info *fb_info)
 	if (fb_check_foreignness(fb_info))
 		return -ENOSYS;
 
-	/* check all firmware fbs and kick off if the base addr overlaps */
-	for (i = 0 ; i < FB_MAX; i++) {
-		if (!registered_fb[i])
-			continue;
-
-		if (registered_fb[i]->flags & FBINFO_MISC_FIRMWARE) {
-			if (fb_do_apertures_overlap(registered_fb[i], fb_info)) {
-				printk(KERN_ERR "fb: conflicting fb hw usage "
-				       "%s vs %s - removing generic driver\n",
-				       fb_info->fix.id,
-				       registered_fb[i]->fix.id);
-				unregister_framebuffer(registered_fb[i]);
-			}
-		}
-	}
+	remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
+					fb_is_primary_device(fb_info));
 
 	num_registered_fb++;
 	for (i = 0 ; i < FB_MAX; i++)
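
Exporting remove_conflicting_framebuffers() lets a native KMS driver evict a firmware framebuffer before registering its own, rather than relying on the overlap check that previously ran only inside register_framebuffer(). A hedged sketch of a caller, for a hypothetical PCI driver whose VRAM sits behind BAR 0 (the driver names here are illustrative, not from this diff):

	struct apertures_struct *ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;
	ap->ranges[0].base = pci_resource_start(pdev, 0);
	ap->ranges[0].size = pci_resource_len(pdev, 0);
	/* Unregisters any FBINFO_MISC_FIRMWARE fb (vesafb, efifb, offb)
	 * overlapping this aperture; "primary" additionally evicts a VGA
	 * fb sitting at VGA_FB_PHYS (0xA0000). */
	remove_conflicting_framebuffers(ap, "exampledrmfb", true);
	kfree(ap);
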
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 81aa3129c17d..0a08f1341227 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -80,6 +80,7 @@ EXPORT_SYMBOL(framebuffer_alloc);
  */
 void framebuffer_release(struct fb_info *info)
 {
+	kfree(info->apertures);
 	kfree(info);
 }
 EXPORT_SYMBOL(framebuffer_release);
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 61f8b8f919b0..46dda7d8aaee 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -285,7 +285,7 @@ static void offb_destroy(struct fb_info *info)
 {
 	if (info->screen_base)
 		iounmap(info->screen_base);
-	release_mem_region(info->aperture_base, info->aperture_size);
+	release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
 	framebuffer_release(info);
 }
 
@@ -491,8 +491,11 @@ static void __init offb_init_fb(const char *name, const char *full_name,
 	var->vmode = FB_VMODE_NONINTERLACED;
 
 	/* set offb aperture size for generic probing */
-	info->aperture_base = address;
-	info->aperture_size = fix->smem_len;
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures)
+		goto out_aper;
+	info->apertures->ranges[0].base = address;
+	info->apertures->ranges[0].size = fix->smem_len;
 
 	info->fbops = &offb_ops;
 	info->screen_base = ioremap(address, fix->smem_len);
@@ -501,17 +504,20 @@ static void __init offb_init_fb(const char *name, const char *full_name,
 
 	fb_alloc_cmap(&info->cmap, 256, 0);
 
-	if (register_framebuffer(info) < 0) {
-		iounmap(par->cmap_adr);
-		par->cmap_adr = NULL;
-		iounmap(info->screen_base);
-		framebuffer_release(info);
-		release_mem_region(res_start, res_size);
-		return;
-	}
+	if (register_framebuffer(info) < 0)
+		goto out_err;
 
 	printk(KERN_INFO "fb%d: Open Firmware frame buffer device on %s\n",
 	       info->node, full_name);
+	return;
+
+out_err:
+	iounmap(info->screen_base);
+out_aper:
+	iounmap(par->cmap_adr);
+	par->cmap_adr = NULL;
+	framebuffer_release(info);
+	release_mem_region(res_start, res_size);
 }
 
 
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index 0cadf7aee27e..090aa1a9be6e 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -177,7 +177,7 @@ static void vesafb_destroy(struct fb_info *info)
 {
 	if (info->screen_base)
 		iounmap(info->screen_base);
-	release_mem_region(info->aperture_base, info->aperture_size);
+	release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
 	framebuffer_release(info);
 }
 
@@ -295,8 +295,13 @@ static int __init vesafb_probe(struct platform_device *dev)
 	info->par = NULL;
 
 	/* set vesafb aperture size for generic probing */
-	info->aperture_base = screen_info.lfb_base;
-	info->aperture_size = size_total;
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		err = -ENOMEM;
+		goto err;
+	}
+	info->apertures->ranges[0].base = screen_info.lfb_base;
+	info->apertures->ranges[0].size = size_total;
 
 	info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len);
 	if (!info->screen_base) {
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index bf638a47a5b3..149c47ac7e93 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -1263,10 +1263,19 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image
 	vga_imageblit_color(info, image);
 }
 
+static void vga16fb_destroy(struct fb_info *info)
+{
+	iounmap(info->screen_base);
+	fb_dealloc_cmap(&info->cmap);
+	/* XXX unshare VGA regions */
+	framebuffer_release(info);
+}
+
 static struct fb_ops vga16fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_open = vga16fb_open,
 	.fb_release = vga16fb_release,
+	.fb_destroy = vga16fb_destroy,
 	.fb_check_var = vga16fb_check_var,
 	.fb_set_par = vga16fb_set_par,
 	.fb_setcolreg = vga16fb_setcolreg,
@@ -1306,6 +1315,11 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
 		ret = -ENOMEM;
 		goto err_fb_alloc;
 	}
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto err_ioremap;
+	}
 
 	/* XXX share VGA_FB_PHYS and I/O region with vgacon and others */
 	info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS, 0);
@@ -1335,7 +1349,7 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
 	info->fix = vga16fb_fix;
 	/* supports rectangles with widths of multiples of 8 */
 	info->pixmap.blit_x = 1 << 7 | 1 << 15 | 1 << 23 | 1 << 31;
-	info->flags = FBINFO_FLAG_DEFAULT |
+	info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
 		FBINFO_HWACCEL_YPAN;
 
 	i = (info->var.bits_per_pixel == 8) ? 256 : 16;
@@ -1354,6 +1368,9 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
 
 	vga16fb_update_fix(info);
 
+	info->apertures->ranges[0].base = VGA_FB_PHYS;
+	info->apertures->ranges[0].size = VGA_FB_PHYS_LEN;
+
 	if (register_framebuffer(info) < 0) {
 		printk(KERN_ERR "vga16fb: unable to register framebuffer\n");
 		ret = -EINVAL;
@@ -1380,13 +1397,8 @@ static int vga16fb_remove(struct platform_device *dev)
 {
 	struct fb_info *info = platform_get_drvdata(dev);
 
-	if (info) {
+	if (info)
 		unregister_framebuffer(info);
-		iounmap(info->screen_base);
-		fb_dealloc_cmap(&info->cmap);
-		/* XXX unshare VGA regions */
-		framebuffer_release(info);
-	}
 
 	return 0;
 }
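
The teardown moves out of vga16fb_remove() and into the new .fb_destroy hook because this same patch also marks vga16fb as FBINFO_MISC_FIRMWARE: once remove_conflicting_framebuffers() can unregister the device out from under the driver, freeing screen_base and the fb_info synchronously in the platform remove path would no longer be safe. The resulting pattern, as a sketch for a hypothetical driver:

	/* Called by the fb core when the fb_info is finally torn down,
	 * whether via driver remove or via a conflicting-fb kick-out. */
	static void examplefb_destroy(struct fb_info *info)
	{
		iounmap(info->screen_base);
		fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info); /* also kfree()s info->apertures */
	}

	static struct fb_ops examplefb_ops = {
		.owner = THIS_MODULE,
		.fb_destroy = examplefb_destroy,
		/* ... */
	};
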