aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2013-06-27 20:53:23 -0400
committerDave Airlie <airlied@redhat.com>2013-06-27 20:53:23 -0400
commit5b0207bb59b26a8cb5b53f13c99c67eb0d2a7abb (patch)
tree114adf60884b7a69ed2812c7b06f41d5c228981a
parent28419261b09aa3a5118647b1ed93809ca97c5354 (diff)
parenta144acbcfbfea44a80afc8f880a7ad72bf01c819 (diff)
Merge branch 'drm-next-3.11' of git://people.freedesktop.org/~agd5f/linux into drm-next
This is the pull request for radeon for 3.11. Highlights include: - Support for CIK (Sea Islands) asics: 3D, compute, UVD - DPM (Dynamic Power Management) support for 6xx-SI - ASPM support for 6xx-SI - Assorted bug fixes * 'drm-next-3.11' of git://people.freedesktop.org/~agd5f/linux: (168 commits) drm/radeon/SI: fix TDP adjustment in set_power_state drm/radeon/NI: fix TDP adjustment in set_power_state drm/radeon: fix endian issues in atombios dpm code drm/radeon/dpm: fix UVD clock setting on SI drm/radeon/dpm: fix UVD clock setting on cayman drm/radeon/dpm: add support for setting UVD clock on rv6xx drm/radeon/dpm: add support for setting UVD clock on rs780 drm/radeon: fix typo in ni_print_power_state drm/radeon: fix typo in cik_select_se_sh() drm/radeon/si: fix typo in function name drm/radeon/dpm: fix typo in setting uvd clock drm/radeon/dpm: add dpm_set_power_state failure output (si) add dpm_set_power_state failure output (7xx-ni) drm/radeon/dpm: add dpm_set_power_state failure output (7xx-ni) drm/radeon/dpm: add dpm_enable failure output (si) drm/radeon/dpm: add dpm_enable failure output (7xx-ni) drm/radeon/kms: add dpm support for SI (v7) drm/radeon: switch SI to use radeon_ucode.h drm/radeon: add SI to r600_is_internal_thermal_sensor() drm/radeon/dpm/rs780: properly catch errors in dpm setup ...
-rw-r--r--drivers/gpu/drm/radeon/Makefile5
-rw-r--r--drivers/gpu/drm/radeon/ObjectID.h40
-rw-r--r--drivers/gpu/drm/radeon/atombios.h547
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c88
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c51
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c2740
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.h57
-rw-r--r--drivers/gpu/drm/radeon/btcd.h181
-rw-r--r--drivers/gpu/drm/radeon/cik.c6987
-rw-r--r--drivers/gpu/drm/radeon/cik_blit_shaders.c246
-rw-r--r--drivers/gpu/drm/radeon/cik_blit_shaders.h32
-rw-r--r--drivers/gpu/drm/radeon/cik_reg.h147
-rw-r--r--drivers/gpu/drm/radeon/cikd.h1297
-rw-r--r--drivers/gpu/drm/radeon/clearstate_cayman.h1081
-rw-r--r--drivers/gpu/drm/radeon/clearstate_defs.h44
-rw-r--r--drivers/gpu/drm/radeon/clearstate_evergreen.h1080
-rw-r--r--drivers/gpu/drm/radeon/clearstate_si.h941
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c2176
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.h160
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c644
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c11
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h12
-rw-r--r--drivers/gpu/drm/radeon/evergreen_smc.h67
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h389
-rw-r--r--drivers/gpu/drm/radeon/ni.c198
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c4316
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.h248
-rw-r--r--drivers/gpu/drm/radeon/nid.h561
-rw-r--r--drivers/gpu/drm/radeon/nislands_smc.h329
-rw-r--r--drivers/gpu/drm/radeon/ppsmc.h113
-rw-r--r--drivers/gpu/drm/radeon/r600.c147
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c1024
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.h226
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c11
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h6
-rw-r--r--drivers/gpu/drm/radeon/r600d.h232
-rw-r--r--drivers/gpu/drm/radeon/radeon.h519
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c145
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c698
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h178
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c880
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c106
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c41
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h93
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c607
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c51
-rw-r--r--drivers/gpu/drm/radeon/radeon_ucode.h129
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c32
-rw-r--r--drivers/gpu/drm/radeon/rs690.c291
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c963
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.h109
-rw-r--r--drivers/gpu/drm/radeon/rs780d.h168
-rw-r--r--drivers/gpu/drm/radeon/rv515.c224
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c2059
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.h95
-rw-r--r--drivers/gpu/drm/radeon/rv6xxd.h246
-rw-r--r--drivers/gpu/drm/radeon/rv730_dpm.c508
-rw-r--r--drivers/gpu/drm/radeon/rv730d.h165
-rw-r--r--drivers/gpu/drm/radeon/rv740_dpm.c416
-rw-r--r--drivers/gpu/drm/radeon/rv740d.h117
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c2462
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.h288
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.c621
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.h209
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h279
-rw-r--r--drivers/gpu/drm/radeon/si.c1337
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c6387
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.h227
-rw-r--r--drivers/gpu/drm/radeon/si_smc.c284
-rw-r--r--drivers/gpu/drm/radeon/sid.h599
-rw-r--r--drivers/gpu/drm/radeon/sislands_smc.h397
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c1801
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.h220
-rw-r--r--drivers/gpu/drm/radeon/sumo_smc.c222
-rw-r--r--drivers/gpu/drm/radeon/sumod.h372
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c1887
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.h131
-rw-r--r--drivers/gpu/drm/radeon/trinity_smc.c115
-rw-r--r--drivers/gpu/drm/radeon/trinityd.h228
-rw-r--r--include/drm/drm_fixed.h94
-rw-r--r--include/drm/drm_pciids.h24
88 files changed, 52960 insertions, 561 deletions
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 86c5e3611892..c3df52c1a60c 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -76,7 +76,10 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
76 evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ 76 evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
77 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \ 77 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
78 atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \ 78 atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
79 si_blit_shaders.o radeon_prime.o radeon_uvd.o 79 si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \
80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o
80 83
81radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 84radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
82radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o 85radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
index ca4b038050d2..06192698bd96 100644
--- a/drivers/gpu/drm/radeon/ObjectID.h
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -69,6 +69,8 @@
69#define ENCODER_OBJECT_ID_ALMOND 0x22 69#define ENCODER_OBJECT_ID_ALMOND 0x22
70#define ENCODER_OBJECT_ID_TRAVIS 0x23 70#define ENCODER_OBJECT_ID_TRAVIS 0x23
71#define ENCODER_OBJECT_ID_NUTMEG 0x22 71#define ENCODER_OBJECT_ID_NUTMEG 0x22
72#define ENCODER_OBJECT_ID_HDMI_ANX9805 0x26
73
72/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */ 74/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
73#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13 75#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
74#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14 76#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
@@ -86,6 +88,8 @@
86#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 0x20 88#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 0x20
87#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 0x21 89#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 0x21
88#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24 90#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
91#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
92#define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
89 93
90#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF 94#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
91 95
@@ -364,6 +368,14 @@
364 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 368 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
365 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT) 369 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
366 370
371#define ENCODER_INTERNAL_UNIPHY3_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
372 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
373 ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 << OBJECT_ID_SHIFT)
374
375#define ENCODER_INTERNAL_UNIPHY3_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
376 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
377 ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 << OBJECT_ID_SHIFT)
378
367#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 379#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
368 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 380 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
369 ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT) 381 ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
@@ -392,6 +404,10 @@
392 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 404 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
393 ENCODER_OBJECT_ID_INTERNAL_VCE << OBJECT_ID_SHIFT) 405 ENCODER_OBJECT_ID_INTERNAL_VCE << OBJECT_ID_SHIFT)
394 406
407#define ENCODER_HDMI_ANX9805_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
408 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
409 ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
410
395/****************************************************/ 411/****************************************************/
396/* Connector Object ID definition - Shared with BIOS */ 412/* Connector Object ID definition - Shared with BIOS */
397/****************************************************/ 413/****************************************************/
@@ -461,6 +477,14 @@
461 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\ 477 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
462 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) 478 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
463 479
480#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
481 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
482 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
483
484#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
485 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
486 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
487
464#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 488#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
465 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 489 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
466 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) 490 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
@@ -473,6 +497,10 @@
473 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ 497 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
474 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) 498 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
475 499
500#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
501 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
502 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
503
476#define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 504#define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
477 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 505 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
478 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) 506 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
@@ -541,6 +569,18 @@
541 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ 569 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
542 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT) 570 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
543 571
572#define CONNECTOR_HDMI_TYPE_A_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
573 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
574 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
575
576#define CONNECTOR_HDMI_TYPE_A_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
577 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
578 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
579
580#define CONNECTOR_HDMI_TYPE_A_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
581 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
582 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
583
544#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 584#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
545 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 585 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
546 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT) 586 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 0ee573743de9..16b120c3f144 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -74,6 +74,8 @@
74#define ATOM_PPLL2 1 74#define ATOM_PPLL2 1
75#define ATOM_DCPLL 2 75#define ATOM_DCPLL 2
76#define ATOM_PPLL0 2 76#define ATOM_PPLL0 2
77#define ATOM_PPLL3 3
78
77#define ATOM_EXT_PLL1 8 79#define ATOM_EXT_PLL1 8
78#define ATOM_EXT_PLL2 9 80#define ATOM_EXT_PLL2 9
79#define ATOM_EXT_CLOCK 10 81#define ATOM_EXT_CLOCK 10
@@ -259,7 +261,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
259 USHORT AdjustDisplayPll; //Atomic Table, used by various SW componentes. 261 USHORT AdjustDisplayPll; //Atomic Table, used by various SW componentes.
260 USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock 262 USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
261 USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios 263 USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
262 USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios 264 USHORT SetUniphyInstance; //Atomic Table, only used by Bios
263 USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2 265 USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
264 USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3 266 USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
265 USHORT HW_Misc_Operation; //Atomic Table, directly used by various SW components,latest version 1.1 267 USHORT HW_Misc_Operation; //Atomic Table, directly used by various SW components,latest version 1.1
@@ -271,7 +273,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
271 USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1 273 USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
272 USHORT PatchMCSetting; //only used by BIOS 274 USHORT PatchMCSetting; //only used by BIOS
273 USHORT MC_SEQ_Control; //only used by BIOS 275 USHORT MC_SEQ_Control; //only used by BIOS
274 USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead 276 USHORT Gfx_Harvesting; //Atomic Table, Obsolete from Ry6xx, Now only used by BIOS for GFX harvesting
275 USHORT EnableScaler; //Atomic Table, used only by Bios 277 USHORT EnableScaler; //Atomic Table, used only by Bios
276 USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1 278 USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
277 USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1 279 USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
@@ -328,7 +330,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
328#define UNIPHYTransmitterControl DIG1TransmitterControl 330#define UNIPHYTransmitterControl DIG1TransmitterControl
329#define LVTMATransmitterControl DIG2TransmitterControl 331#define LVTMATransmitterControl DIG2TransmitterControl
330#define SetCRTC_DPM_State GetConditionalGoldenSetting 332#define SetCRTC_DPM_State GetConditionalGoldenSetting
331#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange 333#define ASIC_StaticPwrMgtStatusChange SetUniphyInstance
332#define HPDInterruptService ReadHWAssistedI2CStatus 334#define HPDInterruptService ReadHWAssistedI2CStatus
333#define EnableVGA_Access GetSCLKOverMCLKRatio 335#define EnableVGA_Access GetSCLKOverMCLKRatio
334#define EnableYUV GetDispObjectInfo 336#define EnableYUV GetDispObjectInfo
@@ -338,7 +340,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
338#define TMDSAEncoderControl PatchMCSetting 340#define TMDSAEncoderControl PatchMCSetting
339#define LVDSEncoderControl MC_SEQ_Control 341#define LVDSEncoderControl MC_SEQ_Control
340#define LCD1OutputControl HW_Misc_Operation 342#define LCD1OutputControl HW_Misc_Operation
341 343#define TV1OutputControl Gfx_Harvesting
342 344
343typedef struct _ATOM_MASTER_COMMAND_TABLE 345typedef struct _ATOM_MASTER_COMMAND_TABLE
344{ 346{
@@ -478,11 +480,11 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
478typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 480typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
479{ 481{
480#if ATOM_BIG_ENDIAN 482#if ATOM_BIG_ENDIAN
481 ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly 483 ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly
482 ULONG ulClock:24; //Input= target clock, output = actual clock 484 ULONG ulClock:24; //Input= target clock, output = actual clock
483#else 485#else
484 ULONG ulClock:24; //Input= target clock, output = actual clock 486 ULONG ulClock:24; //Input= target clock, output = actual clock
485 ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly 487 ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly
486#endif 488#endif
487}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4; 489}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
488 490
@@ -504,6 +506,32 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
504 UCHAR ucReserved; 506 UCHAR ucReserved;
505}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5; 507}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
506 508
509
510typedef struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6
511{
512 ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
513 ULONG ulReserved[2];
514}COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6;
515
516//ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag
517#define COMPUTE_GPUCLK_INPUT_FLAG_CLK_TYPE_MASK 0x0f
518#define COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK 0x00
519#define COMPUTE_GPUCLK_INPUT_FLAG_SCLK 0x01
520
521typedef struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6
522{
523 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock; //Output Parameter: ucPostDiv=DFS divider
524 ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter: PLL FB divider
525 UCHAR ucPllRefDiv; //Output Parameter: PLL ref divider
526 UCHAR ucPllPostDiv; //Output Parameter: PLL post divider
527 UCHAR ucPllCntlFlag; //Output Flags: control flag
528 UCHAR ucReserved;
529}COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6;
530
531//ucPllCntlFlag
532#define SPLL_CNTL_FLAG_VCO_MODE_MASK 0x03
533
534
507// ucInputFlag 535// ucInputFlag
508#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode 536#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
509 537
@@ -1686,6 +1714,7 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V6
1686#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08 1714#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
1687#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c 1715#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
1688#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10 1716#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
1717#define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK 0x40
1689 1718
1690typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2 1719typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
1691{ 1720{
@@ -2102,6 +2131,17 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
2102}DVO_ENCODER_CONTROL_PARAMETERS_V3; 2131}DVO_ENCODER_CONTROL_PARAMETERS_V3;
2103#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3 2132#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3
2104 2133
2134typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V1_4
2135{
2136 USHORT usPixelClock;
2137 UCHAR ucDVOConfig;
2138 UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
2139 UCHAR ucBitPerColor; //please refer to definition of PANEL_xBIT_PER_COLOR
2140 UCHAR ucReseved[3];
2141}DVO_ENCODER_CONTROL_PARAMETERS_V1_4;
2142#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 DVO_ENCODER_CONTROL_PARAMETERS_V1_4
2143
2144
2105//ucTableFormatRevision=1 2145//ucTableFormatRevision=1
2106//ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for 2146//ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for
2107// bit1=0: non-coherent mode 2147// bit1=0: non-coherent mode
@@ -2165,7 +2205,7 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
2165#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4 2205#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4
2166 2206
2167#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0 2207#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0
2168#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1 2208#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
2169#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2 2209#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2
2170 2210
2171typedef struct _SET_VOLTAGE_PARAMETERS 2211typedef struct _SET_VOLTAGE_PARAMETERS
@@ -2200,15 +2240,20 @@ typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
2200//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode 2240//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode
2201#define ATOM_SET_VOLTAGE 0 //Set voltage Level 2241#define ATOM_SET_VOLTAGE 0 //Set voltage Level
2202#define ATOM_INIT_VOLTAGE_REGULATOR 3 //Init Regulator 2242#define ATOM_INIT_VOLTAGE_REGULATOR 3 //Init Regulator
2203#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase 2243#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase, only for SVID/PVID regulator
2204#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used in SetVoltageTable v1.3 2244#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used from SetVoltageTable v1.3
2205#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from vitual voltage ID 2245#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from vitual voltage ID, not used for SetVoltage v1.4
2246#define ATOM_GET_LEAKAGE_ID 8 //Get Leakage Voltage Id ( starting from SMU7x IP ), SetVoltage v1.4
2206 2247
2207// define vitual voltage id in usVoltageLevel 2248// define vitual voltage id in usVoltageLevel
2208#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01 2249#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
2209#define ATOM_VIRTUAL_VOLTAGE_ID1 0xff02 2250#define ATOM_VIRTUAL_VOLTAGE_ID1 0xff02
2210#define ATOM_VIRTUAL_VOLTAGE_ID2 0xff03 2251#define ATOM_VIRTUAL_VOLTAGE_ID2 0xff03
2211#define ATOM_VIRTUAL_VOLTAGE_ID3 0xff04 2252#define ATOM_VIRTUAL_VOLTAGE_ID3 0xff04
2253#define ATOM_VIRTUAL_VOLTAGE_ID4 0xff05
2254#define ATOM_VIRTUAL_VOLTAGE_ID5 0xff06
2255#define ATOM_VIRTUAL_VOLTAGE_ID6 0xff07
2256#define ATOM_VIRTUAL_VOLTAGE_ID7 0xff08
2212 2257
2213typedef struct _SET_VOLTAGE_PS_ALLOCATION 2258typedef struct _SET_VOLTAGE_PS_ALLOCATION
2214{ 2259{
@@ -2628,7 +2673,8 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_2
2628 ULONG ulFirmwareRevision; 2673 ULONG ulFirmwareRevision;
2629 ULONG ulDefaultEngineClock; //In 10Khz unit 2674 ULONG ulDefaultEngineClock; //In 10Khz unit
2630 ULONG ulDefaultMemoryClock; //In 10Khz unit 2675 ULONG ulDefaultMemoryClock; //In 10Khz unit
2631 ULONG ulReserved[2]; 2676 ULONG ulSPLL_OutputFreq; //In 10Khz unit
2677 ULONG ulGPUPLL_OutputFreq; //In 10Khz unit
2632 ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit* 2678 ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
2633 ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit* 2679 ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
2634 ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit 2680 ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
@@ -3813,6 +3859,12 @@ typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
3813 UCHAR ucGPIO_ID; 3859 UCHAR ucGPIO_ID;
3814}ATOM_GPIO_PIN_ASSIGNMENT; 3860}ATOM_GPIO_PIN_ASSIGNMENT;
3815 3861
3862//ucGPIO_ID pre-define id for multiple usage
3863//from SMU7.x, if ucGPIO_ID=PP_AC_DC_SWITCH_GPIO_PINID in GPIO_LUTTable, AC/DC swithing feature is enable
3864#define PP_AC_DC_SWITCH_GPIO_PINID 60
3865//from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enable
3866#define VDDC_VRHOT_GPIO_PINID 61
3867
3816typedef struct _ATOM_GPIO_PIN_LUT 3868typedef struct _ATOM_GPIO_PIN_LUT
3817{ 3869{
3818 ATOM_COMMON_TABLE_HEADER sHeader; 3870 ATOM_COMMON_TABLE_HEADER sHeader;
@@ -4074,17 +4126,19 @@ typedef struct _EXT_DISPLAY_PATH
4074 4126
4075//usCaps 4127//usCaps
4076#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x01 4128#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x01
4129#define EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN 0x02
4077 4130
4078typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO 4131typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
4079{ 4132{
4080 ATOM_COMMON_TABLE_HEADER sHeader; 4133 ATOM_COMMON_TABLE_HEADER sHeader;
4081 UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string 4134 UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string
4082 EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries. 4135 EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
4083 UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0. 4136 UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
4084 UCHAR uc3DStereoPinId; // use for eDP panel 4137 UCHAR uc3DStereoPinId; // use for eDP panel
4085 UCHAR ucRemoteDisplayConfig; 4138 UCHAR ucRemoteDisplayConfig;
4086 UCHAR uceDPToLVDSRxId; 4139 UCHAR uceDPToLVDSRxId;
4087 UCHAR Reserved[4]; // for potential expansion 4140 UCHAR ucFixDPVoltageSwing; // usCaps[1]=1, this indicate DP_LANE_SET value
4141 UCHAR Reserved[3]; // for potential expansion
4088}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO; 4142}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
4089 4143
4090//Related definitions, all records are different but they have a commond header 4144//Related definitions, all records are different but they have a commond header
@@ -4416,6 +4470,13 @@ typedef struct _ATOM_VOLTAGE_CONTROL
4416#define VOLTAGE_CONTROL_ID_CHL822x 0x08 4470#define VOLTAGE_CONTROL_ID_CHL822x 0x08
4417#define VOLTAGE_CONTROL_ID_VT1586M 0x09 4471#define VOLTAGE_CONTROL_ID_VT1586M 0x09
4418#define VOLTAGE_CONTROL_ID_UP1637 0x0A 4472#define VOLTAGE_CONTROL_ID_UP1637 0x0A
4473#define VOLTAGE_CONTROL_ID_CHL8214 0x0B
4474#define VOLTAGE_CONTROL_ID_UP1801 0x0C
4475#define VOLTAGE_CONTROL_ID_ST6788A 0x0D
4476#define VOLTAGE_CONTROL_ID_CHLIR3564SVI2 0x0E
4477#define VOLTAGE_CONTROL_ID_AD527x 0x0F
4478#define VOLTAGE_CONTROL_ID_NCP81022 0x10
4479#define VOLTAGE_CONTROL_ID_LTC2635 0x11
4419 4480
4420typedef struct _ATOM_VOLTAGE_OBJECT 4481typedef struct _ATOM_VOLTAGE_OBJECT
4421{ 4482{
@@ -4458,6 +4519,15 @@ typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
4458 USHORT usSize; //Size of Object 4519 USHORT usSize; //Size of Object
4459}ATOM_VOLTAGE_OBJECT_HEADER_V3; 4520}ATOM_VOLTAGE_OBJECT_HEADER_V3;
4460 4521
4522// ATOM_VOLTAGE_OBJECT_HEADER_V3.ucVoltageMode
4523#define VOLTAGE_OBJ_GPIO_LUT 0 //VOLTAGE and GPIO Lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
4524#define VOLTAGE_OBJ_VR_I2C_INIT_SEQ 3 //VOLTAGE REGULATOR INIT sequece through I2C -> ATOM_I2C_VOLTAGE_OBJECT_V3
4525#define VOLTAGE_OBJ_PHASE_LUT 4 //Set Vregulator Phase lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
4526#define VOLTAGE_OBJ_SVID2 7 //Indicate voltage control by SVID2 ->ATOM_SVID2_VOLTAGE_OBJECT_V3
4527#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT 0x10 //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
4528#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT 0x11 //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
4529#define VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT 0x12 //High1 voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
4530
4461typedef struct _VOLTAGE_LUT_ENTRY_V2 4531typedef struct _VOLTAGE_LUT_ENTRY_V2
4462{ 4532{
4463 ULONG ulVoltageId; // The Voltage ID which is used to program GPIO register 4533 ULONG ulVoltageId; // The Voltage ID which is used to program GPIO register
@@ -4473,7 +4543,7 @@ typedef struct _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
4473 4543
4474typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3 4544typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
4475{ 4545{
4476 ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; 4546 ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_VR_I2C_INIT_SEQ
4477 UCHAR ucVoltageRegulatorId; //Indicate Voltage Regulator Id 4547 UCHAR ucVoltageRegulatorId; //Indicate Voltage Regulator Id
4478 UCHAR ucVoltageControlI2cLine; 4548 UCHAR ucVoltageControlI2cLine;
4479 UCHAR ucVoltageControlAddress; 4549 UCHAR ucVoltageControlAddress;
@@ -4484,7 +4554,7 @@ typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
4484 4554
4485typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3 4555typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
4486{ 4556{
4487 ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; 4557 ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
4488 UCHAR ucVoltageGpioCntlId; // default is 0 which indicate control through CG VID mode 4558 UCHAR ucVoltageGpioCntlId; // default is 0 which indicate control through CG VID mode
4489 UCHAR ucGpioEntryNum; // indiate the entry numbers of Votlage/Gpio value Look up table 4559 UCHAR ucGpioEntryNum; // indiate the entry numbers of Votlage/Gpio value Look up table
4490 UCHAR ucPhaseDelay; // phase delay in unit of micro second 4560 UCHAR ucPhaseDelay; // phase delay in unit of micro second
@@ -4495,7 +4565,7 @@ typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
4495 4565
4496typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 4566typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
4497{ 4567{
4498 ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; 4568 ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = 0x10/0x11/0x12
4499 UCHAR ucLeakageCntlId; // default is 0 4569 UCHAR ucLeakageCntlId; // default is 0
4500 UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table 4570 UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table
4501 UCHAR ucReserved[2]; 4571 UCHAR ucReserved[2];
@@ -4503,10 +4573,26 @@ typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
4503 LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1]; 4573 LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
4504}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3; 4574}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
4505 4575
4576
4577typedef struct _ATOM_SVID2_VOLTAGE_OBJECT_V3
4578{
4579 ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_SVID2
4580// 14:7 – PSI0_VID
4581// 6 – PSI0_EN
4582// 5 – PSI1
4583// 4:2 – load line slope trim.
4584// 1:0 – offset trim,
4585 USHORT usLoadLine_PSI;
4586// GPU GPIO pin Id to SVID2 regulator VRHot pin. possible value 0~31. 0 means GPIO0, 31 means GPIO31
4587 UCHAR ucReserved[2];
4588 ULONG ulReserved;
4589}ATOM_SVID2_VOLTAGE_OBJECT_V3;
4590
4506typedef union _ATOM_VOLTAGE_OBJECT_V3{ 4591typedef union _ATOM_VOLTAGE_OBJECT_V3{
4507 ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj; 4592 ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
4508 ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj; 4593 ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
4509 ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj; 4594 ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
4595 ATOM_SVID2_VOLTAGE_OBJECT_V3 asSVID2Obj;
4510}ATOM_VOLTAGE_OBJECT_V3; 4596}ATOM_VOLTAGE_OBJECT_V3;
4511 4597
4512typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 4598typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1
@@ -4536,6 +4622,21 @@ typedef struct _ATOM_ASIC_PROFILING_INFO
4536 ATOM_ASIC_PROFILE_VOLTAGE asVoltage; 4622 ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
4537}ATOM_ASIC_PROFILING_INFO; 4623}ATOM_ASIC_PROFILING_INFO;
4538 4624
4625typedef struct _ATOM_ASIC_PROFILING_INFO_V2_1
4626{
4627 ATOM_COMMON_TABLE_HEADER asHeader;
4628 UCHAR ucLeakageBinNum; // indicate the entry number of LeakageId/Voltage Lut table
4629 USHORT usLeakageBinArrayOffset; // offset of USHORT Leakage Bin list array ( from lower LeakageId to higher)
4630
4631 UCHAR ucElbVDDC_Num;
4632 USHORT usElbVDDC_IdArrayOffset; // offset of USHORT virtual VDDC voltage id ( 0xff01~0xff08 )
4633 USHORT usElbVDDC_LevelArrayOffset; // offset of 2 dimension voltage level USHORT array
4634
4635 UCHAR ucElbVDDCI_Num;
4636 USHORT usElbVDDCI_IdArrayOffset; // offset of USHORT virtual VDDCI voltage id ( 0xff01~0xff08 )
4637 USHORT usElbVDDCI_LevelArrayOffset; // offset of 2 dimension voltage level USHORT array
4638}ATOM_ASIC_PROFILING_INFO_V2_1;
4639
4539typedef struct _ATOM_POWER_SOURCE_OBJECT 4640typedef struct _ATOM_POWER_SOURCE_OBJECT
4540{ 4641{
4541 UCHAR ucPwrSrcId; // Power source 4642 UCHAR ucPwrSrcId; // Power source
@@ -4652,6 +4753,8 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
4652#define SYS_INFO_LVDSMISC__888_BPC 0x04 4753#define SYS_INFO_LVDSMISC__888_BPC 0x04
4653#define SYS_INFO_LVDSMISC__OVERRIDE_EN 0x08 4754#define SYS_INFO_LVDSMISC__OVERRIDE_EN 0x08
4654#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW 0x10 4755#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW 0x10
4756// new since Trinity
4757#define SYS_INFO_LVDSMISC__TRAVIS_LVDS_VOL_OVERRIDE_EN 0x20
4655 4758
4656// not used any more 4759// not used any more
4657#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW 0x04 4760#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW 0x04
@@ -4752,6 +4855,29 @@ typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
4752 ATOM_INTEGRATED_SYSTEM_INFO_V6 sIntegratedSysInfo; 4855 ATOM_INTEGRATED_SYSTEM_INFO_V6 sIntegratedSysInfo;
4753 ULONG ulPowerplayTable[128]; 4856 ULONG ulPowerplayTable[128];
4754}ATOM_FUSION_SYSTEM_INFO_V1; 4857}ATOM_FUSION_SYSTEM_INFO_V1;
4858
4859
4860typedef struct _ATOM_TDP_CONFIG_BITS
4861{
4862#if ATOM_BIG_ENDIAN
4863 ULONG uReserved:2;
4864 ULONG uTDP_Value:14; // Original TDP value in tens of milli watts
4865 ULONG uCTDP_Value:14; // Override value in tens of milli watts
4866 ULONG uCTDP_Enable:2; // = (uCTDP_Value > uTDP_Value? 2: (uCTDP_Value < uTDP_Value))
4867#else
4868 ULONG uCTDP_Enable:2; // = (uCTDP_Value > uTDP_Value? 2: (uCTDP_Value < uTDP_Value))
4869 ULONG uCTDP_Value:14; // Override value in tens of milli watts
4870 ULONG uTDP_Value:14; // Original TDP value in tens of milli watts
4871 ULONG uReserved:2;
4872#endif
4873}ATOM_TDP_CONFIG_BITS;
4874
4875typedef union _ATOM_TDP_CONFIG
4876{
4877 ATOM_TDP_CONFIG_BITS TDP_config;
4878 ULONG TDP_config_all;
4879}ATOM_TDP_CONFIG;
4880
4755/********************************************************************************************************************** 4881/**********************************************************************************************************************
4756 ATOM_FUSION_SYSTEM_INFO_V1 Description 4882 ATOM_FUSION_SYSTEM_INFO_V1 Description
4757sIntegratedSysInfo: refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition. 4883sIntegratedSysInfo: refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
@@ -4784,7 +4910,8 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
4784 UCHAR ucMemoryType; 4910 UCHAR ucMemoryType;
4785 UCHAR ucUMAChannelNumber; 4911 UCHAR ucUMAChannelNumber;
4786 UCHAR strVBIOSMsg[40]; 4912 UCHAR strVBIOSMsg[40];
4787 ULONG ulReserved[20]; 4913 ATOM_TDP_CONFIG asTdpConfig;
4914 ULONG ulReserved[19];
4788 ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5]; 4915 ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
4789 ULONG ulGMCRestoreResetTime; 4916 ULONG ulGMCRestoreResetTime;
4790 ULONG ulMinimumNClk; 4917 ULONG ulMinimumNClk;
@@ -4809,7 +4936,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
4809 USHORT GnbTdpLimit; 4936 USHORT GnbTdpLimit;
4810 USHORT usMaxLVDSPclkFreqInSingleLink; 4937 USHORT usMaxLVDSPclkFreqInSingleLink;
4811 UCHAR ucLvdsMisc; 4938 UCHAR ucLvdsMisc;
4812 UCHAR ucLVDSReserved; 4939 UCHAR ucTravisLVDSVolAdjust;
4813 UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms; 4940 UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
4814 UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms; 4941 UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
4815 UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms; 4942 UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
@@ -4817,7 +4944,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
4817 UCHAR ucLVDSOffToOnDelay_in4Ms; 4944 UCHAR ucLVDSOffToOnDelay_in4Ms;
4818 UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms; 4945 UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
4819 UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms; 4946 UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
4820 UCHAR ucLVDSReserved1; 4947 UCHAR ucMinAllowedBL_Level;
4821 ULONG ulLCDBitDepthControlVal; 4948 ULONG ulLCDBitDepthControlVal;
4822 ULONG ulNbpStateMemclkFreq[4]; 4949 ULONG ulNbpStateMemclkFreq[4];
4823 USHORT usNBP2Voltage; 4950 USHORT usNBP2Voltage;
@@ -4846,6 +4973,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
4846#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01 4973#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
4847#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02 4974#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
4848#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08 4975#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
4976#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10
4849 4977
4850/********************************************************************************************************************** 4978/**********************************************************************************************************************
4851 ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description 4979 ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
@@ -4945,6 +5073,9 @@ ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 pan
4945 [bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color 5073 [bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color
4946 [bit3] LVDS parameter override enable =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used 5074 [bit3] LVDS parameter override enable =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
4947 [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low ) 5075 [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
5076 [bit5] Travid LVDS output voltage override enable, when =1, use ucTravisLVDSVolAdjust value to overwrite Traivs register LVDS_CTRL_4
5077ucTravisLVDSVolAdjust When ucLVDSMisc[5]=1,it means platform SBIOS want to overwrite TravisLVDSVoltage. Then VBIOS will use ucTravisLVDSVolAdjust
5078 value to program Travis register LVDS_CTRL_4
4948ucLVDSPwrOnSeqDIGONtoDE_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ). 5079ucLVDSPwrOnSeqDIGONtoDE_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
4949 =0 mean use VBIOS default which is 8 ( 32ms ). The LVDS power up sequence is as following: DIGON->DE->VARY_BL->BLON. 5080 =0 mean use VBIOS default which is 8 ( 32ms ). The LVDS power up sequence is as following: DIGON->DE->VARY_BL->BLON.
4950 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable. 5081 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
@@ -4964,18 +5095,241 @@ ucLVDSOffToOnDelay_in4Ms: LVDS power down sequence time in unit of 4ms.
4964 =0 means to use VBIOS default delay which is 125 ( 500ms ). 5095 =0 means to use VBIOS default delay which is 125 ( 500ms ).
4965 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable. 5096 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
4966 5097
4967ucLVDSPwrOnVARY_BLtoBLON_in4Ms: LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to DLON signal active. 5098ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
5099 LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to DLON signal active.
4968 =0 means to use VBIOS default delay which is 0 ( 0ms ). 5100 =0 means to use VBIOS default delay which is 0 ( 0ms ).
4969 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable. 5101 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
4970 5102
4971ucLVDSPwrOffBLONtoVARY_BL_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off. 5103ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:
5104 LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
4972 =0 means to use VBIOS default delay which is 0 ( 0ms ). 5105 =0 means to use VBIOS default delay which is 0 ( 0ms ).
4973 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable. 5106 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
4974 5107
5108ucMinAllowedBL_Level: Lowest LCD backlight PWM level. This is customer platform specific parameters. By default it is 0.
5109
4975ulNbpStateMemclkFreq[4]: system memory clock frequncey in unit of 10Khz in different NB pstate. 5110ulNbpStateMemclkFreq[4]: system memory clock frequncey in unit of 10Khz in different NB pstate.
4976 5111
4977**********************************************************************************************************************/ 5112**********************************************************************************************************************/
4978 5113
5114// this IntegrateSystemInfoTable is used for Kaveri & Kabini APU
5115typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8
5116{
5117 ATOM_COMMON_TABLE_HEADER sHeader;
5118 ULONG ulBootUpEngineClock;
5119 ULONG ulDentistVCOFreq;
5120 ULONG ulBootUpUMAClock;
5121 ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
5122 ULONG ulBootUpReqDisplayVector;
5123 ULONG ulVBIOSMisc;
5124 ULONG ulGPUCapInfo;
5125 ULONG ulDISP_CLK2Freq;
5126 USHORT usRequestedPWMFreqInHz;
5127 UCHAR ucHtcTmpLmt;
5128 UCHAR ucHtcHystLmt;
5129 ULONG ulReserved2;
5130 ULONG ulSystemConfig;
5131 ULONG ulCPUCapInfo;
5132 ULONG ulReserved3;
5133 USHORT usGPUReservedSysMemSize;
5134 USHORT usExtDispConnInfoOffset;
5135 USHORT usPanelRefreshRateRange;
5136 UCHAR ucMemoryType;
5137 UCHAR ucUMAChannelNumber;
5138 UCHAR strVBIOSMsg[40];
5139 ATOM_TDP_CONFIG asTdpConfig;
5140 ULONG ulReserved[19];
5141 ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
5142 ULONG ulGMCRestoreResetTime;
5143 ULONG ulReserved4;
5144 ULONG ulIdleNClk;
5145 ULONG ulDDR_DLL_PowerUpTime;
5146 ULONG ulDDR_PLL_PowerUpTime;
5147 USHORT usPCIEClkSSPercentage;
5148 USHORT usPCIEClkSSType;
5149 USHORT usLvdsSSPercentage;
5150 USHORT usLvdsSSpreadRateIn10Hz;
5151 USHORT usHDMISSPercentage;
5152 USHORT usHDMISSpreadRateIn10Hz;
5153 USHORT usDVISSPercentage;
5154 USHORT usDVISSpreadRateIn10Hz;
5155 ULONG ulGPUReservedSysMemBaseAddrLo;
5156 ULONG ulGPUReservedSysMemBaseAddrHi;
5157 ULONG ulReserved5[3];
5158 USHORT usMaxLVDSPclkFreqInSingleLink;
5159 UCHAR ucLvdsMisc;
5160 UCHAR ucTravisLVDSVolAdjust;
5161 UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
5162 UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
5163 UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
5164 UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
5165 UCHAR ucLVDSOffToOnDelay_in4Ms;
5166 UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
5167 UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
5168 UCHAR ucMinAllowedBL_Level;
5169 ULONG ulLCDBitDepthControlVal;
5170 ULONG ulNbpStateMemclkFreq[4];
5171 ULONG ulReserved6;
5172 ULONG ulNbpStateNClkFreq[4];
5173 USHORT usNBPStateVoltage[4];
5174 USHORT usBootUpNBVoltage;
5175 USHORT usReserved2;
5176 ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
5177}ATOM_INTEGRATED_SYSTEM_INFO_V1_8;
5178
5179/**********************************************************************************************************************
5180 ATOM_INTEGRATED_SYSTEM_INFO_V1_8 Description
5181ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. if it is equal 0, then VBIOS use pre-defined bootup engine clock
5182ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
5183ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
5184sDISPCLK_Voltage: Report Display clock frequency requirement on GNB voltage(up to 4 voltage levels).
5185
5186ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Trinity projects:
5187 ATOM_DEVICE_CRT1_SUPPORT 0x0001
5188 ATOM_DEVICE_DFP1_SUPPORT 0x0008
5189 ATOM_DEVICE_DFP6_SUPPORT 0x0040
5190 ATOM_DEVICE_DFP2_SUPPORT 0x0080
5191 ATOM_DEVICE_DFP3_SUPPORT 0x0200
5192 ATOM_DEVICE_DFP4_SUPPORT 0x0400
5193 ATOM_DEVICE_DFP5_SUPPORT 0x0800
5194 ATOM_DEVICE_LCD1_SUPPORT 0x0002
5195
5196ulVBIOSMisc: Miscellenous flags for VBIOS requirement and interface
5197 bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
5198 =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
5199 bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
5200 =1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
5201 bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
5202 =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
5203 bit[3]=0: VBIOS fast boot is disable
5204 =1: VBIOS fast boot is enable. ( VBIOS skip display device detection in every set mode if LCD panel is connect and LID is open)
5205
5206ulGPUCapInfo: bit[0~2]= Reserved
5207 bit[3]=0: Enable AUX HW mode detection logic
5208 =1: Disable AUX HW mode detection logic
5209 bit[4]=0: Disable DFS bypass feature
5210 =1: Enable DFS bypass feature
5211
5212usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
5213 Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
5214
5215 When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
5216 1. SW uses the GPU BL PWM output to control the BL, in chis case, this non-zero frequency determines what freq GPU should use;
5217 VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result,
5218 Changing BL using VBIOS function is functional in both driver and non-driver present environment;
5219 and enabling VariBri under the driver environment from PP table is optional.
5220
5221 2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating
5222 that BL control from GPU is expected.
5223 VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
5224 Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but
5225 it's per platform
5226 and enabling VariBri under the driver environment from PP table is optional.
5227
5228ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt. Threshold on value to enter HTC_active state.
5229ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
5230 To calculate threshold off value to exit HTC_active state, which is Threshold on vlaue minus ucHtcHystLmt.
5231
5232ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
5233 =1: PCIE Power Gating Enabled
5234 Bit[1]=0: DDR-DLL shut-down feature disabled.
5235 1: DDR-DLL shut-down feature enabled.
5236 Bit[2]=0: DDR-PLL Power down feature disabled.
5237 1: DDR-PLL Power down feature enabled.
5238 Bit[3]=0: GNB DPM is disabled
5239 =1: GNB DPM is enabled
5240ulCPUCapInfo: TBD
5241
5242usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
5243usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requestd by the platform, at least two bits need to be set
5244 to indicate a range.
5245 SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
5246 SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
5247 SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
5248 SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
5249
5250ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3;=5:GDDR5; [7:4] is reserved.
5251ucUMAChannelNumber: System memory channel numbers.
5252
5253strVBIOSMsg[40]: VBIOS boot up customized message string
5254
5255sAvail_SCLK[5]: Arrays to provide availabe list of SLCK and corresponding voltage, order from low to high
5256
5257ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
5258ulIdleNClk: NCLK speed while memory runs in self-refresh state, used to calculate self-refresh latency. Unit in 10kHz.
5259ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
5260ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
5261
5262usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 mean 1%.
5263usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
5264usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
5265usLvdsSSpreadRateIn10Hz: LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
5266usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
5267usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
5268usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
5269usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
5270
5271usGPUReservedSysMemSize: Reserved system memory size for ACP engine in APU GNB, units in MB. 0/2/4MB based on CMOS options, current default could be 0MB. KV only, not on KB.
5272ulGPUReservedSysMemBaseAddrLo: Low 32 bits base address to the reserved system memory.
5273ulGPUReservedSysMemBaseAddrHi: High 32 bits base address to the reserved system memory.
5274
5275usMaxLVDSPclkFreqInSingleLink: Max pixel clock LVDS panel single link, if=0 means VBIOS use default threhold, right now it is 85Mhz
5276ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
5277 [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
5278 [bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color
5279 [bit3] LVDS parameter override enable =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
5280 [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
5281 [bit5] Travid LVDS output voltage override enable, when =1, use ucTravisLVDSVolAdjust value to overwrite Traivs register LVDS_CTRL_4
5282ucTravisLVDSVolAdjust When ucLVDSMisc[5]=1,it means platform SBIOS want to overwrite TravisLVDSVoltage. Then VBIOS will use ucTravisLVDSVolAdjust
5283 value to program Travis register LVDS_CTRL_4
5284ucLVDSPwrOnSeqDIGONtoDE_in4Ms:
5285 LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
5286 =0 mean use VBIOS default which is 8 ( 32ms ). The LVDS power up sequence is as following: DIGON->DE->VARY_BL->BLON.
5287 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5288ucLVDSPwrOnDEtoVARY_BL_in4Ms:
5289 LVDS power up sequence time in unit of 4ms., time delay from DE( data enable ) active to Vary Brightness enable signal active( VARY_BL ).
5290 =0 mean use VBIOS default which is 90 ( 360ms ). The LVDS power up sequence is as following: DIGON->DE->VARY_BL->BLON.
5291 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5292ucLVDSPwrOffVARY_BLtoDE_in4Ms:
5293 LVDS power down sequence time in unit of 4ms, time delay from data enable ( DE ) signal off to LCDVCC (DIGON) off.
5294 =0 mean use VBIOS default delay which is 8 ( 32ms ). The LVDS power down sequence is as following: BLON->VARY_BL->DE->DIGON
5295 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5296ucLVDSPwrOffDEtoDIGON_in4Ms:
5297 LVDS power down sequence time in unit of 4ms, time delay from vary brightness enable signal( VARY_BL) off to data enable ( DE ) signal off.
5298 =0 mean use VBIOS default which is 90 ( 360ms ). The LVDS power down sequence is as following: BLON->VARY_BL->DE->DIGON
5299 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5300ucLVDSOffToOnDelay_in4Ms:
5301 LVDS power down sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active.
5302 =0 means to use VBIOS default delay which is 125 ( 500ms ).
5303 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5304ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
5305 LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to DLON signal active.
5306 =0 means to use VBIOS default delay which is 0 ( 0ms ).
5307 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5308
5309ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:
5310 LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
5311 =0 means to use VBIOS default delay which is 0 ( 0ms ).
5312 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5313ucMinAllowedBL_Level: Lowest LCD backlight PWM level. This is customer platform specific parameters. By default it is 0.
5314
5315ulLCDBitDepthControlVal: GPU display control encoder bit dither control setting, used to program register mmFMT_BIT_DEPTH_CONTROL
5316
5317ulNbpStateMemclkFreq[4]: system memory clock frequncey in unit of 10Khz in different NB P-State(P0, P1, P2 & P3).
5318ulNbpStateNClkFreq[4]: NB P-State NClk frequency in different NB P-State
5319usNBPStateVoltage[4]: NB P-State (P0/P1 & P2/P3) voltage; NBP3 refers to lowes voltage
5320usBootUpNBVoltage: NB P-State voltage during boot up before driver loaded
5321sExtDispConnInfo: Display connector information table provided to VBIOS
5322
5323**********************************************************************************************************************/
5324
5325// this Table is used for Kaveri/Kabini APU
5326typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
5327{
5328 ATOM_INTEGRATED_SYSTEM_INFO_V1_8 sIntegratedSysInfo; // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition
5329 ULONG ulPowerplayTable[128]; // Update comments here to link new powerplay table definition structure
5330}ATOM_FUSION_SYSTEM_INFO_V2;
5331
5332
4979/**************************************************************************/ 5333/**************************************************************************/
4980// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design 5334// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
4981//Memory SS Info Table 5335//Memory SS Info Table
@@ -5026,22 +5380,24 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
5026 5380
5027//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type. 5381//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type.
5028//SS is not required or enabled if a match is not found. 5382//SS is not required or enabled if a match is not found.
5029#define ASIC_INTERNAL_MEMORY_SS 1 5383#define ASIC_INTERNAL_MEMORY_SS 1
5030#define ASIC_INTERNAL_ENGINE_SS 2 5384#define ASIC_INTERNAL_ENGINE_SS 2
5031#define ASIC_INTERNAL_UVD_SS 3 5385#define ASIC_INTERNAL_UVD_SS 3
5032#define ASIC_INTERNAL_SS_ON_TMDS 4 5386#define ASIC_INTERNAL_SS_ON_TMDS 4
5033#define ASIC_INTERNAL_SS_ON_HDMI 5 5387#define ASIC_INTERNAL_SS_ON_HDMI 5
5034#define ASIC_INTERNAL_SS_ON_LVDS 6 5388#define ASIC_INTERNAL_SS_ON_LVDS 6
5035#define ASIC_INTERNAL_SS_ON_DP 7 5389#define ASIC_INTERNAL_SS_ON_DP 7
5036#define ASIC_INTERNAL_SS_ON_DCPLL 8 5390#define ASIC_INTERNAL_SS_ON_DCPLL 8
5037#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9 5391#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
5038#define ASIC_INTERNAL_VCE_SS 10 5392#define ASIC_INTERNAL_VCE_SS 10
5393#define ASIC_INTERNAL_GPUPLL_SS 11
5394
5039 5395
5040typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2 5396typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
5041{ 5397{
5042 ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequence (VCO ), in unit of 10Khz 5398 ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequence (VCO ), in unit of 10Khz
5043 //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 ) 5399 //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 )
5044 USHORT usSpreadSpectrumPercentage; //in unit of 0.01% 5400 USHORT usSpreadSpectrumPercentage; //in unit of 0.01% or 0.001%, decided by ucSpreadSpectrumMode bit4
5045 USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq 5401 USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq
5046 UCHAR ucClockIndication; //Indicate which clock source needs SS 5402 UCHAR ucClockIndication; //Indicate which clock source needs SS
5047 UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS 5403 UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
@@ -5079,6 +5435,11 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
5079 UCHAR ucReserved[2]; 5435 UCHAR ucReserved[2];
5080}ATOM_ASIC_SS_ASSIGNMENT_V3; 5436}ATOM_ASIC_SS_ASSIGNMENT_V3;
5081 5437
5438//ATOM_ASIC_SS_ASSIGNMENT_V3.ucSpreadSpectrumMode
5439#define SS_MODE_V3_CENTRE_SPREAD_MASK 0x01
5440#define SS_MODE_V3_EXTERNAL_SS_MASK 0x02
5441#define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK 0x10
5442
5082typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 5443typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
5083{ 5444{
5084 ATOM_COMMON_TABLE_HEADER sHeader; 5445 ATOM_COMMON_TABLE_HEADER sHeader;
@@ -5719,6 +6080,7 @@ typedef struct _INDIRECT_IO_ACCESS
5719#define INDIRECT_IO_PCIE 3 6080#define INDIRECT_IO_PCIE 3
5720#define INDIRECT_IO_PCIEP 4 6081#define INDIRECT_IO_PCIEP 4
5721#define INDIRECT_IO_NBMISC 5 6082#define INDIRECT_IO_NBMISC 5
6083#define INDIRECT_IO_SMU 5
5722 6084
5723#define INDIRECT_IO_PLL_READ INDIRECT_IO_PLL | INDIRECT_READ 6085#define INDIRECT_IO_PLL_READ INDIRECT_IO_PLL | INDIRECT_READ
5724#define INDIRECT_IO_PLL_WRITE INDIRECT_IO_PLL | INDIRECT_WRITE 6086#define INDIRECT_IO_PLL_WRITE INDIRECT_IO_PLL | INDIRECT_WRITE
@@ -5730,6 +6092,8 @@ typedef struct _INDIRECT_IO_ACCESS
5730#define INDIRECT_IO_PCIEP_WRITE INDIRECT_IO_PCIEP | INDIRECT_WRITE 6092#define INDIRECT_IO_PCIEP_WRITE INDIRECT_IO_PCIEP | INDIRECT_WRITE
5731#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ 6093#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ
5732#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE 6094#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE
6095#define INDIRECT_IO_SMU_READ INDIRECT_IO_SMU | INDIRECT_READ
6096#define INDIRECT_IO_SMU_WRITE INDIRECT_IO_SMU | INDIRECT_WRITE
5733 6097
5734typedef struct _ATOM_OEM_INFO 6098typedef struct _ATOM_OEM_INFO
5735{ 6099{
@@ -5875,6 +6239,7 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
5875#define _64Mx32 0x43 6239#define _64Mx32 0x43
5876#define _128Mx8 0x51 6240#define _128Mx8 0x51
5877#define _128Mx16 0x52 6241#define _128Mx16 0x52
6242#define _128Mx32 0x53
5878#define _256Mx8 0x61 6243#define _256Mx8 0x61
5879#define _256Mx16 0x62 6244#define _256Mx16 0x62
5880 6245
@@ -5893,6 +6258,8 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
5893#define PROMOS MOSEL 6258#define PROMOS MOSEL
5894#define KRETON INFINEON 6259#define KRETON INFINEON
5895#define ELIXIR NANYA 6260#define ELIXIR NANYA
6261#define MEZZA ELPIDA
6262
5896 6263
5897/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// 6264/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
5898 6265
@@ -6625,6 +6992,10 @@ typedef struct _ATOM_DISP_OUT_INFO_V3
6625 ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alligment only 6992 ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alligment only
6626}ATOM_DISP_OUT_INFO_V3; 6993}ATOM_DISP_OUT_INFO_V3;
6627 6994
6995//ucDispCaps
6996#define DISPLAY_CAPS__DP_PCLK_FROM_PPLL 0x01
6997#define DISPLAY_CAPS__FORCE_DISPDEV_CONNECTED 0x02
6998
6628typedef enum CORE_REF_CLK_SOURCE{ 6999typedef enum CORE_REF_CLK_SOURCE{
6629 CLOCK_SRC_XTALIN=0, 7000 CLOCK_SRC_XTALIN=0,
6630 CLOCK_SRC_XO_IN=1, 7001 CLOCK_SRC_XO_IN=1,
@@ -6829,6 +7200,17 @@ typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{
6829 USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings 7200 USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
6830}DIG_TRANSMITTER_INFO_HEADER_V3_1; 7201}DIG_TRANSMITTER_INFO_HEADER_V3_1;
6831 7202
7203typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_2{
7204 ATOM_COMMON_TABLE_HEADER sHeader;
7205 USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock
7206 USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info
7207 USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range
7208 USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info
7209 USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
7210 USHORT usDPSSRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy SS Pll register Info
7211 USHORT usDPSSSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy SS Pll Settings
7212}DIG_TRANSMITTER_INFO_HEADER_V3_2;
7213
6832typedef struct _CLOCK_CONDITION_REGESTER_INFO{ 7214typedef struct _CLOCK_CONDITION_REGESTER_INFO{
6833 USHORT usRegisterIndex; 7215 USHORT usRegisterIndex;
6834 UCHAR ucStartBit; 7216 UCHAR ucStartBit;
@@ -6852,12 +7234,24 @@ typedef struct _PHY_CONDITION_REG_VAL{
6852 ULONG ulRegVal; 7234 ULONG ulRegVal;
6853}PHY_CONDITION_REG_VAL; 7235}PHY_CONDITION_REG_VAL;
6854 7236
7237typedef struct _PHY_CONDITION_REG_VAL_V2{
7238 ULONG ulCondition;
7239 UCHAR ucCondition2;
7240 ULONG ulRegVal;
7241}PHY_CONDITION_REG_VAL_V2;
7242
6855typedef struct _PHY_CONDITION_REG_INFO{ 7243typedef struct _PHY_CONDITION_REG_INFO{
6856 USHORT usRegIndex; 7244 USHORT usRegIndex;
6857 USHORT usSize; 7245 USHORT usSize;
6858 PHY_CONDITION_REG_VAL asRegVal[1]; 7246 PHY_CONDITION_REG_VAL asRegVal[1];
6859}PHY_CONDITION_REG_INFO; 7247}PHY_CONDITION_REG_INFO;
6860 7248
7249typedef struct _PHY_CONDITION_REG_INFO_V2{
7250 USHORT usRegIndex;
7251 USHORT usSize;
7252 PHY_CONDITION_REG_VAL_V2 asRegVal[1];
7253}PHY_CONDITION_REG_INFO_V2;
7254
6861typedef struct _PHY_ANALOG_SETTING_INFO{ 7255typedef struct _PHY_ANALOG_SETTING_INFO{
6862 UCHAR ucEncodeMode; 7256 UCHAR ucEncodeMode;
6863 UCHAR ucPhySel; 7257 UCHAR ucPhySel;
@@ -6865,6 +7259,25 @@ typedef struct _PHY_ANALOG_SETTING_INFO{
6865 PHY_CONDITION_REG_INFO asAnalogSetting[1]; 7259 PHY_CONDITION_REG_INFO asAnalogSetting[1];
6866}PHY_ANALOG_SETTING_INFO; 7260}PHY_ANALOG_SETTING_INFO;
6867 7261
7262typedef struct _PHY_ANALOG_SETTING_INFO_V2{
7263 UCHAR ucEncodeMode;
7264 UCHAR ucPhySel;
7265 USHORT usSize;
7266 PHY_CONDITION_REG_INFO_V2 asAnalogSetting[1];
7267}PHY_ANALOG_SETTING_INFO_V2;
7268
7269typedef struct _GFX_HAVESTING_PARAMETERS {
7270 UCHAR ucGfxBlkId; //GFX blk id to be harvested, like CU, RB or PRIM
7271 UCHAR ucReserved; //reserved
7272 UCHAR ucActiveUnitNumPerSH; //requested active CU/RB/PRIM number per shader array
7273 UCHAR ucMaxUnitNumPerSH; //max CU/RB/PRIM number per shader array
7274} GFX_HAVESTING_PARAMETERS;
7275
7276//ucGfxBlkId
7277#define GFX_HARVESTING_CU_ID 0
7278#define GFX_HARVESTING_RB_ID 1
7279#define GFX_HARVESTING_PRIM_ID 2
7280
6868/****************************************************************************/ 7281/****************************************************************************/
6869//Portion VI: Definitinos for vbios MC scratch registers that driver used 7282//Portion VI: Definitinos for vbios MC scratch registers that driver used
6870/****************************************************************************/ 7283/****************************************************************************/
@@ -6875,8 +7288,17 @@ typedef struct _PHY_ANALOG_SETTING_INFO{
6875#define MC_MISC0__MEMORY_TYPE__GDDR3 0x30000000 7288#define MC_MISC0__MEMORY_TYPE__GDDR3 0x30000000
6876#define MC_MISC0__MEMORY_TYPE__GDDR4 0x40000000 7289#define MC_MISC0__MEMORY_TYPE__GDDR4 0x40000000
6877#define MC_MISC0__MEMORY_TYPE__GDDR5 0x50000000 7290#define MC_MISC0__MEMORY_TYPE__GDDR5 0x50000000
7291#define MC_MISC0__MEMORY_TYPE__HBM 0x60000000
6878#define MC_MISC0__MEMORY_TYPE__DDR3 0xB0000000 7292#define MC_MISC0__MEMORY_TYPE__DDR3 0xB0000000
6879 7293
7294#define ATOM_MEM_TYPE_DDR_STRING "DDR"
7295#define ATOM_MEM_TYPE_DDR2_STRING "DDR2"
7296#define ATOM_MEM_TYPE_GDDR3_STRING "GDDR3"
7297#define ATOM_MEM_TYPE_GDDR4_STRING "GDDR4"
7298#define ATOM_MEM_TYPE_GDDR5_STRING "GDDR5"
7299#define ATOM_MEM_TYPE_HBM_STRING "HBM"
7300#define ATOM_MEM_TYPE_DDR3_STRING "DDR3"
7301
6880/****************************************************************************/ 7302/****************************************************************************/
6881//Portion VI: Definitinos being oboselete 7303//Portion VI: Definitinos being oboselete
6882/****************************************************************************/ 7304/****************************************************************************/
@@ -7274,6 +7696,7 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
7274#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15 7696#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
7275#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16 7697#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
7276#define ATOM_PP_THERMALCONTROLLER_LM96163 17 7698#define ATOM_PP_THERMALCONTROLLER_LM96163 17
7699#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18
7277 7700
7278// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal. 7701// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
7279// We probably should reserve the bit 0x80 for this use. 7702// We probably should reserve the bit 0x80 for this use.
@@ -7316,6 +7739,8 @@ typedef struct _ATOM_PPLIB_EXTENDEDHEADER
7316 // Add extra system parameters here, always adjust size to include all fields. 7739 // Add extra system parameters here, always adjust size to include all fields.
7317 USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table 7740 USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
7318 USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table 7741 USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
7742 USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table
7743 USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table
7319} ATOM_PPLIB_EXTENDEDHEADER; 7744} ATOM_PPLIB_EXTENDEDHEADER;
7320 7745
7321//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps 7746//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
@@ -7337,7 +7762,10 @@ typedef struct _ATOM_PPLIB_EXTENDEDHEADER
7337#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC. 7762#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
7338#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature. 7763#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
7339#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state. 7764#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state.
7340 7765#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver supports new CAC voltage table.
7766#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver supports revert GPIO5 polarity.
7767#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver supports thermal2GPIO17.
7768#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver supports VR HOT GPIO Configurable.
7341 7769
7342typedef struct _ATOM_PPLIB_POWERPLAYTABLE 7770typedef struct _ATOM_PPLIB_POWERPLAYTABLE
7343{ 7771{
@@ -7398,7 +7826,7 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
7398 USHORT usVddcDependencyOnMCLKOffset; 7826 USHORT usVddcDependencyOnMCLKOffset;
7399 USHORT usMaxClockVoltageOnDCOffset; 7827 USHORT usMaxClockVoltageOnDCOffset;
7400 USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table 7828 USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
7401 USHORT usReserved; 7829 USHORT usMvddDependencyOnMCLKOffset;
7402} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4; 7830} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
7403 7831
7404typedef struct _ATOM_PPLIB_POWERPLAYTABLE5 7832typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
@@ -7563,6 +7991,17 @@ typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
7563 7991
7564} ATOM_PPLIB_SI_CLOCK_INFO; 7992} ATOM_PPLIB_SI_CLOCK_INFO;
7565 7993
7994typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
7995{
7996 USHORT usEngineClockLow;
7997 UCHAR ucEngineClockHigh;
7998
7999 USHORT usMemoryClockLow;
8000 UCHAR ucMemoryClockHigh;
8001
8002 UCHAR ucPCIEGen;
8003 USHORT usPCIELane;
8004} ATOM_PPLIB_CI_CLOCK_INFO;
7566 8005
7567typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO 8006typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
7568 8007
@@ -7680,8 +8119,8 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
7680 8119
7681typedef struct _ATOM_PPLIB_CAC_Leakage_Record 8120typedef struct _ATOM_PPLIB_CAC_Leakage_Record
7682{ 8121{
7683 USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations 8122 USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations; For CI and newer, we use this as the real VDDC value.
7684 ULONG ulLeakageValue; 8123 ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standar VDDC value.
7685}ATOM_PPLIB_CAC_Leakage_Record; 8124}ATOM_PPLIB_CAC_Leakage_Record;
7686 8125
7687typedef struct _ATOM_PPLIB_CAC_Leakage_Table 8126typedef struct _ATOM_PPLIB_CAC_Leakage_Table
@@ -7796,6 +8235,42 @@ typedef struct _ATOM_PPLIB_UVD_Table
7796// ATOM_PPLIB_UVD_State_Table states; 8235// ATOM_PPLIB_UVD_State_Table states;
7797}ATOM_PPLIB_UVD_Table; 8236}ATOM_PPLIB_UVD_Table;
7798 8237
8238
8239typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
8240{
8241 USHORT usVoltage;
8242 USHORT usSAMClockLow;
8243 UCHAR ucSAMClockHigh;
8244}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
8245
8246typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
8247 UCHAR numEntries;
8248 ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
8249}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
8250
8251typedef struct _ATOM_PPLIB_SAMU_Table
8252{
8253 UCHAR revid;
8254 ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
8255}ATOM_PPLIB_SAMU_Table;
8256
8257#define ATOM_PPM_A_A 1
8258#define ATOM_PPM_A_I 2
8259typedef struct _ATOM_PPLIB_PPM_Table
8260{
8261 UCHAR ucRevId;
8262 UCHAR ucPpmDesign; //A+I or A+A
8263 USHORT usCpuCoreNumber;
8264 ULONG ulPlatformTDP;
8265 ULONG ulSmallACPlatformTDP;
8266 ULONG ulPlatformTDC;
8267 ULONG ulSmallACPlatformTDC;
8268 ULONG ulApuTDP;
8269 ULONG ulDGpuTDP;
8270 ULONG ulDGpuUlvPower;
8271 ULONG ulTjmax;
8272} ATOM_PPLIB_PPM_Table;
8273
7799/**************************************************************************/ 8274/**************************************************************************/
7800 8275
7801 8276
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index d5df8fd10217..c7ad4b930850 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -555,7 +555,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
555 if (rdev->family < CHIP_RV770) 555 if (rdev->family < CHIP_RV770)
556 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 556 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
557 /* use frac fb div on APUs */ 557 /* use frac fb div on APUs */
558 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) 558 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
559 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 559 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
560 /* use frac fb div on RS780/RS880 */ 560 /* use frac fb div on RS780/RS880 */
561 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) 561 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
@@ -743,7 +743,7 @@ static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
743 * SetPixelClock provides the dividers 743 * SetPixelClock provides the dividers
744 */ 744 */
745 args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk); 745 args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
746 if (ASIC_IS_DCE61(rdev)) 746 if (ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
747 args.v6.ucPpll = ATOM_EXT_PLL1; 747 args.v6.ucPpll = ATOM_EXT_PLL1;
748 else if (ASIC_IS_DCE6(rdev)) 748 else if (ASIC_IS_DCE6(rdev))
749 args.v6.ucPpll = ATOM_PPLL0; 749 args.v6.ucPpll = ATOM_PPLL0;
@@ -1143,7 +1143,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1143 } 1143 }
1144 1144
1145 if (tiling_flags & RADEON_TILING_MACRO) { 1145 if (tiling_flags & RADEON_TILING_MACRO) {
1146 if (rdev->family >= CHIP_TAHITI) 1146 if (rdev->family >= CHIP_BONAIRE)
1147 tmp = rdev->config.cik.tile_config;
1148 else if (rdev->family >= CHIP_TAHITI)
1147 tmp = rdev->config.si.tile_config; 1149 tmp = rdev->config.si.tile_config;
1148 else if (rdev->family >= CHIP_CAYMAN) 1150 else if (rdev->family >= CHIP_CAYMAN)
1149 tmp = rdev->config.cayman.tile_config; 1151 tmp = rdev->config.cayman.tile_config;
@@ -1170,11 +1172,29 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1170 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); 1172 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
1171 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); 1173 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
1172 fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect); 1174 fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
1175 if (rdev->family >= CHIP_BONAIRE) {
1176 /* XXX need to know more about the surface tiling mode */
1177 fb_format |= CIK_GRPH_MICRO_TILE_MODE(CIK_DISPLAY_MICRO_TILING);
1178 }
1173 } else if (tiling_flags & RADEON_TILING_MICRO) 1179 } else if (tiling_flags & RADEON_TILING_MICRO)
1174 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1180 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1175 1181
1176 if ((rdev->family == CHIP_TAHITI) || 1182 if (rdev->family >= CHIP_BONAIRE) {
1177 (rdev->family == CHIP_PITCAIRN)) 1183 u32 num_pipe_configs = rdev->config.cik.max_tile_pipes;
1184 u32 num_rb = rdev->config.cik.max_backends_per_se;
1185 if (num_pipe_configs > 8)
1186 num_pipe_configs = 8;
1187 if (num_pipe_configs == 8)
1188 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16);
1189 else if (num_pipe_configs == 4) {
1190 if (num_rb == 4)
1191 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
1192 else if (num_rb < 4)
1193 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
1194 } else if (num_pipe_configs == 2)
1195 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
1196 } else if ((rdev->family == CHIP_TAHITI) ||
1197 (rdev->family == CHIP_PITCAIRN))
1178 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); 1198 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
1179 else if (rdev->family == CHIP_VERDE) 1199 else if (rdev->family == CHIP_VERDE)
1180 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); 1200 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
@@ -1224,8 +1244,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1224 WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); 1244 WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
1225 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); 1245 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
1226 1246
1227 WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, 1247 if (rdev->family >= CHIP_BONAIRE)
1228 target_fb->height); 1248 WREG32(CIK_LB_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
1249 target_fb->height);
1250 else
1251 WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
1252 target_fb->height);
1229 x &= ~3; 1253 x &= ~3;
1230 y &= ~1; 1254 y &= ~1;
1231 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, 1255 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
@@ -1597,6 +1621,12 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
1597 * 1621 *
1598 * Asic specific PLL information 1622 * Asic specific PLL information
1599 * 1623 *
1624 * DCE 8.x
1625 * KB/KV
1626 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
1627 * CI
1628 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
1629 *
1600 * DCE 6.1 1630 * DCE 6.1
1601 * - PPLL2 is only available to UNIPHYA (both DP and non-DP) 1631 * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
1602 * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP) 1632 * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
@@ -1623,7 +1653,47 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1623 u32 pll_in_use; 1653 u32 pll_in_use;
1624 int pll; 1654 int pll;
1625 1655
1626 if (ASIC_IS_DCE61(rdev)) { 1656 if (ASIC_IS_DCE8(rdev)) {
1657 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
1658 if (rdev->clock.dp_extclk)
1659 /* skip PPLL programming if using ext clock */
1660 return ATOM_PPLL_INVALID;
1661 else {
1662 /* use the same PPLL for all DP monitors */
1663 pll = radeon_get_shared_dp_ppll(crtc);
1664 if (pll != ATOM_PPLL_INVALID)
1665 return pll;
1666 }
1667 } else {
1668 /* use the same PPLL for all monitors with the same clock */
1669 pll = radeon_get_shared_nondp_ppll(crtc);
1670 if (pll != ATOM_PPLL_INVALID)
1671 return pll;
1672 }
1673 /* otherwise, pick one of the plls */
1674 if ((rdev->family == CHIP_KAVERI) ||
1675 (rdev->family == CHIP_KABINI)) {
1676 /* KB/KV has PPLL1 and PPLL2 */
1677 pll_in_use = radeon_get_pll_use_mask(crtc);
1678 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1679 return ATOM_PPLL2;
1680 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1681 return ATOM_PPLL1;
1682 DRM_ERROR("unable to allocate a PPLL\n");
1683 return ATOM_PPLL_INVALID;
1684 } else {
1685 /* CI has PPLL0, PPLL1, and PPLL2 */
1686 pll_in_use = radeon_get_pll_use_mask(crtc);
1687 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1688 return ATOM_PPLL2;
1689 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1690 return ATOM_PPLL1;
1691 if (!(pll_in_use & (1 << ATOM_PPLL0)))
1692 return ATOM_PPLL0;
1693 DRM_ERROR("unable to allocate a PPLL\n");
1694 return ATOM_PPLL_INVALID;
1695 }
1696 } else if (ASIC_IS_DCE61(rdev)) {
1627 struct radeon_encoder_atom_dig *dig = 1697 struct radeon_encoder_atom_dig *dig =
1628 radeon_encoder->enc_priv; 1698 radeon_encoder->enc_priv;
1629 1699
@@ -1861,7 +1931,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1861 break; 1931 break;
1862 case ATOM_PPLL0: 1932 case ATOM_PPLL0:
1863 /* disable the ppll */ 1933 /* disable the ppll */
1864 if (ASIC_IS_DCE61(rdev)) 1934 if ((rdev->family == CHIP_ARUBA) || (rdev->family == CHIP_BONAIRE))
1865 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 1935 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
1866 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 1936 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
1867 break; 1937 break;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 8406c8251fbf..092275d53d4a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -186,6 +186,13 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
186 u8 backlight_level; 186 u8 backlight_level;
187 char bl_name[16]; 187 char bl_name[16];
188 188
189 /* Mac laptops with multiple GPUs use the gmux driver for backlight
190 * so don't register a backlight device
191 */
192 if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
193 (rdev->pdev->device == 0x6741))
194 return;
195
189 if (!radeon_encoder->enc_priv) 196 if (!radeon_encoder->enc_priv)
190 return; 197 return;
191 198
@@ -296,6 +303,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
296 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 303 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
297 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 304 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
298 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 305 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
306 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
299 return true; 307 return true;
300 default: 308 default:
301 return false; 309 return false;
@@ -479,11 +487,11 @@ static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
479 } 487 }
480} 488}
481 489
482
483union dvo_encoder_control { 490union dvo_encoder_control {
484 ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; 491 ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
485 DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; 492 DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
486 DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3; 493 DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
494 DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 dvo_v4;
487}; 495};
488 496
489void 497void
@@ -533,6 +541,13 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action)
533 args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 541 args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
534 args.dvo_v3.ucDVOConfig = 0; /* XXX */ 542 args.dvo_v3.ucDVOConfig = 0; /* XXX */
535 break; 543 break;
544 case 4:
545 /* DCE8 */
546 args.dvo_v4.ucAction = action;
547 args.dvo_v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
548 args.dvo_v4.ucDVOConfig = 0; /* XXX */
549 args.dvo_v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
550 break;
536 default: 551 default:
537 DRM_ERROR("Unknown table version %d, %d\n", frev, crev); 552 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
538 break; 553 break;
@@ -915,10 +930,14 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
915 args.v4.ucLaneNum = 4; 930 args.v4.ucLaneNum = 4;
916 931
917 if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) { 932 if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
918 if (dp_clock == 270000) 933 if (dp_clock == 540000)
919 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
920 else if (dp_clock == 540000)
921 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; 934 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
935 else if (dp_clock == 324000)
936 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ;
937 else if (dp_clock == 270000)
938 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
939 else
940 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ;
922 } 941 }
923 args.v4.acConfig.ucDigSel = dig->dig_encoder; 942 args.v4.acConfig.ucDigSel = dig->dig_encoder;
924 args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder); 943 args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
@@ -1012,6 +1031,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1012 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1031 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1013 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1032 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1014 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1033 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1034 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
1015 index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); 1035 index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
1016 break; 1036 break;
1017 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1037 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
@@ -1271,6 +1291,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1271 else 1291 else
1272 args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE; 1292 args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE;
1273 break; 1293 break;
1294 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
1295 args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYG;
1296 break;
1274 } 1297 }
1275 if (is_dp) 1298 if (is_dp)
1276 args.v5.ucLaneNum = dp_lane_count; 1299 args.v5.ucLaneNum = dp_lane_count;
@@ -1735,6 +1758,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1735 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1758 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1736 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1759 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1737 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1760 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1761 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
1738 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1762 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1739 radeon_atom_encoder_dpms_dig(encoder, mode); 1763 radeon_atom_encoder_dpms_dig(encoder, mode);
1740 break; 1764 break;
@@ -1872,6 +1896,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1872 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1896 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1873 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1897 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1874 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1898 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1899 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
1875 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1900 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1876 dig = radeon_encoder->enc_priv; 1901 dig = radeon_encoder->enc_priv;
1877 switch (dig->dig_encoder) { 1902 switch (dig->dig_encoder) {
@@ -1893,6 +1918,9 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1893 case 5: 1918 case 5:
1894 args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID; 1919 args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
1895 break; 1920 break;
1921 case 6:
1922 args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
1923 break;
1896 } 1924 }
1897 break; 1925 break;
1898 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1926 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
@@ -1955,7 +1983,13 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
1955 /* set scaler clears this on some chips */ 1983 /* set scaler clears this on some chips */
1956 if (ASIC_IS_AVIVO(rdev) && 1984 if (ASIC_IS_AVIVO(rdev) &&
1957 (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) { 1985 (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
1958 if (ASIC_IS_DCE4(rdev)) { 1986 if (ASIC_IS_DCE8(rdev)) {
1987 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1988 WREG32(CIK_LB_DATA_FORMAT + radeon_crtc->crtc_offset,
1989 CIK_INTERLEAVE_EN);
1990 else
1991 WREG32(CIK_LB_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
1992 } else if (ASIC_IS_DCE4(rdev)) {
1959 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1993 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1960 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 1994 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
1961 EVERGREEN_INTERLEAVE_EN); 1995 EVERGREEN_INTERLEAVE_EN);
@@ -2002,6 +2036,9 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
2002 else 2036 else
2003 return 4; 2037 return 4;
2004 break; 2038 break;
2039 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2040 return 6;
2041 break;
2005 } 2042 }
2006 } else if (ASIC_IS_DCE4(rdev)) { 2043 } else if (ASIC_IS_DCE4(rdev)) {
2007 /* DCE4/5 */ 2044 /* DCE4/5 */
@@ -2086,6 +2123,7 @@ radeon_atom_encoder_init(struct radeon_device *rdev)
2086 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 2123 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2087 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2124 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2088 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2125 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2126 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2089 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 2127 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
2090 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); 2128 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
2091 break; 2129 break;
@@ -2130,6 +2168,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
2130 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 2168 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2131 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2169 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2132 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2170 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2171 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2133 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 2172 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
2134 /* handled in dpms */ 2173 /* handled in dpms */
2135 break; 2174 break;
@@ -2395,6 +2434,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
2395 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 2434 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2396 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2435 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2397 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2436 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2437 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2398 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 2438 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
2399 /* handled in dpms */ 2439 /* handled in dpms */
2400 break; 2440 break;
@@ -2626,6 +2666,7 @@ radeon_add_atom_encoder(struct drm_device *dev,
2626 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 2666 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
2627 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2667 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2628 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2668 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2669 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2629 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 2670 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
2630 radeon_encoder->rmx_type = RMX_FULL; 2671 radeon_encoder->rmx_type = RMX_FULL;
2631 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); 2672 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
new file mode 100644
index 000000000000..bab018583417
--- /dev/null
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -0,0 +1,2740 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "btcd.h"
28#include "r600_dpm.h"
29#include "cypress_dpm.h"
30#include "btc_dpm.h"
31#include "atom.h"
32
33#define MC_CG_ARB_FREQ_F0 0x0a
34#define MC_CG_ARB_FREQ_F1 0x0b
35#define MC_CG_ARB_FREQ_F2 0x0c
36#define MC_CG_ARB_FREQ_F3 0x0d
37
38#define MC_CG_SEQ_DRAMCONF_S0 0x05
39#define MC_CG_SEQ_DRAMCONF_S1 0x06
40#define MC_CG_SEQ_YCLK_SUSPEND 0x04
41#define MC_CG_SEQ_YCLK_RESUME 0x0a
42
43#define SMC_RAM_END 0x8000
44
45#ifndef BTC_MGCG_SEQUENCE
46#define BTC_MGCG_SEQUENCE 300
47
48struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps);
49struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
50struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
51
52
53//********* BARTS **************//
54static const u32 barts_cgcg_cgls_default[] =
55{
56 /* Register, Value, Mask bits */
57 0x000008f8, 0x00000010, 0xffffffff,
58 0x000008fc, 0x00000000, 0xffffffff,
59 0x000008f8, 0x00000011, 0xffffffff,
60 0x000008fc, 0x00000000, 0xffffffff,
61 0x000008f8, 0x00000012, 0xffffffff,
62 0x000008fc, 0x00000000, 0xffffffff,
63 0x000008f8, 0x00000013, 0xffffffff,
64 0x000008fc, 0x00000000, 0xffffffff,
65 0x000008f8, 0x00000014, 0xffffffff,
66 0x000008fc, 0x00000000, 0xffffffff,
67 0x000008f8, 0x00000015, 0xffffffff,
68 0x000008fc, 0x00000000, 0xffffffff,
69 0x000008f8, 0x00000016, 0xffffffff,
70 0x000008fc, 0x00000000, 0xffffffff,
71 0x000008f8, 0x00000017, 0xffffffff,
72 0x000008fc, 0x00000000, 0xffffffff,
73 0x000008f8, 0x00000018, 0xffffffff,
74 0x000008fc, 0x00000000, 0xffffffff,
75 0x000008f8, 0x00000019, 0xffffffff,
76 0x000008fc, 0x00000000, 0xffffffff,
77 0x000008f8, 0x0000001a, 0xffffffff,
78 0x000008fc, 0x00000000, 0xffffffff,
79 0x000008f8, 0x0000001b, 0xffffffff,
80 0x000008fc, 0x00000000, 0xffffffff,
81 0x000008f8, 0x00000020, 0xffffffff,
82 0x000008fc, 0x00000000, 0xffffffff,
83 0x000008f8, 0x00000021, 0xffffffff,
84 0x000008fc, 0x00000000, 0xffffffff,
85 0x000008f8, 0x00000022, 0xffffffff,
86 0x000008fc, 0x00000000, 0xffffffff,
87 0x000008f8, 0x00000023, 0xffffffff,
88 0x000008fc, 0x00000000, 0xffffffff,
89 0x000008f8, 0x00000024, 0xffffffff,
90 0x000008fc, 0x00000000, 0xffffffff,
91 0x000008f8, 0x00000025, 0xffffffff,
92 0x000008fc, 0x00000000, 0xffffffff,
93 0x000008f8, 0x00000026, 0xffffffff,
94 0x000008fc, 0x00000000, 0xffffffff,
95 0x000008f8, 0x00000027, 0xffffffff,
96 0x000008fc, 0x00000000, 0xffffffff,
97 0x000008f8, 0x00000028, 0xffffffff,
98 0x000008fc, 0x00000000, 0xffffffff,
99 0x000008f8, 0x00000029, 0xffffffff,
100 0x000008fc, 0x00000000, 0xffffffff,
101 0x000008f8, 0x0000002a, 0xffffffff,
102 0x000008fc, 0x00000000, 0xffffffff,
103 0x000008f8, 0x0000002b, 0xffffffff,
104 0x000008fc, 0x00000000, 0xffffffff
105};
106#define BARTS_CGCG_CGLS_DEFAULT_LENGTH sizeof(barts_cgcg_cgls_default) / (3 * sizeof(u32))
107
108static const u32 barts_cgcg_cgls_disable[] =
109{
110 0x000008f8, 0x00000010, 0xffffffff,
111 0x000008fc, 0xffffffff, 0xffffffff,
112 0x000008f8, 0x00000011, 0xffffffff,
113 0x000008fc, 0xffffffff, 0xffffffff,
114 0x000008f8, 0x00000012, 0xffffffff,
115 0x000008fc, 0xffffffff, 0xffffffff,
116 0x000008f8, 0x00000013, 0xffffffff,
117 0x000008fc, 0xffffffff, 0xffffffff,
118 0x000008f8, 0x00000014, 0xffffffff,
119 0x000008fc, 0xffffffff, 0xffffffff,
120 0x000008f8, 0x00000015, 0xffffffff,
121 0x000008fc, 0xffffffff, 0xffffffff,
122 0x000008f8, 0x00000016, 0xffffffff,
123 0x000008fc, 0xffffffff, 0xffffffff,
124 0x000008f8, 0x00000017, 0xffffffff,
125 0x000008fc, 0xffffffff, 0xffffffff,
126 0x000008f8, 0x00000018, 0xffffffff,
127 0x000008fc, 0xffffffff, 0xffffffff,
128 0x000008f8, 0x00000019, 0xffffffff,
129 0x000008fc, 0xffffffff, 0xffffffff,
130 0x000008f8, 0x0000001a, 0xffffffff,
131 0x000008fc, 0xffffffff, 0xffffffff,
132 0x000008f8, 0x0000001b, 0xffffffff,
133 0x000008fc, 0xffffffff, 0xffffffff,
134 0x000008f8, 0x00000020, 0xffffffff,
135 0x000008fc, 0x00000000, 0xffffffff,
136 0x000008f8, 0x00000021, 0xffffffff,
137 0x000008fc, 0x00000000, 0xffffffff,
138 0x000008f8, 0x00000022, 0xffffffff,
139 0x000008fc, 0x00000000, 0xffffffff,
140 0x000008f8, 0x00000023, 0xffffffff,
141 0x000008fc, 0x00000000, 0xffffffff,
142 0x000008f8, 0x00000024, 0xffffffff,
143 0x000008fc, 0x00000000, 0xffffffff,
144 0x000008f8, 0x00000025, 0xffffffff,
145 0x000008fc, 0x00000000, 0xffffffff,
146 0x000008f8, 0x00000026, 0xffffffff,
147 0x000008fc, 0x00000000, 0xffffffff,
148 0x000008f8, 0x00000027, 0xffffffff,
149 0x000008fc, 0x00000000, 0xffffffff,
150 0x000008f8, 0x00000028, 0xffffffff,
151 0x000008fc, 0x00000000, 0xffffffff,
152 0x000008f8, 0x00000029, 0xffffffff,
153 0x000008fc, 0x00000000, 0xffffffff,
154 0x000008f8, 0x0000002a, 0xffffffff,
155 0x000008fc, 0x00000000, 0xffffffff,
156 0x000008f8, 0x0000002b, 0xffffffff,
157 0x000008fc, 0x00000000, 0xffffffff,
158 0x00000644, 0x000f7912, 0x001f4180,
159 0x00000644, 0x000f3812, 0x001f4180
160};
161#define BARTS_CGCG_CGLS_DISABLE_LENGTH sizeof(barts_cgcg_cgls_disable) / (3 * sizeof(u32))
162
163static const u32 barts_cgcg_cgls_enable[] =
164{
165 /* 0x0000c124, 0x84180000, 0x00180000, */
166 0x00000644, 0x000f7892, 0x001f4080,
167 0x000008f8, 0x00000010, 0xffffffff,
168 0x000008fc, 0x00000000, 0xffffffff,
169 0x000008f8, 0x00000011, 0xffffffff,
170 0x000008fc, 0x00000000, 0xffffffff,
171 0x000008f8, 0x00000012, 0xffffffff,
172 0x000008fc, 0x00000000, 0xffffffff,
173 0x000008f8, 0x00000013, 0xffffffff,
174 0x000008fc, 0x00000000, 0xffffffff,
175 0x000008f8, 0x00000014, 0xffffffff,
176 0x000008fc, 0x00000000, 0xffffffff,
177 0x000008f8, 0x00000015, 0xffffffff,
178 0x000008fc, 0x00000000, 0xffffffff,
179 0x000008f8, 0x00000016, 0xffffffff,
180 0x000008fc, 0x00000000, 0xffffffff,
181 0x000008f8, 0x00000017, 0xffffffff,
182 0x000008fc, 0x00000000, 0xffffffff,
183 0x000008f8, 0x00000018, 0xffffffff,
184 0x000008fc, 0x00000000, 0xffffffff,
185 0x000008f8, 0x00000019, 0xffffffff,
186 0x000008fc, 0x00000000, 0xffffffff,
187 0x000008f8, 0x0000001a, 0xffffffff,
188 0x000008fc, 0x00000000, 0xffffffff,
189 0x000008f8, 0x0000001b, 0xffffffff,
190 0x000008fc, 0x00000000, 0xffffffff,
191 0x000008f8, 0x00000020, 0xffffffff,
192 0x000008fc, 0xffffffff, 0xffffffff,
193 0x000008f8, 0x00000021, 0xffffffff,
194 0x000008fc, 0xffffffff, 0xffffffff,
195 0x000008f8, 0x00000022, 0xffffffff,
196 0x000008fc, 0xffffffff, 0xffffffff,
197 0x000008f8, 0x00000023, 0xffffffff,
198 0x000008fc, 0xffffffff, 0xffffffff,
199 0x000008f8, 0x00000024, 0xffffffff,
200 0x000008fc, 0xffffffff, 0xffffffff,
201 0x000008f8, 0x00000025, 0xffffffff,
202 0x000008fc, 0xffffffff, 0xffffffff,
203 0x000008f8, 0x00000026, 0xffffffff,
204 0x000008fc, 0xffffffff, 0xffffffff,
205 0x000008f8, 0x00000027, 0xffffffff,
206 0x000008fc, 0xffffffff, 0xffffffff,
207 0x000008f8, 0x00000028, 0xffffffff,
208 0x000008fc, 0xffffffff, 0xffffffff,
209 0x000008f8, 0x00000029, 0xffffffff,
210 0x000008fc, 0xffffffff, 0xffffffff,
211 0x000008f8, 0x0000002a, 0xffffffff,
212 0x000008fc, 0xffffffff, 0xffffffff,
213 0x000008f8, 0x0000002b, 0xffffffff,
214 0x000008fc, 0xffffffff, 0xffffffff
215};
216#define BARTS_CGCG_CGLS_ENABLE_LENGTH sizeof(barts_cgcg_cgls_enable) / (3 * sizeof(u32))
217
218static const u32 barts_mgcg_default[] =
219{
220 0x0000802c, 0xc0000000, 0xffffffff,
221 0x00005448, 0x00000100, 0xffffffff,
222 0x000055e4, 0x00600100, 0xffffffff,
223 0x0000160c, 0x00000100, 0xffffffff,
224 0x0000c164, 0x00000100, 0xffffffff,
225 0x00008a18, 0x00000100, 0xffffffff,
226 0x0000897c, 0x06000100, 0xffffffff,
227 0x00008b28, 0x00000100, 0xffffffff,
228 0x00009144, 0x00000100, 0xffffffff,
229 0x00009a60, 0x00000100, 0xffffffff,
230 0x00009868, 0x00000100, 0xffffffff,
231 0x00008d58, 0x00000100, 0xffffffff,
232 0x00009510, 0x00000100, 0xffffffff,
233 0x0000949c, 0x00000100, 0xffffffff,
234 0x00009654, 0x00000100, 0xffffffff,
235 0x00009030, 0x00000100, 0xffffffff,
236 0x00009034, 0x00000100, 0xffffffff,
237 0x00009038, 0x00000100, 0xffffffff,
238 0x0000903c, 0x00000100, 0xffffffff,
239 0x00009040, 0x00000100, 0xffffffff,
240 0x0000a200, 0x00000100, 0xffffffff,
241 0x0000a204, 0x00000100, 0xffffffff,
242 0x0000a208, 0x00000100, 0xffffffff,
243 0x0000a20c, 0x00000100, 0xffffffff,
244 0x0000977c, 0x00000100, 0xffffffff,
245 0x00003f80, 0x00000100, 0xffffffff,
246 0x0000a210, 0x00000100, 0xffffffff,
247 0x0000a214, 0x00000100, 0xffffffff,
248 0x000004d8, 0x00000100, 0xffffffff,
249 0x00009784, 0x00000100, 0xffffffff,
250 0x00009698, 0x00000100, 0xffffffff,
251 0x000004d4, 0x00000200, 0xffffffff,
252 0x000004d0, 0x00000000, 0xffffffff,
253 0x000030cc, 0x00000100, 0xffffffff,
254 0x0000d0c0, 0xff000100, 0xffffffff,
255 0x0000802c, 0x40000000, 0xffffffff,
256 0x0000915c, 0x00010000, 0xffffffff,
257 0x00009160, 0x00030002, 0xffffffff,
258 0x00009164, 0x00050004, 0xffffffff,
259 0x00009168, 0x00070006, 0xffffffff,
260 0x00009178, 0x00070000, 0xffffffff,
261 0x0000917c, 0x00030002, 0xffffffff,
262 0x00009180, 0x00050004, 0xffffffff,
263 0x0000918c, 0x00010006, 0xffffffff,
264 0x00009190, 0x00090008, 0xffffffff,
265 0x00009194, 0x00070000, 0xffffffff,
266 0x00009198, 0x00030002, 0xffffffff,
267 0x0000919c, 0x00050004, 0xffffffff,
268 0x000091a8, 0x00010006, 0xffffffff,
269 0x000091ac, 0x00090008, 0xffffffff,
270 0x000091b0, 0x00070000, 0xffffffff,
271 0x000091b4, 0x00030002, 0xffffffff,
272 0x000091b8, 0x00050004, 0xffffffff,
273 0x000091c4, 0x00010006, 0xffffffff,
274 0x000091c8, 0x00090008, 0xffffffff,
275 0x000091cc, 0x00070000, 0xffffffff,
276 0x000091d0, 0x00030002, 0xffffffff,
277 0x000091d4, 0x00050004, 0xffffffff,
278 0x000091e0, 0x00010006, 0xffffffff,
279 0x000091e4, 0x00090008, 0xffffffff,
280 0x000091e8, 0x00000000, 0xffffffff,
281 0x000091ec, 0x00070000, 0xffffffff,
282 0x000091f0, 0x00030002, 0xffffffff,
283 0x000091f4, 0x00050004, 0xffffffff,
284 0x00009200, 0x00010006, 0xffffffff,
285 0x00009204, 0x00090008, 0xffffffff,
286 0x00009208, 0x00070000, 0xffffffff,
287 0x0000920c, 0x00030002, 0xffffffff,
288 0x00009210, 0x00050004, 0xffffffff,
289 0x0000921c, 0x00010006, 0xffffffff,
290 0x00009220, 0x00090008, 0xffffffff,
291 0x00009224, 0x00070000, 0xffffffff,
292 0x00009228, 0x00030002, 0xffffffff,
293 0x0000922c, 0x00050004, 0xffffffff,
294 0x00009238, 0x00010006, 0xffffffff,
295 0x0000923c, 0x00090008, 0xffffffff,
296 0x00009294, 0x00000000, 0xffffffff,
297 0x0000802c, 0x40010000, 0xffffffff,
298 0x0000915c, 0x00010000, 0xffffffff,
299 0x00009160, 0x00030002, 0xffffffff,
300 0x00009164, 0x00050004, 0xffffffff,
301 0x00009168, 0x00070006, 0xffffffff,
302 0x00009178, 0x00070000, 0xffffffff,
303 0x0000917c, 0x00030002, 0xffffffff,
304 0x00009180, 0x00050004, 0xffffffff,
305 0x0000918c, 0x00010006, 0xffffffff,
306 0x00009190, 0x00090008, 0xffffffff,
307 0x00009194, 0x00070000, 0xffffffff,
308 0x00009198, 0x00030002, 0xffffffff,
309 0x0000919c, 0x00050004, 0xffffffff,
310 0x000091a8, 0x00010006, 0xffffffff,
311 0x000091ac, 0x00090008, 0xffffffff,
312 0x000091b0, 0x00070000, 0xffffffff,
313 0x000091b4, 0x00030002, 0xffffffff,
314 0x000091b8, 0x00050004, 0xffffffff,
315 0x000091c4, 0x00010006, 0xffffffff,
316 0x000091c8, 0x00090008, 0xffffffff,
317 0x000091cc, 0x00070000, 0xffffffff,
318 0x000091d0, 0x00030002, 0xffffffff,
319 0x000091d4, 0x00050004, 0xffffffff,
320 0x000091e0, 0x00010006, 0xffffffff,
321 0x000091e4, 0x00090008, 0xffffffff,
322 0x000091e8, 0x00000000, 0xffffffff,
323 0x000091ec, 0x00070000, 0xffffffff,
324 0x000091f0, 0x00030002, 0xffffffff,
325 0x000091f4, 0x00050004, 0xffffffff,
326 0x00009200, 0x00010006, 0xffffffff,
327 0x00009204, 0x00090008, 0xffffffff,
328 0x00009208, 0x00070000, 0xffffffff,
329 0x0000920c, 0x00030002, 0xffffffff,
330 0x00009210, 0x00050004, 0xffffffff,
331 0x0000921c, 0x00010006, 0xffffffff,
332 0x00009220, 0x00090008, 0xffffffff,
333 0x00009224, 0x00070000, 0xffffffff,
334 0x00009228, 0x00030002, 0xffffffff,
335 0x0000922c, 0x00050004, 0xffffffff,
336 0x00009238, 0x00010006, 0xffffffff,
337 0x0000923c, 0x00090008, 0xffffffff,
338 0x00009294, 0x00000000, 0xffffffff,
339 0x0000802c, 0xc0000000, 0xffffffff,
340 0x000008f8, 0x00000010, 0xffffffff,
341 0x000008fc, 0x00000000, 0xffffffff,
342 0x000008f8, 0x00000011, 0xffffffff,
343 0x000008fc, 0x00000000, 0xffffffff,
344 0x000008f8, 0x00000012, 0xffffffff,
345 0x000008fc, 0x00000000, 0xffffffff,
346 0x000008f8, 0x00000013, 0xffffffff,
347 0x000008fc, 0x00000000, 0xffffffff,
348 0x000008f8, 0x00000014, 0xffffffff,
349 0x000008fc, 0x00000000, 0xffffffff,
350 0x000008f8, 0x00000015, 0xffffffff,
351 0x000008fc, 0x00000000, 0xffffffff,
352 0x000008f8, 0x00000016, 0xffffffff,
353 0x000008fc, 0x00000000, 0xffffffff,
354 0x000008f8, 0x00000017, 0xffffffff,
355 0x000008fc, 0x00000000, 0xffffffff,
356 0x000008f8, 0x00000018, 0xffffffff,
357 0x000008fc, 0x00000000, 0xffffffff,
358 0x000008f8, 0x00000019, 0xffffffff,
359 0x000008fc, 0x00000000, 0xffffffff,
360 0x000008f8, 0x0000001a, 0xffffffff,
361 0x000008fc, 0x00000000, 0xffffffff,
362 0x000008f8, 0x0000001b, 0xffffffff,
363 0x000008fc, 0x00000000, 0xffffffff
364};
365#define BARTS_MGCG_DEFAULT_LENGTH sizeof(barts_mgcg_default) / (3 * sizeof(u32))
366
367static const u32 barts_mgcg_disable[] =
368{
369 0x0000802c, 0xc0000000, 0xffffffff,
370 0x000008f8, 0x00000000, 0xffffffff,
371 0x000008fc, 0xffffffff, 0xffffffff,
372 0x000008f8, 0x00000001, 0xffffffff,
373 0x000008fc, 0xffffffff, 0xffffffff,
374 0x000008f8, 0x00000002, 0xffffffff,
375 0x000008fc, 0xffffffff, 0xffffffff,
376 0x000008f8, 0x00000003, 0xffffffff,
377 0x000008fc, 0xffffffff, 0xffffffff,
378 0x00009150, 0x00600000, 0xffffffff
379};
380#define BARTS_MGCG_DISABLE_LENGTH sizeof(barts_mgcg_disable) / (3 * sizeof(u32))
381
382static const u32 barts_mgcg_enable[] =
383{
384 0x0000802c, 0xc0000000, 0xffffffff,
385 0x000008f8, 0x00000000, 0xffffffff,
386 0x000008fc, 0x00000000, 0xffffffff,
387 0x000008f8, 0x00000001, 0xffffffff,
388 0x000008fc, 0x00000000, 0xffffffff,
389 0x000008f8, 0x00000002, 0xffffffff,
390 0x000008fc, 0x00000000, 0xffffffff,
391 0x000008f8, 0x00000003, 0xffffffff,
392 0x000008fc, 0x00000000, 0xffffffff,
393 0x00009150, 0x81944000, 0xffffffff
394};
395#define BARTS_MGCG_ENABLE_LENGTH sizeof(barts_mgcg_enable) / (3 * sizeof(u32))
396
397//********* CAICOS **************//
398static const u32 caicos_cgcg_cgls_default[] =
399{
400 0x000008f8, 0x00000010, 0xffffffff,
401 0x000008fc, 0x00000000, 0xffffffff,
402 0x000008f8, 0x00000011, 0xffffffff,
403 0x000008fc, 0x00000000, 0xffffffff,
404 0x000008f8, 0x00000012, 0xffffffff,
405 0x000008fc, 0x00000000, 0xffffffff,
406 0x000008f8, 0x00000013, 0xffffffff,
407 0x000008fc, 0x00000000, 0xffffffff,
408 0x000008f8, 0x00000014, 0xffffffff,
409 0x000008fc, 0x00000000, 0xffffffff,
410 0x000008f8, 0x00000015, 0xffffffff,
411 0x000008fc, 0x00000000, 0xffffffff,
412 0x000008f8, 0x00000016, 0xffffffff,
413 0x000008fc, 0x00000000, 0xffffffff,
414 0x000008f8, 0x00000017, 0xffffffff,
415 0x000008fc, 0x00000000, 0xffffffff,
416 0x000008f8, 0x00000018, 0xffffffff,
417 0x000008fc, 0x00000000, 0xffffffff,
418 0x000008f8, 0x00000019, 0xffffffff,
419 0x000008fc, 0x00000000, 0xffffffff,
420 0x000008f8, 0x0000001a, 0xffffffff,
421 0x000008fc, 0x00000000, 0xffffffff,
422 0x000008f8, 0x0000001b, 0xffffffff,
423 0x000008fc, 0x00000000, 0xffffffff,
424 0x000008f8, 0x00000020, 0xffffffff,
425 0x000008fc, 0x00000000, 0xffffffff,
426 0x000008f8, 0x00000021, 0xffffffff,
427 0x000008fc, 0x00000000, 0xffffffff,
428 0x000008f8, 0x00000022, 0xffffffff,
429 0x000008fc, 0x00000000, 0xffffffff,
430 0x000008f8, 0x00000023, 0xffffffff,
431 0x000008fc, 0x00000000, 0xffffffff,
432 0x000008f8, 0x00000024, 0xffffffff,
433 0x000008fc, 0x00000000, 0xffffffff,
434 0x000008f8, 0x00000025, 0xffffffff,
435 0x000008fc, 0x00000000, 0xffffffff,
436 0x000008f8, 0x00000026, 0xffffffff,
437 0x000008fc, 0x00000000, 0xffffffff,
438 0x000008f8, 0x00000027, 0xffffffff,
439 0x000008fc, 0x00000000, 0xffffffff,
440 0x000008f8, 0x00000028, 0xffffffff,
441 0x000008fc, 0x00000000, 0xffffffff,
442 0x000008f8, 0x00000029, 0xffffffff,
443 0x000008fc, 0x00000000, 0xffffffff,
444 0x000008f8, 0x0000002a, 0xffffffff,
445 0x000008fc, 0x00000000, 0xffffffff,
446 0x000008f8, 0x0000002b, 0xffffffff,
447 0x000008fc, 0x00000000, 0xffffffff
448};
449#define CAICOS_CGCG_CGLS_DEFAULT_LENGTH sizeof(caicos_cgcg_cgls_default) / (3 * sizeof(u32))
450
451static const u32 caicos_cgcg_cgls_disable[] =
452{
453 0x000008f8, 0x00000010, 0xffffffff,
454 0x000008fc, 0xffffffff, 0xffffffff,
455 0x000008f8, 0x00000011, 0xffffffff,
456 0x000008fc, 0xffffffff, 0xffffffff,
457 0x000008f8, 0x00000012, 0xffffffff,
458 0x000008fc, 0xffffffff, 0xffffffff,
459 0x000008f8, 0x00000013, 0xffffffff,
460 0x000008fc, 0xffffffff, 0xffffffff,
461 0x000008f8, 0x00000014, 0xffffffff,
462 0x000008fc, 0xffffffff, 0xffffffff,
463 0x000008f8, 0x00000015, 0xffffffff,
464 0x000008fc, 0xffffffff, 0xffffffff,
465 0x000008f8, 0x00000016, 0xffffffff,
466 0x000008fc, 0xffffffff, 0xffffffff,
467 0x000008f8, 0x00000017, 0xffffffff,
468 0x000008fc, 0xffffffff, 0xffffffff,
469 0x000008f8, 0x00000018, 0xffffffff,
470 0x000008fc, 0xffffffff, 0xffffffff,
471 0x000008f8, 0x00000019, 0xffffffff,
472 0x000008fc, 0xffffffff, 0xffffffff,
473 0x000008f8, 0x0000001a, 0xffffffff,
474 0x000008fc, 0xffffffff, 0xffffffff,
475 0x000008f8, 0x0000001b, 0xffffffff,
476 0x000008fc, 0xffffffff, 0xffffffff,
477 0x000008f8, 0x00000020, 0xffffffff,
478 0x000008fc, 0x00000000, 0xffffffff,
479 0x000008f8, 0x00000021, 0xffffffff,
480 0x000008fc, 0x00000000, 0xffffffff,
481 0x000008f8, 0x00000022, 0xffffffff,
482 0x000008fc, 0x00000000, 0xffffffff,
483 0x000008f8, 0x00000023, 0xffffffff,
484 0x000008fc, 0x00000000, 0xffffffff,
485 0x000008f8, 0x00000024, 0xffffffff,
486 0x000008fc, 0x00000000, 0xffffffff,
487 0x000008f8, 0x00000025, 0xffffffff,
488 0x000008fc, 0x00000000, 0xffffffff,
489 0x000008f8, 0x00000026, 0xffffffff,
490 0x000008fc, 0x00000000, 0xffffffff,
491 0x000008f8, 0x00000027, 0xffffffff,
492 0x000008fc, 0x00000000, 0xffffffff,
493 0x000008f8, 0x00000028, 0xffffffff,
494 0x000008fc, 0x00000000, 0xffffffff,
495 0x000008f8, 0x00000029, 0xffffffff,
496 0x000008fc, 0x00000000, 0xffffffff,
497 0x000008f8, 0x0000002a, 0xffffffff,
498 0x000008fc, 0x00000000, 0xffffffff,
499 0x000008f8, 0x0000002b, 0xffffffff,
500 0x000008fc, 0x00000000, 0xffffffff,
501 0x00000644, 0x000f7912, 0x001f4180,
502 0x00000644, 0x000f3812, 0x001f4180
503};
504#define CAICOS_CGCG_CGLS_DISABLE_LENGTH sizeof(caicos_cgcg_cgls_disable) / (3 * sizeof(u32))
505
506static const u32 caicos_cgcg_cgls_enable[] =
507{
508 /* 0x0000c124, 0x84180000, 0x00180000, */
509 0x00000644, 0x000f7892, 0x001f4080,
510 0x000008f8, 0x00000010, 0xffffffff,
511 0x000008fc, 0x00000000, 0xffffffff,
512 0x000008f8, 0x00000011, 0xffffffff,
513 0x000008fc, 0x00000000, 0xffffffff,
514 0x000008f8, 0x00000012, 0xffffffff,
515 0x000008fc, 0x00000000, 0xffffffff,
516 0x000008f8, 0x00000013, 0xffffffff,
517 0x000008fc, 0x00000000, 0xffffffff,
518 0x000008f8, 0x00000014, 0xffffffff,
519 0x000008fc, 0x00000000, 0xffffffff,
520 0x000008f8, 0x00000015, 0xffffffff,
521 0x000008fc, 0x00000000, 0xffffffff,
522 0x000008f8, 0x00000016, 0xffffffff,
523 0x000008fc, 0x00000000, 0xffffffff,
524 0x000008f8, 0x00000017, 0xffffffff,
525 0x000008fc, 0x00000000, 0xffffffff,
526 0x000008f8, 0x00000018, 0xffffffff,
527 0x000008fc, 0x00000000, 0xffffffff,
528 0x000008f8, 0x00000019, 0xffffffff,
529 0x000008fc, 0x00000000, 0xffffffff,
530 0x000008f8, 0x0000001a, 0xffffffff,
531 0x000008fc, 0x00000000, 0xffffffff,
532 0x000008f8, 0x0000001b, 0xffffffff,
533 0x000008fc, 0x00000000, 0xffffffff,
534 0x000008f8, 0x00000020, 0xffffffff,
535 0x000008fc, 0xffffffff, 0xffffffff,
536 0x000008f8, 0x00000021, 0xffffffff,
537 0x000008fc, 0xffffffff, 0xffffffff,
538 0x000008f8, 0x00000022, 0xffffffff,
539 0x000008fc, 0xffffffff, 0xffffffff,
540 0x000008f8, 0x00000023, 0xffffffff,
541 0x000008fc, 0xffffffff, 0xffffffff,
542 0x000008f8, 0x00000024, 0xffffffff,
543 0x000008fc, 0xffffffff, 0xffffffff,
544 0x000008f8, 0x00000025, 0xffffffff,
545 0x000008fc, 0xffffffff, 0xffffffff,
546 0x000008f8, 0x00000026, 0xffffffff,
547 0x000008fc, 0xffffffff, 0xffffffff,
548 0x000008f8, 0x00000027, 0xffffffff,
549 0x000008fc, 0xffffffff, 0xffffffff,
550 0x000008f8, 0x00000028, 0xffffffff,
551 0x000008fc, 0xffffffff, 0xffffffff,
552 0x000008f8, 0x00000029, 0xffffffff,
553 0x000008fc, 0xffffffff, 0xffffffff,
554 0x000008f8, 0x0000002a, 0xffffffff,
555 0x000008fc, 0xffffffff, 0xffffffff,
556 0x000008f8, 0x0000002b, 0xffffffff,
557 0x000008fc, 0xffffffff, 0xffffffff
558};
559#define CAICOS_CGCG_CGLS_ENABLE_LENGTH sizeof(caicos_cgcg_cgls_enable) / (3 * sizeof(u32))
560
561static const u32 caicos_mgcg_default[] =
562{
563 0x0000802c, 0xc0000000, 0xffffffff,
564 0x00005448, 0x00000100, 0xffffffff,
565 0x000055e4, 0x00600100, 0xffffffff,
566 0x0000160c, 0x00000100, 0xffffffff,
567 0x0000c164, 0x00000100, 0xffffffff,
568 0x00008a18, 0x00000100, 0xffffffff,
569 0x0000897c, 0x06000100, 0xffffffff,
570 0x00008b28, 0x00000100, 0xffffffff,
571 0x00009144, 0x00000100, 0xffffffff,
572 0x00009a60, 0x00000100, 0xffffffff,
573 0x00009868, 0x00000100, 0xffffffff,
574 0x00008d58, 0x00000100, 0xffffffff,
575 0x00009510, 0x00000100, 0xffffffff,
576 0x0000949c, 0x00000100, 0xffffffff,
577 0x00009654, 0x00000100, 0xffffffff,
578 0x00009030, 0x00000100, 0xffffffff,
579 0x00009034, 0x00000100, 0xffffffff,
580 0x00009038, 0x00000100, 0xffffffff,
581 0x0000903c, 0x00000100, 0xffffffff,
582 0x00009040, 0x00000100, 0xffffffff,
583 0x0000a200, 0x00000100, 0xffffffff,
584 0x0000a204, 0x00000100, 0xffffffff,
585 0x0000a208, 0x00000100, 0xffffffff,
586 0x0000a20c, 0x00000100, 0xffffffff,
587 0x0000977c, 0x00000100, 0xffffffff,
588 0x00003f80, 0x00000100, 0xffffffff,
589 0x0000a210, 0x00000100, 0xffffffff,
590 0x0000a214, 0x00000100, 0xffffffff,
591 0x000004d8, 0x00000100, 0xffffffff,
592 0x00009784, 0x00000100, 0xffffffff,
593 0x00009698, 0x00000100, 0xffffffff,
594 0x000004d4, 0x00000200, 0xffffffff,
595 0x000004d0, 0x00000000, 0xffffffff,
596 0x000030cc, 0x00000100, 0xffffffff,
597 0x0000d0c0, 0xff000100, 0xffffffff,
598 0x0000915c, 0x00010000, 0xffffffff,
599 0x00009160, 0x00030002, 0xffffffff,
600 0x00009164, 0x00050004, 0xffffffff,
601 0x00009168, 0x00070006, 0xffffffff,
602 0x00009178, 0x00070000, 0xffffffff,
603 0x0000917c, 0x00030002, 0xffffffff,
604 0x00009180, 0x00050004, 0xffffffff,
605 0x0000918c, 0x00010006, 0xffffffff,
606 0x00009190, 0x00090008, 0xffffffff,
607 0x00009194, 0x00070000, 0xffffffff,
608 0x00009198, 0x00030002, 0xffffffff,
609 0x0000919c, 0x00050004, 0xffffffff,
610 0x000091a8, 0x00010006, 0xffffffff,
611 0x000091ac, 0x00090008, 0xffffffff,
612 0x000091e8, 0x00000000, 0xffffffff,
613 0x00009294, 0x00000000, 0xffffffff,
614 0x000008f8, 0x00000010, 0xffffffff,
615 0x000008fc, 0x00000000, 0xffffffff,
616 0x000008f8, 0x00000011, 0xffffffff,
617 0x000008fc, 0x00000000, 0xffffffff,
618 0x000008f8, 0x00000012, 0xffffffff,
619 0x000008fc, 0x00000000, 0xffffffff,
620 0x000008f8, 0x00000013, 0xffffffff,
621 0x000008fc, 0x00000000, 0xffffffff,
622 0x000008f8, 0x00000014, 0xffffffff,
623 0x000008fc, 0x00000000, 0xffffffff,
624 0x000008f8, 0x00000015, 0xffffffff,
625 0x000008fc, 0x00000000, 0xffffffff,
626 0x000008f8, 0x00000016, 0xffffffff,
627 0x000008fc, 0x00000000, 0xffffffff,
628 0x000008f8, 0x00000017, 0xffffffff,
629 0x000008fc, 0x00000000, 0xffffffff,
630 0x000008f8, 0x00000018, 0xffffffff,
631 0x000008fc, 0x00000000, 0xffffffff,
632 0x000008f8, 0x00000019, 0xffffffff,
633 0x000008fc, 0x00000000, 0xffffffff,
634 0x000008f8, 0x0000001a, 0xffffffff,
635 0x000008fc, 0x00000000, 0xffffffff,
636 0x000008f8, 0x0000001b, 0xffffffff,
637 0x000008fc, 0x00000000, 0xffffffff
638};
639#define CAICOS_MGCG_DEFAULT_LENGTH sizeof(caicos_mgcg_default) / (3 * sizeof(u32))
640
641static const u32 caicos_mgcg_disable[] =
642{
643 0x0000802c, 0xc0000000, 0xffffffff,
644 0x000008f8, 0x00000000, 0xffffffff,
645 0x000008fc, 0xffffffff, 0xffffffff,
646 0x000008f8, 0x00000001, 0xffffffff,
647 0x000008fc, 0xffffffff, 0xffffffff,
648 0x000008f8, 0x00000002, 0xffffffff,
649 0x000008fc, 0xffffffff, 0xffffffff,
650 0x000008f8, 0x00000003, 0xffffffff,
651 0x000008fc, 0xffffffff, 0xffffffff,
652 0x00009150, 0x00600000, 0xffffffff
653};
654#define CAICOS_MGCG_DISABLE_LENGTH sizeof(caicos_mgcg_disable) / (3 * sizeof(u32))
655
656static const u32 caicos_mgcg_enable[] =
657{
658 0x0000802c, 0xc0000000, 0xffffffff,
659 0x000008f8, 0x00000000, 0xffffffff,
660 0x000008fc, 0x00000000, 0xffffffff,
661 0x000008f8, 0x00000001, 0xffffffff,
662 0x000008fc, 0x00000000, 0xffffffff,
663 0x000008f8, 0x00000002, 0xffffffff,
664 0x000008fc, 0x00000000, 0xffffffff,
665 0x000008f8, 0x00000003, 0xffffffff,
666 0x000008fc, 0x00000000, 0xffffffff,
667 0x00009150, 0x46944040, 0xffffffff
668};
669#define CAICOS_MGCG_ENABLE_LENGTH sizeof(caicos_mgcg_enable) / (3 * sizeof(u32))
670
671//********* TURKS **************//
672static const u32 turks_cgcg_cgls_default[] =
673{
674 0x000008f8, 0x00000010, 0xffffffff,
675 0x000008fc, 0x00000000, 0xffffffff,
676 0x000008f8, 0x00000011, 0xffffffff,
677 0x000008fc, 0x00000000, 0xffffffff,
678 0x000008f8, 0x00000012, 0xffffffff,
679 0x000008fc, 0x00000000, 0xffffffff,
680 0x000008f8, 0x00000013, 0xffffffff,
681 0x000008fc, 0x00000000, 0xffffffff,
682 0x000008f8, 0x00000014, 0xffffffff,
683 0x000008fc, 0x00000000, 0xffffffff,
684 0x000008f8, 0x00000015, 0xffffffff,
685 0x000008fc, 0x00000000, 0xffffffff,
686 0x000008f8, 0x00000016, 0xffffffff,
687 0x000008fc, 0x00000000, 0xffffffff,
688 0x000008f8, 0x00000017, 0xffffffff,
689 0x000008fc, 0x00000000, 0xffffffff,
690 0x000008f8, 0x00000018, 0xffffffff,
691 0x000008fc, 0x00000000, 0xffffffff,
692 0x000008f8, 0x00000019, 0xffffffff,
693 0x000008fc, 0x00000000, 0xffffffff,
694 0x000008f8, 0x0000001a, 0xffffffff,
695 0x000008fc, 0x00000000, 0xffffffff,
696 0x000008f8, 0x0000001b, 0xffffffff,
697 0x000008fc, 0x00000000, 0xffffffff,
698 0x000008f8, 0x00000020, 0xffffffff,
699 0x000008fc, 0x00000000, 0xffffffff,
700 0x000008f8, 0x00000021, 0xffffffff,
701 0x000008fc, 0x00000000, 0xffffffff,
702 0x000008f8, 0x00000022, 0xffffffff,
703 0x000008fc, 0x00000000, 0xffffffff,
704 0x000008f8, 0x00000023, 0xffffffff,
705 0x000008fc, 0x00000000, 0xffffffff,
706 0x000008f8, 0x00000024, 0xffffffff,
707 0x000008fc, 0x00000000, 0xffffffff,
708 0x000008f8, 0x00000025, 0xffffffff,
709 0x000008fc, 0x00000000, 0xffffffff,
710 0x000008f8, 0x00000026, 0xffffffff,
711 0x000008fc, 0x00000000, 0xffffffff,
712 0x000008f8, 0x00000027, 0xffffffff,
713 0x000008fc, 0x00000000, 0xffffffff,
714 0x000008f8, 0x00000028, 0xffffffff,
715 0x000008fc, 0x00000000, 0xffffffff,
716 0x000008f8, 0x00000029, 0xffffffff,
717 0x000008fc, 0x00000000, 0xffffffff,
718 0x000008f8, 0x0000002a, 0xffffffff,
719 0x000008fc, 0x00000000, 0xffffffff,
720 0x000008f8, 0x0000002b, 0xffffffff,
721 0x000008fc, 0x00000000, 0xffffffff
722};
723#define TURKS_CGCG_CGLS_DEFAULT_LENGTH sizeof(turks_cgcg_cgls_default) / (3 * sizeof(u32))
724
725static const u32 turks_cgcg_cgls_disable[] =
726{
727 0x000008f8, 0x00000010, 0xffffffff,
728 0x000008fc, 0xffffffff, 0xffffffff,
729 0x000008f8, 0x00000011, 0xffffffff,
730 0x000008fc, 0xffffffff, 0xffffffff,
731 0x000008f8, 0x00000012, 0xffffffff,
732 0x000008fc, 0xffffffff, 0xffffffff,
733 0x000008f8, 0x00000013, 0xffffffff,
734 0x000008fc, 0xffffffff, 0xffffffff,
735 0x000008f8, 0x00000014, 0xffffffff,
736 0x000008fc, 0xffffffff, 0xffffffff,
737 0x000008f8, 0x00000015, 0xffffffff,
738 0x000008fc, 0xffffffff, 0xffffffff,
739 0x000008f8, 0x00000016, 0xffffffff,
740 0x000008fc, 0xffffffff, 0xffffffff,
741 0x000008f8, 0x00000017, 0xffffffff,
742 0x000008fc, 0xffffffff, 0xffffffff,
743 0x000008f8, 0x00000018, 0xffffffff,
744 0x000008fc, 0xffffffff, 0xffffffff,
745 0x000008f8, 0x00000019, 0xffffffff,
746 0x000008fc, 0xffffffff, 0xffffffff,
747 0x000008f8, 0x0000001a, 0xffffffff,
748 0x000008fc, 0xffffffff, 0xffffffff,
749 0x000008f8, 0x0000001b, 0xffffffff,
750 0x000008fc, 0xffffffff, 0xffffffff,
751 0x000008f8, 0x00000020, 0xffffffff,
752 0x000008fc, 0x00000000, 0xffffffff,
753 0x000008f8, 0x00000021, 0xffffffff,
754 0x000008fc, 0x00000000, 0xffffffff,
755 0x000008f8, 0x00000022, 0xffffffff,
756 0x000008fc, 0x00000000, 0xffffffff,
757 0x000008f8, 0x00000023, 0xffffffff,
758 0x000008fc, 0x00000000, 0xffffffff,
759 0x000008f8, 0x00000024, 0xffffffff,
760 0x000008fc, 0x00000000, 0xffffffff,
761 0x000008f8, 0x00000025, 0xffffffff,
762 0x000008fc, 0x00000000, 0xffffffff,
763 0x000008f8, 0x00000026, 0xffffffff,
764 0x000008fc, 0x00000000, 0xffffffff,
765 0x000008f8, 0x00000027, 0xffffffff,
766 0x000008fc, 0x00000000, 0xffffffff,
767 0x000008f8, 0x00000028, 0xffffffff,
768 0x000008fc, 0x00000000, 0xffffffff,
769 0x000008f8, 0x00000029, 0xffffffff,
770 0x000008fc, 0x00000000, 0xffffffff,
771 0x000008f8, 0x0000002a, 0xffffffff,
772 0x000008fc, 0x00000000, 0xffffffff,
773 0x000008f8, 0x0000002b, 0xffffffff,
774 0x000008fc, 0x00000000, 0xffffffff,
775 0x00000644, 0x000f7912, 0x001f4180,
776 0x00000644, 0x000f3812, 0x001f4180
777};
778#define TURKS_CGCG_CGLS_DISABLE_LENGTH sizeof(turks_cgcg_cgls_disable) / (3 * sizeof(u32))
779
/*
 * Turks: coarse-grain clock gating (CGCG) / coarse-grain light sleep (CGLS)
 * enable sequence.  Consumed by btc_program_mgcg_hw_sequence() as
 * {register offset, value, mask} triplets.  Registers 0x8f8/0x8fc form an
 * index/data pair; indices 0x10-0x1b are written 0, indices 0x20-0x2b are
 * written all-ones.  Order is significant -- do not reorder entries.
 */
static const u32 turks_cgcg_cgls_enable[] =
{
	/* 0x0000c124, 0x84180000, 0x00180000, */
	0x00000644, 0x000f7892, 0x001f4080,
	0x000008f8, 0x00000010, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000011, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000012, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000013, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000014, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000015, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000016, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000017, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000018, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000019, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001a, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001b, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000020, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000021, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000022, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000023, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000024, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000025, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000026, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000027, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000028, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000029, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x0000002a, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x0000002b, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff
};
/* number of {reg, value, mask} triplets in the table above */
#define TURKS_CGCG_CGLS_ENABLE_LENGTH sizeof(turks_cgcg_cgls_enable) / (3 * sizeof(u32))
834
/*
 * Turks: medium-grain clock gating (MGCG) default/reset sequence,
 * {register offset, value, mask} triplets consumed by
 * btc_program_mgcg_hw_sequence().  Order is significant.
 */
static const u32 turks_mgcg_default[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x00005448, 0x00000100, 0xffffffff,
	0x000055e4, 0x00600100, 0xffffffff,
	0x0000160c, 0x00000100, 0xffffffff,
	0x0000c164, 0x00000100, 0xffffffff,
	0x00008a18, 0x00000100, 0xffffffff,
	0x0000897c, 0x06000100, 0xffffffff,
	0x00008b28, 0x00000100, 0xffffffff,
	0x00009144, 0x00000100, 0xffffffff,
	0x00009a60, 0x00000100, 0xffffffff,
	0x00009868, 0x00000100, 0xffffffff,
	0x00008d58, 0x00000100, 0xffffffff,
	0x00009510, 0x00000100, 0xffffffff,
	0x0000949c, 0x00000100, 0xffffffff,
	0x00009654, 0x00000100, 0xffffffff,
	0x00009030, 0x00000100, 0xffffffff,
	0x00009034, 0x00000100, 0xffffffff,
	0x00009038, 0x00000100, 0xffffffff,
	0x0000903c, 0x00000100, 0xffffffff,
	0x00009040, 0x00000100, 0xffffffff,
	0x0000a200, 0x00000100, 0xffffffff,
	0x0000a204, 0x00000100, 0xffffffff,
	0x0000a208, 0x00000100, 0xffffffff,
	0x0000a20c, 0x00000100, 0xffffffff,
	0x0000977c, 0x00000100, 0xffffffff,
	0x00003f80, 0x00000100, 0xffffffff,
	0x0000a210, 0x00000100, 0xffffffff,
	0x0000a214, 0x00000100, 0xffffffff,
	0x000004d8, 0x00000100, 0xffffffff,
	0x00009784, 0x00000100, 0xffffffff,
	0x00009698, 0x00000100, 0xffffffff,
	0x000004d4, 0x00000200, 0xffffffff,
	0x000004d0, 0x00000000, 0xffffffff,
	0x000030cc, 0x00000100, 0xffffffff,
	0x0000d0c0, 0x00000100, 0xffffffff,
	0x0000915c, 0x00010000, 0xffffffff,
	0x00009160, 0x00030002, 0xffffffff,
	0x00009164, 0x00050004, 0xffffffff,
	0x00009168, 0x00070006, 0xffffffff,
	0x00009178, 0x00070000, 0xffffffff,
	0x0000917c, 0x00030002, 0xffffffff,
	0x00009180, 0x00050004, 0xffffffff,
	0x0000918c, 0x00010006, 0xffffffff,
	0x00009190, 0x00090008, 0xffffffff,
	0x00009194, 0x00070000, 0xffffffff,
	0x00009198, 0x00030002, 0xffffffff,
	0x0000919c, 0x00050004, 0xffffffff,
	0x000091a8, 0x00010006, 0xffffffff,
	0x000091ac, 0x00090008, 0xffffffff,
	0x000091b0, 0x00070000, 0xffffffff,
	0x000091b4, 0x00030002, 0xffffffff,
	0x000091b8, 0x00050004, 0xffffffff,
	0x000091c4, 0x00010006, 0xffffffff,
	0x000091c8, 0x00090008, 0xffffffff,
	0x000091cc, 0x00070000, 0xffffffff,
	0x000091d0, 0x00030002, 0xffffffff,
	0x000091d4, 0x00050004, 0xffffffff,
	0x000091e0, 0x00010006, 0xffffffff,
	0x000091e4, 0x00090008, 0xffffffff,
	0x000091e8, 0x00000000, 0xffffffff,
	0x000091ec, 0x00070000, 0xffffffff,
	0x000091f0, 0x00030002, 0xffffffff,
	0x000091f4, 0x00050004, 0xffffffff,
	0x00009200, 0x00010006, 0xffffffff,
	0x00009204, 0x00090008, 0xffffffff,
	0x00009208, 0x00070000, 0xffffffff,
	0x0000920c, 0x00030002, 0xffffffff,
	0x00009210, 0x00050004, 0xffffffff,
	0x0000921c, 0x00010006, 0xffffffff,
	0x00009220, 0x00090008, 0xffffffff,
	0x00009294, 0x00000000, 0xffffffff,
	/* 0x8f8/0x8fc index/data pair: clear indices 0x10-0x1b */
	0x000008f8, 0x00000010, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000011, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000012, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000013, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000014, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000015, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000016, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000017, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000018, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000019, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001a, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001b, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff
};
/* number of {reg, value, mask} triplets in the table above */
#define TURKS_MGCG_DEFAULT_LENGTH sizeof(turks_mgcg_default) / (3 * sizeof(u32))
935
/*
 * Turks: MGCG disable sequence ({reg, value, mask} triplets for
 * btc_program_mgcg_hw_sequence()).  Sets 0x8f8/0x8fc indices 0-3 to
 * all-ones, then writes 0x9150 to turn gating off.
 */
static const u32 turks_mgcg_disable[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000000, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000001, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000002, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000003, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x00009150, 0x00600000, 0xffffffff
};
/* number of {reg, value, mask} triplets in the table above */
#define TURKS_MGCG_DISABLE_LENGTH sizeof(turks_mgcg_disable) / (3 * sizeof(u32))
950
/*
 * Turks: MGCG enable sequence ({reg, value, mask} triplets for
 * btc_program_mgcg_hw_sequence()).  Clears 0x8f8/0x8fc indices 0-3,
 * then writes 0x9150 to turn gating on.
 */
static const u32 turks_mgcg_enable[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000000, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000001, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000002, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000003, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x00009150, 0x6e944000, 0xffffffff
};
/* number of {reg, value, mask} triplets in the table above */
#define TURKS_MGCG_ENABLE_LENGTH sizeof(turks_mgcg_enable) / (3 * sizeof(u32))
965
966#endif
967
968#ifndef BTC_SYSLS_SEQUENCE
969#define BTC_SYSLS_SEQUENCE 100
970
971
/********** BARTS system light-sleep (SYSLS) sequences **********/

/*
 * Barts: SYSLS default/reset values, {register, value, mask} triplets
 * consumed by btc_program_mgcg_hw_sequence().
 */
static const u32 barts_sysls_default[] =
{
	/* Register,   Value,      Mask bits */
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00006dfc, 0x00000000, 0xffffffff
};
/* number of {reg, value, mask} triplets in the table above */
#define BARTS_SYSLS_DEFAULT_LENGTH sizeof(barts_sysls_default) / (3 * sizeof(u32))
993
/*
 * Barts: SYSLS disable sequence, {register, value, mask} triplets
 * consumed by btc_program_mgcg_hw_sequence().
 */
static const u32 barts_sysls_disable[] =
{
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x00041401, 0xffffffff,
	0x0000264c, 0x00040400, 0xffffffff,
	0x00002648, 0x00040400, 0xffffffff,
	0x00002650, 0x00040400, 0xffffffff,
	0x000020b8, 0x00040400, 0xffffffff,
	0x000020bc, 0x00040400, 0xffffffff,
	0x000020c0, 0x00040c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680000, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00007ffd, 0xffffffff,
	0x00000c7c, 0x0000ff00, 0xffffffff,
	0x00006dfc, 0x0000007f, 0xffffffff
};
/* number of {reg, value, mask} triplets in the table above */
#define BARTS_SYSLS_DISABLE_LENGTH sizeof(barts_sysls_disable) / (3 * sizeof(u32))
1013
/*
 * Barts: SYSLS enable sequence, {register, value, mask} triplets
 * consumed by btc_program_mgcg_hw_sequence().
 */
static const u32 barts_sysls_enable[] =
{
	0x000055e8, 0x00000001, 0xffffffff,
	0x0000d0bc, 0x00000100, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x000004c8, 0x00000000, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00006dfc, 0x00000000, 0xffffffff
};
/* number of {reg, value, mask} triplets in the table above */
#define BARTS_SYSLS_ENABLE_LENGTH sizeof(barts_sysls_enable) / (3 * sizeof(u32))
1033
/********** CAICOS system light-sleep (SYSLS) sequences **********/

/*
 * Caicos: SYSLS default/reset values, {register, value, mask} triplets
 * consumed by btc_program_mgcg_hw_sequence().  Note: unlike Barts/Turks,
 * Caicos has no 0x20c0 entry in this table.
 */
static const u32 caicos_sysls_default[] =
{
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00006dfc, 0x00000000, 0xffffffff
};
/* number of {reg, value, mask} triplets in the table above */
#define CAICOS_SYSLS_DEFAULT_LENGTH sizeof(caicos_sysls_default) / (3 * sizeof(u32))
1053
/*
 * Caicos: SYSLS disable sequence, {register, value, mask} triplets
 * consumed by btc_program_mgcg_hw_sequence().
 */
static const u32 caicos_sysls_disable[] =
{
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x00041401, 0xffffffff,
	0x0000264c, 0x00040400, 0xffffffff,
	0x00002648, 0x00040400, 0xffffffff,
	0x00002650, 0x00040400, 0xffffffff,
	0x000020b8, 0x00040400, 0xffffffff,
	0x000020bc, 0x00040400, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680000, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00007ffd, 0xffffffff,
	0x00000c7c, 0x0000ff00, 0xffffffff,
	0x00006dfc, 0x0000007f, 0xffffffff
};
/* number of {reg, value, mask} triplets in the table above */
#define CAICOS_SYSLS_DISABLE_LENGTH sizeof(caicos_sysls_disable) / (3 * sizeof(u32))
1072
/*
 * Caicos: SYSLS enable sequence, {register, value, mask} triplets
 * consumed by btc_program_mgcg_hw_sequence().  The 0x4c8 write is last
 * here (ordering differs from Barts/Turks enable tables).
 */
static const u32 caicos_sysls_enable[] =
{
	0x000055e8, 0x00000001, 0xffffffff,
	0x0000d0bc, 0x00000100, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00006dfc, 0x00000000, 0xffffffff,
	0x000004c8, 0x00000000, 0xffffffff
};
/* number of {reg, value, mask} triplets in the table above */
#define CAICOS_SYSLS_ENABLE_LENGTH sizeof(caicos_sysls_enable) / (3 * sizeof(u32))
1091
/********** TURKS system light-sleep (SYSLS) sequences **********/

/*
 * Turks: SYSLS default/reset values, {register, value, mask} triplets
 * consumed by btc_program_mgcg_hw_sequence().
 */
static const u32 turks_sysls_default[] =
{
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00006dfc, 0x00000000, 0xffffffff
};
/* number of {reg, value, mask} triplets in the table above */
#define TURKS_SYSLS_DEFAULT_LENGTH sizeof(turks_sysls_default) / (3 * sizeof(u32))
1112
/*
 * Turks: SYSLS disable sequence, {register, value, mask} triplets
 * consumed by btc_program_mgcg_hw_sequence().
 */
static const u32 turks_sysls_disable[] =
{
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x00041401, 0xffffffff,
	0x0000264c, 0x00040400, 0xffffffff,
	0x00002648, 0x00040400, 0xffffffff,
	0x00002650, 0x00040400, 0xffffffff,
	0x000020b8, 0x00040400, 0xffffffff,
	0x000020bc, 0x00040400, 0xffffffff,
	0x000020c0, 0x00040c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680000, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00007ffd, 0xffffffff,
	0x00000c7c, 0x0000ff00, 0xffffffff,
	0x00006dfc, 0x0000007f, 0xffffffff
};
/* number of {reg, value, mask} triplets in the table above */
#define TURKS_SYSLS_DISABLE_LENGTH sizeof(turks_sysls_disable) / (3 * sizeof(u32))
1132
/*
 * Turks: SYSLS enable sequence, {register, value, mask} triplets
 * consumed by btc_program_mgcg_hw_sequence().
 */
static const u32 turks_sysls_enable[] =
{
	0x000055e8, 0x00000001, 0xffffffff,
	0x0000d0bc, 0x00000100, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x000004c8, 0x00000000, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00006dfc, 0x00000000, 0xffffffff
};
/* number of {reg, value, mask} triplets in the table above */
#define TURKS_SYSLS_ENABLE_LENGTH sizeof(turks_sysls_enable) / (3 * sizeof(u32))
1152
1153#endif
1154
/*
 * Table of valid engine clock values (in 10 kHz units -- TODO confirm against
 * callers), strictly increasing in 5000 steps from 5000 to 200000.  Consumed
 * via btc_find_valid_clock() to round a requested sclk up to the nearest
 * supported value.
 *
 * Fix: entry 23 was "11500", breaking the monotonic 5000-step ladder between
 * 110000 and 120000; the intended value is clearly 115000.  A non-monotonic
 * entry would make btc_find_valid_clock()'s first->match>=requested scan
 * return 11500 for requests in (110000, 115000].
 */
u32 btc_valid_sclk[40] =
{
	5000,   10000,  15000,  20000,  25000,  30000,  35000,  40000,  45000,  50000,
	55000,  60000,  65000,  70000,  75000,  80000,  85000,  90000,  95000,  100000,
	105000, 110000, 115000, 120000, 125000, 130000, 135000, 140000, 145000, 150000,
	155000, 160000, 165000, 170000, 175000, 180000, 185000, 190000, 195000, 200000
};
1162
/*
 * {sclk, mclk, action} combinations known to be problematic.  Scanned by
 * btc_skip_blacklist_clocks(); RADEON_SCLK_UP tells it to bump the engine
 * clock to the next valid value above the blacklisted one.
 */
static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
{
	{ 10000, 30000, RADEON_SCLK_UP },
	{ 15000, 30000, RADEON_SCLK_UP },
	{ 20000, 30000, RADEON_SCLK_UP },
	{ 25000, 30000, RADEON_SCLK_UP }
};
1170
1171void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
1172 u32 clock, u16 max_voltage, u16 *voltage)
1173{
1174 u32 i;
1175
1176 if ((table == NULL) || (table->count == 0))
1177 return;
1178
1179 for (i= 0; i < table->count; i++) {
1180 if (clock <= table->entries[i].clk) {
1181 if (*voltage < table->entries[i].v)
1182 *voltage = (u16)((table->entries[i].v < max_voltage) ?
1183 table->entries[i].v : max_voltage);
1184 return;
1185 }
1186 }
1187
1188 *voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
1189}
1190
1191static u32 btc_find_valid_clock(struct radeon_clock_array *clocks,
1192 u32 max_clock, u32 requested_clock)
1193{
1194 unsigned int i;
1195
1196 if ((clocks == NULL) || (clocks->count == 0))
1197 return (requested_clock < max_clock) ? requested_clock : max_clock;
1198
1199 for (i = 0; i < clocks->count; i++) {
1200 if (clocks->values[i] >= requested_clock)
1201 return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
1202 }
1203
1204 return (clocks->values[clocks->count - 1] < max_clock) ?
1205 clocks->values[clocks->count - 1] : max_clock;
1206}
1207
1208static u32 btc_get_valid_mclk(struct radeon_device *rdev,
1209 u32 max_mclk, u32 requested_mclk)
1210{
1211 return btc_find_valid_clock(&rdev->pm.dpm.dyn_state.valid_mclk_values,
1212 max_mclk, requested_mclk);
1213}
1214
1215static u32 btc_get_valid_sclk(struct radeon_device *rdev,
1216 u32 max_sclk, u32 requested_sclk)
1217{
1218 return btc_find_valid_clock(&rdev->pm.dpm.dyn_state.valid_sclk_values,
1219 max_sclk, requested_sclk);
1220}
1221
1222void btc_skip_blacklist_clocks(struct radeon_device *rdev,
1223 const u32 max_sclk, const u32 max_mclk,
1224 u32 *sclk, u32 *mclk)
1225{
1226 int i, num_blacklist_clocks;
1227
1228 if ((sclk == NULL) || (mclk == NULL))
1229 return;
1230
1231 num_blacklist_clocks = ARRAY_SIZE(btc_blacklist_clocks);
1232
1233 for (i = 0; i < num_blacklist_clocks; i++) {
1234 if ((btc_blacklist_clocks[i].sclk == *sclk) &&
1235 (btc_blacklist_clocks[i].mclk == *mclk))
1236 break;
1237 }
1238
1239 if (i < num_blacklist_clocks) {
1240 if (btc_blacklist_clocks[i].action == RADEON_SCLK_UP) {
1241 *sclk = btc_get_valid_sclk(rdev, max_sclk, *sclk + 1);
1242
1243 if (*sclk < max_sclk)
1244 btc_skip_blacklist_clocks(rdev, max_sclk, max_mclk, sclk, mclk);
1245 }
1246 }
1247}
1248
1249void btc_adjust_clock_combinations(struct radeon_device *rdev,
1250 const struct radeon_clock_and_voltage_limits *max_limits,
1251 struct rv7xx_pl *pl)
1252{
1253
1254 if ((pl->mclk == 0) || (pl->sclk == 0))
1255 return;
1256
1257 if (pl->mclk == pl->sclk)
1258 return;
1259
1260 if (pl->mclk > pl->sclk) {
1261 if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > rdev->pm.dpm.dyn_state.mclk_sclk_ratio)
1262 pl->sclk = btc_get_valid_sclk(rdev,
1263 max_limits->sclk,
1264 (pl->mclk +
1265 (rdev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
1266 rdev->pm.dpm.dyn_state.mclk_sclk_ratio);
1267 } else {
1268 if ((pl->sclk - pl->mclk) > rdev->pm.dpm.dyn_state.sclk_mclk_delta)
1269 pl->mclk = btc_get_valid_mclk(rdev,
1270 max_limits->mclk,
1271 pl->sclk -
1272 rdev->pm.dpm.dyn_state.sclk_mclk_delta);
1273 }
1274}
1275
1276static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
1277{
1278 unsigned int i;
1279
1280 for (i = 0; i < table->count; i++) {
1281 if (voltage <= table->entries[i].value)
1282 return table->entries[i].value;
1283 }
1284
1285 return table->entries[table->count - 1].value;
1286}
1287
1288void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
1289 u16 max_vddc, u16 max_vddci,
1290 u16 *vddc, u16 *vddci)
1291{
1292 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1293 u16 new_voltage;
1294
1295 if ((0 == *vddc) || (0 == *vddci))
1296 return;
1297
1298 if (*vddc > *vddci) {
1299 if ((*vddc - *vddci) > rdev->pm.dpm.dyn_state.vddc_vddci_delta) {
1300 new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
1301 (*vddc - rdev->pm.dpm.dyn_state.vddc_vddci_delta));
1302 *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
1303 }
1304 } else {
1305 if ((*vddci - *vddc) > rdev->pm.dpm.dyn_state.vddc_vddci_delta) {
1306 new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
1307 (*vddci - rdev->pm.dpm.dyn_state.vddc_vddci_delta));
1308 *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
1309 }
1310 }
1311}
1312
/*
 * Enable/disable dynamic PCIE gen2 switching in the BIF.
 *
 * Skipped entirely if the card booted in gen2 mode (pi->boot_in_gen2).
 * On enable, both "link partner ever sent gen2" AND "supports gen2" must be
 * set; on disable, either is enough.  The enable path toggles
 * LC_CLR_FAILED_SPD_CHANGE_CNT around a 10 us delay -- this write/clear
 * ordering matters for the hardware, do not reorder.
 */
static void btc_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
					     bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 tmp, bif;

	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (enable) {
		if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
		    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
			if (!pi->boot_in_gen2) {
				/* route BIF clock requests through CG */
				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
				bif |= CG_CLIENT_REQ(0xd);
				WREG32(CG_BIF_REQ_AND_RSP, bif);

				tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
				tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
				tmp |= LC_GEN2_EN_STRAP;

				/* pulse the failed-speed-change counter clear */
				tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
				WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
				udelay(10);
				tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
				WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
			}
		}
	} else {
		if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
		    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
			if (!pi->boot_in_gen2) {
				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
				bif |= CG_CLIENT_REQ(0xd);
				WREG32(CG_BIF_REQ_AND_RSP, bif);

				tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
				tmp &= ~LC_GEN2_EN_STRAP;
			}
			/* NOTE(review): written even when boot_in_gen2 skipped
			 * the modifications above -- presumably intentional */
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
		}
	}
}
1354
1355static void btc_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
1356 bool enable)
1357{
1358 btc_enable_bif_dynamic_pcie_gen2(rdev, enable);
1359
1360 if (enable)
1361 WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
1362 else
1363 WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
1364}
1365
1366static int btc_disable_ulv(struct radeon_device *rdev)
1367{
1368 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1369
1370 if (eg_pi->ulv.supported) {
1371 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) != PPSMC_Result_OK)
1372 return -EINVAL;
1373 }
1374 return 0;
1375}
1376
/*
 * Fill the SMC state table's ULV state from the driver's ULV power level
 * and program the ULV control/parameter registers.
 *
 * Returns 0 on success; -EINVAL if the ULV level has no vddc, or the
 * conversion error from cypress_convert_power_level_to_smc().  Level 0 is
 * converted and then duplicated into levels 1 and 2 so all three SMC levels
 * describe the same ULV operating point.
 */
static int btc_populate_ulv_state(struct radeon_device *rdev,
				  RV770_SMC_STATETABLE *table)
{
	int ret = -EINVAL;
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl;

	if (ulv_pl->vddc) {
		ret = cypress_convert_power_level_to_smc(rdev,
							 ulv_pl,
							 &table->ULVState.levels[0],
							 PPSMC_DISPLAY_WATERMARK_LOW);
		if (ret == 0) {
			/* ULV uses the ARB F0 timing set and AC table index 1 */
			table->ULVState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
			table->ULVState.levels[0].ACIndex = 1;

			/* replicate the single ULV level across all 3 slots */
			table->ULVState.levels[1] = table->ULVState.levels[0];
			table->ULVState.levels[2] = table->ULVState.levels[0];

			table->ULVState.flags |= PPSMC_SWSTATE_FLAG_DC;

			/* program ULV entry control/timing defaults */
			WREG32(CG_ULV_CONTROL, BTC_CGULVCONTROL_DFLT);
			WREG32(CG_ULV_PARAMETER, BTC_CGULVPARAMETER_DFLT);
		}
	}

	return ret;
}
1405
1406static int btc_populate_smc_acpi_state(struct radeon_device *rdev,
1407 RV770_SMC_STATETABLE *table)
1408{
1409 int ret = cypress_populate_smc_acpi_state(rdev, table);
1410
1411 if (ret == 0) {
1412 table->ACPIState.levels[0].ACIndex = 0;
1413 table->ACPIState.levels[1].ACIndex = 0;
1414 table->ACPIState.levels[2].ACIndex = 0;
1415 }
1416
1417 return ret;
1418}
1419
1420void btc_program_mgcg_hw_sequence(struct radeon_device *rdev,
1421 const u32 *sequence, u32 count)
1422{
1423 u32 i, length = count * 3;
1424 u32 tmp;
1425
1426 for (i = 0; i < length; i+=3) {
1427 tmp = RREG32(sequence[i]);
1428 tmp &= ~sequence[i+2];
1429 tmp |= sequence[i+1] & sequence[i+2];
1430 WREG32(sequence[i], tmp);
1431 }
1432}
1433
1434static void btc_cg_clock_gating_default(struct radeon_device *rdev)
1435{
1436 u32 count;
1437 const u32 *p = NULL;
1438
1439 if (rdev->family == CHIP_BARTS) {
1440 p = (const u32 *)&barts_cgcg_cgls_default;
1441 count = BARTS_CGCG_CGLS_DEFAULT_LENGTH;
1442 } else if (rdev->family == CHIP_TURKS) {
1443 p = (const u32 *)&turks_cgcg_cgls_default;
1444 count = TURKS_CGCG_CGLS_DEFAULT_LENGTH;
1445 } else if (rdev->family == CHIP_CAICOS) {
1446 p = (const u32 *)&caicos_cgcg_cgls_default;
1447 count = CAICOS_CGCG_CGLS_DEFAULT_LENGTH;
1448 } else
1449 return;
1450
1451 btc_program_mgcg_hw_sequence(rdev, p, count);
1452}
1453
1454static void btc_cg_clock_gating_enable(struct radeon_device *rdev,
1455 bool enable)
1456{
1457 u32 count;
1458 const u32 *p = NULL;
1459
1460 if (enable) {
1461 if (rdev->family == CHIP_BARTS) {
1462 p = (const u32 *)&barts_cgcg_cgls_enable;
1463 count = BARTS_CGCG_CGLS_ENABLE_LENGTH;
1464 } else if (rdev->family == CHIP_TURKS) {
1465 p = (const u32 *)&turks_cgcg_cgls_enable;
1466 count = TURKS_CGCG_CGLS_ENABLE_LENGTH;
1467 } else if (rdev->family == CHIP_CAICOS) {
1468 p = (const u32 *)&caicos_cgcg_cgls_enable;
1469 count = CAICOS_CGCG_CGLS_ENABLE_LENGTH;
1470 } else
1471 return;
1472 } else {
1473 if (rdev->family == CHIP_BARTS) {
1474 p = (const u32 *)&barts_cgcg_cgls_disable;
1475 count = BARTS_CGCG_CGLS_DISABLE_LENGTH;
1476 } else if (rdev->family == CHIP_TURKS) {
1477 p = (const u32 *)&turks_cgcg_cgls_disable;
1478 count = TURKS_CGCG_CGLS_DISABLE_LENGTH;
1479 } else if (rdev->family == CHIP_CAICOS) {
1480 p = (const u32 *)&caicos_cgcg_cgls_disable;
1481 count = CAICOS_CGCG_CGLS_DISABLE_LENGTH;
1482 } else
1483 return;
1484 }
1485
1486 btc_program_mgcg_hw_sequence(rdev, p, count);
1487}
1488
1489static void btc_mg_clock_gating_default(struct radeon_device *rdev)
1490{
1491 u32 count;
1492 const u32 *p = NULL;
1493
1494 if (rdev->family == CHIP_BARTS) {
1495 p = (const u32 *)&barts_mgcg_default;
1496 count = BARTS_MGCG_DEFAULT_LENGTH;
1497 } else if (rdev->family == CHIP_TURKS) {
1498 p = (const u32 *)&turks_mgcg_default;
1499 count = TURKS_MGCG_DEFAULT_LENGTH;
1500 } else if (rdev->family == CHIP_CAICOS) {
1501 p = (const u32 *)&caicos_mgcg_default;
1502 count = CAICOS_MGCG_DEFAULT_LENGTH;
1503 } else
1504 return;
1505
1506 btc_program_mgcg_hw_sequence(rdev, p, count);
1507}
1508
1509static void btc_mg_clock_gating_enable(struct radeon_device *rdev,
1510 bool enable)
1511{
1512 u32 count;
1513 const u32 *p = NULL;
1514
1515 if (enable) {
1516 if (rdev->family == CHIP_BARTS) {
1517 p = (const u32 *)&barts_mgcg_enable;
1518 count = BARTS_MGCG_ENABLE_LENGTH;
1519 } else if (rdev->family == CHIP_TURKS) {
1520 p = (const u32 *)&turks_mgcg_enable;
1521 count = TURKS_MGCG_ENABLE_LENGTH;
1522 } else if (rdev->family == CHIP_CAICOS) {
1523 p = (const u32 *)&caicos_mgcg_enable;
1524 count = CAICOS_MGCG_ENABLE_LENGTH;
1525 } else
1526 return;
1527 } else {
1528 if (rdev->family == CHIP_BARTS) {
1529 p = (const u32 *)&barts_mgcg_disable[0];
1530 count = BARTS_MGCG_DISABLE_LENGTH;
1531 } else if (rdev->family == CHIP_TURKS) {
1532 p = (const u32 *)&turks_mgcg_disable[0];
1533 count = TURKS_MGCG_DISABLE_LENGTH;
1534 } else if (rdev->family == CHIP_CAICOS) {
1535 p = (const u32 *)&caicos_mgcg_disable[0];
1536 count = CAICOS_MGCG_DISABLE_LENGTH;
1537 } else
1538 return;
1539 }
1540
1541 btc_program_mgcg_hw_sequence(rdev, p, count);
1542}
1543
1544static void btc_ls_clock_gating_default(struct radeon_device *rdev)
1545{
1546 u32 count;
1547 const u32 *p = NULL;
1548
1549 if (rdev->family == CHIP_BARTS) {
1550 p = (const u32 *)&barts_sysls_default;
1551 count = BARTS_SYSLS_DEFAULT_LENGTH;
1552 } else if (rdev->family == CHIP_TURKS) {
1553 p = (const u32 *)&turks_sysls_default;
1554 count = TURKS_SYSLS_DEFAULT_LENGTH;
1555 } else if (rdev->family == CHIP_CAICOS) {
1556 p = (const u32 *)&caicos_sysls_default;
1557 count = CAICOS_SYSLS_DEFAULT_LENGTH;
1558 } else
1559 return;
1560
1561 btc_program_mgcg_hw_sequence(rdev, p, count);
1562}
1563
1564static void btc_ls_clock_gating_enable(struct radeon_device *rdev,
1565 bool enable)
1566{
1567 u32 count;
1568 const u32 *p = NULL;
1569
1570 if (enable) {
1571 if (rdev->family == CHIP_BARTS) {
1572 p = (const u32 *)&barts_sysls_enable;
1573 count = BARTS_SYSLS_ENABLE_LENGTH;
1574 } else if (rdev->family == CHIP_TURKS) {
1575 p = (const u32 *)&turks_sysls_enable;
1576 count = TURKS_SYSLS_ENABLE_LENGTH;
1577 } else if (rdev->family == CHIP_CAICOS) {
1578 p = (const u32 *)&caicos_sysls_enable;
1579 count = CAICOS_SYSLS_ENABLE_LENGTH;
1580 } else
1581 return;
1582 } else {
1583 if (rdev->family == CHIP_BARTS) {
1584 p = (const u32 *)&barts_sysls_disable;
1585 count = BARTS_SYSLS_DISABLE_LENGTH;
1586 } else if (rdev->family == CHIP_TURKS) {
1587 p = (const u32 *)&turks_sysls_disable;
1588 count = TURKS_SYSLS_DISABLE_LENGTH;
1589 } else if (rdev->family == CHIP_CAICOS) {
1590 p = (const u32 *)&caicos_sysls_disable;
1591 count = CAICOS_SYSLS_DISABLE_LENGTH;
1592 } else
1593 return;
1594 }
1595
1596 btc_program_mgcg_hw_sequence(rdev, p, count);
1597}
1598
1599bool btc_dpm_enabled(struct radeon_device *rdev)
1600{
1601 if (rv770_is_smc_running(rdev))
1602 return true;
1603 else
1604 return false;
1605}
1606
/*
 * Build the SMC state table from the boot state and upload it to the SMC.
 *
 * Sequence (order matters -- each step builds on the previous table
 * contents): zero the table, fill voltage tables, pick a thermal-protection
 * type from the sensor type, translate platform caps into system flags,
 * convert the boot state into the initial state, populate the ACPI state,
 * optionally populate the ULV state (ULV support is dropped on failure
 * rather than failing the whole init), alias driverState to initialState,
 * then copy the table into SMC RAM.
 *
 * Returns 0 on success or a negative error from the populate/copy helpers.
 */
static int btc_init_smc_table(struct radeon_device *rdev,
			      struct radeon_ps *radeon_boot_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	RV770_SMC_STATETABLE *table = &pi->smc_statetable;
	int ret;

	memset(table, 0, sizeof(RV770_SMC_STATETABLE));

	cypress_populate_smc_voltage_tables(rdev, table);

	/* map the board's thermal sensor to an SMC protection scheme */
	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
		break;
	case THERMAL_TYPE_NONE:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
		break;
	default:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
		break;
	}

	/* translate ATOM platform capability bits into SMC system flags */
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (pi->mem_gddr5)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	ret = cypress_populate_smc_initial_state(rdev, radeon_boot_state, table);
	if (ret)
		return ret;

	if (eg_pi->sclk_deep_sleep)
		/* allow sclk to stop after 32 "allow" cycles in deep sleep */
		WREG32_P(SCLK_PSKIP_CNTL, PSKIP_ON_ALLOW_STOP_HI(32),
			 ~PSKIP_ON_ALLOW_STOP_HI_MASK);

	ret = btc_populate_smc_acpi_state(rdev, table);
	if (ret)
		return ret;

	if (eg_pi->ulv.supported) {
		/* ULV is best-effort: on failure just disable support */
		ret = btc_populate_ulv_state(rdev, table);
		if (ret)
			eg_pi->ulv.supported = false;
	}

	table->driverState = table->initialState;

	return rv770_copy_bytes_to_smc(rdev,
				       pi->state_table_start,
				       (u8 *)table,
				       sizeof(RV770_SMC_STATETABLE),
				       pi->sram_end);
}
1670
1671static void btc_set_at_for_uvd(struct radeon_device *rdev,
1672 struct radeon_ps *radeon_new_state)
1673{
1674 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1675 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1676 int idx = 0;
1677
1678 if (r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2))
1679 idx = 1;
1680
1681 if ((idx == 1) && !eg_pi->smu_uvd_hs) {
1682 pi->rlp = 10;
1683 pi->rmp = 100;
1684 pi->lhp = 100;
1685 pi->lmp = 10;
1686 } else {
1687 pi->rlp = eg_pi->ats[idx].rlp;
1688 pi->rmp = eg_pi->ats[idx].rmp;
1689 pi->lhp = eg_pi->ats[idx].lhp;
1690 pi->lmp = eg_pi->ats[idx].lmp;
1691 }
1692
1693}
1694
1695void btc_notify_uvd_to_smc(struct radeon_device *rdev,
1696 struct radeon_ps *radeon_new_state)
1697{
1698 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1699
1700 if (r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
1701 rv770_write_smc_soft_register(rdev,
1702 RV770_SMC_SOFT_REGISTER_uvd_enabled, 1);
1703 eg_pi->uvd_enabled = true;
1704 } else {
1705 rv770_write_smc_soft_register(rdev,
1706 RV770_SMC_SOFT_REGISTER_uvd_enabled, 0);
1707 eg_pi->uvd_enabled = false;
1708 }
1709}
1710
1711int btc_reset_to_default(struct radeon_device *rdev)
1712{
1713 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) != PPSMC_Result_OK)
1714 return -EINVAL;
1715
1716 return 0;
1717}
1718
/*
 * Stop the SMC.  First poll (up to rdev->usec_timeout us) for the
 * LB_SYNC_RESET_SEL field to leave value 1, then wait a further 100 us
 * before actually halting the SMC -- the settle delays are timing
 * requirements, do not remove or reorder them.
 */
static void btc_stop_smc(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (((RREG32(LB_SYNC_RESET_SEL) & LB_SYNC_RESET_SEL_MASK) >> LB_SYNC_RESET_SEL_SHIFT) != 1)
			break;
		udelay(1);
	}
	udelay(100);

	r7xx_stop_smc(rdev);
}
1732
1733void btc_read_arb_registers(struct radeon_device *rdev)
1734{
1735 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1736 struct evergreen_arb_registers *arb_registers =
1737 &eg_pi->bootup_arb_registers;
1738
1739 arb_registers->mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
1740 arb_registers->mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
1741 arb_registers->mc_arb_rfsh_rate = RREG32(MC_ARB_RFSH_RATE);
1742 arb_registers->mc_arb_burst_time = RREG32(MC_ARB_BURST_TIME);
1743}
1744
1745
/*
 * Restore a saved MC arbiter configuration into arbiter slot 0: the DRAM
 * timing registers are written whole, while only the POWERMODE0 field of
 * the refresh rate and the STATE0 field of the burst time are updated
 * (other power modes/states are left untouched).
 */
static void btc_set_arb0_registers(struct radeon_device *rdev,
				   struct evergreen_arb_registers *arb_registers)
{
	u32 val;

	WREG32(MC_ARB_DRAM_TIMING,  arb_registers->mc_arb_dram_timing);
	WREG32(MC_ARB_DRAM_TIMING2, arb_registers->mc_arb_dram_timing2);

	/* extract the saved POWERMODE0 field and rewrite just that field */
	val = (arb_registers->mc_arb_rfsh_rate & POWERMODE0_MASK) >>
		POWERMODE0_SHIFT;
	WREG32_P(MC_ARB_RFSH_RATE, POWERMODE0(val), ~POWERMODE0_MASK);

	/* likewise for the STATE0 burst-time field */
	val = (arb_registers->mc_arb_burst_time & STATE0_MASK) >>
		STATE0_SHIFT;
	WREG32_P(MC_ARB_BURST_TIME, STATE0(val), ~STATE0_MASK);
}
1762
1763static void btc_set_boot_state_timing(struct radeon_device *rdev)
1764{
1765 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1766
1767 if (eg_pi->ulv.supported)
1768 btc_set_arb0_registers(rdev, &eg_pi->bootup_arb_registers);
1769}
1770
1771static bool btc_is_state_ulv_compatible(struct radeon_device *rdev,
1772 struct radeon_ps *radeon_state)
1773{
1774 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
1775 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1776 struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl;
1777
1778 if (state->low.mclk != ulv_pl->mclk)
1779 return false;
1780
1781 if (state->low.vddci != ulv_pl->vddci)
1782 return false;
1783
1784 /* XXX check minclocks, etc. */
1785
1786 return true;
1787}
1788
1789
/*
 * Program ARB slot 0 with the DRAM timing for the ULV power level: let the
 * ATOM tables set engine/memory timings for the ULV clocks, then compute
 * and write the matching refresh rate (POWERMODE0) and burst time (STATE0)
 * fields.  Always returns 0.
 */
static int btc_set_ulv_dram_timing(struct radeon_device *rdev)
{
	u32 val;
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl;

	radeon_atom_set_engine_dram_timings(rdev,
					    ulv_pl->sclk,
					    ulv_pl->mclk);

	/* refresh rate derived from the ULV engine clock */
	val = rv770_calculate_memory_refresh_rate(rdev, ulv_pl->sclk);
	WREG32_P(MC_ARB_RFSH_RATE, POWERMODE0(val), ~POWERMODE0_MASK);

	/* burst time derived from the ULV sclk/mclk pair */
	val = cypress_calculate_burst_time(rdev, ulv_pl->sclk, ulv_pl->mclk);
	WREG32_P(MC_ARB_BURST_TIME, STATE0(val), ~STATE0_MASK);

	return 0;
}
1808
1809static int btc_enable_ulv(struct radeon_device *rdev)
1810{
1811 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) != PPSMC_Result_OK)
1812 return -EINVAL;
1813
1814 return 0;
1815}
1816
1817static int btc_set_power_state_conditionally_enable_ulv(struct radeon_device *rdev,
1818 struct radeon_ps *radeon_new_state)
1819{
1820 int ret = 0;
1821 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1822
1823 if (eg_pi->ulv.supported) {
1824 if (btc_is_state_ulv_compatible(rdev, radeon_new_state)) {
1825 // Set ARB[0] to reflect the DRAM timing needed for ULV.
1826 ret = btc_set_ulv_dram_timing(rdev);
1827 if (ret == 0)
1828 ret = btc_enable_ulv(rdev);
1829 }
1830 }
1831
1832 return ret;
1833}
1834
1835static bool btc_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
1836{
1837 bool result = true;
1838
1839 switch (in_reg) {
1840 case MC_SEQ_RAS_TIMING >> 2:
1841 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
1842 break;
1843 case MC_SEQ_CAS_TIMING >> 2:
1844 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
1845 break;
1846 case MC_SEQ_MISC_TIMING >> 2:
1847 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
1848 break;
1849 case MC_SEQ_MISC_TIMING2 >> 2:
1850 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
1851 break;
1852 case MC_SEQ_RD_CTL_D0 >> 2:
1853 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
1854 break;
1855 case MC_SEQ_RD_CTL_D1 >> 2:
1856 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
1857 break;
1858 case MC_SEQ_WR_CTL_D0 >> 2:
1859 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
1860 break;
1861 case MC_SEQ_WR_CTL_D1 >> 2:
1862 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
1863 break;
1864 case MC_PMG_CMD_EMRS >> 2:
1865 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
1866 break;
1867 case MC_PMG_CMD_MRS >> 2:
1868 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
1869 break;
1870 case MC_PMG_CMD_MRS1 >> 2:
1871 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
1872 break;
1873 default:
1874 result = false;
1875 break;
1876 }
1877
1878 return result;
1879}
1880
1881static void btc_set_valid_flag(struct evergreen_mc_reg_table *table)
1882{
1883 u8 i, j;
1884
1885 for (i = 0; i < table->last; i++) {
1886 for (j = 1; j < table->num_entries; j++) {
1887 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
1888 table->mc_reg_table_entry[j].mc_data[i]) {
1889 table->valid_flag |= (1 << i);
1890 break;
1891 }
1892 }
1893 }
1894}
1895
1896static int btc_set_mc_special_registers(struct radeon_device *rdev,
1897 struct evergreen_mc_reg_table *table)
1898{
1899 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1900 u8 i, j, k;
1901 u32 tmp;
1902
1903 for (i = 0, j = table->last; i < table->last; i++) {
1904 switch (table->mc_reg_address[i].s1) {
1905 case MC_SEQ_MISC1 >> 2:
1906 tmp = RREG32(MC_PMG_CMD_EMRS);
1907 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
1908 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
1909 for (k = 0; k < table->num_entries; k++) {
1910 table->mc_reg_table_entry[k].mc_data[j] =
1911 ((tmp & 0xffff0000)) |
1912 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
1913 }
1914 j++;
1915
1916 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1917 return -EINVAL;
1918
1919 tmp = RREG32(MC_PMG_CMD_MRS);
1920 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
1921 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
1922 for (k = 0; k < table->num_entries; k++) {
1923 table->mc_reg_table_entry[k].mc_data[j] =
1924 (tmp & 0xffff0000) |
1925 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
1926 if (!pi->mem_gddr5)
1927 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
1928 }
1929 j++;
1930
1931 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1932 return -EINVAL;
1933 break;
1934 case MC_SEQ_RESERVE_M >> 2:
1935 tmp = RREG32(MC_PMG_CMD_MRS1);
1936 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
1937 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
1938 for (k = 0; k < table->num_entries; k++) {
1939 table->mc_reg_table_entry[k].mc_data[j] =
1940 (tmp & 0xffff0000) |
1941 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
1942 }
1943 j++;
1944
1945 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1946 return -EINVAL;
1947 break;
1948 default:
1949 break;
1950 }
1951 }
1952
1953 table->last = j;
1954
1955 return 0;
1956}
1957
1958static void btc_set_s0_mc_reg_index(struct evergreen_mc_reg_table *table)
1959{
1960 u32 i;
1961 u16 address;
1962
1963 for (i = 0; i < table->last; i++) {
1964 table->mc_reg_address[i].s0 =
1965 btc_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
1966 address : table->mc_reg_address[i].s1;
1967 }
1968}
1969
1970static int btc_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
1971 struct evergreen_mc_reg_table *eg_table)
1972{
1973 u8 i, j;
1974
1975 if (table->last > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1976 return -EINVAL;
1977
1978 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
1979 return -EINVAL;
1980
1981 for (i = 0; i < table->last; i++)
1982 eg_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
1983 eg_table->last = table->last;
1984
1985 for (i = 0; i < table->num_entries; i++) {
1986 eg_table->mc_reg_table_entry[i].mclk_max =
1987 table->mc_reg_table_entry[i].mclk_max;
1988 for(j = 0; j < table->last; j++)
1989 eg_table->mc_reg_table_entry[i].mc_data[j] =
1990 table->mc_reg_table_entry[i].mc_data[j];
1991 }
1992 eg_table->num_entries = table->num_entries;
1993
1994 return 0;
1995}
1996
/*
 * Build the MC register table used for dynamic AC timing:
 *  1. mirror the live MC timing registers into their _LP shadows
 *     (VBIOS no longer programs those),
 *  2. read the VBIOS MC register table for the installed module,
 *  3. copy it into the driver table, assign _LP (s0) indices, append
 *     the derived special registers and compute the valid-column mask.
 *
 * Returns 0 on success, -ENOMEM if the temporary table allocation
 * fails, or the error from a parsing/translation step.
 */
static int btc_initialize_mc_reg_table(struct radeon_device *rdev)
{
	int ret;
	struct atom_mc_reg_table *table;
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct evergreen_mc_reg_table *eg_table = &eg_pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(rdev);

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* Program additional LP registers that are no longer programmed by VBIOS */
	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));

	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);

	if (ret)
		goto init_mc_done;

	ret = btc_copy_vbios_mc_reg_table(table, eg_table);

	if (ret)
		goto init_mc_done;

	btc_set_s0_mc_reg_index(eg_table);
	ret = btc_set_mc_special_registers(rdev, eg_table);

	if (ret)
		goto init_mc_done;

	btc_set_valid_flag(eg_table);

init_mc_done:
	/* temporary VBIOS table is always freed, success or failure */
	kfree(table);

	return ret;
}
2045
2046static void btc_init_stutter_mode(struct radeon_device *rdev)
2047{
2048 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2049 u32 tmp;
2050
2051 if (pi->mclk_stutter_mode_threshold) {
2052 if (pi->mem_gddr5) {
2053 tmp = RREG32(MC_PMG_AUTO_CFG);
2054 if ((0x200 & tmp) == 0) {
2055 tmp = (tmp & 0xfffffc0b) | 0x204;
2056 WREG32(MC_PMG_AUTO_CFG, tmp);
2057 }
2058 }
2059 }
2060}
2061
/*
 * Clamp and adjust a power state so every performance level satisfies
 * the platform limits and the voltage dependency tables before it is
 * handed to the SMC:
 *  - on DC power, clamp all three levels to the DC limits,
 *  - when more than one crtc is active, disable mclk switching by
 *    forcing a single mclk/vddci across all levels,
 *  - enforce low <= medium <= high ordering for sclk/mclk/vddc/vddci,
 *  - apply blacklist, clock-combination, voltage-dependency and
 *    voltage-delta rules, and finally derive dc_compatible and the
 *    PCIE gen2 flags.
 */
static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
					 struct radeon_ps *rps)
{
	struct rv7xx_ps *ps = rv770_get_ps(rps);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 mclk, sclk;
	u16 vddc, vddci;

	/* mclk switching glitches with multiple active displays */
	if (rdev->pm.dpm.new_active_crtc_count > 1)
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	/* on battery, clamp every level to the DC limits */
	if (rdev->pm.dpm.ac_power == false) {
		if (ps->high.mclk > max_limits->mclk)
			ps->high.mclk = max_limits->mclk;
		if (ps->high.sclk > max_limits->sclk)
			ps->high.sclk = max_limits->sclk;
		if (ps->high.vddc > max_limits->vddc)
			ps->high.vddc = max_limits->vddc;
		if (ps->high.vddci > max_limits->vddci)
			ps->high.vddci = max_limits->vddci;

		if (ps->medium.mclk > max_limits->mclk)
			ps->medium.mclk = max_limits->mclk;
		if (ps->medium.sclk > max_limits->sclk)
			ps->medium.sclk = max_limits->sclk;
		if (ps->medium.vddc > max_limits->vddc)
			ps->medium.vddc = max_limits->vddc;
		if (ps->medium.vddci > max_limits->vddci)
			ps->medium.vddci = max_limits->vddci;

		if (ps->low.mclk > max_limits->mclk)
			ps->low.mclk = max_limits->mclk;
		if (ps->low.sclk > max_limits->sclk)
			ps->low.sclk = max_limits->sclk;
		if (ps->low.vddc > max_limits->vddc)
			ps->low.vddc = max_limits->vddc;
		if (ps->low.vddci > max_limits->vddci)
			ps->low.vddci = max_limits->vddci;
	}

	/* XXX validate the min clocks required for display */

	/* pick the base values for the (possibly rewritten) low level:
	 * when mclk switching is disabled, the low level must already run
	 * at the high level's mclk/vddci.
	 */
	if (disable_mclk_switching) {
		sclk = ps->low.sclk;
		mclk = ps->high.mclk;
		vddc = ps->low.vddc;
		vddci = ps->high.vddci;
	} else {
		sclk = ps->low.sclk;
		mclk = ps->low.mclk;
		vddc = ps->low.vddc;
		vddci = ps->low.vddci;
	}

	/* adjusted low state */
	ps->low.sclk = sclk;
	ps->low.mclk = mclk;
	ps->low.vddc = vddc;
	ps->low.vddci = vddci;

	btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
				  &ps->low.sclk, &ps->low.mclk);

	/* adjusted medium, high states: enforce monotonic sclk/vddc */
	if (ps->medium.sclk < ps->low.sclk)
		ps->medium.sclk = ps->low.sclk;
	if (ps->medium.vddc < ps->low.vddc)
		ps->medium.vddc = ps->low.vddc;
	if (ps->high.sclk < ps->medium.sclk)
		ps->high.sclk = ps->medium.sclk;
	if (ps->high.vddc < ps->medium.vddc)
		ps->high.vddc = ps->medium.vddc;

	if (disable_mclk_switching) {
		/* force one mclk/vddci (the max of the three) on all levels */
		mclk = ps->low.mclk;
		if (mclk < ps->medium.mclk)
			mclk = ps->medium.mclk;
		if (mclk < ps->high.mclk)
			mclk = ps->high.mclk;
		ps->low.mclk = mclk;
		ps->low.vddci = vddci;
		ps->medium.mclk = mclk;
		ps->medium.vddci = vddci;
		ps->high.mclk = mclk;
		ps->high.vddci = vddci;
	} else {
		/* enforce monotonic mclk/vddci */
		if (ps->medium.mclk < ps->low.mclk)
			ps->medium.mclk = ps->low.mclk;
		if (ps->medium.vddci < ps->low.vddci)
			ps->medium.vddci = ps->low.vddci;
		if (ps->high.mclk < ps->medium.mclk)
			ps->high.mclk = ps->medium.mclk;
		if (ps->high.vddci < ps->medium.vddci)
			ps->high.vddci = ps->medium.vddci;
	}

	btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
				  &ps->medium.sclk, &ps->medium.mclk);
	btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
				  &ps->high.sclk, &ps->high.mclk);

	btc_adjust_clock_combinations(rdev, max_limits, &ps->low);
	btc_adjust_clock_combinations(rdev, max_limits, &ps->medium);
	btc_adjust_clock_combinations(rdev, max_limits, &ps->high);

	/* raise voltages to satisfy the dependency tables for each level */
	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
					   ps->low.sclk, max_limits->vddc, &ps->low.vddc);
	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
					   ps->low.mclk, max_limits->vddci, &ps->low.vddci);
	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
					   ps->low.mclk, max_limits->vddc, &ps->low.vddc);
	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
					   rdev->clock.current_dispclk, max_limits->vddc, &ps->low.vddc);

	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
					   ps->medium.sclk, max_limits->vddc, &ps->medium.vddc);
	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
					   ps->medium.mclk, max_limits->vddci, &ps->medium.vddci);
	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
					   ps->medium.mclk, max_limits->vddc, &ps->medium.vddc);
	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
					   rdev->clock.current_dispclk, max_limits->vddc, &ps->medium.vddc);

	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
					   ps->high.sclk, max_limits->vddc, &ps->high.vddc);
	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
					   ps->high.mclk, max_limits->vddci, &ps->high.vddci);
	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
					   ps->high.mclk, max_limits->vddc, &ps->high.vddc);
	btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
					   rdev->clock.current_dispclk, max_limits->vddc, &ps->high.vddc);

	btc_apply_voltage_delta_rules(rdev, max_limits->vddc, max_limits->vddci,
				      &ps->low.vddc, &ps->low.vddci);
	btc_apply_voltage_delta_rules(rdev, max_limits->vddc, max_limits->vddci,
				      &ps->medium.vddc, &ps->medium.vddci);
	btc_apply_voltage_delta_rules(rdev, max_limits->vddc, max_limits->vddci,
				      &ps->high.vddc, &ps->high.vddci);

	/* state is DC compatible only if all levels fit the DC vddc limit */
	if ((ps->high.vddc <= rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc) &&
	    (ps->medium.vddc <= rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc) &&
	    (ps->low.vddc <= rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc))
		ps->dc_compatible = true;
	else
		ps->dc_compatible = false;

	/* pcie gen2 needs a minimum vddc; strip the flag below it */
	if (ps->low.vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
		ps->low.flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
	if (ps->medium.vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
		ps->medium.flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
	if (ps->high.vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
		ps->high.flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
}
2223
2224static void btc_update_current_ps(struct radeon_device *rdev,
2225 struct radeon_ps *rps)
2226{
2227 struct rv7xx_ps *new_ps = rv770_get_ps(rps);
2228 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2229
2230 eg_pi->current_rps = *rps;
2231 eg_pi->current_ps = *new_ps;
2232 eg_pi->current_rps.ps_priv = &eg_pi->current_ps;
2233}
2234
2235static void btc_update_requested_ps(struct radeon_device *rdev,
2236 struct radeon_ps *rps)
2237{
2238 struct rv7xx_ps *new_ps = rv770_get_ps(rps);
2239 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2240
2241 eg_pi->requested_rps = *rps;
2242 eg_pi->requested_ps = *new_ps;
2243 eg_pi->requested_rps.ps_priv = &eg_pi->requested_ps;
2244}
2245
/*
 * Return the asic to its boot dpm configuration: drop performance
 * levels, leave ULV, restore the boot ARB timing, then select the
 * boot state.  This mirrors the front half of btc_dpm_set_power_state().
 */
void btc_dpm_reset_asic(struct radeon_device *rdev)
{
	rv770_restrict_performance_levels_before_switch(rdev);
	btc_disable_ulv(rdev);
	btc_set_boot_state_timing(rdev);
	rv770_set_boot_state(rdev);
}
2253
2254int btc_dpm_pre_set_power_state(struct radeon_device *rdev)
2255{
2256 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2257 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
2258 struct radeon_ps *new_ps = &requested_ps;
2259
2260 btc_update_requested_ps(rdev, new_ps);
2261
2262 btc_apply_state_adjust_rules(rdev, &eg_pi->requested_rps);
2263
2264 return 0;
2265}
2266
2267int btc_dpm_set_power_state(struct radeon_device *rdev)
2268{
2269 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2270 struct radeon_ps *new_ps = &eg_pi->requested_rps;
2271 struct radeon_ps *old_ps = &eg_pi->current_rps;
2272 int ret;
2273
2274 ret = btc_disable_ulv(rdev);
2275 btc_set_boot_state_timing(rdev);
2276 ret = rv770_restrict_performance_levels_before_switch(rdev);
2277 if (ret) {
2278 DRM_ERROR("rv770_restrict_performance_levels_before_switch failed\n");
2279 return ret;
2280 }
2281 if (eg_pi->pcie_performance_request)
2282 cypress_notify_link_speed_change_before_state_change(rdev, new_ps, old_ps);
2283
2284 rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
2285 ret = rv770_halt_smc(rdev);
2286 if (ret) {
2287 DRM_ERROR("rv770_halt_smc failed\n");
2288 return ret;
2289 }
2290 btc_set_at_for_uvd(rdev, new_ps);
2291 if (eg_pi->smu_uvd_hs)
2292 btc_notify_uvd_to_smc(rdev, new_ps);
2293 ret = cypress_upload_sw_state(rdev, new_ps);
2294 if (ret) {
2295 DRM_ERROR("cypress_upload_sw_state failed\n");
2296 return ret;
2297 }
2298 if (eg_pi->dynamic_ac_timing) {
2299 ret = cypress_upload_mc_reg_table(rdev, new_ps);
2300 if (ret) {
2301 DRM_ERROR("cypress_upload_mc_reg_table failed\n");
2302 return ret;
2303 }
2304 }
2305
2306 cypress_program_memory_timing_parameters(rdev, new_ps);
2307
2308 ret = rv770_resume_smc(rdev);
2309 if (ret) {
2310 DRM_ERROR("rv770_resume_smc failed\n");
2311 return ret;
2312 }
2313 ret = rv770_set_sw_state(rdev);
2314 if (ret) {
2315 DRM_ERROR("rv770_set_sw_state failed\n");
2316 return ret;
2317 }
2318 rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
2319
2320 if (eg_pi->pcie_performance_request)
2321 cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
2322
2323 ret = btc_set_power_state_conditionally_enable_ulv(rdev, new_ps);
2324 if (ret) {
2325 DRM_ERROR("btc_set_power_state_conditionally_enable_ulv failed\n");
2326 return ret;
2327 }
2328
2329#if 0
2330 /* XXX */
2331 ret = rv770_unrestrict_performance_levels_after_switch(rdev);
2332 if (ret) {
2333 DRM_ERROR("rv770_unrestrict_performance_levels_after_switch failed\n");
2334 return ret;
2335 }
2336#endif
2337
2338 return 0;
2339}
2340
2341void btc_dpm_post_set_power_state(struct radeon_device *rdev)
2342{
2343 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2344 struct radeon_ps *new_ps = &eg_pi->requested_rps;
2345
2346 btc_update_current_ps(rdev, new_ps);
2347}
2348
2349int btc_dpm_enable(struct radeon_device *rdev)
2350{
2351 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2352 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2353 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
2354 int ret;
2355
2356 if (pi->gfx_clock_gating)
2357 btc_cg_clock_gating_default(rdev);
2358
2359 if (btc_dpm_enabled(rdev))
2360 return -EINVAL;
2361
2362 if (pi->mg_clock_gating)
2363 btc_mg_clock_gating_default(rdev);
2364
2365 if (eg_pi->ls_clock_gating)
2366 btc_ls_clock_gating_default(rdev);
2367
2368 if (pi->voltage_control) {
2369 rv770_enable_voltage_control(rdev, true);
2370 ret = cypress_construct_voltage_tables(rdev);
2371 if (ret) {
2372 DRM_ERROR("cypress_construct_voltage_tables failed\n");
2373 return ret;
2374 }
2375 }
2376
2377 if (pi->mvdd_control) {
2378 ret = cypress_get_mvdd_configuration(rdev);
2379 if (ret) {
2380 DRM_ERROR("cypress_get_mvdd_configuration failed\n");
2381 return ret;
2382 }
2383 }
2384
2385 if (eg_pi->dynamic_ac_timing) {
2386 ret = btc_initialize_mc_reg_table(rdev);
2387 if (ret)
2388 eg_pi->dynamic_ac_timing = false;
2389 }
2390
2391 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
2392 rv770_enable_backbias(rdev, true);
2393
2394 if (pi->dynamic_ss)
2395 cypress_enable_spread_spectrum(rdev, true);
2396
2397 if (pi->thermal_protection)
2398 rv770_enable_thermal_protection(rdev, true);
2399
2400 rv770_setup_bsp(rdev);
2401 rv770_program_git(rdev);
2402 rv770_program_tp(rdev);
2403 rv770_program_tpp(rdev);
2404 rv770_program_sstp(rdev);
2405 rv770_program_engine_speed_parameters(rdev);
2406 cypress_enable_display_gap(rdev);
2407 rv770_program_vc(rdev);
2408
2409 if (pi->dynamic_pcie_gen2)
2410 btc_enable_dynamic_pcie_gen2(rdev, true);
2411
2412 ret = rv770_upload_firmware(rdev);
2413 if (ret) {
2414 DRM_ERROR("rv770_upload_firmware failed\n");
2415 return ret;
2416 }
2417 ret = cypress_get_table_locations(rdev);
2418 if (ret) {
2419 DRM_ERROR("cypress_get_table_locations failed\n");
2420 return ret;
2421 }
2422 ret = btc_init_smc_table(rdev, boot_ps);
2423 if (ret)
2424 return ret;
2425
2426 if (eg_pi->dynamic_ac_timing) {
2427 ret = cypress_populate_mc_reg_table(rdev, boot_ps);
2428 if (ret) {
2429 DRM_ERROR("cypress_populate_mc_reg_table failed\n");
2430 return ret;
2431 }
2432 }
2433
2434 cypress_program_response_times(rdev);
2435 r7xx_start_smc(rdev);
2436 ret = cypress_notify_smc_display_change(rdev, false);
2437 if (ret) {
2438 DRM_ERROR("cypress_notify_smc_display_change failed\n");
2439 return ret;
2440 }
2441 cypress_enable_sclk_control(rdev, true);
2442
2443 if (eg_pi->memory_transition)
2444 cypress_enable_mclk_control(rdev, true);
2445
2446 cypress_start_dpm(rdev);
2447
2448 if (pi->gfx_clock_gating)
2449 btc_cg_clock_gating_enable(rdev, true);
2450
2451 if (pi->mg_clock_gating)
2452 btc_mg_clock_gating_enable(rdev, true);
2453
2454 if (eg_pi->ls_clock_gating)
2455 btc_ls_clock_gating_enable(rdev, true);
2456
2457 if (rdev->irq.installed &&
2458 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
2459 PPSMC_Result result;
2460
2461 ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
2462 if (ret)
2463 return ret;
2464 rdev->irq.dpm_thermal = true;
2465 radeon_irq_set(rdev);
2466 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
2467
2468 if (result != PPSMC_Result_OK)
2469 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
2470 }
2471
2472 rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
2473
2474 btc_init_stutter_mode(rdev);
2475
2476 btc_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
2477
2478 return 0;
2479};
2480
/*
 * Tear down dynamic power management: undo the gating/throttle
 * features enabled by btc_dpm_enable(), stop dpm and the SMC, reset
 * the hardware to defaults, and record the boot state as current.
 * No-op when dpm is not running.
 */
void btc_dpm_disable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	if (!btc_dpm_enabled(rdev))
		return;

	rv770_clear_vc(rdev);

	if (pi->thermal_protection)
		rv770_enable_thermal_protection(rdev, false);

	if (pi->dynamic_pcie_gen2)
		btc_enable_dynamic_pcie_gen2(rdev, false);

	/* stop routing thermal interrupts to dpm */
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		rdev->irq.dpm_thermal = false;
		radeon_irq_set(rdev);
	}

	if (pi->gfx_clock_gating)
		btc_cg_clock_gating_enable(rdev, false);

	if (pi->mg_clock_gating)
		btc_mg_clock_gating_enable(rdev, false);

	if (eg_pi->ls_clock_gating)
		btc_ls_clock_gating_enable(rdev, false);

	rv770_stop_dpm(rdev);
	btc_reset_to_default(rdev);
	btc_stop_smc(rdev);
	cypress_enable_spread_spectrum(rdev, false);

	btc_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}
2519
/*
 * One-time asic setup before dpm is enabled: snapshot memory type,
 * clock/ARB/voltage registers, advertise PCIE gen2 when the platform
 * supports performance requests, and enable ACPI power management.
 */
void btc_dpm_setup_asic(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	rv770_get_memory_type(rdev);
	rv740_read_clock_registers(rdev);
	btc_read_arb_registers(rdev);
	rv770_read_voltage_smio_registers(rdev);

	if (eg_pi->pcie_performance_request)
		cypress_advertise_gen2_capability(rdev);

	rv770_get_pcie_gen2_status(rdev);
	rv770_enable_acpi_pm(rdev);
}
2535
2536int btc_dpm_init(struct radeon_device *rdev)
2537{
2538 struct rv7xx_power_info *pi;
2539 struct evergreen_power_info *eg_pi;
2540 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
2541 u16 data_offset, size;
2542 u8 frev, crev;
2543 struct atom_clock_dividers dividers;
2544 int ret;
2545
2546 eg_pi = kzalloc(sizeof(struct evergreen_power_info), GFP_KERNEL);
2547 if (eg_pi == NULL)
2548 return -ENOMEM;
2549 rdev->pm.dpm.priv = eg_pi;
2550 pi = &eg_pi->rv7xx;
2551
2552 rv770_get_max_vddc(rdev);
2553
2554 eg_pi->ulv.supported = false;
2555 pi->acpi_vddc = 0;
2556 eg_pi->acpi_vddci = 0;
2557 pi->min_vddc_in_table = 0;
2558 pi->max_vddc_in_table = 0;
2559
2560 ret = rv7xx_parse_power_table(rdev);
2561 if (ret)
2562 return ret;
2563 ret = r600_parse_extended_power_table(rdev);
2564 if (ret)
2565 return ret;
2566
2567 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
2568 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
2569 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
2570 r600_free_extended_power_table(rdev);
2571 return -ENOMEM;
2572 }
2573 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
2574 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
2575 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
2576 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
2577 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 800;
2578 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
2579 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 800;
2580 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
2581 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 800;
2582
2583 if (rdev->pm.dpm.voltage_response_time == 0)
2584 rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
2585 if (rdev->pm.dpm.backbias_response_time == 0)
2586 rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
2587
2588 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
2589 0, false, &dividers);
2590 if (ret)
2591 pi->ref_div = dividers.ref_div + 1;
2592 else
2593 pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
2594
2595 pi->mclk_strobe_mode_threshold = 40000;
2596 pi->mclk_edc_enable_threshold = 40000;
2597 eg_pi->mclk_edc_wr_enable_threshold = 40000;
2598
2599 pi->rlp = RV770_RLP_DFLT;
2600 pi->rmp = RV770_RMP_DFLT;
2601 pi->lhp = RV770_LHP_DFLT;
2602 pi->lmp = RV770_LMP_DFLT;
2603
2604 eg_pi->ats[0].rlp = RV770_RLP_DFLT;
2605 eg_pi->ats[0].rmp = RV770_RMP_DFLT;
2606 eg_pi->ats[0].lhp = RV770_LHP_DFLT;
2607 eg_pi->ats[0].lmp = RV770_LMP_DFLT;
2608
2609 eg_pi->ats[1].rlp = BTC_RLP_UVD_DFLT;
2610 eg_pi->ats[1].rmp = BTC_RMP_UVD_DFLT;
2611 eg_pi->ats[1].lhp = BTC_LHP_UVD_DFLT;
2612 eg_pi->ats[1].lmp = BTC_LMP_UVD_DFLT;
2613
2614 eg_pi->smu_uvd_hs = true;
2615
2616 pi->voltage_control =
2617 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
2618
2619 pi->mvdd_control =
2620 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
2621
2622 eg_pi->vddci_control =
2623 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
2624
2625 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
2626 &frev, &crev, &data_offset)) {
2627 pi->sclk_ss = true;
2628 pi->mclk_ss = true;
2629 pi->dynamic_ss = true;
2630 } else {
2631 pi->sclk_ss = false;
2632 pi->mclk_ss = false;
2633 pi->dynamic_ss = true;
2634 }
2635
2636 pi->asi = RV770_ASI_DFLT;
2637 pi->pasi = CYPRESS_HASI_DFLT;
2638 pi->vrc = CYPRESS_VRC_DFLT;
2639
2640 pi->power_gating = false;
2641
2642 pi->gfx_clock_gating = true;
2643
2644 pi->mg_clock_gating = true;
2645 pi->mgcgtssm = true;
2646 eg_pi->ls_clock_gating = false;
2647 eg_pi->sclk_deep_sleep = false;
2648
2649 pi->dynamic_pcie_gen2 = true;
2650
2651 if (pi->gfx_clock_gating &&
2652 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
2653 pi->thermal_protection = true;
2654 else
2655 pi->thermal_protection = false;
2656
2657 pi->display_gap = true;
2658
2659 if (rdev->flags & RADEON_IS_MOBILITY)
2660 pi->dcodt = true;
2661 else
2662 pi->dcodt = false;
2663
2664 pi->ulps = true;
2665
2666 eg_pi->dynamic_ac_timing = true;
2667 eg_pi->abm = true;
2668 eg_pi->mcls = true;
2669 eg_pi->light_sleep = true;
2670 eg_pi->memory_transition = true;
2671#if defined(CONFIG_ACPI)
2672 eg_pi->pcie_performance_request =
2673 radeon_acpi_is_pcie_performance_request_supported(rdev);
2674#else
2675 eg_pi->pcie_performance_request = false;
2676#endif
2677
2678 if (rdev->family == CHIP_BARTS)
2679 eg_pi->dll_default_on = true;
2680 else
2681 eg_pi->dll_default_on = false;
2682
2683 eg_pi->sclk_deep_sleep = false;
2684 if (ASIC_IS_LOMBOK(rdev))
2685 pi->mclk_stutter_mode_threshold = 30000;
2686 else
2687 pi->mclk_stutter_mode_threshold = 0;
2688
2689 pi->sram_end = SMC_RAM_END;
2690
2691 rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
2692 rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
2693 rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2 = 900;
2694 rdev->pm.dpm.dyn_state.valid_sclk_values.count = ARRAY_SIZE(btc_valid_sclk);
2695 rdev->pm.dpm.dyn_state.valid_sclk_values.values = btc_valid_sclk;
2696 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
2697 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
2698
2699 if (rdev->family == CHIP_TURKS)
2700 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
2701 else
2702 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 10000;
2703
2704 return 0;
2705}
2706
2707void btc_dpm_fini(struct radeon_device *rdev)
2708{
2709 int i;
2710
2711 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2712 kfree(rdev->pm.dpm.ps[i].ps_priv);
2713 }
2714 kfree(rdev->pm.dpm.ps);
2715 kfree(rdev->pm.dpm.priv);
2716 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
2717 r600_free_extended_power_table(rdev);
2718}
2719
2720u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low)
2721{
2722 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2723 struct rv7xx_ps *requested_state = rv770_get_ps(&eg_pi->requested_rps);
2724
2725 if (low)
2726 return requested_state->low.sclk;
2727 else
2728 return requested_state->high.sclk;
2729}
2730
2731u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low)
2732{
2733 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2734 struct rv7xx_ps *requested_state = rv770_get_ps(&eg_pi->requested_rps);
2735
2736 if (low)
2737 return requested_state->low.mclk;
2738 else
2739 return requested_state->high.mclk;
2740}
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h
new file mode 100644
index 000000000000..1a15e0e41950
--- /dev/null
+++ b/drivers/gpu/drm/radeon/btc_dpm.h
@@ -0,0 +1,57 @@
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __BTC_DPM_H__
#define __BTC_DPM_H__

/* activity target defaults assigned to eg_pi->ats[1] (the UVD set)
 * in btc_dpm_init() */
#define BTC_RLP_UVD_DFLT 20
#define BTC_RMP_UVD_DFLT 50
#define BTC_LHP_UVD_DFLT 50
#define BTC_LMP_UVD_DFLT 20
/* per-asic MGCG CGTSSM control defaults */
#define BARTS_MGCGCGTSSMCTRL_DFLT 0x81944000
#define TURKS_MGCGCGTSSMCTRL_DFLT 0x6e944000
#define CAICOS_MGCGCGTSSMCTRL_DFLT 0x46944040
/* ULV parameter/control register defaults */
#define BTC_CGULVPARAMETER_DFLT 0x00040035
#define BTC_CGULVCONTROL_DFLT 0x00001450

/* table of valid sclk values, hooked up to
 * dyn_state.valid_sclk_values in btc_dpm_init() */
extern u32 btc_valid_sclk[40];

/* helpers shared with the NI/SI dpm code (defined in btc_dpm.c) */
void btc_read_arb_registers(struct radeon_device *rdev);
void btc_program_mgcg_hw_sequence(struct radeon_device *rdev,
				  const u32 *sequence, u32 count);
void btc_skip_blacklist_clocks(struct radeon_device *rdev,
			       const u32 max_sclk, const u32 max_mclk,
			       u32 *sclk, u32 *mclk);
void btc_adjust_clock_combinations(struct radeon_device *rdev,
				   const struct radeon_clock_and_voltage_limits *max_limits,
				   struct rv7xx_pl *pl);
void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
					u32 clock, u16 max_voltage, u16 *voltage);
void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
				   u16 max_vddc, u16 max_vddci,
				   u16 *vddc, u16 *vddci);
bool btc_dpm_enabled(struct radeon_device *rdev);
int btc_reset_to_default(struct radeon_device *rdev);
void btc_notify_uvd_to_smc(struct radeon_device *rdev,
			   struct radeon_ps *radeon_new_state);

#endif
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h
new file mode 100644
index 000000000000..29e32de7e025
--- /dev/null
+++ b/drivers/gpu/drm/radeon/btcd.h
@@ -0,0 +1,181 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
/* BTC (Barts/Turks/Caicos) register offsets and bitfield definitions
 * used by the DPM (dynamic power management) code. */
24#ifndef _BTCD_H_
25#define _BTCD_H_
26
27/* pm registers */
28
/* global power management control */
29#define GENERAL_PWRMGT 0x63c
30# define GLOBAL_PWRMGT_EN (1 << 0)
31# define STATIC_PM_EN (1 << 1)
32# define THERMAL_PROTECTION_DIS (1 << 2)
33# define THERMAL_PROTECTION_TYPE (1 << 3)
34# define ENABLE_GEN2PCIE (1 << 4)
35# define ENABLE_GEN2XSP (1 << 5)
36# define SW_SMIO_INDEX(x) ((x) << 6)
37# define SW_SMIO_INDEX_MASK (3 << 6)
38# define SW_SMIO_INDEX_SHIFT 6
39# define LOW_VOLT_D2_ACPI (1 << 8)
40# define LOW_VOLT_D3_ACPI (1 << 9)
41# define VOLT_PWRMGT_EN (1 << 10)
42# define BACKBIAS_PAD_EN (1 << 18)
43# define BACKBIAS_VALUE (1 << 19)
44# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
45# define AC_DC_SW (1 << 24)
46
/* clock-gating handshake between BIF and clock-gating block */
47#define CG_BIF_REQ_AND_RSP 0x7f4
48#define CG_CLIENT_REQ(x) ((x) << 0)
49#define CG_CLIENT_REQ_MASK (0xff << 0)
50#define CG_CLIENT_REQ_SHIFT 0
51#define CG_CLIENT_RESP(x) ((x) << 8)
52#define CG_CLIENT_RESP_MASK (0xff << 8)
53#define CG_CLIENT_RESP_SHIFT 8
54#define CLIENT_CG_REQ(x) ((x) << 16)
55#define CLIENT_CG_REQ_MASK (0xff << 16)
56#define CLIENT_CG_REQ_SHIFT 16
57#define CLIENT_CG_RESP(x) ((x) << 24)
58#define CLIENT_CG_RESP_MASK (0xff << 24)
59#define CLIENT_CG_RESP_SHIFT 24
60
/* engine clock pulse-skip control */
61#define SCLK_PSKIP_CNTL 0x8c0
62#define PSKIP_ON_ALLOW_STOP_HI(x) ((x) << 16)
63#define PSKIP_ON_ALLOW_STOP_HI_MASK (0xff << 16)
64#define PSKIP_ON_ALLOW_STOP_HI_SHIFT 16
65
/* ULV (ultra low voltage) control, cf. BTC_CGULV*_DFLT in btc_dpm.h */
66#define CG_ULV_CONTROL 0x8c8
67#define CG_ULV_PARAMETER 0x8cc
68
/* memory-controller arbiter registers */
69#define MC_ARB_DRAM_TIMING 0x2774
70#define MC_ARB_DRAM_TIMING2 0x2778
71
72#define MC_ARB_RFSH_RATE 0x27b0
73#define POWERMODE0(x) ((x) << 0)
74#define POWERMODE0_MASK (0xff << 0)
75#define POWERMODE0_SHIFT 0
76#define POWERMODE1(x) ((x) << 8)
77#define POWERMODE1_MASK (0xff << 8)
78#define POWERMODE1_SHIFT 8
79#define POWERMODE2(x) ((x) << 16)
80#define POWERMODE2_MASK (0xff << 16)
81#define POWERMODE2_SHIFT 16
82#define POWERMODE3(x) ((x) << 24)
83#define POWERMODE3_MASK (0xff << 24)
84#define POWERMODE3_SHIFT 24
85
86#define MC_ARB_BURST_TIME 0x2808
87#define STATE0(x) ((x) << 0)
88#define STATE0_MASK (0x1f << 0)
89#define STATE0_SHIFT 0
90#define STATE1(x) ((x) << 5)
91#define STATE1_MASK (0x1f << 5)
92#define STATE1_SHIFT 5
93#define STATE2(x) ((x) << 10)
94#define STATE2_MASK (0x1f << 10)
95#define STATE2_SHIFT 10
96#define STATE3(x) ((x) << 15)
97#define STATE3_MASK (0x1f << 15)
98#define STATE3_SHIFT 15
99
/* memory-controller sequencer timing registers */
100#define MC_SEQ_RAS_TIMING 0x28a0
101#define MC_SEQ_CAS_TIMING 0x28a4
102#define MC_SEQ_MISC_TIMING 0x28a8
103#define MC_SEQ_MISC_TIMING2 0x28ac
104
105#define MC_SEQ_RD_CTL_D0 0x28b4
106#define MC_SEQ_RD_CTL_D1 0x28b8
107#define MC_SEQ_WR_CTL_D0 0x28bc
108#define MC_SEQ_WR_CTL_D1 0x28c0
109
110#define MC_PMG_AUTO_CFG 0x28d4
111
112#define MC_SEQ_STATUS_M 0x29f4
113# define PMG_PWRSTATE (1 << 16)
114
/* MC_SEQ_MISC0[31:28] == 5 identifies GDDR5 memory */
115#define MC_SEQ_MISC0 0x2a00
116#define MC_SEQ_MISC0_GDDR5_SHIFT 28
117#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
118#define MC_SEQ_MISC0_GDDR5_VALUE 5
119#define MC_SEQ_MISC1 0x2a04
120#define MC_SEQ_RESERVE_M 0x2a08
121#define MC_PMG_CMD_EMRS 0x2a0c
122
123#define MC_SEQ_MISC3 0x2a2c
124
125#define MC_SEQ_MISC5 0x2a54
126#define MC_SEQ_MISC6 0x2a58
127
128#define MC_SEQ_MISC7 0x2a64
129
/* clock-gating handshake for the MC sequencer */
130#define MC_SEQ_CG 0x2a68
131#define CG_SEQ_REQ(x) ((x) << 0)
132#define CG_SEQ_REQ_MASK (0xff << 0)
133#define CG_SEQ_REQ_SHIFT 0
134#define CG_SEQ_RESP(x) ((x) << 8)
135#define CG_SEQ_RESP_MASK (0xff << 8)
136#define CG_SEQ_RESP_SHIFT 8
137#define SEQ_CG_REQ(x) ((x) << 16)
138#define SEQ_CG_REQ_MASK (0xff << 16)
139#define SEQ_CG_REQ_SHIFT 16
140#define SEQ_CG_RESP(x) ((x) << 24)
141#define SEQ_CG_RESP_MASK (0xff << 24)
142#define SEQ_CG_RESP_SHIFT 24
/* "_LP" shadow copies of the sequencer timing registers */
143#define MC_SEQ_RAS_TIMING_LP 0x2a6c
144#define MC_SEQ_CAS_TIMING_LP 0x2a70
145#define MC_SEQ_MISC_TIMING_LP 0x2a74
146#define MC_SEQ_MISC_TIMING2_LP 0x2a78
147#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
148#define MC_SEQ_WR_CTL_D1_LP 0x2a80
149#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
150#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
151
152#define MC_PMG_CMD_MRS 0x2aac
153
154#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
155#define MC_SEQ_RD_CTL_D1_LP 0x2b20
156
157#define MC_PMG_CMD_MRS1 0x2b44
158#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
159
160#define LB_SYNC_RESET_SEL 0x6b28
161#define LB_SYNC_RESET_SEL_MASK (3 << 0)
162#define LB_SYNC_RESET_SEL_SHIFT 0
163
164/* PCIE link stuff */
165#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
166# define LC_GEN2_EN_STRAP (1 << 0)
167# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
168# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
169# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
170# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
/* NOTE(review): SHIFT below is 3 while the MASK sits at bits 8-9; looks
 * inconsistent — confirm against the hardware register spec before use. */
171# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
172# define LC_CURRENT_DATA_RATE (1 << 11)
173# define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12)
174# define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12)
175# define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12
176# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
177# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
178# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
179# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
180
181#endif
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
new file mode 100644
index 000000000000..ed1d91025928
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -0,0 +1,6987 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h>
27#include <linux/module.h>
28#include "drmP.h"
29#include "radeon.h"
30#include "radeon_asic.h"
31#include "cikd.h"
32#include "atom.h"
33#include "cik_blit_shaders.h"
34
/*
 * Expected microcode image sizes, in dwords, per engine.
 * Used to validate the firmware blobs fetched in cik_init_microcode().
 */
/* GFX command processor front ends */
#define CIK_PFP_UCODE_SIZE	2144
#define CIK_ME_UCODE_SIZE	2144
#define CIK_CE_UCODE_SIZE	2144
/* compute micro engine */
#define CIK_MEC_UCODE_SIZE	4192
/* RLC (interrupt/power controller), per ASIC */
#define BONAIRE_RLC_UCODE_SIZE	2048
#define KB_RLC_UCODE_SIZE	2560
#define KV_RLC_UCODE_SIZE	2560
/* GDDR memory controller */
#define CIK_MC_UCODE_SIZE	7866
/* SDMA engine */
#define CIK_SDMA_UCODE_SIZE	1050
#define CIK_SDMA_UCODE_VERSION	64
51MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
52MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
53MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
54MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
55MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
56MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
57MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
58MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
59MODULE_FIRMWARE("radeon/KAVERI_me.bin");
60MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
61MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
62MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
63MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");
64MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
65MODULE_FIRMWARE("radeon/KABINI_me.bin");
66MODULE_FIRMWARE("radeon/KABINI_ce.bin");
67MODULE_FIRMWARE("radeon/KABINI_mec.bin");
68MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
69MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
70
71extern int r600_ih_ring_alloc(struct radeon_device *rdev);
72extern void r600_ih_ring_fini(struct radeon_device *rdev);
73extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
74extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
75extern bool evergreen_is_display_hung(struct radeon_device *rdev);
76extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
77extern void si_rlc_fini(struct radeon_device *rdev);
78extern int si_rlc_init(struct radeon_device *rdev);
79static void cik_rlc_stop(struct radeon_device *rdev);
80
81/*
82 * Indirect registers accessor
83 */
84u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
85{
86 u32 r;
87
88 WREG32(PCIE_INDEX, reg);
89 (void)RREG32(PCIE_INDEX);
90 r = RREG32(PCIE_DATA);
91 return r;
92}
93
94void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
95{
96 WREG32(PCIE_INDEX, reg);
97 (void)RREG32(PCIE_INDEX);
98 WREG32(PCIE_DATA, v);
99 (void)RREG32(PCIE_DATA);
100}
101
/* "Golden" register tables, applied at init by cik_init_golden_registers()
 * via radeon_program_register_sequence().  Entries appear to be
 * {offset, AND mask, OR value} triples — three u32s per line. */
102static const u32 bonaire_golden_spm_registers[] =
103{
104 0x30800, 0xe0ffffff, 0xe0000000
105};
106
107static const u32 bonaire_golden_common_registers[] =
108{
109 0xc770, 0xffffffff, 0x00000800,
110 0xc774, 0xffffffff, 0x00000800,
111 0xc798, 0xffffffff, 0x00007fbf,
112 0xc79c, 0xffffffff, 0x00007faf
113};
114
115static const u32 bonaire_golden_registers[] =
116{
117 0x3354, 0x00000333, 0x00000333,
118 0x3350, 0x000c0fc0, 0x00040200,
119 0x9a10, 0x00010000, 0x00058208,
120 0x3c000, 0xffff1fff, 0x00140000,
121 0x3c200, 0xfdfc0fff, 0x00000100,
122 0x3c234, 0x40000000, 0x40000200,
123 0x9830, 0xffffffff, 0x00000000,
124 0x9834, 0xf00fffff, 0x00000400,
125 0x9838, 0x0002021c, 0x00020200,
126 0xc78, 0x00000080, 0x00000000,
127 0x5bb0, 0x000000f0, 0x00000070,
128 0x5bc0, 0xf0311fff, 0x80300000,
129 0x98f8, 0x73773777, 0x12010001,
130 0x350c, 0x00810000, 0x408af000,
131 0x7030, 0x31000111, 0x00000011,
132 0x2f48, 0x73773777, 0x12010001,
133 0x220c, 0x00007fb6, 0x0021a1b1,
134 0x2210, 0x00007fb6, 0x002021b1,
135 0x2180, 0x00007fb6, 0x00002191,
136 0x2218, 0x00007fb6, 0x002121b1,
137 0x221c, 0x00007fb6, 0x002021b1,
138 0x21dc, 0x00007fb6, 0x00002191,
139 0x21e0, 0x00007fb6, 0x00002191,
140 0x3628, 0x0000003f, 0x0000000a,
141 0x362c, 0x0000003f, 0x0000000a,
142 0x2ae4, 0x00073ffe, 0x000022a2,
143 0x240c, 0x000007ff, 0x00000000,
144 0x8a14, 0xf000003f, 0x00000007,
145 0x8bf0, 0x00002001, 0x00000001,
146 0x8b24, 0xffffffff, 0x00ffffff,
147 0x30a04, 0x0000ff0f, 0x00000000,
148 0x28a4c, 0x07ffffff, 0x06000000,
149 0x4d8, 0x00000fff, 0x00000100,
150 0x3e78, 0x00000001, 0x00000002,
151 0x9100, 0x03000000, 0x0362c688,
152 0x8c00, 0x000000ff, 0x00000001,
153 0xe40, 0x00001fff, 0x00001fff,
154 0x9060, 0x0000007f, 0x00000020,
155 0x9508, 0x00010000, 0x00010000,
156 0xac14, 0x000003ff, 0x000000f3,
157 0xac0c, 0xffffffff, 0x00001032
158};
159
/* Bonaire medium-grain / coarse-grain clock-gating init values */
160static const u32 bonaire_mgcg_cgcg_init[] =
161{
162 0xc420, 0xffffffff, 0xfffffffc,
163 0x30800, 0xffffffff, 0xe0000000,
164 0x3c2a0, 0xffffffff, 0x00000100,
165 0x3c208, 0xffffffff, 0x00000100,
166 0x3c2c0, 0xffffffff, 0xc0000100,
167 0x3c2c8, 0xffffffff, 0xc0000100,
168 0x3c2c4, 0xffffffff, 0xc0000100,
169 0x55e4, 0xffffffff, 0x00600100,
170 0x3c280, 0xffffffff, 0x00000100,
171 0x3c214, 0xffffffff, 0x06000100,
172 0x3c220, 0xffffffff, 0x00000100,
173 0x3c218, 0xffffffff, 0x06000100,
174 0x3c204, 0xffffffff, 0x00000100,
175 0x3c2e0, 0xffffffff, 0x00000100,
176 0x3c224, 0xffffffff, 0x00000100,
177 0x3c200, 0xffffffff, 0x00000100,
178 0x3c230, 0xffffffff, 0x00000100,
179 0x3c234, 0xffffffff, 0x00000100,
180 0x3c250, 0xffffffff, 0x00000100,
181 0x3c254, 0xffffffff, 0x00000100,
182 0x3c258, 0xffffffff, 0x00000100,
183 0x3c25c, 0xffffffff, 0x00000100,
184 0x3c260, 0xffffffff, 0x00000100,
185 0x3c27c, 0xffffffff, 0x00000100,
186 0x3c278, 0xffffffff, 0x00000100,
187 0x3c210, 0xffffffff, 0x06000100,
188 0x3c290, 0xffffffff, 0x00000100,
189 0x3c274, 0xffffffff, 0x00000100,
190 0x3c2b4, 0xffffffff, 0x00000100,
191 0x3c2b0, 0xffffffff, 0x00000100,
192 0x3c270, 0xffffffff, 0x00000100,
193 0x30800, 0xffffffff, 0xe0000000,
194 0x3c020, 0xffffffff, 0x00010000,
195 0x3c024, 0xffffffff, 0x00030002,
196 0x3c028, 0xffffffff, 0x00040007,
197 0x3c02c, 0xffffffff, 0x00060005,
198 0x3c030, 0xffffffff, 0x00090008,
199 0x3c034, 0xffffffff, 0x00010000,
200 0x3c038, 0xffffffff, 0x00030002,
201 0x3c03c, 0xffffffff, 0x00040007,
202 0x3c040, 0xffffffff, 0x00060005,
203 0x3c044, 0xffffffff, 0x00090008,
204 0x3c048, 0xffffffff, 0x00010000,
205 0x3c04c, 0xffffffff, 0x00030002,
206 0x3c050, 0xffffffff, 0x00040007,
207 0x3c054, 0xffffffff, 0x00060005,
208 0x3c058, 0xffffffff, 0x00090008,
209 0x3c05c, 0xffffffff, 0x00010000,
210 0x3c060, 0xffffffff, 0x00030002,
211 0x3c064, 0xffffffff, 0x00040007,
212 0x3c068, 0xffffffff, 0x00060005,
213 0x3c06c, 0xffffffff, 0x00090008,
214 0x3c070, 0xffffffff, 0x00010000,
215 0x3c074, 0xffffffff, 0x00030002,
216 0x3c078, 0xffffffff, 0x00040007,
217 0x3c07c, 0xffffffff, 0x00060005,
218 0x3c080, 0xffffffff, 0x00090008,
219 0x3c084, 0xffffffff, 0x00010000,
220 0x3c088, 0xffffffff, 0x00030002,
221 0x3c08c, 0xffffffff, 0x00040007,
222 0x3c090, 0xffffffff, 0x00060005,
223 0x3c094, 0xffffffff, 0x00090008,
224 0x3c098, 0xffffffff, 0x00010000,
225 0x3c09c, 0xffffffff, 0x00030002,
226 0x3c0a0, 0xffffffff, 0x00040007,
227 0x3c0a4, 0xffffffff, 0x00060005,
228 0x3c0a8, 0xffffffff, 0x00090008,
229 0x3c000, 0xffffffff, 0x96e00200,
230 0x8708, 0xffffffff, 0x00900100,
231 0xc424, 0xffffffff, 0x0020003f,
232 0x38, 0xffffffff, 0x0140001c,
233 0x3c, 0x000f0000, 0x000f0000,
234 0x220, 0xffffffff, 0xC060000C,
235 0x224, 0xc0000fff, 0x00000100,
236 0xf90, 0xffffffff, 0x00000100,
237 0xf98, 0x00000101, 0x00000000,
238 0x20a8, 0xffffffff, 0x00000104,
239 0x55e4, 0xff000fff, 0x00000100,
240 0x30cc, 0xc0000fff, 0x00000104,
241 0xc1e4, 0x00000001, 0x00000001,
242 0xd00c, 0xff000ff0, 0x00000100,
243 0xd80c, 0xff000ff0, 0x00000100
244};
245
/* Spectre == Kaveri (see cik_init_golden_registers) */
246static const u32 spectre_golden_spm_registers[] =
247{
248 0x30800, 0xe0ffffff, 0xe0000000
249};
250
251static const u32 spectre_golden_common_registers[] =
252{
253 0xc770, 0xffffffff, 0x00000800,
254 0xc774, 0xffffffff, 0x00000800,
255 0xc798, 0xffffffff, 0x00007fbf,
256 0xc79c, 0xffffffff, 0x00007faf
257};
258
259static const u32 spectre_golden_registers[] =
260{
261 0x3c000, 0xffff1fff, 0x96940200,
262 0x3c00c, 0xffff0001, 0xff000000,
263 0x3c200, 0xfffc0fff, 0x00000100,
264 0x6ed8, 0x00010101, 0x00010000,
265 0x9834, 0xf00fffff, 0x00000400,
266 0x9838, 0xfffffffc, 0x00020200,
267 0x5bb0, 0x000000f0, 0x00000070,
268 0x5bc0, 0xf0311fff, 0x80300000,
269 0x98f8, 0x73773777, 0x12010001,
270 0x9b7c, 0x00ff0000, 0x00fc0000,
271 0x2f48, 0x73773777, 0x12010001,
272 0x8a14, 0xf000003f, 0x00000007,
273 0x8b24, 0xffffffff, 0x00ffffff,
274 0x28350, 0x3f3f3fff, 0x00000082,
275 0x28355, 0x0000003f, 0x00000000,
276 0x3e78, 0x00000001, 0x00000002,
277 0x913c, 0xffff03df, 0x00000004,
278 0xc768, 0x00000008, 0x00000008,
279 0x8c00, 0x000008ff, 0x00000800,
280 0x9508, 0x00010000, 0x00010000,
281 0xac0c, 0xffffffff, 0x54763210,
282 0x214f8, 0x01ff01ff, 0x00000002,
283 0x21498, 0x007ff800, 0x00200000,
284 0x2015c, 0xffffffff, 0x00000f40,
285 0x30934, 0xffffffff, 0x00000001
286};
287
288static const u32 spectre_mgcg_cgcg_init[] =
289{
290 0xc420, 0xffffffff, 0xfffffffc,
291 0x30800, 0xffffffff, 0xe0000000,
292 0x3c2a0, 0xffffffff, 0x00000100,
293 0x3c208, 0xffffffff, 0x00000100,
294 0x3c2c0, 0xffffffff, 0x00000100,
295 0x3c2c8, 0xffffffff, 0x00000100,
296 0x3c2c4, 0xffffffff, 0x00000100,
297 0x55e4, 0xffffffff, 0x00600100,
298 0x3c280, 0xffffffff, 0x00000100,
299 0x3c214, 0xffffffff, 0x06000100,
300 0x3c220, 0xffffffff, 0x00000100,
301 0x3c218, 0xffffffff, 0x06000100,
302 0x3c204, 0xffffffff, 0x00000100,
303 0x3c2e0, 0xffffffff, 0x00000100,
304 0x3c224, 0xffffffff, 0x00000100,
305 0x3c200, 0xffffffff, 0x00000100,
306 0x3c230, 0xffffffff, 0x00000100,
307 0x3c234, 0xffffffff, 0x00000100,
308 0x3c250, 0xffffffff, 0x00000100,
309 0x3c254, 0xffffffff, 0x00000100,
310 0x3c258, 0xffffffff, 0x00000100,
311 0x3c25c, 0xffffffff, 0x00000100,
312 0x3c260, 0xffffffff, 0x00000100,
313 0x3c27c, 0xffffffff, 0x00000100,
314 0x3c278, 0xffffffff, 0x00000100,
315 0x3c210, 0xffffffff, 0x06000100,
316 0x3c290, 0xffffffff, 0x00000100,
317 0x3c274, 0xffffffff, 0x00000100,
318 0x3c2b4, 0xffffffff, 0x00000100,
319 0x3c2b0, 0xffffffff, 0x00000100,
320 0x3c270, 0xffffffff, 0x00000100,
321 0x30800, 0xffffffff, 0xe0000000,
322 0x3c020, 0xffffffff, 0x00010000,
323 0x3c024, 0xffffffff, 0x00030002,
324 0x3c028, 0xffffffff, 0x00040007,
325 0x3c02c, 0xffffffff, 0x00060005,
326 0x3c030, 0xffffffff, 0x00090008,
327 0x3c034, 0xffffffff, 0x00010000,
328 0x3c038, 0xffffffff, 0x00030002,
329 0x3c03c, 0xffffffff, 0x00040007,
330 0x3c040, 0xffffffff, 0x00060005,
331 0x3c044, 0xffffffff, 0x00090008,
332 0x3c048, 0xffffffff, 0x00010000,
333 0x3c04c, 0xffffffff, 0x00030002,
334 0x3c050, 0xffffffff, 0x00040007,
335 0x3c054, 0xffffffff, 0x00060005,
336 0x3c058, 0xffffffff, 0x00090008,
337 0x3c05c, 0xffffffff, 0x00010000,
338 0x3c060, 0xffffffff, 0x00030002,
339 0x3c064, 0xffffffff, 0x00040007,
340 0x3c068, 0xffffffff, 0x00060005,
341 0x3c06c, 0xffffffff, 0x00090008,
342 0x3c070, 0xffffffff, 0x00010000,
343 0x3c074, 0xffffffff, 0x00030002,
344 0x3c078, 0xffffffff, 0x00040007,
345 0x3c07c, 0xffffffff, 0x00060005,
346 0x3c080, 0xffffffff, 0x00090008,
347 0x3c084, 0xffffffff, 0x00010000,
348 0x3c088, 0xffffffff, 0x00030002,
349 0x3c08c, 0xffffffff, 0x00040007,
350 0x3c090, 0xffffffff, 0x00060005,
351 0x3c094, 0xffffffff, 0x00090008,
352 0x3c098, 0xffffffff, 0x00010000,
353 0x3c09c, 0xffffffff, 0x00030002,
354 0x3c0a0, 0xffffffff, 0x00040007,
355 0x3c0a4, 0xffffffff, 0x00060005,
356 0x3c0a8, 0xffffffff, 0x00090008,
357 0x3c0ac, 0xffffffff, 0x00010000,
358 0x3c0b0, 0xffffffff, 0x00030002,
359 0x3c0b4, 0xffffffff, 0x00040007,
360 0x3c0b8, 0xffffffff, 0x00060005,
361 0x3c0bc, 0xffffffff, 0x00090008,
362 0x3c000, 0xffffffff, 0x96e00200,
363 0x8708, 0xffffffff, 0x00900100,
364 0xc424, 0xffffffff, 0x0020003f,
365 0x38, 0xffffffff, 0x0140001c,
366 0x3c, 0x000f0000, 0x000f0000,
367 0x220, 0xffffffff, 0xC060000C,
368 0x224, 0xc0000fff, 0x00000100,
369 0xf90, 0xffffffff, 0x00000100,
370 0xf98, 0x00000101, 0x00000000,
371 0x20a8, 0xffffffff, 0x00000104,
372 0x55e4, 0xff000fff, 0x00000100,
373 0x30cc, 0xc0000fff, 0x00000104,
374 0xc1e4, 0x00000001, 0x00000001,
375 0xd00c, 0xff000ff0, 0x00000100,
376 0xd80c, 0xff000ff0, 0x00000100
377};
378
/* Kalindi == Kabini (see cik_init_golden_registers) */
379static const u32 kalindi_golden_spm_registers[] =
380{
381 0x30800, 0xe0ffffff, 0xe0000000
382};
383
384static const u32 kalindi_golden_common_registers[] =
385{
386 0xc770, 0xffffffff, 0x00000800,
387 0xc774, 0xffffffff, 0x00000800,
388 0xc798, 0xffffffff, 0x00007fbf,
389 0xc79c, 0xffffffff, 0x00007faf
390};
391
392static const u32 kalindi_golden_registers[] =
393{
394 0x3c000, 0xffffdfff, 0x6e944040,
395 0x55e4, 0xff607fff, 0xfc000100,
396 0x3c220, 0xff000fff, 0x00000100,
397 0x3c224, 0xff000fff, 0x00000100,
398 0x3c200, 0xfffc0fff, 0x00000100,
399 0x6ed8, 0x00010101, 0x00010000,
400 0x9830, 0xffffffff, 0x00000000,
401 0x9834, 0xf00fffff, 0x00000400,
402 0x5bb0, 0x000000f0, 0x00000070,
403 0x5bc0, 0xf0311fff, 0x80300000,
404 0x98f8, 0x73773777, 0x12010001,
405 0x98fc, 0xffffffff, 0x00000010,
406 0x9b7c, 0x00ff0000, 0x00fc0000,
407 0x8030, 0x00001f0f, 0x0000100a,
408 0x2f48, 0x73773777, 0x12010001,
409 0x2408, 0x000fffff, 0x000c007f,
410 0x8a14, 0xf000003f, 0x00000007,
411 0x8b24, 0x3fff3fff, 0x00ffcfff,
412 0x30a04, 0x0000ff0f, 0x00000000,
413 0x28a4c, 0x07ffffff, 0x06000000,
414 0x4d8, 0x00000fff, 0x00000100,
415 0x3e78, 0x00000001, 0x00000002,
416 0xc768, 0x00000008, 0x00000008,
417 0x8c00, 0x000000ff, 0x00000003,
418 0x214f8, 0x01ff01ff, 0x00000002,
419 0x21498, 0x007ff800, 0x00200000,
420 0x2015c, 0xffffffff, 0x00000f40,
421 0x88c4, 0x001f3ae3, 0x00000082,
422 0x88d4, 0x0000001f, 0x00000010,
423 0x30934, 0xffffffff, 0x00000000
424};
425
426static const u32 kalindi_mgcg_cgcg_init[] =
427{
428 0xc420, 0xffffffff, 0xfffffffc,
429 0x30800, 0xffffffff, 0xe0000000,
430 0x3c2a0, 0xffffffff, 0x00000100,
431 0x3c208, 0xffffffff, 0x00000100,
432 0x3c2c0, 0xffffffff, 0x00000100,
433 0x3c2c8, 0xffffffff, 0x00000100,
434 0x3c2c4, 0xffffffff, 0x00000100,
435 0x55e4, 0xffffffff, 0x00600100,
436 0x3c280, 0xffffffff, 0x00000100,
437 0x3c214, 0xffffffff, 0x06000100,
438 0x3c220, 0xffffffff, 0x00000100,
439 0x3c218, 0xffffffff, 0x06000100,
440 0x3c204, 0xffffffff, 0x00000100,
441 0x3c2e0, 0xffffffff, 0x00000100,
442 0x3c224, 0xffffffff, 0x00000100,
443 0x3c200, 0xffffffff, 0x00000100,
444 0x3c230, 0xffffffff, 0x00000100,
445 0x3c234, 0xffffffff, 0x00000100,
446 0x3c250, 0xffffffff, 0x00000100,
447 0x3c254, 0xffffffff, 0x00000100,
448 0x3c258, 0xffffffff, 0x00000100,
449 0x3c25c, 0xffffffff, 0x00000100,
450 0x3c260, 0xffffffff, 0x00000100,
451 0x3c27c, 0xffffffff, 0x00000100,
452 0x3c278, 0xffffffff, 0x00000100,
453 0x3c210, 0xffffffff, 0x06000100,
454 0x3c290, 0xffffffff, 0x00000100,
455 0x3c274, 0xffffffff, 0x00000100,
456 0x3c2b4, 0xffffffff, 0x00000100,
457 0x3c2b0, 0xffffffff, 0x00000100,
458 0x3c270, 0xffffffff, 0x00000100,
459 0x30800, 0xffffffff, 0xe0000000,
460 0x3c020, 0xffffffff, 0x00010000,
461 0x3c024, 0xffffffff, 0x00030002,
462 0x3c028, 0xffffffff, 0x00040007,
463 0x3c02c, 0xffffffff, 0x00060005,
464 0x3c030, 0xffffffff, 0x00090008,
465 0x3c034, 0xffffffff, 0x00010000,
466 0x3c038, 0xffffffff, 0x00030002,
467 0x3c03c, 0xffffffff, 0x00040007,
468 0x3c040, 0xffffffff, 0x00060005,
469 0x3c044, 0xffffffff, 0x00090008,
470 0x3c000, 0xffffffff, 0x96e00200,
471 0x8708, 0xffffffff, 0x00900100,
472 0xc424, 0xffffffff, 0x0020003f,
473 0x38, 0xffffffff, 0x0140001c,
474 0x3c, 0x000f0000, 0x000f0000,
475 0x220, 0xffffffff, 0xC060000C,
476 0x224, 0xc0000fff, 0x00000100,
477 0x20a8, 0xffffffff, 0x00000104,
478 0x55e4, 0xff000fff, 0x00000100,
479 0x30cc, 0xc0000fff, 0x00000104,
480 0xc1e4, 0x00000001, 0x00000001,
481 0xd00c, 0xff000ff0, 0x00000100,
482 0xd80c, 0xff000ff0, 0x00000100
483};
484
485static void cik_init_golden_registers(struct radeon_device *rdev)
486{
487 switch (rdev->family) {
488 case CHIP_BONAIRE:
489 radeon_program_register_sequence(rdev,
490 bonaire_mgcg_cgcg_init,
491 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
492 radeon_program_register_sequence(rdev,
493 bonaire_golden_registers,
494 (const u32)ARRAY_SIZE(bonaire_golden_registers));
495 radeon_program_register_sequence(rdev,
496 bonaire_golden_common_registers,
497 (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
498 radeon_program_register_sequence(rdev,
499 bonaire_golden_spm_registers,
500 (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
501 break;
502 case CHIP_KABINI:
503 radeon_program_register_sequence(rdev,
504 kalindi_mgcg_cgcg_init,
505 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
506 radeon_program_register_sequence(rdev,
507 kalindi_golden_registers,
508 (const u32)ARRAY_SIZE(kalindi_golden_registers));
509 radeon_program_register_sequence(rdev,
510 kalindi_golden_common_registers,
511 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
512 radeon_program_register_sequence(rdev,
513 kalindi_golden_spm_registers,
514 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
515 break;
516 case CHIP_KAVERI:
517 radeon_program_register_sequence(rdev,
518 spectre_mgcg_cgcg_init,
519 (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
520 radeon_program_register_sequence(rdev,
521 spectre_golden_registers,
522 (const u32)ARRAY_SIZE(spectre_golden_registers));
523 radeon_program_register_sequence(rdev,
524 spectre_golden_common_registers,
525 (const u32)ARRAY_SIZE(spectre_golden_common_registers));
526 radeon_program_register_sequence(rdev,
527 spectre_golden_spm_registers,
528 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
529 break;
530 default:
531 break;
532 }
533}
534
535/**
536 * cik_get_xclk - get the xclk
537 *
538 * @rdev: radeon_device pointer
539 *
540 * Returns the reference clock used by the gfx engine
541 * (CIK).
542 */
543u32 cik_get_xclk(struct radeon_device *rdev)
544{
545 u32 reference_clock = rdev->clock.spll.reference_freq;
546
547 if (rdev->flags & RADEON_IS_IGP) {
548 if (RREG32_SMC(GENERAL_PWRMGT) & GPU_COUNTER_CLK)
549 return reference_clock / 2;
550 } else {
551 if (RREG32_SMC(CG_CLKPIN_CNTL) & XTALIN_DIVIDE)
552 return reference_clock / 4;
553 }
554 return reference_clock;
555}
556
557/**
558 * cik_mm_rdoorbell - read a doorbell dword
559 *
560 * @rdev: radeon_device pointer
561 * @offset: byte offset into the aperture
562 *
563 * Returns the value in the doorbell aperture at the
564 * requested offset (CIK).
565 */
566u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
567{
568 if (offset < rdev->doorbell.size) {
569 return readl(((void __iomem *)rdev->doorbell.ptr) + offset);
570 } else {
571 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset);
572 return 0;
573 }
574}
575
576/**
577 * cik_mm_wdoorbell - write a doorbell dword
578 *
579 * @rdev: radeon_device pointer
580 * @offset: byte offset into the aperture
581 * @v: value to write
582 *
583 * Writes @v to the doorbell aperture at the
584 * requested offset (CIK).
585 */
586void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
587{
588 if (offset < rdev->doorbell.size) {
589 writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset);
590 } else {
591 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset);
592 }
593}
594
/* Bonaire MC I/O debug register values programmed via
 * MC_SEQ_IO_DEBUG_INDEX/DATA before the MC ucode is loaded
 * (see ci_mc_load_microcode()).  Each entry is {index, value}. */
595#define BONAIRE_IO_MC_REGS_SIZE 36
596
597static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
598{
599 {0x00000070, 0x04400000},
600 {0x00000071, 0x80c01803},
601 {0x00000072, 0x00004004},
602 {0x00000073, 0x00000100},
603 {0x00000074, 0x00ff0000},
604 {0x00000075, 0x34000000},
605 {0x00000076, 0x08000014},
606 {0x00000077, 0x00cc08ec},
607 {0x00000078, 0x00000400},
608 {0x00000079, 0x00000000},
609 {0x0000007a, 0x04090000},
610 {0x0000007c, 0x00000000},
611 {0x0000007e, 0x4408a8e8},
612 {0x0000007f, 0x00000304},
613 {0x00000080, 0x00000000},
614 {0x00000082, 0x00000001},
615 {0x00000083, 0x00000002},
616 {0x00000084, 0xf3e4f400},
617 {0x00000085, 0x052024e3},
618 {0x00000087, 0x00000000},
619 {0x00000088, 0x01000000},
620 {0x0000008a, 0x1c0a0000},
621 {0x0000008b, 0xff010000},
622 {0x0000008d, 0xffffefff},
623 {0x0000008e, 0xfff3efff},
624 {0x0000008f, 0xfff3efbf},
625 {0x00000092, 0xf7ffffff},
626 {0x00000093, 0xffffff7f},
627 {0x00000095, 0x00101101},
628 {0x00000096, 0x00000fff},
629 {0x00000097, 0x00116fff},
630 {0x00000098, 0x60010000},
631 {0x00000099, 0x10010000},
632 {0x0000009a, 0x00006000},
633 {0x0000009b, 0x00001000},
634 {0x0000009f, 0x00b48000}
};
636
637/**
638 * cik_srbm_select - select specific register instances
639 *
640 * @rdev: radeon_device pointer
641 * @me: selected ME (micro engine)
642 * @pipe: pipe
643 * @queue: queue
644 * @vmid: VMID
645 *
646 * Switches the currently active registers instances. Some
647 * registers are instanced per VMID, others are instanced per
648 * me/pipe/queue combination.
649 */
650static void cik_srbm_select(struct radeon_device *rdev,
651 u32 me, u32 pipe, u32 queue, u32 vmid)
652{
653 u32 srbm_gfx_cntl = (PIPEID(pipe & 0x3) |
654 MEID(me & 0x3) |
655 VMID(vmid & 0xf) |
656 QUEUEID(queue & 0x7));
657 WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl);
658}
659
660/* ucode loading */
661/**
662 * ci_mc_load_microcode - load MC ucode into the hw
663 *
664 * @rdev: radeon_device pointer
665 *
666 * Load the GDDR MC ucode into the hw (CIK).
667 * Returns 0 on success, error on failure.
668 */
669static int ci_mc_load_microcode(struct radeon_device *rdev)
670{
671 const __be32 *fw_data;
672 u32 running, blackout = 0;
673 u32 *io_mc_regs;
674 int i, ucode_size, regs_size;
675
676 if (!rdev->mc_fw)
677 return -EINVAL;
678
679 switch (rdev->family) {
680 case CHIP_BONAIRE:
681 default:
682 io_mc_regs = (u32 *)&bonaire_io_mc_regs;
683 ucode_size = CIK_MC_UCODE_SIZE;
684 regs_size = BONAIRE_IO_MC_REGS_SIZE;
685 break;
686 }
687
688 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
689
690 if (running == 0) {
691 if (running) {
692 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
693 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
694 }
695
696 /* reset the engine and set to writable */
697 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
698 WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
699
700 /* load mc io regs */
701 for (i = 0; i < regs_size; i++) {
702 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
703 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
704 }
705 /* load the MC ucode */
706 fw_data = (const __be32 *)rdev->mc_fw->data;
707 for (i = 0; i < ucode_size; i++)
708 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
709
710 /* put the engine back into the active state */
711 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
712 WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
713 WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
714
715 /* wait for training to complete */
716 for (i = 0; i < rdev->usec_timeout; i++) {
717 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
718 break;
719 udelay(1);
720 }
721 for (i = 0; i < rdev->usec_timeout; i++) {
722 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
723 break;
724 udelay(1);
725 }
726
727 if (running)
728 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
729 }
730
731 return 0;
732}
733
734/**
735 * cik_init_microcode - load ucode images from disk
736 *
737 * @rdev: radeon_device pointer
738 *
739 * Use the firmware interface to load the ucode images into
740 * the driver (not loaded into hw).
741 * Returns 0 on success, error on failure.
742 */
743static int cik_init_microcode(struct radeon_device *rdev)
744{
745 struct platform_device *pdev;
746 const char *chip_name;
747 size_t pfp_req_size, me_req_size, ce_req_size,
748 mec_req_size, rlc_req_size, mc_req_size,
749 sdma_req_size;
750 char fw_name[30];
751 int err;
752
753 DRM_DEBUG("\n");
754
755 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
756 err = IS_ERR(pdev);
757 if (err) {
758 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
759 return -EINVAL;
760 }
761
762 switch (rdev->family) {
763 case CHIP_BONAIRE:
764 chip_name = "BONAIRE";
765 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
766 me_req_size = CIK_ME_UCODE_SIZE * 4;
767 ce_req_size = CIK_CE_UCODE_SIZE * 4;
768 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
769 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
770 mc_req_size = CIK_MC_UCODE_SIZE * 4;
771 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
772 break;
773 case CHIP_KAVERI:
774 chip_name = "KAVERI";
775 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
776 me_req_size = CIK_ME_UCODE_SIZE * 4;
777 ce_req_size = CIK_CE_UCODE_SIZE * 4;
778 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
779 rlc_req_size = KV_RLC_UCODE_SIZE * 4;
780 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
781 break;
782 case CHIP_KABINI:
783 chip_name = "KABINI";
784 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
785 me_req_size = CIK_ME_UCODE_SIZE * 4;
786 ce_req_size = CIK_CE_UCODE_SIZE * 4;
787 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
788 rlc_req_size = KB_RLC_UCODE_SIZE * 4;
789 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
790 break;
791 default: BUG();
792 }
793
794 DRM_INFO("Loading %s Microcode\n", chip_name);
795
796 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
797 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
798 if (err)
799 goto out;
800 if (rdev->pfp_fw->size != pfp_req_size) {
801 printk(KERN_ERR
802 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
803 rdev->pfp_fw->size, fw_name);
804 err = -EINVAL;
805 goto out;
806 }
807
808 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
809 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
810 if (err)
811 goto out;
812 if (rdev->me_fw->size != me_req_size) {
813 printk(KERN_ERR
814 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
815 rdev->me_fw->size, fw_name);
816 err = -EINVAL;
817 }
818
819 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
820 err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
821 if (err)
822 goto out;
823 if (rdev->ce_fw->size != ce_req_size) {
824 printk(KERN_ERR
825 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
826 rdev->ce_fw->size, fw_name);
827 err = -EINVAL;
828 }
829
830 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
831 err = request_firmware(&rdev->mec_fw, fw_name, &pdev->dev);
832 if (err)
833 goto out;
834 if (rdev->mec_fw->size != mec_req_size) {
835 printk(KERN_ERR
836 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
837 rdev->mec_fw->size, fw_name);
838 err = -EINVAL;
839 }
840
841 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
842 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
843 if (err)
844 goto out;
845 if (rdev->rlc_fw->size != rlc_req_size) {
846 printk(KERN_ERR
847 "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
848 rdev->rlc_fw->size, fw_name);
849 err = -EINVAL;
850 }
851
852 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
853 err = request_firmware(&rdev->sdma_fw, fw_name, &pdev->dev);
854 if (err)
855 goto out;
856 if (rdev->sdma_fw->size != sdma_req_size) {
857 printk(KERN_ERR
858 "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
859 rdev->sdma_fw->size, fw_name);
860 err = -EINVAL;
861 }
862
863 /* No MC ucode on APUs */
864 if (!(rdev->flags & RADEON_IS_IGP)) {
865 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
866 err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
867 if (err)
868 goto out;
869 if (rdev->mc_fw->size != mc_req_size) {
870 printk(KERN_ERR
871 "cik_mc: Bogus length %zu in firmware \"%s\"\n",
872 rdev->mc_fw->size, fw_name);
873 err = -EINVAL;
874 }
875 }
876
877out:
878 platform_device_unregister(pdev);
879
880 if (err) {
881 if (err != -EINVAL)
882 printk(KERN_ERR
883 "cik_cp: Failed to load firmware \"%s\"\n",
884 fw_name);
885 release_firmware(rdev->pfp_fw);
886 rdev->pfp_fw = NULL;
887 release_firmware(rdev->me_fw);
888 rdev->me_fw = NULL;
889 release_firmware(rdev->ce_fw);
890 rdev->ce_fw = NULL;
891 release_firmware(rdev->rlc_fw);
892 rdev->rlc_fw = NULL;
893 release_firmware(rdev->mc_fw);
894 rdev->mc_fw = NULL;
895 }
896 return err;
897}
898
899/*
900 * Core functions
901 */
/**
 * cik_tiling_mode_table_init - init the hw tiling table
 *
 * @rdev: radeon_device pointer
 *
 * Starting with SI, the tiling setup is done globally in a
 * set of 32 tiling modes.  Rather than selecting each set of
 * parameters per surface as on older asics, we just select
 * which index in the tiling table we want to use, and the
 * surface uses those parameters (CIK).
 *
 * The tables below are pure register data: one GB_TILE_MODEn value
 * per index (also cached in rdev->config.cik.tile_mode_array for the
 * CS checker) and one GB_MACROTILE_MODEn value per secondary index.
 * Table indices that are not listed in a switch (e.g. 15, 18-26)
 * default to 0.  Which table is programmed depends on the pipe
 * config (8, 4 or 2 tile pipes) and, for 4 pipes, on the RB count.
 */
static void cik_tiling_mode_table_init(struct radeon_device *rdev)
{
	const u32 num_tile_mode_states = 32;
	const u32 num_secondary_tile_mode_states = 16;
	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
	u32 num_pipe_configs;
	/* total render backends = RBs per SE * number of SEs */
	u32 num_rbs = rdev->config.cik.max_backends_per_se *
		rdev->config.cik.max_shader_engines;

	/* depth surfaces split tiles at row-size granularity */
	switch (rdev->config.cik.mem_row_size_in_kb) {
	case 1:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}

	num_pipe_configs = rdev->config.cik.max_tile_pipes;
	if (num_pipe_configs > 8)
		num_pipe_configs = 8; /* ??? */

	if (num_pipe_configs == 8) {
		/* 8 tile pipes: P8_32x32 pipe configs */
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:	/* depth, 64B tile split */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
				break;
			case 1:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
				break;
			case 2:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				break;
			case 3:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
				break;
			case 4:	/* depth, split = row size */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 5:	/* 1D depth */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 6:	/* PRT depth */
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				break;
			case 7:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 8:	/* linear aligned */
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
				break;
			case 9:	/* 1D display */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				break;
			case 10:	/* 2D display */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 11:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 12:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 13:	/* 1D thin */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				break;
			case 14:	/* 2D thin */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 16:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 17:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 27:	/* 1D rotated */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				break;
			case 28:	/* 2D rotated */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 29:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 30:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			/* cache for the CS checker, then program the hw */
			rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
		/* macrotile (bank) parameters for 8 pipes */
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 1:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 2:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 3:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 4:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 5:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			case 6:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_2_BANK));
				break;
			case 8:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 9:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 10:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 11:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 12:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 13:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			case 14:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_2_BANK));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
	} else if (num_pipe_configs == 4) {
		/* 4 tile pipes: the pipe config additionally depends on the
		 * RB count (P4_16x16 with 4 RBs, P4_8x16 otherwise).
		 * NOTE(review): num_rbs > 4 with 4 pipes writes no tile
		 * modes at all — presumably that combination cannot occur;
		 * confirm against the asic configs.
		 */
		if (num_rbs == 4) {
			for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
				switch (reg_offset) {
				case 0:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
					break;
				case 1:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
					break;
				case 2:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
					break;
				case 3:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
					break;
				case 4:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 TILE_SPLIT(split_equal_to_row_size));
					break;
				case 5:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
					break;
				case 6:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
					break;
				case 7:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 TILE_SPLIT(split_equal_to_row_size));
					break;
				case 8:
					gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16));
					break;
				case 9:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
					break;
				case 10:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 11:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 12:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 13:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
					break;
				case 14:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 16:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 17:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 27:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
					break;
				case 28:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 29:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 30:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				default:
					gb_tile_moden = 0;
					break;
				}
				rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
				WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
			}
		} else if (num_rbs < 4) {
			for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
				switch (reg_offset) {
				case 0:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
					break;
				case 1:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
					break;
				case 2:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
					break;
				case 3:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
					break;
				case 4:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 TILE_SPLIT(split_equal_to_row_size));
					break;
				case 5:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
					break;
				case 6:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
					break;
				case 7:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 TILE_SPLIT(split_equal_to_row_size));
					break;
				case 8:
					gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16));
					break;
				case 9:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
					break;
				case 10:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 11:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 12:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 13:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
					break;
				case 14:
					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 16:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 17:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 27:
					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
					break;
				case 28:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 29:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				case 30:
					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
					break;
				default:
					gb_tile_moden = 0;
					break;
				}
				rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
				WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
			}
		}
		/* macrotile (bank) parameters for 4 pipes (shared by both RB
		 * configurations above)
		 */
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 1:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 2:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 3:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 4:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 5:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 6:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			case 8:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 9:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 10:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 11:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 12:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 13:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 14:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
	} else if (num_pipe_configs == 2) {
		/* 2 tile pipes: P2 pipe config */
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
				break;
			case 1:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
				break;
			case 2:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				break;
			case 3:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
				break;
			case 4:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 5:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 6:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				break;
			case 7:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 8:	/* no PIPE_CONFIG field for P2 linear */
				gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
				break;
			case 9:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				break;
			case 10:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 11:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 12:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 13:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				break;
			case 14:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 16:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 17:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 27:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				break;
			case 28:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 29:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 30:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
		/* macrotile (bank) parameters for 2 pipes */
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 1:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 2:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 3:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 4:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 5:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 6:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 8:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 9:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 10:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 11:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 12:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 13:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 14:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
	} else
		DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
}
1723
1724/**
1725 * cik_select_se_sh - select which SE, SH to address
1726 *
1727 * @rdev: radeon_device pointer
1728 * @se_num: shader engine to address
1729 * @sh_num: sh block to address
1730 *
1731 * Select which SE, SH combinations to address. Certain
1732 * registers are instanced per SE or SH. 0xffffffff means
1733 * broadcast to all SEs or SHs (CIK).
1734 */
1735static void cik_select_se_sh(struct radeon_device *rdev,
1736 u32 se_num, u32 sh_num)
1737{
1738 u32 data = INSTANCE_BROADCAST_WRITES;
1739
1740 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1741 data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
1742 else if (se_num == 0xffffffff)
1743 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
1744 else if (sh_num == 0xffffffff)
1745 data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
1746 else
1747 data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
1748 WREG32(GRBM_GFX_INDEX, data);
1749}
1750
1751/**
1752 * cik_create_bitmask - create a bitmask
1753 *
1754 * @bit_width: length of the mask
1755 *
1756 * create a variable length bit mask (CIK).
1757 * Returns the bitmask.
1758 */
1759static u32 cik_create_bitmask(u32 bit_width)
1760{
1761 u32 i, mask = 0;
1762
1763 for (i = 0; i < bit_width; i++) {
1764 mask <<= 1;
1765 mask |= 1;
1766 }
1767 return mask;
1768}
1769
/**
 * cik_get_rb_disabled - compute the mask of disabled RBs
 *
 * @rdev: radeon_device pointer
 * @max_rb_num: max RBs (render backends) for the asic
 * @se_num: number of SEs (shader engines) for the asic
 * @sh_per_se: number of SH blocks per SE for the asic
 *
 * Calculates the bitmask of disabled RBs (CIK).
 * Returns the disabled RB bitmask.
 */
static u32 cik_get_rb_disabled(struct radeon_device *rdev,
			       u32 max_rb_num, u32 se_num,
			       u32 sh_per_se)
{
	u32 data, mask;

	/* hw-disabled backends; only honor the field when bit 0 is set */
	data = RREG32(CC_RB_BACKEND_DISABLE);
	if (data & 1)
		data &= BACKEND_DISABLE_MASK;
	else
		data = 0;
	/* merge in backends disabled via the user register as well */
	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);

	data >>= BACKEND_DISABLE_SHIFT;

	/* keep only the bits covering the RBs of a single SH */
	mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);

	return data & mask;
}
1800
/**
 * cik_setup_rb - setup the RBs on the asic
 *
 * @rdev: radeon_device pointer
 * @se_num: number of SEs (shader engines) for the asic
 * @sh_per_se: number of SH blocks per SE for the asic
 * @max_rb_num: max RBs (render backends) for the asic
 *
 * Configures per-SE/SH RB registers (CIK).
 */
static void cik_setup_rb(struct radeon_device *rdev,
			 u32 se_num, u32 sh_per_se,
			 u32 max_rb_num)
{
	int i, j;
	u32 data, mask;
	u32 disabled_rbs = 0;
	u32 enabled_rbs = 0;

	/* gather per-SH disabled-RB bits into one packed bitmap,
	 * CIK_RB_BITMAP_WIDTH_PER_SH bits per SH
	 */
	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			cik_select_se_sh(rdev, i, j);
			data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
			disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
		}
	}
	/* back to broadcast addressing */
	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);

	/* invert: set a bit for every RB that is NOT disabled */
	mask = 1;
	for (i = 0; i < max_rb_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	/* program the raster config per SE; enabled_rbs is consumed
	 * two bits at a time (one pair per SH)
	 */
	for (i = 0; i < se_num; i++) {
		cik_select_se_sh(rdev, i, 0xffffffff);
		data = 0;
		for (j = 0; j < sh_per_se; j++) {
			switch (enabled_rbs & 3) {
			case 1:
				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
				break;
			case 2:
				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
				break;
			case 3:
			default:
				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
				break;
			}
			enabled_rbs >>= 2;
		}
		WREG32(PA_SC_RASTER_CONFIG, data);
	}
	/* restore broadcast for subsequent register writes */
	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}
1858
/**
 * cik_gpu_init - setup the 3D engine
 *
 * @rdev: radeon_device pointer
 *
 * Configures the 3D engine and tiling configuration
 * registers so that the 3D engine is usable.
 */
static void cik_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = RREG32(GB_ADDR_CONFIG);
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	/* per-asic shader/backend limits and golden GB_ADDR_CONFIG value */
	switch (rdev->family) {
	case CHIP_BONAIRE:
		rdev->config.cik.max_shader_engines = 2;
		rdev->config.cik.max_tile_pipes = 4;
		rdev->config.cik.max_cu_per_sh = 7;
		rdev->config.cik.max_sh_per_se = 1;
		rdev->config.cik.max_backends_per_se = 2;
		rdev->config.cik.max_texture_channel_caches = 4;
		rdev->config.cik.max_gprs = 256;
		rdev->config.cik.max_gs_threads = 32;
		rdev->config.cik.max_hw_contexts = 8;

		rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
		rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KAVERI:
		/* TODO */
		break;
	case CHIP_KABINI:
	default:
		rdev->config.cik.max_shader_engines = 1;
		rdev->config.cik.max_tile_pipes = 2;
		rdev->config.cik.max_cu_per_sh = 2;
		rdev->config.cik.max_sh_per_se = 1;
		rdev->config.cik.max_backends_per_se = 1;
		rdev->config.cik.max_texture_channel_caches = 2;
		rdev->config.cik.max_gprs = 256;
		rdev->config.cik.max_gs_threads = 16;
		rdev->config.cik.max_hw_contexts = 8;

		rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
		rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	rdev->config.cik.num_tile_pipes = rdev->config.cik.max_tile_pipes;
	rdev->config.cik.mem_max_burst_length_bytes = 256;
	/* derive the memory row size from the MC column configuration */
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cik.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cik.mem_row_size_in_kb > 4)
		rdev->config.cik.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cik.shader_engine_tile_size = 32;
	rdev->config.cik.num_gpus = 1;
	rdev->config.cik.multi_gpu_tile_size = 64;

	/* fix up row size */
	gb_addr_config &= ~ROW_SIZE_MASK;
	switch (rdev->config.cik.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}

	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cik.tile_config = 0;
	switch (rdev->config.cik.num_tile_pipes) {
	case 1:
		rdev->config.cik.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cik.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cik.tile_config |= (2 << 0);
		break;
	case 8:
	default:
		/* XXX what about 12? */
		rdev->config.cik.tile_config |= (3 << 0);
		break;
	}
	if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
		rdev->config.cik.tile_config |= 1 << 4;
	else
		rdev->config.cik.tile_config |= 0 << 4;
	rdev->config.cik.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cik.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	/* mirror the address config into every block that needs it */
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CALC, gb_addr_config);
	WREG32(SDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
	WREG32(SDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);

	cik_tiling_mode_table_init(rdev);

	cik_setup_rb(rdev, rdev->config.cik.max_shader_engines,
		     rdev->config.cik.max_sh_per_se,
		     rdev->config.cik.max_backends_per_se);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	WREG32(SX_DEBUG_1, 0x20);

	WREG32(TA_CNTL_AUX, 0x00010000);

	tmp = RREG32(SPI_CONFIG_CNTL);
	tmp |= 0x03000000;
	WREG32(SPI_CONFIG_CNTL, tmp);

	WREG32(SQ_CONFIG, 1);

	WREG32(DB_DEBUG, 0);

	tmp = RREG32(DB_DEBUG2) & ~0xf00fffff;
	tmp |= 0x00000400;
	WREG32(DB_DEBUG2, tmp);

	tmp = RREG32(DB_DEBUG3) & ~0x0002021c;
	tmp |= 0x00020200;
	WREG32(DB_DEBUG3, tmp);

	tmp = RREG32(CB_HW_CONTROL) & ~0x00010000;
	tmp |= 0x00018208;
	WREG32(CB_HW_CONTROL, tmp);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));

	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_frontend) |
				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_backend) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cik.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cik.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_CONFIG, 0);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
	WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);

	/* let the hw settle after the config writes */
	udelay(50);
}
2067
/*
 * GPU scratch register helper functions.
 */
2071/**
2072 * cik_scratch_init - setup driver info for CP scratch regs
2073 *
2074 * @rdev: radeon_device pointer
2075 *
2076 * Set up the number and offset of the CP scratch registers.
2077 * NOTE: use of CP scratch registers is a legacy inferface and
2078 * is not used by default on newer asics (r6xx+). On newer asics,
2079 * memory buffers are used for fences rather than scratch regs.
2080 */
2081static void cik_scratch_init(struct radeon_device *rdev)
2082{
2083 int i;
2084
2085 rdev->scratch.num_reg = 7;
2086 rdev->scratch.reg_base = SCRATCH_REG0;
2087 for (i = 0; i < rdev->scratch.num_reg; i++) {
2088 rdev->scratch.free[i] = true;
2089 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2090 }
2091}
2092
/**
 * cik_ring_test - basic gfx ring test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Allocate a scratch register and write to it using the gfx ring (CIK).
 * Provides a basic gfx ring test to verify that the ring is working.
 * Used by cik_cp_gfx_resume();
 * Returns 0 on success, error on failure.
 */
int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	/* seed the scratch reg with a sentinel, then overwrite it via the ring */
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	/* 3-dword packet: write 0xDEADBEEF to the scratch register */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);

	/* poll until the CP has executed the write or we time out */
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
2144
/**
 * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Emits a fence sequence number on the gfx ring and flushes
 * GPU caches.
 */
void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
			     struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	radeon_ring_write(ring, addr & 0xfffffffc);
	/* DATA_SEL(1): write the 32-bit seq; INT_SEL(2): raise an interrupt */
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
	/* HDP flush */
	/* We should be using the new WAIT_REG_MEM special op packet here
	 * but it causes the CP to hang
	 */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
}
2181
/**
 * cik_fence_compute_ring_emit - emit a fence on the compute ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Emits a fence sequence number on the compute ring and flushes
 * GPU caches.
 */
void cik_fence_compute_ring_emit(struct radeon_device *rdev,
				 struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* RELEASE_MEM - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	/* DATA_SEL(1): write the 32-bit seq; INT_SEL(2): raise an interrupt */
	radeon_ring_write(ring, DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(addr));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
	/* HDP flush */
	/* We should be using the new WAIT_REG_MEM special op packet here
	 * but it causes the CP to hang
	 */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
}
2219
/**
 * cik_semaphore_ring_emit - emit a semaphore on the CP ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring buffer object
 * @semaphore: radeon semaphore object
 * @emit_wait: true to emit a wait, false to emit a signal
 *
 * Emits a MEM_SEMAPHORE packet that either signals or waits on
 * the given semaphore address (CIK).
 */
void cik_semaphore_ring_emit(struct radeon_device *rdev,
			     struct radeon_ring *ring,
			     struct radeon_semaphore *semaphore,
			     bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
	radeon_ring_write(ring, addr & 0xffffffff);
	/* upper address bits share a dword with the signal/wait select */
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
}
2232
2233/*
2234 * IB stuff
2235 */
/**
 * cik_ring_ib_execute - emit an IB (Indirect Buffer) on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @ib: radeon indirect buffer object
 *
 * Emits a DE (drawing engine) or CE (constant engine) IB
 * on the gfx ring.  IBs are usually generated by userspace
 * acceleration drivers and submitted to the kernel for
 * scheduling on the ring.  This function schedules the IB
 * on the gfx ring for execution by the GPU.
 */
void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 header, control = INDIRECT_BUFFER_VALID;

	if (ib->is_const_ib) {
		/* set switch buffer packet before const IB */
		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		radeon_ring_write(ring, 0);

		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	} else {
		u32 next_rptr;
		if (ring->rptr_save_reg) {
			/* 3 dwords for this packet + 4 for the IB packet below */
			next_rptr = ring->wptr + 3 + 4;
			radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
			radeon_ring_write(ring, ((ring->rptr_save_reg -
						  PACKET3_SET_UCONFIG_REG_START) >> 2));
			radeon_ring_write(ring, next_rptr);
		} else if (rdev->wb.enabled) {
			/* 5 dwords for this packet + 4 for the IB packet below */
			next_rptr = ring->wptr + 5 + 4;
			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
			radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
			radeon_ring_write(ring, next_rptr);
		}

		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
	}

	/* pack the IB size and the VM id (0 when no VM) into the control dword */
	control |= ib->length_dw |
		(ib->vm ? (ib->vm->id << 24) : 0);

	radeon_ring_write(ring, header);
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	radeon_ring_write(ring, control);
}
2291
2292/**
2293 * cik_ib_test - basic gfx ring IB test
2294 *
2295 * @rdev: radeon_device pointer
2296 * @ring: radeon_ring structure holding ring information
2297 *
2298 * Allocate an IB and execute it on the gfx ring (CIK).
2299 * Provides a basic gfx ring test to verify that IBs are working.
2300 * Returns 0 on success, error on failure.
2301 */
2302int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
2303{
2304 struct radeon_ib ib;
2305 uint32_t scratch;
2306 uint32_t tmp = 0;
2307 unsigned i;
2308 int r;
2309
2310 r = radeon_scratch_get(rdev, &scratch);
2311 if (r) {
2312 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2313 return r;
2314 }
2315 WREG32(scratch, 0xCAFEDEAD);
2316 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
2317 if (r) {
2318 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2319 return r;
2320 }
2321 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
2322 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
2323 ib.ptr[2] = 0xDEADBEEF;
2324 ib.length_dw = 3;
2325 r = radeon_ib_schedule(rdev, &ib, NULL);
2326 if (r) {
2327 radeon_scratch_free(rdev, scratch);
2328 radeon_ib_free(rdev, &ib);
2329 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2330 return r;
2331 }
2332 r = radeon_fence_wait(ib.fence, false);
2333 if (r) {
2334 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2335 return r;
2336 }
2337 for (i = 0; i < rdev->usec_timeout; i++) {
2338 tmp = RREG32(scratch);
2339 if (tmp == 0xDEADBEEF)
2340 break;
2341 DRM_UDELAY(1);
2342 }
2343 if (i < rdev->usec_timeout) {
2344 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
2345 } else {
2346 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2347 scratch, tmp);
2348 r = -EINVAL;
2349 }
2350 radeon_scratch_free(rdev, scratch);
2351 radeon_ib_free(rdev, &ib);
2352 return r;
2353}
2354
/*
 * CP.
 * On CIK, gfx and compute now have independent command processors.
 *
 * GFX
 * Gfx consists of a single ring and can process both gfx jobs and
 * compute jobs.  The gfx CP consists of three microengines (ME):
 * PFP - Pre-Fetch Parser
 * ME - Micro Engine
 * CE - Constant Engine
 * The PFP and ME make up what is considered the Drawing Engine (DE).
 * The CE is an asynchronous engine used for updating buffer descriptors
 * used by the DE so that they can be loaded into cache in parallel
 * while the DE is processing state update packets.
 *
 * Compute
 * The compute CP consists of two microengines (ME):
 * MEC1 - Compute MicroEngine 1
 * MEC2 - Compute MicroEngine 2
 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
 * The queues are exposed to userspace and are programmed directly
 * by the compute runtime.
 */
2378/**
2379 * cik_cp_gfx_enable - enable/disable the gfx CP MEs
2380 *
2381 * @rdev: radeon_device pointer
2382 * @enable: enable or disable the MEs
2383 *
2384 * Halts or unhalts the gfx MEs.
2385 */
2386static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
2387{
2388 if (enable)
2389 WREG32(CP_ME_CNTL, 0);
2390 else {
2391 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
2392 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2393 }
2394 udelay(50);
2395}
2396
/**
 * cik_cp_gfx_load_microcode - load the gfx CP ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the gfx PFP, ME, and CE ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
		return -EINVAL;

	/* the MEs must be halted while their ucode is replaced */
	cik_cp_gfx_enable(rdev, false);

	/* PFP: reset the write address, stream in the words, reset again */
	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	/* CE */
	fw_data = (const __be32 *)rdev->ce_fw->data;
	WREG32(CP_CE_UCODE_ADDR, 0);
	for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
		WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_CE_UCODE_ADDR, 0);

	/* ME */
	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_ME_RAM_WADDR, 0);

	/* leave all ucode address registers zeroed */
	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_CE_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
2442
/**
 * cik_cp_gfx_start - start the gfx ring
 *
 * @rdev: radeon_device pointer
 *
 * Enables the ring and loads the clear state context and other
 * packets required to init the ring.
 * Returns 0 for success, error for failure.
 */
static int cik_cp_gfx_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	/* init the CP */
	WREG32(CP_MAX_CONTEXT, rdev->config.cik.max_hw_contexts - 1);
	WREG32(CP_ENDIAN_SWAP, 0);
	WREG32(CP_DEVICE_ID, 1);

	cik_cp_gfx_enable(rdev, true);

	/* 17 dwords of init packets plus the default state below */
	r = radeon_ring_lock(rdev, ring, cik_default_size + 17);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* init the CE partitions.  CE only used for gfx on CIK */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	radeon_ring_write(ring, 0xc000);
	radeon_ring_write(ring, 0xc000);

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	radeon_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	radeon_ring_write(ring, 0x80000000);
	radeon_ring_write(ring, 0x80000000);

	/* stream in the golden default register state */
	for (i = 0; i < cik_default_size; i++)
		radeon_ring_write(ring, cik_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev, ring);

	return 0;
}
2503
/**
 * cik_cp_gfx_fini - stop the gfx ring
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx ring and tear down the driver ring
 * info.
 */
static void cik_cp_gfx_fini(struct radeon_device *rdev)
{
	/* halt the gfx MEs before tearing down the ring they consume */
	cik_cp_gfx_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
2517
/**
 * cik_cp_gfx_resume - setup the gfx ring buffer registers
 *
 * @rdev: radeon_device pointer
 *
 * Program the location and size of the gfx ring buffer
 * and test it to make sure it's working.
 * Returns 0 for success, error for failure.
 */
static int cik_cp_gfx_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr;
	int r;

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32(CP_RB_VMID, 0);

	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);

	/* scratch register shadowing is no longer supported */
	WREG32(SCRATCH_UMSK, 0);

	if (!rdev->wb.enabled)
		tmp |= RB_NO_UPDATE;

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	/* program the ring buffer base address (256-byte aligned) */
	rb_addr = ring->gpu_addr >> 8;
	WREG32(CP_RB0_BASE, rb_addr);
	WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));

	ring->rptr = RREG32(CP_RB0_RPTR);

	/* start the ring */
	cik_cp_gfx_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		return r;
	}
	return 0;
}
2590
2591u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
2592 struct radeon_ring *ring)
2593{
2594 u32 rptr;
2595
2596
2597
2598 if (rdev->wb.enabled) {
2599 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
2600 } else {
2601 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
2602 rptr = RREG32(CP_HQD_PQ_RPTR);
2603 cik_srbm_select(rdev, 0, 0, 0, 0);
2604 }
2605 rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
2606
2607 return rptr;
2608}
2609
2610u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
2611 struct radeon_ring *ring)
2612{
2613 u32 wptr;
2614
2615 if (rdev->wb.enabled) {
2616 wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
2617 } else {
2618 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
2619 wptr = RREG32(CP_HQD_PQ_WPTR);
2620 cik_srbm_select(rdev, 0, 0, 0, 0);
2621 }
2622 wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
2623
2624 return wptr;
2625}
2626
/**
 * cik_compute_ring_set_wptr - commit the write pointer of a compute ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Encodes the ring's write pointer, mirrors it into the writeback
 * buffer, and rings the queue's doorbell so the CP picks it up.
 */
void cik_compute_ring_set_wptr(struct radeon_device *rdev,
			       struct radeon_ring *ring)
{
	u32 wptr = (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask;

	/* update the shadow copy, then kick the doorbell */
	rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(wptr);
	WDOORBELL32(ring->doorbell_offset, wptr);
}
2635
2636/**
2637 * cik_cp_compute_enable - enable/disable the compute CP MEs
2638 *
2639 * @rdev: radeon_device pointer
2640 * @enable: enable or disable the MEs
2641 *
2642 * Halts or unhalts the compute MEs.
2643 */
2644static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
2645{
2646 if (enable)
2647 WREG32(CP_MEC_CNTL, 0);
2648 else
2649 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
2650 udelay(50);
2651}
2652
/**
 * cik_cp_compute_load_microcode - load the compute CP ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the compute MEC1&2 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->mec_fw)
		return -EINVAL;

	/* the MECs must be halted while their ucode is replaced */
	cik_cp_compute_enable(rdev, false);

	/* MEC1: reset the write address, stream in the words, reset again */
	fw_data = (const __be32 *)rdev->mec_fw->data;
	WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
	for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
		WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_MEC_ME1_UCODE_ADDR, 0);

	/* only KAVERI has a second MEC; it reuses the same ucode image */
	if (rdev->family == CHIP_KAVERI) {
		/* MEC2 */
		fw_data = (const __be32 *)rdev->mec_fw->data;
		WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
		for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
			WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
		WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
	}

	return 0;
}
2689
/**
 * cik_cp_compute_start - start the compute queues
 *
 * @rdev: radeon_device pointer
 *
 * Enable the compute queues.
 * Returns 0 for success, error for failure.
 */
static int cik_cp_compute_start(struct radeon_device *rdev)
{
	/* currently just unhalts the MECs; always succeeds */
	cik_cp_compute_enable(rdev, true);

	return 0;
}
2704
2705/**
2706 * cik_cp_compute_fini - stop the compute queues
2707 *
2708 * @rdev: radeon_device pointer
2709 *
2710 * Stop the compute queues and tear down the driver queue
2711 * info.
2712 */
2713static void cik_cp_compute_fini(struct radeon_device *rdev)
2714{
2715 int i, idx, r;
2716
2717 cik_cp_compute_enable(rdev, false);
2718
2719 for (i = 0; i < 2; i++) {
2720 if (i == 0)
2721 idx = CAYMAN_RING_TYPE_CP1_INDEX;
2722 else
2723 idx = CAYMAN_RING_TYPE_CP2_INDEX;
2724
2725 if (rdev->ring[idx].mqd_obj) {
2726 r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
2727 if (unlikely(r != 0))
2728 dev_warn(rdev->dev, "(%d) reserve MQD bo failed\n", r);
2729
2730 radeon_bo_unpin(rdev->ring[idx].mqd_obj);
2731 radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
2732
2733 radeon_bo_unref(&rdev->ring[idx].mqd_obj);
2734 rdev->ring[idx].mqd_obj = NULL;
2735 }
2736 }
2737}
2738
2739static void cik_mec_fini(struct radeon_device *rdev)
2740{
2741 int r;
2742
2743 if (rdev->mec.hpd_eop_obj) {
2744 r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
2745 if (unlikely(r != 0))
2746 dev_warn(rdev->dev, "(%d) reserve HPD EOP bo failed\n", r);
2747 radeon_bo_unpin(rdev->mec.hpd_eop_obj);
2748 radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
2749
2750 radeon_bo_unref(&rdev->mec.hpd_eop_obj);
2751 rdev->mec.hpd_eop_obj = NULL;
2752 }
2753}
2754
/* bytes per HPD EOP slot; two slots are allocated per pipe below */
#define MEC_HPD_SIZE 2048

/* Record the MEC/pipe/queue topology for this asic and allocate,
 * pin (GTT) and zero the HPD EOP buffer shared by all MEC pipes.
 * Cleans up via cik_mec_fini() on any failure.
 * Returns 0 on success, negative error code on failure.
 */
static int cik_mec_init(struct radeon_device *rdev)
{
	int r;
	u32 *hpd;

	/*
	 * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
	 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
	 */
	if (rdev->family == CHIP_KAVERI)
		rdev->mec.num_mec = 2;
	else
		rdev->mec.num_mec = 1;
	rdev->mec.num_pipe = 4;
	rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;

	if (rdev->mec.hpd_eop_obj == NULL) {
		/* two MEC_HPD_SIZE slots per pipe */
		r = radeon_bo_create(rdev,
				     rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL,
				     &rdev->mec.hpd_eop_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r);
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
	if (unlikely(r != 0)) {
		cik_mec_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->mec.hpd_eop_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->mec.hpd_eop_gpu_addr);
	if (r) {
		dev_warn(rdev->dev, "(%d) pin HDP EOP bo failed\n", r);
		cik_mec_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->mec.hpd_eop_obj, (void **)&hpd);
	if (r) {
		dev_warn(rdev->dev, "(%d) map HDP EOP bo failed\n", r);
		cik_mec_fini(rdev);
		return r;
	}

	/* clear memory. Not sure if this is required or not */
	memset(hpd, 0, rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2);

	radeon_bo_kunmap(rdev->mec.hpd_eop_obj);
	radeon_bo_unreserve(rdev->mec.hpd_eop_obj);

	return 0;
}
2812
/* Shadow copy of the per-queue HQD (hardware queue descriptor) and MQD
 * control registers as kept inside the MQD. The field order mirrors the
 * CP_HQD_* / CP_MQD_* register block programmed in
 * cik_cp_compute_resume() and must not be rearranged.
 */
struct hqd_registers
{
	u32 cp_mqd_base_addr;
	u32 cp_mqd_base_addr_hi;
	u32 cp_hqd_active;
	u32 cp_hqd_vmid;
	u32 cp_hqd_persistent_state;
	u32 cp_hqd_pipe_priority;
	u32 cp_hqd_queue_priority;
	u32 cp_hqd_quantum;
	u32 cp_hqd_pq_base;
	u32 cp_hqd_pq_base_hi;
	u32 cp_hqd_pq_rptr;
	u32 cp_hqd_pq_rptr_report_addr;
	u32 cp_hqd_pq_rptr_report_addr_hi;
	u32 cp_hqd_pq_wptr_poll_addr;
	u32 cp_hqd_pq_wptr_poll_addr_hi;
	u32 cp_hqd_pq_doorbell_control;
	u32 cp_hqd_pq_wptr;
	u32 cp_hqd_pq_control;
	u32 cp_hqd_ib_base_addr;
	u32 cp_hqd_ib_base_addr_hi;
	u32 cp_hqd_ib_rptr;
	u32 cp_hqd_ib_control;
	u32 cp_hqd_iq_timer;
	u32 cp_hqd_iq_rptr;
	u32 cp_hqd_dequeue_request;
	u32 cp_hqd_dma_offload;
	u32 cp_hqd_sema_cmd;
	u32 cp_hqd_msg_type;
	u32 cp_hqd_atomic0_preop_lo;
	u32 cp_hqd_atomic0_preop_hi;
	u32 cp_hqd_atomic1_preop_lo;
	u32 cp_hqd_atomic1_preop_hi;
	u32 cp_hqd_hq_scheduler0;
	u32 cp_hqd_hq_scheduler1;
	u32 cp_mqd_control;
};
2851
/* Memory queue descriptor (MQD) layout for Bonaire-class compute
 * queues; one instance lives in each compute ring's mqd_obj GTT bo.
 * Field order is consumed by hardware/firmware — do not rearrange.
 * NOTE(review): field meanings beyond queue_state are inferred from
 * names only; only header, static_thread_mgmt* and queue_state are
 * written by the visible code.
 */
struct bonaire_mqd
{
	u32 header;
	u32 dispatch_initiator;
	u32 dimensions[3];
	u32 start_idx[3];
	u32 num_threads[3];
	u32 pipeline_stat_enable;
	u32 perf_counter_enable;
	u32 pgm[2];
	u32 tba[2];
	u32 tma[2];
	u32 pgm_rsrc[2];
	u32 vmid;
	u32 resource_limits;
	u32 static_thread_mgmt01[2];
	u32 tmp_ring_size;
	u32 static_thread_mgmt23[2];
	u32 restart[3];
	u32 thread_trace_enable;
	u32 reserved1;
	u32 user_data[16];
	u32 vgtcs_invoke_count[2];
	struct hqd_registers queue_state;	/* see struct hqd_registers */
	u32 dequeue_cntr;
	u32 interrupt_queue[64];
};
2879
2880/**
2881 * cik_cp_compute_resume - setup the compute queue registers
2882 *
2883 * @rdev: radeon_device pointer
2884 *
2885 * Program the compute queues and test them to make sure they
2886 * are working.
2887 * Returns 0 for success, error for failure.
2888 */
2889static int cik_cp_compute_resume(struct radeon_device *rdev)
2890{
2891 int r, i, idx;
2892 u32 tmp;
2893 bool use_doorbell = true;
2894 u64 hqd_gpu_addr;
2895 u64 mqd_gpu_addr;
2896 u64 eop_gpu_addr;
2897 u64 wb_gpu_addr;
2898 u32 *buf;
2899 struct bonaire_mqd *mqd;
2900
2901 r = cik_cp_compute_start(rdev);
2902 if (r)
2903 return r;
2904
2905 /* fix up chicken bits */
2906 tmp = RREG32(CP_CPF_DEBUG);
2907 tmp |= (1 << 23);
2908 WREG32(CP_CPF_DEBUG, tmp);
2909
2910 /* init the pipes */
2911 for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
2912 int me = (i < 4) ? 1 : 2;
2913 int pipe = (i < 4) ? i : (i - 4);
2914
2915 eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
2916
2917 cik_srbm_select(rdev, me, pipe, 0, 0);
2918
2919 /* write the EOP addr */
2920 WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
2921 WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
2922
2923 /* set the VMID assigned */
2924 WREG32(CP_HPD_EOP_VMID, 0);
2925
2926 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2927 tmp = RREG32(CP_HPD_EOP_CONTROL);
2928 tmp &= ~EOP_SIZE_MASK;
2929 tmp |= drm_order(MEC_HPD_SIZE / 8);
2930 WREG32(CP_HPD_EOP_CONTROL, tmp);
2931 }
2932 cik_srbm_select(rdev, 0, 0, 0, 0);
2933
2934 /* init the queues. Just two for now. */
2935 for (i = 0; i < 2; i++) {
2936 if (i == 0)
2937 idx = CAYMAN_RING_TYPE_CP1_INDEX;
2938 else
2939 idx = CAYMAN_RING_TYPE_CP2_INDEX;
2940
2941 if (rdev->ring[idx].mqd_obj == NULL) {
2942 r = radeon_bo_create(rdev,
2943 sizeof(struct bonaire_mqd),
2944 PAGE_SIZE, true,
2945 RADEON_GEM_DOMAIN_GTT, NULL,
2946 &rdev->ring[idx].mqd_obj);
2947 if (r) {
2948 dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
2949 return r;
2950 }
2951 }
2952
2953 r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
2954 if (unlikely(r != 0)) {
2955 cik_cp_compute_fini(rdev);
2956 return r;
2957 }
2958 r = radeon_bo_pin(rdev->ring[idx].mqd_obj, RADEON_GEM_DOMAIN_GTT,
2959 &mqd_gpu_addr);
2960 if (r) {
2961 dev_warn(rdev->dev, "(%d) pin MQD bo failed\n", r);
2962 cik_cp_compute_fini(rdev);
2963 return r;
2964 }
2965 r = radeon_bo_kmap(rdev->ring[idx].mqd_obj, (void **)&buf);
2966 if (r) {
2967 dev_warn(rdev->dev, "(%d) map MQD bo failed\n", r);
2968 cik_cp_compute_fini(rdev);
2969 return r;
2970 }
2971
2972 /* doorbell offset */
2973 rdev->ring[idx].doorbell_offset =
2974 (rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;
2975
2976 /* init the mqd struct */
2977 memset(buf, 0, sizeof(struct bonaire_mqd));
2978
2979 mqd = (struct bonaire_mqd *)buf;
2980 mqd->header = 0xC0310800;
2981 mqd->static_thread_mgmt01[0] = 0xffffffff;
2982 mqd->static_thread_mgmt01[1] = 0xffffffff;
2983 mqd->static_thread_mgmt23[0] = 0xffffffff;
2984 mqd->static_thread_mgmt23[1] = 0xffffffff;
2985
2986 cik_srbm_select(rdev, rdev->ring[idx].me,
2987 rdev->ring[idx].pipe,
2988 rdev->ring[idx].queue, 0);
2989
2990 /* disable wptr polling */
2991 tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
2992 tmp &= ~WPTR_POLL_EN;
2993 WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
2994
2995 /* enable doorbell? */
2996 mqd->queue_state.cp_hqd_pq_doorbell_control =
2997 RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
2998 if (use_doorbell)
2999 mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
3000 else
3001 mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_EN;
3002 WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
3003 mqd->queue_state.cp_hqd_pq_doorbell_control);
3004
3005 /* disable the queue if it's active */
3006 mqd->queue_state.cp_hqd_dequeue_request = 0;
3007 mqd->queue_state.cp_hqd_pq_rptr = 0;
3008 mqd->queue_state.cp_hqd_pq_wptr= 0;
3009 if (RREG32(CP_HQD_ACTIVE) & 1) {
3010 WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
3011 for (i = 0; i < rdev->usec_timeout; i++) {
3012 if (!(RREG32(CP_HQD_ACTIVE) & 1))
3013 break;
3014 udelay(1);
3015 }
3016 WREG32(CP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
3017 WREG32(CP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
3018 WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
3019 }
3020
3021 /* set the pointer to the MQD */
3022 mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
3023 mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
3024 WREG32(CP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
3025 WREG32(CP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
3026 /* set MQD vmid to 0 */
3027 mqd->queue_state.cp_mqd_control = RREG32(CP_MQD_CONTROL);
3028 mqd->queue_state.cp_mqd_control &= ~MQD_VMID_MASK;
3029 WREG32(CP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
3030
3031 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
3032 hqd_gpu_addr = rdev->ring[idx].gpu_addr >> 8;
3033 mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
3034 mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3035 WREG32(CP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
3036 WREG32(CP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
3037
3038 /* set up the HQD, this is similar to CP_RB0_CNTL */
3039 mqd->queue_state.cp_hqd_pq_control = RREG32(CP_HQD_PQ_CONTROL);
3040 mqd->queue_state.cp_hqd_pq_control &=
3041 ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
3042
3043 mqd->queue_state.cp_hqd_pq_control |=
3044 drm_order(rdev->ring[idx].ring_size / 8);
3045 mqd->queue_state.cp_hqd_pq_control |=
3046 (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8);
3047#ifdef __BIG_ENDIAN
3048 mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
3049#endif
3050 mqd->queue_state.cp_hqd_pq_control &=
3051 ~(UNORD_DISPATCH | ROQ_PQ_IB_FLIP | PQ_VOLATILE);
3052 mqd->queue_state.cp_hqd_pq_control |=
3053 PRIV_STATE | KMD_QUEUE; /* assuming kernel queue control */
3054 WREG32(CP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
3055
3056 /* only used if CP_PQ_WPTR_POLL_CNTL.WPTR_POLL_EN=1 */
3057 if (i == 0)
3058 wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP1_WPTR_OFFSET;
3059 else
3060 wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP2_WPTR_OFFSET;
3061 mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
3062 mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3063 WREG32(CP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
3064 WREG32(CP_HQD_PQ_WPTR_POLL_ADDR_HI,
3065 mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
3066
3067 /* set the wb address wether it's enabled or not */
3068 if (i == 0)
3069 wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET;
3070 else
3071 wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET;
3072 mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
3073 mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
3074 upper_32_bits(wb_gpu_addr) & 0xffff;
3075 WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR,
3076 mqd->queue_state.cp_hqd_pq_rptr_report_addr);
3077 WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3078 mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
3079
3080 /* enable the doorbell if requested */
3081 if (use_doorbell) {
3082 mqd->queue_state.cp_hqd_pq_doorbell_control =
3083 RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
3084 mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
3085 mqd->queue_state.cp_hqd_pq_doorbell_control |=
3086 DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4);
3087 mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
3088 mqd->queue_state.cp_hqd_pq_doorbell_control &=
3089 ~(DOORBELL_SOURCE | DOORBELL_HIT);
3090
3091 } else {
3092 mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
3093 }
3094 WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
3095 mqd->queue_state.cp_hqd_pq_doorbell_control);
3096
3097 /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3098 rdev->ring[idx].wptr = 0;
3099 mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr;
3100 WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
3101 rdev->ring[idx].rptr = RREG32(CP_HQD_PQ_RPTR);
3102 mqd->queue_state.cp_hqd_pq_rptr = rdev->ring[idx].rptr;
3103
3104 /* set the vmid for the queue */
3105 mqd->queue_state.cp_hqd_vmid = 0;
3106 WREG32(CP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
3107
3108 /* activate the queue */
3109 mqd->queue_state.cp_hqd_active = 1;
3110 WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
3111
3112 cik_srbm_select(rdev, 0, 0, 0, 0);
3113
3114 radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
3115 radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
3116
3117 rdev->ring[idx].ready = true;
3118 r = radeon_ring_test(rdev, idx, &rdev->ring[idx]);
3119 if (r)
3120 rdev->ring[idx].ready = false;
3121 }
3122
3123 return 0;
3124}
3125
/* Enable or disable both the gfx and compute command processors. */
static void cik_cp_enable(struct radeon_device *rdev, bool enable)
{
	cik_cp_gfx_enable(rdev, enable);
	cik_cp_compute_enable(rdev, enable);
}
3131
/* Load the gfx then compute CP microcode.
 * Returns 0 on success, negative error code on failure.
 */
static int cik_cp_load_microcode(struct radeon_device *rdev)
{
	int r;

	r = cik_cp_gfx_load_microcode(rdev);
	if (r)
		return r;

	return cik_cp_compute_load_microcode(rdev);
}
3145
/* Tear down both the gfx and compute command processors. */
static void cik_cp_fini(struct radeon_device *rdev)
{
	cik_cp_gfx_fini(rdev);
	cik_cp_compute_fini(rdev);
}
3151
3152static int cik_cp_resume(struct radeon_device *rdev)
3153{
3154 int r;
3155
3156 /* Reset all cp blocks */
3157 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
3158 RREG32(GRBM_SOFT_RESET);
3159 mdelay(15);
3160 WREG32(GRBM_SOFT_RESET, 0);
3161 RREG32(GRBM_SOFT_RESET);
3162
3163 r = cik_cp_load_microcode(rdev);
3164 if (r)
3165 return r;
3166
3167 r = cik_cp_gfx_resume(rdev);
3168 if (r)
3169 return r;
3170 r = cik_cp_compute_resume(rdev);
3171 if (r)
3172 return r;
3173
3174 return 0;
3175}
3176
3177/*
3178 * sDMA - System DMA
3179 * Starting with CIK, the GPU has new asynchronous
3180 * DMA engines. These engines are used for compute
3181 * and gfx. There are two DMA engines (SDMA0, SDMA1)
3182 * and each one supports 1 ring buffer used for gfx
3183 * and 2 queues used for compute.
3184 *
3185 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
3187 * packet format that is different from the PM4 format
3188 * used by the CP. sDMA supports copying data, writing
3189 * embedded data, solid fills, and a number of other
3190 * things. It also has support for tiling/detiling of
3191 * buffers.
3192 */
/**
 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
			      struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;	/* VMID in low 4 bits */

	if (rdev->wb.enabled) {
		/* advance next_rptr to 4 (mod 8), then add 4 so the value
		 * written is 8-dword aligned — it predicts where the rptr
		 * will be after the aligned IB packet emitted below
		 */
		u32 next_rptr = ring->wptr + 5;
		while ((next_rptr & 7) != 4)
			next_rptr++;
		next_rptr += 4;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
		radeon_ring_write(ring, 1); /* number of DWs to follow */
		radeon_ring_write(ring, next_rptr);
	}

	/* IB packet must end on a 8 DW boundary; pad with NOPs until the
	 * 4-dword IB packet will do so
	 */
	while ((ring->wptr & 7) != 4)
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
	radeon_ring_write(ring, ib->length_dw);

}
3228
/**
 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
	/* POLL_REG_MEM with memory op and "equal" compare function */
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	/* pick this engine's bit in the HDP flush done/req registers */
	if (fence->ring == R600_RING_TYPE_DMA_INDEX)
		ref_and_mask = SDMA0;
	else
		ref_and_mask = SDMA1;

	/* write the fence */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
	/* flush HDP */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
	radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
	radeon_ring_write(ring, ref_and_mask); /* MASK */
	radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
}
3268
3269/**
3270 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
3271 *
3272 * @rdev: radeon_device pointer
3273 * @ring: radeon_ring structure holding ring information
3274 * @semaphore: radeon semaphore object
3275 * @emit_wait: wait or signal semaphore
3276 *
3277 * Add a DMA semaphore packet to the ring wait on or signal
3278 * other rings (CIK).
3279 */
3280void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
3281 struct radeon_ring *ring,
3282 struct radeon_semaphore *semaphore,
3283 bool emit_wait)
3284{
3285 u64 addr = semaphore->gpu_addr;
3286 u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
3287
3288 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
3289 radeon_ring_write(ring, addr & 0xfffffff8);
3290 radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3291}
3292
3293/**
3294 * cik_sdma_gfx_stop - stop the gfx async dma engines
3295 *
3296 * @rdev: radeon_device pointer
3297 *
3298 * Stop the gfx async dma ring buffers (CIK).
3299 */
3300static void cik_sdma_gfx_stop(struct radeon_device *rdev)
3301{
3302 u32 rb_cntl, reg_offset;
3303 int i;
3304
3305 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
3306
3307 for (i = 0; i < 2; i++) {
3308 if (i == 0)
3309 reg_offset = SDMA0_REGISTER_OFFSET;
3310 else
3311 reg_offset = SDMA1_REGISTER_OFFSET;
3312 rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
3313 rb_cntl &= ~SDMA_RB_ENABLE;
3314 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
3315 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
3316 }
3317}
3318
/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute async dma queues (CIK).
 * Currently a stub — compute sDMA queue teardown is not implemented.
 */
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
{
	/* XXX todo */
}
3330
3331/**
3332 * cik_sdma_enable - stop the async dma engines
3333 *
3334 * @rdev: radeon_device pointer
3335 * @enable: enable/disable the DMA MEs.
3336 *
3337 * Halt or unhalt the async dma engines (CIK).
3338 */
3339static void cik_sdma_enable(struct radeon_device *rdev, bool enable)
3340{
3341 u32 me_cntl, reg_offset;
3342 int i;
3343
3344 for (i = 0; i < 2; i++) {
3345 if (i == 0)
3346 reg_offset = SDMA0_REGISTER_OFFSET;
3347 else
3348 reg_offset = SDMA1_REGISTER_OFFSET;
3349 me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
3350 if (enable)
3351 me_cntl &= ~SDMA_HALT;
3352 else
3353 me_cntl |= SDMA_HALT;
3354 WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
3355 }
3356}
3357
/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	/* program both engines: i==0 -> SDMA0/DMA ring, i==1 -> SDMA1 */
	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = SDMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = SDMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		/* disable semaphore wait timeouts */
		WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = drm_order(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		/* the writeback-enable bit is OR'd into the local copy here
		 * and reaches the hardware with the final RB_CNTL write below
		 */
		if (rdev->wb.enabled)
			rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;

		WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
		WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);

		ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;

		/* enable DMA RB */
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);

		ib_cntl = SDMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA_IB_SWAP_ENABLE;
#endif
		/* enable DMA IBs */
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	/* rings are up: expose the full VRAM size to TTM again */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
3440
/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 * Currently a stub — compute sDMA queues are not brought up yet.
 */
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
{
	/* XXX todo */
	return 0;
}
3454
3455/**
3456 * cik_sdma_load_microcode - load the sDMA ME ucode
3457 *
3458 * @rdev: radeon_device pointer
3459 *
3460 * Loads the sDMA0/1 ucode.
3461 * Returns 0 for success, -EINVAL if the ucode is not available.
3462 */
3463static int cik_sdma_load_microcode(struct radeon_device *rdev)
3464{
3465 const __be32 *fw_data;
3466 int i;
3467
3468 if (!rdev->sdma_fw)
3469 return -EINVAL;
3470
3471 /* stop the gfx rings and rlc compute queues */
3472 cik_sdma_gfx_stop(rdev);
3473 cik_sdma_rlc_stop(rdev);
3474
3475 /* halt the MEs */
3476 cik_sdma_enable(rdev, false);
3477
3478 /* sdma0 */
3479 fw_data = (const __be32 *)rdev->sdma_fw->data;
3480 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
3481 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
3482 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
3483 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
3484
3485 /* sdma1 */
3486 fw_data = (const __be32 *)rdev->sdma_fw->data;
3487 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
3488 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
3489 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
3490 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
3491
3492 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
3493 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
3494 return 0;
3495}
3496
3497/**
3498 * cik_sdma_resume - setup and start the async dma engines
3499 *
3500 * @rdev: radeon_device pointer
3501 *
3502 * Set up the DMA engines and enable them (CIK).
3503 * Returns 0 for success, error for failure.
3504 */
3505static int cik_sdma_resume(struct radeon_device *rdev)
3506{
3507 int r;
3508
3509 /* Reset dma */
3510 WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
3511 RREG32(SRBM_SOFT_RESET);
3512 udelay(50);
3513 WREG32(SRBM_SOFT_RESET, 0);
3514 RREG32(SRBM_SOFT_RESET);
3515
3516 r = cik_sdma_load_microcode(rdev);
3517 if (r)
3518 return r;
3519
3520 /* unhalt the MEs */
3521 cik_sdma_enable(rdev, true);
3522
3523 /* start the gfx rings and rlc compute queues */
3524 r = cik_sdma_gfx_resume(rdev);
3525 if (r)
3526 return r;
3527 r = cik_sdma_rlc_resume(rdev);
3528 if (r)
3529 return r;
3530
3531 return 0;
3532}
3533
/**
 * cik_sdma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (CIK).
 */
static void cik_sdma_fini(struct radeon_device *rdev)
{
	/* stop the gfx rings and rlc compute queues */
	cik_sdma_gfx_stop(rdev);
	cik_sdma_rlc_stop(rdev);
	/* halt the MEs */
	cik_sdma_enable(rdev, false);
	/* free both gfx sDMA ring buffers */
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
	/* XXX - compute dma queue tear down */
}
3552
3553/**
3554 * cik_copy_dma - copy pages using the DMA engine
3555 *
3556 * @rdev: radeon_device pointer
3557 * @src_offset: src GPU address
3558 * @dst_offset: dst GPU address
3559 * @num_gpu_pages: number of GPU pages to xfer
3560 * @fence: radeon fence object
3561 *
3562 * Copy GPU paging using the DMA engine (CIK).
3563 * Used by the radeon ttm implementation to move pages if
3564 * registered as the asic copy callback.
3565 */
3566int cik_copy_dma(struct radeon_device *rdev,
3567 uint64_t src_offset, uint64_t dst_offset,
3568 unsigned num_gpu_pages,
3569 struct radeon_fence **fence)
3570{
3571 struct radeon_semaphore *sem = NULL;
3572 int ring_index = rdev->asic->copy.dma_ring_index;
3573 struct radeon_ring *ring = &rdev->ring[ring_index];
3574 u32 size_in_bytes, cur_size_in_bytes;
3575 int i, num_loops;
3576 int r = 0;
3577
3578 r = radeon_semaphore_create(rdev, &sem);
3579 if (r) {
3580 DRM_ERROR("radeon: moving bo (%d).\n", r);
3581 return r;
3582 }
3583
3584 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
3585 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
3586 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
3587 if (r) {
3588 DRM_ERROR("radeon: moving bo (%d).\n", r);
3589 radeon_semaphore_free(rdev, &sem, NULL);
3590 return r;
3591 }
3592
3593 if (radeon_fence_need_sync(*fence, ring->idx)) {
3594 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3595 ring->idx);
3596 radeon_fence_note_sync(*fence, ring->idx);
3597 } else {
3598 radeon_semaphore_free(rdev, &sem, NULL);
3599 }
3600
3601 for (i = 0; i < num_loops; i++) {
3602 cur_size_in_bytes = size_in_bytes;
3603 if (cur_size_in_bytes > 0x1fffff)
3604 cur_size_in_bytes = 0x1fffff;
3605 size_in_bytes -= cur_size_in_bytes;
3606 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
3607 radeon_ring_write(ring, cur_size_in_bytes);
3608 radeon_ring_write(ring, 0); /* src/dst endian swap */
3609 radeon_ring_write(ring, src_offset & 0xffffffff);
3610 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
3611 radeon_ring_write(ring, dst_offset & 0xfffffffc);
3612 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
3613 src_offset += cur_size_in_bytes;
3614 dst_offset += cur_size_in_bytes;
3615 }
3616
3617 r = radeon_fence_emit(rdev, fence, ring->idx);
3618 if (r) {
3619 radeon_ring_unlock_undo(rdev, ring);
3620 return r;
3621 }
3622
3623 radeon_ring_unlock_commit(rdev, ring);
3624 radeon_semaphore_free(rdev, &sem, *fence);
3625
3626 return r;
3627}
3628
/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to the vram
 * scratch location and polling for it (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
	u32 tmp;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	/* seed the scratch with a sentinel the engine must overwrite */
	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ring_lock(rdev, ring, 4);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	/* single linear write of 0xDEADBEEF to the scratch address */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
	radeon_ring_write(ring, 1); /* number of DWs to follow */
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);

	/* poll until the write lands or we time out */
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
3683
3684/**
3685 * cik_sdma_ib_test - test an IB on the DMA engine
3686 *
3687 * @rdev: radeon_device pointer
3688 * @ring: radeon_ring structure holding ring information
3689 *
3690 * Test a simple IB in the DMA ring (CIK).
3691 * Returns 0 on success, error on failure.
3692 */
3693int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3694{
3695 struct radeon_ib ib;
3696 unsigned i;
3697 int r;
3698 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3699 u32 tmp = 0;
3700
3701 if (!ptr) {
3702 DRM_ERROR("invalid vram scratch pointer\n");
3703 return -EINVAL;
3704 }
3705
3706 tmp = 0xCAFEDEAD;
3707 writel(tmp, ptr);
3708
3709 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3710 if (r) {
3711 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3712 return r;
3713 }
3714
3715 ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
3716 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
3717 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
3718 ib.ptr[3] = 1;
3719 ib.ptr[4] = 0xDEADBEEF;
3720 ib.length_dw = 5;
3721
3722 r = radeon_ib_schedule(rdev, &ib, NULL);
3723 if (r) {
3724 radeon_ib_free(rdev, &ib);
3725 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3726 return r;
3727 }
3728 r = radeon_fence_wait(ib.fence, false);
3729 if (r) {
3730 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3731 return r;
3732 }
3733 for (i = 0; i < rdev->usec_timeout; i++) {
3734 tmp = readl(ptr);
3735 if (tmp == 0xDEADBEEF)
3736 break;
3737 DRM_UDELAY(1);
3738 }
3739 if (i < rdev->usec_timeout) {
3740 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3741 } else {
3742 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
3743 r = -EINVAL;
3744 }
3745 radeon_ib_free(rdev, &ib);
3746 return r;
3747}
3748
3749
3750static void cik_print_gpu_status_regs(struct radeon_device *rdev)
3751{
3752 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
3753 RREG32(GRBM_STATUS));
3754 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
3755 RREG32(GRBM_STATUS2));
3756 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
3757 RREG32(GRBM_STATUS_SE0));
3758 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
3759 RREG32(GRBM_STATUS_SE1));
3760 dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
3761 RREG32(GRBM_STATUS_SE2));
3762 dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
3763 RREG32(GRBM_STATUS_SE3));
3764 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
3765 RREG32(SRBM_STATUS));
3766 dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
3767 RREG32(SRBM_STATUS2));
3768 dev_info(rdev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
3769 RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
3770 dev_info(rdev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
3771 RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
3772 dev_info(rdev->dev, " CP_STAT = 0x%08x\n", RREG32(CP_STAT));
3773 dev_info(rdev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
3774 RREG32(CP_STALLED_STAT1));
3775 dev_info(rdev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
3776 RREG32(CP_STALLED_STAT2));
3777 dev_info(rdev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
3778 RREG32(CP_STALLED_STAT3));
3779 dev_info(rdev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
3780 RREG32(CP_CPF_BUSY_STAT));
3781 dev_info(rdev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
3782 RREG32(CP_CPF_STALLED_STAT1));
3783 dev_info(rdev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(CP_CPF_STATUS));
3784 dev_info(rdev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(CP_CPC_BUSY_STAT));
3785 dev_info(rdev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
3786 RREG32(CP_CPC_STALLED_STAT1));
3787 dev_info(rdev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(CP_CPC_STATUS));
3788}
3789
/**
 * cik_gpu_check_soft_reset - check which blocks are busy
 *
 * @rdev: radeon_device pointer
 *
 * Check which blocks are busy and return the relevant reset
 * mask to be used by cik_gpu_soft_reset().
 * Returns a mask of the blocks to be reset.
 */
static u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS - any busy gfx pipeline block means a GFX reset */
	tmp = RREG32(GRBM_STATUS);
	if (tmp & (PA_BUSY | SC_BUSY |
		   BCI_BUSY | SX_BUSY |
		   TA_BUSY | VGT_BUSY |
		   DB_BUSY | CB_BUSY |
		   GDS_BUSY | SPI_BUSY |
		   IA_BUSY | IA_BUSY_NO_DMA))
		reset_mask |= RADEON_RESET_GFX;

	if (tmp & (CP_BUSY | CP_COHERENCY_BUSY))
		reset_mask |= RADEON_RESET_CP;

	/* GRBM_STATUS2 */
	tmp = RREG32(GRBM_STATUS2);
	if (tmp & RLC_BUSY)
		reset_mask |= RADEON_RESET_RLC;

	/* SDMA0_STATUS_REG - note SDMA_IDLE is active-high idle,
	 * so the engine is busy when the bit is clear */
	tmp = RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
	if (!(tmp & SDMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* SDMA1_STATUS_REG - same register, second engine's offset */
	tmp = RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
	if (!(tmp & SDMA_IDLE))
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS2 - SDMA busy also visible from the SRBM side */
	tmp = RREG32(SRBM_STATUS2);
	if (tmp & SDMA_BUSY)
		reset_mask |= RADEON_RESET_DMA;

	if (tmp & SDMA1_BUSY)
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS - IH, semaphores, GRBM requests, VM and MC clients */
	tmp = RREG32(SRBM_STATUS);

	if (tmp & IH_BUSY)
		reset_mask |= RADEON_RESET_IH;

	if (tmp & SEM_BUSY)
		reset_mask |= RADEON_RESET_SEM;

	if (tmp & GRBM_RQ_PENDING)
		reset_mask |= RADEON_RESET_GRBM;

	if (tmp & VMC_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
		   MCC_BUSY | MCD_BUSY))
		reset_mask |= RADEON_RESET_MC;

	if (evergreen_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* Skip MC reset as it's mostly likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}
3870
/**
 * cik_gpu_soft_reset - soft reset GPU
 *
 * @rdev: radeon_device pointer
 * @reset_mask: mask of which blocks to reset
 *
 * Soft reset the blocks specified in @reset_mask.
 * The sequence is order-sensitive: halt the RLC, CP and SDMA engines,
 * quiesce the memory controller, pulse the GRBM/SRBM soft-reset bits,
 * then resume the MC.
 */
static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	/* log the pre-reset state for debugging */
	cik_print_gpu_status_regs(rdev);
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));

	/* stop the rlc */
	cik_rlc_stop(rdev);

	/* Disable GFX parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);

	/* Disable MEC parsing/prefetching */
	WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);

	/* halt whichever SDMA engines are about to be reset */
	if (reset_mask & RADEON_RESET_DMA) {
		/* sdma0 */
		tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
		tmp |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
	}
	if (reset_mask & RADEON_RESET_DMA1) {
		/* sdma1 */
		tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
		tmp |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
	}

	/* quiesce the memory controller before pulsing reset */
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	/* translate the generic reset mask into GRBM/SRBM soft-reset bits */
	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))
		grbm_soft_reset = SOFT_RESET_CP | SOFT_RESET_GFX;

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_SDMA;

	if (reset_mask & RADEON_RESET_DMA1)
		srbm_soft_reset |= SOFT_RESET_SDMA1;

	if (reset_mask & RADEON_RESET_DISPLAY)
		srbm_soft_reset |= SOFT_RESET_DC;

	if (reset_mask & RADEON_RESET_RLC)
		grbm_soft_reset |= SOFT_RESET_RLC;

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= SOFT_RESET_SEM;

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= SOFT_RESET_IH;

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= SOFT_RESET_GRBM;

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= SOFT_RESET_VMC;

	/* MC reset only on discrete parts; APUs share the MC with the CPU */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= SOFT_RESET_MC;
	}

	/* pulse the reset bits: set, read back to post the write,
	 * hold for 50us, then clear and read back again */
	if (grbm_soft_reset) {
		tmp = RREG32(GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	/* log the post-reset state */
	cik_print_gpu_status_regs(rdev);
}
3997
3998/**
3999 * cik_asic_reset - soft reset GPU
4000 *
4001 * @rdev: radeon_device pointer
4002 *
4003 * Look up which blocks are hung and attempt
4004 * to reset them.
4005 * Returns 0 for success.
4006 */
4007int cik_asic_reset(struct radeon_device *rdev)
4008{
4009 u32 reset_mask;
4010
4011 reset_mask = cik_gpu_check_soft_reset(rdev);
4012
4013 if (reset_mask)
4014 r600_set_bios_scratch_engine_hung(rdev, true);
4015
4016 cik_gpu_soft_reset(rdev, reset_mask);
4017
4018 reset_mask = cik_gpu_check_soft_reset(rdev);
4019
4020 if (!reset_mask)
4021 r600_set_bios_scratch_engine_hung(rdev, false);
4022
4023 return 0;
4024}
4025
4026/**
4027 * cik_gfx_is_lockup - check if the 3D engine is locked up
4028 *
4029 * @rdev: radeon_device pointer
4030 * @ring: radeon_ring structure holding ring information
4031 *
4032 * Check if the 3D engine is locked up (CIK).
4033 * Returns true if the engine is locked, false if not.
4034 */
4035bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4036{
4037 u32 reset_mask = cik_gpu_check_soft_reset(rdev);
4038
4039 if (!(reset_mask & (RADEON_RESET_GFX |
4040 RADEON_RESET_COMPUTE |
4041 RADEON_RESET_CP))) {
4042 radeon_ring_lockup_update(ring);
4043 return false;
4044 }
4045 /* force CP activities */
4046 radeon_ring_force_activity(rdev, ring);
4047 return radeon_ring_test_lockup(rdev, ring);
4048}
4049
4050/**
4051 * cik_sdma_is_lockup - Check if the DMA engine is locked up
4052 *
4053 * @rdev: radeon_device pointer
4054 * @ring: radeon_ring structure holding ring information
4055 *
4056 * Check if the async DMA engine is locked up (CIK).
4057 * Returns true if the engine appears to be locked up, false if not.
4058 */
4059bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4060{
4061 u32 reset_mask = cik_gpu_check_soft_reset(rdev);
4062 u32 mask;
4063
4064 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
4065 mask = RADEON_RESET_DMA;
4066 else
4067 mask = RADEON_RESET_DMA1;
4068
4069 if (!(reset_mask & mask)) {
4070 radeon_ring_lockup_update(ring);
4071 return false;
4072 }
4073 /* force ring activities */
4074 radeon_ring_force_activity(rdev, ring);
4075 return radeon_ring_test_lockup(rdev, ring);
4076}
4077
4078/* MC */
/**
 * cik_mc_program - program the GPU memory controller
 *
 * @rdev: radeon_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 * The MC is stopped around the reprogramming and the VGA
 * renderer is disabled afterwards so the driver owns VRAM.
 */
static void cik_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	/* MC must be idle while the aperture registers are rewritten */
	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture*/
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       rdev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       rdev->mc.vram_end >> 12);
	/* faulting accesses land in the vram scratch page */
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       rdev->vram_scratch.gpu_addr >> 12);
	/* FB location: end in the high 16 bits, start in the low 16,
	 * both in 16MB units */
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	/* AGP aperture: base 0, top == bot - presumably disables it;
	 * TODO confirm against the register spec */
	WREG32(MC_VM_AGP_BASE, 0);
	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
4134
4135/**
4136 * cik_mc_init - initialize the memory controller driver params
4137 *
4138 * @rdev: radeon_device pointer
4139 *
4140 * Look up the amount of vram, vram width, and decide how to place
4141 * vram and gart within the GPU's physical address space (CIK).
4142 * Returns 0 for success.
4143 */
4144static int cik_mc_init(struct radeon_device *rdev)
4145{
4146 u32 tmp;
4147 int chansize, numchan;
4148
4149 /* Get VRAM informations */
4150 rdev->mc.vram_is_ddr = true;
4151 tmp = RREG32(MC_ARB_RAMCFG);
4152 if (tmp & CHANSIZE_MASK) {
4153 chansize = 64;
4154 } else {
4155 chansize = 32;
4156 }
4157 tmp = RREG32(MC_SHARED_CHMAP);
4158 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
4159 case 0:
4160 default:
4161 numchan = 1;
4162 break;
4163 case 1:
4164 numchan = 2;
4165 break;
4166 case 2:
4167 numchan = 4;
4168 break;
4169 case 3:
4170 numchan = 8;
4171 break;
4172 case 4:
4173 numchan = 3;
4174 break;
4175 case 5:
4176 numchan = 6;
4177 break;
4178 case 6:
4179 numchan = 10;
4180 break;
4181 case 7:
4182 numchan = 12;
4183 break;
4184 case 8:
4185 numchan = 16;
4186 break;
4187 }
4188 rdev->mc.vram_width = numchan * chansize;
4189 /* Could aper size report 0 ? */
4190 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
4191 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
4192 /* size in MB on si */
4193 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
4194 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
4195 rdev->mc.visible_vram_size = rdev->mc.aper_size;
4196 si_vram_gtt_location(rdev, &rdev->mc);
4197 radeon_update_bandwidth_info(rdev);
4198
4199 return 0;
4200}
4201
4202/*
4203 * GART
4204 * VMID 0 is the physical GPU addresses as used by the kernel.
4205 * VMIDs 1-15 are used for userspace clients and are handled
4206 * by the radeon vm/hsa code.
4207 */
/**
 * cik_pcie_gart_tlb_flush - gart tlb flush callback
 *
 * @rdev: radeon_device pointer
 *
 * Flush the TLB for the VMID 0 page table (CIK).
 */
void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache so in-flight writes reach memory first */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15; only context 0 is flushed here */
	WREG32(VM_INVALIDATE_REQUEST, 0x1);
}
4223
/**
 * cik_pcie_gart_enable - gart enable
 *
 * @rdev: radeon_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int cik_pcie_gart_enable(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 - the kernel's VM, covering the gart aperture */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	/* faults in context0 are redirected to the dummy page */
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));

	/* undocumented registers - zeroed here; purpose unknown (XXX) */
	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
	/* contexts 0-7 and 8-15 have separate base-address register banks */
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
			       rdev->gart.table_addr >> 12);
		else
			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
			       rdev->gart.table_addr >> 12);
	}

	/* enable context1-15 with every fault type reported and defaulted
	 * to the dummy page */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	/* TC cache setup ??? */
	WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
	WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
	WREG32(TC_CFG_L1_STORE_POLICY, 0);

	WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
	WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
	WREG32(TC_CFG_L2_STORE_POLICY0, 0);
	WREG32(TC_CFG_L2_STORE_POLICY1, 0);
	WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);

	WREG32(TC_CFG_L1_VOLATILE, 0);
	WREG32(TC_CFG_L2_VOLATILE, 0);

	/* Kaveri APUs must not bypass the VM */
	if (rdev->family == CHIP_KAVERI) {
		u32 tmp = RREG32(CHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(CHUB_CONTROL, tmp);
	}

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	for (i = 0; i < 16; i++) {
		/* select VMID i so the SH_MEM/SDMA writes below land in
		 * that context's register bank */
		cik_srbm_select(rdev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(SH_MEM_CONFIG, 0);
		WREG32(SH_MEM_APE1_BASE, 1);
		WREG32(SH_MEM_APE1_LIMIT, 0);
		WREG32(SH_MEM_BASES, 0);
		/* SDMA GFX */
		WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA0_REGISTER_OFFSET, 0);
		WREG32(SDMA0_GFX_APE1_CNTL + SDMA0_REGISTER_OFFSET, 0);
		WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA1_REGISTER_OFFSET, 0);
		WREG32(SDMA0_GFX_APE1_CNTL + SDMA1_REGISTER_OFFSET, 0);
		/* XXX SDMA RLC - todo */
	}
	/* restore the default SRBM selection */
	cik_srbm_select(rdev, 0, 0, 0, 0);

	cik_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
4357
/**
 * cik_pcie_gart_disable - gart disable
 *
 * @rdev: radeon_device pointer
 *
 * This disables all VM page table (CIK).
 */
static void cik_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control - note ENABLE_L1_TLB is deliberately left out */
	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache - ENABLE_L2_CACHE is deliberately left out */
	WREG32(VM_L2_CNTL,
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* the page table is no longer referenced by hw; unpin it */
	radeon_gart_table_vram_unpin(rdev);
}
4385
/**
 * cik_pcie_gart_fini - vm fini callback
 *
 * @rdev: radeon_device pointer
 *
 * Tears down the driver GART/VM setup (CIK):
 * disables the hw, frees the table memory, then the gart state.
 */
static void cik_pcie_gart_fini(struct radeon_device *rdev)
{
	cik_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
4399
4400/* vm parser */
/**
 * cik_ib_parse - vm ib_parse callback
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer pointer
 *
 * CIK uses hw IB checking so this is a nop (CIK).
 * Always returns 0 (success).
 */
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	return 0;
}
4413
4414/*
4415 * vm
4416 * VMID 0 is the physical GPU addresses as used by the kernel.
4417 * VMIDs 1-15 are used for userspace clients and are handled
4418 * by the radeon vm/hsa code.
4419 */
4420/**
4421 * cik_vm_init - cik vm init callback
4422 *
4423 * @rdev: radeon_device pointer
4424 *
4425 * Inits cik specific vm parameters (number of VMs, base of vram for
4426 * VMIDs 1-15) (CIK).
4427 * Returns 0 for success.
4428 */
4429int cik_vm_init(struct radeon_device *rdev)
4430{
4431 /* number of VMs */
4432 rdev->vm_manager.nvm = 16;
4433 /* base offset of vram pages */
4434 if (rdev->flags & RADEON_IS_IGP) {
4435 u64 tmp = RREG32(MC_VM_FB_OFFSET);
4436 tmp <<= 22;
4437 rdev->vm_manager.vram_base_offset = tmp;
4438 } else
4439 rdev->vm_manager.vram_base_offset = 0;
4440
4441 return 0;
4442}
4443
/**
 * cik_vm_fini - cik vm fini callback
 *
 * @rdev: radeon_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 * Nothing to do on CIK; intentionally empty.
 */
void cik_vm_fini(struct radeon_device *rdev)
{
}
4454
/**
 * cik_vm_flush - cik vm flush using the CP
 *
 * @rdev: radeon_device pointer
 * @ridx: ring index to emit on
 * @vm: vm to flush, may be NULL (then this is a nop)
 *
 * Update the page table base and flush the VM TLB
 * using the CP (CIK).  All register updates are emitted as
 * WRITE_DATA packets on the ring, in order.
 */
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

	/* point the VM context at the new page directory; contexts 0-7
	 * and 8-15 live in separate register banks */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	if (vm->id < 8) {
		radeon_ring_write(ring,
				  (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
	} else {
		radeon_ring_write(ring,
				  (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* update SH_MEM_* regs */
	/* select this VMID's register bank via SRBM_GFX_CNTL */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, VMID(vm->id));

	/* write the four consecutive SH_MEM registers in one packet */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, SH_MEM_BASES >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, 0); /* SH_MEM_BASES */
	radeon_ring_write(ring, 0); /* SH_MEM_CONFIG */
	radeon_ring_write(ring, 1); /* SH_MEM_APE1_BASE */
	radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */

	/* switch the SRBM selection back to VMID 0 */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, VMID(0));

	/* HDP flush */
	/* We should be using the WAIT_REG_MEM packet here like in
	 * cik_fence_ring_emit(), but it causes the CP to hang in this
	 * context...
	 */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);

	/* bits 0-15 are the VM contexts0-15 */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 1 << vm->id);

	/* compute doesn't have PFP */
	if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		radeon_ring_write(ring, 0x0);
	}
}
4536
/**
 * cik_vm_set_page - update the page tables using CP or sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using CP or sDMA (CIK).
 * The engine is chosen by the vm page-table ring index; each path
 * chunks the work to stay under the packet's dword-count limit.
 */
void cik_vm_set_page(struct radeon_device *rdev,
		     struct radeon_ib *ib,
		     uint64_t pe,
		     uint64_t addr, unsigned count,
		     uint32_t incr, uint32_t flags)
{
	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
	uint64_t value;
	unsigned ndw;

	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
		/* CP */
		while (count) {
			/* 2 header dwords + 2 dwords per PTE, capped at the
			 * WRITE_DATA packet limit */
			ndw = 2 + count * 2;
			if (ndw > 0x3FFE)
				ndw = 0x3FFE;

			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
			ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
						    WRITE_DATA_DST_SEL(1));
			ib->ptr[ib->length_dw++] = pe;
			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
			for (; ndw > 2; ndw -= 2, --count, pe += 8) {
				if (flags & RADEON_VM_PAGE_SYSTEM) {
					/* system pages go through the gart */
					value = radeon_vm_map_gart(rdev, addr);
					value &= 0xFFFFFFFFFFFFF000ULL;
				} else if (flags & RADEON_VM_PAGE_VALID) {
					value = addr;
				} else {
					value = 0;
				}
				addr += incr;
				value |= r600_flags;
				ib->ptr[ib->length_dw++] = value;
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
			}
		}
	} else {
		/* DMA */
		if (flags & RADEON_VM_PAGE_SYSTEM) {
			while (count) {
				/* 2 dwords per PTE, capped at the SDMA
				 * linear-write limit */
				ndw = count * 2;
				if (ndw > 0xFFFFE)
					ndw = 0xFFFFE;

				/* for non-physically contiguous pages (system) */
				ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
				ib->ptr[ib->length_dw++] = pe;
				ib->ptr[ib->length_dw++] = upper_32_bits(pe);
				ib->ptr[ib->length_dw++] = ndw;
				for (; ndw > 0; ndw -= 2, --count, pe += 8) {
					if (flags & RADEON_VM_PAGE_SYSTEM) {
						value = radeon_vm_map_gart(rdev, addr);
						value &= 0xFFFFFFFFFFFFF000ULL;
					} else if (flags & RADEON_VM_PAGE_VALID) {
						value = addr;
					} else {
						value = 0;
					}
					addr += incr;
					value |= r600_flags;
					ib->ptr[ib->length_dw++] = value;
					ib->ptr[ib->length_dw++] = upper_32_bits(value);
				}
			}
		} else {
			while (count) {
				ndw = count;
				if (ndw > 0x7FFFF)
					ndw = 0x7FFFF;

				if (flags & RADEON_VM_PAGE_VALID)
					value = addr;
				else
					value = 0;
				/* for physically contiguous pages (vram) -
				 * let the sDMA engine generate the PTEs */
				ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
				ib->ptr[ib->length_dw++] = pe; /* dst addr */
				ib->ptr[ib->length_dw++] = upper_32_bits(pe);
				ib->ptr[ib->length_dw++] = r600_flags; /* mask */
				ib->ptr[ib->length_dw++] = 0;
				ib->ptr[ib->length_dw++] = value; /* value */
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
				ib->ptr[ib->length_dw++] = incr; /* increment size */
				ib->ptr[ib->length_dw++] = 0;
				ib->ptr[ib->length_dw++] = ndw; /* number of entries */
				pe += ndw * 8;
				addr += ndw * incr;
				count -= ndw;
			}
		}
		/* pad the SDMA IB to a multiple of 8 dwords with NOPs */
		while (ib->length_dw & 0x7)
			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
	}
}
4645
/**
 * cik_dma_vm_flush - cik vm flush using sDMA
 *
 * @rdev: radeon_device pointer
 * @ridx: ring index to emit on
 * @vm: vm to flush, may be NULL (then this is a nop)
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).  Register updates are emitted as
 * SRBM_WRITE packets, mirroring the CP path in cik_vm_flush().
 */
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (vm == NULL)
		return;

	/* each SDMA engine has its own bit in the HDP flush handshake regs */
	if (ridx == R600_RING_TYPE_DMA_INDEX)
		ref_and_mask = SDMA0;
	else
		ref_and_mask = SDMA1;

	/* point the VM context at the new page directory; contexts 0-7
	 * and 8-15 live in separate register banks */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm->id < 8) {
		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
	} else {
		radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* update SH_MEM_* regs */
	/* select this VMID's register bank via SRBM_GFX_CNTL */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(vm->id));

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_BASES >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
	radeon_ring_write(ring, 1);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
	radeon_ring_write(ring, 0);

	/* switch the SRBM selection back to VMID 0 */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(0));

	/* flush HDP */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
	radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
	radeon_ring_write(ring, ref_and_mask); /* MASK */
	radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */

	/* flush TLB */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 1 << vm->id);
}
4715
4716/*
4717 * RLC
4718 * The RLC is a multi-purpose microengine that handles a
4719 * variety of functions, the most important of which is
4720 * the interrupt controller.
4721 */
/**
 * cik_rlc_stop - stop the RLC ME
 *
 * @rdev: radeon_device pointer
 *
 * Halt the RLC ME (MicroEngine) (CIK): mask the CP context
 * interrupts, disable the RLC, then wait for the per-CU and
 * non-CU serdes masters to go idle.
 */
static void cik_rlc_stop(struct radeon_device *rdev)
{
	int i, j, k;
	u32 mask, tmp;

	/* mask the CP context busy/empty interrupts while the RLC is down;
	 * cik_rlc_start() re-enables them */
	tmp = RREG32(CP_INT_CNTL_RING0);
	tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(CP_INT_CNTL_RING0, tmp);

	/* repeated reads - presumably a settle/flush sequence; purpose
	 * not documented here, TODO confirm against other radeon asics */
	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);

	/* clear the low two bits of RLC_CGCG_CGLS_CTRL */
	tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
	WREG32(RLC_CGCG_CGLS_CTRL, tmp);

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	/* wait for the CU serdes master on every SE/SH to go idle */
	for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
		for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
			cik_select_se_sh(rdev, i, j);
			for (k = 0; k < rdev->usec_timeout; k++) {
				if (RREG32(RLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
		}
	}
	/* restore broadcast SE/SH selection */
	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);

	/* wait for the non-CU serdes masters to go idle as well */
	mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
	for (k = 0; k < rdev->usec_timeout; k++) {
		if ((RREG32(RLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}
4767
4768/**
4769 * cik_rlc_start - start the RLC ME
4770 *
4771 * @rdev: radeon_device pointer
4772 *
4773 * Unhalt the RLC ME (MicroEngine) (CIK).
4774 */
4775static void cik_rlc_start(struct radeon_device *rdev)
4776{
4777 u32 tmp;
4778
4779 WREG32(RLC_CNTL, RLC_ENABLE);
4780
4781 tmp = RREG32(CP_INT_CNTL_RING0);
4782 tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4783 WREG32(CP_INT_CNTL_RING0, tmp);
4784
4785 udelay(50);
4786}
4787
/**
 * cik_rlc_resume - setup the RLC hw
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the RLC registers, load the ucode,
 * and start the RLC (CIK).
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_rlc_resume(struct radeon_device *rdev)
{
	u32 i, size;
	u32 clear_state_info[3];
	const __be32 *fw_data;

	/* nothing to load without the RLC microcode */
	if (!rdev->rlc_fw)
		return -EINVAL;

	/* ucode size (in dwords) differs per asic */
	switch (rdev->family) {
	case CHIP_BONAIRE:
	default:
		size = BONAIRE_RLC_UCODE_SIZE;
		break;
	case CHIP_KAVERI:
		size = KV_RLC_UCODE_SIZE;
		break;
	case CHIP_KABINI:
		size = KB_RLC_UCODE_SIZE;
		break;
	}

	/* make sure the RLC is halted before reprogramming it */
	cik_rlc_stop(rdev);

	/* pulse a soft reset of the RLC; the RREG32 reads flush
	 * the posted writes before each delay
	 */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_RLC);
	RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);
	udelay(50);

	/* load-balancer counter setup */
	WREG32(RLC_LB_CNTR_INIT, 0);
	WREG32(RLC_LB_CNTR_MAX, 0x00008000);

	/* program the load balancer with broadcast to all SEs/SHs;
	 * the parameter values are hw-specific magic — TODO confirm
	 * against the register spec
	 */
	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
	WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
	WREG32(RLC_LB_PARAMS, 0x00600408);
	WREG32(RLC_LB_CNTL, 0x80000004);

	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	/* load the ucode: the fw image is big endian, so convert
	 * each dword to cpu order before writing it
	 */
	fw_data = (const __be32 *)rdev->rlc_fw->data;
	WREG32(RLC_GPM_UCODE_ADDR, 0);
	for (i = 0; i < size; i++)
		WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(RLC_GPM_UCODE_ADDR, 0);

	/* XXX - save/restore (clear state) buffer not wired up yet,
	 * so the RLC scratch entries are zeroed for now
	 */
	clear_state_info[0] = 0;//upper_32_bits(rdev->rlc.save_restore_gpu_addr);
	clear_state_info[1] = 0;//rdev->rlc.save_restore_gpu_addr;
	clear_state_info[2] = 0;//cik_default_size;
	WREG32(RLC_GPM_SCRATCH_ADDR, 0x3d);
	for (i = 0; i < 3; i++)
		WREG32(RLC_GPM_SCRATCH_DATA, clear_state_info[i]);
	WREG32(RLC_DRIVER_DMA_STATUS, 0);

	/* unhalt the RLC */
	cik_rlc_start(rdev);

	return 0;
}
4858
4859/*
4860 * Interrupts
4861 * Starting with r6xx, interrupts are handled via a ring buffer.
4862 * Ring buffers are areas of GPU accessible memory that the GPU
4863 * writes interrupt vectors into and the host reads vectors out of.
4864 * There is a rptr (read pointer) that determines where the
4865 * host is currently reading, and a wptr (write pointer)
4866 * which determines where the GPU has written. When the
4867 * pointers are equal, the ring is idle. When the GPU
4868 * writes vectors to the ring buffer, it increments the
4869 * wptr. When there is an interrupt, the host then starts
4870 * fetching commands and processing them until the pointers are
4871 * equal again at which point it updates the rptr.
4872 */
4873
4874/**
4875 * cik_enable_interrupts - Enable the interrupt ring buffer
4876 *
4877 * @rdev: radeon_device pointer
4878 *
4879 * Enable the interrupt ring buffer (CIK).
4880 */
4881static void cik_enable_interrupts(struct radeon_device *rdev)
4882{
4883 u32 ih_cntl = RREG32(IH_CNTL);
4884 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
4885
4886 ih_cntl |= ENABLE_INTR;
4887 ih_rb_cntl |= IH_RB_ENABLE;
4888 WREG32(IH_CNTL, ih_cntl);
4889 WREG32(IH_RB_CNTL, ih_rb_cntl);
4890 rdev->ih.enabled = true;
4891}
4892
4893/**
4894 * cik_disable_interrupts - Disable the interrupt ring buffer
4895 *
4896 * @rdev: radeon_device pointer
4897 *
4898 * Disable the interrupt ring buffer (CIK).
4899 */
4900static void cik_disable_interrupts(struct radeon_device *rdev)
4901{
4902 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
4903 u32 ih_cntl = RREG32(IH_CNTL);
4904
4905 ih_rb_cntl &= ~IH_RB_ENABLE;
4906 ih_cntl &= ~ENABLE_INTR;
4907 WREG32(IH_RB_CNTL, ih_rb_cntl);
4908 WREG32(IH_CNTL, ih_cntl);
4909 /* set rptr, wptr to 0 */
4910 WREG32(IH_RB_RPTR, 0);
4911 WREG32(IH_RB_WPTR, 0);
4912 rdev->ih.enabled = false;
4913 rdev->ih.rptr = 0;
4914}
4915
/**
 * cik_disable_interrupt_state - Disable all interrupt sources
 *
 * @rdev: radeon_device pointer
 *
 * Clear all interrupt enable bits used by the driver (CIK):
 * gfx ring, both SDMA engines, all eight compute queues, GRBM,
 * vblank/vline on every crtc, DAC and digital hotplug.
 */
static void cik_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	/* gfx ring: leave only the context busy/empty bits set */
	WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	/* sdma: clear the trap enable on both engines */
	tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
	WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
	tmp = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
	WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, tmp);
	/* compute queues: 2 MEs x 4 pipes each */
	WREG32(CP_ME1_PIPE0_INT_CNTL, 0);
	WREG32(CP_ME1_PIPE1_INT_CNTL, 0);
	WREG32(CP_ME1_PIPE2_INT_CNTL, 0);
	WREG32(CP_ME1_PIPE3_INT_CNTL, 0);
	WREG32(CP_ME2_PIPE0_INT_CNTL, 0);
	WREG32(CP_ME2_PIPE1_INT_CNTL, 0);
	WREG32(CP_ME2_PIPE2_INT_CNTL, 0);
	WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
	/* grbm */
	WREG32(GRBM_INT_CNTL, 0);
	/* vline/vblank, etc. — only touch crtcs the asic actually has */
	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	/* dac hotplug */
	WREG32(DAC_AUTODETECT_INT_CONTROL, 0);

	/* digital hotplug: clear everything but the polarity bit
	 * on each of the six HPD pins
	 */
	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD1_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD2_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD3_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD4_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD5_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD6_INT_CONTROL, tmp);

}
4975
/**
 * cik_irq_init - init and enable the interrupt ring
 *
 * @rdev: radeon_device pointer
 *
 * Allocate a ring buffer for the interrupt controller,
 * enable the RLC, disable interrupts, enable the IH
 * ring buffer and enable it (CIK).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int cik_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	cik_disable_interrupts(rdev);

	/* init rlc (loads ucode); tear the IH ring back down on failure */
	ret = cik_rlc_resume(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* XXX this should actually be a bus address, not an MC address. same on older asics */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	/* ring size field is log2 of the size in dwords */
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	cik_disable_interrupt_state(rdev);

	pci_set_master(rdev->pdev);

	/* enable irqs */
	cik_enable_interrupts(rdev);

	return ret;
}
5057
5058/**
5059 * cik_irq_set - enable/disable interrupt sources
5060 *
5061 * @rdev: radeon_device pointer
5062 *
5063 * Enable interrupt sources on the GPU (vblanks, hpd,
5064 * etc.) (CIK).
5065 * Returns 0 for success, errors for failure.
5066 */
5067int cik_irq_set(struct radeon_device *rdev)
5068{
5069 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE |
5070 PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
5071 u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
5072 u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
5073 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
5074 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
5075 u32 grbm_int_cntl = 0;
5076 u32 dma_cntl, dma_cntl1;
5077
5078 if (!rdev->irq.installed) {
5079 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
5080 return -EINVAL;
5081 }
5082 /* don't enable anything if the ih is disabled */
5083 if (!rdev->ih.enabled) {
5084 cik_disable_interrupts(rdev);
5085 /* force the active interrupt state to all disabled */
5086 cik_disable_interrupt_state(rdev);
5087 return 0;
5088 }
5089
5090 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
5091 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
5092 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
5093 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
5094 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
5095 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
5096
5097 dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
5098 dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
5099
5100 cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5101 cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5102 cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5103 cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5104 cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5105 cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5106 cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5107 cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5108
5109 /* enable CP interrupts on all rings */
5110 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
5111 DRM_DEBUG("cik_irq_set: sw int gfx\n");
5112 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
5113 }
5114 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
5115 struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
5116 DRM_DEBUG("si_irq_set: sw int cp1\n");
5117 if (ring->me == 1) {
5118 switch (ring->pipe) {
5119 case 0:
5120 cp_m1p0 |= TIME_STAMP_INT_ENABLE;
5121 break;
5122 case 1:
5123 cp_m1p1 |= TIME_STAMP_INT_ENABLE;
5124 break;
5125 case 2:
5126 cp_m1p2 |= TIME_STAMP_INT_ENABLE;
5127 break;
5128 case 3:
5129 cp_m1p2 |= TIME_STAMP_INT_ENABLE;
5130 break;
5131 default:
5132 DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
5133 break;
5134 }
5135 } else if (ring->me == 2) {
5136 switch (ring->pipe) {
5137 case 0:
5138 cp_m2p0 |= TIME_STAMP_INT_ENABLE;
5139 break;
5140 case 1:
5141 cp_m2p1 |= TIME_STAMP_INT_ENABLE;
5142 break;
5143 case 2:
5144 cp_m2p2 |= TIME_STAMP_INT_ENABLE;
5145 break;
5146 case 3:
5147 cp_m2p2 |= TIME_STAMP_INT_ENABLE;
5148 break;
5149 default:
5150 DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
5151 break;
5152 }
5153 } else {
5154 DRM_DEBUG("si_irq_set: sw int cp1 invalid me %d\n", ring->me);
5155 }
5156 }
5157 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
5158 struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
5159 DRM_DEBUG("si_irq_set: sw int cp2\n");
5160 if (ring->me == 1) {
5161 switch (ring->pipe) {
5162 case 0:
5163 cp_m1p0 |= TIME_STAMP_INT_ENABLE;
5164 break;
5165 case 1:
5166 cp_m1p1 |= TIME_STAMP_INT_ENABLE;
5167 break;
5168 case 2:
5169 cp_m1p2 |= TIME_STAMP_INT_ENABLE;
5170 break;
5171 case 3:
5172 cp_m1p2 |= TIME_STAMP_INT_ENABLE;
5173 break;
5174 default:
5175 DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
5176 break;
5177 }
5178 } else if (ring->me == 2) {
5179 switch (ring->pipe) {
5180 case 0:
5181 cp_m2p0 |= TIME_STAMP_INT_ENABLE;
5182 break;
5183 case 1:
5184 cp_m2p1 |= TIME_STAMP_INT_ENABLE;
5185 break;
5186 case 2:
5187 cp_m2p2 |= TIME_STAMP_INT_ENABLE;
5188 break;
5189 case 3:
5190 cp_m2p2 |= TIME_STAMP_INT_ENABLE;
5191 break;
5192 default:
5193 DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
5194 break;
5195 }
5196 } else {
5197 DRM_DEBUG("si_irq_set: sw int cp2 invalid me %d\n", ring->me);
5198 }
5199 }
5200
5201 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
5202 DRM_DEBUG("cik_irq_set: sw int dma\n");
5203 dma_cntl |= TRAP_ENABLE;
5204 }
5205
5206 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
5207 DRM_DEBUG("cik_irq_set: sw int dma1\n");
5208 dma_cntl1 |= TRAP_ENABLE;
5209 }
5210
5211 if (rdev->irq.crtc_vblank_int[0] ||
5212 atomic_read(&rdev->irq.pflip[0])) {
5213 DRM_DEBUG("cik_irq_set: vblank 0\n");
5214 crtc1 |= VBLANK_INTERRUPT_MASK;
5215 }
5216 if (rdev->irq.crtc_vblank_int[1] ||
5217 atomic_read(&rdev->irq.pflip[1])) {
5218 DRM_DEBUG("cik_irq_set: vblank 1\n");
5219 crtc2 |= VBLANK_INTERRUPT_MASK;
5220 }
5221 if (rdev->irq.crtc_vblank_int[2] ||
5222 atomic_read(&rdev->irq.pflip[2])) {
5223 DRM_DEBUG("cik_irq_set: vblank 2\n");
5224 crtc3 |= VBLANK_INTERRUPT_MASK;
5225 }
5226 if (rdev->irq.crtc_vblank_int[3] ||
5227 atomic_read(&rdev->irq.pflip[3])) {
5228 DRM_DEBUG("cik_irq_set: vblank 3\n");
5229 crtc4 |= VBLANK_INTERRUPT_MASK;
5230 }
5231 if (rdev->irq.crtc_vblank_int[4] ||
5232 atomic_read(&rdev->irq.pflip[4])) {
5233 DRM_DEBUG("cik_irq_set: vblank 4\n");
5234 crtc5 |= VBLANK_INTERRUPT_MASK;
5235 }
5236 if (rdev->irq.crtc_vblank_int[5] ||
5237 atomic_read(&rdev->irq.pflip[5])) {
5238 DRM_DEBUG("cik_irq_set: vblank 5\n");
5239 crtc6 |= VBLANK_INTERRUPT_MASK;
5240 }
5241 if (rdev->irq.hpd[0]) {
5242 DRM_DEBUG("cik_irq_set: hpd 1\n");
5243 hpd1 |= DC_HPDx_INT_EN;
5244 }
5245 if (rdev->irq.hpd[1]) {
5246 DRM_DEBUG("cik_irq_set: hpd 2\n");
5247 hpd2 |= DC_HPDx_INT_EN;
5248 }
5249 if (rdev->irq.hpd[2]) {
5250 DRM_DEBUG("cik_irq_set: hpd 3\n");
5251 hpd3 |= DC_HPDx_INT_EN;
5252 }
5253 if (rdev->irq.hpd[3]) {
5254 DRM_DEBUG("cik_irq_set: hpd 4\n");
5255 hpd4 |= DC_HPDx_INT_EN;
5256 }
5257 if (rdev->irq.hpd[4]) {
5258 DRM_DEBUG("cik_irq_set: hpd 5\n");
5259 hpd5 |= DC_HPDx_INT_EN;
5260 }
5261 if (rdev->irq.hpd[5]) {
5262 DRM_DEBUG("cik_irq_set: hpd 6\n");
5263 hpd6 |= DC_HPDx_INT_EN;
5264 }
5265
5266 WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
5267
5268 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
5269 WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
5270
5271 WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
5272 WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
5273 WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
5274 WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
5275 WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
5276 WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
5277 WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
5278 WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
5279
5280 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
5281
5282 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
5283 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
5284 if (rdev->num_crtc >= 4) {
5285 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
5286 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
5287 }
5288 if (rdev->num_crtc >= 6) {
5289 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
5290 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
5291 }
5292
5293 WREG32(DC_HPD1_INT_CONTROL, hpd1);
5294 WREG32(DC_HPD2_INT_CONTROL, hpd2);
5295 WREG32(DC_HPD3_INT_CONTROL, hpd3);
5296 WREG32(DC_HPD4_INT_CONTROL, hpd4);
5297 WREG32(DC_HPD5_INT_CONTROL, hpd5);
5298 WREG32(DC_HPD6_INT_CONTROL, hpd6);
5299
5300 return 0;
5301}
5302
5303/**
5304 * cik_irq_ack - ack interrupt sources
5305 *
5306 * @rdev: radeon_device pointer
5307 *
5308 * Ack interrupt sources on the GPU (vblanks, hpd,
5309 * etc.) (CIK). Certain interrupts sources are sw
5310 * generated and do not require an explicit ack.
5311 */
5312static inline void cik_irq_ack(struct radeon_device *rdev)
5313{
5314 u32 tmp;
5315
5316 rdev->irq.stat_regs.cik.disp_int = RREG32(DISP_INTERRUPT_STATUS);
5317 rdev->irq.stat_regs.cik.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
5318 rdev->irq.stat_regs.cik.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
5319 rdev->irq.stat_regs.cik.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
5320 rdev->irq.stat_regs.cik.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
5321 rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
5322 rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
5323
5324 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
5325 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
5326 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
5327 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
5328 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
5329 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
5330 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT)
5331 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
5332
5333 if (rdev->num_crtc >= 4) {
5334 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
5335 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
5336 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
5337 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
5338 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
5339 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
5340 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
5341 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
5342 }
5343
5344 if (rdev->num_crtc >= 6) {
5345 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
5346 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
5347 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
5348 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
5349 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
5350 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
5351 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
5352 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
5353 }
5354
5355 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
5356 tmp = RREG32(DC_HPD1_INT_CONTROL);
5357 tmp |= DC_HPDx_INT_ACK;
5358 WREG32(DC_HPD1_INT_CONTROL, tmp);
5359 }
5360 if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
5361 tmp = RREG32(DC_HPD2_INT_CONTROL);
5362 tmp |= DC_HPDx_INT_ACK;
5363 WREG32(DC_HPD2_INT_CONTROL, tmp);
5364 }
5365 if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
5366 tmp = RREG32(DC_HPD3_INT_CONTROL);
5367 tmp |= DC_HPDx_INT_ACK;
5368 WREG32(DC_HPD3_INT_CONTROL, tmp);
5369 }
5370 if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
5371 tmp = RREG32(DC_HPD4_INT_CONTROL);
5372 tmp |= DC_HPDx_INT_ACK;
5373 WREG32(DC_HPD4_INT_CONTROL, tmp);
5374 }
5375 if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
5376 tmp = RREG32(DC_HPD5_INT_CONTROL);
5377 tmp |= DC_HPDx_INT_ACK;
5378 WREG32(DC_HPD5_INT_CONTROL, tmp);
5379 }
5380 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
5381 tmp = RREG32(DC_HPD5_INT_CONTROL);
5382 tmp |= DC_HPDx_INT_ACK;
5383 WREG32(DC_HPD6_INT_CONTROL, tmp);
5384 }
5385}
5386
/**
 * cik_irq_disable - disable interrupts
 *
 * @rdev: radeon_device pointer
 *
 * Disable interrupts on the hw (CIK).
 * Disables the IH ring, waits briefly for in-flight interrupts,
 * acks anything pending, then clears all source enables.
 */
static void cik_irq_disable(struct radeon_device *rdev)
{
	cik_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	cik_irq_ack(rdev);
	cik_disable_interrupt_state(rdev);
}
5402
/**
 * cik_irq_suspend - disable interrupts for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Disable interrupts and stop the RLC (CIK).
 * Used for suspend.
 */
static void cik_irq_suspend(struct radeon_device *rdev)
{
	cik_irq_disable(rdev);
	cik_rlc_stop(rdev);
}
5416
/**
 * cik_irq_fini - tear down interrupt support
 *
 * @rdev: radeon_device pointer
 *
 * Disable interrupts on the hw and free the IH ring
 * buffer (CIK).
 * Used for driver unload.
 */
static void cik_irq_fini(struct radeon_device *rdev)
{
	cik_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
5431
5432/**
5433 * cik_get_ih_wptr - get the IH ring buffer wptr
5434 *
5435 * @rdev: radeon_device pointer
5436 *
5437 * Get the IH ring buffer wptr from either the register
5438 * or the writeback memory buffer (CIK). Also check for
5439 * ring buffer overflow and deal with it.
5440 * Used by cik_irq_process().
5441 * Returns the value of the wptr.
5442 */
5443static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
5444{
5445 u32 wptr, tmp;
5446
5447 if (rdev->wb.enabled)
5448 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
5449 else
5450 wptr = RREG32(IH_RB_WPTR);
5451
5452 if (wptr & RB_OVERFLOW) {
5453 /* When a ring buffer overflow happen start parsing interrupt
5454 * from the last not overwritten vector (wptr + 16). Hopefully
5455 * this should allow us to catchup.
5456 */
5457 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
5458 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
5459 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
5460 tmp = RREG32(IH_RB_CNTL);
5461 tmp |= IH_WPTR_OVERFLOW_CLEAR;
5462 WREG32(IH_RB_CNTL, tmp);
5463 }
5464 return (wptr & rdev->ih.ptr_mask);
5465}
5466
5467/* CIK IV Ring
5468 * Each IV ring entry is 128 bits:
5469 * [7:0] - interrupt source id
5470 * [31:8] - reserved
5471 * [59:32] - interrupt source data
5472 * [63:60] - reserved
5473 * [71:64] - RINGID
5474 * CP:
5475 * ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0]
5476 * QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher
5477 * - for gfx, hw shader state (0=PS...5=LS, 6=CS)
5478 * ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes
5479 * PIPE_ID - ME0 0=3D
5480 * - ME1&2 compute dispatcher (4 pipes each)
5481 * SDMA:
5482 * INSTANCE_ID [1:0], QUEUE_ID[1:0]
5483 * INSTANCE_ID - 0 = sdma0, 1 = sdma1
5484 * QUEUE_ID - 0 = gfx, 1 = rlc0, 2 = rlc1
5485 * [79:72] - VMID
5486 * [95:80] - PASID
5487 * [127:96] - reserved
5488 */
5489/**
5490 * cik_irq_process - interrupt handler
5491 *
5492 * @rdev: radeon_device pointer
5493 *
5494 * Interrupt hander (CIK). Walk the IH ring,
5495 * ack interrupts and schedule work to handle
5496 * interrupt events.
5497 * Returns irq process return code.
5498 */
5499int cik_irq_process(struct radeon_device *rdev)
5500{
5501 struct radeon_ring *cp1_ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
5502 struct radeon_ring *cp2_ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
5503 u32 wptr;
5504 u32 rptr;
5505 u32 src_id, src_data, ring_id;
5506 u8 me_id, pipe_id, queue_id;
5507 u32 ring_index;
5508 bool queue_hotplug = false;
5509 bool queue_reset = false;
5510
5511 if (!rdev->ih.enabled || rdev->shutdown)
5512 return IRQ_NONE;
5513
5514 wptr = cik_get_ih_wptr(rdev);
5515
5516restart_ih:
5517 /* is somebody else already processing irqs? */
5518 if (atomic_xchg(&rdev->ih.lock, 1))
5519 return IRQ_NONE;
5520
5521 rptr = rdev->ih.rptr;
5522 DRM_DEBUG("cik_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
5523
5524 /* Order reading of wptr vs. reading of IH ring data */
5525 rmb();
5526
5527 /* display interrupts */
5528 cik_irq_ack(rdev);
5529
5530 while (rptr != wptr) {
5531 /* wptr/rptr are in bytes! */
5532 ring_index = rptr / 4;
5533 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
5534 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
5535 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
5536
5537 switch (src_id) {
5538 case 1: /* D1 vblank/vline */
5539 switch (src_data) {
5540 case 0: /* D1 vblank */
5541 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
5542 if (rdev->irq.crtc_vblank_int[0]) {
5543 drm_handle_vblank(rdev->ddev, 0);
5544 rdev->pm.vblank_sync = true;
5545 wake_up(&rdev->irq.vblank_queue);
5546 }
5547 if (atomic_read(&rdev->irq.pflip[0]))
5548 radeon_crtc_handle_flip(rdev, 0);
5549 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
5550 DRM_DEBUG("IH: D1 vblank\n");
5551 }
5552 break;
5553 case 1: /* D1 vline */
5554 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
5555 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
5556 DRM_DEBUG("IH: D1 vline\n");
5557 }
5558 break;
5559 default:
5560 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5561 break;
5562 }
5563 break;
5564 case 2: /* D2 vblank/vline */
5565 switch (src_data) {
5566 case 0: /* D2 vblank */
5567 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
5568 if (rdev->irq.crtc_vblank_int[1]) {
5569 drm_handle_vblank(rdev->ddev, 1);
5570 rdev->pm.vblank_sync = true;
5571 wake_up(&rdev->irq.vblank_queue);
5572 }
5573 if (atomic_read(&rdev->irq.pflip[1]))
5574 radeon_crtc_handle_flip(rdev, 1);
5575 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
5576 DRM_DEBUG("IH: D2 vblank\n");
5577 }
5578 break;
5579 case 1: /* D2 vline */
5580 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
5581 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
5582 DRM_DEBUG("IH: D2 vline\n");
5583 }
5584 break;
5585 default:
5586 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5587 break;
5588 }
5589 break;
5590 case 3: /* D3 vblank/vline */
5591 switch (src_data) {
5592 case 0: /* D3 vblank */
5593 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
5594 if (rdev->irq.crtc_vblank_int[2]) {
5595 drm_handle_vblank(rdev->ddev, 2);
5596 rdev->pm.vblank_sync = true;
5597 wake_up(&rdev->irq.vblank_queue);
5598 }
5599 if (atomic_read(&rdev->irq.pflip[2]))
5600 radeon_crtc_handle_flip(rdev, 2);
5601 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
5602 DRM_DEBUG("IH: D3 vblank\n");
5603 }
5604 break;
5605 case 1: /* D3 vline */
5606 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
5607 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
5608 DRM_DEBUG("IH: D3 vline\n");
5609 }
5610 break;
5611 default:
5612 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5613 break;
5614 }
5615 break;
5616 case 4: /* D4 vblank/vline */
5617 switch (src_data) {
5618 case 0: /* D4 vblank */
5619 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
5620 if (rdev->irq.crtc_vblank_int[3]) {
5621 drm_handle_vblank(rdev->ddev, 3);
5622 rdev->pm.vblank_sync = true;
5623 wake_up(&rdev->irq.vblank_queue);
5624 }
5625 if (atomic_read(&rdev->irq.pflip[3]))
5626 radeon_crtc_handle_flip(rdev, 3);
5627 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
5628 DRM_DEBUG("IH: D4 vblank\n");
5629 }
5630 break;
5631 case 1: /* D4 vline */
5632 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
5633 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
5634 DRM_DEBUG("IH: D4 vline\n");
5635 }
5636 break;
5637 default:
5638 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5639 break;
5640 }
5641 break;
5642 case 5: /* D5 vblank/vline */
5643 switch (src_data) {
5644 case 0: /* D5 vblank */
5645 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
5646 if (rdev->irq.crtc_vblank_int[4]) {
5647 drm_handle_vblank(rdev->ddev, 4);
5648 rdev->pm.vblank_sync = true;
5649 wake_up(&rdev->irq.vblank_queue);
5650 }
5651 if (atomic_read(&rdev->irq.pflip[4]))
5652 radeon_crtc_handle_flip(rdev, 4);
5653 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
5654 DRM_DEBUG("IH: D5 vblank\n");
5655 }
5656 break;
5657 case 1: /* D5 vline */
5658 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
5659 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
5660 DRM_DEBUG("IH: D5 vline\n");
5661 }
5662 break;
5663 default:
5664 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5665 break;
5666 }
5667 break;
5668 case 6: /* D6 vblank/vline */
5669 switch (src_data) {
5670 case 0: /* D6 vblank */
5671 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
5672 if (rdev->irq.crtc_vblank_int[5]) {
5673 drm_handle_vblank(rdev->ddev, 5);
5674 rdev->pm.vblank_sync = true;
5675 wake_up(&rdev->irq.vblank_queue);
5676 }
5677 if (atomic_read(&rdev->irq.pflip[5]))
5678 radeon_crtc_handle_flip(rdev, 5);
5679 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
5680 DRM_DEBUG("IH: D6 vblank\n");
5681 }
5682 break;
5683 case 1: /* D6 vline */
5684 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
5685 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
5686 DRM_DEBUG("IH: D6 vline\n");
5687 }
5688 break;
5689 default:
5690 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5691 break;
5692 }
5693 break;
5694 case 42: /* HPD hotplug */
5695 switch (src_data) {
5696 case 0:
5697 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
5698 rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
5699 queue_hotplug = true;
5700 DRM_DEBUG("IH: HPD1\n");
5701 }
5702 break;
5703 case 1:
5704 if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
5705 rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
5706 queue_hotplug = true;
5707 DRM_DEBUG("IH: HPD2\n");
5708 }
5709 break;
5710 case 2:
5711 if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
5712 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
5713 queue_hotplug = true;
5714 DRM_DEBUG("IH: HPD3\n");
5715 }
5716 break;
5717 case 3:
5718 if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
5719 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
5720 queue_hotplug = true;
5721 DRM_DEBUG("IH: HPD4\n");
5722 }
5723 break;
5724 case 4:
5725 if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
5726 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
5727 queue_hotplug = true;
5728 DRM_DEBUG("IH: HPD5\n");
5729 }
5730 break;
5731 case 5:
5732 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
5733 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
5734 queue_hotplug = true;
5735 DRM_DEBUG("IH: HPD6\n");
5736 }
5737 break;
5738 default:
5739 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5740 break;
5741 }
5742 break;
5743 case 146:
5744 case 147:
5745 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
5746 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
5747 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
5748 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
5749 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
5750 /* reset addr and status */
5751 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
5752 break;
5753 case 176: /* GFX RB CP_INT */
5754 case 177: /* GFX IB CP_INT */
5755 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
5756 break;
5757 case 181: /* CP EOP event */
5758 DRM_DEBUG("IH: CP EOP\n");
5759 /* XXX check the bitfield order! */
5760 me_id = (ring_id & 0x60) >> 5;
5761 pipe_id = (ring_id & 0x18) >> 3;
5762 queue_id = (ring_id & 0x7) >> 0;
5763 switch (me_id) {
5764 case 0:
5765 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
5766 break;
5767 case 1:
5768 case 2:
5769 if ((cp1_ring->me == me_id) & (cp1_ring->pipe == pipe_id))
5770 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
5771 if ((cp2_ring->me == me_id) & (cp2_ring->pipe == pipe_id))
5772 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
5773 break;
5774 }
5775 break;
5776 case 184: /* CP Privileged reg access */
5777 DRM_ERROR("Illegal register access in command stream\n");
5778 /* XXX check the bitfield order! */
5779 me_id = (ring_id & 0x60) >> 5;
5780 pipe_id = (ring_id & 0x18) >> 3;
5781 queue_id = (ring_id & 0x7) >> 0;
5782 switch (me_id) {
5783 case 0:
5784 /* This results in a full GPU reset, but all we need to do is soft
5785 * reset the CP for gfx
5786 */
5787 queue_reset = true;
5788 break;
5789 case 1:
5790 /* XXX compute */
5791 queue_reset = true;
5792 break;
5793 case 2:
5794 /* XXX compute */
5795 queue_reset = true;
5796 break;
5797 }
5798 break;
5799 case 185: /* CP Privileged inst */
5800 DRM_ERROR("Illegal instruction in command stream\n");
5801 /* XXX check the bitfield order! */
5802 me_id = (ring_id & 0x60) >> 5;
5803 pipe_id = (ring_id & 0x18) >> 3;
5804 queue_id = (ring_id & 0x7) >> 0;
5805 switch (me_id) {
5806 case 0:
5807 /* This results in a full GPU reset, but all we need to do is soft
5808 * reset the CP for gfx
5809 */
5810 queue_reset = true;
5811 break;
5812 case 1:
5813 /* XXX compute */
5814 queue_reset = true;
5815 break;
5816 case 2:
5817 /* XXX compute */
5818 queue_reset = true;
5819 break;
5820 }
5821 break;
5822 case 224: /* SDMA trap event */
5823 /* XXX check the bitfield order! */
5824 me_id = (ring_id & 0x3) >> 0;
5825 queue_id = (ring_id & 0xc) >> 2;
5826 DRM_DEBUG("IH: SDMA trap\n");
5827 switch (me_id) {
5828 case 0:
5829 switch (queue_id) {
5830 case 0:
5831 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
5832 break;
5833 case 1:
5834 /* XXX compute */
5835 break;
5836 case 2:
5837 /* XXX compute */
5838 break;
5839 }
5840 break;
5841 case 1:
5842 switch (queue_id) {
5843 case 0:
5844 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
5845 break;
5846 case 1:
5847 /* XXX compute */
5848 break;
5849 case 2:
5850 /* XXX compute */
5851 break;
5852 }
5853 break;
5854 }
5855 break;
5856 case 241: /* SDMA Privileged inst */
5857 case 247: /* SDMA Privileged inst */
5858 DRM_ERROR("Illegal instruction in SDMA command stream\n");
5859 /* XXX check the bitfield order! */
5860 me_id = (ring_id & 0x3) >> 0;
5861 queue_id = (ring_id & 0xc) >> 2;
5862 switch (me_id) {
5863 case 0:
5864 switch (queue_id) {
5865 case 0:
5866 queue_reset = true;
5867 break;
5868 case 1:
5869 /* XXX compute */
5870 queue_reset = true;
5871 break;
5872 case 2:
5873 /* XXX compute */
5874 queue_reset = true;
5875 break;
5876 }
5877 break;
5878 case 1:
5879 switch (queue_id) {
5880 case 0:
5881 queue_reset = true;
5882 break;
5883 case 1:
5884 /* XXX compute */
5885 queue_reset = true;
5886 break;
5887 case 2:
5888 /* XXX compute */
5889 queue_reset = true;
5890 break;
5891 }
5892 break;
5893 }
5894 break;
5895 case 233: /* GUI IDLE */
5896 DRM_DEBUG("IH: GUI idle\n");
5897 break;
5898 default:
5899 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5900 break;
5901 }
5902
5903 /* wptr/rptr are in bytes! */
5904 rptr += 16;
5905 rptr &= rdev->ih.ptr_mask;
5906 }
5907 if (queue_hotplug)
5908 schedule_work(&rdev->hotplug_work);
5909 if (queue_reset)
5910 schedule_work(&rdev->reset_work);
5911 rdev->ih.rptr = rptr;
5912 WREG32(IH_RB_RPTR, rdev->ih.rptr);
5913 atomic_set(&rdev->ih.lock, 0);
5914
5915 /* make sure wptr hasn't changed while processing */
5916 wptr = cik_get_ih_wptr(rdev);
5917 if (wptr != rptr)
5918 goto restart_ih;
5919
5920 return IRQ_HANDLED;
5921}
5922
5923/*
5924 * startup/shutdown callbacks
5925 */
5926/**
5927 * cik_startup - program the asic to a functional state
5928 *
5929 * @rdev: radeon_device pointer
5930 *
5931 * Programs the asic to a functional state (CIK).
5932 * Called by cik_init() and cik_resume().
5933 * Returns 0 for success, error for failure.
5934 */
static int cik_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* Load the microcode images if they are not already resident.
	 * IGPs have no MC (memory controller) firmware; dGPUs additionally
	 * need the MC ucode loaded and running before vram is touched.
	 */
	if (rdev->flags & RADEON_IS_IGP) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
		    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
			r = cik_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
		    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
		    !rdev->mc_fw) {
			r = cik_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}

		r = ci_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	/* program the memory controller and enable the GART before
	 * initializing the rest of the gpu */
	cik_mc_program(rdev);
	r = cik_pcie_gart_enable(rdev);
	if (r)
		return r;
	cik_gpu_init(rdev);

	/* allocate rlc buffers */
	r = si_rlc_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* allocate mec buffers */
	r = cik_mec_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* start the fence driver on every ring used below:
	 * gfx, two compute rings, two sdma rings, and (optionally) UVD */
	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* UVD is optional: on any failure, zero the ring size so the
	 * UVD ring init below is skipped and the rest still comes up */
	r = cik_uvd_resume(rdev);
	if (!r) {
		r = radeon_fence_driver_start_ring(rdev,
						   R600_RING_TYPE_UVD_INDEX);
		if (r)
			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
	}
	if (r)
		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = cik_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	cik_irq_set(rdev);

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     CP_RB0_RPTR, CP_RB0_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	/* set up the compute queues */
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
			     CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
			     0, 0xfffff, PACKET3(PACKET3_NOP, 0x3FFF));
	if (r)
		return r;
	ring->me = 1; /* first MEC */
	ring->pipe = 0; /* first pipe */
	ring->queue = 0; /* first queue */
	ring->wptr_offs = CIK_WB_CP1_WPTR_OFFSET;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	/* NOTE(review): the ptr mask here (0xffffffff) differs from the CP1
	 * ring above (0xfffff) — confirm this asymmetry is intended. */
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
			     CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
			     0, 0xffffffff, PACKET3(PACKET3_NOP, 0x3FFF));
	if (r)
		return r;
	/* dGPU only have 1 MEC */
	ring->me = 1; /* first MEC */
	ring->pipe = 0; /* first pipe */
	ring->queue = 1; /* second queue */
	ring->wptr_offs = CIK_WB_CP2_WPTR_OFFSET;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
			     SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
			     2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
			     SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
			     SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
			     2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	if (r)
		return r;

	/* bring up the CP and SDMA engines now that their rings exist */
	r = cik_cp_resume(rdev);
	if (r)
		return r;

	r = cik_sdma_resume(rdev);
	if (r)
		return r;

	/* ring_size is 0 if UVD setup failed above; skip in that case */
	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	if (ring->ring_size) {
		r = radeon_ring_init(rdev, ring, ring->ring_size,
				     R600_WB_UVD_RPTR_OFFSET,
				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
				     0, 0xfffff, RADEON_CP_PACKET2);
		if (!r)
			r = r600_uvd_init(rdev);
		if (r)
			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}
6134
6135/**
6136 * cik_resume - resume the asic to a functional state
6137 *
6138 * @rdev: radeon_device pointer
6139 *
6140 * Programs the asic to a functional state (CIK).
6141 * Called at resume.
6142 * Returns 0 for success, error for failure.
6143 */
6144int cik_resume(struct radeon_device *rdev)
6145{
6146 int r;
6147
6148 /* post card */
6149 atom_asic_init(rdev->mode_info.atom_context);
6150
6151 /* init golden registers */
6152 cik_init_golden_registers(rdev);
6153
6154 rdev->accel_working = true;
6155 r = cik_startup(rdev);
6156 if (r) {
6157 DRM_ERROR("cik startup failed on resume\n");
6158 rdev->accel_working = false;
6159 return r;
6160 }
6161
6162 return r;
6163
6164}
6165
6166/**
6167 * cik_suspend - suspend the asic
6168 *
6169 * @rdev: radeon_device pointer
6170 *
6171 * Bring the chip into a state suitable for suspend (CIK).
6172 * Called at suspend.
6173 * Returns 0 for success.
6174 */
int cik_suspend(struct radeon_device *rdev)
{
	/* tear things down in roughly the reverse order of cik_startup():
	 * vm manager first, then halt the CP/SDMA/UVD engines, then
	 * interrupts, writeback, and finally the GART */
	radeon_vm_manager_fini(rdev);
	cik_cp_enable(rdev, false);
	cik_sdma_enable(rdev, false);
	r600_uvd_rbc_stop(rdev);
	radeon_uvd_suspend(rdev);
	cik_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cik_pcie_gart_disable(rdev);
	return 0;
}
6187
6188/* Plan is to move initialization in that function and use
6189 * helper function so that radeon_device_init pretty much
6190 * do nothing more than calling asic specific function. This
6191 * should also allow to remove a bunch of callback function
6192 * like vram_info.
6193 */
6194/**
6195 * cik_init - asic specific driver and hw init
6196 *
6197 * @rdev: radeon_device pointer
6198 *
6199 * Setup asic specific driver variables and program the hw
6200 * to a functional state (CIK).
6201 * Called at driver startup.
6202 * Returns 0 for success, errors for failure.
6203 */
6204int cik_init(struct radeon_device *rdev)
6205{
6206 struct radeon_ring *ring;
6207 int r;
6208
6209 /* Read BIOS */
6210 if (!radeon_get_bios(rdev)) {
6211 if (ASIC_IS_AVIVO(rdev))
6212 return -EINVAL;
6213 }
6214 /* Must be an ATOMBIOS */
6215 if (!rdev->is_atom_bios) {
6216 dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
6217 return -EINVAL;
6218 }
6219 r = radeon_atombios_init(rdev);
6220 if (r)
6221 return r;
6222
6223 /* Post card if necessary */
6224 if (!radeon_card_posted(rdev)) {
6225 if (!rdev->bios) {
6226 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
6227 return -EINVAL;
6228 }
6229 DRM_INFO("GPU not posted. posting now...\n");
6230 atom_asic_init(rdev->mode_info.atom_context);
6231 }
6232 /* init golden registers */
6233 cik_init_golden_registers(rdev);
6234 /* Initialize scratch registers */
6235 cik_scratch_init(rdev);
6236 /* Initialize surface registers */
6237 radeon_surface_init(rdev);
6238 /* Initialize clocks */
6239 radeon_get_clock_info(rdev->ddev);
6240
6241 /* Fence driver */
6242 r = radeon_fence_driver_init(rdev);
6243 if (r)
6244 return r;
6245
6246 /* initialize memory controller */
6247 r = cik_mc_init(rdev);
6248 if (r)
6249 return r;
6250 /* Memory manager */
6251 r = radeon_bo_init(rdev);
6252 if (r)
6253 return r;
6254
6255 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
6256 ring->ring_obj = NULL;
6257 r600_ring_init(rdev, ring, 1024 * 1024);
6258
6259 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
6260 ring->ring_obj = NULL;
6261 r600_ring_init(rdev, ring, 1024 * 1024);
6262 r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
6263 if (r)
6264 return r;
6265
6266 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
6267 ring->ring_obj = NULL;
6268 r600_ring_init(rdev, ring, 1024 * 1024);
6269 r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
6270 if (r)
6271 return r;
6272
6273 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
6274 ring->ring_obj = NULL;
6275 r600_ring_init(rdev, ring, 256 * 1024);
6276
6277 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
6278 ring->ring_obj = NULL;
6279 r600_ring_init(rdev, ring, 256 * 1024);
6280
6281 r = radeon_uvd_init(rdev);
6282 if (!r) {
6283 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
6284 ring->ring_obj = NULL;
6285 r600_ring_init(rdev, ring, 4096);
6286 }
6287
6288 rdev->ih.ring_obj = NULL;
6289 r600_ih_ring_init(rdev, 64 * 1024);
6290
6291 r = r600_pcie_gart_init(rdev);
6292 if (r)
6293 return r;
6294
6295 rdev->accel_working = true;
6296 r = cik_startup(rdev);
6297 if (r) {
6298 dev_err(rdev->dev, "disabling GPU acceleration\n");
6299 cik_cp_fini(rdev);
6300 cik_sdma_fini(rdev);
6301 cik_irq_fini(rdev);
6302 si_rlc_fini(rdev);
6303 cik_mec_fini(rdev);
6304 radeon_wb_fini(rdev);
6305 radeon_ib_pool_fini(rdev);
6306 radeon_vm_manager_fini(rdev);
6307 radeon_irq_kms_fini(rdev);
6308 cik_pcie_gart_fini(rdev);
6309 rdev->accel_working = false;
6310 }
6311
6312 /* Don't start up if the MC ucode is missing.
6313 * The default clocks and voltages before the MC ucode
6314 * is loaded are not suffient for advanced operations.
6315 */
6316 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
6317 DRM_ERROR("radeon: MC ucode required for NI+.\n");
6318 return -EINVAL;
6319 }
6320
6321 return 0;
6322}
6323
6324/**
6325 * cik_fini - asic specific driver and hw fini
6326 *
6327 * @rdev: radeon_device pointer
6328 *
6329 * Tear down the asic specific driver variables and program the hw
6330 * to an idle state (CIK).
6331 * Called at driver unload.
6332 */
void cik_fini(struct radeon_device *rdev)
{
	/* hw teardown: engines, interrupts, rlc/mec buffers, writeback,
	 * vm/ib state, UVD, then the GART and vram scratch */
	cik_cp_fini(rdev);
	cik_sdma_fini(rdev);
	cik_irq_fini(rdev);
	si_rlc_fini(rdev);
	cik_mec_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_uvd_fini(rdev);
	cik_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	/* software state teardown */
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
6354
6355/* display watermark setup */
6356/**
6357 * dce8_line_buffer_adjust - Set up the line buffer
6358 *
6359 * @rdev: radeon_device pointer
6360 * @radeon_crtc: the selected display controller
6361 * @mode: the current display mode on the selected display
6362 * controller
6363 *
6364 * Setup up the line buffer allocation for
6365 * the selected display controller (CIK).
6366 * Returns the line buffer size in pixels.
6367 */
6368static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
6369 struct radeon_crtc *radeon_crtc,
6370 struct drm_display_mode *mode)
6371{
6372 u32 tmp;
6373
6374 /*
6375 * Line Buffer Setup
6376 * There are 6 line buffers, one for each display controllers.
6377 * There are 3 partitions per LB. Select the number of partitions
6378 * to enable based on the display width. For display widths larger
6379 * than 4096, you need use to use 2 display controllers and combine
6380 * them using the stereo blender.
6381 */
6382 if (radeon_crtc->base.enabled && mode) {
6383 if (mode->crtc_hdisplay < 1920)
6384 tmp = 1;
6385 else if (mode->crtc_hdisplay < 2560)
6386 tmp = 2;
6387 else if (mode->crtc_hdisplay < 4096)
6388 tmp = 0;
6389 else {
6390 DRM_DEBUG_KMS("Mode too big for LB!\n");
6391 tmp = 0;
6392 }
6393 } else
6394 tmp = 1;
6395
6396 WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
6397 LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
6398
6399 if (radeon_crtc->base.enabled && mode) {
6400 switch (tmp) {
6401 case 0:
6402 default:
6403 return 4096 * 2;
6404 case 1:
6405 return 1920 * 2;
6406 case 2:
6407 return 2560 * 2;
6408 }
6409 }
6410
6411 /* controller not enabled, so no lb used */
6412 return 0;
6413}
6414
6415/**
6416 * cik_get_number_of_dram_channels - get the number of dram channels
6417 *
6418 * @rdev: radeon_device pointer
6419 *
6420 * Look up the number of video ram channels (CIK).
6421 * Used for display watermark bandwidth calculations
6422 * Returns the number of dram channels
6423 */
6424static u32 cik_get_number_of_dram_channels(struct radeon_device *rdev)
6425{
6426 u32 tmp = RREG32(MC_SHARED_CHMAP);
6427
6428 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
6429 case 0:
6430 default:
6431 return 1;
6432 case 1:
6433 return 2;
6434 case 2:
6435 return 4;
6436 case 3:
6437 return 8;
6438 case 4:
6439 return 3;
6440 case 5:
6441 return 6;
6442 case 6:
6443 return 10;
6444 case 7:
6445 return 12;
6446 case 8:
6447 return 16;
6448 }
6449}
6450
/* dce8_wm_params - input parameters for the DCE8 display watermark
 * bandwidth calculations, filled in by dce8_program_watermarks() */
struct dce8_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};
6466
6467/**
6468 * dce8_dram_bandwidth - get the dram bandwidth
6469 *
6470 * @wm: watermark calculation data
6471 *
6472 * Calculate the raw dram bandwidth (CIK).
6473 * Used for display watermark bandwidth calculations
6474 * Returns the dram bandwidth in MBytes/s
6475 */
6476static u32 dce8_dram_bandwidth(struct dce8_wm_params *wm)
6477{
6478 /* Calculate raw DRAM Bandwidth */
6479 fixed20_12 dram_efficiency; /* 0.7 */
6480 fixed20_12 yclk, dram_channels, bandwidth;
6481 fixed20_12 a;
6482
6483 a.full = dfixed_const(1000);
6484 yclk.full = dfixed_const(wm->yclk);
6485 yclk.full = dfixed_div(yclk, a);
6486 dram_channels.full = dfixed_const(wm->dram_channels * 4);
6487 a.full = dfixed_const(10);
6488 dram_efficiency.full = dfixed_const(7);
6489 dram_efficiency.full = dfixed_div(dram_efficiency, a);
6490 bandwidth.full = dfixed_mul(dram_channels, yclk);
6491 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
6492
6493 return dfixed_trunc(bandwidth);
6494}
6495
6496/**
6497 * dce8_dram_bandwidth_for_display - get the dram bandwidth for display
6498 *
6499 * @wm: watermark calculation data
6500 *
6501 * Calculate the dram bandwidth used for display (CIK).
6502 * Used for display watermark bandwidth calculations
6503 * Returns the dram bandwidth for display in MBytes/s
6504 */
6505static u32 dce8_dram_bandwidth_for_display(struct dce8_wm_params *wm)
6506{
6507 /* Calculate DRAM Bandwidth and the part allocated to display. */
6508 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
6509 fixed20_12 yclk, dram_channels, bandwidth;
6510 fixed20_12 a;
6511
6512 a.full = dfixed_const(1000);
6513 yclk.full = dfixed_const(wm->yclk);
6514 yclk.full = dfixed_div(yclk, a);
6515 dram_channels.full = dfixed_const(wm->dram_channels * 4);
6516 a.full = dfixed_const(10);
6517 disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
6518 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
6519 bandwidth.full = dfixed_mul(dram_channels, yclk);
6520 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
6521
6522 return dfixed_trunc(bandwidth);
6523}
6524
6525/**
6526 * dce8_data_return_bandwidth - get the data return bandwidth
6527 *
6528 * @wm: watermark calculation data
6529 *
6530 * Calculate the data return bandwidth used for display (CIK).
6531 * Used for display watermark bandwidth calculations
6532 * Returns the data return bandwidth in MBytes/s
6533 */
6534static u32 dce8_data_return_bandwidth(struct dce8_wm_params *wm)
6535{
6536 /* Calculate the display Data return Bandwidth */
6537 fixed20_12 return_efficiency; /* 0.8 */
6538 fixed20_12 sclk, bandwidth;
6539 fixed20_12 a;
6540
6541 a.full = dfixed_const(1000);
6542 sclk.full = dfixed_const(wm->sclk);
6543 sclk.full = dfixed_div(sclk, a);
6544 a.full = dfixed_const(10);
6545 return_efficiency.full = dfixed_const(8);
6546 return_efficiency.full = dfixed_div(return_efficiency, a);
6547 a.full = dfixed_const(32);
6548 bandwidth.full = dfixed_mul(a, sclk);
6549 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
6550
6551 return dfixed_trunc(bandwidth);
6552}
6553
6554/**
6555 * dce8_dmif_request_bandwidth - get the dmif bandwidth
6556 *
6557 * @wm: watermark calculation data
6558 *
6559 * Calculate the dmif bandwidth used for display (CIK).
6560 * Used for display watermark bandwidth calculations
6561 * Returns the dmif bandwidth in MBytes/s
6562 */
6563static u32 dce8_dmif_request_bandwidth(struct dce8_wm_params *wm)
6564{
6565 /* Calculate the DMIF Request Bandwidth */
6566 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
6567 fixed20_12 disp_clk, bandwidth;
6568 fixed20_12 a, b;
6569
6570 a.full = dfixed_const(1000);
6571 disp_clk.full = dfixed_const(wm->disp_clk);
6572 disp_clk.full = dfixed_div(disp_clk, a);
6573 a.full = dfixed_const(32);
6574 b.full = dfixed_mul(a, disp_clk);
6575
6576 a.full = dfixed_const(10);
6577 disp_clk_request_efficiency.full = dfixed_const(8);
6578 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
6579
6580 bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
6581
6582 return dfixed_trunc(bandwidth);
6583}
6584
6585/**
6586 * dce8_available_bandwidth - get the min available bandwidth
6587 *
6588 * @wm: watermark calculation data
6589 *
6590 * Calculate the min available bandwidth used for display (CIK).
6591 * Used for display watermark bandwidth calculations
6592 * Returns the min available bandwidth in MBytes/s
6593 */
6594static u32 dce8_available_bandwidth(struct dce8_wm_params *wm)
6595{
6596 /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
6597 u32 dram_bandwidth = dce8_dram_bandwidth(wm);
6598 u32 data_return_bandwidth = dce8_data_return_bandwidth(wm);
6599 u32 dmif_req_bandwidth = dce8_dmif_request_bandwidth(wm);
6600
6601 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
6602}
6603
6604/**
6605 * dce8_average_bandwidth - get the average available bandwidth
6606 *
6607 * @wm: watermark calculation data
6608 *
6609 * Calculate the average available bandwidth used for display (CIK).
6610 * Used for display watermark bandwidth calculations
6611 * Returns the average available bandwidth in MBytes/s
6612 */
6613static u32 dce8_average_bandwidth(struct dce8_wm_params *wm)
6614{
6615 /* Calculate the display mode Average Bandwidth
6616 * DisplayMode should contain the source and destination dimensions,
6617 * timing, etc.
6618 */
6619 fixed20_12 bpp;
6620 fixed20_12 line_time;
6621 fixed20_12 src_width;
6622 fixed20_12 bandwidth;
6623 fixed20_12 a;
6624
6625 a.full = dfixed_const(1000);
6626 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
6627 line_time.full = dfixed_div(line_time, a);
6628 bpp.full = dfixed_const(wm->bytes_per_pixel);
6629 src_width.full = dfixed_const(wm->src_width);
6630 bandwidth.full = dfixed_mul(src_width, bpp);
6631 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
6632 bandwidth.full = dfixed_div(bandwidth, line_time);
6633
6634 return dfixed_trunc(bandwidth);
6635}
6636
6637/**
6638 * dce8_latency_watermark - get the latency watermark
6639 *
6640 * @wm: watermark calculation data
6641 *
6642 * Calculate the latency watermark (CIK).
6643 * Used for display watermark bandwidth calculations
6644 * Returns the latency watermark in ns
6645 */
static u32 dce8_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce8_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	/* no active heads: nothing to wait on */
	if (wm->num_heads == 0)
		return 0;

	/* heavy downscaling, many scaler taps, or interlace with 2x
	 * downscale require fetching up to 4 source lines per output
	 * line; otherwise 2 is enough */
	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	/* a = this head's fair share of the available bandwidth */
	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	/* b = dmif_size / ((mc_latency + 512) / disp_clk); presumably an
	 * upper bound imposed by DMIF buffering — derivation comes from
	 * hw documentation not visible here, TODO confirm */
	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	/* line buffer fill rate is the smaller of the two limits above... */
	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	/* ...and also bounded by disp_clk * bytes_per_pixel */
	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	/* time to fill one line's worth of source data at lb_fill_bw */
	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	/* if the line fills faster than the active period, the base
	 * latency stands; otherwise add the shortfall */
	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}
6708
6709/**
6710 * dce8_average_bandwidth_vs_dram_bandwidth_for_display - check
6711 * average and available dram bandwidth
6712 *
6713 * @wm: watermark calculation data
6714 *
6715 * Check if the display average bandwidth fits in the display
6716 * dram bandwidth (CIK).
6717 * Used for display watermark bandwidth calculations
6718 * Returns true if the display fits, false if not.
6719 */
6720static bool dce8_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
6721{
6722 if (dce8_average_bandwidth(wm) <=
6723 (dce8_dram_bandwidth_for_display(wm) / wm->num_heads))
6724 return true;
6725 else
6726 return false;
6727}
6728
6729/**
6730 * dce8_average_bandwidth_vs_available_bandwidth - check
6731 * average and available bandwidth
6732 *
6733 * @wm: watermark calculation data
6734 *
6735 * Check if the display average bandwidth fits in the display
6736 * available bandwidth (CIK).
6737 * Used for display watermark bandwidth calculations
6738 * Returns true if the display fits, false if not.
6739 */
6740static bool dce8_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
6741{
6742 if (dce8_average_bandwidth(wm) <=
6743 (dce8_available_bandwidth(wm) / wm->num_heads))
6744 return true;
6745 else
6746 return false;
6747}
6748
6749/**
6750 * dce8_check_latency_hiding - check latency hiding
6751 *
6752 * @wm: watermark calculation data
6753 *
6754 * Check latency hiding (CIK).
6755 * Used for display watermark bandwidth calculations
6756 * Returns true if the display fits, false if not.
6757 */
6758static bool dce8_check_latency_hiding(struct dce8_wm_params *wm)
6759{
6760 u32 lb_partitions = wm->lb_size / wm->src_width;
6761 u32 line_time = wm->active_time + wm->blank_time;
6762 u32 latency_tolerant_lines;
6763 u32 latency_hiding;
6764 fixed20_12 a;
6765
6766 a.full = dfixed_const(1);
6767 if (wm->vsc.full > a.full)
6768 latency_tolerant_lines = 1;
6769 else {
6770 if (lb_partitions <= (wm->vtaps + 1))
6771 latency_tolerant_lines = 1;
6772 else
6773 latency_tolerant_lines = 2;
6774 }
6775
6776 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
6777
6778 if (dce8_latency_watermark(wm) <= latency_hiding)
6779 return true;
6780 else
6781 return false;
6782}
6783
6784/**
6785 * dce8_program_watermarks - program display watermarks
6786 *
6787 * @rdev: radeon_device pointer
6788 * @radeon_crtc: the selected display controller
6789 * @lb_size: line buffer size
6790 * @num_heads: number of display controllers in use
6791 *
6792 * Calculate and program the display watermarks for the
6793 * selected display controller (CIK).
6794 */
static void dce8_program_watermarks(struct radeon_device *rdev,
				    struct radeon_crtc *radeon_crtc,
				    u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct dce8_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		/* one pixel period in ns; line time capped to the 16-bit
		 * watermark field */
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

		/* fill in the watermark parameters from the current mode
		 * and power-management state */
		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = cik_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce8_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		/* NOTE(review): watermark B is computed from the same
		 * (current) clocks as A — per the comment above, low clocks
		 * were presumably intended; confirm */
		latency_watermark_b = min(dce8_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !dce8_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !dce8_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
	}

	/* select wm A */
	wm_mask = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);
}
6864
6865/**
6866 * dce8_bandwidth_update - program display watermarks
6867 *
6868 * @rdev: radeon_device pointer
6869 *
6870 * Calculate and program the display watermarks and line
6871 * buffer allocation (CIK).
6872 */
6873void dce8_bandwidth_update(struct radeon_device *rdev)
6874{
6875 struct drm_display_mode *mode = NULL;
6876 u32 num_heads = 0, lb_size;
6877 int i;
6878
6879 radeon_update_display_priority(rdev);
6880
6881 for (i = 0; i < rdev->num_crtc; i++) {
6882 if (rdev->mode_info.crtcs[i]->base.enabled)
6883 num_heads++;
6884 }
6885 for (i = 0; i < rdev->num_crtc; i++) {
6886 mode = &rdev->mode_info.crtcs[i]->base.mode;
6887 lb_size = dce8_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode);
6888 dce8_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
6889 }
6890}
6891
/**
 * cik_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (CIK; the original comment
 * said "SI", a leftover from si.c this was copied from).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	/* the mutex serializes the capture + LSB/MSB readback sequence so
	 * two callers cannot interleave captures between the two reads */
	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);		/* latch the counter */
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}
6911
/**
 * cik_set_uvd_clock - program a single UVD clock (helper)
 *
 * @rdev: radeon_device pointer
 * @clock: target clock frequency
 * @cntl_reg: SMC control register for this clock
 * @status_reg: SMC status register for this clock
 *
 * Looks up the divider settings for @clock via the atom interpreter,
 * programs the post divider, and polls until the hardware reports the
 * clock as stable.  Returns 0 on success, a negative error code from
 * the atom call, or -ETIMEDOUT if the clock never settles.
 */
static int cik_set_uvd_clock(struct radeon_device *rdev, u32 clock,
			     u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	/* ask the atom interpreter for dividers matching the target clock */
	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					   clock, false, &dividers);
	if (r)
		return r;

	/* clear the bypass/direct bit and old divider, set the new one */
	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	/* poll for up to ~1s (100 x 10ms) for the clock-stable status bit */
	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & DCLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
6939
6940int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
6941{
6942 int r = 0;
6943
6944 r = cik_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
6945 if (r)
6946 return r;
6947
6948 r = cik_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
6949 return r;
6950}
6951
/**
 * cik_uvd_resume - resume UVD and program its memory layout
 *
 * @rdev: radeon_device pointer
 *
 * Resumes the UVD block via the common radeon_uvd_resume() helper, then
 * programs the VCPU memory controller windows (firmware image, stack,
 * heap) at consecutive offsets within the UVD buffer.  Returns 0 on
 * success or the error from radeon_uvd_resume().
 */
int cik_uvd_resume(struct radeon_device *rdev)
{
	uint64_t addr;
	uint32_t size;
	int r;

	r = radeon_uvd_resume(rdev);
	if (r)
		return r;

	/* program the VCPU memory controller bits 0-27 */
	/* all offsets/sizes below are in units of 8 bytes (>> 3) */
	addr = rdev->uvd.gpu_addr >> 3;
	size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET0, addr);		/* firmware image */
	WREG32(UVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = RADEON_UVD_STACK_SIZE >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);		/* VCPU stack */
	WREG32(UVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = RADEON_UVD_HEAP_SIZE >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);		/* VCPU heap */
	WREG32(UVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	/* same nibble replicated into both extension fields of the register */
	addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
	WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
	WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	return 0;
}
diff --git a/drivers/gpu/drm/radeon/cik_blit_shaders.c b/drivers/gpu/drm/radeon/cik_blit_shaders.c
new file mode 100644
index 000000000000..ff1311806e91
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_blit_shaders.c
@@ -0,0 +1,246 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Alex Deucher <alexander.deucher@amd.com>
25 */
26
27#include <linux/types.h>
28#include <linux/bug.h>
29#include <linux/kernel.h>
30
/* Default GPU context register state for CIK, emitted as a stream of
 * PACKET3 SET_CONTEXT_REG packets (header word 0xc0NN6900 = write NN
 * dwords, then the register offset, then the values).  Consumed by the
 * CP when initializing the blit/default context. */
const u32 cik_default_state[] =
{
	0xc0066900,
	0x00000000,
	0x00000060, /* DB_RENDER_CONTROL */
	0x00000000, /* DB_COUNT_CONTROL */
	0x00000000, /* DB_DEPTH_VIEW */
	0x0000002a, /* DB_RENDER_OVERRIDE */
	0x00000000, /* DB_RENDER_OVERRIDE2 */
	0x00000000, /* DB_HTILE_DATA_BASE */

	0xc0046900,
	0x00000008,
	0x00000000, /* DB_DEPTH_BOUNDS_MIN */
	0x00000000, /* DB_DEPTH_BOUNDS_MAX */
	0x00000000, /* DB_STENCIL_CLEAR */
	0x00000000, /* DB_DEPTH_CLEAR */

	0xc0036900,
	0x0000000f,
	0x00000000, /* DB_DEPTH_INFO */
	0x00000000, /* DB_Z_INFO */
	0x00000000, /* DB_STENCIL_INFO */

	0xc0016900,
	0x00000080,
	0x00000000, /* PA_SC_WINDOW_OFFSET */

	0xc00d6900,
	0x00000083,
	0x0000ffff, /* PA_SC_CLIPRECT_RULE */
	0x00000000, /* PA_SC_CLIPRECT_0_TL */
	0x20002000, /* PA_SC_CLIPRECT_0_BR */
	0x00000000,
	0x20002000,
	0x00000000,
	0x20002000,
	0x00000000,
	0x20002000,
	0xaaaaaaaa, /* PA_SC_EDGERULE */
	0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
	0x0000000f, /* CB_TARGET_MASK */
	0x0000000f, /* CB_SHADER_MASK */

	0xc0226900,
	0x00000094,
	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
	0x3f800000, /* PA_SC_VPORT_ZMAX_0 */

	0xc0046900,
	0x00000100,
	0xffffffff, /* VGT_MAX_VTX_INDX */
	0x00000000, /* VGT_MIN_VTX_INDX */
	0x00000000, /* VGT_INDX_OFFSET */
	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */

	0xc0046900,
	0x00000105,
	0x00000000, /* CB_BLEND_RED */
	0x00000000, /* CB_BLEND_GREEN */
	0x00000000, /* CB_BLEND_BLUE */
	0x00000000, /* CB_BLEND_ALPHA */

	0xc0016900,
	0x000001e0,
	0x00000000, /* CB_BLEND0_CONTROL */

	0xc00c6900,
	0x00000200,
	0x00000000, /* DB_DEPTH_CONTROL */
	0x00000000, /* DB_EQAA */
	0x00cc0010, /* CB_COLOR_CONTROL */
	0x00000210, /* DB_SHADER_CONTROL */
	0x00010000, /* PA_CL_CLIP_CNTL */
	0x00000004, /* PA_SU_SC_MODE_CNTL */
	0x00000100, /* PA_CL_VTE_CNTL */
	0x00000000, /* PA_CL_VS_OUT_CNTL */
	0x00000000, /* PA_CL_NANINF_CNTL */
	0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
	0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
	0x00000000, /* PA_SU_PRIM_FILTER_CNTL */

	0xc0116900,
	0x00000280,
	0x00000000, /* PA_SU_POINT_SIZE */
	0x00000000, /* PA_SU_POINT_MINMAX */
	0x00000008, /* PA_SU_LINE_CNTL */
	0x00000000, /* PA_SC_LINE_STIPPLE */
	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
	0x00000000, /* VGT_HOS_CNTL */
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000, /* VGT_GS_MODE */

	0xc0026900,
	0x00000292,
	0x00000000, /* PA_SC_MODE_CNTL_0 */
	0x00000000, /* PA_SC_MODE_CNTL_1 */

	0xc0016900,
	0x000002a1,
	0x00000000, /* VGT_PRIMITIVEID_EN */

	0xc0016900,
	0x000002a5,
	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */

	0xc0026900,
	0x000002a8,
	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
	0x00000000,

	0xc0026900,
	0x000002ad,
	0x00000000, /* VGT_REUSE_OFF */
	0x00000000,

	0xc0016900,
	0x000002d5,
	0x00000000, /* VGT_SHADER_STAGES_EN */

	0xc0016900,
	0x000002dc,
	0x0000aa00, /* DB_ALPHA_TO_MASK */

	0xc0066900,
	0x000002de,
	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,

	0xc0026900,
	0x000002e5,
	0x00000000, /* VGT_STRMOUT_CONFIG */
	0x00000000,

	0xc01b6900,
	0x000002f5,
	0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
	0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
	0x00000000, /* PA_SC_LINE_CNTL */
	0x00000000, /* PA_SC_AA_CONFIG */
	0x00000005, /* PA_SU_VTX_CNTL */
	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
	0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
	0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
	0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
	0xffffffff,

	0xc0026900,
	0x00000316,
	0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	0x00000010, /* presumably VGT_OUT_DEALLOC_CNTL, matching the SI
		     * state table layout — TODO confirm against CIK regs */
};

/* number of dwords in cik_default_state, for the CP upload loop */
const u32 cik_default_size = ARRAY_SIZE(cik_default_state);
diff --git a/drivers/gpu/drm/radeon/cik_blit_shaders.h b/drivers/gpu/drm/radeon/cik_blit_shaders.h
new file mode 100644
index 000000000000..dfe7314f9ff4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_blit_shaders.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 */
24
#ifndef CIK_BLIT_SHADERS_H
#define CIK_BLIT_SHADERS_H

/* default context register state stream, defined in cik_blit_shaders.c */
extern const u32 cik_default_state[];

/* number of dwords in cik_default_state */
extern const u32 cik_default_size;

#endif
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
new file mode 100644
index 000000000000..d71e46d571f5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -0,0 +1,147 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
#ifndef __CIK_REG_H__
#define __CIK_REG_H__

/* hotplug-detect GPIO registers */
#define CIK_DC_GPIO_HPD_MASK                      0x65b0
#define CIK_DC_GPIO_HPD_A                         0x65b4
#define CIK_DC_GPIO_HPD_EN                        0x65b8
#define CIK_DC_GPIO_HPD_Y                         0x65bc

/* primary graphics surface control (depth, format, tiling) */
#define CIK_GRPH_CONTROL                          0x6804
#       define CIK_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
#       define CIK_GRPH_DEPTH_8BPP                0
#       define CIK_GRPH_DEPTH_16BPP               1
#       define CIK_GRPH_DEPTH_32BPP               2
#       define CIK_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
#       define CIK_ADDR_SURF_2_BANK               0
#       define CIK_ADDR_SURF_4_BANK               1
#       define CIK_ADDR_SURF_8_BANK               2
#       define CIK_ADDR_SURF_16_BANK              3
#       define CIK_GRPH_Z(x)                      (((x) & 0x3) << 4)
#       define CIK_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
#       define CIK_ADDR_SURF_BANK_WIDTH_1         0
#       define CIK_ADDR_SURF_BANK_WIDTH_2         1
#       define CIK_ADDR_SURF_BANK_WIDTH_4         2
#       define CIK_ADDR_SURF_BANK_WIDTH_8         3
#       define CIK_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
/* 8 BPP */
#       define CIK_GRPH_FORMAT_INDEXED            0
/* 16 BPP */
#       define CIK_GRPH_FORMAT_ARGB1555           0
#       define CIK_GRPH_FORMAT_ARGB565            1
#       define CIK_GRPH_FORMAT_ARGB4444           2
#       define CIK_GRPH_FORMAT_AI88               3
#       define CIK_GRPH_FORMAT_MONO16             4
#       define CIK_GRPH_FORMAT_BGRA5551           5
/* 32 BPP */
#       define CIK_GRPH_FORMAT_ARGB8888           0
#       define CIK_GRPH_FORMAT_ARGB2101010        1
#       define CIK_GRPH_FORMAT_32BPP_DIG          2
#       define CIK_GRPH_FORMAT_8B_ARGB2101010     3
#       define CIK_GRPH_FORMAT_BGRA1010102        4
#       define CIK_GRPH_FORMAT_8B_BGRA1010102     5
#       define CIK_GRPH_FORMAT_RGB111110          6
#       define CIK_GRPH_FORMAT_BGR101111          7
#       define CIK_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
#       define CIK_ADDR_SURF_BANK_HEIGHT_1        0
#       define CIK_ADDR_SURF_BANK_HEIGHT_2        1
#       define CIK_ADDR_SURF_BANK_HEIGHT_4        2
#       define CIK_ADDR_SURF_BANK_HEIGHT_8        3
#       define CIK_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
#       define CIK_ADDR_SURF_TILE_SPLIT_64B       0
#       define CIK_ADDR_SURF_TILE_SPLIT_128B      1
#       define CIK_ADDR_SURF_TILE_SPLIT_256B      2
#       define CIK_ADDR_SURF_TILE_SPLIT_512B      3
#       define CIK_ADDR_SURF_TILE_SPLIT_1KB       4
#       define CIK_ADDR_SURF_TILE_SPLIT_2KB       5
#       define CIK_ADDR_SURF_TILE_SPLIT_4KB       6
#       define CIK_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
#       define CIK_ADDR_SURF_MACRO_TILE_ASPECT_1  0
#       define CIK_ADDR_SURF_MACRO_TILE_ASPECT_2  1
#       define CIK_ADDR_SURF_MACRO_TILE_ASPECT_4  2
#       define CIK_ADDR_SURF_MACRO_TILE_ASPECT_8  3
#       define CIK_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
#       define CIK_GRPH_ARRAY_LINEAR_GENERAL      0
#       define CIK_GRPH_ARRAY_LINEAR_ALIGNED      1
#       define CIK_GRPH_ARRAY_1D_TILED_THIN1      2
#       define CIK_GRPH_ARRAY_2D_TILED_THIN1      4
#       define CIK_GRPH_PIPE_CONFIG(x)            (((x) & 0x1f) << 24)
#       define CIK_ADDR_SURF_P2                   0
#       define CIK_ADDR_SURF_P4_8x16              4
#       define CIK_ADDR_SURF_P4_16x16             5
#       define CIK_ADDR_SURF_P4_16x32             6
#       define CIK_ADDR_SURF_P4_32x32             7
#       define CIK_ADDR_SURF_P8_16x16_8x16        8
#       define CIK_ADDR_SURF_P8_16x32_8x16        9
#       define CIK_ADDR_SURF_P8_32x32_8x16        10
#       define CIK_ADDR_SURF_P8_16x32_16x16       11
#       define CIK_ADDR_SURF_P8_32x32_16x16       12
#       define CIK_ADDR_SURF_P8_32x32_16x32       13
#       define CIK_ADDR_SURF_P8_32x64_32x32       14
#       define CIK_GRPH_MICRO_TILE_MODE(x)        (((x) & 0x7) << 29)
#       define CIK_DISPLAY_MICRO_TILING           0
#       define CIK_THIN_MICRO_TILING              1
#       define CIK_DEPTH_MICRO_TILING             2
#       define CIK_ROTATED_MICRO_TILING           4

/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
#define CIK_CUR_CONTROL                           0x6998
#       define CIK_CURSOR_EN                      (1 << 0)
#       define CIK_CURSOR_MODE(x)                 (((x) & 0x3) << 8)
#       define CIK_CURSOR_MONO                    0
#       define CIK_CURSOR_24_1                    1
#       define CIK_CURSOR_24_8_PRE_MULT           2
#       define CIK_CURSOR_24_8_UNPRE_MULT         3
#       define CIK_CURSOR_2X_MAGNIFY              (1 << 16)
#       define CIK_CURSOR_FORCE_MC_ON             (1 << 20)
#       define CIK_CURSOR_URGENT_CONTROL(x)       (((x) & 0x7) << 24)
#       define CIK_CURSOR_URGENT_ALWAYS           0
#       define CIK_CURSOR_URGENT_1_8              1
#       define CIK_CURSOR_URGENT_1_4              2
#       define CIK_CURSOR_URGENT_3_8              3
#       define CIK_CURSOR_URGENT_1_2              4
#define CIK_CUR_SURFACE_ADDRESS                   0x699c
#       define CIK_CUR_SURFACE_ADDRESS_MASK       0xfffff000
#define CIK_CUR_SIZE                              0x69a0
#define CIK_CUR_SURFACE_ADDRESS_HIGH              0x69a4
#define CIK_CUR_POSITION                          0x69a8
#define CIK_CUR_HOT_SPOT                          0x69ac
#define CIK_CUR_COLOR1                            0x69b0
#define CIK_CUR_COLOR2                            0x69b4
#define CIK_CUR_UPDATE                            0x69b8
#       define CIK_CURSOR_UPDATE_PENDING          (1 << 0)
#       define CIK_CURSOR_UPDATE_TAKEN            (1 << 1)
#       define CIK_CURSOR_UPDATE_LOCK             (1 << 16)
#       define CIK_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)

#define CIK_ALPHA_CONTROL                         0x6af0
#       define CIK_CURSOR_ALPHA_BLND_ENA          (1 << 1)

#define CIK_LB_DATA_FORMAT                        0x6b00
#       define CIK_INTERLEAVE_EN                  (1 << 3)

#define CIK_LB_DESKTOP_HEIGHT                     0x6b0c

#endif
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
new file mode 100644
index 000000000000..63514b95889a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -0,0 +1,1297 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef CIK_H
25#define CIK_H
26
27#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
28
29#define CIK_RB_BITMAP_WIDTH_PER_SH 2
30
31/* SMC IND registers */
32#define GENERAL_PWRMGT 0xC0200000
33# define GPU_COUNTER_CLK (1 << 15)
34
35#define CG_CLKPIN_CNTL 0xC05001A0
36# define XTALIN_DIVIDE (1 << 1)
37
38#define PCIE_INDEX 0x38
39#define PCIE_DATA 0x3C
40
41#define VGA_HDP_CONTROL 0x328
42#define VGA_MEMORY_DISABLE (1 << 4)
43
44#define DMIF_ADDR_CALC 0xC00
45
46#define SRBM_GFX_CNTL 0xE44
47#define PIPEID(x) ((x) << 0)
48#define MEID(x) ((x) << 2)
49#define VMID(x) ((x) << 4)
50#define QUEUEID(x) ((x) << 8)
51
52#define SRBM_STATUS2 0xE4C
53#define SDMA_BUSY (1 << 5)
54#define SDMA1_BUSY (1 << 6)
55#define SRBM_STATUS 0xE50
56#define UVD_RQ_PENDING (1 << 1)
57#define GRBM_RQ_PENDING (1 << 5)
58#define VMC_BUSY (1 << 8)
59#define MCB_BUSY (1 << 9)
60#define MCB_NON_DISPLAY_BUSY (1 << 10)
61#define MCC_BUSY (1 << 11)
62#define MCD_BUSY (1 << 12)
63#define SEM_BUSY (1 << 14)
64#define IH_BUSY (1 << 17)
65#define UVD_BUSY (1 << 19)
66
67#define SRBM_SOFT_RESET 0xE60
68#define SOFT_RESET_BIF (1 << 1)
69#define SOFT_RESET_R0PLL (1 << 4)
70#define SOFT_RESET_DC (1 << 5)
71#define SOFT_RESET_SDMA1 (1 << 6)
72#define SOFT_RESET_GRBM (1 << 8)
73#define SOFT_RESET_HDP (1 << 9)
74#define SOFT_RESET_IH (1 << 10)
75#define SOFT_RESET_MC (1 << 11)
76#define SOFT_RESET_ROM (1 << 14)
77#define SOFT_RESET_SEM (1 << 15)
78#define SOFT_RESET_VMC (1 << 17)
79#define SOFT_RESET_SDMA (1 << 20)
80#define SOFT_RESET_TST (1 << 21)
81#define SOFT_RESET_REGBB (1 << 22)
82#define SOFT_RESET_ORB (1 << 23)
83#define SOFT_RESET_VCE (1 << 24)
84
85#define VM_L2_CNTL 0x1400
86#define ENABLE_L2_CACHE (1 << 0)
87#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
88#define L2_CACHE_PTE_ENDIAN_SWAP_MODE(x) ((x) << 2)
89#define L2_CACHE_PDE_ENDIAN_SWAP_MODE(x) ((x) << 4)
90#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
91#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
92#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 15)
93#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 19)
94#define VM_L2_CNTL2 0x1404
95#define INVALIDATE_ALL_L1_TLBS (1 << 0)
96#define INVALIDATE_L2_CACHE (1 << 1)
97#define INVALIDATE_CACHE_MODE(x) ((x) << 26)
98#define INVALIDATE_PTE_AND_PDE_CACHES 0
99#define INVALIDATE_ONLY_PTE_CACHES 1
100#define INVALIDATE_ONLY_PDE_CACHES 2
101#define VM_L2_CNTL3 0x1408
102#define BANK_SELECT(x) ((x) << 0)
103#define L2_CACHE_UPDATE_MODE(x) ((x) << 6)
104#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
105#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
106#define VM_L2_STATUS 0x140C
107#define L2_BUSY (1 << 0)
108#define VM_CONTEXT0_CNTL 0x1410
109#define ENABLE_CONTEXT (1 << 0)
110#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
111#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
112#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
113#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
114#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
115#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
116#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
117#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
118#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
119#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
120#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
121#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
122#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
123#define VM_CONTEXT1_CNTL 0x1414
124#define VM_CONTEXT0_CNTL2 0x1430
125#define VM_CONTEXT1_CNTL2 0x1434
126#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x1438
127#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x143c
128#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x1440
129#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x1444
130#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x1448
131#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x144c
132#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450
133#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454
134
135#define VM_INVALIDATE_REQUEST 0x1478
136#define VM_INVALIDATE_RESPONSE 0x147c
137
138#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
139
140#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
141
142#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
143#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c
144
145#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153c
146#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x1540
147#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x1544
148#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x1548
149#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x154c
150#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x1550
151#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x1554
152#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x1558
153#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155c
154#define VM_CONTEXT1_PAGE_TABLE_START_ADDR 0x1560
155
156#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
157#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580
158
159#define MC_SHARED_CHMAP 0x2004
160#define NOOFCHAN_SHIFT 12
161#define NOOFCHAN_MASK 0x0000f000
162#define MC_SHARED_CHREMAP 0x2008
163
164#define CHUB_CONTROL 0x1864
165#define BYPASS_VM (1 << 0)
166
167#define MC_VM_FB_LOCATION 0x2024
168#define MC_VM_AGP_TOP 0x2028
169#define MC_VM_AGP_BOT 0x202C
170#define MC_VM_AGP_BASE 0x2030
171#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
172#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
173#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
174
175#define MC_VM_MX_L1_TLB_CNTL 0x2064
176#define ENABLE_L1_TLB (1 << 0)
177#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
178#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
179#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
180#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
181#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
182#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
183#define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6)
184#define MC_VM_FB_OFFSET 0x2068
185
186#define MC_SHARED_BLACKOUT_CNTL 0x20ac
187
188#define MC_ARB_RAMCFG 0x2760
189#define NOOFBANK_SHIFT 0
190#define NOOFBANK_MASK 0x00000003
191#define NOOFRANK_SHIFT 2
192#define NOOFRANK_MASK 0x00000004
193#define NOOFROWS_SHIFT 3
194#define NOOFROWS_MASK 0x00000038
195#define NOOFCOLS_SHIFT 6
196#define NOOFCOLS_MASK 0x000000C0
197#define CHANSIZE_SHIFT 8
198#define CHANSIZE_MASK 0x00000100
199#define NOOFGROUPS_SHIFT 12
200#define NOOFGROUPS_MASK 0x00001000
201
202#define MC_SEQ_SUP_CNTL 0x28c8
203#define RUN_MASK (1 << 0)
204#define MC_SEQ_SUP_PGM 0x28cc
205
206#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
207#define TRAIN_DONE_D0 (1 << 30)
208#define TRAIN_DONE_D1 (1 << 31)
209
210#define MC_IO_PAD_CNTL_D0 0x29d0
211#define MEM_FALL_OUT_CMD (1 << 8)
212
213#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
214#define MC_SEQ_IO_DEBUG_DATA 0x2a48
215
216#define HDP_HOST_PATH_CNTL 0x2C00
217#define HDP_NONSURFACE_BASE 0x2C04
218#define HDP_NONSURFACE_INFO 0x2C08
219#define HDP_NONSURFACE_SIZE 0x2C0C
220
221#define HDP_ADDR_CONFIG 0x2F48
222#define HDP_MISC_CNTL 0x2F4C
223#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
224
225#define IH_RB_CNTL 0x3e00
226# define IH_RB_ENABLE (1 << 0)
227# define IH_RB_SIZE(x) ((x) << 1) /* log2 */
228# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
229# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
230# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
231# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
232# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
233#define IH_RB_BASE 0x3e04
234#define IH_RB_RPTR 0x3e08
235#define IH_RB_WPTR 0x3e0c
236# define RB_OVERFLOW (1 << 0)
237# define WPTR_OFFSET_MASK 0x3fffc
238#define IH_RB_WPTR_ADDR_HI 0x3e10
239#define IH_RB_WPTR_ADDR_LO 0x3e14
240#define IH_CNTL 0x3e18
241# define ENABLE_INTR (1 << 0)
242# define IH_MC_SWAP(x) ((x) << 1)
243# define IH_MC_SWAP_NONE 0
244# define IH_MC_SWAP_16BIT 1
245# define IH_MC_SWAP_32BIT 2
246# define IH_MC_SWAP_64BIT 3
247# define RPTR_REARM (1 << 4)
248# define MC_WRREQ_CREDIT(x) ((x) << 15)
249# define MC_WR_CLEAN_CNT(x) ((x) << 20)
250# define MC_VMID(x) ((x) << 25)
251
252#define CONFIG_MEMSIZE 0x5428
253
254#define INTERRUPT_CNTL 0x5468
255# define IH_DUMMY_RD_OVERRIDE (1 << 0)
256# define IH_DUMMY_RD_EN (1 << 1)
257# define IH_REQ_NONSNOOP_EN (1 << 3)
258# define GEN_IH_INT_EN (1 << 8)
259#define INTERRUPT_CNTL2 0x546c
260
261#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
262
263#define BIF_FB_EN 0x5490
264#define FB_READ_EN (1 << 0)
265#define FB_WRITE_EN (1 << 1)
266
267#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
268
269#define GPU_HDP_FLUSH_REQ 0x54DC
270#define GPU_HDP_FLUSH_DONE 0x54E0
271#define CP0 (1 << 0)
272#define CP1 (1 << 1)
273#define CP2 (1 << 2)
274#define CP3 (1 << 3)
275#define CP4 (1 << 4)
276#define CP5 (1 << 5)
277#define CP6 (1 << 6)
278#define CP7 (1 << 7)
279#define CP8 (1 << 8)
280#define CP9 (1 << 9)
281#define SDMA0 (1 << 10)
282#define SDMA1 (1 << 11)
283
284/* 0x6b04, 0x7704, 0x10304, 0x10f04, 0x11b04, 0x12704 */
285#define LB_MEMORY_CTRL 0x6b04
286#define LB_MEMORY_SIZE(x) ((x) << 0)
287#define LB_MEMORY_CONFIG(x) ((x) << 20)
288
289#define DPG_WATERMARK_MASK_CONTROL 0x6cc8
290# define LATENCY_WATERMARK_MASK(x) ((x) << 8)
291#define DPG_PIPE_LATENCY_CONTROL 0x6ccc
292# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
293# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
294
295/* 0x6b24, 0x7724, 0x10324, 0x10f24, 0x11b24, 0x12724 */
296#define LB_VLINE_STATUS 0x6b24
297# define VLINE_OCCURRED (1 << 0)
298# define VLINE_ACK (1 << 4)
299# define VLINE_STAT (1 << 12)
300# define VLINE_INTERRUPT (1 << 16)
301# define VLINE_INTERRUPT_TYPE (1 << 17)
302/* 0x6b2c, 0x772c, 0x1032c, 0x10f2c, 0x11b2c, 0x1272c */
303#define LB_VBLANK_STATUS 0x6b2c
304# define VBLANK_OCCURRED (1 << 0)
305# define VBLANK_ACK (1 << 4)
306# define VBLANK_STAT (1 << 12)
307# define VBLANK_INTERRUPT (1 << 16)
308# define VBLANK_INTERRUPT_TYPE (1 << 17)
309
310/* 0x6b20, 0x7720, 0x10320, 0x10f20, 0x11b20, 0x12720 */
311#define LB_INTERRUPT_MASK 0x6b20
312# define VBLANK_INTERRUPT_MASK (1 << 0)
313# define VLINE_INTERRUPT_MASK (1 << 4)
314# define VLINE2_INTERRUPT_MASK (1 << 8)
315
316#define DISP_INTERRUPT_STATUS 0x60f4
317# define LB_D1_VLINE_INTERRUPT (1 << 2)
318# define LB_D1_VBLANK_INTERRUPT (1 << 3)
319# define DC_HPD1_INTERRUPT (1 << 17)
320# define DC_HPD1_RX_INTERRUPT (1 << 18)
321# define DACA_AUTODETECT_INTERRUPT (1 << 22)
322# define DACB_AUTODETECT_INTERRUPT (1 << 23)
323# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
324# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
325#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
326# define LB_D2_VLINE_INTERRUPT (1 << 2)
327# define LB_D2_VBLANK_INTERRUPT (1 << 3)
328# define DC_HPD2_INTERRUPT (1 << 17)
329# define DC_HPD2_RX_INTERRUPT (1 << 18)
330# define DISP_TIMER_INTERRUPT (1 << 24)
331#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
332# define LB_D3_VLINE_INTERRUPT (1 << 2)
333# define LB_D3_VBLANK_INTERRUPT (1 << 3)
334# define DC_HPD3_INTERRUPT (1 << 17)
335# define DC_HPD3_RX_INTERRUPT (1 << 18)
336#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
337# define LB_D4_VLINE_INTERRUPT (1 << 2)
338# define LB_D4_VBLANK_INTERRUPT (1 << 3)
339# define DC_HPD4_INTERRUPT (1 << 17)
340# define DC_HPD4_RX_INTERRUPT (1 << 18)
341#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
342# define LB_D5_VLINE_INTERRUPT (1 << 2)
343# define LB_D5_VBLANK_INTERRUPT (1 << 3)
344# define DC_HPD5_INTERRUPT (1 << 17)
345# define DC_HPD5_RX_INTERRUPT (1 << 18)
346#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
347# define LB_D6_VLINE_INTERRUPT (1 << 2)
348# define LB_D6_VBLANK_INTERRUPT (1 << 3)
349# define DC_HPD6_INTERRUPT (1 << 17)
350# define DC_HPD6_RX_INTERRUPT (1 << 18)
351#define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780
352
353#define DAC_AUTODETECT_INT_CONTROL 0x67c8
354
355#define DC_HPD1_INT_STATUS 0x601c
356#define DC_HPD2_INT_STATUS 0x6028
357#define DC_HPD3_INT_STATUS 0x6034
358#define DC_HPD4_INT_STATUS 0x6040
359#define DC_HPD5_INT_STATUS 0x604c
360#define DC_HPD6_INT_STATUS 0x6058
361# define DC_HPDx_INT_STATUS (1 << 0)
362# define DC_HPDx_SENSE (1 << 1)
363# define DC_HPDx_SENSE_DELAYED (1 << 4)
364# define DC_HPDx_RX_INT_STATUS (1 << 8)
365
366#define DC_HPD1_INT_CONTROL 0x6020
367#define DC_HPD2_INT_CONTROL 0x602c
368#define DC_HPD3_INT_CONTROL 0x6038
369#define DC_HPD4_INT_CONTROL 0x6044
370#define DC_HPD5_INT_CONTROL 0x6050
371#define DC_HPD6_INT_CONTROL 0x605c
372# define DC_HPDx_INT_ACK (1 << 0)
373# define DC_HPDx_INT_POLARITY (1 << 8)
374# define DC_HPDx_INT_EN (1 << 16)
375# define DC_HPDx_RX_INT_ACK (1 << 20)
376# define DC_HPDx_RX_INT_EN (1 << 24)
377
378#define DC_HPD1_CONTROL 0x6024
379#define DC_HPD2_CONTROL 0x6030
380#define DC_HPD3_CONTROL 0x603c
381#define DC_HPD4_CONTROL 0x6048
382#define DC_HPD5_CONTROL 0x6054
383#define DC_HPD6_CONTROL 0x6060
384# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
385# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
386# define DC_HPDx_EN (1 << 28)
387
388#define GRBM_CNTL 0x8000
389#define GRBM_READ_TIMEOUT(x) ((x) << 0)
390
391#define GRBM_STATUS2 0x8008
392#define ME0PIPE1_CMDFIFO_AVAIL_MASK 0x0000000F
393#define ME0PIPE1_CF_RQ_PENDING (1 << 4)
394#define ME0PIPE1_PF_RQ_PENDING (1 << 5)
395#define ME1PIPE0_RQ_PENDING (1 << 6)
396#define ME1PIPE1_RQ_PENDING (1 << 7)
397#define ME1PIPE2_RQ_PENDING (1 << 8)
398#define ME1PIPE3_RQ_PENDING (1 << 9)
399#define ME2PIPE0_RQ_PENDING (1 << 10)
400#define ME2PIPE1_RQ_PENDING (1 << 11)
401#define ME2PIPE2_RQ_PENDING (1 << 12)
402#define ME2PIPE3_RQ_PENDING (1 << 13)
403#define RLC_RQ_PENDING (1 << 14)
404#define RLC_BUSY (1 << 24)
405#define TC_BUSY (1 << 25)
406#define CPF_BUSY (1 << 28)
407#define CPC_BUSY (1 << 29)
408#define CPG_BUSY (1 << 30)
409
410#define GRBM_STATUS 0x8010
411#define ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000F
412#define SRBM_RQ_PENDING (1 << 5)
413#define ME0PIPE0_CF_RQ_PENDING (1 << 7)
414#define ME0PIPE0_PF_RQ_PENDING (1 << 8)
415#define GDS_DMA_RQ_PENDING (1 << 9)
416#define DB_CLEAN (1 << 12)
417#define CB_CLEAN (1 << 13)
418#define TA_BUSY (1 << 14)
419#define GDS_BUSY (1 << 15)
420#define WD_BUSY_NO_DMA (1 << 16)
421#define VGT_BUSY (1 << 17)
422#define IA_BUSY_NO_DMA (1 << 18)
423#define IA_BUSY (1 << 19)
424#define SX_BUSY (1 << 20)
425#define WD_BUSY (1 << 21)
426#define SPI_BUSY (1 << 22)
427#define BCI_BUSY (1 << 23)
428#define SC_BUSY (1 << 24)
429#define PA_BUSY (1 << 25)
430#define DB_BUSY (1 << 26)
431#define CP_COHERENCY_BUSY (1 << 28)
432#define CP_BUSY (1 << 29)
433#define CB_BUSY (1 << 30)
434#define GUI_ACTIVE (1 << 31)
435#define GRBM_STATUS_SE0 0x8014
436#define GRBM_STATUS_SE1 0x8018
437#define GRBM_STATUS_SE2 0x8038
438#define GRBM_STATUS_SE3 0x803C
439#define SE_DB_CLEAN (1 << 1)
440#define SE_CB_CLEAN (1 << 2)
441#define SE_BCI_BUSY (1 << 22)
442#define SE_VGT_BUSY (1 << 23)
443#define SE_PA_BUSY (1 << 24)
444#define SE_TA_BUSY (1 << 25)
445#define SE_SX_BUSY (1 << 26)
446#define SE_SPI_BUSY (1 << 27)
447#define SE_SC_BUSY (1 << 29)
448#define SE_DB_BUSY (1 << 30)
449#define SE_CB_BUSY (1 << 31)
450
451#define GRBM_SOFT_RESET 0x8020
452#define SOFT_RESET_CP (1 << 0) /* All CP blocks */
453#define SOFT_RESET_RLC (1 << 2) /* RLC */
454#define SOFT_RESET_GFX (1 << 16) /* GFX */
455#define SOFT_RESET_CPF (1 << 17) /* CP fetcher shared by gfx and compute */
456#define SOFT_RESET_CPC (1 << 18) /* CP Compute (MEC1/2) */
457#define SOFT_RESET_CPG (1 << 19) /* CP GFX (PFP, ME, CE) */
458
459#define GRBM_INT_CNTL 0x8060
460# define RDERR_INT_ENABLE (1 << 0)
461# define GUI_IDLE_INT_ENABLE (1 << 19)
462
463#define CP_CPC_STATUS 0x8210
464#define CP_CPC_BUSY_STAT 0x8214
465#define CP_CPC_STALLED_STAT1 0x8218
466#define CP_CPF_STATUS 0x821c
467#define CP_CPF_BUSY_STAT 0x8220
468#define CP_CPF_STALLED_STAT1 0x8224
469
470/* MEC (compute micro engine) control: halt bits for the two compute MEs.
471 * NOTE(review): this block appeared twice verbatim; redundant copy removed. */
472#define CP_MEC_CNTL 0x8234
473#define MEC_ME2_HALT (1 << 28)
474#define MEC_ME1_HALT (1 << 30)
475
478#define CP_STALLED_STAT3 0x8670
479#define CP_STALLED_STAT1 0x8674
480#define CP_STALLED_STAT2 0x8678
481
482#define CP_STAT 0x8680
483
484#define CP_ME_CNTL 0x86D8
485#define CP_CE_HALT (1 << 24)
486#define CP_PFP_HALT (1 << 26)
487#define CP_ME_HALT (1 << 28)
488
489#define CP_RB0_RPTR 0x8700
490#define CP_RB_WPTR_DELAY 0x8704
491
492#define CP_MEQ_THRESHOLDS 0x8764
493#define MEQ1_START(x) ((x) << 0)
494#define MEQ2_START(x) ((x) << 8)
495
496#define VGT_VTX_VECT_EJECT_REG 0x88B0
497
498#define VGT_CACHE_INVALIDATION 0x88C4
499#define CACHE_INVALIDATION(x) ((x) << 0)
500#define VC_ONLY 0
501#define TC_ONLY 1
502#define VC_AND_TC 2
503#define AUTO_INVLD_EN(x) ((x) << 6)
504#define NO_AUTO 0
505#define ES_AUTO 1
506#define GS_AUTO 2
507#define ES_AND_GS_AUTO 3
508
509#define VGT_GS_VERTEX_REUSE 0x88D4
510
511#define CC_GC_SHADER_ARRAY_CONFIG 0x89bc
512#define INACTIVE_CUS_MASK 0xFFFF0000
513#define INACTIVE_CUS_SHIFT 16
514#define GC_USER_SHADER_ARRAY_CONFIG 0x89c0
515
516#define PA_CL_ENHANCE 0x8A14
517#define CLIP_VTX_REORDER_ENA (1 << 0)
518#define NUM_CLIP_SEQ(x) ((x) << 1)
519
520#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
521#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
522#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
523
524#define PA_SC_FIFO_SIZE 0x8BCC
525#define SC_FRONTEND_PRIM_FIFO_SIZE(x) ((x) << 0)
526#define SC_BACKEND_PRIM_FIFO_SIZE(x) ((x) << 6)
527#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 15)
528#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 23)
529
530#define PA_SC_ENHANCE 0x8BF0
531#define ENABLE_PA_SC_OUT_OF_ORDER (1 << 0)
532#define DISABLE_PA_SC_GUIDANCE (1 << 13)
533
534#define SQ_CONFIG 0x8C00
535
536#define SH_MEM_BASES 0x8C28
537/* if PTR32, these are the bases for scratch and lds */
538#define PRIVATE_BASE(x) ((x) << 0) /* scratch */
539#define SHARED_BASE(x) ((x) << 16) /* LDS */
540#define SH_MEM_APE1_BASE 0x8C2C
541/* if PTR32, this is the base location of GPUVM */
542#define SH_MEM_APE1_LIMIT 0x8C30
543/* if PTR32, this is the upper limit of GPUVM */
544#define SH_MEM_CONFIG 0x8C34
545#define PTR32 (1 << 0)
546#define ALIGNMENT_MODE(x) ((x) << 2)
547#define SH_MEM_ALIGNMENT_MODE_DWORD 0
548#define SH_MEM_ALIGNMENT_MODE_DWORD_STRICT 1
549#define SH_MEM_ALIGNMENT_MODE_STRICT 2
550#define SH_MEM_ALIGNMENT_MODE_UNALIGNED 3
551#define DEFAULT_MTYPE(x) ((x) << 4)
552#define APE1_MTYPE(x) ((x) << 7)
553
554#define SX_DEBUG_1 0x9060
555
556#define SPI_CONFIG_CNTL 0x9100
557
558#define SPI_CONFIG_CNTL_1 0x913C
559#define VTX_DONE_DELAY(x) ((x) << 0)
560#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
561
562#define TA_CNTL_AUX 0x9508
563
564#define DB_DEBUG 0x9830
565#define DB_DEBUG2 0x9834
566#define DB_DEBUG3 0x9838
567
568#define CC_RB_BACKEND_DISABLE 0x98F4
569#define BACKEND_DISABLE(x) ((x) << 16)
570#define GB_ADDR_CONFIG 0x98F8
571#define NUM_PIPES(x) ((x) << 0)
572#define NUM_PIPES_MASK 0x00000007
573#define NUM_PIPES_SHIFT 0
574#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
575#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
576#define PIPE_INTERLEAVE_SIZE_SHIFT 4
577#define NUM_SHADER_ENGINES(x) ((x) << 12)
578#define NUM_SHADER_ENGINES_MASK 0x00003000
579#define NUM_SHADER_ENGINES_SHIFT 12
580#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
581#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
582#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
583#define ROW_SIZE(x) ((x) << 28)
584#define ROW_SIZE_MASK 0x30000000
585#define ROW_SIZE_SHIFT 28
586
587#define GB_TILE_MODE0 0x9910
588# define ARRAY_MODE(x) ((x) << 2)
589# define ARRAY_LINEAR_GENERAL 0
590# define ARRAY_LINEAR_ALIGNED 1
591# define ARRAY_1D_TILED_THIN1 2
592# define ARRAY_2D_TILED_THIN1 4
593# define ARRAY_PRT_TILED_THIN1 5
594# define ARRAY_PRT_2D_TILED_THIN1 6
595# define PIPE_CONFIG(x) ((x) << 6)
596# define ADDR_SURF_P2 0
597# define ADDR_SURF_P4_8x16 4
598# define ADDR_SURF_P4_16x16 5
599# define ADDR_SURF_P4_16x32 6
600# define ADDR_SURF_P4_32x32 7
601# define ADDR_SURF_P8_16x16_8x16 8
602# define ADDR_SURF_P8_16x32_8x16 9
603# define ADDR_SURF_P8_32x32_8x16 10
604# define ADDR_SURF_P8_16x32_16x16 11
605# define ADDR_SURF_P8_32x32_16x16 12
606# define ADDR_SURF_P8_32x32_16x32 13
607# define ADDR_SURF_P8_32x64_32x32 14
608# define TILE_SPLIT(x) ((x) << 11)
609# define ADDR_SURF_TILE_SPLIT_64B 0
610# define ADDR_SURF_TILE_SPLIT_128B 1
611# define ADDR_SURF_TILE_SPLIT_256B 2
612# define ADDR_SURF_TILE_SPLIT_512B 3
613# define ADDR_SURF_TILE_SPLIT_1KB 4
614# define ADDR_SURF_TILE_SPLIT_2KB 5
615# define ADDR_SURF_TILE_SPLIT_4KB 6
616# define MICRO_TILE_MODE_NEW(x) ((x) << 22)
617# define ADDR_SURF_DISPLAY_MICRO_TILING 0
618# define ADDR_SURF_THIN_MICRO_TILING 1
619# define ADDR_SURF_DEPTH_MICRO_TILING 2
620# define ADDR_SURF_ROTATED_MICRO_TILING 3
621# define SAMPLE_SPLIT(x) ((x) << 25)
622# define ADDR_SURF_SAMPLE_SPLIT_1 0
623# define ADDR_SURF_SAMPLE_SPLIT_2 1
624# define ADDR_SURF_SAMPLE_SPLIT_4 2
625# define ADDR_SURF_SAMPLE_SPLIT_8 3
626
627#define GB_MACROTILE_MODE0 0x9990
628# define BANK_WIDTH(x) ((x) << 0)
629# define ADDR_SURF_BANK_WIDTH_1 0
630# define ADDR_SURF_BANK_WIDTH_2 1
631# define ADDR_SURF_BANK_WIDTH_4 2
632# define ADDR_SURF_BANK_WIDTH_8 3
633# define BANK_HEIGHT(x) ((x) << 2)
634# define ADDR_SURF_BANK_HEIGHT_1 0
635# define ADDR_SURF_BANK_HEIGHT_2 1
636# define ADDR_SURF_BANK_HEIGHT_4 2
637# define ADDR_SURF_BANK_HEIGHT_8 3
638# define MACRO_TILE_ASPECT(x) ((x) << 4)
639# define ADDR_SURF_MACRO_ASPECT_1 0
640# define ADDR_SURF_MACRO_ASPECT_2 1
641# define ADDR_SURF_MACRO_ASPECT_4 2
642# define ADDR_SURF_MACRO_ASPECT_8 3
643# define NUM_BANKS(x) ((x) << 6)
644# define ADDR_SURF_2_BANK 0
645# define ADDR_SURF_4_BANK 1
646# define ADDR_SURF_8_BANK 2
647# define ADDR_SURF_16_BANK 3
648
649#define CB_HW_CONTROL 0x9A10
650
651#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
652#define BACKEND_DISABLE_MASK 0x00FF0000
653#define BACKEND_DISABLE_SHIFT 16
654
655#define TCP_CHAN_STEER_LO 0xac0c
656#define TCP_CHAN_STEER_HI 0xac10
657
658#define TC_CFG_L1_LOAD_POLICY0 0xAC68
659#define TC_CFG_L1_LOAD_POLICY1 0xAC6C
660#define TC_CFG_L1_STORE_POLICY 0xAC70
661#define TC_CFG_L2_LOAD_POLICY0 0xAC74
662#define TC_CFG_L2_LOAD_POLICY1 0xAC78
663#define TC_CFG_L2_STORE_POLICY0 0xAC7C
664#define TC_CFG_L2_STORE_POLICY1 0xAC80
665#define TC_CFG_L2_ATOMIC_POLICY 0xAC84
666#define TC_CFG_L1_VOLATILE 0xAC88
667#define TC_CFG_L2_VOLATILE 0xAC8C
668
669#define CP_RB0_BASE 0xC100
670#define CP_RB0_CNTL 0xC104
671#define RB_BUFSZ(x) ((x) << 0)
672#define RB_BLKSZ(x) ((x) << 8)
673#define BUF_SWAP_32BIT (2 << 16)
674#define RB_NO_UPDATE (1 << 27)
675#define RB_RPTR_WR_ENA (1 << 31)
676
677#define CP_RB0_RPTR_ADDR 0xC10C
678#define RB_RPTR_SWAP_32BIT (2 << 0)
679#define CP_RB0_RPTR_ADDR_HI 0xC110
680#define CP_RB0_WPTR 0xC114
681
682#define CP_DEVICE_ID 0xC12C
683#define CP_ENDIAN_SWAP 0xC140
684#define CP_RB_VMID 0xC144
685
686#define CP_PFP_UCODE_ADDR 0xC150
687#define CP_PFP_UCODE_DATA 0xC154
688#define CP_ME_RAM_RADDR 0xC158
689#define CP_ME_RAM_WADDR 0xC15C
690#define CP_ME_RAM_DATA 0xC160
691
692#define CP_CE_UCODE_ADDR 0xC168
693#define CP_CE_UCODE_DATA 0xC16C
694#define CP_MEC_ME1_UCODE_ADDR 0xC170
695#define CP_MEC_ME1_UCODE_DATA 0xC174
696#define CP_MEC_ME2_UCODE_ADDR 0xC178
697#define CP_MEC_ME2_UCODE_DATA 0xC17C
698
699#define CP_INT_CNTL_RING0 0xC1A8
700# define CNTX_BUSY_INT_ENABLE (1 << 19)
701# define CNTX_EMPTY_INT_ENABLE (1 << 20)
702# define PRIV_INSTR_INT_ENABLE (1 << 22)
703# define PRIV_REG_INT_ENABLE (1 << 23)
704# define TIME_STAMP_INT_ENABLE (1 << 26)
705# define CP_RINGID2_INT_ENABLE (1 << 29)
706# define CP_RINGID1_INT_ENABLE (1 << 30)
707# define CP_RINGID0_INT_ENABLE (1 << 31)
708
709#define CP_INT_STATUS_RING0 0xC1B4
710# define PRIV_INSTR_INT_STAT (1 << 22)
711# define PRIV_REG_INT_STAT (1 << 23)
712# define TIME_STAMP_INT_STAT (1 << 26)
713# define CP_RINGID2_INT_STAT (1 << 29)
714# define CP_RINGID1_INT_STAT (1 << 30)
715# define CP_RINGID0_INT_STAT (1 << 31)
716
717#define CP_CPF_DEBUG 0xC200
718
719#define CP_PQ_WPTR_POLL_CNTL 0xC20C
720#define WPTR_POLL_EN (1 << 31)
721
722#define CP_ME1_PIPE0_INT_CNTL 0xC214
723#define CP_ME1_PIPE1_INT_CNTL 0xC218
724#define CP_ME1_PIPE2_INT_CNTL 0xC21C
725#define CP_ME1_PIPE3_INT_CNTL 0xC220
726#define CP_ME2_PIPE0_INT_CNTL 0xC224
727#define CP_ME2_PIPE1_INT_CNTL 0xC228
728#define CP_ME2_PIPE2_INT_CNTL 0xC22C
729#define CP_ME2_PIPE3_INT_CNTL 0xC230
730# define DEQUEUE_REQUEST_INT_ENABLE (1 << 13)
731# define WRM_POLL_TIMEOUT_INT_ENABLE (1 << 17)
732# define PRIV_REG_INT_ENABLE (1 << 23)
733# define TIME_STAMP_INT_ENABLE (1 << 26)
734# define GENERIC2_INT_ENABLE (1 << 29)
735# define GENERIC1_INT_ENABLE (1 << 30)
736# define GENERIC0_INT_ENABLE (1 << 31)
737#define CP_ME1_PIPE0_INT_STATUS 0xC214
738#define CP_ME1_PIPE1_INT_STATUS 0xC218
739#define CP_ME1_PIPE2_INT_STATUS 0xC21C
740#define CP_ME1_PIPE3_INT_STATUS 0xC220
741#define CP_ME2_PIPE0_INT_STATUS 0xC224
742#define CP_ME2_PIPE1_INT_STATUS 0xC228
743#define CP_ME2_PIPE2_INT_STATUS 0xC22C
744#define CP_ME2_PIPE3_INT_STATUS 0xC230
745# define DEQUEUE_REQUEST_INT_STATUS (1 << 13)
746# define WRM_POLL_TIMEOUT_INT_STATUS (1 << 17)
747# define PRIV_REG_INT_STATUS (1 << 23)
748# define TIME_STAMP_INT_STATUS (1 << 26)
749# define GENERIC2_INT_STATUS (1 << 29)
750# define GENERIC1_INT_STATUS (1 << 30)
751# define GENERIC0_INT_STATUS (1 << 31)
752
753#define CP_MAX_CONTEXT 0xC2B8
754
755#define CP_RB0_BASE_HI 0xC2C4
756
757#define RLC_CNTL 0xC300
758# define RLC_ENABLE (1 << 0)
759
760#define RLC_MC_CNTL 0xC30C
761
762#define RLC_LB_CNTR_MAX 0xC348
763
764#define RLC_LB_CNTL 0xC364
765
766#define RLC_LB_CNTR_INIT 0xC36C
767
768#define RLC_SAVE_AND_RESTORE_BASE 0xC374
769#define RLC_DRIVER_DMA_STATUS 0xC378
770
771#define RLC_GPM_UCODE_ADDR 0xC388
772#define RLC_GPM_UCODE_DATA 0xC38C
773#define RLC_GPU_CLOCK_COUNT_LSB 0xC390
774#define RLC_GPU_CLOCK_COUNT_MSB 0xC394
775#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC398
776#define RLC_UCODE_CNTL 0xC39C
777
778#define RLC_CGCG_CGLS_CTRL 0xC424
779
780#define RLC_LB_INIT_CU_MASK 0xC43C
781
782#define RLC_LB_PARAMS 0xC444
783
784#define RLC_SERDES_CU_MASTER_BUSY 0xC484
785#define RLC_SERDES_NONCU_MASTER_BUSY 0xC488
786# define SE_MASTER_BUSY_MASK 0x0000ffff
787# define GC_MASTER_BUSY (1 << 16)
788# define TC0_MASTER_BUSY (1 << 17)
789# define TC1_MASTER_BUSY (1 << 18)
790
791#define RLC_GPM_SCRATCH_ADDR 0xC4B0
792#define RLC_GPM_SCRATCH_DATA 0xC4B4
793
794#define CP_HPD_EOP_BASE_ADDR 0xC904
795#define CP_HPD_EOP_BASE_ADDR_HI 0xC908
796#define CP_HPD_EOP_VMID 0xC90C
797#define CP_HPD_EOP_CONTROL 0xC910
798#define EOP_SIZE(x) ((x) << 0)
799#define EOP_SIZE_MASK (0x3f << 0)
800#define CP_MQD_BASE_ADDR 0xC914
801#define CP_MQD_BASE_ADDR_HI 0xC918
802#define CP_HQD_ACTIVE 0xC91C
803#define CP_HQD_VMID 0xC920
804
805#define CP_HQD_PQ_BASE 0xC934
806#define CP_HQD_PQ_BASE_HI 0xC938
807#define CP_HQD_PQ_RPTR 0xC93C
808#define CP_HQD_PQ_RPTR_REPORT_ADDR 0xC940
809#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI 0xC944
810#define CP_HQD_PQ_WPTR_POLL_ADDR 0xC948
811#define CP_HQD_PQ_WPTR_POLL_ADDR_HI 0xC94C
812#define CP_HQD_PQ_DOORBELL_CONTROL 0xC950
813#define DOORBELL_OFFSET(x) ((x) << 2)
814#define DOORBELL_OFFSET_MASK (0x1fffff << 2)
815#define DOORBELL_SOURCE (1 << 28)
816#define DOORBELL_SCHD_HIT (1 << 29)
817#define DOORBELL_EN (1 << 30)
818#define DOORBELL_HIT (1 << 31)
819#define CP_HQD_PQ_WPTR 0xC954
820#define CP_HQD_PQ_CONTROL 0xC958
821#define QUEUE_SIZE(x) ((x) << 0)
822#define QUEUE_SIZE_MASK (0x3f << 0)
823#define RPTR_BLOCK_SIZE(x) ((x) << 8)
824#define RPTR_BLOCK_SIZE_MASK (0x3f << 8)
825#define PQ_VOLATILE (1 << 26)
826#define NO_UPDATE_RPTR (1 << 27)
827#define UNORD_DISPATCH (1 << 28)
828#define ROQ_PQ_IB_FLIP (1 << 29)
829#define PRIV_STATE (1 << 30)
830#define KMD_QUEUE (1 << 31)
831
832#define CP_HQD_DEQUEUE_REQUEST 0xC974
833
834#define CP_MQD_CONTROL 0xC99C
835#define MQD_VMID(x) ((x) << 0)
836#define MQD_VMID_MASK (0xf << 0)
837
838#define PA_SC_RASTER_CONFIG 0x28350
839# define RASTER_CONFIG_RB_MAP_0 0
840# define RASTER_CONFIG_RB_MAP_1 1
841# define RASTER_CONFIG_RB_MAP_2 2
842# define RASTER_CONFIG_RB_MAP_3 3
843
844#define VGT_EVENT_INITIATOR 0x28a90
845# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
846# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
847# define SAMPLE_STREAMOUTSTATS3 (3 << 0)
848# define CACHE_FLUSH_TS (4 << 0)
849# define CACHE_FLUSH (6 << 0)
850# define CS_PARTIAL_FLUSH (7 << 0)
851# define VGT_STREAMOUT_RESET (10 << 0)
852# define END_OF_PIPE_INCR_DE (11 << 0)
853# define END_OF_PIPE_IB_END (12 << 0)
854# define RST_PIX_CNT (13 << 0)
855# define VS_PARTIAL_FLUSH (15 << 0)
856# define PS_PARTIAL_FLUSH (16 << 0)
857# define CACHE_FLUSH_AND_INV_TS_EVENT (20 << 0)
858# define ZPASS_DONE (21 << 0)
859# define CACHE_FLUSH_AND_INV_EVENT (22 << 0)
860# define PERFCOUNTER_START (23 << 0)
861# define PERFCOUNTER_STOP (24 << 0)
862# define PIPELINESTAT_START (25 << 0)
863# define PIPELINESTAT_STOP (26 << 0)
864# define PERFCOUNTER_SAMPLE (27 << 0)
865# define SAMPLE_PIPELINESTAT (30 << 0)
866# define SO_VGT_STREAMOUT_FLUSH (31 << 0)
867# define SAMPLE_STREAMOUTSTATS (32 << 0)
868# define RESET_VTX_CNT (33 << 0)
869# define VGT_FLUSH (36 << 0)
870# define BOTTOM_OF_PIPE_TS (40 << 0)
871# define DB_CACHE_FLUSH_AND_INV (42 << 0)
872# define FLUSH_AND_INV_DB_DATA_TS (43 << 0)
873# define FLUSH_AND_INV_DB_META (44 << 0)
874# define FLUSH_AND_INV_CB_DATA_TS (45 << 0)
875# define FLUSH_AND_INV_CB_META (46 << 0)
876# define CS_DONE (47 << 0)
877# define PS_DONE (48 << 0)
878# define FLUSH_AND_INV_CB_PIXEL_DATA (49 << 0)
879# define THREAD_TRACE_START (51 << 0)
880# define THREAD_TRACE_STOP (52 << 0)
881# define THREAD_TRACE_FLUSH (54 << 0)
882# define THREAD_TRACE_FINISH (55 << 0)
883# define PIXEL_PIPE_STAT_CONTROL (56 << 0)
884# define PIXEL_PIPE_STAT_DUMP (57 << 0)
885# define PIXEL_PIPE_STAT_RESET (58 << 0)
886
887#define SCRATCH_REG0 0x30100
888#define SCRATCH_REG1 0x30104
889#define SCRATCH_REG2 0x30108
890#define SCRATCH_REG3 0x3010C
891#define SCRATCH_REG4 0x30110
892#define SCRATCH_REG5 0x30114
893#define SCRATCH_REG6 0x30118
894#define SCRATCH_REG7 0x3011C
895
896#define SCRATCH_UMSK 0x30140
897#define SCRATCH_ADDR 0x30144
898
899#define CP_SEM_WAIT_TIMER 0x301BC
900
901#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x301C8
902
903#define CP_WAIT_REG_MEM_TIMEOUT 0x301D0
904
905#define GRBM_GFX_INDEX 0x30800
906#define INSTANCE_INDEX(x) ((x) << 0)
907#define SH_INDEX(x) ((x) << 8)
908#define SE_INDEX(x) ((x) << 16)
909#define SH_BROADCAST_WRITES (1 << 29)
910#define INSTANCE_BROADCAST_WRITES (1 << 30)
911#define SE_BROADCAST_WRITES (1 << 31)
912
913#define VGT_ESGS_RING_SIZE 0x30900
914#define VGT_GSVS_RING_SIZE 0x30904
915#define VGT_PRIMITIVE_TYPE 0x30908
916#define VGT_INDEX_TYPE 0x3090C
917
918#define VGT_NUM_INDICES 0x30930
919#define VGT_NUM_INSTANCES 0x30934
920#define VGT_TF_RING_SIZE 0x30938
921#define VGT_HS_OFFCHIP_PARAM 0x3093C
922#define VGT_TF_MEMORY_BASE 0x30940
923
924#define PA_SU_LINE_STIPPLE_VALUE 0x30a00
925#define PA_SC_LINE_STIPPLE_STATE 0x30a04
926
927#define SQC_CACHES 0x30d20
928
929#define CP_PERFMON_CNTL 0x36020
930
931#define CGTS_TCC_DISABLE 0x3c00c
932#define CGTS_USER_TCC_DISABLE 0x3c010
933#define TCC_DISABLE_MASK 0xFFFF0000
934#define TCC_DISABLE_SHIFT 16
935
936#define CB_CGTT_SCLK_CTRL 0x3c2a0
937
938/*
939 * PM4
940 */
941#define PACKET_TYPE0 0
942#define PACKET_TYPE1 1
943#define PACKET_TYPE2 2
944#define PACKET_TYPE3 3
945
946#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
947#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
948#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
949#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
950#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
951 (((reg) >> 2) & 0xFFFF) | \
952 ((n) & 0x3FFF) << 16)
953#define CP_PACKET2 0x80000000
954#define PACKET2_PAD_SHIFT 0
955#define PACKET2_PAD_MASK (0x3fffffff << 0)
956
957#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
958
959#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
960 (((op) & 0xFF) << 8) | \
961 ((n) & 0x3FFF) << 16)
962
963#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
964
965/* Packet 3 types */
966#define PACKET3_NOP 0x10
967#define PACKET3_SET_BASE 0x11
968#define PACKET3_BASE_INDEX(x) ((x) << 0)
969#define CE_PARTITION_BASE 3
970#define PACKET3_CLEAR_STATE 0x12
971#define PACKET3_INDEX_BUFFER_SIZE 0x13
972#define PACKET3_DISPATCH_DIRECT 0x15
973#define PACKET3_DISPATCH_INDIRECT 0x16
974#define PACKET3_ATOMIC_GDS 0x1D
975#define PACKET3_ATOMIC_MEM 0x1E
976#define PACKET3_OCCLUSION_QUERY 0x1F
977#define PACKET3_SET_PREDICATION 0x20
978#define PACKET3_REG_RMW 0x21
979#define PACKET3_COND_EXEC 0x22
980#define PACKET3_PRED_EXEC 0x23
981#define PACKET3_DRAW_INDIRECT 0x24
982#define PACKET3_DRAW_INDEX_INDIRECT 0x25
983#define PACKET3_INDEX_BASE 0x26
984#define PACKET3_DRAW_INDEX_2 0x27
985#define PACKET3_CONTEXT_CONTROL 0x28
986#define PACKET3_INDEX_TYPE 0x2A
987#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
988#define PACKET3_DRAW_INDEX_AUTO 0x2D
989#define PACKET3_NUM_INSTANCES 0x2F
990#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
991#define PACKET3_INDIRECT_BUFFER_CONST 0x33
992#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
993#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
994#define PACKET3_DRAW_PREAMBLE 0x36
995#define PACKET3_WRITE_DATA 0x37
996#define WRITE_DATA_DST_SEL(x) ((x) << 8)
997 /* 0 - register
998 * 1 - memory (sync - via GRBM)
999 * 2 - gl2
1000 * 3 - gds
1001 * 4 - reserved
1002 * 5 - memory (async - direct)
1003 */
1004#define WR_ONE_ADDR (1 << 16)
1005#define WR_CONFIRM (1 << 20)
1006#define WRITE_DATA_CACHE_POLICY(x) ((x) << 25)
1007 /* 0 - LRU
1008 * 1 - Stream
1009 */
1010#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
1011 /* 0 - me
1012 * 1 - pfp
1013 * 2 - ce
1014 */
1015#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
1016#define PACKET3_MEM_SEMAPHORE 0x39
1017# define PACKET3_SEM_USE_MAILBOX (0x1 << 16)
1018# define PACKET3_SEM_SEL_SIGNAL_TYPE (0x1 << 20) /* 0 = increment, 1 = write 1 */
1019# define PACKET3_SEM_CLIENT_CODE(x) ((x) << 24) /* 0 = CP, 1 = CB, 2 = DB */
1020# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
1021# define PACKET3_SEM_SEL_WAIT (0x7 << 29)
1022#define PACKET3_COPY_DW 0x3B
1023#define PACKET3_WAIT_REG_MEM 0x3C
1024#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
1025 /* 0 - always
1026 * 1 - <
1027 * 2 - <=
1028 * 3 - ==
1029 * 4 - !=
1030 * 5 - >=
1031 * 6 - >
1032 */
1033#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
1034 /* 0 - reg
1035 * 1 - mem
1036 */
1037#define WAIT_REG_MEM_OPERATION(x) ((x) << 6)
1038 /* 0 - wait_reg_mem
1039 * 1 - wr_wait_wr_reg
1040 */
1041#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
1042 /* 0 - me
1043 * 1 - pfp
1044 */
1045#define PACKET3_INDIRECT_BUFFER 0x3F
1046#define INDIRECT_BUFFER_TCL2_VOLATILE (1 << 22)
1047#define INDIRECT_BUFFER_VALID (1 << 23)
1048#define INDIRECT_BUFFER_CACHE_POLICY(x) ((x) << 28)
1049 /* 0 - LRU
1050 * 1 - Stream
1051 * 2 - Bypass
1052 */
1053#define PACKET3_COPY_DATA 0x40
1054#define PACKET3_PFP_SYNC_ME 0x42
1055#define PACKET3_SURFACE_SYNC 0x43
1056# define PACKET3_DEST_BASE_0_ENA (1 << 0)
1057# define PACKET3_DEST_BASE_1_ENA (1 << 1)
1058# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
1059# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
1060# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
1061# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
1062# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
1063# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
1064# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
1065# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
1066# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
1067# define PACKET3_TCL1_VOL_ACTION_ENA (1 << 15)
1068# define PACKET3_TC_VOL_ACTION_ENA (1 << 16) /* L2 */
1069# define PACKET3_TC_WB_ACTION_ENA (1 << 18) /* L2 */
1070# define PACKET3_DEST_BASE_2_ENA (1 << 19)
1071# define PACKET3_DEST_BASE_3_ENA (1 << 21)
1072# define PACKET3_TCL1_ACTION_ENA (1 << 22)
1073# define PACKET3_TC_ACTION_ENA (1 << 23) /* L2 */
1074# define PACKET3_CB_ACTION_ENA (1 << 25)
1075# define PACKET3_DB_ACTION_ENA (1 << 26)
1076# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
1077# define PACKET3_SH_KCACHE_VOL_ACTION_ENA (1 << 28)
1078# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
1079#define PACKET3_COND_WRITE 0x45
1080#define PACKET3_EVENT_WRITE 0x46
1081#define EVENT_TYPE(x) ((x) << 0)
1082#define EVENT_INDEX(x) ((x) << 8)
1083 /* 0 - any non-TS event
1084 * 1 - ZPASS_DONE, PIXEL_PIPE_STAT_*
1085 * 2 - SAMPLE_PIPELINESTAT
1086 * 3 - SAMPLE_STREAMOUTSTAT*
1087 * 4 - *S_PARTIAL_FLUSH
1088 * 5 - EOP events
1089 * 6 - EOS events
1090 */
1091#define PACKET3_EVENT_WRITE_EOP 0x47
1092#define EOP_TCL1_VOL_ACTION_EN (1 << 12)
1093#define EOP_TC_VOL_ACTION_EN (1 << 13) /* L2 */
1094#define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */
1095#define EOP_TCL1_ACTION_EN (1 << 16)
1096#define EOP_TC_ACTION_EN (1 << 17) /* L2 */
1097#define EOP_CACHE_POLICY(x) ((x) << 25)
1098 /* 0 - LRU
1099 * 1 - Stream
1100 * 2 - Bypass
1101 */
1102#define EOP_TCL2_VOLATILE (1 << 27)
1103#define DATA_SEL(x) ((x) << 29)
1104 /* 0 - discard
1105 * 1 - send low 32bit data
1106 * 2 - send 64bit data
1107 * 3 - send 64bit GPU counter value
1108 * 4 - send 64bit sys counter value
1109 */
1110#define INT_SEL(x) ((x) << 24)
1111 /* 0 - none
1112 * 1 - interrupt only (DATA_SEL = 0)
1113 * 2 - interrupt when data write is confirmed
1114 */
1115#define DST_SEL(x) ((x) << 16)
1116 /* 0 - MC
1117 * 1 - TC/L2
1118 */
1119#define PACKET3_EVENT_WRITE_EOS 0x48
1120#define PACKET3_RELEASE_MEM 0x49
1121#define PACKET3_PREAMBLE_CNTL 0x4A
1122# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
1123# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
1124#define PACKET3_DMA_DATA 0x50
1125#define PACKET3_AQUIRE_MEM 0x58 /* sic: misspelling of "ACQUIRE" — name kept as-is; renaming would break users of this header */
1126#define PACKET3_REWIND 0x59
1127#define PACKET3_LOAD_UCONFIG_REG 0x5E
1128#define PACKET3_LOAD_SH_REG 0x5F
1129#define PACKET3_LOAD_CONFIG_REG 0x60
1130#define PACKET3_LOAD_CONTEXT_REG 0x61
1131#define PACKET3_SET_CONFIG_REG 0x68
1132#define PACKET3_SET_CONFIG_REG_START 0x00008000
1133#define PACKET3_SET_CONFIG_REG_END 0x0000b000
1134#define PACKET3_SET_CONTEXT_REG 0x69
1135#define PACKET3_SET_CONTEXT_REG_START 0x00028000
1136#define PACKET3_SET_CONTEXT_REG_END 0x00029000
1137#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
1138#define PACKET3_SET_SH_REG 0x76
1139#define PACKET3_SET_SH_REG_START 0x0000b000
1140#define PACKET3_SET_SH_REG_END 0x0000c000
1141#define PACKET3_SET_SH_REG_OFFSET 0x77
1142#define PACKET3_SET_QUEUE_REG 0x78
1143#define PACKET3_SET_UCONFIG_REG 0x79
1144#define PACKET3_SET_UCONFIG_REG_START 0x00030000
1145#define PACKET3_SET_UCONFIG_REG_END 0x00031000
1146#define PACKET3_SCRATCH_RAM_WRITE 0x7D
1147#define PACKET3_SCRATCH_RAM_READ 0x7E
1148#define PACKET3_LOAD_CONST_RAM 0x80
1149#define PACKET3_WRITE_CONST_RAM 0x81
1150#define PACKET3_DUMP_CONST_RAM 0x83
1151#define PACKET3_INCREMENT_CE_COUNTER 0x84
1152#define PACKET3_INCREMENT_DE_COUNTER 0x85
1153#define PACKET3_WAIT_ON_CE_COUNTER 0x86
1154#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
1155#define PACKET3_SWITCH_BUFFER 0x8B
1156
1157/* SDMA - first instance at 0xd000, second at 0xd800 */
1158#define SDMA0_REGISTER_OFFSET 0x0 /* not a register */
1159#define SDMA1_REGISTER_OFFSET 0x800 /* not a register */
1160
1161#define SDMA0_UCODE_ADDR 0xD000
1162#define SDMA0_UCODE_DATA 0xD004
1163
1164#define SDMA0_CNTL 0xD010
1165# define TRAP_ENABLE (1 << 0)
1166# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
1167# define SEM_WAIT_INT_ENABLE (1 << 2)
1168# define DATA_SWAP_ENABLE (1 << 3)
1169# define FENCE_SWAP_ENABLE (1 << 4)
1170# define AUTO_CTXSW_ENABLE (1 << 18)
1171# define CTXEMPTY_INT_ENABLE (1 << 28)
1172
1173#define SDMA0_TILING_CONFIG 0xD018
1174
1175#define SDMA0_SEM_INCOMPLETE_TIMER_CNTL 0xD020
1176#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL 0xD024
1177
1178#define SDMA0_STATUS_REG 0xd034
1179# define SDMA_IDLE (1 << 0)
1180
1181#define SDMA0_ME_CNTL 0xD048
1182# define SDMA_HALT (1 << 0)
1183
1184#define SDMA0_GFX_RB_CNTL 0xD200
1185# define SDMA_RB_ENABLE (1 << 0)
1186# define SDMA_RB_SIZE(x) ((x) << 1) /* log2 */
1187# define SDMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
1188# define SDMA_RPTR_WRITEBACK_ENABLE (1 << 12)
1189# define SDMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
1190# define SDMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
1191#define SDMA0_GFX_RB_BASE 0xD204
1192#define SDMA0_GFX_RB_BASE_HI 0xD208
1193#define SDMA0_GFX_RB_RPTR 0xD20C
1194#define SDMA0_GFX_RB_WPTR 0xD210
1195
1196#define SDMA0_GFX_RB_RPTR_ADDR_HI 0xD220
1197#define SDMA0_GFX_RB_RPTR_ADDR_LO 0xD224
1198#define SDMA0_GFX_IB_CNTL 0xD228
1199# define SDMA_IB_ENABLE (1 << 0)
1200# define SDMA_IB_SWAP_ENABLE (1 << 4)
1201# define SDMA_SWITCH_INSIDE_IB (1 << 8)
1202# define SDMA_CMD_VMID(x) ((x) << 16)
1203
1204#define SDMA0_GFX_VIRTUAL_ADDR 0xD29C
1205#define SDMA0_GFX_APE1_CNTL 0xD2A0
1206
1207#define SDMA_PACKET(op, sub_op, e) ((((e) & 0xFFFF) << 16) | \
1208 (((sub_op) & 0xFF) << 8) | \
1209 (((op) & 0xFF) << 0))
1210/* sDMA opcodes */
1211#define SDMA_OPCODE_NOP 0
1212#define SDMA_OPCODE_COPY 1
1213# define SDMA_COPY_SUB_OPCODE_LINEAR 0
1214# define SDMA_COPY_SUB_OPCODE_TILED 1
1215# define SDMA_COPY_SUB_OPCODE_SOA 3
1216# define SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW 4
1217# define SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW 5
1218# define SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW 6
1219#define SDMA_OPCODE_WRITE 2
1220# define SDMA_WRITE_SUB_OPCODE_LINEAR 0
1221# define SDMA_WRTIE_SUB_OPCODE_TILED 1 /* sic: misspelling of "WRITE" — name kept as-is; renaming would break users of this header */
1222#define SDMA_OPCODE_INDIRECT_BUFFER 4
1223#define SDMA_OPCODE_FENCE 5
1224#define SDMA_OPCODE_TRAP 6
1225#define SDMA_OPCODE_SEMAPHORE 7
1226# define SDMA_SEMAPHORE_EXTRA_O (1 << 13)
1227 /* 0 - increment
1228 * 1 - write 1
1229 */
1230# define SDMA_SEMAPHORE_EXTRA_S (1 << 14)
1231 /* 0 - wait
1232 * 1 - signal
1233 */
1234# define SDMA_SEMAPHORE_EXTRA_M (1 << 15)
1235 /* mailbox */
1236#define SDMA_OPCODE_POLL_REG_MEM 8
1237# define SDMA_POLL_REG_MEM_EXTRA_OP(x) ((x) << 10)
1238 /* 0 - wait_reg_mem
1239 * 1 - wr_wait_wr_reg
1240 */
1241# define SDMA_POLL_REG_MEM_EXTRA_FUNC(x) ((x) << 12)
1242 /* 0 - always
1243 * 1 - <
1244 * 2 - <=
1245 * 3 - ==
1246 * 4 - !=
1247 * 5 - >=
1248 * 6 - >
1249 */
1250# define SDMA_POLL_REG_MEM_EXTRA_M (1 << 15)
1251 /* 0 = register
1252 * 1 = memory
1253 */
1254#define SDMA_OPCODE_COND_EXEC 9
1255#define SDMA_OPCODE_CONSTANT_FILL 11
1256# define SDMA_CONSTANT_FILL_EXTRA_SIZE(x) ((x) << 14)
1257 /* 0 = byte fill
1258 * 2 = DW fill
1259 */
1260#define SDMA_OPCODE_GENERATE_PTE_PDE 12
1261#define SDMA_OPCODE_TIMESTAMP 13
1262# define SDMA_TIMESTAMP_SUB_OPCODE_SET_LOCAL 0
1263# define SDMA_TIMESTAMP_SUB_OPCODE_GET_LOCAL 1
1264# define SDMA_TIMESTAMP_SUB_OPCODE_GET_GLOBAL 2
1265#define SDMA_OPCODE_SRBM_WRITE 14
1266# define SDMA_SRBM_WRITE_EXTRA_BYTE_ENABLE(x) ((x) << 12)
1267 /* byte mask */
1268
1269/* UVD */
1270
1271#define UVD_UDEC_ADDR_CONFIG 0xef4c
1272#define UVD_UDEC_DB_ADDR_CONFIG 0xef50
1273#define UVD_UDEC_DBW_ADDR_CONFIG 0xef54
1274
1275#define UVD_LMI_EXT40_ADDR 0xf498
1276#define UVD_LMI_ADDR_EXT 0xf594
1277#define UVD_VCPU_CACHE_OFFSET0 0xf608
1278#define UVD_VCPU_CACHE_SIZE0 0xf60c
1279#define UVD_VCPU_CACHE_OFFSET1 0xf610
1280#define UVD_VCPU_CACHE_SIZE1 0xf614
1281#define UVD_VCPU_CACHE_OFFSET2 0xf618
1282#define UVD_VCPU_CACHE_SIZE2 0xf61c
1283
1284#define UVD_RBC_RB_RPTR 0xf690
1285#define UVD_RBC_RB_WPTR 0xf694
1286
1287/* UVD clocks */
1288
1289#define CG_DCLK_CNTL 0xC050009C
1290# define DCLK_DIVIDER_MASK 0x7f
1291# define DCLK_DIR_CNTL_EN (1 << 8)
1292#define CG_DCLK_STATUS 0xC05000A0
1293# define DCLK_STATUS (1 << 0)
1294#define CG_VCLK_CNTL 0xC05000A4
1295#define CG_VCLK_STATUS 0xC05000A8
1296
1297#endif
diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h
new file mode 100644
index 000000000000..c00339440c5e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_cayman.h
@@ -0,0 +1,1081 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24static const u32 SECT_CONTEXT_def_1[] =
25{
26 0x00000000, // DB_RENDER_CONTROL
27 0x00000000, // DB_COUNT_CONTROL
28 0x00000000, // DB_DEPTH_VIEW
29 0x00000000, // DB_RENDER_OVERRIDE
30 0x00000000, // DB_RENDER_OVERRIDE2
31 0x00000000, // DB_HTILE_DATA_BASE
32 0, // HOLE
33 0, // HOLE
34 0, // HOLE
35 0, // HOLE
36 0x00000000, // DB_STENCIL_CLEAR
37 0x00000000, // DB_DEPTH_CLEAR
38 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
39 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
40 0, // HOLE
41 0x00000000, // DB_DEPTH_INFO
42 0x00000000, // DB_Z_INFO
43 0x00000000, // DB_STENCIL_INFO
44 0x00000000, // DB_Z_READ_BASE
45 0x00000000, // DB_STENCIL_READ_BASE
46 0x00000000, // DB_Z_WRITE_BASE
47 0x00000000, // DB_STENCIL_WRITE_BASE
48 0x00000000, // DB_DEPTH_SIZE
49 0x00000000, // DB_DEPTH_SLICE
50 0, // HOLE
51 0, // HOLE
52 0, // HOLE
53 0, // HOLE
54 0, // HOLE
55 0, // HOLE
56 0, // HOLE
57 0, // HOLE
58 0, // HOLE
59 0, // HOLE
60 0, // HOLE
61 0, // HOLE
62 0, // HOLE
63 0, // HOLE
64 0, // HOLE
65 0, // HOLE
66 0, // HOLE
67 0, // HOLE
68 0, // HOLE
69 0, // HOLE
70 0, // HOLE
71 0, // HOLE
72 0, // HOLE
73 0, // HOLE
74 0, // HOLE
75 0, // HOLE
76 0, // HOLE
77 0, // HOLE
78 0, // HOLE
79 0, // HOLE
80 0, // HOLE
81 0, // HOLE
82 0, // HOLE
83 0, // HOLE
84 0, // HOLE
85 0, // HOLE
86 0, // HOLE
87 0, // HOLE
88 0, // HOLE
89 0, // HOLE
90 0, // HOLE
91 0, // HOLE
92 0, // HOLE
93 0, // HOLE
94 0, // HOLE
95 0, // HOLE
96 0, // HOLE
97 0, // HOLE
98 0, // HOLE
99 0, // HOLE
100 0, // HOLE
101 0, // HOLE
102 0, // HOLE
103 0, // HOLE
104 0, // HOLE
105 0, // HOLE
106 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_0
107 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_1
108 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_2
109 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_3
110 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_4
111 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_5
112 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_6
113 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_7
114 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_8
115 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_9
116 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_10
117 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_11
118 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_12
119 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_13
120 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_14
121 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_15
122 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_0
123 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_1
124 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_2
125 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_3
126 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_4
127 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_5
128 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_6
129 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_7
130 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_8
131 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_9
132 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_10
133 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_11
134 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_12
135 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_13
136 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_14
137 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_15
138 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_0
139 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_1
140 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_2
141 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_3
142 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_4
143 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_5
144 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_6
145 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_7
146 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_8
147 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_9
148 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_10
149 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_11
150 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_12
151 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_13
152 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_14
153 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_15
154 0x00000000, // PA_SC_WINDOW_OFFSET
155 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
156 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
157 0x0000ffff, // PA_SC_CLIPRECT_RULE
158 0x00000000, // PA_SC_CLIPRECT_0_TL
159 0x40004000, // PA_SC_CLIPRECT_0_BR
160 0x00000000, // PA_SC_CLIPRECT_1_TL
161 0x40004000, // PA_SC_CLIPRECT_1_BR
162 0x00000000, // PA_SC_CLIPRECT_2_TL
163 0x40004000, // PA_SC_CLIPRECT_2_BR
164 0x00000000, // PA_SC_CLIPRECT_3_TL
165 0x40004000, // PA_SC_CLIPRECT_3_BR
166 0xaa99aaaa, // PA_SC_EDGERULE
167 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
168 0xffffffff, // CB_TARGET_MASK
169 0xffffffff, // CB_SHADER_MASK
170 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
171 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
172 0x00000000, // COHER_DEST_BASE_0
173 0x00000000, // COHER_DEST_BASE_1
174 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
175 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
176 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
177 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
178 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
179 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
180 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
181 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
182 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
183 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
184 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
185 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
186 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
187 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
188 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
189 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
190 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
191 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
192 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
193 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
194 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
195 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
196 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
197 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
198 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
199 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
200 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
201 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
202 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
203 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
204 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
205 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
206 0x00000000, // PA_SC_VPORT_ZMIN_0
207 0x3f800000, // PA_SC_VPORT_ZMAX_0
208 0x00000000, // PA_SC_VPORT_ZMIN_1
209 0x3f800000, // PA_SC_VPORT_ZMAX_1
210 0x00000000, // PA_SC_VPORT_ZMIN_2
211 0x3f800000, // PA_SC_VPORT_ZMAX_2
212 0x00000000, // PA_SC_VPORT_ZMIN_3
213 0x3f800000, // PA_SC_VPORT_ZMAX_3
214 0x00000000, // PA_SC_VPORT_ZMIN_4
215 0x3f800000, // PA_SC_VPORT_ZMAX_4
216 0x00000000, // PA_SC_VPORT_ZMIN_5
217 0x3f800000, // PA_SC_VPORT_ZMAX_5
218 0x00000000, // PA_SC_VPORT_ZMIN_6
219 0x3f800000, // PA_SC_VPORT_ZMAX_6
220 0x00000000, // PA_SC_VPORT_ZMIN_7
221 0x3f800000, // PA_SC_VPORT_ZMAX_7
222 0x00000000, // PA_SC_VPORT_ZMIN_8
223 0x3f800000, // PA_SC_VPORT_ZMAX_8
224 0x00000000, // PA_SC_VPORT_ZMIN_9
225 0x3f800000, // PA_SC_VPORT_ZMAX_9
226 0x00000000, // PA_SC_VPORT_ZMIN_10
227 0x3f800000, // PA_SC_VPORT_ZMAX_10
228 0x00000000, // PA_SC_VPORT_ZMIN_11
229 0x3f800000, // PA_SC_VPORT_ZMAX_11
230 0x00000000, // PA_SC_VPORT_ZMIN_12
231 0x3f800000, // PA_SC_VPORT_ZMAX_12
232 0x00000000, // PA_SC_VPORT_ZMIN_13
233 0x3f800000, // PA_SC_VPORT_ZMAX_13
234 0x00000000, // PA_SC_VPORT_ZMIN_14
235 0x3f800000, // PA_SC_VPORT_ZMAX_14
236 0x00000000, // PA_SC_VPORT_ZMIN_15
237 0x3f800000, // PA_SC_VPORT_ZMAX_15
238 0x00000000, // SX_MISC
239 0x00000000, // SX_SURFACE_SYNC
240 0x00000000, // SX_SCATTER_EXPORT_BASE
241 0x00000000, // SX_SCATTER_EXPORT_SIZE
242 0x00000000, // CP_PERFMON_CNTX_CNTL
243 0x00000000, // CP_RINGID
244 0x00000000, // CP_VMID
245 0, // HOLE
246 0, // HOLE
247 0, // HOLE
248 0, // HOLE
249 0, // HOLE
250 0x00000000, // SQ_VTX_SEMANTIC_0
251 0x00000000, // SQ_VTX_SEMANTIC_1
252 0x00000000, // SQ_VTX_SEMANTIC_2
253 0x00000000, // SQ_VTX_SEMANTIC_3
254 0x00000000, // SQ_VTX_SEMANTIC_4
255 0x00000000, // SQ_VTX_SEMANTIC_5
256 0x00000000, // SQ_VTX_SEMANTIC_6
257 0x00000000, // SQ_VTX_SEMANTIC_7
258 0x00000000, // SQ_VTX_SEMANTIC_8
259 0x00000000, // SQ_VTX_SEMANTIC_9
260 0x00000000, // SQ_VTX_SEMANTIC_10
261 0x00000000, // SQ_VTX_SEMANTIC_11
262 0x00000000, // SQ_VTX_SEMANTIC_12
263 0x00000000, // SQ_VTX_SEMANTIC_13
264 0x00000000, // SQ_VTX_SEMANTIC_14
265 0x00000000, // SQ_VTX_SEMANTIC_15
266 0x00000000, // SQ_VTX_SEMANTIC_16
267 0x00000000, // SQ_VTX_SEMANTIC_17
268 0x00000000, // SQ_VTX_SEMANTIC_18
269 0x00000000, // SQ_VTX_SEMANTIC_19
270 0x00000000, // SQ_VTX_SEMANTIC_20
271 0x00000000, // SQ_VTX_SEMANTIC_21
272 0x00000000, // SQ_VTX_SEMANTIC_22
273 0x00000000, // SQ_VTX_SEMANTIC_23
274 0x00000000, // SQ_VTX_SEMANTIC_24
275 0x00000000, // SQ_VTX_SEMANTIC_25
276 0x00000000, // SQ_VTX_SEMANTIC_26
277 0x00000000, // SQ_VTX_SEMANTIC_27
278 0x00000000, // SQ_VTX_SEMANTIC_28
279 0x00000000, // SQ_VTX_SEMANTIC_29
280 0x00000000, // SQ_VTX_SEMANTIC_30
281 0x00000000, // SQ_VTX_SEMANTIC_31
282 0xffffffff, // VGT_MAX_VTX_INDX
283 0x00000000, // VGT_MIN_VTX_INDX
284 0x00000000, // VGT_INDX_OFFSET
285 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
286 0x00000000, // SX_ALPHA_TEST_CONTROL
287 0x00000000, // CB_BLEND_RED
288 0x00000000, // CB_BLEND_GREEN
289 0x00000000, // CB_BLEND_BLUE
290 0x00000000, // CB_BLEND_ALPHA
291 0, // HOLE
292 0, // HOLE
293 0, // HOLE
294 0x00000000, // DB_STENCILREFMASK
295 0x00000000, // DB_STENCILREFMASK_BF
296 0x00000000, // SX_ALPHA_REF
297 0x00000000, // PA_CL_VPORT_XSCALE
298 0x00000000, // PA_CL_VPORT_XOFFSET
299 0x00000000, // PA_CL_VPORT_YSCALE
300 0x00000000, // PA_CL_VPORT_YOFFSET
301 0x00000000, // PA_CL_VPORT_ZSCALE
302 0x00000000, // PA_CL_VPORT_ZOFFSET
303 0x00000000, // PA_CL_VPORT_XSCALE_1
304 0x00000000, // PA_CL_VPORT_XOFFSET_1
305 0x00000000, // PA_CL_VPORT_YSCALE_1
306 0x00000000, // PA_CL_VPORT_YOFFSET_1
307 0x00000000, // PA_CL_VPORT_ZSCALE_1
308 0x00000000, // PA_CL_VPORT_ZOFFSET_1
309 0x00000000, // PA_CL_VPORT_XSCALE_2
310 0x00000000, // PA_CL_VPORT_XOFFSET_2
311 0x00000000, // PA_CL_VPORT_YSCALE_2
312 0x00000000, // PA_CL_VPORT_YOFFSET_2
313 0x00000000, // PA_CL_VPORT_ZSCALE_2
314 0x00000000, // PA_CL_VPORT_ZOFFSET_2
315 0x00000000, // PA_CL_VPORT_XSCALE_3
316 0x00000000, // PA_CL_VPORT_XOFFSET_3
317 0x00000000, // PA_CL_VPORT_YSCALE_3
318 0x00000000, // PA_CL_VPORT_YOFFSET_3
319 0x00000000, // PA_CL_VPORT_ZSCALE_3
320 0x00000000, // PA_CL_VPORT_ZOFFSET_3
321 0x00000000, // PA_CL_VPORT_XSCALE_4
322 0x00000000, // PA_CL_VPORT_XOFFSET_4
323 0x00000000, // PA_CL_VPORT_YSCALE_4
324 0x00000000, // PA_CL_VPORT_YOFFSET_4
325 0x00000000, // PA_CL_VPORT_ZSCALE_4
326 0x00000000, // PA_CL_VPORT_ZOFFSET_4
327 0x00000000, // PA_CL_VPORT_XSCALE_5
328 0x00000000, // PA_CL_VPORT_XOFFSET_5
329 0x00000000, // PA_CL_VPORT_YSCALE_5
330 0x00000000, // PA_CL_VPORT_YOFFSET_5
331 0x00000000, // PA_CL_VPORT_ZSCALE_5
332 0x00000000, // PA_CL_VPORT_ZOFFSET_5
333 0x00000000, // PA_CL_VPORT_XSCALE_6
334 0x00000000, // PA_CL_VPORT_XOFFSET_6
335 0x00000000, // PA_CL_VPORT_YSCALE_6
336 0x00000000, // PA_CL_VPORT_YOFFSET_6
337 0x00000000, // PA_CL_VPORT_ZSCALE_6
338 0x00000000, // PA_CL_VPORT_ZOFFSET_6
339 0x00000000, // PA_CL_VPORT_XSCALE_7
340 0x00000000, // PA_CL_VPORT_XOFFSET_7
341 0x00000000, // PA_CL_VPORT_YSCALE_7
342 0x00000000, // PA_CL_VPORT_YOFFSET_7
343 0x00000000, // PA_CL_VPORT_ZSCALE_7
344 0x00000000, // PA_CL_VPORT_ZOFFSET_7
345 0x00000000, // PA_CL_VPORT_XSCALE_8
346 0x00000000, // PA_CL_VPORT_XOFFSET_8
347 0x00000000, // PA_CL_VPORT_YSCALE_8
348 0x00000000, // PA_CL_VPORT_YOFFSET_8
349 0x00000000, // PA_CL_VPORT_ZSCALE_8
350 0x00000000, // PA_CL_VPORT_ZOFFSET_8
351 0x00000000, // PA_CL_VPORT_XSCALE_9
352 0x00000000, // PA_CL_VPORT_XOFFSET_9
353 0x00000000, // PA_CL_VPORT_YSCALE_9
354 0x00000000, // PA_CL_VPORT_YOFFSET_9
355 0x00000000, // PA_CL_VPORT_ZSCALE_9
356 0x00000000, // PA_CL_VPORT_ZOFFSET_9
357 0x00000000, // PA_CL_VPORT_XSCALE_10
358 0x00000000, // PA_CL_VPORT_XOFFSET_10
359 0x00000000, // PA_CL_VPORT_YSCALE_10
360 0x00000000, // PA_CL_VPORT_YOFFSET_10
361 0x00000000, // PA_CL_VPORT_ZSCALE_10
362 0x00000000, // PA_CL_VPORT_ZOFFSET_10
363 0x00000000, // PA_CL_VPORT_XSCALE_11
364 0x00000000, // PA_CL_VPORT_XOFFSET_11
365 0x00000000, // PA_CL_VPORT_YSCALE_11
366 0x00000000, // PA_CL_VPORT_YOFFSET_11
367 0x00000000, // PA_CL_VPORT_ZSCALE_11
368 0x00000000, // PA_CL_VPORT_ZOFFSET_11
369 0x00000000, // PA_CL_VPORT_XSCALE_12
370 0x00000000, // PA_CL_VPORT_XOFFSET_12
371 0x00000000, // PA_CL_VPORT_YSCALE_12
372 0x00000000, // PA_CL_VPORT_YOFFSET_12
373 0x00000000, // PA_CL_VPORT_ZSCALE_12
374 0x00000000, // PA_CL_VPORT_ZOFFSET_12
375 0x00000000, // PA_CL_VPORT_XSCALE_13
376 0x00000000, // PA_CL_VPORT_XOFFSET_13
377 0x00000000, // PA_CL_VPORT_YSCALE_13
378 0x00000000, // PA_CL_VPORT_YOFFSET_13
379 0x00000000, // PA_CL_VPORT_ZSCALE_13
380 0x00000000, // PA_CL_VPORT_ZOFFSET_13
381 0x00000000, // PA_CL_VPORT_XSCALE_14
382 0x00000000, // PA_CL_VPORT_XOFFSET_14
383 0x00000000, // PA_CL_VPORT_YSCALE_14
384 0x00000000, // PA_CL_VPORT_YOFFSET_14
385 0x00000000, // PA_CL_VPORT_ZSCALE_14
386 0x00000000, // PA_CL_VPORT_ZOFFSET_14
387 0x00000000, // PA_CL_VPORT_XSCALE_15
388 0x00000000, // PA_CL_VPORT_XOFFSET_15
389 0x00000000, // PA_CL_VPORT_YSCALE_15
390 0x00000000, // PA_CL_VPORT_YOFFSET_15
391 0x00000000, // PA_CL_VPORT_ZSCALE_15
392 0x00000000, // PA_CL_VPORT_ZOFFSET_15
393 0x00000000, // PA_CL_UCP_0_X
394 0x00000000, // PA_CL_UCP_0_Y
395 0x00000000, // PA_CL_UCP_0_Z
396 0x00000000, // PA_CL_UCP_0_W
397 0x00000000, // PA_CL_UCP_1_X
398 0x00000000, // PA_CL_UCP_1_Y
399 0x00000000, // PA_CL_UCP_1_Z
400 0x00000000, // PA_CL_UCP_1_W
401 0x00000000, // PA_CL_UCP_2_X
402 0x00000000, // PA_CL_UCP_2_Y
403 0x00000000, // PA_CL_UCP_2_Z
404 0x00000000, // PA_CL_UCP_2_W
405 0x00000000, // PA_CL_UCP_3_X
406 0x00000000, // PA_CL_UCP_3_Y
407 0x00000000, // PA_CL_UCP_3_Z
408 0x00000000, // PA_CL_UCP_3_W
409 0x00000000, // PA_CL_UCP_4_X
410 0x00000000, // PA_CL_UCP_4_Y
411 0x00000000, // PA_CL_UCP_4_Z
412 0x00000000, // PA_CL_UCP_4_W
413 0x00000000, // PA_CL_UCP_5_X
414 0x00000000, // PA_CL_UCP_5_Y
415 0x00000000, // PA_CL_UCP_5_Z
416 0x00000000, // PA_CL_UCP_5_W
417 0x00000000, // SPI_VS_OUT_ID_0
418 0x00000000, // SPI_VS_OUT_ID_1
419 0x00000000, // SPI_VS_OUT_ID_2
420 0x00000000, // SPI_VS_OUT_ID_3
421 0x00000000, // SPI_VS_OUT_ID_4
422 0x00000000, // SPI_VS_OUT_ID_5
423 0x00000000, // SPI_VS_OUT_ID_6
424 0x00000000, // SPI_VS_OUT_ID_7
425 0x00000000, // SPI_VS_OUT_ID_8
426 0x00000000, // SPI_VS_OUT_ID_9
427 0x00000000, // SPI_PS_INPUT_CNTL_0
428 0x00000000, // SPI_PS_INPUT_CNTL_1
429 0x00000000, // SPI_PS_INPUT_CNTL_2
430 0x00000000, // SPI_PS_INPUT_CNTL_3
431 0x00000000, // SPI_PS_INPUT_CNTL_4
432 0x00000000, // SPI_PS_INPUT_CNTL_5
433 0x00000000, // SPI_PS_INPUT_CNTL_6
434 0x00000000, // SPI_PS_INPUT_CNTL_7
435 0x00000000, // SPI_PS_INPUT_CNTL_8
436 0x00000000, // SPI_PS_INPUT_CNTL_9
437 0x00000000, // SPI_PS_INPUT_CNTL_10
438 0x00000000, // SPI_PS_INPUT_CNTL_11
439 0x00000000, // SPI_PS_INPUT_CNTL_12
440 0x00000000, // SPI_PS_INPUT_CNTL_13
441 0x00000000, // SPI_PS_INPUT_CNTL_14
442 0x00000000, // SPI_PS_INPUT_CNTL_15
443 0x00000000, // SPI_PS_INPUT_CNTL_16
444 0x00000000, // SPI_PS_INPUT_CNTL_17
445 0x00000000, // SPI_PS_INPUT_CNTL_18
446 0x00000000, // SPI_PS_INPUT_CNTL_19
447 0x00000000, // SPI_PS_INPUT_CNTL_20
448 0x00000000, // SPI_PS_INPUT_CNTL_21
449 0x00000000, // SPI_PS_INPUT_CNTL_22
450 0x00000000, // SPI_PS_INPUT_CNTL_23
451 0x00000000, // SPI_PS_INPUT_CNTL_24
452 0x00000000, // SPI_PS_INPUT_CNTL_25
453 0x00000000, // SPI_PS_INPUT_CNTL_26
454 0x00000000, // SPI_PS_INPUT_CNTL_27
455 0x00000000, // SPI_PS_INPUT_CNTL_28
456 0x00000000, // SPI_PS_INPUT_CNTL_29
457 0x00000000, // SPI_PS_INPUT_CNTL_30
458 0x00000000, // SPI_PS_INPUT_CNTL_31
459 0x00000000, // SPI_VS_OUT_CONFIG
460 0x00000001, // SPI_THREAD_GROUPING
461 0x00000002, // SPI_PS_IN_CONTROL_0
462 0x00000000, // SPI_PS_IN_CONTROL_1
463 0x00000000, // SPI_INTERP_CONTROL_0
464 0x00000000, // SPI_INPUT_Z
465 0x00000000, // SPI_FOG_CNTL
466 0x00000000, // SPI_BARYC_CNTL
467 0x00000000, // SPI_PS_IN_CONTROL_2
468 0x00000000, // SPI_COMPUTE_INPUT_CNTL
469 0x00000000, // SPI_COMPUTE_NUM_THREAD_X
470 0x00000000, // SPI_COMPUTE_NUM_THREAD_Y
471 0x00000000, // SPI_COMPUTE_NUM_THREAD_Z
472 0x00000000, // SPI_GPR_MGMT
473 0x00000000, // SPI_LDS_MGMT
474 0x00000000, // SPI_STACK_MGMT
475 0x00000000, // SPI_WAVE_MGMT_1
476 0x00000000, // SPI_WAVE_MGMT_2
477 0, // HOLE
478 0, // HOLE
479 0, // HOLE
480 0, // HOLE
481 0, // HOLE
482 0x00000000, // GDS_ADDR_BASE
483 0x00003fff, // GDS_ADDR_SIZE
484 0, // HOLE
485 0, // HOLE
486 0x00000000, // GDS_ORDERED_COUNT
487 0, // HOLE
488 0, // HOLE
489 0, // HOLE
490 0x00000000, // GDS_APPEND_CONSUME_UAV0
491 0x00000000, // GDS_APPEND_CONSUME_UAV1
492 0x00000000, // GDS_APPEND_CONSUME_UAV2
493 0x00000000, // GDS_APPEND_CONSUME_UAV3
494 0x00000000, // GDS_APPEND_CONSUME_UAV4
495 0x00000000, // GDS_APPEND_CONSUME_UAV5
496 0x00000000, // GDS_APPEND_CONSUME_UAV6
497 0x00000000, // GDS_APPEND_CONSUME_UAV7
498 0x00000000, // GDS_APPEND_CONSUME_UAV8
499 0x00000000, // GDS_APPEND_CONSUME_UAV9
500 0x00000000, // GDS_APPEND_CONSUME_UAV10
501 0x00000000, // GDS_APPEND_CONSUME_UAV11
502 0, // HOLE
503 0, // HOLE
504 0, // HOLE
505 0, // HOLE
506 0x00000000, // CB_BLEND0_CONTROL
507 0x00000000, // CB_BLEND1_CONTROL
508 0x00000000, // CB_BLEND2_CONTROL
509 0x00000000, // CB_BLEND3_CONTROL
510 0x00000000, // CB_BLEND4_CONTROL
511 0x00000000, // CB_BLEND5_CONTROL
512 0x00000000, // CB_BLEND6_CONTROL
513 0x00000000, // CB_BLEND7_CONTROL
514};
515static const u32 SECT_CONTEXT_def_2[] =
516{
517 0x00000000, // PA_CL_POINT_X_RAD
518 0x00000000, // PA_CL_POINT_Y_RAD
519 0x00000000, // PA_CL_POINT_SIZE
520 0x00000000, // PA_CL_POINT_CULL_RAD
521 0x00000000, // VGT_DMA_BASE_HI
522 0x00000000, // VGT_DMA_BASE
523};
524static const u32 SECT_CONTEXT_def_3[] =
525{
526 0x00000000, // DB_DEPTH_CONTROL
527 0x00000000, // DB_EQAA
528 0x00000000, // CB_COLOR_CONTROL
529 0x00000200, // DB_SHADER_CONTROL
530 0x00000000, // PA_CL_CLIP_CNTL
531 0x00000000, // PA_SU_SC_MODE_CNTL
532 0x00000000, // PA_CL_VTE_CNTL
533 0x00000000, // PA_CL_VS_OUT_CNTL
534 0x00000000, // PA_CL_NANINF_CNTL
535 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
536 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
537 0x00000000, // PA_SU_PRIM_FILTER_CNTL
538 0x00000000, // SQ_LSTMP_RING_ITEMSIZE
539 0x00000000, // SQ_HSTMP_RING_ITEMSIZE
540 0, // HOLE
541 0, // HOLE
542 0x00000000, // SQ_PGM_START_PS
543 0x00000000, // SQ_PGM_RESOURCES_PS
544 0x00000000, // SQ_PGM_RESOURCES_2_PS
545 0x00000000, // SQ_PGM_EXPORTS_PS
546 0, // HOLE
547 0, // HOLE
548 0, // HOLE
549 0x00000000, // SQ_PGM_START_VS
550 0x00000000, // SQ_PGM_RESOURCES_VS
551 0x00000000, // SQ_PGM_RESOURCES_2_VS
552 0, // HOLE
553 0, // HOLE
554 0, // HOLE
555 0x00000000, // SQ_PGM_START_GS
556 0x00000000, // SQ_PGM_RESOURCES_GS
557 0x00000000, // SQ_PGM_RESOURCES_2_GS
558 0, // HOLE
559 0, // HOLE
560 0, // HOLE
561 0x00000000, // SQ_PGM_START_ES
562 0x00000000, // SQ_PGM_RESOURCES_ES
563 0x00000000, // SQ_PGM_RESOURCES_2_ES
564 0, // HOLE
565 0, // HOLE
566 0, // HOLE
567 0x00000000, // SQ_PGM_START_FS
568 0x00000000, // SQ_PGM_RESOURCES_FS
569 0, // HOLE
570 0, // HOLE
571 0, // HOLE
572 0x00000000, // SQ_PGM_START_HS
573 0x00000000, // SQ_PGM_RESOURCES_HS
574 0x00000000, // SQ_PGM_RESOURCES_2_HS
575 0, // HOLE
576 0, // HOLE
577 0, // HOLE
578 0x00000000, // SQ_PGM_START_LS
579 0x00000000, // SQ_PGM_RESOURCES_LS
580 0x00000000, // SQ_PGM_RESOURCES_2_LS
581};
582static const u32 SECT_CONTEXT_def_4[] =
583{
584 0x00000000, // SQ_LDS_ALLOC
585 0x00000000, // SQ_LDS_ALLOC_PS
586 0x00000000, // SQ_VTX_SEMANTIC_CLEAR
587 0, // HOLE
588 0x00000000, // SQ_THREAD_TRACE_CTRL
589 0, // HOLE
590 0x00000000, // SQ_ESGS_RING_ITEMSIZE
591 0x00000000, // SQ_GSVS_RING_ITEMSIZE
592 0x00000000, // SQ_ESTMP_RING_ITEMSIZE
593 0x00000000, // SQ_GSTMP_RING_ITEMSIZE
594 0x00000000, // SQ_VSTMP_RING_ITEMSIZE
595 0x00000000, // SQ_PSTMP_RING_ITEMSIZE
596 0, // HOLE
597 0x00000000, // SQ_GS_VERT_ITEMSIZE
598 0x00000000, // SQ_GS_VERT_ITEMSIZE_1
599 0x00000000, // SQ_GS_VERT_ITEMSIZE_2
600 0x00000000, // SQ_GS_VERT_ITEMSIZE_3
601 0x00000000, // SQ_GSVS_RING_OFFSET_1
602 0x00000000, // SQ_GSVS_RING_OFFSET_2
603 0x00000000, // SQ_GSVS_RING_OFFSET_3
604 0x00000000, // SQ_GWS_RING_OFFSET
605 0, // HOLE
606 0x00000000, // SQ_ALU_CONST_CACHE_PS_0
607 0x00000000, // SQ_ALU_CONST_CACHE_PS_1
608 0x00000000, // SQ_ALU_CONST_CACHE_PS_2
609 0x00000000, // SQ_ALU_CONST_CACHE_PS_3
610 0x00000000, // SQ_ALU_CONST_CACHE_PS_4
611 0x00000000, // SQ_ALU_CONST_CACHE_PS_5
612 0x00000000, // SQ_ALU_CONST_CACHE_PS_6
613 0x00000000, // SQ_ALU_CONST_CACHE_PS_7
614 0x00000000, // SQ_ALU_CONST_CACHE_PS_8
615 0x00000000, // SQ_ALU_CONST_CACHE_PS_9
616 0x00000000, // SQ_ALU_CONST_CACHE_PS_10
617 0x00000000, // SQ_ALU_CONST_CACHE_PS_11
618 0x00000000, // SQ_ALU_CONST_CACHE_PS_12
619 0x00000000, // SQ_ALU_CONST_CACHE_PS_13
620 0x00000000, // SQ_ALU_CONST_CACHE_PS_14
621 0x00000000, // SQ_ALU_CONST_CACHE_PS_15
622 0x00000000, // SQ_ALU_CONST_CACHE_VS_0
623 0x00000000, // SQ_ALU_CONST_CACHE_VS_1
624 0x00000000, // SQ_ALU_CONST_CACHE_VS_2
625 0x00000000, // SQ_ALU_CONST_CACHE_VS_3
626 0x00000000, // SQ_ALU_CONST_CACHE_VS_4
627 0x00000000, // SQ_ALU_CONST_CACHE_VS_5
628 0x00000000, // SQ_ALU_CONST_CACHE_VS_6
629 0x00000000, // SQ_ALU_CONST_CACHE_VS_7
630 0x00000000, // SQ_ALU_CONST_CACHE_VS_8
631 0x00000000, // SQ_ALU_CONST_CACHE_VS_9
632 0x00000000, // SQ_ALU_CONST_CACHE_VS_10
633 0x00000000, // SQ_ALU_CONST_CACHE_VS_11
634 0x00000000, // SQ_ALU_CONST_CACHE_VS_12
635 0x00000000, // SQ_ALU_CONST_CACHE_VS_13
636 0x00000000, // SQ_ALU_CONST_CACHE_VS_14
637 0x00000000, // SQ_ALU_CONST_CACHE_VS_15
638 0x00000000, // SQ_ALU_CONST_CACHE_GS_0
639 0x00000000, // SQ_ALU_CONST_CACHE_GS_1
640 0x00000000, // SQ_ALU_CONST_CACHE_GS_2
641 0x00000000, // SQ_ALU_CONST_CACHE_GS_3
642 0x00000000, // SQ_ALU_CONST_CACHE_GS_4
643 0x00000000, // SQ_ALU_CONST_CACHE_GS_5
644 0x00000000, // SQ_ALU_CONST_CACHE_GS_6
645 0x00000000, // SQ_ALU_CONST_CACHE_GS_7
646 0x00000000, // SQ_ALU_CONST_CACHE_GS_8
647 0x00000000, // SQ_ALU_CONST_CACHE_GS_9
648 0x00000000, // SQ_ALU_CONST_CACHE_GS_10
649 0x00000000, // SQ_ALU_CONST_CACHE_GS_11
650 0x00000000, // SQ_ALU_CONST_CACHE_GS_12
651 0x00000000, // SQ_ALU_CONST_CACHE_GS_13
652 0x00000000, // SQ_ALU_CONST_CACHE_GS_14
653 0x00000000, // SQ_ALU_CONST_CACHE_GS_15
654 0x00000000, // PA_SU_POINT_SIZE
655 0x00000000, // PA_SU_POINT_MINMAX
656 0x00000000, // PA_SU_LINE_CNTL
657 0x00000000, // PA_SC_LINE_STIPPLE
658 0x00000000, // VGT_OUTPUT_PATH_CNTL
659 0x00000000, // VGT_HOS_CNTL
660 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
661 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
662 0x00000000, // VGT_HOS_REUSE_DEPTH
663 0x00000000, // VGT_GROUP_PRIM_TYPE
664 0x00000000, // VGT_GROUP_FIRST_DECR
665 0x00000000, // VGT_GROUP_DECR
666 0x00000000, // VGT_GROUP_VECT_0_CNTL
667 0x00000000, // VGT_GROUP_VECT_1_CNTL
668 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
669 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
670 0x00000000, // VGT_GS_MODE
671 0, // HOLE
672 0x00000000, // PA_SC_MODE_CNTL_0
673 0x00000000, // PA_SC_MODE_CNTL_1
674 0x00000000, // VGT_ENHANCE
675 0x00000100, // VGT_GS_PER_ES
676 0x00000080, // VGT_ES_PER_GS
677 0x00000002, // VGT_GS_PER_VS
678 0, // HOLE
679 0, // HOLE
680 0, // HOLE
681 0x00000000, // VGT_GS_OUT_PRIM_TYPE
682 0x00000000, // IA_ENHANCE
683};
684static const u32 SECT_CONTEXT_def_5[] =
685{
686 0x00000000, // VGT_DMA_MAX_SIZE
687 0x00000000, // VGT_DMA_INDEX_TYPE
688 0, // HOLE
689 0x00000000, // VGT_PRIMITIVEID_EN
690 0x00000000, // VGT_DMA_NUM_INSTANCES
691};
692static const u32 SECT_CONTEXT_def_6[] =
693{
694 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
695 0, // HOLE
696 0, // HOLE
697 0x00000000, // VGT_INSTANCE_STEP_RATE_0
698 0x00000000, // VGT_INSTANCE_STEP_RATE_1
699 0x000000ff, // IA_MULTI_VGT_PARAM
700 0, // HOLE
701 0, // HOLE
702 0x00000000, // VGT_REUSE_OFF
703 0x00000000, // VGT_VTX_CNT_EN
704 0x00000000, // DB_HTILE_SURFACE
705 0x00000000, // DB_SRESULTS_COMPARE_STATE0
706 0x00000000, // DB_SRESULTS_COMPARE_STATE1
707 0x00000000, // DB_PRELOAD_CONTROL
708 0, // HOLE
709 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
710 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
711 0x00000000, // VGT_STRMOUT_BUFFER_BASE_0
712 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
713 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
714 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
715 0x00000000, // VGT_STRMOUT_BUFFER_BASE_1
716 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
717 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
718 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
719 0x00000000, // VGT_STRMOUT_BUFFER_BASE_2
720 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
721 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
722 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
723 0x00000000, // VGT_STRMOUT_BUFFER_BASE_3
724 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
725 0x00000000, // VGT_STRMOUT_BASE_OFFSET_0
726 0x00000000, // VGT_STRMOUT_BASE_OFFSET_1
727 0x00000000, // VGT_STRMOUT_BASE_OFFSET_2
728 0x00000000, // VGT_STRMOUT_BASE_OFFSET_3
729 0, // HOLE
730 0, // HOLE
731 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
732 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
733 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
734 0, // HOLE
735 0x00000000, // VGT_GS_MAX_VERT_OUT
736 0, // HOLE
737 0, // HOLE
738 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_0
739 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_1
740 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_2
741 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_3
742 0x00000000, // VGT_SHADER_STAGES_EN
743 0x00000000, // VGT_LS_HS_CONFIG
744 0, // HOLE
745 0, // HOLE
746 0, // HOLE
747 0, // HOLE
748 0x00000000, // VGT_TF_PARAM
749 0x00000000, // DB_ALPHA_TO_MASK
750};
751static const u32 SECT_CONTEXT_def_7[] =
752{
753 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
754 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
755 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
756 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
757 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
758 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
759 0x00000000, // VGT_GS_INSTANCE_CNT
760 0x00000000, // VGT_STRMOUT_CONFIG
761 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
762 0x00000000, // CB_IMMED0_BASE
763 0x00000000, // CB_IMMED1_BASE
764 0x00000000, // CB_IMMED2_BASE
765 0x00000000, // CB_IMMED3_BASE
766 0x00000000, // CB_IMMED4_BASE
767 0x00000000, // CB_IMMED5_BASE
768 0x00000000, // CB_IMMED6_BASE
769 0x00000000, // CB_IMMED7_BASE
770 0x00000000, // CB_IMMED8_BASE
771 0x00000000, // CB_IMMED9_BASE
772 0x00000000, // CB_IMMED10_BASE
773 0x00000000, // CB_IMMED11_BASE
774 0, // HOLE
775 0, // HOLE
776 0x00000000, // PA_SC_CENTROID_PRIORITY_0
777 0x00000000, // PA_SC_CENTROID_PRIORITY_1
778 0x00001000, // PA_SC_LINE_CNTL
779 0x00000000, // PA_SC_AA_CONFIG
780 0x00000005, // PA_SU_VTX_CNTL
781 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
782 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
783 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
784 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
785 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
786 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
787 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
788 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
789 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
790 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
791 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
792 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
793 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
794 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
795 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
796 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
797 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
798 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
799 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
800 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
801 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
802 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
803 0x00000000, // CB_CLRCMP_CONTROL
804 0x00000000, // CB_CLRCMP_SRC
805 0x00000000, // CB_CLRCMP_DST
806 0x00000000, // CB_CLRCMP_MSK
807 0, // HOLE
808 0, // HOLE
809 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
810 0x00000010, // VGT_OUT_DEALLOC_CNTL
811 0x00000000, // CB_COLOR0_BASE
812 0x00000000, // CB_COLOR0_PITCH
813 0x00000000, // CB_COLOR0_SLICE
814 0x00000000, // CB_COLOR0_VIEW
815 0x00000000, // CB_COLOR0_INFO
816 0x00000000, // CB_COLOR0_ATTRIB
817 0x00000000, // CB_COLOR0_DIM
818 0x00000000, // CB_COLOR0_CMASK
819 0x00000000, // CB_COLOR0_CMASK_SLICE
820 0x00000000, // CB_COLOR0_FMASK
821 0x00000000, // CB_COLOR0_FMASK_SLICE
822 0x00000000, // CB_COLOR0_CLEAR_WORD0
823 0x00000000, // CB_COLOR0_CLEAR_WORD1
824 0x00000000, // CB_COLOR0_CLEAR_WORD2
825 0x00000000, // CB_COLOR0_CLEAR_WORD3
826 0x00000000, // CB_COLOR1_BASE
827 0x00000000, // CB_COLOR1_PITCH
828 0x00000000, // CB_COLOR1_SLICE
829 0x00000000, // CB_COLOR1_VIEW
830 0x00000000, // CB_COLOR1_INFO
831 0x00000000, // CB_COLOR1_ATTRIB
832 0x00000000, // CB_COLOR1_DIM
833 0x00000000, // CB_COLOR1_CMASK
834 0x00000000, // CB_COLOR1_CMASK_SLICE
835 0x00000000, // CB_COLOR1_FMASK
836 0x00000000, // CB_COLOR1_FMASK_SLICE
837 0x00000000, // CB_COLOR1_CLEAR_WORD0
838 0x00000000, // CB_COLOR1_CLEAR_WORD1
839 0x00000000, // CB_COLOR1_CLEAR_WORD2
840 0x00000000, // CB_COLOR1_CLEAR_WORD3
841 0x00000000, // CB_COLOR2_BASE
842 0x00000000, // CB_COLOR2_PITCH
843 0x00000000, // CB_COLOR2_SLICE
844 0x00000000, // CB_COLOR2_VIEW
845 0x00000000, // CB_COLOR2_INFO
846 0x00000000, // CB_COLOR2_ATTRIB
847 0x00000000, // CB_COLOR2_DIM
848 0x00000000, // CB_COLOR2_CMASK
849 0x00000000, // CB_COLOR2_CMASK_SLICE
850 0x00000000, // CB_COLOR2_FMASK
851 0x00000000, // CB_COLOR2_FMASK_SLICE
852 0x00000000, // CB_COLOR2_CLEAR_WORD0
853 0x00000000, // CB_COLOR2_CLEAR_WORD1
854 0x00000000, // CB_COLOR2_CLEAR_WORD2
855 0x00000000, // CB_COLOR2_CLEAR_WORD3
856 0x00000000, // CB_COLOR3_BASE
857 0x00000000, // CB_COLOR3_PITCH
858 0x00000000, // CB_COLOR3_SLICE
859 0x00000000, // CB_COLOR3_VIEW
860 0x00000000, // CB_COLOR3_INFO
861 0x00000000, // CB_COLOR3_ATTRIB
862 0x00000000, // CB_COLOR3_DIM
863 0x00000000, // CB_COLOR3_CMASK
864 0x00000000, // CB_COLOR3_CMASK_SLICE
865 0x00000000, // CB_COLOR3_FMASK
866 0x00000000, // CB_COLOR3_FMASK_SLICE
867 0x00000000, // CB_COLOR3_CLEAR_WORD0
868 0x00000000, // CB_COLOR3_CLEAR_WORD1
869 0x00000000, // CB_COLOR3_CLEAR_WORD2
870 0x00000000, // CB_COLOR3_CLEAR_WORD3
871 0x00000000, // CB_COLOR4_BASE
872 0x00000000, // CB_COLOR4_PITCH
873 0x00000000, // CB_COLOR4_SLICE
874 0x00000000, // CB_COLOR4_VIEW
875 0x00000000, // CB_COLOR4_INFO
876 0x00000000, // CB_COLOR4_ATTRIB
877 0x00000000, // CB_COLOR4_DIM
878 0x00000000, // CB_COLOR4_CMASK
879 0x00000000, // CB_COLOR4_CMASK_SLICE
880 0x00000000, // CB_COLOR4_FMASK
881 0x00000000, // CB_COLOR4_FMASK_SLICE
882 0x00000000, // CB_COLOR4_CLEAR_WORD0
883 0x00000000, // CB_COLOR4_CLEAR_WORD1
884 0x00000000, // CB_COLOR4_CLEAR_WORD2
885 0x00000000, // CB_COLOR4_CLEAR_WORD3
886 0x00000000, // CB_COLOR5_BASE
887 0x00000000, // CB_COLOR5_PITCH
888 0x00000000, // CB_COLOR5_SLICE
889 0x00000000, // CB_COLOR5_VIEW
890 0x00000000, // CB_COLOR5_INFO
891 0x00000000, // CB_COLOR5_ATTRIB
892 0x00000000, // CB_COLOR5_DIM
893 0x00000000, // CB_COLOR5_CMASK
894 0x00000000, // CB_COLOR5_CMASK_SLICE
895 0x00000000, // CB_COLOR5_FMASK
896 0x00000000, // CB_COLOR5_FMASK_SLICE
897 0x00000000, // CB_COLOR5_CLEAR_WORD0
898 0x00000000, // CB_COLOR5_CLEAR_WORD1
899 0x00000000, // CB_COLOR5_CLEAR_WORD2
900 0x00000000, // CB_COLOR5_CLEAR_WORD3
901 0x00000000, // CB_COLOR6_BASE
902 0x00000000, // CB_COLOR6_PITCH
903 0x00000000, // CB_COLOR6_SLICE
904 0x00000000, // CB_COLOR6_VIEW
905 0x00000000, // CB_COLOR6_INFO
906 0x00000000, // CB_COLOR6_ATTRIB
907 0x00000000, // CB_COLOR6_DIM
908 0x00000000, // CB_COLOR6_CMASK
909 0x00000000, // CB_COLOR6_CMASK_SLICE
910 0x00000000, // CB_COLOR6_FMASK
911 0x00000000, // CB_COLOR6_FMASK_SLICE
912 0x00000000, // CB_COLOR6_CLEAR_WORD0
913 0x00000000, // CB_COLOR6_CLEAR_WORD1
914 0x00000000, // CB_COLOR6_CLEAR_WORD2
915 0x00000000, // CB_COLOR6_CLEAR_WORD3
916 0x00000000, // CB_COLOR7_BASE
917 0x00000000, // CB_COLOR7_PITCH
918 0x00000000, // CB_COLOR7_SLICE
919 0x00000000, // CB_COLOR7_VIEW
920 0x00000000, // CB_COLOR7_INFO
921 0x00000000, // CB_COLOR7_ATTRIB
922 0x00000000, // CB_COLOR7_DIM
923 0x00000000, // CB_COLOR7_CMASK
924 0x00000000, // CB_COLOR7_CMASK_SLICE
925 0x00000000, // CB_COLOR7_FMASK
926 0x00000000, // CB_COLOR7_FMASK_SLICE
927 0x00000000, // CB_COLOR7_CLEAR_WORD0
928 0x00000000, // CB_COLOR7_CLEAR_WORD1
929 0x00000000, // CB_COLOR7_CLEAR_WORD2
930 0x00000000, // CB_COLOR7_CLEAR_WORD3
931 0x00000000, // CB_COLOR8_BASE
932 0x00000000, // CB_COLOR8_PITCH
933 0x00000000, // CB_COLOR8_SLICE
934 0x00000000, // CB_COLOR8_VIEW
935 0x00000000, // CB_COLOR8_INFO
936 0x00000000, // CB_COLOR8_ATTRIB
937 0x00000000, // CB_COLOR8_DIM
938 0x00000000, // CB_COLOR9_BASE
939 0x00000000, // CB_COLOR9_PITCH
940 0x00000000, // CB_COLOR9_SLICE
941 0x00000000, // CB_COLOR9_VIEW
942 0x00000000, // CB_COLOR9_INFO
943 0x00000000, // CB_COLOR9_ATTRIB
944 0x00000000, // CB_COLOR9_DIM
945 0x00000000, // CB_COLOR10_BASE
946 0x00000000, // CB_COLOR10_PITCH
947 0x00000000, // CB_COLOR10_SLICE
948 0x00000000, // CB_COLOR10_VIEW
949 0x00000000, // CB_COLOR10_INFO
950 0x00000000, // CB_COLOR10_ATTRIB
951 0x00000000, // CB_COLOR10_DIM
952 0x00000000, // CB_COLOR11_BASE
953 0x00000000, // CB_COLOR11_PITCH
954 0x00000000, // CB_COLOR11_SLICE
955 0x00000000, // CB_COLOR11_VIEW
956 0x00000000, // CB_COLOR11_INFO
957 0x00000000, // CB_COLOR11_ATTRIB
958 0x00000000, // CB_COLOR11_DIM
959 0, // HOLE
960 0, // HOLE
961 0, // HOLE
962 0, // HOLE
963 0, // HOLE
964 0, // HOLE
965 0, // HOLE
966 0, // HOLE
967 0, // HOLE
968 0, // HOLE
969 0, // HOLE
970 0, // HOLE
971 0, // HOLE
972 0, // HOLE
973 0, // HOLE
974 0, // HOLE
975 0, // HOLE
976 0, // HOLE
977 0, // HOLE
978 0, // HOLE
979 0x00000000, // SQ_ALU_CONST_CACHE_HS_0
980 0x00000000, // SQ_ALU_CONST_CACHE_HS_1
981 0x00000000, // SQ_ALU_CONST_CACHE_HS_2
982 0x00000000, // SQ_ALU_CONST_CACHE_HS_3
983 0x00000000, // SQ_ALU_CONST_CACHE_HS_4
984 0x00000000, // SQ_ALU_CONST_CACHE_HS_5
985 0x00000000, // SQ_ALU_CONST_CACHE_HS_6
986 0x00000000, // SQ_ALU_CONST_CACHE_HS_7
987 0x00000000, // SQ_ALU_CONST_CACHE_HS_8
988 0x00000000, // SQ_ALU_CONST_CACHE_HS_9
989 0x00000000, // SQ_ALU_CONST_CACHE_HS_10
990 0x00000000, // SQ_ALU_CONST_CACHE_HS_11
991 0x00000000, // SQ_ALU_CONST_CACHE_HS_12
992 0x00000000, // SQ_ALU_CONST_CACHE_HS_13
993 0x00000000, // SQ_ALU_CONST_CACHE_HS_14
994 0x00000000, // SQ_ALU_CONST_CACHE_HS_15
995 0x00000000, // SQ_ALU_CONST_CACHE_LS_0
996 0x00000000, // SQ_ALU_CONST_CACHE_LS_1
997 0x00000000, // SQ_ALU_CONST_CACHE_LS_2
998 0x00000000, // SQ_ALU_CONST_CACHE_LS_3
999 0x00000000, // SQ_ALU_CONST_CACHE_LS_4
1000 0x00000000, // SQ_ALU_CONST_CACHE_LS_5
1001 0x00000000, // SQ_ALU_CONST_CACHE_LS_6
1002 0x00000000, // SQ_ALU_CONST_CACHE_LS_7
1003 0x00000000, // SQ_ALU_CONST_CACHE_LS_8
1004 0x00000000, // SQ_ALU_CONST_CACHE_LS_9
1005 0x00000000, // SQ_ALU_CONST_CACHE_LS_10
1006 0x00000000, // SQ_ALU_CONST_CACHE_LS_11
1007 0x00000000, // SQ_ALU_CONST_CACHE_LS_12
1008 0x00000000, // SQ_ALU_CONST_CACHE_LS_13
1009 0x00000000, // SQ_ALU_CONST_CACHE_LS_14
1010 0x00000000, // SQ_ALU_CONST_CACHE_LS_15
1011 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_0
1012 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_1
1013 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_2
1014 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_3
1015 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_4
1016 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_5
1017 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_6
1018 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_7
1019 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_8
1020 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_9
1021 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_10
1022 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_11
1023 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_12
1024 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_13
1025 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_14
1026 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_15
1027 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_0
1028 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_1
1029 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_2
1030 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_3
1031 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_4
1032 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_5
1033 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_6
1034 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_7
1035 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_8
1036 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_9
1037 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_10
1038 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_11
1039 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_12
1040 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_13
1041 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_14
1042 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_15
1043};
1044static const struct cs_extent_def SECT_CONTEXT_defs[] =
1045{
1046 {SECT_CONTEXT_def_1, 0x0000a000, 488 },
1047 {SECT_CONTEXT_def_2, 0x0000a1f5, 6 },
1048 {SECT_CONTEXT_def_3, 0x0000a200, 55 },
1049 {SECT_CONTEXT_def_4, 0x0000a23a, 99 },
1050 {SECT_CONTEXT_def_5, 0x0000a29e, 5 },
1051 {SECT_CONTEXT_def_6, 0x0000a2a5, 56 },
1052 {SECT_CONTEXT_def_7, 0x0000a2de, 290 },
1053 { 0, 0, 0 }
1054};
1055static const u32 SECT_CLEAR_def_1[] =
1056{
1057 0xffffffff, // SQ_TEX_SAMPLER_CLEAR
1058 0xffffffff, // SQ_TEX_RESOURCE_CLEAR
1059 0xffffffff, // SQ_LOOP_BOOL_CLEAR
1060};
1061static const struct cs_extent_def SECT_CLEAR_defs[] =
1062{
1063 {SECT_CLEAR_def_1, 0x0000ffc0, 3 },
1064 { 0, 0, 0 }
1065};
1066static const u32 SECT_CTRLCONST_def_1[] =
1067{
1068 0x00000000, // SQ_VTX_BASE_VTX_LOC
1069 0x00000000, // SQ_VTX_START_INST_LOC
1070};
1071static const struct cs_extent_def SECT_CTRLCONST_defs[] =
1072{
1073 {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
1074 { 0, 0, 0 }
1075};
1076struct cs_section_def cayman_cs_data[] = {
1077 { SECT_CONTEXT_defs, SECT_CONTEXT },
1078 { SECT_CLEAR_defs, SECT_CLEAR },
1079 { SECT_CTRLCONST_defs, SECT_CTRLCONST },
1080 { 0, SECT_NONE }
1081};
diff --git a/drivers/gpu/drm/radeon/clearstate_defs.h b/drivers/gpu/drm/radeon/clearstate_defs.h
new file mode 100644
index 000000000000..3eda707d7388
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_defs.h
@@ -0,0 +1,44 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef CLEARSTATE_DEFS_H
24#define CLEARSTATE_DEFS_H
25
26enum section_id {
27 SECT_NONE,
28 SECT_CONTEXT,
29 SECT_CLEAR,
30 SECT_CTRLCONST
31};
32
33struct cs_extent_def {
34 const unsigned int *extent;
35 const unsigned int reg_index;
36 const unsigned int reg_count;
37};
38
39struct cs_section_def {
40 const struct cs_extent_def *section;
41 const enum section_id id;
42};
43
44#endif
diff --git a/drivers/gpu/drm/radeon/clearstate_evergreen.h b/drivers/gpu/drm/radeon/clearstate_evergreen.h
new file mode 100644
index 000000000000..4791d856b7fd
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_evergreen.h
@@ -0,0 +1,1080 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24static const u32 SECT_CONTEXT_def_1[] =
25{
26 0x00000000, // DB_RENDER_CONTROL
27 0x00000000, // DB_COUNT_CONTROL
28 0x00000000, // DB_DEPTH_VIEW
29 0x00000000, // DB_RENDER_OVERRIDE
30 0x00000000, // DB_RENDER_OVERRIDE2
31 0x00000000, // DB_HTILE_DATA_BASE
32 0, // HOLE
33 0, // HOLE
34 0, // HOLE
35 0, // HOLE
36 0x00000000, // DB_STENCIL_CLEAR
37 0x00000000, // DB_DEPTH_CLEAR
38 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
39 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
40 0, // HOLE
41 0, // HOLE
42 0x00000000, // DB_Z_INFO
43 0x00000000, // DB_STENCIL_INFO
44 0x00000000, // DB_Z_READ_BASE
45 0x00000000, // DB_STENCIL_READ_BASE
46 0x00000000, // DB_Z_WRITE_BASE
47 0x00000000, // DB_STENCIL_WRITE_BASE
48 0x00000000, // DB_DEPTH_SIZE
49 0x00000000, // DB_DEPTH_SLICE
50 0, // HOLE
51 0, // HOLE
52 0, // HOLE
53 0, // HOLE
54 0, // HOLE
55 0, // HOLE
56 0, // HOLE
57 0, // HOLE
58 0, // HOLE
59 0, // HOLE
60 0, // HOLE
61 0, // HOLE
62 0, // HOLE
63 0, // HOLE
64 0, // HOLE
65 0, // HOLE
66 0, // HOLE
67 0, // HOLE
68 0, // HOLE
69 0, // HOLE
70 0, // HOLE
71 0, // HOLE
72 0, // HOLE
73 0, // HOLE
74 0, // HOLE
75 0, // HOLE
76 0, // HOLE
77 0, // HOLE
78 0, // HOLE
79 0, // HOLE
80 0, // HOLE
81 0, // HOLE
82 0, // HOLE
83 0, // HOLE
84 0, // HOLE
85 0, // HOLE
86 0, // HOLE
87 0, // HOLE
88 0, // HOLE
89 0, // HOLE
90 0, // HOLE
91 0, // HOLE
92 0, // HOLE
93 0, // HOLE
94 0, // HOLE
95 0, // HOLE
96 0, // HOLE
97 0, // HOLE
98 0, // HOLE
99 0, // HOLE
100 0, // HOLE
101 0, // HOLE
102 0, // HOLE
103 0, // HOLE
104 0, // HOLE
105 0, // HOLE
106 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_0
107 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_1
108 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_2
109 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_3
110 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_4
111 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_5
112 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_6
113 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_7
114 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_8
115 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_9
116 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_10
117 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_11
118 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_12
119 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_13
120 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_14
121 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_15
122 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_0
123 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_1
124 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_2
125 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_3
126 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_4
127 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_5
128 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_6
129 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_7
130 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_8
131 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_9
132 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_10
133 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_11
134 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_12
135 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_13
136 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_14
137 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_15
138 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_0
139 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_1
140 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_2
141 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_3
142 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_4
143 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_5
144 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_6
145 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_7
146 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_8
147 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_9
148 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_10
149 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_11
150 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_12
151 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_13
152 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_14
153 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_15
154 0x00000000, // PA_SC_WINDOW_OFFSET
155 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
156 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
157 0x0000ffff, // PA_SC_CLIPRECT_RULE
158 0x00000000, // PA_SC_CLIPRECT_0_TL
159 0x40004000, // PA_SC_CLIPRECT_0_BR
160 0x00000000, // PA_SC_CLIPRECT_1_TL
161 0x40004000, // PA_SC_CLIPRECT_1_BR
162 0x00000000, // PA_SC_CLIPRECT_2_TL
163 0x40004000, // PA_SC_CLIPRECT_2_BR
164 0x00000000, // PA_SC_CLIPRECT_3_TL
165 0x40004000, // PA_SC_CLIPRECT_3_BR
166 0xaa99aaaa, // PA_SC_EDGERULE
167 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
168 0xffffffff, // CB_TARGET_MASK
169 0xffffffff, // CB_SHADER_MASK
170 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
171 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
172 0x00000000, // COHER_DEST_BASE_0
173 0x00000000, // COHER_DEST_BASE_1
174 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
175 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
176 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
177 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
178 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
179 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
180 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
181 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
182 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
183 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
184 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
185 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
186 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
187 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
188 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
189 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
190 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
191 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
192 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
193 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
194 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
195 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
196 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
197 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
198 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
199 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
200 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
201 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
202 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
203 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
204 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
205 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
206 0x00000000, // PA_SC_VPORT_ZMIN_0
207 0x3f800000, // PA_SC_VPORT_ZMAX_0
208 0x00000000, // PA_SC_VPORT_ZMIN_1
209 0x3f800000, // PA_SC_VPORT_ZMAX_1
210 0x00000000, // PA_SC_VPORT_ZMIN_2
211 0x3f800000, // PA_SC_VPORT_ZMAX_2
212 0x00000000, // PA_SC_VPORT_ZMIN_3
213 0x3f800000, // PA_SC_VPORT_ZMAX_3
214 0x00000000, // PA_SC_VPORT_ZMIN_4
215 0x3f800000, // PA_SC_VPORT_ZMAX_4
216 0x00000000, // PA_SC_VPORT_ZMIN_5
217 0x3f800000, // PA_SC_VPORT_ZMAX_5
218 0x00000000, // PA_SC_VPORT_ZMIN_6
219 0x3f800000, // PA_SC_VPORT_ZMAX_6
220 0x00000000, // PA_SC_VPORT_ZMIN_7
221 0x3f800000, // PA_SC_VPORT_ZMAX_7
222 0x00000000, // PA_SC_VPORT_ZMIN_8
223 0x3f800000, // PA_SC_VPORT_ZMAX_8
224 0x00000000, // PA_SC_VPORT_ZMIN_9
225 0x3f800000, // PA_SC_VPORT_ZMAX_9
226 0x00000000, // PA_SC_VPORT_ZMIN_10
227 0x3f800000, // PA_SC_VPORT_ZMAX_10
228 0x00000000, // PA_SC_VPORT_ZMIN_11
229 0x3f800000, // PA_SC_VPORT_ZMAX_11
230 0x00000000, // PA_SC_VPORT_ZMIN_12
231 0x3f800000, // PA_SC_VPORT_ZMAX_12
232 0x00000000, // PA_SC_VPORT_ZMIN_13
233 0x3f800000, // PA_SC_VPORT_ZMAX_13
234 0x00000000, // PA_SC_VPORT_ZMIN_14
235 0x3f800000, // PA_SC_VPORT_ZMAX_14
236 0x00000000, // PA_SC_VPORT_ZMIN_15
237 0x3f800000, // PA_SC_VPORT_ZMAX_15
238 0x00000000, // SX_MISC
239 0x00000000, // SX_SURFACE_SYNC
240 0x00000000, // CP_PERFMON_CNTX_CNTL
241 0, // HOLE
242 0, // HOLE
243 0, // HOLE
244 0, // HOLE
245 0, // HOLE
246 0, // HOLE
247 0, // HOLE
248 0, // HOLE
249 0, // HOLE
250 0x00000000, // SQ_VTX_SEMANTIC_0
251 0x00000000, // SQ_VTX_SEMANTIC_1
252 0x00000000, // SQ_VTX_SEMANTIC_2
253 0x00000000, // SQ_VTX_SEMANTIC_3
254 0x00000000, // SQ_VTX_SEMANTIC_4
255 0x00000000, // SQ_VTX_SEMANTIC_5
256 0x00000000, // SQ_VTX_SEMANTIC_6
257 0x00000000, // SQ_VTX_SEMANTIC_7
258 0x00000000, // SQ_VTX_SEMANTIC_8
259 0x00000000, // SQ_VTX_SEMANTIC_9
260 0x00000000, // SQ_VTX_SEMANTIC_10
261 0x00000000, // SQ_VTX_SEMANTIC_11
262 0x00000000, // SQ_VTX_SEMANTIC_12
263 0x00000000, // SQ_VTX_SEMANTIC_13
264 0x00000000, // SQ_VTX_SEMANTIC_14
265 0x00000000, // SQ_VTX_SEMANTIC_15
266 0x00000000, // SQ_VTX_SEMANTIC_16
267 0x00000000, // SQ_VTX_SEMANTIC_17
268 0x00000000, // SQ_VTX_SEMANTIC_18
269 0x00000000, // SQ_VTX_SEMANTIC_19
270 0x00000000, // SQ_VTX_SEMANTIC_20
271 0x00000000, // SQ_VTX_SEMANTIC_21
272 0x00000000, // SQ_VTX_SEMANTIC_22
273 0x00000000, // SQ_VTX_SEMANTIC_23
274 0x00000000, // SQ_VTX_SEMANTIC_24
275 0x00000000, // SQ_VTX_SEMANTIC_25
276 0x00000000, // SQ_VTX_SEMANTIC_26
277 0x00000000, // SQ_VTX_SEMANTIC_27
278 0x00000000, // SQ_VTX_SEMANTIC_28
279 0x00000000, // SQ_VTX_SEMANTIC_29
280 0x00000000, // SQ_VTX_SEMANTIC_30
281 0x00000000, // SQ_VTX_SEMANTIC_31
282 0xffffffff, // VGT_MAX_VTX_INDX
283 0x00000000, // VGT_MIN_VTX_INDX
284 0x00000000, // VGT_INDX_OFFSET
285 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
286 0x00000000, // SX_ALPHA_TEST_CONTROL
287 0x00000000, // CB_BLEND_RED
288 0x00000000, // CB_BLEND_GREEN
289 0x00000000, // CB_BLEND_BLUE
290 0x00000000, // CB_BLEND_ALPHA
291 0, // HOLE
292 0, // HOLE
293 0, // HOLE
294 0x00000000, // DB_STENCILREFMASK
295 0x00000000, // DB_STENCILREFMASK_BF
296 0x00000000, // SX_ALPHA_REF
297 0x00000000, // PA_CL_VPORT_XSCALE
298 0x00000000, // PA_CL_VPORT_XOFFSET
299 0x00000000, // PA_CL_VPORT_YSCALE
300 0x00000000, // PA_CL_VPORT_YOFFSET
301 0x00000000, // PA_CL_VPORT_ZSCALE
302 0x00000000, // PA_CL_VPORT_ZOFFSET
303 0x00000000, // PA_CL_VPORT_XSCALE_1
304 0x00000000, // PA_CL_VPORT_XOFFSET_1
305 0x00000000, // PA_CL_VPORT_YSCALE_1
306 0x00000000, // PA_CL_VPORT_YOFFSET_1
307 0x00000000, // PA_CL_VPORT_ZSCALE_1
308 0x00000000, // PA_CL_VPORT_ZOFFSET_1
309 0x00000000, // PA_CL_VPORT_XSCALE_2
310 0x00000000, // PA_CL_VPORT_XOFFSET_2
311 0x00000000, // PA_CL_VPORT_YSCALE_2
312 0x00000000, // PA_CL_VPORT_YOFFSET_2
313 0x00000000, // PA_CL_VPORT_ZSCALE_2
314 0x00000000, // PA_CL_VPORT_ZOFFSET_2
315 0x00000000, // PA_CL_VPORT_XSCALE_3
316 0x00000000, // PA_CL_VPORT_XOFFSET_3
317 0x00000000, // PA_CL_VPORT_YSCALE_3
318 0x00000000, // PA_CL_VPORT_YOFFSET_3
319 0x00000000, // PA_CL_VPORT_ZSCALE_3
320 0x00000000, // PA_CL_VPORT_ZOFFSET_3
321 0x00000000, // PA_CL_VPORT_XSCALE_4
322 0x00000000, // PA_CL_VPORT_XOFFSET_4
323 0x00000000, // PA_CL_VPORT_YSCALE_4
324 0x00000000, // PA_CL_VPORT_YOFFSET_4
325 0x00000000, // PA_CL_VPORT_ZSCALE_4
326 0x00000000, // PA_CL_VPORT_ZOFFSET_4
327 0x00000000, // PA_CL_VPORT_XSCALE_5
328 0x00000000, // PA_CL_VPORT_XOFFSET_5
329 0x00000000, // PA_CL_VPORT_YSCALE_5
330 0x00000000, // PA_CL_VPORT_YOFFSET_5
331 0x00000000, // PA_CL_VPORT_ZSCALE_5
332 0x00000000, // PA_CL_VPORT_ZOFFSET_5
333 0x00000000, // PA_CL_VPORT_XSCALE_6
334 0x00000000, // PA_CL_VPORT_XOFFSET_6
335 0x00000000, // PA_CL_VPORT_YSCALE_6
336 0x00000000, // PA_CL_VPORT_YOFFSET_6
337 0x00000000, // PA_CL_VPORT_ZSCALE_6
338 0x00000000, // PA_CL_VPORT_ZOFFSET_6
339 0x00000000, // PA_CL_VPORT_XSCALE_7
340 0x00000000, // PA_CL_VPORT_XOFFSET_7
341 0x00000000, // PA_CL_VPORT_YSCALE_7
342 0x00000000, // PA_CL_VPORT_YOFFSET_7
343 0x00000000, // PA_CL_VPORT_ZSCALE_7
344 0x00000000, // PA_CL_VPORT_ZOFFSET_7
345 0x00000000, // PA_CL_VPORT_XSCALE_8
346 0x00000000, // PA_CL_VPORT_XOFFSET_8
347 0x00000000, // PA_CL_VPORT_YSCALE_8
348 0x00000000, // PA_CL_VPORT_YOFFSET_8
349 0x00000000, // PA_CL_VPORT_ZSCALE_8
350 0x00000000, // PA_CL_VPORT_ZOFFSET_8
351 0x00000000, // PA_CL_VPORT_XSCALE_9
352 0x00000000, // PA_CL_VPORT_XOFFSET_9
353 0x00000000, // PA_CL_VPORT_YSCALE_9
354 0x00000000, // PA_CL_VPORT_YOFFSET_9
355 0x00000000, // PA_CL_VPORT_ZSCALE_9
356 0x00000000, // PA_CL_VPORT_ZOFFSET_9
357 0x00000000, // PA_CL_VPORT_XSCALE_10
358 0x00000000, // PA_CL_VPORT_XOFFSET_10
359 0x00000000, // PA_CL_VPORT_YSCALE_10
360 0x00000000, // PA_CL_VPORT_YOFFSET_10
361 0x00000000, // PA_CL_VPORT_ZSCALE_10
362 0x00000000, // PA_CL_VPORT_ZOFFSET_10
363 0x00000000, // PA_CL_VPORT_XSCALE_11
364 0x00000000, // PA_CL_VPORT_XOFFSET_11
365 0x00000000, // PA_CL_VPORT_YSCALE_11
366 0x00000000, // PA_CL_VPORT_YOFFSET_11
367 0x00000000, // PA_CL_VPORT_ZSCALE_11
368 0x00000000, // PA_CL_VPORT_ZOFFSET_11
369 0x00000000, // PA_CL_VPORT_XSCALE_12
370 0x00000000, // PA_CL_VPORT_XOFFSET_12
371 0x00000000, // PA_CL_VPORT_YSCALE_12
372 0x00000000, // PA_CL_VPORT_YOFFSET_12
373 0x00000000, // PA_CL_VPORT_ZSCALE_12
374 0x00000000, // PA_CL_VPORT_ZOFFSET_12
375 0x00000000, // PA_CL_VPORT_XSCALE_13
376 0x00000000, // PA_CL_VPORT_XOFFSET_13
377 0x00000000, // PA_CL_VPORT_YSCALE_13
378 0x00000000, // PA_CL_VPORT_YOFFSET_13
379 0x00000000, // PA_CL_VPORT_ZSCALE_13
380 0x00000000, // PA_CL_VPORT_ZOFFSET_13
381 0x00000000, // PA_CL_VPORT_XSCALE_14
382 0x00000000, // PA_CL_VPORT_XOFFSET_14
383 0x00000000, // PA_CL_VPORT_YSCALE_14
384 0x00000000, // PA_CL_VPORT_YOFFSET_14
385 0x00000000, // PA_CL_VPORT_ZSCALE_14
386 0x00000000, // PA_CL_VPORT_ZOFFSET_14
387 0x00000000, // PA_CL_VPORT_XSCALE_15
388 0x00000000, // PA_CL_VPORT_XOFFSET_15
389 0x00000000, // PA_CL_VPORT_YSCALE_15
390 0x00000000, // PA_CL_VPORT_YOFFSET_15
391 0x00000000, // PA_CL_VPORT_ZSCALE_15
392 0x00000000, // PA_CL_VPORT_ZOFFSET_15
393 0x00000000, // PA_CL_UCP_0_X
394 0x00000000, // PA_CL_UCP_0_Y
395 0x00000000, // PA_CL_UCP_0_Z
396 0x00000000, // PA_CL_UCP_0_W
397 0x00000000, // PA_CL_UCP_1_X
398 0x00000000, // PA_CL_UCP_1_Y
399 0x00000000, // PA_CL_UCP_1_Z
400 0x00000000, // PA_CL_UCP_1_W
401 0x00000000, // PA_CL_UCP_2_X
402 0x00000000, // PA_CL_UCP_2_Y
403 0x00000000, // PA_CL_UCP_2_Z
404 0x00000000, // PA_CL_UCP_2_W
405 0x00000000, // PA_CL_UCP_3_X
406 0x00000000, // PA_CL_UCP_3_Y
407 0x00000000, // PA_CL_UCP_3_Z
408 0x00000000, // PA_CL_UCP_3_W
409 0x00000000, // PA_CL_UCP_4_X
410 0x00000000, // PA_CL_UCP_4_Y
411 0x00000000, // PA_CL_UCP_4_Z
412 0x00000000, // PA_CL_UCP_4_W
413 0x00000000, // PA_CL_UCP_5_X
414 0x00000000, // PA_CL_UCP_5_Y
415 0x00000000, // PA_CL_UCP_5_Z
416 0x00000000, // PA_CL_UCP_5_W
417 0x00000000, // SPI_VS_OUT_ID_0
418 0x00000000, // SPI_VS_OUT_ID_1
419 0x00000000, // SPI_VS_OUT_ID_2
420 0x00000000, // SPI_VS_OUT_ID_3
421 0x00000000, // SPI_VS_OUT_ID_4
422 0x00000000, // SPI_VS_OUT_ID_5
423 0x00000000, // SPI_VS_OUT_ID_6
424 0x00000000, // SPI_VS_OUT_ID_7
425 0x00000000, // SPI_VS_OUT_ID_8
426 0x00000000, // SPI_VS_OUT_ID_9
427 0x00000000, // SPI_PS_INPUT_CNTL_0
428 0x00000000, // SPI_PS_INPUT_CNTL_1
429 0x00000000, // SPI_PS_INPUT_CNTL_2
430 0x00000000, // SPI_PS_INPUT_CNTL_3
431 0x00000000, // SPI_PS_INPUT_CNTL_4
432 0x00000000, // SPI_PS_INPUT_CNTL_5
433 0x00000000, // SPI_PS_INPUT_CNTL_6
434 0x00000000, // SPI_PS_INPUT_CNTL_7
435 0x00000000, // SPI_PS_INPUT_CNTL_8
436 0x00000000, // SPI_PS_INPUT_CNTL_9
437 0x00000000, // SPI_PS_INPUT_CNTL_10
438 0x00000000, // SPI_PS_INPUT_CNTL_11
439 0x00000000, // SPI_PS_INPUT_CNTL_12
440 0x00000000, // SPI_PS_INPUT_CNTL_13
441 0x00000000, // SPI_PS_INPUT_CNTL_14
442 0x00000000, // SPI_PS_INPUT_CNTL_15
443 0x00000000, // SPI_PS_INPUT_CNTL_16
444 0x00000000, // SPI_PS_INPUT_CNTL_17
445 0x00000000, // SPI_PS_INPUT_CNTL_18
446 0x00000000, // SPI_PS_INPUT_CNTL_19
447 0x00000000, // SPI_PS_INPUT_CNTL_20
448 0x00000000, // SPI_PS_INPUT_CNTL_21
449 0x00000000, // SPI_PS_INPUT_CNTL_22
450 0x00000000, // SPI_PS_INPUT_CNTL_23
451 0x00000000, // SPI_PS_INPUT_CNTL_24
452 0x00000000, // SPI_PS_INPUT_CNTL_25
453 0x00000000, // SPI_PS_INPUT_CNTL_26
454 0x00000000, // SPI_PS_INPUT_CNTL_27
455 0x00000000, // SPI_PS_INPUT_CNTL_28
456 0x00000000, // SPI_PS_INPUT_CNTL_29
457 0x00000000, // SPI_PS_INPUT_CNTL_30
458 0x00000000, // SPI_PS_INPUT_CNTL_31
459 0x00000000, // SPI_VS_OUT_CONFIG
460 0x00000001, // SPI_THREAD_GROUPING
461 0x00000000, // SPI_PS_IN_CONTROL_0
462 0x00000000, // SPI_PS_IN_CONTROL_1
463 0x00000000, // SPI_INTERP_CONTROL_0
464 0x00000000, // SPI_INPUT_Z
465 0x00000000, // SPI_FOG_CNTL
466 0x00000000, // SPI_BARYC_CNTL
467 0x00000000, // SPI_PS_IN_CONTROL_2
468 0x00000000, // SPI_COMPUTE_INPUT_CNTL
469 0x00000000, // SPI_COMPUTE_NUM_THREAD_X
470 0x00000000, // SPI_COMPUTE_NUM_THREAD_Y
471 0x00000000, // SPI_COMPUTE_NUM_THREAD_Z
472 0, // HOLE
473 0, // HOLE
474 0, // HOLE
475 0, // HOLE
476 0, // HOLE
477 0, // HOLE
478 0, // HOLE
479 0, // HOLE
480 0, // HOLE
481 0, // HOLE
482 0x00000000, // GDS_ADDR_BASE
483 0x00003fff, // GDS_ADDR_SIZE
484 0x00000001, // GDS_ORDERED_WAVE_PER_SE
485 0x00000000, // GDS_APPEND_CONSUME_UAV0
486 0x00000000, // GDS_APPEND_CONSUME_UAV1
487 0x00000000, // GDS_APPEND_CONSUME_UAV2
488 0x00000000, // GDS_APPEND_CONSUME_UAV3
489 0x00000000, // GDS_APPEND_CONSUME_UAV4
490 0x00000000, // GDS_APPEND_CONSUME_UAV5
491 0x00000000, // GDS_APPEND_CONSUME_UAV6
492 0x00000000, // GDS_APPEND_CONSUME_UAV7
493 0x00000000, // GDS_APPEND_CONSUME_UAV8
494 0x00000000, // GDS_APPEND_CONSUME_UAV9
495 0x00000000, // GDS_APPEND_CONSUME_UAV10
496 0x00000000, // GDS_APPEND_CONSUME_UAV11
497 0, // HOLE
498 0, // HOLE
499 0, // HOLE
500 0, // HOLE
501 0, // HOLE
502 0, // HOLE
503 0, // HOLE
504 0, // HOLE
505 0, // HOLE
506 0x00000000, // CB_BLEND0_CONTROL
507 0x00000000, // CB_BLEND1_CONTROL
508 0x00000000, // CB_BLEND2_CONTROL
509 0x00000000, // CB_BLEND3_CONTROL
510 0x00000000, // CB_BLEND4_CONTROL
511 0x00000000, // CB_BLEND5_CONTROL
512 0x00000000, // CB_BLEND6_CONTROL
513 0x00000000, // CB_BLEND7_CONTROL
514};
515static const u32 SECT_CONTEXT_def_2[] =
516{
517 0x00000000, // PA_CL_POINT_X_RAD
518 0x00000000, // PA_CL_POINT_Y_RAD
519 0x00000000, // PA_CL_POINT_SIZE
520 0x00000000, // PA_CL_POINT_CULL_RAD
521 0x00000000, // VGT_DMA_BASE_HI
522 0x00000000, // VGT_DMA_BASE
523};
524static const u32 SECT_CONTEXT_def_3[] =
525{
526 0x00000000, // DB_DEPTH_CONTROL
527 0, // HOLE
528 0x00000000, // CB_COLOR_CONTROL
529 0x00000200, // DB_SHADER_CONTROL
530 0x00000000, // PA_CL_CLIP_CNTL
531 0x00000000, // PA_SU_SC_MODE_CNTL
532 0x00000000, // PA_CL_VTE_CNTL
533 0x00000000, // PA_CL_VS_OUT_CNTL
534 0x00000000, // PA_CL_NANINF_CNTL
535 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
536 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
537 0x00000000, // PA_SU_PRIM_FILTER_CNTL
538 0x00000000, // SQ_LSTMP_RING_ITEMSIZE
539 0x00000000, // SQ_HSTMP_RING_ITEMSIZE
540 0x00000000, // SQ_DYN_GPR_RESOURCE_LIMIT_1
541 0, // HOLE
542 0x00000000, // SQ_PGM_START_PS
543 0x00000000, // SQ_PGM_RESOURCES_PS
544 0x00000000, // SQ_PGM_RESOURCES_2_PS
545 0x00000000, // SQ_PGM_EXPORTS_PS
546 0, // HOLE
547 0, // HOLE
548 0, // HOLE
549 0x00000000, // SQ_PGM_START_VS
550 0x00000000, // SQ_PGM_RESOURCES_VS
551 0x00000000, // SQ_PGM_RESOURCES_2_VS
552 0, // HOLE
553 0, // HOLE
554 0, // HOLE
555 0x00000000, // SQ_PGM_START_GS
556 0x00000000, // SQ_PGM_RESOURCES_GS
557 0x00000000, // SQ_PGM_RESOURCES_2_GS
558 0, // HOLE
559 0, // HOLE
560 0, // HOLE
561 0x00000000, // SQ_PGM_START_ES
562 0x00000000, // SQ_PGM_RESOURCES_ES
563 0x00000000, // SQ_PGM_RESOURCES_2_ES
564 0, // HOLE
565 0, // HOLE
566 0, // HOLE
567 0x00000000, // SQ_PGM_START_FS
568 0x00000000, // SQ_PGM_RESOURCES_FS
569 0, // HOLE
570 0, // HOLE
571 0, // HOLE
572 0x00000000, // SQ_PGM_START_HS
573 0x00000000, // SQ_PGM_RESOURCES_HS
574 0x00000000, // SQ_PGM_RESOURCES_2_HS
575 0, // HOLE
576 0, // HOLE
577 0, // HOLE
578 0x00000000, // SQ_PGM_START_LS
579 0x00000000, // SQ_PGM_RESOURCES_LS
580 0x00000000, // SQ_PGM_RESOURCES_2_LS
581};
582static const u32 SECT_CONTEXT_def_4[] =
583{
584 0x00000000, // SQ_LDS_ALLOC
585 0x00000000, // SQ_LDS_ALLOC_PS
586 0x00000000, // SQ_VTX_SEMANTIC_CLEAR
587 0, // HOLE
588 0x00000000, // SQ_THREAD_TRACE_CTRL
589 0, // HOLE
590 0x00000000, // SQ_ESGS_RING_ITEMSIZE
591 0x00000000, // SQ_GSVS_RING_ITEMSIZE
592 0x00000000, // SQ_ESTMP_RING_ITEMSIZE
593 0x00000000, // SQ_GSTMP_RING_ITEMSIZE
594 0x00000000, // SQ_VSTMP_RING_ITEMSIZE
595 0x00000000, // SQ_PSTMP_RING_ITEMSIZE
596 0, // HOLE
597 0x00000000, // SQ_GS_VERT_ITEMSIZE
598 0x00000000, // SQ_GS_VERT_ITEMSIZE_1
599 0x00000000, // SQ_GS_VERT_ITEMSIZE_2
600 0x00000000, // SQ_GS_VERT_ITEMSIZE_3
601 0x00000000, // SQ_GSVS_RING_OFFSET_1
602 0x00000000, // SQ_GSVS_RING_OFFSET_2
603 0x00000000, // SQ_GSVS_RING_OFFSET_3
604 0, // HOLE
605 0, // HOLE
606 0x00000000, // SQ_ALU_CONST_CACHE_PS_0
607 0x00000000, // SQ_ALU_CONST_CACHE_PS_1
608 0x00000000, // SQ_ALU_CONST_CACHE_PS_2
609 0x00000000, // SQ_ALU_CONST_CACHE_PS_3
610 0x00000000, // SQ_ALU_CONST_CACHE_PS_4
611 0x00000000, // SQ_ALU_CONST_CACHE_PS_5
612 0x00000000, // SQ_ALU_CONST_CACHE_PS_6
613 0x00000000, // SQ_ALU_CONST_CACHE_PS_7
614 0x00000000, // SQ_ALU_CONST_CACHE_PS_8
615 0x00000000, // SQ_ALU_CONST_CACHE_PS_9
616 0x00000000, // SQ_ALU_CONST_CACHE_PS_10
617 0x00000000, // SQ_ALU_CONST_CACHE_PS_11
618 0x00000000, // SQ_ALU_CONST_CACHE_PS_12
619 0x00000000, // SQ_ALU_CONST_CACHE_PS_13
620 0x00000000, // SQ_ALU_CONST_CACHE_PS_14
621 0x00000000, // SQ_ALU_CONST_CACHE_PS_15
622 0x00000000, // SQ_ALU_CONST_CACHE_VS_0
623 0x00000000, // SQ_ALU_CONST_CACHE_VS_1
624 0x00000000, // SQ_ALU_CONST_CACHE_VS_2
625 0x00000000, // SQ_ALU_CONST_CACHE_VS_3
626 0x00000000, // SQ_ALU_CONST_CACHE_VS_4
627 0x00000000, // SQ_ALU_CONST_CACHE_VS_5
628 0x00000000, // SQ_ALU_CONST_CACHE_VS_6
629 0x00000000, // SQ_ALU_CONST_CACHE_VS_7
630 0x00000000, // SQ_ALU_CONST_CACHE_VS_8
631 0x00000000, // SQ_ALU_CONST_CACHE_VS_9
632 0x00000000, // SQ_ALU_CONST_CACHE_VS_10
633 0x00000000, // SQ_ALU_CONST_CACHE_VS_11
634 0x00000000, // SQ_ALU_CONST_CACHE_VS_12
635 0x00000000, // SQ_ALU_CONST_CACHE_VS_13
636 0x00000000, // SQ_ALU_CONST_CACHE_VS_14
637 0x00000000, // SQ_ALU_CONST_CACHE_VS_15
638 0x00000000, // SQ_ALU_CONST_CACHE_GS_0
639 0x00000000, // SQ_ALU_CONST_CACHE_GS_1
640 0x00000000, // SQ_ALU_CONST_CACHE_GS_2
641 0x00000000, // SQ_ALU_CONST_CACHE_GS_3
642 0x00000000, // SQ_ALU_CONST_CACHE_GS_4
643 0x00000000, // SQ_ALU_CONST_CACHE_GS_5
644 0x00000000, // SQ_ALU_CONST_CACHE_GS_6
645 0x00000000, // SQ_ALU_CONST_CACHE_GS_7
646 0x00000000, // SQ_ALU_CONST_CACHE_GS_8
647 0x00000000, // SQ_ALU_CONST_CACHE_GS_9
648 0x00000000, // SQ_ALU_CONST_CACHE_GS_10
649 0x00000000, // SQ_ALU_CONST_CACHE_GS_11
650 0x00000000, // SQ_ALU_CONST_CACHE_GS_12
651 0x00000000, // SQ_ALU_CONST_CACHE_GS_13
652 0x00000000, // SQ_ALU_CONST_CACHE_GS_14
653 0x00000000, // SQ_ALU_CONST_CACHE_GS_15
654 0x00000000, // PA_SU_POINT_SIZE
655 0x00000000, // PA_SU_POINT_MINMAX
656 0x00000000, // PA_SU_LINE_CNTL
657 0x00000000, // PA_SC_LINE_STIPPLE
658 0x00000000, // VGT_OUTPUT_PATH_CNTL
659 0x00000000, // VGT_HOS_CNTL
660 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
661 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
662 0x00000000, // VGT_HOS_REUSE_DEPTH
663 0x00000000, // VGT_GROUP_PRIM_TYPE
664 0x00000000, // VGT_GROUP_FIRST_DECR
665 0x00000000, // VGT_GROUP_DECR
666 0x00000000, // VGT_GROUP_VECT_0_CNTL
667 0x00000000, // VGT_GROUP_VECT_1_CNTL
668 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
669 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
670 0x00000000, // VGT_GS_MODE
671 0, // HOLE
672 0x00000000, // PA_SC_MODE_CNTL_0
673 0x00000000, // PA_SC_MODE_CNTL_1
674 0x00000000, // VGT_ENHANCE
675 0x00000000, // VGT_GS_PER_ES
676 0x00000000, // VGT_ES_PER_GS
677 0x00000000, // VGT_GS_PER_VS
678 0, // HOLE
679 0, // HOLE
680 0, // HOLE
681 0x00000000, // VGT_GS_OUT_PRIM_TYPE
682};
// SECT_CONTEXT_def_5: power-on defaults for the VGT DMA context-register
// range (one u32 per register slot; "HOLE" pads an unimplemented offset).
// Paired with base offset 0x0000a29e / count 5 in SECT_CONTEXT_defs below.
683static const u32 SECT_CONTEXT_def_5[] =
684{
685 0x00000000, // VGT_DMA_MAX_SIZE
686 0x00000000, // VGT_DMA_INDEX_TYPE
687 0, // HOLE
688 0x00000000, // VGT_PRIMITIVEID_EN
689 0x00000000, // VGT_DMA_NUM_INSTANCES
690};
// SECT_CONTEXT_def_6: defaults for the VGT / DB-htile / stream-out
// context-register range (56 consecutive dword slots; "HOLE" pads an
// unimplemented offset).  Paired with base offset 0x0000a2a5 / count 56
// in SECT_CONTEXT_defs below.
691static const u32 SECT_CONTEXT_def_6[] =
692{
693 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
694 0, // HOLE
695 0, // HOLE
696 0x00000000, // VGT_INSTANCE_STEP_RATE_0
697 0x00000000, // VGT_INSTANCE_STEP_RATE_1
698 0, // HOLE
699 0, // HOLE
700 0, // HOLE
701 0x00000000, // VGT_REUSE_OFF
702 0x00000000, // VGT_VTX_CNT_EN
703 0x00000000, // DB_HTILE_SURFACE
704 0x00000000, // DB_SRESULTS_COMPARE_STATE0
705 0x00000000, // DB_SRESULTS_COMPARE_STATE1
706 0x00000000, // DB_PRELOAD_CONTROL
707 0, // HOLE
708 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
709 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
710 0x00000000, // VGT_STRMOUT_BUFFER_BASE_0
711 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
712 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
713 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
714 0x00000000, // VGT_STRMOUT_BUFFER_BASE_1
715 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
716 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
717 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
718 0x00000000, // VGT_STRMOUT_BUFFER_BASE_2
719 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
720 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
721 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
722 0x00000000, // VGT_STRMOUT_BUFFER_BASE_3
723 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
724 0x00000000, // VGT_STRMOUT_BASE_OFFSET_0
725 0x00000000, // VGT_STRMOUT_BASE_OFFSET_1
726 0x00000000, // VGT_STRMOUT_BASE_OFFSET_2
727 0x00000000, // VGT_STRMOUT_BASE_OFFSET_3
728 0, // HOLE
729 0, // HOLE
730 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
731 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
732 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
733 0, // HOLE
734 0x00000000, // VGT_GS_MAX_VERT_OUT
735 0, // HOLE
736 0, // HOLE
737 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_0
738 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_1
739 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_2
740 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_3
741 0x00000000, // VGT_SHADER_STAGES_EN
742 0x00000000, // VGT_LS_HS_CONFIG
743 0x00000000, // VGT_LS_SIZE
744 0x00000000, // VGT_HS_SIZE
745 0x00000000, // VGT_LS_HS_ALLOC
746 0x00000000, // VGT_HS_PATCH_CONST
747 0x00000000, // VGT_TF_PARAM
748 0x00000000, // DB_ALPHA_TO_MASK
749};
// SECT_CONTEXT_def_7: defaults for the PA_SU poly-offset / scan-converter /
// CB color-buffer / SQ ALU-const context-register range (290 consecutive
// dword slots; "HOLE" pads unimplemented offsets).  Non-zero entries are the
// hardware reset defaults (e.g. 0x3f800000 is 1.0f for the guard-band
// registers).  Paired with base offset 0x0000a2de / count 290 in
// SECT_CONTEXT_defs below.
750static const u32 SECT_CONTEXT_def_7[] =
751{
752 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
753 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
754 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
755 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
756 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
757 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
758 0x00000000, // VGT_GS_INSTANCE_CNT
759 0x00000000, // VGT_STRMOUT_CONFIG
760 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
761 0x00000000, // CB_IMMED0_BASE
762 0x00000000, // CB_IMMED1_BASE
763 0x00000000, // CB_IMMED2_BASE
764 0x00000000, // CB_IMMED3_BASE
765 0x00000000, // CB_IMMED4_BASE
766 0x00000000, // CB_IMMED5_BASE
767 0x00000000, // CB_IMMED6_BASE
768 0x00000000, // CB_IMMED7_BASE
769 0x00000000, // CB_IMMED8_BASE
770 0x00000000, // CB_IMMED9_BASE
771 0x00000000, // CB_IMMED10_BASE
772 0x00000000, // CB_IMMED11_BASE
773 0, // HOLE
774 0, // HOLE
775 0, // HOLE
776 0, // HOLE
777 0, // HOLE
778 0, // HOLE
779 0, // HOLE
780 0, // HOLE
781 0, // HOLE
782 0, // HOLE
783 0, // HOLE
784 0, // HOLE
785 0, // HOLE
786 0x00001000, // PA_SC_LINE_CNTL
787 0x00000000, // PA_SC_AA_CONFIG
788 0x00000005, // PA_SU_VTX_CNTL
789 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
790 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
791 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
792 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
793 0x00000000, // PA_SC_AA_SAMPLE_LOCS_0
794 0x00000000, // PA_SC_AA_SAMPLE_LOCS_1
795 0x00000000, // PA_SC_AA_SAMPLE_LOCS_2
796 0x00000000, // PA_SC_AA_SAMPLE_LOCS_3
797 0x00000000, // PA_SC_AA_SAMPLE_LOCS_4
798 0x00000000, // PA_SC_AA_SAMPLE_LOCS_5
799 0x00000000, // PA_SC_AA_SAMPLE_LOCS_6
800 0x00000000, // PA_SC_AA_SAMPLE_LOCS_7
801 0xffffffff, // PA_SC_AA_MASK
802 0x00000000, // CB_CLRCMP_CONTROL
803 0x00000000, // CB_CLRCMP_SRC
804 0x00000000, // CB_CLRCMP_DST
805 0x00000000, // CB_CLRCMP_MSK
806 0, // HOLE
807 0, // HOLE
808 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
809 0x00000010, // VGT_OUT_DEALLOC_CNTL
810 0x00000000, // CB_COLOR0_BASE
811 0x00000000, // CB_COLOR0_PITCH
812 0x00000000, // CB_COLOR0_SLICE
813 0x00000000, // CB_COLOR0_VIEW
814 0x00000000, // CB_COLOR0_INFO
815 0x00000000, // CB_COLOR0_ATTRIB
816 0x00000000, // CB_COLOR0_DIM
817 0x00000000, // CB_COLOR0_CMASK
818 0x00000000, // CB_COLOR0_CMASK_SLICE
819 0x00000000, // CB_COLOR0_FMASK
820 0x00000000, // CB_COLOR0_FMASK_SLICE
821 0x00000000, // CB_COLOR0_CLEAR_WORD0
822 0x00000000, // CB_COLOR0_CLEAR_WORD1
823 0x00000000, // CB_COLOR0_CLEAR_WORD2
824 0x00000000, // CB_COLOR0_CLEAR_WORD3
825 0x00000000, // CB_COLOR1_BASE
826 0x00000000, // CB_COLOR1_PITCH
827 0x00000000, // CB_COLOR1_SLICE
828 0x00000000, // CB_COLOR1_VIEW
829 0x00000000, // CB_COLOR1_INFO
830 0x00000000, // CB_COLOR1_ATTRIB
831 0x00000000, // CB_COLOR1_DIM
832 0x00000000, // CB_COLOR1_CMASK
833 0x00000000, // CB_COLOR1_CMASK_SLICE
834 0x00000000, // CB_COLOR1_FMASK
835 0x00000000, // CB_COLOR1_FMASK_SLICE
836 0x00000000, // CB_COLOR1_CLEAR_WORD0
837 0x00000000, // CB_COLOR1_CLEAR_WORD1
838 0x00000000, // CB_COLOR1_CLEAR_WORD2
839 0x00000000, // CB_COLOR1_CLEAR_WORD3
840 0x00000000, // CB_COLOR2_BASE
841 0x00000000, // CB_COLOR2_PITCH
842 0x00000000, // CB_COLOR2_SLICE
843 0x00000000, // CB_COLOR2_VIEW
844 0x00000000, // CB_COLOR2_INFO
845 0x00000000, // CB_COLOR2_ATTRIB
846 0x00000000, // CB_COLOR2_DIM
847 0x00000000, // CB_COLOR2_CMASK
848 0x00000000, // CB_COLOR2_CMASK_SLICE
849 0x00000000, // CB_COLOR2_FMASK
850 0x00000000, // CB_COLOR2_FMASK_SLICE
851 0x00000000, // CB_COLOR2_CLEAR_WORD0
852 0x00000000, // CB_COLOR2_CLEAR_WORD1
853 0x00000000, // CB_COLOR2_CLEAR_WORD2
854 0x00000000, // CB_COLOR2_CLEAR_WORD3
855 0x00000000, // CB_COLOR3_BASE
856 0x00000000, // CB_COLOR3_PITCH
857 0x00000000, // CB_COLOR3_SLICE
858 0x00000000, // CB_COLOR3_VIEW
859 0x00000000, // CB_COLOR3_INFO
860 0x00000000, // CB_COLOR3_ATTRIB
861 0x00000000, // CB_COLOR3_DIM
862 0x00000000, // CB_COLOR3_CMASK
863 0x00000000, // CB_COLOR3_CMASK_SLICE
864 0x00000000, // CB_COLOR3_FMASK
865 0x00000000, // CB_COLOR3_FMASK_SLICE
866 0x00000000, // CB_COLOR3_CLEAR_WORD0
867 0x00000000, // CB_COLOR3_CLEAR_WORD1
868 0x00000000, // CB_COLOR3_CLEAR_WORD2
869 0x00000000, // CB_COLOR3_CLEAR_WORD3
870 0x00000000, // CB_COLOR4_BASE
871 0x00000000, // CB_COLOR4_PITCH
872 0x00000000, // CB_COLOR4_SLICE
873 0x00000000, // CB_COLOR4_VIEW
874 0x00000000, // CB_COLOR4_INFO
875 0x00000000, // CB_COLOR4_ATTRIB
876 0x00000000, // CB_COLOR4_DIM
877 0x00000000, // CB_COLOR4_CMASK
878 0x00000000, // CB_COLOR4_CMASK_SLICE
879 0x00000000, // CB_COLOR4_FMASK
880 0x00000000, // CB_COLOR4_FMASK_SLICE
881 0x00000000, // CB_COLOR4_CLEAR_WORD0
882 0x00000000, // CB_COLOR4_CLEAR_WORD1
883 0x00000000, // CB_COLOR4_CLEAR_WORD2
884 0x00000000, // CB_COLOR4_CLEAR_WORD3
885 0x00000000, // CB_COLOR5_BASE
886 0x00000000, // CB_COLOR5_PITCH
887 0x00000000, // CB_COLOR5_SLICE
888 0x00000000, // CB_COLOR5_VIEW
889 0x00000000, // CB_COLOR5_INFO
890 0x00000000, // CB_COLOR5_ATTRIB
891 0x00000000, // CB_COLOR5_DIM
892 0x00000000, // CB_COLOR5_CMASK
893 0x00000000, // CB_COLOR5_CMASK_SLICE
894 0x00000000, // CB_COLOR5_FMASK
895 0x00000000, // CB_COLOR5_FMASK_SLICE
896 0x00000000, // CB_COLOR5_CLEAR_WORD0
897 0x00000000, // CB_COLOR5_CLEAR_WORD1
898 0x00000000, // CB_COLOR5_CLEAR_WORD2
899 0x00000000, // CB_COLOR5_CLEAR_WORD3
900 0x00000000, // CB_COLOR6_BASE
901 0x00000000, // CB_COLOR6_PITCH
902 0x00000000, // CB_COLOR6_SLICE
903 0x00000000, // CB_COLOR6_VIEW
904 0x00000000, // CB_COLOR6_INFO
905 0x00000000, // CB_COLOR6_ATTRIB
906 0x00000000, // CB_COLOR6_DIM
907 0x00000000, // CB_COLOR6_CMASK
908 0x00000000, // CB_COLOR6_CMASK_SLICE
909 0x00000000, // CB_COLOR6_FMASK
910 0x00000000, // CB_COLOR6_FMASK_SLICE
911 0x00000000, // CB_COLOR6_CLEAR_WORD0
912 0x00000000, // CB_COLOR6_CLEAR_WORD1
913 0x00000000, // CB_COLOR6_CLEAR_WORD2
914 0x00000000, // CB_COLOR6_CLEAR_WORD3
915 0x00000000, // CB_COLOR7_BASE
916 0x00000000, // CB_COLOR7_PITCH
917 0x00000000, // CB_COLOR7_SLICE
918 0x00000000, // CB_COLOR7_VIEW
919 0x00000000, // CB_COLOR7_INFO
920 0x00000000, // CB_COLOR7_ATTRIB
921 0x00000000, // CB_COLOR7_DIM
922 0x00000000, // CB_COLOR7_CMASK
923 0x00000000, // CB_COLOR7_CMASK_SLICE
924 0x00000000, // CB_COLOR7_FMASK
925 0x00000000, // CB_COLOR7_FMASK_SLICE
926 0x00000000, // CB_COLOR7_CLEAR_WORD0
927 0x00000000, // CB_COLOR7_CLEAR_WORD1
928 0x00000000, // CB_COLOR7_CLEAR_WORD2
929 0x00000000, // CB_COLOR7_CLEAR_WORD3
930 0x00000000, // CB_COLOR8_BASE
931 0x00000000, // CB_COLOR8_PITCH
932 0x00000000, // CB_COLOR8_SLICE
933 0x00000000, // CB_COLOR8_VIEW
934 0x00000000, // CB_COLOR8_INFO
935 0x00000000, // CB_COLOR8_ATTRIB
936 0x00000000, // CB_COLOR8_DIM
937 0x00000000, // CB_COLOR9_BASE
938 0x00000000, // CB_COLOR9_PITCH
939 0x00000000, // CB_COLOR9_SLICE
940 0x00000000, // CB_COLOR9_VIEW
941 0x00000000, // CB_COLOR9_INFO
942 0x00000000, // CB_COLOR9_ATTRIB
943 0x00000000, // CB_COLOR9_DIM
944 0x00000000, // CB_COLOR10_BASE
945 0x00000000, // CB_COLOR10_PITCH
946 0x00000000, // CB_COLOR10_SLICE
947 0x00000000, // CB_COLOR10_VIEW
948 0x00000000, // CB_COLOR10_INFO
949 0x00000000, // CB_COLOR10_ATTRIB
950 0x00000000, // CB_COLOR10_DIM
951 0x00000000, // CB_COLOR11_BASE
952 0x00000000, // CB_COLOR11_PITCH
953 0x00000000, // CB_COLOR11_SLICE
954 0x00000000, // CB_COLOR11_VIEW
955 0x00000000, // CB_COLOR11_INFO
956 0x00000000, // CB_COLOR11_ATTRIB
957 0x00000000, // CB_COLOR11_DIM
958 0, // HOLE
959 0, // HOLE
960 0, // HOLE
961 0, // HOLE
962 0, // HOLE
963 0, // HOLE
964 0, // HOLE
965 0, // HOLE
966 0, // HOLE
967 0, // HOLE
968 0, // HOLE
969 0, // HOLE
970 0, // HOLE
971 0, // HOLE
972 0, // HOLE
973 0, // HOLE
974 0, // HOLE
975 0, // HOLE
976 0, // HOLE
977 0, // HOLE
978 0x00000000, // SQ_ALU_CONST_CACHE_HS_0
979 0x00000000, // SQ_ALU_CONST_CACHE_HS_1
980 0x00000000, // SQ_ALU_CONST_CACHE_HS_2
981 0x00000000, // SQ_ALU_CONST_CACHE_HS_3
982 0x00000000, // SQ_ALU_CONST_CACHE_HS_4
983 0x00000000, // SQ_ALU_CONST_CACHE_HS_5
984 0x00000000, // SQ_ALU_CONST_CACHE_HS_6
985 0x00000000, // SQ_ALU_CONST_CACHE_HS_7
986 0x00000000, // SQ_ALU_CONST_CACHE_HS_8
987 0x00000000, // SQ_ALU_CONST_CACHE_HS_9
988 0x00000000, // SQ_ALU_CONST_CACHE_HS_10
989 0x00000000, // SQ_ALU_CONST_CACHE_HS_11
990 0x00000000, // SQ_ALU_CONST_CACHE_HS_12
991 0x00000000, // SQ_ALU_CONST_CACHE_HS_13
992 0x00000000, // SQ_ALU_CONST_CACHE_HS_14
993 0x00000000, // SQ_ALU_CONST_CACHE_HS_15
994 0x00000000, // SQ_ALU_CONST_CACHE_LS_0
995 0x00000000, // SQ_ALU_CONST_CACHE_LS_1
996 0x00000000, // SQ_ALU_CONST_CACHE_LS_2
997 0x00000000, // SQ_ALU_CONST_CACHE_LS_3
998 0x00000000, // SQ_ALU_CONST_CACHE_LS_4
999 0x00000000, // SQ_ALU_CONST_CACHE_LS_5
1000 0x00000000, // SQ_ALU_CONST_CACHE_LS_6
1001 0x00000000, // SQ_ALU_CONST_CACHE_LS_7
1002 0x00000000, // SQ_ALU_CONST_CACHE_LS_8
1003 0x00000000, // SQ_ALU_CONST_CACHE_LS_9
1004 0x00000000, // SQ_ALU_CONST_CACHE_LS_10
1005 0x00000000, // SQ_ALU_CONST_CACHE_LS_11
1006 0x00000000, // SQ_ALU_CONST_CACHE_LS_12
1007 0x00000000, // SQ_ALU_CONST_CACHE_LS_13
1008 0x00000000, // SQ_ALU_CONST_CACHE_LS_14
1009 0x00000000, // SQ_ALU_CONST_CACHE_LS_15
1010 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_0
1011 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_1
1012 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_2
1013 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_3
1014 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_4
1015 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_5
1016 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_6
1017 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_7
1018 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_8
1019 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_9
1020 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_10
1021 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_11
1022 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_12
1023 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_13
1024 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_14
1025 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_15
1026 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_0
1027 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_1
1028 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_2
1029 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_3
1030 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_4
1031 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_5
1032 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_6
1033 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_7
1034 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_8
1035 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_9
1036 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_10
1037 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_11
1038 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_12
1039 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_13
1040 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_14
1041 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_15
1042};
// SECT_CONTEXT_defs: extent table mapping each SECT_CONTEXT default array to
// its register-space base offset and dword count.  Offsets are contiguous
// except for deliberate gaps between extents; the {0, 0, 0} entry terminates
// the list for the walker.
1043static const struct cs_extent_def SECT_CONTEXT_defs[] =
1044{
1045 {SECT_CONTEXT_def_1, 0x0000a000, 488 },
1046 {SECT_CONTEXT_def_2, 0x0000a1f5, 6 },
1047 {SECT_CONTEXT_def_3, 0x0000a200, 55 },
1048 {SECT_CONTEXT_def_4, 0x0000a23a, 98 },
1049 {SECT_CONTEXT_def_5, 0x0000a29e, 5 },
1050 {SECT_CONTEXT_def_6, 0x0000a2a5, 56 },
1051 {SECT_CONTEXT_def_7, 0x0000a2de, 290 },
1052 { 0, 0, 0 }
1053};
// SECT_CLEAR_def_1: clear values for the SQ sampler/resource/loop-bool
// clear registers — all-ones, unlike the mostly-zero context defaults.
// Paired with base offset 0x0000ffc0 / count 3 in SECT_CLEAR_defs below.
1054static const u32 SECT_CLEAR_def_1[] =
1055{
1056 0xffffffff, // SQ_TEX_SAMPLER_CLEAR
1057 0xffffffff, // SQ_TEX_RESOURCE_CLEAR
1058 0xffffffff, // SQ_LOOP_BOOL_CLEAR
1059};
// SECT_CLEAR_defs: single-extent table for the SQ clear registers
// (offset 0x0000ffc0, 3 dwords), terminated by {0, 0, 0}.
1060static const struct cs_extent_def SECT_CLEAR_defs[] =
1061{
1062 {SECT_CLEAR_def_1, 0x0000ffc0, 3 },
1063 { 0, 0, 0 }
1064};
// SECT_CTRLCONST_def_1: defaults for the two SQ vertex control constants.
// Paired with base offset 0x0000f3fc / count 2 in SECT_CTRLCONST_defs below.
1065static const u32 SECT_CTRLCONST_def_1[] =
1066{
1067 0x00000000, // SQ_VTX_BASE_VTX_LOC
1068 0x00000000, // SQ_VTX_START_INST_LOC
1069};
// SECT_CTRLCONST_defs: single-extent table for the SQ control constants
// (offset 0x0000f3fc, 2 dwords), terminated by {0, 0, 0}.
1070static const struct cs_extent_def SECT_CTRLCONST_defs[] =
1071{
1072 {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
1073 { 0, 0, 0 }
1074};
// evergreen_cs_data: top-level clear-state description — one (extent table,
// section id) pair per register section, terminated by {0, SECT_NONE}.
1075struct cs_section_def evergreen_cs_data[] = {
1076 { SECT_CONTEXT_defs, SECT_CONTEXT },
1077 { SECT_CLEAR_defs, SECT_CLEAR },
1078 { SECT_CTRLCONST_defs, SECT_CTRLCONST },
1079 { 0, SECT_NONE }
1080};
diff --git a/drivers/gpu/drm/radeon/clearstate_si.h b/drivers/gpu/drm/radeon/clearstate_si.h
new file mode 100644
index 000000000000..b994cb2a35a0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_si.h
@@ -0,0 +1,941 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
// si_SECT_CONTEXT_def_1 (clearstate_si.h): defaults for the first SI (Southern
// Islands) context-register extent — DB depth/stencil state, screen/window
// scissors, and the 16 viewport scissor + Z-range pairs.  One u32 per dword
// slot; "HOLE" pads unimplemented offsets.  Non-zero entries are hardware
// reset defaults (0x3f800000 is 1.0f for PA_SC_VPORT_ZMAX_*).
// NOTE(review): consumed via a cs_extent_def table (si_SECT_CONTEXT_defs)
// that lies outside this hunk — base offset/count pairing not visible here.
24static const u32 si_SECT_CONTEXT_def_1[] =
25{
26 0x00000000, // DB_RENDER_CONTROL
27 0x00000000, // DB_COUNT_CONTROL
28 0x00000000, // DB_DEPTH_VIEW
29 0x00000000, // DB_RENDER_OVERRIDE
30 0x00000000, // DB_RENDER_OVERRIDE2
31 0x00000000, // DB_HTILE_DATA_BASE
32 0, // HOLE
33 0, // HOLE
34 0x00000000, // DB_DEPTH_BOUNDS_MIN
35 0x00000000, // DB_DEPTH_BOUNDS_MAX
36 0x00000000, // DB_STENCIL_CLEAR
37 0x00000000, // DB_DEPTH_CLEAR
38 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
39 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
40 0, // HOLE
41 0x00000000, // DB_DEPTH_INFO
42 0x00000000, // DB_Z_INFO
43 0x00000000, // DB_STENCIL_INFO
44 0x00000000, // DB_Z_READ_BASE
45 0x00000000, // DB_STENCIL_READ_BASE
46 0x00000000, // DB_Z_WRITE_BASE
47 0x00000000, // DB_STENCIL_WRITE_BASE
48 0x00000000, // DB_DEPTH_SIZE
49 0x00000000, // DB_DEPTH_SLICE
50 0, // HOLE
51 0, // HOLE
52 0, // HOLE
53 0, // HOLE
54 0, // HOLE
55 0, // HOLE
56 0, // HOLE
57 0, // HOLE
58 0x00000000, // TA_BC_BASE_ADDR
59 0, // HOLE
60 0, // HOLE
61 0, // HOLE
62 0, // HOLE
63 0, // HOLE
64 0, // HOLE
65 0, // HOLE
66 0, // HOLE
67 0, // HOLE
68 0, // HOLE
69 0, // HOLE
70 0, // HOLE
71 0, // HOLE
72 0, // HOLE
73 0, // HOLE
74 0, // HOLE
75 0, // HOLE
76 0, // HOLE
77 0, // HOLE
78 0, // HOLE
79 0, // HOLE
80 0, // HOLE
81 0, // HOLE
82 0, // HOLE
83 0, // HOLE
84 0, // HOLE
85 0, // HOLE
86 0, // HOLE
87 0, // HOLE
88 0, // HOLE
89 0, // HOLE
90 0, // HOLE
91 0, // HOLE
92 0, // HOLE
93 0, // HOLE
94 0, // HOLE
95 0, // HOLE
96 0, // HOLE
97 0, // HOLE
98 0, // HOLE
99 0, // HOLE
100 0, // HOLE
101 0, // HOLE
102 0, // HOLE
103 0, // HOLE
104 0, // HOLE
105 0, // HOLE
106 0, // HOLE
107 0, // HOLE
108 0, // HOLE
109 0, // HOLE
110 0, // HOLE
111 0, // HOLE
112 0, // HOLE
113 0, // HOLE
114 0, // HOLE
115 0, // HOLE
116 0, // HOLE
117 0, // HOLE
118 0, // HOLE
119 0, // HOLE
120 0, // HOLE
121 0, // HOLE
122 0, // HOLE
123 0, // HOLE
124 0, // HOLE
125 0, // HOLE
126 0, // HOLE
127 0, // HOLE
128 0, // HOLE
129 0, // HOLE
130 0, // HOLE
131 0, // HOLE
132 0, // HOLE
133 0, // HOLE
134 0, // HOLE
135 0, // HOLE
136 0, // HOLE
137 0, // HOLE
138 0, // HOLE
139 0, // HOLE
140 0, // HOLE
141 0, // HOLE
142 0, // HOLE
143 0, // HOLE
144 0, // HOLE
145 0, // HOLE
146 0, // HOLE
147 0, // HOLE
148 0, // HOLE
149 0, // HOLE
150 0, // HOLE
151 0, // HOLE
152 0x00000000, // COHER_DEST_BASE_2
153 0x00000000, // COHER_DEST_BASE_3
154 0x00000000, // PA_SC_WINDOW_OFFSET
155 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
156 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
157 0x0000ffff, // PA_SC_CLIPRECT_RULE
158 0x00000000, // PA_SC_CLIPRECT_0_TL
159 0x40004000, // PA_SC_CLIPRECT_0_BR
160 0x00000000, // PA_SC_CLIPRECT_1_TL
161 0x40004000, // PA_SC_CLIPRECT_1_BR
162 0x00000000, // PA_SC_CLIPRECT_2_TL
163 0x40004000, // PA_SC_CLIPRECT_2_BR
164 0x00000000, // PA_SC_CLIPRECT_3_TL
165 0x40004000, // PA_SC_CLIPRECT_3_BR
166 0xaa99aaaa, // PA_SC_EDGERULE
167 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
168 0xffffffff, // CB_TARGET_MASK
169 0xffffffff, // CB_SHADER_MASK
170 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
171 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
172 0x00000000, // COHER_DEST_BASE_0
173 0x00000000, // COHER_DEST_BASE_1
174 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
175 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
176 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
177 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
178 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
179 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
180 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
181 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
182 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
183 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
184 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
185 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
186 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
187 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
188 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
189 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
190 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
191 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
192 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
193 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
194 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
195 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
196 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
197 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
198 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
199 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
200 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
201 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
202 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
203 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
204 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
205 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
206 0x00000000, // PA_SC_VPORT_ZMIN_0
207 0x3f800000, // PA_SC_VPORT_ZMAX_0
208 0x00000000, // PA_SC_VPORT_ZMIN_1
209 0x3f800000, // PA_SC_VPORT_ZMAX_1
210 0x00000000, // PA_SC_VPORT_ZMIN_2
211 0x3f800000, // PA_SC_VPORT_ZMAX_2
212 0x00000000, // PA_SC_VPORT_ZMIN_3
213 0x3f800000, // PA_SC_VPORT_ZMAX_3
214 0x00000000, // PA_SC_VPORT_ZMIN_4
215 0x3f800000, // PA_SC_VPORT_ZMAX_4
216 0x00000000, // PA_SC_VPORT_ZMIN_5
217 0x3f800000, // PA_SC_VPORT_ZMAX_5
218 0x00000000, // PA_SC_VPORT_ZMIN_6
219 0x3f800000, // PA_SC_VPORT_ZMAX_6
220 0x00000000, // PA_SC_VPORT_ZMIN_7
221 0x3f800000, // PA_SC_VPORT_ZMAX_7
222 0x00000000, // PA_SC_VPORT_ZMIN_8
223 0x3f800000, // PA_SC_VPORT_ZMAX_8
224 0x00000000, // PA_SC_VPORT_ZMIN_9
225 0x3f800000, // PA_SC_VPORT_ZMAX_9
226 0x00000000, // PA_SC_VPORT_ZMIN_10
227 0x3f800000, // PA_SC_VPORT_ZMAX_10
228 0x00000000, // PA_SC_VPORT_ZMIN_11
229 0x3f800000, // PA_SC_VPORT_ZMAX_11
230 0x00000000, // PA_SC_VPORT_ZMIN_12
231 0x3f800000, // PA_SC_VPORT_ZMAX_12
232 0x00000000, // PA_SC_VPORT_ZMIN_13
233 0x3f800000, // PA_SC_VPORT_ZMAX_13
234 0x00000000, // PA_SC_VPORT_ZMIN_14
235 0x3f800000, // PA_SC_VPORT_ZMAX_14
236 0x00000000, // PA_SC_VPORT_ZMIN_15
237 0x3f800000, // PA_SC_VPORT_ZMAX_15
238};
// si_SECT_CONTEXT_def_2: defaults for the second SI context-register extent —
// CP context ids, VGT index limits, blend constants, stencil masks, the 16
// PA_CL viewport transform sets, the 6 user clip planes, the 32 SPI PS input
// controls, SPI shader state, and the 8 CB blend controls.  "HOLE" pads
// unimplemented offsets.
// NOTE(review): consumed via a cs_extent_def table (si_SECT_CONTEXT_defs)
// that lies outside this hunk — base offset/count pairing not visible here.
239static const u32 si_SECT_CONTEXT_def_2[] =
240{
241 0x00000000, // CP_PERFMON_CNTX_CNTL
242 0x00000000, // CP_RINGID
243 0x00000000, // CP_VMID
244 0, // HOLE
245 0, // HOLE
246 0, // HOLE
247 0, // HOLE
248 0, // HOLE
249 0, // HOLE
250 0, // HOLE
251 0, // HOLE
252 0, // HOLE
253 0, // HOLE
254 0, // HOLE
255 0, // HOLE
256 0, // HOLE
257 0, // HOLE
258 0, // HOLE
259 0, // HOLE
260 0, // HOLE
261 0, // HOLE
262 0, // HOLE
263 0, // HOLE
264 0, // HOLE
265 0, // HOLE
266 0, // HOLE
267 0, // HOLE
268 0, // HOLE
269 0, // HOLE
270 0, // HOLE
271 0, // HOLE
272 0, // HOLE
273 0, // HOLE
274 0, // HOLE
275 0, // HOLE
276 0, // HOLE
277 0, // HOLE
278 0, // HOLE
279 0, // HOLE
280 0, // HOLE
281 0xffffffff, // VGT_MAX_VTX_INDX
282 0x00000000, // VGT_MIN_VTX_INDX
283 0x00000000, // VGT_INDX_OFFSET
284 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
285 0, // HOLE
286 0x00000000, // CB_BLEND_RED
287 0x00000000, // CB_BLEND_GREEN
288 0x00000000, // CB_BLEND_BLUE
289 0x00000000, // CB_BLEND_ALPHA
290 0, // HOLE
291 0, // HOLE
292 0x00000000, // DB_STENCIL_CONTROL
293 0x00000000, // DB_STENCILREFMASK
294 0x00000000, // DB_STENCILREFMASK_BF
295 0, // HOLE
296 0x00000000, // PA_CL_VPORT_XSCALE
297 0x00000000, // PA_CL_VPORT_XOFFSET
298 0x00000000, // PA_CL_VPORT_YSCALE
299 0x00000000, // PA_CL_VPORT_YOFFSET
300 0x00000000, // PA_CL_VPORT_ZSCALE
301 0x00000000, // PA_CL_VPORT_ZOFFSET
302 0x00000000, // PA_CL_VPORT_XSCALE_1
303 0x00000000, // PA_CL_VPORT_XOFFSET_1
304 0x00000000, // PA_CL_VPORT_YSCALE_1
305 0x00000000, // PA_CL_VPORT_YOFFSET_1
306 0x00000000, // PA_CL_VPORT_ZSCALE_1
307 0x00000000, // PA_CL_VPORT_ZOFFSET_1
308 0x00000000, // PA_CL_VPORT_XSCALE_2
309 0x00000000, // PA_CL_VPORT_XOFFSET_2
310 0x00000000, // PA_CL_VPORT_YSCALE_2
311 0x00000000, // PA_CL_VPORT_YOFFSET_2
312 0x00000000, // PA_CL_VPORT_ZSCALE_2
313 0x00000000, // PA_CL_VPORT_ZOFFSET_2
314 0x00000000, // PA_CL_VPORT_XSCALE_3
315 0x00000000, // PA_CL_VPORT_XOFFSET_3
316 0x00000000, // PA_CL_VPORT_YSCALE_3
317 0x00000000, // PA_CL_VPORT_YOFFSET_3
318 0x00000000, // PA_CL_VPORT_ZSCALE_3
319 0x00000000, // PA_CL_VPORT_ZOFFSET_3
320 0x00000000, // PA_CL_VPORT_XSCALE_4
321 0x00000000, // PA_CL_VPORT_XOFFSET_4
322 0x00000000, // PA_CL_VPORT_YSCALE_4
323 0x00000000, // PA_CL_VPORT_YOFFSET_4
324 0x00000000, // PA_CL_VPORT_ZSCALE_4
325 0x00000000, // PA_CL_VPORT_ZOFFSET_4
326 0x00000000, // PA_CL_VPORT_XSCALE_5
327 0x00000000, // PA_CL_VPORT_XOFFSET_5
328 0x00000000, // PA_CL_VPORT_YSCALE_5
329 0x00000000, // PA_CL_VPORT_YOFFSET_5
330 0x00000000, // PA_CL_VPORT_ZSCALE_5
331 0x00000000, // PA_CL_VPORT_ZOFFSET_5
332 0x00000000, // PA_CL_VPORT_XSCALE_6
333 0x00000000, // PA_CL_VPORT_XOFFSET_6
334 0x00000000, // PA_CL_VPORT_YSCALE_6
335 0x00000000, // PA_CL_VPORT_YOFFSET_6
336 0x00000000, // PA_CL_VPORT_ZSCALE_6
337 0x00000000, // PA_CL_VPORT_ZOFFSET_6
338 0x00000000, // PA_CL_VPORT_XSCALE_7
339 0x00000000, // PA_CL_VPORT_XOFFSET_7
340 0x00000000, // PA_CL_VPORT_YSCALE_7
341 0x00000000, // PA_CL_VPORT_YOFFSET_7
342 0x00000000, // PA_CL_VPORT_ZSCALE_7
343 0x00000000, // PA_CL_VPORT_ZOFFSET_7
344 0x00000000, // PA_CL_VPORT_XSCALE_8
345 0x00000000, // PA_CL_VPORT_XOFFSET_8
346 0x00000000, // PA_CL_VPORT_YSCALE_8
347 0x00000000, // PA_CL_VPORT_YOFFSET_8
348 0x00000000, // PA_CL_VPORT_ZSCALE_8
349 0x00000000, // PA_CL_VPORT_ZOFFSET_8
350 0x00000000, // PA_CL_VPORT_XSCALE_9
351 0x00000000, // PA_CL_VPORT_XOFFSET_9
352 0x00000000, // PA_CL_VPORT_YSCALE_9
353 0x00000000, // PA_CL_VPORT_YOFFSET_9
354 0x00000000, // PA_CL_VPORT_ZSCALE_9
355 0x00000000, // PA_CL_VPORT_ZOFFSET_9
356 0x00000000, // PA_CL_VPORT_XSCALE_10
357 0x00000000, // PA_CL_VPORT_XOFFSET_10
358 0x00000000, // PA_CL_VPORT_YSCALE_10
359 0x00000000, // PA_CL_VPORT_YOFFSET_10
360 0x00000000, // PA_CL_VPORT_ZSCALE_10
361 0x00000000, // PA_CL_VPORT_ZOFFSET_10
362 0x00000000, // PA_CL_VPORT_XSCALE_11
363 0x00000000, // PA_CL_VPORT_XOFFSET_11
364 0x00000000, // PA_CL_VPORT_YSCALE_11
365 0x00000000, // PA_CL_VPORT_YOFFSET_11
366 0x00000000, // PA_CL_VPORT_ZSCALE_11
367 0x00000000, // PA_CL_VPORT_ZOFFSET_11
368 0x00000000, // PA_CL_VPORT_XSCALE_12
369 0x00000000, // PA_CL_VPORT_XOFFSET_12
370 0x00000000, // PA_CL_VPORT_YSCALE_12
371 0x00000000, // PA_CL_VPORT_YOFFSET_12
372 0x00000000, // PA_CL_VPORT_ZSCALE_12
373 0x00000000, // PA_CL_VPORT_ZOFFSET_12
374 0x00000000, // PA_CL_VPORT_XSCALE_13
375 0x00000000, // PA_CL_VPORT_XOFFSET_13
376 0x00000000, // PA_CL_VPORT_YSCALE_13
377 0x00000000, // PA_CL_VPORT_YOFFSET_13
378 0x00000000, // PA_CL_VPORT_ZSCALE_13
379 0x00000000, // PA_CL_VPORT_ZOFFSET_13
380 0x00000000, // PA_CL_VPORT_XSCALE_14
381 0x00000000, // PA_CL_VPORT_XOFFSET_14
382 0x00000000, // PA_CL_VPORT_YSCALE_14
383 0x00000000, // PA_CL_VPORT_YOFFSET_14
384 0x00000000, // PA_CL_VPORT_ZSCALE_14
385 0x00000000, // PA_CL_VPORT_ZOFFSET_14
386 0x00000000, // PA_CL_VPORT_XSCALE_15
387 0x00000000, // PA_CL_VPORT_XOFFSET_15
388 0x00000000, // PA_CL_VPORT_YSCALE_15
389 0x00000000, // PA_CL_VPORT_YOFFSET_15
390 0x00000000, // PA_CL_VPORT_ZSCALE_15
391 0x00000000, // PA_CL_VPORT_ZOFFSET_15
392 0x00000000, // PA_CL_UCP_0_X
393 0x00000000, // PA_CL_UCP_0_Y
394 0x00000000, // PA_CL_UCP_0_Z
395 0x00000000, // PA_CL_UCP_0_W
396 0x00000000, // PA_CL_UCP_1_X
397 0x00000000, // PA_CL_UCP_1_Y
398 0x00000000, // PA_CL_UCP_1_Z
399 0x00000000, // PA_CL_UCP_1_W
400 0x00000000, // PA_CL_UCP_2_X
401 0x00000000, // PA_CL_UCP_2_Y
402 0x00000000, // PA_CL_UCP_2_Z
403 0x00000000, // PA_CL_UCP_2_W
404 0x00000000, // PA_CL_UCP_3_X
405 0x00000000, // PA_CL_UCP_3_Y
406 0x00000000, // PA_CL_UCP_3_Z
407 0x00000000, // PA_CL_UCP_3_W
408 0x00000000, // PA_CL_UCP_4_X
409 0x00000000, // PA_CL_UCP_4_Y
410 0x00000000, // PA_CL_UCP_4_Z
411 0x00000000, // PA_CL_UCP_4_W
412 0x00000000, // PA_CL_UCP_5_X
413 0x00000000, // PA_CL_UCP_5_Y
414 0x00000000, // PA_CL_UCP_5_Z
415 0x00000000, // PA_CL_UCP_5_W
416 0, // HOLE
417 0, // HOLE
418 0, // HOLE
419 0, // HOLE
420 0, // HOLE
421 0, // HOLE
422 0, // HOLE
423 0, // HOLE
424 0, // HOLE
425 0, // HOLE
426 0x00000000, // SPI_PS_INPUT_CNTL_0
427 0x00000000, // SPI_PS_INPUT_CNTL_1
428 0x00000000, // SPI_PS_INPUT_CNTL_2
429 0x00000000, // SPI_PS_INPUT_CNTL_3
430 0x00000000, // SPI_PS_INPUT_CNTL_4
431 0x00000000, // SPI_PS_INPUT_CNTL_5
432 0x00000000, // SPI_PS_INPUT_CNTL_6
433 0x00000000, // SPI_PS_INPUT_CNTL_7
434 0x00000000, // SPI_PS_INPUT_CNTL_8
435 0x00000000, // SPI_PS_INPUT_CNTL_9
436 0x00000000, // SPI_PS_INPUT_CNTL_10
437 0x00000000, // SPI_PS_INPUT_CNTL_11
438 0x00000000, // SPI_PS_INPUT_CNTL_12
439 0x00000000, // SPI_PS_INPUT_CNTL_13
440 0x00000000, // SPI_PS_INPUT_CNTL_14
441 0x00000000, // SPI_PS_INPUT_CNTL_15
442 0x00000000, // SPI_PS_INPUT_CNTL_16
443 0x00000000, // SPI_PS_INPUT_CNTL_17
444 0x00000000, // SPI_PS_INPUT_CNTL_18
445 0x00000000, // SPI_PS_INPUT_CNTL_19
446 0x00000000, // SPI_PS_INPUT_CNTL_20
447 0x00000000, // SPI_PS_INPUT_CNTL_21
448 0x00000000, // SPI_PS_INPUT_CNTL_22
449 0x00000000, // SPI_PS_INPUT_CNTL_23
450 0x00000000, // SPI_PS_INPUT_CNTL_24
451 0x00000000, // SPI_PS_INPUT_CNTL_25
452 0x00000000, // SPI_PS_INPUT_CNTL_26
453 0x00000000, // SPI_PS_INPUT_CNTL_27
454 0x00000000, // SPI_PS_INPUT_CNTL_28
455 0x00000000, // SPI_PS_INPUT_CNTL_29
456 0x00000000, // SPI_PS_INPUT_CNTL_30
457 0x00000000, // SPI_PS_INPUT_CNTL_31
458 0x00000000, // SPI_VS_OUT_CONFIG
459 0, // HOLE
460 0x00000000, // SPI_PS_INPUT_ENA
461 0x00000000, // SPI_PS_INPUT_ADDR
462 0x00000000, // SPI_INTERP_CONTROL_0
463 0x00000002, // SPI_PS_IN_CONTROL
464 0, // HOLE
465 0x00000000, // SPI_BARYC_CNTL
466 0, // HOLE
467 0x00000000, // SPI_TMPRING_SIZE
468 0, // HOLE
469 0, // HOLE
470 0, // HOLE
471 0, // HOLE
472 0, // HOLE
473 0, // HOLE
474 0x00000000, // SPI_WAVE_MGMT_1
475 0x00000000, // SPI_WAVE_MGMT_2
476 0x00000000, // SPI_SHADER_POS_FORMAT
477 0x00000000, // SPI_SHADER_Z_FORMAT
478 0x00000000, // SPI_SHADER_COL_FORMAT
479 0, // HOLE
480 0, // HOLE
481 0, // HOLE
482 0, // HOLE
483 0, // HOLE
484 0, // HOLE
485 0, // HOLE
486 0, // HOLE
487 0, // HOLE
488 0, // HOLE
489 0, // HOLE
490 0, // HOLE
491 0, // HOLE
492 0, // HOLE
493 0, // HOLE
494 0, // HOLE
495 0, // HOLE
496 0, // HOLE
497 0, // HOLE
498 0, // HOLE
499 0, // HOLE
500 0, // HOLE
501 0, // HOLE
502 0, // HOLE
503 0, // HOLE
504 0, // HOLE
505 0x00000000, // CB_BLEND0_CONTROL
506 0x00000000, // CB_BLEND1_CONTROL
507 0x00000000, // CB_BLEND2_CONTROL
508 0x00000000, // CB_BLEND3_CONTROL
509 0x00000000, // CB_BLEND4_CONTROL
510 0x00000000, // CB_BLEND5_CONTROL
511 0x00000000, // CB_BLEND6_CONTROL
512 0x00000000, // CB_BLEND7_CONTROL
513};
// si_SECT_CONTEXT_def_3: defaults for the short PA_CL point-sprite /
// VGT DMA base context-register extent (6 dword slots).
// NOTE(review): consumed via a cs_extent_def table (si_SECT_CONTEXT_defs)
// that lies outside this hunk — base offset/count pairing not visible here.
514static const u32 si_SECT_CONTEXT_def_3[] =
515{
516 0x00000000, // PA_CL_POINT_X_RAD
517 0x00000000, // PA_CL_POINT_Y_RAD
518 0x00000000, // PA_CL_POINT_SIZE
519 0x00000000, // PA_CL_POINT_CULL_RAD
520 0x00000000, // VGT_DMA_BASE_HI
521 0x00000000, // VGT_DMA_BASE
522};
/*
 * SI golden-context defaults, fourth span (157 dwords).
 * "HOLE" entries pad unused register offsets so array index == reg offset.
 */
static const u32 si_SECT_CONTEXT_def_4[] =
{
	0x00000000, // DB_DEPTH_CONTROL
	0x00000000, // DB_EQAA
	0x00000000, // CB_COLOR_CONTROL
	0x00000000, // DB_SHADER_CONTROL
	0x00090000, // PA_CL_CLIP_CNTL
	0x00000004, // PA_SU_SC_MODE_CNTL
	0x00000000, // PA_CL_VTE_CNTL
	0x00000000, // PA_CL_VS_OUT_CNTL
	0x00000000, // PA_CL_NANINF_CNTL
	0x00000000, // PA_SU_LINE_STIPPLE_CNTL
	0x00000000, // PA_SU_LINE_STIPPLE_SCALE
	0x00000000, // PA_SU_PRIM_FILTER_CNTL
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0x00000000, // PA_SU_POINT_SIZE
	0x00000000, // PA_SU_POINT_MINMAX
	0x00000000, // PA_SU_LINE_CNTL
	0x00000000, // PA_SC_LINE_STIPPLE
	0x00000000, // VGT_OUTPUT_PATH_CNTL
	0x00000000, // VGT_HOS_CNTL
	0x00000000, // VGT_HOS_MAX_TESS_LEVEL
	0x00000000, // VGT_HOS_MIN_TESS_LEVEL
	0x00000000, // VGT_HOS_REUSE_DEPTH
	0x00000000, // VGT_GROUP_PRIM_TYPE
	0x00000000, // VGT_GROUP_FIRST_DECR
	0x00000000, // VGT_GROUP_DECR
	0x00000000, // VGT_GROUP_VECT_0_CNTL
	0x00000000, // VGT_GROUP_VECT_1_CNTL
	0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
	0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
	0x00000000, // VGT_GS_MODE
	0, // HOLE
	0x00000000, // PA_SC_MODE_CNTL_0
	0x00000000, // PA_SC_MODE_CNTL_1
	0x00000000, // VGT_ENHANCE
	0x00000100, // VGT_GS_PER_ES
	0x00000080, // VGT_ES_PER_GS
	0x00000002, // VGT_GS_PER_VS
	0x00000000, // VGT_GSVS_RING_OFFSET_1
	0x00000000, // VGT_GSVS_RING_OFFSET_2
	0x00000000, // VGT_GSVS_RING_OFFSET_3
	0x00000000, // VGT_GS_OUT_PRIM_TYPE
	0x00000000, // IA_ENHANCE
};
/* SI golden-context defaults, fifth span (single dword). */
static const u32 si_SECT_CONTEXT_def_5[] =
{
	0x00000000, // VGT_PRIMITIVEID_EN
};
/* SI golden-context defaults, sixth span (single dword). */
static const u32 si_SECT_CONTEXT_def_6[] =
{
	0x00000000, // VGT_PRIMITIVEID_RESET
};
/*
 * SI golden-context defaults, seventh span (233 dwords):
 * VGT stream-out, tessellation, scan-converter AA and CB_COLOR0-7 state.
 * "HOLE" entries pad unused register offsets so array index == reg offset.
 */
static const u32 si_SECT_CONTEXT_def_7[] =
{
	0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
	0, // HOLE
	0, // HOLE
	0x00000000, // VGT_INSTANCE_STEP_RATE_0
	0x00000000, // VGT_INSTANCE_STEP_RATE_1
	0x000000ff, // IA_MULTI_VGT_PARAM
	0x00000000, // VGT_ESGS_RING_ITEMSIZE
	0x00000000, // VGT_GSVS_RING_ITEMSIZE
	0x00000000, // VGT_REUSE_OFF
	0x00000000, // VGT_VTX_CNT_EN
	0x00000000, // DB_HTILE_SURFACE
	0x00000000, // DB_SRESULTS_COMPARE_STATE0
	0x00000000, // DB_SRESULTS_COMPARE_STATE1
	0x00000000, // DB_PRELOAD_CONTROL
	0, // HOLE
	0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
	0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
	0, // HOLE
	0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
	0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
	0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
	0, // HOLE
	0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
	0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
	0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
	0, // HOLE
	0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
	0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
	0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
	0, // HOLE
	0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
	0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
	0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
	0, // HOLE
	0x00000000, // VGT_GS_MAX_VERT_OUT
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0x00000000, // VGT_SHADER_STAGES_EN
	0x00000000, // VGT_LS_HS_CONFIG
	0x00000000, // VGT_GS_VERT_ITEMSIZE
	0x00000000, // VGT_GS_VERT_ITEMSIZE_1
	0x00000000, // VGT_GS_VERT_ITEMSIZE_2
	0x00000000, // VGT_GS_VERT_ITEMSIZE_3
	0x00000000, // VGT_TF_PARAM
	0x00000000, // DB_ALPHA_TO_MASK
	0, // HOLE
	0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
	0x00000000, // PA_SU_POLY_OFFSET_CLAMP
	0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
	0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
	0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
	0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
	0x00000000, // VGT_GS_INSTANCE_CNT
	0x00000000, // VGT_STRMOUT_CONFIG
	0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0x00000000, // PA_SC_CENTROID_PRIORITY_0
	0x00000000, // PA_SC_CENTROID_PRIORITY_1
	0x00001000, // PA_SC_LINE_CNTL
	0x00000000, // PA_SC_AA_CONFIG
	0x00000005, // PA_SU_VTX_CNTL
	0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
	0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
	0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
	0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
	0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
	0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0, // HOLE
	0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
	0x00000010, // VGT_OUT_DEALLOC_CNTL
	0x00000000, // CB_COLOR0_BASE
	0x00000000, // CB_COLOR0_PITCH
	0x00000000, // CB_COLOR0_SLICE
	0x00000000, // CB_COLOR0_VIEW
	0x00000000, // CB_COLOR0_INFO
	0x00000000, // CB_COLOR0_ATTRIB
	0, // HOLE
	0x00000000, // CB_COLOR0_CMASK
	0x00000000, // CB_COLOR0_CMASK_SLICE
	0x00000000, // CB_COLOR0_FMASK
	0x00000000, // CB_COLOR0_FMASK_SLICE
	0x00000000, // CB_COLOR0_CLEAR_WORD0
	0x00000000, // CB_COLOR0_CLEAR_WORD1
	0, // HOLE
	0, // HOLE
	0x00000000, // CB_COLOR1_BASE
	0x00000000, // CB_COLOR1_PITCH
	0x00000000, // CB_COLOR1_SLICE
	0x00000000, // CB_COLOR1_VIEW
	0x00000000, // CB_COLOR1_INFO
	0x00000000, // CB_COLOR1_ATTRIB
	0, // HOLE
	0x00000000, // CB_COLOR1_CMASK
	0x00000000, // CB_COLOR1_CMASK_SLICE
	0x00000000, // CB_COLOR1_FMASK
	0x00000000, // CB_COLOR1_FMASK_SLICE
	0x00000000, // CB_COLOR1_CLEAR_WORD0
	0x00000000, // CB_COLOR1_CLEAR_WORD1
	0, // HOLE
	0, // HOLE
	0x00000000, // CB_COLOR2_BASE
	0x00000000, // CB_COLOR2_PITCH
	0x00000000, // CB_COLOR2_SLICE
	0x00000000, // CB_COLOR2_VIEW
	0x00000000, // CB_COLOR2_INFO
	0x00000000, // CB_COLOR2_ATTRIB
	0, // HOLE
	0x00000000, // CB_COLOR2_CMASK
	0x00000000, // CB_COLOR2_CMASK_SLICE
	0x00000000, // CB_COLOR2_FMASK
	0x00000000, // CB_COLOR2_FMASK_SLICE
	0x00000000, // CB_COLOR2_CLEAR_WORD0
	0x00000000, // CB_COLOR2_CLEAR_WORD1
	0, // HOLE
	0, // HOLE
	0x00000000, // CB_COLOR3_BASE
	0x00000000, // CB_COLOR3_PITCH
	0x00000000, // CB_COLOR3_SLICE
	0x00000000, // CB_COLOR3_VIEW
	0x00000000, // CB_COLOR3_INFO
	0x00000000, // CB_COLOR3_ATTRIB
	0, // HOLE
	0x00000000, // CB_COLOR3_CMASK
	0x00000000, // CB_COLOR3_CMASK_SLICE
	0x00000000, // CB_COLOR3_FMASK
	0x00000000, // CB_COLOR3_FMASK_SLICE
	0x00000000, // CB_COLOR3_CLEAR_WORD0
	0x00000000, // CB_COLOR3_CLEAR_WORD1
	0, // HOLE
	0, // HOLE
	0x00000000, // CB_COLOR4_BASE
	0x00000000, // CB_COLOR4_PITCH
	0x00000000, // CB_COLOR4_SLICE
	0x00000000, // CB_COLOR4_VIEW
	0x00000000, // CB_COLOR4_INFO
	0x00000000, // CB_COLOR4_ATTRIB
	0, // HOLE
	0x00000000, // CB_COLOR4_CMASK
	0x00000000, // CB_COLOR4_CMASK_SLICE
	0x00000000, // CB_COLOR4_FMASK
	0x00000000, // CB_COLOR4_FMASK_SLICE
	0x00000000, // CB_COLOR4_CLEAR_WORD0
	0x00000000, // CB_COLOR4_CLEAR_WORD1
	0, // HOLE
	0, // HOLE
	0x00000000, // CB_COLOR5_BASE
	0x00000000, // CB_COLOR5_PITCH
	0x00000000, // CB_COLOR5_SLICE
	0x00000000, // CB_COLOR5_VIEW
	0x00000000, // CB_COLOR5_INFO
	0x00000000, // CB_COLOR5_ATTRIB
	0, // HOLE
	0x00000000, // CB_COLOR5_CMASK
	0x00000000, // CB_COLOR5_CMASK_SLICE
	0x00000000, // CB_COLOR5_FMASK
	0x00000000, // CB_COLOR5_FMASK_SLICE
	0x00000000, // CB_COLOR5_CLEAR_WORD0
	0x00000000, // CB_COLOR5_CLEAR_WORD1
	0, // HOLE
	0, // HOLE
	0x00000000, // CB_COLOR6_BASE
	0x00000000, // CB_COLOR6_PITCH
	0x00000000, // CB_COLOR6_SLICE
	0x00000000, // CB_COLOR6_VIEW
	0x00000000, // CB_COLOR6_INFO
	0x00000000, // CB_COLOR6_ATTRIB
	0, // HOLE
	0x00000000, // CB_COLOR6_CMASK
	0x00000000, // CB_COLOR6_CMASK_SLICE
	0x00000000, // CB_COLOR6_FMASK
	0x00000000, // CB_COLOR6_FMASK_SLICE
	0x00000000, // CB_COLOR6_CLEAR_WORD0
	0x00000000, // CB_COLOR6_CLEAR_WORD1
	0, // HOLE
	0, // HOLE
	0x00000000, // CB_COLOR7_BASE
	0x00000000, // CB_COLOR7_PITCH
	0x00000000, // CB_COLOR7_SLICE
	0x00000000, // CB_COLOR7_VIEW
	0x00000000, // CB_COLOR7_INFO
	0x00000000, // CB_COLOR7_ATTRIB
	0, // HOLE
	0x00000000, // CB_COLOR7_CMASK
	0x00000000, // CB_COLOR7_CMASK_SLICE
	0x00000000, // CB_COLOR7_FMASK
	0x00000000, // CB_COLOR7_FMASK_SLICE
	0x00000000, // CB_COLOR7_CLEAR_WORD0
	0x00000000, // CB_COLOR7_CLEAR_WORD1
};
/*
 * Extent table mapping each default-value span above to its starting
 * register dword offset and length; terminated by an all-zero sentinel.
 */
static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
{
    {si_SECT_CONTEXT_def_1, 0x0000a000, 212 },
    {si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
    {si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
    {si_SECT_CONTEXT_def_4, 0x0000a200, 157 },
    {si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 },
    {si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
    {si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
    { 0, 0, 0 }
};
/* Top-level SI clear-state description: one CONTEXT section, then sentinel. */
static const struct cs_section_def si_cs_data[] = {
    { si_SECT_CONTEXT_defs, SECT_CONTEXT },
    { 0, SECT_NONE }
};
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
new file mode 100644
index 000000000000..5ada922e5cec
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -0,0 +1,2176 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "evergreend.h"
28#include "r600_dpm.h"
29#include "cypress_dpm.h"
30#include "atom.h"
31
/* End of the SMC's addressable SRAM window (bytes). */
#define SMC_RAM_END 0x8000

/* MC arbiter register-set indices used when switching memory timings. */
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
#define MC_CG_ARB_FREQ_F3 0x0d

/* MC clock-gating sequencer commands. */
#define MC_CG_SEQ_DRAMCONF_S0 0x05
#define MC_CG_SEQ_DRAMCONF_S1 0x06
#define MC_CG_SEQ_YCLK_SUSPEND 0x04
#define MC_CG_SEQ_YCLK_RESUME 0x0a
43
/* Per-ASIC private-state accessors implemented in rv770_dpm.c / evergreen.c. */
struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps);
struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
47
/*
 * Enable/disable dynamic PCIE gen2 speed switching in the BIF.
 * Enabling only takes effect when the link partner has advertised gen2
 * support and the board did not already boot in gen2 mode.
 */
static void cypress_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
						 bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 tmp, bif;

	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (enable) {
		if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
		    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
			if (!pi->boot_in_gen2) {
				/* route the speed-change request via the CG client */
				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
				bif |= CG_CLIENT_REQ(0xd);
				WREG32(CG_BIF_REQ_AND_RSP, bif);

				tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
				tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
				tmp |= LC_GEN2_EN_STRAP;

				/* pulse the failed-speed-change counter clear bit */
				tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
				WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
				udelay(10);
				tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
				WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
			}
		}
	} else {
		if (!pi->boot_in_gen2) {
			tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
			tmp &= ~LC_GEN2_EN_STRAP;
		}
		/* only write back if the other side ever spoke gen2 */
		if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
		    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
	}
}
84
85static void cypress_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
86 bool enable)
87{
88 cypress_enable_bif_dynamic_pcie_gen2(rdev, enable);
89
90 if (enable)
91 WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
92 else
93 WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
94}
95
#if 0
/*
 * Request the SMC to drop into its minimum-power ("ULP") state.
 * Currently unused; kept under #if 0 for reference.
 */
static int cypress_enter_ulp_state(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	if (pi->gfx_clock_gating) {
		/* force the gfx clock on, then release it, to flush gating state */
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);

		RREG32(GB_ADDR_CONFIG); /* posting read to flush the writes */
	}

	WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
		 ~HOST_SMC_MSG_MASK);

	udelay(7000); /* give the SMC time to process the message */

	return 0;
}
#endif
117
/*
 * Enable/disable graphics clock gating (and, when supported, tile
 * light-sleep).  Writing 0xC0000000 to GRBM_GFX_INDEX broadcasts the
 * following CG_CGLS_TILE writes to all SEs/SHs.
 */
static void cypress_gfx_clock_gating_enable(struct radeon_device *rdev,
					    bool enable)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	if (enable) {
		if (eg_pi->light_sleep) {
			WREG32(GRBM_GFX_INDEX, 0xC0000000);

			/* allow light sleep on every tile */
			WREG32_CG(CG_CGLS_TILE_0, 0xFFFFFFFF);
			WREG32_CG(CG_CGLS_TILE_1, 0xFFFFFFFF);
			WREG32_CG(CG_CGLS_TILE_2, 0xFFFFFFFF);
			WREG32_CG(CG_CGLS_TILE_3, 0xFFFFFFFF);
			WREG32_CG(CG_CGLS_TILE_4, 0xFFFFFFFF);
			WREG32_CG(CG_CGLS_TILE_5, 0xFFFFFFFF);
			WREG32_CG(CG_CGLS_TILE_6, 0xFFFFFFFF);
			WREG32_CG(CG_CGLS_TILE_7, 0xFFFFFFFF);
			WREG32_CG(CG_CGLS_TILE_8, 0xFFFFFFFF);
			WREG32_CG(CG_CGLS_TILE_9, 0xFFFFFFFF);
			WREG32_CG(CG_CGLS_TILE_10, 0xFFFFFFFF);
			WREG32_CG(CG_CGLS_TILE_11, 0xFFFFFFFF);

			WREG32_P(SCLK_PWRMGT_CNTL, DYN_LIGHT_SLEEP_EN, ~DYN_LIGHT_SLEEP_EN);
		}
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		/* force the gfx clock on, then release it, to flush gating state */
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
		RREG32(GB_ADDR_CONFIG); /* posting read */

		if (eg_pi->light_sleep) {
			WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_LIGHT_SLEEP_EN);

			WREG32(GRBM_GFX_INDEX, 0xC0000000);

			/* disable light sleep on every tile */
			WREG32_CG(CG_CGLS_TILE_0, 0);
			WREG32_CG(CG_CGLS_TILE_1, 0);
			WREG32_CG(CG_CGLS_TILE_2, 0);
			WREG32_CG(CG_CGLS_TILE_3, 0);
			WREG32_CG(CG_CGLS_TILE_4, 0);
			WREG32_CG(CG_CGLS_TILE_5, 0);
			WREG32_CG(CG_CGLS_TILE_6, 0);
			WREG32_CG(CG_CGLS_TILE_7, 0);
			WREG32_CG(CG_CGLS_TILE_8, 0);
			WREG32_CG(CG_CGLS_TILE_9, 0);
			WREG32_CG(CG_CGLS_TILE_10, 0);
			WREG32_CG(CG_CGLS_TILE_11, 0);
		}
	}
}
169
/*
 * Enable/disable medium-grain clock gating, with per-family CGTS SM
 * defaults and optional MC light sleep.
 */
static void cypress_mg_clock_gating_enable(struct radeon_device *rdev,
					   bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	if (enable) {
		u32 cgts_sm_ctrl_reg;

		/* pick the family-tuned CGTS SM control default */
		if (rdev->family == CHIP_CEDAR)
			cgts_sm_ctrl_reg = CEDAR_MGCGCGTSSMCTRL_DFLT;
		else if (rdev->family == CHIP_REDWOOD)
			cgts_sm_ctrl_reg = REDWOOD_MGCGCGTSSMCTRL_DFLT;
		else
			cgts_sm_ctrl_reg = CYPRESS_MGCGCGTSSMCTRL_DFLT;

		/* broadcast the CG writes to all SEs/SHs */
		WREG32(GRBM_GFX_INDEX, 0xC0000000);

		WREG32_CG(CG_CGTT_LOCAL_0, CYPRESS_MGCGTTLOCAL0_DFLT);
		WREG32_CG(CG_CGTT_LOCAL_1, CYPRESS_MGCGTTLOCAL1_DFLT & 0xFFFFCFFF);
		WREG32_CG(CG_CGTT_LOCAL_2, CYPRESS_MGCGTTLOCAL2_DFLT);
		WREG32_CG(CG_CGTT_LOCAL_3, CYPRESS_MGCGTTLOCAL3_DFLT);

		if (pi->mgcgtssm)
			WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

		if (eg_pi->mcls) {
			/* enable memory-controller light sleep on all MC clients */
			WREG32_P(MC_CITF_MISC_RD_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
			WREG32_P(MC_CITF_MISC_WR_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
			WREG32_P(MC_CITF_MISC_VM_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
			WREG32_P(MC_HUB_MISC_HUB_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
			WREG32_P(MC_HUB_MISC_VM_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
			WREG32_P(MC_HUB_MISC_SIP_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
			WREG32_P(MC_XPB_CLK_GAT, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
			WREG32_P(VM_L2_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
		}
	} else {
		WREG32(GRBM_GFX_INDEX, 0xC0000000);

		/* all-ones disables gating on every local CG domain */
		WREG32_CG(CG_CGTT_LOCAL_0, 0xFFFFFFFF);
		WREG32_CG(CG_CGTT_LOCAL_1, 0xFFFFFFFF);
		WREG32_CG(CG_CGTT_LOCAL_2, 0xFFFFFFFF);
		WREG32_CG(CG_CGTT_LOCAL_3, 0xFFFFFFFF);

		if (pi->mgcgtssm)
			WREG32(CGTS_SM_CTRL_REG, 0x81f44bc0); /* hw default */
	}
}
218
/*
 * Enable/disable spread spectrum on the engine (SCLK) and memory (MCLK)
 * PLLs, honoring the per-board ss capability flags.
 */
void cypress_enable_spread_spectrum(struct radeon_device *rdev,
				    bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	if (enable) {
		if (pi->sclk_ss)
			WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);

		if (pi->mclk_ss)
			WREG32_P(MPLL_CNTL_MODE, SS_SSEN, ~SS_SSEN);
	} else {
		/* unconditionally clear every ss enable bit */
		WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
		WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
		WREG32_P(MPLL_CNTL_MODE, 0, ~SS_SSEN);
		WREG32_P(MPLL_CNTL_MODE, 0, ~SS_DSMODE_EN);
	}
}
237
/* Kick off dynamic power management by setting the global pwrmgt enable. */
void cypress_start_dpm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
}
242
243void cypress_enable_sclk_control(struct radeon_device *rdev,
244 bool enable)
245{
246 if (enable)
247 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
248 else
249 WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
250}
251
252void cypress_enable_mclk_control(struct radeon_device *rdev,
253 bool enable)
254{
255 if (enable)
256 WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
257 else
258 WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
259}
260
261int cypress_notify_smc_display_change(struct radeon_device *rdev,
262 bool has_display)
263{
264 PPSMC_Msg msg = has_display ?
265 (PPSMC_Msg)PPSMC_MSG_HasDisplay : (PPSMC_Msg)PPSMC_MSG_NoDisplay;
266
267 if (rv770_send_msg_to_smc(rdev, msg) != PPSMC_Result_OK)
268 return -EINVAL;
269
270 return 0;
271}
272
/*
 * Program SMC soft registers that bound how long various power-state
 * transitions may take (mclk switch limit, MVDD change time, MC block
 * delay), then defer to the common rv770 response-time setup.
 */
void cypress_program_response_times(struct radeon_device *rdev)
{
	u32 reference_clock;
	u32 mclk_switch_limit;

	reference_clock = radeon_get_xclk(rdev);
	/* 4.6x the reference clock, in the SMC's units */
	mclk_switch_limit = (460 * reference_clock) / 100;

	rv770_write_smc_soft_register(rdev,
				      RV770_SMC_SOFT_REGISTER_mclk_switch_lim,
				      mclk_switch_limit);

	rv770_write_smc_soft_register(rdev,
				      RV770_SMC_SOFT_REGISTER_mvdd_chg_time, 1);

	rv770_write_smc_soft_register(rdev,
				      RV770_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);

	rv770_program_response_times(rdev);

	if (ASIC_IS_LOMBOK(rdev))
		rv770_write_smc_soft_register(rdev,
					      RV770_SMC_SOFT_REGISTER_is_asic_lombok, 1);

}
298
/*
 * Forward a PCIE performance (link-speed) request to the platform via
 * ACPI, tracking whether a registration is outstanding.  No-op (returns
 * 0) when ACPI support is not compiled in, or when gen1 is requested and
 * the link is already past gen1.
 */
static int cypress_pcie_performance_request(struct radeon_device *rdev,
					    u8 perf_req, bool advertise)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	u32 tmp;

	udelay(10); /* let any in-flight link change settle */
	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) && (tmp & LC_CURRENT_DATA_RATE))
		return 0;

#if defined(CONFIG_ACPI)
	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
	    (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
		eg_pi->pcie_performance_request_registered = true;
		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
	} else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
		   eg_pi->pcie_performance_request_registered) {
		eg_pi->pcie_performance_request_registered = false;
		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
	}
#endif

	return 0;
}
324
325void cypress_advertise_gen2_capability(struct radeon_device *rdev)
326{
327 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
328 u32 tmp;
329
330#if defined(CONFIG_ACPI)
331 radeon_acpi_pcie_notify_device_ready(rdev);
332#endif
333
334 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
335
336 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
337 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
338 pi->pcie_gen2 = true;
339 else
340 pi->pcie_gen2 = false;
341
342 if (!pi->pcie_gen2)
343 cypress_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, true);
344
345}
346
347static enum radeon_pcie_gen cypress_get_maximum_link_speed(struct radeon_ps *radeon_state)
348{
349 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
350
351 if (state->high.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
352 return 1;
353 return 0;
354}
355
356void cypress_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
357 struct radeon_ps *radeon_new_state,
358 struct radeon_ps *radeon_current_state)
359{
360 enum radeon_pcie_gen pcie_link_speed_target =
361 cypress_get_maximum_link_speed(radeon_new_state);
362 enum radeon_pcie_gen pcie_link_speed_current =
363 cypress_get_maximum_link_speed(radeon_current_state);
364 u8 request;
365
366 if (pcie_link_speed_target < pcie_link_speed_current) {
367 if (pcie_link_speed_target == RADEON_PCIE_GEN1)
368 request = PCIE_PERF_REQ_PECI_GEN1;
369 else if (pcie_link_speed_target == RADEON_PCIE_GEN2)
370 request = PCIE_PERF_REQ_PECI_GEN2;
371 else
372 request = PCIE_PERF_REQ_PECI_GEN3;
373
374 cypress_pcie_performance_request(rdev, request, false);
375 }
376}
377
378void cypress_notify_link_speed_change_before_state_change(struct radeon_device *rdev,
379 struct radeon_ps *radeon_new_state,
380 struct radeon_ps *radeon_current_state)
381{
382 enum radeon_pcie_gen pcie_link_speed_target =
383 cypress_get_maximum_link_speed(radeon_new_state);
384 enum radeon_pcie_gen pcie_link_speed_current =
385 cypress_get_maximum_link_speed(radeon_current_state);
386 u8 request;
387
388 if (pcie_link_speed_target > pcie_link_speed_current) {
389 if (pcie_link_speed_target == RADEON_PCIE_GEN1)
390 request = PCIE_PERF_REQ_PECI_GEN1;
391 else if (pcie_link_speed_target == RADEON_PCIE_GEN2)
392 request = PCIE_PERF_REQ_PECI_GEN2;
393 else
394 request = PCIE_PERF_REQ_PECI_GEN3;
395
396 cypress_pcie_performance_request(rdev, request, false);
397 }
398}
399
400static int cypress_populate_voltage_value(struct radeon_device *rdev,
401 struct atom_voltage_table *table,
402 u16 value, RV770_SMC_VOLTAGE_VALUE *voltage)
403{
404 unsigned int i;
405
406 for (i = 0; i < table->count; i++) {
407 if (value <= table->entries[i].value) {
408 voltage->index = (u8)i;
409 voltage->value = cpu_to_be16(table->entries[i].value);
410 break;
411 }
412 }
413
414 if (i == table->count)
415 return -EINVAL;
416
417 return 0;
418}
419
420u8 cypress_get_strobe_mode_settings(struct radeon_device *rdev, u32 mclk)
421{
422 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
423 u8 result = 0;
424 bool strobe_mode = false;
425
426 if (pi->mem_gddr5) {
427 if (mclk <= pi->mclk_strobe_mode_threshold)
428 strobe_mode = true;
429 result = cypress_get_mclk_frequency_ratio(rdev, mclk, strobe_mode);
430
431 if (strobe_mode)
432 result |= SMC_STROBE_ENABLE;
433 }
434
435 return result;
436}
437
/*
 * Map an MPLL feedback divider to the hardware-tuned IBIAS value for
 * the resulting VCO frequency, with separate tables for 100 MHz and
 * 27 MHz reference clocks.
 */
u32 cypress_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf)
{
	u32 ref_clk = rdev->clock.mpll.reference_freq;
	u32 vco = clkf * ref_clk;

	/* 100 Mhz ref clk */
	if (ref_clk == 10000) {
		if (vco > 500000)
			return 0xC6;
		if (vco > 400000)
			return 0x9D;
		if (vco > 330000)
			return 0x6C;
		if (vco > 250000)
			return 0x2B;
		/* NOTE(review): 0x5B > 0x2B breaks the otherwise-descending
		 * pattern here; presumably intentional hw tuning — confirm
		 * against the AMD register spec before "fixing". */
		if (vco > 160000)
			return 0x5B;
		if (vco > 120000)
			return 0x0A;
		return 0x4B;
	}

	/* 27 Mhz ref clk */
	if (vco > 250000)
		return 0x8B;
	if (vco > 200000)
		return 0xCC;
	if (vco > 150000)
		return 0x9B;
	return 0x6B;
}
469
/*
 * Build the SMC memory-clock table entry for @memory_clock: compute the
 * MPLL dividers via the VBIOS, program IBIAS/strobe/DLL fields into
 * copies of the boot-time clock registers, and store everything
 * big-endian into @mclk.  Returns 0 on success or the divider-lookup
 * error.
 */
static int cypress_populate_mclk_value(struct radeon_device *rdev,
				       u32 engine_clock, u32 memory_clock,
				       RV7XX_SMC_MCLK_VALUE *mclk,
				       bool strobe_mode, bool dll_state_on)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	/* start from the boot-time register values captured at init */
	u32 mpll_ad_func_cntl =
		pi->clk_regs.rv770.mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2 =
		pi->clk_regs.rv770.mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl =
		pi->clk_regs.rv770.mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2 =
		pi->clk_regs.rv770.mpll_dq_func_cntl_2;
	u32 mclk_pwrmgt_cntl =
		pi->clk_regs.rv770.mclk_pwrmgt_cntl;
	u32 dll_cntl =
		pi->clk_regs.rv770.dll_cntl;
	u32 mpll_ss1 = pi->clk_regs.rv770.mpll_ss1;
	u32 mpll_ss2 = pi->clk_regs.rv770.mpll_ss2;
	struct atom_clock_dividers dividers;
	u32 ibias;
	u32 dll_speed;
	int ret;
	u32 mc_seq_misc7;

	/* ask the VBIOS for the MPLL divider settings */
	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
					     memory_clock, strobe_mode, &dividers);
	if (ret)
		return ret;

	if (!strobe_mode) {
		/* MC_SEQ_MISC7 bit 27 forces post divider to 1 — TODO confirm
		 * bit meaning against the memory-controller spec */
		mc_seq_misc7 = RREG32(MC_SEQ_MISC7);

		if(mc_seq_misc7 & 0x8000000)
			dividers.post_div = 1;
	}

	ibias = cypress_map_clkf_to_ibias(rdev, dividers.whole_fb_div);

	/* program the AD-side MPLL dividers */
	mpll_ad_func_cntl &= ~(CLKR_MASK |
			       YCLK_POST_DIV_MASK |
			       CLKF_MASK |
			       CLKFRAC_MASK |
			       IBIAS_MASK);
	mpll_ad_func_cntl |= CLKR(dividers.ref_div);
	mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
	mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
	mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
	mpll_ad_func_cntl |= IBIAS(ibias);

	if (dividers.vco_mode)
		mpll_ad_func_cntl_2 |= VCO_MODE;
	else
		mpll_ad_func_cntl_2 &= ~VCO_MODE;

	if (pi->mem_gddr5) {
		/* GDDR5 also has a DQ-side PLL; mirror the AD settings */
		mpll_dq_func_cntl &= ~(CLKR_MASK |
				       YCLK_POST_DIV_MASK |
				       CLKF_MASK |
				       CLKFRAC_MASK |
				       IBIAS_MASK);
		mpll_dq_func_cntl |= CLKR(dividers.ref_div);
		mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
		mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
		mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
		mpll_dq_func_cntl |= IBIAS(ibias);

		/* strobe mode powers down the DQ PLL output buffer */
		if (strobe_mode)
			mpll_dq_func_cntl &= ~PDNB;
		else
			mpll_dq_func_cntl |= PDNB;

		if (dividers.vco_mode)
			mpll_dq_func_cntl_2 |= VCO_MODE;
		else
			mpll_dq_func_cntl_2 &= ~VCO_MODE;
	}

	if (pi->mclk_ss) {
		/* spread-spectrum: derive clk_s/clk_v from the VBIOS ss info */
		struct radeon_atom_ss ss;
		u32 vco_freq = memory_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
			u32 reference_clock = rdev->clock.mpll.reference_freq;
			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
			u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
			u32 clk_v = ss.percentage *
				(0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clk_v);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clk_s);
		}
	}

	dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
					memory_clock);

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);
	/* PDNB bits keep the per-channel read DLLs powered */
	if (dll_state_on)
		mclk_pwrmgt_cntl |= (MRDCKA0_PDNB |
				     MRDCKA1_PDNB |
				     MRDCKB0_PDNB |
				     MRDCKB1_PDNB |
				     MRDCKC0_PDNB |
				     MRDCKC1_PDNB |
				     MRDCKD0_PDNB |
				     MRDCKD1_PDNB);
	else
		mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
				      MRDCKA1_PDNB |
				      MRDCKB0_PDNB |
				      MRDCKB1_PDNB |
				      MRDCKC0_PDNB |
				      MRDCKC1_PDNB |
				      MRDCKD0_PDNB |
				      MRDCKD1_PDNB);

	/* the SMC expects big-endian register images */
	mclk->mclk770.mclk_value = cpu_to_be32(memory_clock);
	mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
	mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
	mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
	mclk->mclk770.vMPLL_SS = cpu_to_be32(mpll_ss1);
	mclk->mclk770.vMPLL_SS2 = cpu_to_be32(mpll_ss2);

	return 0;
}
606
/*
 * Quantize a memory clock into the 4-bit frequency-ratio index (0x0-0xf)
 * the MC sequencer expects, with family- and strobe-mode-specific
 * thresholds and step sizes (clocks in 10 kHz units).
 */
u8 cypress_get_mclk_frequency_ratio(struct radeon_device *rdev,
				    u32 memory_clock, bool strobe_mode)
{
	u8 mc_para_index;

	if (rdev->family >= CHIP_BARTS) {
		if (strobe_mode) {
			if (memory_clock < 10000)
				mc_para_index = 0x00;
			else if (memory_clock > 47500)
				mc_para_index = 0x0f;
			else
				mc_para_index = (u8)((memory_clock - 10000) / 2500);
		} else {
			if (memory_clock < 65000)
				mc_para_index = 0x00;
			/* NOTE(review): clamp threshold is 65000 but the linear
			 * ramp is anchored at 60000, so the first bucket spans
			 * 65000-69999 -> index 1; presumably intentional hw
			 * tuning — confirm before changing. */
			else if (memory_clock > 135000)
				mc_para_index = 0x0f;
			else
				mc_para_index = (u8)((memory_clock - 60000) / 5000);
		}
	} else {
		if (strobe_mode) {
			if (memory_clock < 10000)
				mc_para_index = 0x00;
			else if (memory_clock > 47500)
				mc_para_index = 0x0f;
			else
				mc_para_index = (u8)((memory_clock - 10000) / 2500);
		} else {
			if (memory_clock < 40000)
				mc_para_index = 0x00;
			else if (memory_clock > 115000)
				mc_para_index = 0x0f;
			else
				mc_para_index = (u8)((memory_clock - 40000) / 5000);
		}
	}
	return mc_para_index;
}
647
648static int cypress_populate_mvdd_value(struct radeon_device *rdev,
649 u32 mclk,
650 RV770_SMC_VOLTAGE_VALUE *voltage)
651{
652 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
653 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
654
655 if (!pi->mvdd_control) {
656 voltage->index = eg_pi->mvdd_high_index;
657 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
658 return 0;
659 }
660
661 if (mclk <= pi->mvdd_split_frequency) {
662 voltage->index = eg_pi->mvdd_low_index;
663 voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
664 } else {
665 voltage->index = eg_pi->mvdd_high_index;
666 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
667 }
668
669 return 0;
670}
671
/*
 * cypress_convert_power_level_to_smc() - fill one SMC performance level.
 * Converts a driver power level (sclk, mclk, voltages, flags) into the
 * RV770_SMC_HW_PERFORMANCE_LEVEL layout consumed by the SMC firmware.
 * Returns 0 on success, or the first error from a populate helper.
 */
int cypress_convert_power_level_to_smc(struct radeon_device *rdev,
				       struct rv7xx_pl *pl,
				       RV770_SMC_HW_PERFORMANCE_LEVEL *level,
				       u8 watermark_level)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	int ret;
	bool dll_state_on;

	/* PCIe gen2 only when the asic supports it AND the level requests it. */
	level->gen2PCIE = pi->pcie_gen2 ?
		((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
	level->gen2XSP = (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0;
	level->backbias = (pl->flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? 1 : 0;
	level->displayWatermark = watermark_level;

	ret = rv740_populate_sclk_value(rdev, pl->sclk, &level->sclk);
	if (ret)
		return ret;

	level->mcFlags = 0;
	/* Memory stutter mode: only below the threshold and while UVD is idle. */
	if (pi->mclk_stutter_mode_threshold &&
	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
	    !eg_pi->uvd_enabled) {
		level->mcFlags |= SMC_MC_STUTTER_EN;
		if (eg_pi->sclk_deep_sleep)
			level->stateFlags |= PPSMC_STATEFLAG_AUTO_PULSE_SKIP;
		else
			level->stateFlags &= ~PPSMC_STATEFLAG_AUTO_PULSE_SKIP;
	}

	if (pi->mem_gddr5) {
		/* Enable read/write EDC above the respective mclk thresholds. */
		if (pl->mclk > pi->mclk_edc_enable_threshold)
			level->mcFlags |= SMC_MC_EDC_RD_FLAG;

		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
			level->mcFlags |= SMC_MC_EDC_WR_FLAG;

		level->strobeMode = cypress_get_strobe_mode_settings(rdev, pl->mclk);

		/* In strobe mode the DLL on/off choice depends on whether this
		 * level's frequency ratio reaches the value in
		 * MC_SEQ_MISC7[19:16]; the enable bit is then read from
		 * MC_SEQ_MISC5 or MC_SEQ_MISC6 bit 1.
		 * NOTE(review): semantics inferred from register usage here —
		 * confirm against the MC sequencer documentation. */
		if (level->strobeMode & SMC_STROBE_ENABLE) {
			if (cypress_get_mclk_frequency_ratio(rdev, pl->mclk, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else
			dll_state_on = eg_pi->dll_default_on;

		ret = cypress_populate_mclk_value(rdev,
						  pl->sclk,
						  pl->mclk,
						  &level->mclk,
						  (level->strobeMode & SMC_STROBE_ENABLE) != 0,
						  dll_state_on);
	} else {
		/* Non-GDDR5: strobe and DLL are always treated as enabled. */
		ret = cypress_populate_mclk_value(rdev,
						  pl->sclk,
						  pl->mclk,
						  &level->mclk,
						  true,
						  true);
	}
	if (ret)
		return ret;

	ret = cypress_populate_voltage_value(rdev,
					     &eg_pi->vddc_voltage_table,
					     pl->vddc,
					     &level->vddc);
	if (ret)
		return ret;

	if (eg_pi->vddci_control) {
		ret = cypress_populate_voltage_value(rdev,
						     &eg_pi->vddci_voltage_table,
						     pl->vddci,
						     &level->vddci);
		if (ret)
			return ret;
	}

	ret = cypress_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);

	return ret;
}
758
759static int cypress_convert_power_state_to_smc(struct radeon_device *rdev,
760 struct radeon_ps *radeon_state,
761 RV770_SMC_SWSTATE *smc_state)
762{
763 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
764 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
765 int ret;
766
767 if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
768 smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
769
770 ret = cypress_convert_power_level_to_smc(rdev,
771 &state->low,
772 &smc_state->levels[0],
773 PPSMC_DISPLAY_WATERMARK_LOW);
774 if (ret)
775 return ret;
776
777 ret = cypress_convert_power_level_to_smc(rdev,
778 &state->medium,
779 &smc_state->levels[1],
780 PPSMC_DISPLAY_WATERMARK_LOW);
781 if (ret)
782 return ret;
783
784 ret = cypress_convert_power_level_to_smc(rdev,
785 &state->high,
786 &smc_state->levels[2],
787 PPSMC_DISPLAY_WATERMARK_HIGH);
788 if (ret)
789 return ret;
790
791 smc_state->levels[0].arbValue = MC_CG_ARB_FREQ_F1;
792 smc_state->levels[1].arbValue = MC_CG_ARB_FREQ_F2;
793 smc_state->levels[2].arbValue = MC_CG_ARB_FREQ_F3;
794
795 if (eg_pi->dynamic_ac_timing) {
796 smc_state->levels[0].ACIndex = 2;
797 smc_state->levels[1].ACIndex = 3;
798 smc_state->levels[2].ACIndex = 4;
799 } else {
800 smc_state->levels[0].ACIndex = 0;
801 smc_state->levels[1].ACIndex = 0;
802 smc_state->levels[2].ACIndex = 0;
803 }
804
805 rv770_populate_smc_sp(rdev, radeon_state, smc_state);
806
807 return rv770_populate_smc_t(rdev, radeon_state, smc_state);
808}
809
810static void cypress_convert_mc_registers(struct evergreen_mc_reg_entry *entry,
811 SMC_Evergreen_MCRegisterSet *data,
812 u32 num_entries, u32 valid_flag)
813{
814 u32 i, j;
815
816 for (i = 0, j = 0; j < num_entries; j++) {
817 if (valid_flag & (1 << j)) {
818 data->value[i] = cpu_to_be32(entry->mc_data[j]);
819 i++;
820 }
821 }
822}
823
824static void cypress_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
825 struct rv7xx_pl *pl,
826 SMC_Evergreen_MCRegisterSet *mc_reg_table_data)
827{
828 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
829 u32 i = 0;
830
831 for (i = 0; i < eg_pi->mc_reg_table.num_entries; i++) {
832 if (pl->mclk <=
833 eg_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
834 break;
835 }
836
837 if ((i == eg_pi->mc_reg_table.num_entries) && (i > 0))
838 --i;
839
840 cypress_convert_mc_registers(&eg_pi->mc_reg_table.mc_reg_table_entry[i],
841 mc_reg_table_data,
842 eg_pi->mc_reg_table.last,
843 eg_pi->mc_reg_table.valid_flag);
844}
845
846static void cypress_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
847 struct radeon_ps *radeon_state,
848 SMC_Evergreen_MCRegisters *mc_reg_table)
849{
850 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
851
852 cypress_convert_mc_reg_table_entry_to_smc(rdev,
853 &state->low,
854 &mc_reg_table->data[2]);
855 cypress_convert_mc_reg_table_entry_to_smc(rdev,
856 &state->medium,
857 &mc_reg_table->data[3]);
858 cypress_convert_mc_reg_table_entry_to_smc(rdev,
859 &state->high,
860 &mc_reg_table->data[4]);
861}
862
863int cypress_upload_sw_state(struct radeon_device *rdev,
864 struct radeon_ps *radeon_new_state)
865{
866 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
867 u16 address = pi->state_table_start +
868 offsetof(RV770_SMC_STATETABLE, driverState);
869 RV770_SMC_SWSTATE state = { 0 };
870 int ret;
871
872 ret = cypress_convert_power_state_to_smc(rdev, radeon_new_state, &state);
873 if (ret)
874 return ret;
875
876 return rv770_copy_bytes_to_smc(rdev, address, (u8 *)&state,
877 sizeof(RV770_SMC_SWSTATE),
878 pi->sram_end);
879}
880
/*
 * cypress_upload_mc_reg_table() - upload driver-state MC register sets.
 * Converts the new power state's three levels into MC register sets and
 * copies only the driver slots (data[2..4]) into SMC SRAM; slots 0/1
 * (boot/initial) were uploaded by cypress_populate_mc_reg_table().
 * Returns 0 on success or the SMC copy error.
 */
int cypress_upload_mc_reg_table(struct radeon_device *rdev,
				struct radeon_ps *radeon_new_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	SMC_Evergreen_MCRegisters mc_reg_table = { 0 };
	u16 address;

	cypress_convert_mc_reg_table_to_smc(rdev, radeon_new_state, &mc_reg_table);

	/* Start of the driver slots within the SMC-side table. */
	address = eg_pi->mc_reg_table_start +
		(u16)offsetof(SMC_Evergreen_MCRegisters, data[2]);

	return rv770_copy_bytes_to_smc(rdev, address,
				       (u8 *)&mc_reg_table.data[2],
				       sizeof(SMC_Evergreen_MCRegisterSet) * 3,
				       pi->sram_end);
}
899
900u32 cypress_calculate_burst_time(struct radeon_device *rdev,
901 u32 engine_clock, u32 memory_clock)
902{
903 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
904 u32 multiplier = pi->mem_gddr5 ? 1 : 2;
905 u32 result = (4 * multiplier * engine_clock) / (memory_clock / 2);
906 u32 burst_time;
907
908 if (result <= 4)
909 burst_time = 0;
910 else if (result < 8)
911 burst_time = result - 4;
912 else {
913 burst_time = result / 2 ;
914 if (burst_time > 18)
915 burst_time = 18;
916 }
917
918 return burst_time;
919}
920
/*
 * cypress_program_memory_timing_parameters() - set per-state burst times.
 * Rewrites the STATE1..STATE3 burst-time fields of MC_ARB_BURST_TIME from
 * the new state's low/medium/high sclk/mclk pairs, then lets the rv730
 * helper program the remaining memory timing registers.
 */
void cypress_program_memory_timing_parameters(struct radeon_device *rdev,
					      struct radeon_ps *radeon_new_state)
{
	struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state);
	u32 mc_arb_burst_time = RREG32(MC_ARB_BURST_TIME);

	/* Clear only the fields we are about to rewrite. */
	mc_arb_burst_time &= ~(STATE1_MASK | STATE2_MASK | STATE3_MASK);

	mc_arb_burst_time |= STATE1(cypress_calculate_burst_time(rdev,
						new_state->low.sclk,
						new_state->low.mclk));
	mc_arb_burst_time |= STATE2(cypress_calculate_burst_time(rdev,
						new_state->medium.sclk,
						new_state->medium.mclk));
	mc_arb_burst_time |= STATE3(cypress_calculate_burst_time(rdev,
						new_state->high.sclk,
						new_state->high.mclk));

	rv730_program_memory_timing_parameters(rdev, radeon_new_state);

	WREG32(MC_ARB_BURST_TIME, mc_arb_burst_time);
}
943
944static void cypress_populate_mc_reg_addresses(struct radeon_device *rdev,
945 SMC_Evergreen_MCRegisters *mc_reg_table)
946{
947 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
948 u32 i, j;
949
950 for (i = 0, j = 0; j < eg_pi->mc_reg_table.last; j++) {
951 if (eg_pi->mc_reg_table.valid_flag & (1 << j)) {
952 mc_reg_table->address[i].s0 =
953 cpu_to_be16(eg_pi->mc_reg_table.mc_reg_address[j].s0);
954 mc_reg_table->address[i].s1 =
955 cpu_to_be16(eg_pi->mc_reg_table.mc_reg_address[j].s1);
956 i++;
957 }
958 }
959
960 mc_reg_table->last = (u8)i;
961}
962
963static void cypress_set_mc_reg_address_table(struct radeon_device *rdev)
964{
965 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
966 u32 i = 0;
967
968 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RAS_TIMING_LP >> 2;
969 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RAS_TIMING >> 2;
970 i++;
971
972 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_CAS_TIMING_LP >> 2;
973 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_CAS_TIMING >> 2;
974 i++;
975
976 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC_TIMING_LP >> 2;
977 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC_TIMING >> 2;
978 i++;
979
980 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC_TIMING2_LP >> 2;
981 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC_TIMING2 >> 2;
982 i++;
983
984 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RD_CTL_D0_LP >> 2;
985 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RD_CTL_D0 >> 2;
986 i++;
987
988 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RD_CTL_D1_LP >> 2;
989 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RD_CTL_D1 >> 2;
990 i++;
991
992 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_WR_CTL_D0_LP >> 2;
993 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_WR_CTL_D0 >> 2;
994 i++;
995
996 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_WR_CTL_D1_LP >> 2;
997 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_WR_CTL_D1 >> 2;
998 i++;
999
1000 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
1001 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_EMRS >> 2;
1002 i++;
1003
1004 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
1005 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_MRS >> 2;
1006 i++;
1007
1008 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
1009 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_MRS1 >> 2;
1010 i++;
1011
1012 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC1 >> 2;
1013 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC1 >> 2;
1014 i++;
1015
1016 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RESERVE_M >> 2;
1017 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RESERVE_M >> 2;
1018 i++;
1019
1020 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC3 >> 2;
1021 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC3 >> 2;
1022 i++;
1023
1024 eg_pi->mc_reg_table.last = (u8)i;
1025}
1026
/*
 * cypress_retrieve_ac_timing_for_one_entry() - snapshot live MC registers.
 * Reads every tracked live (s1) MC register and stores its current value
 * into the given mc_reg_table entry. Addresses are held as dword offsets,
 * hence the << 2 back to a byte address.
 */
static void cypress_retrieve_ac_timing_for_one_entry(struct radeon_device *rdev,
						     struct evergreen_mc_reg_entry *entry)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	u32 i;

	for (i = 0; i < eg_pi->mc_reg_table.last; i++)
		entry->mc_data[i] =
			RREG32(eg_pi->mc_reg_table.mc_reg_address[i].s1 << 2);

}
1038
/*
 * cypress_retrieve_ac_timing_for_all_ranges() - build the MC reg table.
 * For each VBIOS mclk range: program that range's AC timing via atombios,
 * then snapshot the resulting live MC register values into the table.
 * Afterwards, valid_flag gets one bit per tracked register that actually
 * changes between any two adjacent ranges (constant registers need not be
 * re-uploaded per level).
 */
static void cypress_retrieve_ac_timing_for_all_ranges(struct radeon_device *rdev,
						      struct atom_memory_clock_range_table *range_table)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	u32 i, j;

	for (i = 0; i < range_table->num_entries; i++) {
		eg_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max =
			range_table->mclk[i];
		radeon_atom_set_ac_timing(rdev, range_table->mclk[i]);
		cypress_retrieve_ac_timing_for_one_entry(rdev,
							 &eg_pi->mc_reg_table.mc_reg_table_entry[i]);
	}

	eg_pi->mc_reg_table.num_entries = range_table->num_entries;
	eg_pi->mc_reg_table.valid_flag = 0;

	/* Mark register i valid if its value differs across any range pair. */
	for (i = 0; i < eg_pi->mc_reg_table.last; i++) {
		for (j = 1; j < range_table->num_entries; j++) {
			if (eg_pi->mc_reg_table.mc_reg_table_entry[j-1].mc_data[i] !=
			    eg_pi->mc_reg_table.mc_reg_table_entry[j].mc_data[i]) {
				eg_pi->mc_reg_table.valid_flag |= (1 << i);
				break;
			}
		}
	}
}
1066
1067static int cypress_initialize_mc_reg_table(struct radeon_device *rdev)
1068{
1069 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1070 u8 module_index = rv770_get_memory_module_index(rdev);
1071 struct atom_memory_clock_range_table range_table = { 0 };
1072 int ret;
1073
1074 ret = radeon_atom_get_mclk_range_table(rdev,
1075 pi->mem_gddr5,
1076 module_index, &range_table);
1077 if (ret)
1078 return ret;
1079
1080 cypress_retrieve_ac_timing_for_all_ranges(rdev, &range_table);
1081
1082 return 0;
1083}
1084
/*
 * cypress_wait_for_mc_sequencer() - wait for an MC sequencer response.
 * Iterates over the asic's memory channels (4 on Cypress/Hemlock, 1 on
 * Cedar, 2 otherwise), selecting each channel for reads and polling
 * MC_SEQ_CG's CG_SEQ_RESP field until it matches 'value' or
 * rdev->usec_timeout microseconds elapse. Timeouts are silent.
 */
static void cypress_wait_for_mc_sequencer(struct radeon_device *rdev, u8 value)
{
	u32 i, j;
	u32 channels = 2;

	if ((rdev->family == CHIP_CYPRESS) ||
	    (rdev->family == CHIP_HEMLOCK))
		channels = 4;
	else if (rdev->family == CHIP_CEDAR)
		channels = 1;

	for (i = 0; i < channels; i++) {
		/* Cypress/Hemlock address channels via the MCD registers. */
		if ((rdev->family == CHIP_CYPRESS) ||
		    (rdev->family == CHIP_HEMLOCK)) {
			WREG32_P(MC_CONFIG_MCD, MC_RD_ENABLE_MCD(i), ~MC_RD_ENABLE_MCD_MASK);
			WREG32_P(MC_CG_CONFIG_MCD, MC_RD_ENABLE_MCD(i), ~MC_RD_ENABLE_MCD_MASK);
		} else {
			WREG32_P(MC_CONFIG, MC_RD_ENABLE(i), ~MC_RD_ENABLE_MASK);
			WREG32_P(MC_CG_CONFIG, MC_RD_ENABLE(i), ~MC_RD_ENABLE_MASK);
		}
		for (j = 0; j < rdev->usec_timeout; j++) {
			if (((RREG32(MC_SEQ_CG) & CG_SEQ_RESP_MASK) >> CG_SEQ_RESP_SHIFT) == value)
				break;
			udelay(1);
		}
	}
}
1112
/*
 * cypress_force_mc_use_s1() - switch the MC to the S1 register set.
 * Sequence: restore AC timing for the boot mclk, enable all MC read
 * channels, wait one vblank per CRTC, suspend YCLK, request the S1 DRAM
 * configuration with the current strobe mode, poll for PMG_PWRSTATE,
 * then resume YCLK. No-op if PMG_PWRSTATE is already set.
 */
static void cypress_force_mc_use_s1(struct radeon_device *rdev,
				    struct radeon_ps *radeon_boot_state)
{
	struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
	u32 strobe_mode;
	u32 mc_seq_cg;
	int i;

	/* Already in the target power state. */
	if (RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE)
		return;

	radeon_atom_set_ac_timing(rdev, boot_state->low.mclk);
	radeon_mc_wait_for_idle(rdev);

	/* Enable all memory channels for the switch. */
	if ((rdev->family == CHIP_CYPRESS) ||
	    (rdev->family == CHIP_HEMLOCK)) {
		WREG32(MC_CONFIG_MCD, 0xf);
		WREG32(MC_CG_CONFIG_MCD, 0xf);
	} else {
		WREG32(MC_CONFIG, 0xf);
		WREG32(MC_CG_CONFIG, 0xf);
	}

	for (i = 0; i < rdev->num_crtc; i++)
		radeon_wait_for_vblank(rdev, i);

	WREG32(MC_SEQ_CG, MC_CG_SEQ_YCLK_SUSPEND);
	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_SUSPEND);

	strobe_mode = cypress_get_strobe_mode_settings(rdev,
						       boot_state->low.mclk);

	mc_seq_cg = CG_SEQ_REQ(MC_CG_SEQ_DRAMCONF_S1);
	mc_seq_cg |= SEQ_CG_RESP(strobe_mode);
	WREG32(MC_SEQ_CG, mc_seq_cg);

	/* Wait (bounded) for the sequencer to report the new power state. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE)
			break;
		udelay(1);
	}

	mc_seq_cg &= ~CG_SEQ_REQ_MASK;
	mc_seq_cg |= CG_SEQ_REQ(MC_CG_SEQ_YCLK_RESUME);
	WREG32(MC_SEQ_CG, mc_seq_cg);

	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_RESUME);
}
1161
1162static void cypress_copy_ac_timing_from_s1_to_s0(struct radeon_device *rdev)
1163{
1164 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1165 u32 value;
1166 u32 i;
1167
1168 for (i = 0; i < eg_pi->mc_reg_table.last; i++) {
1169 value = RREG32(eg_pi->mc_reg_table.mc_reg_address[i].s1 << 2);
1170 WREG32(eg_pi->mc_reg_table.mc_reg_address[i].s0 << 2, value);
1171 }
1172}
1173
/*
 * cypress_force_mc_use_s0() - switch the MC back to the S0 register set.
 * Mirrors the live (s1) AC timing into the s0 shadows first so the switch
 * is glitch-free, then performs the same YCLK suspend / DRAM-config /
 * YCLK resume dance as cypress_force_mc_use_s1(), polling until
 * PMG_PWRSTATE clears.
 */
static void cypress_force_mc_use_s0(struct radeon_device *rdev,
				    struct radeon_ps *radeon_boot_state)
{
	struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
	u32 strobe_mode;
	u32 mc_seq_cg;
	int i;

	cypress_copy_ac_timing_from_s1_to_s0(rdev);
	radeon_mc_wait_for_idle(rdev);

	/* Enable all memory channels for the switch. */
	if ((rdev->family == CHIP_CYPRESS) ||
	    (rdev->family == CHIP_HEMLOCK)) {
		WREG32(MC_CONFIG_MCD, 0xf);
		WREG32(MC_CG_CONFIG_MCD, 0xf);
	} else {
		WREG32(MC_CONFIG, 0xf);
		WREG32(MC_CG_CONFIG, 0xf);
	}

	for (i = 0; i < rdev->num_crtc; i++)
		radeon_wait_for_vblank(rdev, i);

	WREG32(MC_SEQ_CG, MC_CG_SEQ_YCLK_SUSPEND);
	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_SUSPEND);

	strobe_mode = cypress_get_strobe_mode_settings(rdev,
						       boot_state->low.mclk);

	mc_seq_cg = CG_SEQ_REQ(MC_CG_SEQ_DRAMCONF_S0);
	mc_seq_cg |= SEQ_CG_RESP(strobe_mode);
	WREG32(MC_SEQ_CG, mc_seq_cg);

	/* Wait (bounded) for the sequencer to leave the S1 power state. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (!(RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE))
			break;
		udelay(1);
	}

	mc_seq_cg &= ~CG_SEQ_REQ_MASK;
	mc_seq_cg |= CG_SEQ_REQ(MC_CG_SEQ_YCLK_RESUME);
	WREG32(MC_SEQ_CG, mc_seq_cg);

	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_RESUME);
}
1219
1220static int cypress_populate_initial_mvdd_value(struct radeon_device *rdev,
1221 RV770_SMC_VOLTAGE_VALUE *voltage)
1222{
1223 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1224
1225 voltage->index = eg_pi->mvdd_high_index;
1226 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
1227
1228 return 0;
1229}
1230
/*
 * cypress_populate_smc_initial_state() - fill the SMC "initialState".
 * Seeds level 0 from the boot state's low performance level using the
 * clock register values cached in pi->clk_regs at init time, then
 * duplicates level 0 into levels 1 and 2. Always returns 0.
 */
int cypress_populate_smc_initial_state(struct radeon_device *rdev,
				       struct radeon_ps *radeon_initial_state,
				       RV770_SMC_STATETABLE *table)
{
	struct rv7xx_ps *initial_state = rv770_get_ps(radeon_initial_state);
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	u32 a_t;

	/* Memory PLL (MPLL) registers, big-endian for the SMC. */
	table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
		cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl);
	table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
		cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl_2);
	table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
		cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl);
	table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
		cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl_2);
	table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
		cpu_to_be32(pi->clk_regs.rv770.mclk_pwrmgt_cntl);
	table->initialState.levels[0].mclk.mclk770.vDLL_CNTL =
		cpu_to_be32(pi->clk_regs.rv770.dll_cntl);

	/* MPLL spread spectrum settings. */
	table->initialState.levels[0].mclk.mclk770.vMPLL_SS =
		cpu_to_be32(pi->clk_regs.rv770.mpll_ss1);
	table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 =
		cpu_to_be32(pi->clk_regs.rv770.mpll_ss2);

	table->initialState.levels[0].mclk.mclk770.mclk_value =
		cpu_to_be32(initial_state->low.mclk);

	/* Engine PLL (SPLL) registers and spread spectrum. */
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_2);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_3);
	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
		cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum);
	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum_2);

	table->initialState.levels[0].sclk.sclk_value =
		cpu_to_be32(initial_state->low.sclk);

	/* Boot-time MC arbitration set and AC-timing index. */
	table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;

	table->initialState.levels[0].ACIndex = 0;

	cypress_populate_voltage_value(rdev,
				       &eg_pi->vddc_voltage_table,
				       initial_state->low.vddc,
				       &table->initialState.levels[0].vddc);

	if (eg_pi->vddci_control)
		cypress_populate_voltage_value(rdev,
					       &eg_pi->vddci_voltage_table,
					       initial_state->low.vddci,
					       &table->initialState.levels[0].vddci);

	cypress_populate_initial_mvdd_value(rdev,
					    &table->initialState.levels[0].mvdd);

	/* NOTE(review): CG_R/CG_L pair — presumably the a_t ramp parameters;
	 * confirm against the SMC interface definition. */
	a_t = CG_R(0xffff) | CG_L(0);
	table->initialState.levels[0].aT = cpu_to_be32(a_t);

	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);


	/* PCIe gen2 state as configured at boot. */
	if (pi->boot_in_gen2)
		table->initialState.levels[0].gen2PCIE = 1;
	else
		table->initialState.levels[0].gen2PCIE = 0;
	if (initial_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
		table->initialState.levels[0].gen2XSP = 1;
	else
		table->initialState.levels[0].gen2XSP = 0;

	/* GDDR5 additionally needs strobe-mode and EDC flags. */
	if (pi->mem_gddr5) {
		table->initialState.levels[0].strobeMode =
			cypress_get_strobe_mode_settings(rdev,
							 initial_state->low.mclk);

		if (initial_state->low.mclk > pi->mclk_edc_enable_threshold)
			table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
		else
			table->initialState.levels[0].mcFlags = 0;
	}

	/* All three levels of the initial state are identical. */
	table->initialState.levels[1] = table->initialState.levels[0];
	table->initialState.levels[2] = table->initialState.levels[0];

	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;

	return 0;
}
1326
/*
 * cypress_populate_smc_acpi_state() - build the SMC ACPI (lowest) state.
 * Starts from a copy of the initial state, then overrides it with the
 * minimal-power configuration: ACPI (or minimum-table) VDDC, PLLs reset /
 * bypassed, memory DLLs held in reset and bypassed, and zero clock values.
 * Always returns 0.
 */
int cypress_populate_smc_acpi_state(struct radeon_device *rdev,
				    RV770_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	/* Start from the register values cached at init and modify below. */
	u32 mpll_ad_func_cntl =
		pi->clk_regs.rv770.mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2 =
		pi->clk_regs.rv770.mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl =
		pi->clk_regs.rv770.mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2 =
		pi->clk_regs.rv770.mpll_dq_func_cntl_2;
	u32 spll_func_cntl =
		pi->clk_regs.rv770.cg_spll_func_cntl;
	u32 spll_func_cntl_2 =
		pi->clk_regs.rv770.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 =
		pi->clk_regs.rv770.cg_spll_func_cntl_3;
	u32 mclk_pwrmgt_cntl =
		pi->clk_regs.rv770.mclk_pwrmgt_cntl;
	u32 dll_cntl =
		pi->clk_regs.rv770.dll_cntl;

	table->ACPIState = table->initialState;

	/* The ACPI state is never a DC (battery) state. */
	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc) {
		cypress_populate_voltage_value(rdev,
					       &eg_pi->vddc_voltage_table,
					       pi->acpi_vddc,
					       &table->ACPIState.levels[0].vddc);
		/* Gen2 in ACPI only if both the asic and the ACPI config allow it. */
		if (pi->pcie_gen2) {
			if (pi->acpi_pcie_gen2)
				table->ACPIState.levels[0].gen2PCIE = 1;
			else
				table->ACPIState.levels[0].gen2PCIE = 0;
		} else
			table->ACPIState.levels[0].gen2PCIE = 0;
		if (pi->acpi_pcie_gen2)
			table->ACPIState.levels[0].gen2XSP = 1;
		else
			table->ACPIState.levels[0].gen2XSP = 0;
	} else {
		/* No ACPI VDDC: fall back to the lowest voltage in the table. */
		cypress_populate_voltage_value(rdev,
					       &eg_pi->vddc_voltage_table,
					       pi->min_vddc_in_table,
					       &table->ACPIState.levels[0].vddc);
		table->ACPIState.levels[0].gen2PCIE = 0;
	}

	if (eg_pi->acpi_vddci) {
		if (eg_pi->vddci_control) {
			cypress_populate_voltage_value(rdev,
						       &eg_pi->vddci_voltage_table,
						       eg_pi->acpi_vddci,
						       &table->ACPIState.levels[0].vddci);
		}
	}

	/* Power down the memory PLL... */
	mpll_ad_func_cntl &= ~PDNB;

	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;

	/* ...including the DQ side on GDDR5 boards. */
	if (pi->mem_gddr5)
		mpll_dq_func_cntl &= ~PDNB;
	mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;

	/* Hold all memory read clock DLLs in reset... */
	mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
			     MRDCKA1_RESET |
			     MRDCKB0_RESET |
			     MRDCKB1_RESET |
			     MRDCKC0_RESET |
			     MRDCKC1_RESET |
			     MRDCKD0_RESET |
			     MRDCKD1_RESET);

	/* ...and powered down... */
	mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
			      MRDCKA1_PDNB |
			      MRDCKB0_PDNB |
			      MRDCKB1_PDNB |
			      MRDCKC0_PDNB |
			      MRDCKC1_PDNB |
			      MRDCKD0_PDNB |
			      MRDCKD1_PDNB);

	/* ...with every DLL bypassed. */
	dll_cntl |= (MRDCKA0_BYPASS |
		     MRDCKA1_BYPASS |
		     MRDCKB0_BYPASS |
		     MRDCKB1_BYPASS |
		     MRDCKC0_BYPASS |
		     MRDCKC1_BYPASS |
		     MRDCKD0_BYPASS |
		     MRDCKD1_BYPASS);

	/* evergreen only */
	if (rdev->family <= CHIP_HEMLOCK)
		spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN;

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
		cpu_to_be32(mpll_ad_func_cntl);
	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
		cpu_to_be32(mpll_ad_func_cntl_2);
	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
		cpu_to_be32(mpll_dq_func_cntl);
	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
		cpu_to_be32(mpll_dq_func_cntl_2);
	table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
		cpu_to_be32(mclk_pwrmgt_cntl);
	table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);

	table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;

	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
		cpu_to_be32(spll_func_cntl);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
		cpu_to_be32(spll_func_cntl_2);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
		cpu_to_be32(spll_func_cntl_3);

	table->ACPIState.levels[0].sclk.sclk_value = 0;

	/* mclk 0 selects the high MVDD level (no mvdd_control split). */
	cypress_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);

	if (eg_pi->dynamic_ac_timing)
		table->ACPIState.levels[0].ACIndex = 1;

	/* All three ACPI levels are identical. */
	table->ACPIState.levels[1] = table->ACPIState.levels[0];
	table->ACPIState.levels[2] = table->ACPIState.levels[0];

	return 0;
}
1463
1464static void cypress_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
1465 struct atom_voltage_table *voltage_table)
1466{
1467 unsigned int i, diff;
1468
1469 if (voltage_table->count <= MAX_NO_VREG_STEPS)
1470 return;
1471
1472 diff = voltage_table->count - MAX_NO_VREG_STEPS;
1473
1474 for (i= 0; i < MAX_NO_VREG_STEPS; i++)
1475 voltage_table->entries[i] = voltage_table->entries[i + diff];
1476
1477 voltage_table->count = MAX_NO_VREG_STEPS;
1478}
1479
1480int cypress_construct_voltage_tables(struct radeon_device *rdev)
1481{
1482 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1483 int ret;
1484
1485 ret = radeon_atom_get_voltage_table(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0,
1486 &eg_pi->vddc_voltage_table);
1487 if (ret)
1488 return ret;
1489
1490 if (eg_pi->vddc_voltage_table.count > MAX_NO_VREG_STEPS)
1491 cypress_trim_voltage_table_to_fit_state_table(rdev,
1492 &eg_pi->vddc_voltage_table);
1493
1494 if (eg_pi->vddci_control) {
1495 ret = radeon_atom_get_voltage_table(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0,
1496 &eg_pi->vddci_voltage_table);
1497 if (ret)
1498 return ret;
1499
1500 if (eg_pi->vddci_voltage_table.count > MAX_NO_VREG_STEPS)
1501 cypress_trim_voltage_table_to_fit_state_table(rdev,
1502 &eg_pi->vddci_voltage_table);
1503 }
1504
1505 return 0;
1506}
1507
/*
 * cypress_populate_smc_voltage_table() - export one voltage table's SMIO
 * settings into the SMC state table.
 * lowSMIO is OR-merged so the VDDC and VDDCI tables can both contribute;
 * the caller zeroes the whole table first (memset in
 * cypress_init_smc_table()).
 */
static void cypress_populate_smc_voltage_table(struct radeon_device *rdev,
					       struct atom_voltage_table *voltage_table,
					       RV770_SMC_STATETABLE *table)
{
	unsigned int i;

	for (i = 0; i < voltage_table->count; i++) {
		table->highSMIO[i] = 0;
		table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
	}
}
1519
1520int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
1521 RV770_SMC_STATETABLE *table)
1522{
1523 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1524 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1525 unsigned char i;
1526
1527 if (eg_pi->vddc_voltage_table.count) {
1528 cypress_populate_smc_voltage_table(rdev,
1529 &eg_pi->vddc_voltage_table,
1530 table);
1531
1532 table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0;
1533 table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] =
1534 cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
1535
1536 for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
1537 if (pi->max_vddc_in_table <=
1538 eg_pi->vddc_voltage_table.entries[i].value) {
1539 table->maxVDDCIndexInPPTable = i;
1540 break;
1541 }
1542 }
1543 }
1544
1545 if (eg_pi->vddci_voltage_table.count) {
1546 cypress_populate_smc_voltage_table(rdev,
1547 &eg_pi->vddci_voltage_table,
1548 table);
1549
1550 table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
1551 table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
1552 cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
1553 }
1554
1555 return 0;
1556}
1557
1558static u32 cypress_get_mclk_split_point(struct atom_memory_info *memory_info)
1559{
1560 if ((memory_info->mem_type == MEM_TYPE_GDDR3) ||
1561 (memory_info->mem_type == MEM_TYPE_DDR3))
1562 return 30000;
1563
1564 return 0;
1565}
1566
/*
 * cypress_get_mvdd_configuration() - derive the MVDD setup for the board.
 * Picks the high/low MVDD indices from the backbias strap in
 * GENERAL_PWRMGT and computes the MVDD split frequency from the memory
 * type. Disables MVDD control (pi->mvdd_control = false) when the
 * backbias pad is unused, the memory info can't be read, or the memory
 * type has no split point. Always returns 0.
 */
int cypress_get_mvdd_configuration(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	u8 module_index;
	struct atom_memory_info memory_info;
	u32 tmp = RREG32(GENERAL_PWRMGT);

	/* No backbias pad: fixed index assignment, MVDD control off. */
	if (!(tmp & BACKBIAS_PAD_EN)) {
		eg_pi->mvdd_high_index = 0;
		eg_pi->mvdd_low_index = 1;
		pi->mvdd_control = false;
		return 0;
	}

	/* The backbias value strap decides which index is "high". */
	if (tmp & BACKBIAS_VALUE)
		eg_pi->mvdd_high_index = 1;
	else
		eg_pi->mvdd_high_index = 0;

	eg_pi->mvdd_low_index =
		(eg_pi->mvdd_high_index == 0) ? 1 : 0;

	module_index = rv770_get_memory_module_index(rdev);

	if (radeon_atom_get_memory_info(rdev, module_index, &memory_info)) {
		pi->mvdd_control = false;
		return 0;
	}

	pi->mvdd_split_frequency =
		cypress_get_mclk_split_point(&memory_info);

	/* Memory type without a split point: MVDD control off. */
	if (pi->mvdd_split_frequency == 0) {
		pi->mvdd_control = false;
		return 0;
	}

	return 0;
}
1607
/*
 * cypress_init_smc_table() - build and upload the full SMC state table.
 * Assembles voltage tables, thermal protection type, platform capability
 * flags, and the initial + ACPI states, then copies the whole
 * RV770_SMC_STATETABLE into SMC SRAM at pi->state_table_start.
 * Returns 0 on success or the first populate/copy error.
 */
static int cypress_init_smc_table(struct radeon_device *rdev,
				  struct radeon_ps *radeon_boot_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	RV770_SMC_STATETABLE *table = &pi->smc_statetable;
	int ret;

	/* Start from a zeroed table; the voltage helpers OR into it. */
	memset(table, 0, sizeof(RV770_SMC_STATETABLE));

	cypress_populate_smc_voltage_tables(rdev, table);

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
		break;
	case THERMAL_TYPE_NONE:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
		break;
	default:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
		break;
	}

	/* Translate platform capability bits into SMC system flags. */
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (pi->mem_gddr5)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	ret = cypress_populate_smc_initial_state(rdev, radeon_boot_state, table);
	if (ret)
		return ret;

	ret = cypress_populate_smc_acpi_state(rdev, table);
	if (ret)
		return ret;

	/* The driver state starts identical to the initial (boot) state. */
	table->driverState = table->initialState;

	return rv770_copy_bytes_to_smc(rdev,
				       pi->state_table_start,
				       (u8 *)table, sizeof(RV770_SMC_STATETABLE),
				       pi->sram_end);
}
1659
/*
 * cypress_populate_mc_reg_table() - build and upload the full MC reg table.
 * Layout: data[0] = boot state's low level, data[1] = first AC-timing
 * entry, data[2..4] = driver low/medium/high levels. Also writes 1 into
 * the SMC's seq_index soft register before the upload.
 * Returns 0 on success or the SMC copy error.
 */
int cypress_populate_mc_reg_table(struct radeon_device *rdev,
				  struct radeon_ps *radeon_boot_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
	SMC_Evergreen_MCRegisters mc_reg_table = { 0 };

	rv770_write_smc_soft_register(rdev,
				      RV770_SMC_SOFT_REGISTER_seq_index, 1);

	cypress_populate_mc_reg_addresses(rdev, &mc_reg_table);

	cypress_convert_mc_reg_table_entry_to_smc(rdev,
						  &boot_state->low,
						  &mc_reg_table.data[0]);

	cypress_convert_mc_registers(&eg_pi->mc_reg_table.mc_reg_table_entry[0],
				     &mc_reg_table.data[1], eg_pi->mc_reg_table.last,
				     eg_pi->mc_reg_table.valid_flag);

	cypress_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, &mc_reg_table);

	return rv770_copy_bytes_to_smc(rdev, eg_pi->mc_reg_table_start,
				       (u8 *)&mc_reg_table, sizeof(SMC_Evergreen_MCRegisters),
				       pi->sram_end);
}
1687
1688int cypress_get_table_locations(struct radeon_device *rdev)
1689{
1690 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1691 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1692 u32 tmp;
1693 int ret;
1694
1695 ret = rv770_read_smc_sram_dword(rdev,
1696 EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
1697 EVERGREEN_SMC_FIRMWARE_HEADER_stateTable,
1698 &tmp, pi->sram_end);
1699 if (ret)
1700 return ret;
1701
1702 pi->state_table_start = (u16)tmp;
1703
1704 ret = rv770_read_smc_sram_dword(rdev,
1705 EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
1706 EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters,
1707 &tmp, pi->sram_end);
1708 if (ret)
1709 return ret;
1710
1711 pi->soft_regs_start = (u16)tmp;
1712
1713 ret = rv770_read_smc_sram_dword(rdev,
1714 EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
1715 EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable,
1716 &tmp, pi->sram_end);
1717 if (ret)
1718 return ret;
1719
1720 eg_pi->mc_reg_table_start = (u16)tmp;
1721
1722 return 0;
1723}
1724
1725void cypress_enable_display_gap(struct radeon_device *rdev)
1726{
1727 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
1728
1729 tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
1730 tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
1731 DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
1732
1733 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
1734 tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
1735 DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
1736 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
1737}
1738
1739static void cypress_program_display_gap(struct radeon_device *rdev)
1740{
1741 u32 tmp, pipe;
1742 int i;
1743
1744 tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
1745 if (rdev->pm.dpm.new_active_crtc_count > 0)
1746 tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1747 else
1748 tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1749
1750 if (rdev->pm.dpm.new_active_crtc_count > 1)
1751 tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1752 else
1753 tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1754
1755 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
1756
1757 tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
1758 pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
1759
1760 if ((rdev->pm.dpm.new_active_crtc_count > 0) &&
1761 (!(rdev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
1762 /* find the first active crtc */
1763 for (i = 0; i < rdev->num_crtc; i++) {
1764 if (rdev->pm.dpm.new_active_crtcs & (1 << i))
1765 break;
1766 }
1767 if (i == rdev->num_crtc)
1768 pipe = 0;
1769 else
1770 pipe = i;
1771
1772 tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
1773 tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
1774 WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
1775 }
1776
1777 cypress_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
1778}
1779
1780void cypress_dpm_setup_asic(struct radeon_device *rdev)
1781{
1782 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1783
1784 rv740_read_clock_registers(rdev);
1785 rv770_read_voltage_smio_registers(rdev);
1786 rv770_get_max_vddc(rdev);
1787 rv770_get_memory_type(rdev);
1788
1789 if (eg_pi->pcie_performance_request)
1790 eg_pi->pcie_performance_request_registered = false;
1791
1792 if (eg_pi->pcie_performance_request)
1793 cypress_advertise_gen2_capability(rdev);
1794
1795 rv770_get_pcie_gen2_status(rdev);
1796
1797 rv770_enable_acpi_pm(rdev);
1798}
1799
/*
 * cypress_dpm_enable() - full DPM bring-up sequence for Evergreen parts.
 *
 * Builds the voltage/MVDD/MC-register tables, programs the display gap and
 * engine-speed parameters, uploads and starts the SMC firmware, enables
 * sclk/mclk control and clock gating, and finally arms the thermal
 * interrupt.  The ordering of the hardware programming below is
 * significant; do not reorder steps.
 *
 * Returns 0 on success, -EINVAL if DPM is already enabled, or the error
 * code of the first failing step (each failure is logged).
 */
int cypress_dpm_enable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	if (pi->gfx_clock_gating)
		rv770_restore_cgcg(rdev);

	/* refuse to enable twice */
	if (rv770_dpm_enabled(rdev))
		return -EINVAL;

	if (pi->voltage_control) {
		rv770_enable_voltage_control(rdev, true);
		ret = cypress_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("cypress_construct_voltage_tables failed\n");
			return ret;
		}
	}

	if (pi->mvdd_control) {
		ret = cypress_get_mvdd_configuration(rdev);
		if (ret) {
			DRM_ERROR("cypress_get_mvdd_configuration failed\n");
			return ret;
		}
	}

	if (eg_pi->dynamic_ac_timing) {
		/*
		 * Build the MC register table against register set s0, then
		 * switch back to s1.  Failure here is non-fatal: fall back
		 * to static AC timing instead of aborting.
		 */
		cypress_set_mc_reg_address_table(rdev);
		cypress_force_mc_use_s0(rdev, boot_ps);
		ret = cypress_initialize_mc_reg_table(rdev);
		if (ret)
			eg_pi->dynamic_ac_timing = false;
		cypress_force_mc_use_s1(rdev, boot_ps);
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
		rv770_enable_backbias(rdev, true);

	if (pi->dynamic_ss)
		cypress_enable_spread_spectrum(rdev, true);

	if (pi->thermal_protection)
		rv770_enable_thermal_protection(rdev, true);

	/* program the static timing/speed parameters before firmware upload */
	rv770_setup_bsp(rdev);
	rv770_program_git(rdev);
	rv770_program_tp(rdev);
	rv770_program_tpp(rdev);
	rv770_program_sstp(rdev);
	rv770_program_engine_speed_parameters(rdev);
	cypress_enable_display_gap(rdev);
	rv770_program_vc(rdev);

	if (pi->dynamic_pcie_gen2)
		cypress_enable_dynamic_pcie_gen2(rdev, true);

	ret = rv770_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("rv770_upload_firmware failed\n");
		return ret;
	}

	/* table offsets must be read from the uploaded firmware header first */
	ret = cypress_get_table_locations(rdev);
	if (ret) {
		DRM_ERROR("cypress_get_table_locations failed\n");
		return ret;
	}
	ret = cypress_init_smc_table(rdev, boot_ps);
	if (ret) {
		DRM_ERROR("cypress_init_smc_table failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = cypress_populate_mc_reg_table(rdev, boot_ps);
		if (ret) {
			DRM_ERROR("cypress_populate_mc_reg_table failed\n");
			return ret;
		}
	}

	cypress_program_response_times(rdev);

	/* start the SMC only after all of its tables are in place */
	r7xx_start_smc(rdev);

	ret = cypress_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("cypress_notify_smc_display_change failed\n");
		return ret;
	}
	cypress_enable_sclk_control(rdev, true);

	if (eg_pi->memory_transition)
		cypress_enable_mclk_control(rdev, true);

	cypress_start_dpm(rdev);

	if (pi->gfx_clock_gating)
		cypress_gfx_clock_gating_enable(rdev, true);

	if (pi->mg_clock_gating)
		cypress_mg_clock_gating_enable(rdev, true);

	/* arm the internal thermal interrupt if the sensor and irqs exist */
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		PPSMC_Result result;

		ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
		result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);

		/* a refused SMC message is logged but not treated as fatal */
		if (result != PPSMC_Result_OK)
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
	}

	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	return 0;
}
1925
/*
 * cypress_dpm_disable() - tear down dynamic power management.
 *
 * Roughly the reverse of cypress_dpm_enable(): turn off the optional
 * features that were enabled (thermal protection, dynamic PCIe gen2,
 * the thermal interrupt, clock gating), stop the DPM state machine and
 * the SMC, then undo spread spectrum and restore MC/SMIO state.
 * No-op if DPM is not currently enabled.
 */
void cypress_dpm_disable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	if (!rv770_dpm_enabled(rdev))
		return;

	rv770_clear_vc(rdev);

	if (pi->thermal_protection)
		rv770_enable_thermal_protection(rdev, false);

	if (pi->dynamic_pcie_gen2)
		cypress_enable_dynamic_pcie_gen2(rdev, false);

	/* disarm the thermal interrupt if enable armed it */
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		rdev->irq.dpm_thermal = false;
		radeon_irq_set(rdev);
	}

	if (pi->gfx_clock_gating)
		cypress_gfx_clock_gating_enable(rdev, false);

	if (pi->mg_clock_gating)
		cypress_mg_clock_gating_enable(rdev, false);

	/* stop the DPM state machine before stopping the SMC itself */
	rv770_stop_dpm(rdev);
	r7xx_stop_smc(rdev);

	cypress_enable_spread_spectrum(rdev, false);

	/* leave the MC on the boot-state (s1) register set */
	if (eg_pi->dynamic_ac_timing)
		cypress_force_mc_use_s1(rdev, boot_ps);

	rv770_reset_smio_status(rdev);
}
1965
/*
 * cypress_dpm_set_power_state() - transition the hardware from the current
 * power state to the requested one.
 *
 * Sequence: restrict performance levels, pre-notify the PCIe link speed
 * change (when supported), adjust UVD clocks, halt the SMC while the new
 * software state and MC register table are uploaded, program memory
 * timings, resume the SMC and commit the new state, post-notify the link
 * speed change, then lift the performance-level restriction.  Ordering is
 * significant; each step logs and aborts on failure.
 *
 * Returns 0 on success, or the error code of the first failing step.
 */
int cypress_dpm_set_power_state(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
	struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
	int ret;

	ret = rv770_restrict_performance_levels_before_switch(rdev);
	if (ret) {
		DRM_ERROR("rv770_restrict_performance_levels_before_switch failed\n");
		return ret;
	}
	if (eg_pi->pcie_performance_request)
		cypress_notify_link_speed_change_before_state_change(rdev, new_ps, old_ps);

	rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
	/* the SMC must be halted while its state tables are rewritten */
	ret = rv770_halt_smc(rdev);
	if (ret) {
		DRM_ERROR("rv770_halt_smc failed\n");
		return ret;
	}
	ret = cypress_upload_sw_state(rdev, new_ps);
	if (ret) {
		DRM_ERROR("cypress_upload_sw_state failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = cypress_upload_mc_reg_table(rdev, new_ps);
		if (ret) {
			DRM_ERROR("cypress_upload_mc_reg_table failed\n");
			return ret;
		}
	}

	cypress_program_memory_timing_parameters(rdev, new_ps);

	ret = rv770_resume_smc(rdev);
	if (ret) {
		DRM_ERROR("rv770_resume_smc failed\n");
		return ret;
	}
	/* commit: tell the SMC to switch to the freshly uploaded state */
	ret = rv770_set_sw_state(rdev);
	if (ret) {
		DRM_ERROR("rv770_set_sw_state failed\n");
		return ret;
	}
	rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);

	if (eg_pi->pcie_performance_request)
		cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);

	ret = rv770_unrestrict_performance_levels_after_switch(rdev);
	if (ret) {
		DRM_ERROR("rv770_unrestrict_performance_levels_after_switch failed\n");
		return ret;
	}

	return 0;
}
2025
/*
 * cypress_dpm_reset_asic() - force the asic back to its boot power state,
 * restricting performance levels first so the transition is safe.
 */
void cypress_dpm_reset_asic(struct radeon_device *rdev)
{
	rv770_restrict_performance_levels_before_switch(rdev);
	rv770_set_boot_state(rdev);
}
2031
/*
 * cypress_dpm_display_configuration_changed() - dpm callback invoked when
 * the set of active displays changes; reprograms the display gap logic.
 */
void cypress_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	cypress_program_display_gap(rdev);
}
2036
2037int cypress_dpm_init(struct radeon_device *rdev)
2038{
2039 struct rv7xx_power_info *pi;
2040 struct evergreen_power_info *eg_pi;
2041 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
2042 uint16_t data_offset, size;
2043 uint8_t frev, crev;
2044 struct atom_clock_dividers dividers;
2045 int ret;
2046
2047 eg_pi = kzalloc(sizeof(struct evergreen_power_info), GFP_KERNEL);
2048 if (eg_pi == NULL)
2049 return -ENOMEM;
2050 rdev->pm.dpm.priv = eg_pi;
2051 pi = &eg_pi->rv7xx;
2052
2053 rv770_get_max_vddc(rdev);
2054
2055 eg_pi->ulv.supported = false;
2056 pi->acpi_vddc = 0;
2057 eg_pi->acpi_vddci = 0;
2058 pi->min_vddc_in_table = 0;
2059 pi->max_vddc_in_table = 0;
2060
2061 ret = rv7xx_parse_power_table(rdev);
2062 if (ret)
2063 return ret;
2064
2065 if (rdev->pm.dpm.voltage_response_time == 0)
2066 rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
2067 if (rdev->pm.dpm.backbias_response_time == 0)
2068 rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
2069
2070 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
2071 0, false, &dividers);
2072 if (ret)
2073 pi->ref_div = dividers.ref_div + 1;
2074 else
2075 pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
2076
2077 pi->mclk_strobe_mode_threshold = 40000;
2078 pi->mclk_edc_enable_threshold = 40000;
2079 eg_pi->mclk_edc_wr_enable_threshold = 40000;
2080
2081 pi->rlp = RV770_RLP_DFLT;
2082 pi->rmp = RV770_RMP_DFLT;
2083 pi->lhp = RV770_LHP_DFLT;
2084 pi->lmp = RV770_LMP_DFLT;
2085
2086 pi->voltage_control =
2087 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
2088
2089 pi->mvdd_control =
2090 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
2091
2092 eg_pi->vddci_control =
2093 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
2094
2095 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
2096 &frev, &crev, &data_offset)) {
2097 pi->sclk_ss = true;
2098 pi->mclk_ss = true;
2099 pi->dynamic_ss = true;
2100 } else {
2101 pi->sclk_ss = false;
2102 pi->mclk_ss = false;
2103 pi->dynamic_ss = true;
2104 }
2105
2106 pi->asi = RV770_ASI_DFLT;
2107 pi->pasi = CYPRESS_HASI_DFLT;
2108 pi->vrc = CYPRESS_VRC_DFLT;
2109
2110 pi->power_gating = false;
2111
2112 if ((rdev->family == CHIP_CYPRESS) ||
2113 (rdev->family == CHIP_HEMLOCK))
2114 pi->gfx_clock_gating = false;
2115 else
2116 pi->gfx_clock_gating = true;
2117
2118 pi->mg_clock_gating = true;
2119 pi->mgcgtssm = true;
2120 eg_pi->ls_clock_gating = false;
2121 eg_pi->sclk_deep_sleep = false;
2122
2123 pi->dynamic_pcie_gen2 = true;
2124
2125 if (pi->gfx_clock_gating &&
2126 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
2127 pi->thermal_protection = true;
2128 else
2129 pi->thermal_protection = false;
2130
2131 pi->display_gap = true;
2132
2133 if (rdev->flags & RADEON_IS_MOBILITY)
2134 pi->dcodt = true;
2135 else
2136 pi->dcodt = false;
2137
2138 pi->ulps = true;
2139
2140 eg_pi->dynamic_ac_timing = true;
2141 eg_pi->abm = true;
2142 eg_pi->mcls = true;
2143 eg_pi->light_sleep = true;
2144 eg_pi->memory_transition = true;
2145#if defined(CONFIG_ACPI)
2146 eg_pi->pcie_performance_request =
2147 radeon_acpi_is_pcie_performance_request_supported(rdev);
2148#else
2149 eg_pi->pcie_performance_request = false;
2150#endif
2151
2152 if ((rdev->family == CHIP_CYPRESS) ||
2153 (rdev->family == CHIP_HEMLOCK) ||
2154 (rdev->family == CHIP_JUNIPER))
2155 eg_pi->dll_default_on = true;
2156 else
2157 eg_pi->dll_default_on = false;
2158
2159 eg_pi->sclk_deep_sleep = false;
2160 pi->mclk_stutter_mode_threshold = 0;
2161
2162 pi->sram_end = SMC_RAM_END;
2163
2164 return 0;
2165}
2166
2167void cypress_dpm_fini(struct radeon_device *rdev)
2168{
2169 int i;
2170
2171 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2172 kfree(rdev->pm.dpm.ps[i].ps_priv);
2173 }
2174 kfree(rdev->pm.dpm.ps);
2175 kfree(rdev->pm.dpm.priv);
2176}
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.h b/drivers/gpu/drm/radeon/cypress_dpm.h
new file mode 100644
index 000000000000..4c3f18c69f4f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cypress_dpm.h
@@ -0,0 +1,160 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __CYPRESS_DPM_H__
24#define __CYPRESS_DPM_H__
25
26#include "rv770_dpm.h"
27#include "evergreen_smc.h"
28
29struct evergreen_mc_reg_entry {
30 u32 mclk_max;
31 u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
32};
33
34struct evergreen_mc_reg_table {
35 u8 last;
36 u8 num_entries;
37 u16 valid_flag;
38 struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
39 SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
40};
41
42struct evergreen_ulv_param {
43 bool supported;
44 struct rv7xx_pl *pl;
45};
46
47struct evergreen_arb_registers {
48 u32 mc_arb_dram_timing;
49 u32 mc_arb_dram_timing2;
50 u32 mc_arb_rfsh_rate;
51 u32 mc_arb_burst_time;
52};
53
54struct at {
55 u32 rlp;
56 u32 rmp;
57 u32 lhp;
58 u32 lmp;
59};
60
61struct evergreen_power_info {
62 /* must be first! */
63 struct rv7xx_power_info rv7xx;
64 /* flags */
65 bool vddci_control;
66 bool dynamic_ac_timing;
67 bool abm;
68 bool mcls;
69 bool light_sleep;
70 bool memory_transition;
71 bool pcie_performance_request;
72 bool pcie_performance_request_registered;
73 bool sclk_deep_sleep;
74 bool dll_default_on;
75 bool ls_clock_gating;
76 bool smu_uvd_hs;
77 bool uvd_enabled;
78 /* stored values */
79 u16 acpi_vddci;
80 u8 mvdd_high_index;
81 u8 mvdd_low_index;
82 u32 mclk_edc_wr_enable_threshold;
83 struct evergreen_mc_reg_table mc_reg_table;
84 struct atom_voltage_table vddc_voltage_table;
85 struct atom_voltage_table vddci_voltage_table;
86 struct evergreen_arb_registers bootup_arb_registers;
87 struct evergreen_ulv_param ulv;
88 struct at ats[2];
89 /* smc offsets */
90 u16 mc_reg_table_start;
91 struct radeon_ps current_rps;
92 struct rv7xx_ps current_ps;
93 struct radeon_ps requested_rps;
94 struct rv7xx_ps requested_ps;
95};
96
97#define CYPRESS_HASI_DFLT 400000
98#define CYPRESS_MGCGTTLOCAL0_DFLT 0x00000000
99#define CYPRESS_MGCGTTLOCAL1_DFLT 0x00000000
100#define CYPRESS_MGCGTTLOCAL2_DFLT 0x00000000
101#define CYPRESS_MGCGTTLOCAL3_DFLT 0x00000000
102#define CYPRESS_MGCGCGTSSMCTRL_DFLT 0x81944bc0
103#define REDWOOD_MGCGCGTSSMCTRL_DFLT 0x6e944040
104#define CEDAR_MGCGCGTSSMCTRL_DFLT 0x46944040
105#define CYPRESS_VRC_DFLT 0xC00033
106
107#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
108#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
109#define PCIE_PERF_REQ_PECI_GEN1 2
110#define PCIE_PERF_REQ_PECI_GEN2 3
111#define PCIE_PERF_REQ_PECI_GEN3 4
112
113int cypress_convert_power_level_to_smc(struct radeon_device *rdev,
114 struct rv7xx_pl *pl,
115 RV770_SMC_HW_PERFORMANCE_LEVEL *level,
116 u8 watermark_level);
117int cypress_populate_smc_acpi_state(struct radeon_device *rdev,
118 RV770_SMC_STATETABLE *table);
119int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
120 RV770_SMC_STATETABLE *table);
121int cypress_populate_smc_initial_state(struct radeon_device *rdev,
122 struct radeon_ps *radeon_initial_state,
123 RV770_SMC_STATETABLE *table);
124u32 cypress_calculate_burst_time(struct radeon_device *rdev,
125 u32 engine_clock, u32 memory_clock);
126void cypress_notify_link_speed_change_before_state_change(struct radeon_device *rdev,
127 struct radeon_ps *radeon_new_state,
128 struct radeon_ps *radeon_current_state);
129int cypress_upload_sw_state(struct radeon_device *rdev,
130 struct radeon_ps *radeon_new_state);
131int cypress_upload_mc_reg_table(struct radeon_device *rdev,
132 struct radeon_ps *radeon_new_state);
133void cypress_program_memory_timing_parameters(struct radeon_device *rdev,
134 struct radeon_ps *radeon_new_state);
135void cypress_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
136 struct radeon_ps *radeon_new_state,
137 struct radeon_ps *radeon_current_state);
138int cypress_construct_voltage_tables(struct radeon_device *rdev);
139int cypress_get_mvdd_configuration(struct radeon_device *rdev);
140void cypress_enable_spread_spectrum(struct radeon_device *rdev,
141 bool enable);
142void cypress_enable_display_gap(struct radeon_device *rdev);
143int cypress_get_table_locations(struct radeon_device *rdev);
144int cypress_populate_mc_reg_table(struct radeon_device *rdev,
145 struct radeon_ps *radeon_boot_state);
146void cypress_program_response_times(struct radeon_device *rdev);
147int cypress_notify_smc_display_change(struct radeon_device *rdev,
148 bool has_display);
149void cypress_enable_sclk_control(struct radeon_device *rdev,
150 bool enable);
151void cypress_enable_mclk_control(struct radeon_device *rdev,
152 bool enable);
153void cypress_start_dpm(struct radeon_device *rdev);
154void cypress_advertise_gen2_capability(struct radeon_device *rdev);
155u32 cypress_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf);
156u8 cypress_get_mclk_frequency_ratio(struct radeon_device *rdev,
157 u32 memory_clock, bool strobe_mode);
158u8 cypress_get_strobe_mode_settings(struct radeon_device *rdev, u32 mclk);
159
160#endif
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0f89ce3d02b9..0de5b74f0287 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -33,9 +33,7 @@
33#include "avivod.h" 33#include "avivod.h"
34#include "evergreen_reg.h" 34#include "evergreen_reg.h"
35#include "evergreen_blit_shaders.h" 35#include "evergreen_blit_shaders.h"
36 36#include "radeon_ucode.h"
37#define EVERGREEN_PFP_UCODE_SIZE 1120
38#define EVERGREEN_PM4_UCODE_SIZE 1376
39 37
40static const u32 crtc_offsets[6] = 38static const u32 crtc_offsets[6] =
41{ 39{
@@ -47,9 +45,98 @@ static const u32 crtc_offsets[6] =
47 EVERGREEN_CRTC5_REGISTER_OFFSET 45 EVERGREEN_CRTC5_REGISTER_OFFSET
48}; 46};
49 47
48#include "clearstate_evergreen.h"
49
50static u32 sumo_rlc_save_restore_register_list[] =
51{
52 0x98fc,
53 0x9830,
54 0x9834,
55 0x9838,
56 0x9870,
57 0x9874,
58 0x8a14,
59 0x8b24,
60 0x8bcc,
61 0x8b10,
62 0x8d00,
63 0x8d04,
64 0x8c00,
65 0x8c04,
66 0x8c08,
67 0x8c0c,
68 0x8d8c,
69 0x8c20,
70 0x8c24,
71 0x8c28,
72 0x8c18,
73 0x8c1c,
74 0x8cf0,
75 0x8e2c,
76 0x8e38,
77 0x8c30,
78 0x9508,
79 0x9688,
80 0x9608,
81 0x960c,
82 0x9610,
83 0x9614,
84 0x88c4,
85 0x88d4,
86 0xa008,
87 0x900c,
88 0x9100,
89 0x913c,
90 0x98f8,
91 0x98f4,
92 0x9b7c,
93 0x3f8c,
94 0x8950,
95 0x8954,
96 0x8a18,
97 0x8b28,
98 0x9144,
99 0x9148,
100 0x914c,
101 0x3f90,
102 0x3f94,
103 0x915c,
104 0x9160,
105 0x9178,
106 0x917c,
107 0x9180,
108 0x918c,
109 0x9190,
110 0x9194,
111 0x9198,
112 0x919c,
113 0x91a8,
114 0x91ac,
115 0x91b0,
116 0x91b4,
117 0x91b8,
118 0x91c4,
119 0x91c8,
120 0x91cc,
121 0x91d0,
122 0x91d4,
123 0x91e0,
124 0x91e4,
125 0x91ec,
126 0x91f0,
127 0x91f4,
128 0x9200,
129 0x9204,
130 0x929c,
131 0x9150,
132 0x802c,
133};
134static u32 sumo_rlc_save_restore_register_list_size = ARRAY_SIZE(sumo_rlc_save_restore_register_list);
135
50static void evergreen_gpu_init(struct radeon_device *rdev); 136static void evergreen_gpu_init(struct radeon_device *rdev);
51void evergreen_fini(struct radeon_device *rdev); 137void evergreen_fini(struct radeon_device *rdev);
52void evergreen_pcie_gen2_enable(struct radeon_device *rdev); 138void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
139void evergreen_program_aspm(struct radeon_device *rdev);
53extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev, 140extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
54 int ring, u32 cp_int_cntl); 141 int ring, u32 cp_int_cntl);
55 142
@@ -2036,7 +2123,8 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
2036 u32 lb_size, u32 num_heads) 2123 u32 lb_size, u32 num_heads)
2037{ 2124{
2038 struct drm_display_mode *mode = &radeon_crtc->base.mode; 2125 struct drm_display_mode *mode = &radeon_crtc->base.mode;
2039 struct evergreen_wm_params wm; 2126 struct evergreen_wm_params wm_low, wm_high;
2127 u32 dram_channels;
2040 u32 pixel_period; 2128 u32 pixel_period;
2041 u32 line_time = 0; 2129 u32 line_time = 0;
2042 u32 latency_watermark_a = 0, latency_watermark_b = 0; 2130 u32 latency_watermark_a = 0, latency_watermark_b = 0;
@@ -2052,39 +2140,81 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
2052 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); 2140 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
2053 priority_a_cnt = 0; 2141 priority_a_cnt = 0;
2054 priority_b_cnt = 0; 2142 priority_b_cnt = 0;
2143 dram_channels = evergreen_get_number_of_dram_channels(rdev);
2144
2145 /* watermark for high clocks */
2146 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2147 wm_high.yclk =
2148 radeon_dpm_get_mclk(rdev, false) * 10;
2149 wm_high.sclk =
2150 radeon_dpm_get_sclk(rdev, false) * 10;
2151 } else {
2152 wm_high.yclk = rdev->pm.current_mclk * 10;
2153 wm_high.sclk = rdev->pm.current_sclk * 10;
2154 }
2055 2155
2056 wm.yclk = rdev->pm.current_mclk * 10; 2156 wm_high.disp_clk = mode->clock;
2057 wm.sclk = rdev->pm.current_sclk * 10; 2157 wm_high.src_width = mode->crtc_hdisplay;
2058 wm.disp_clk = mode->clock; 2158 wm_high.active_time = mode->crtc_hdisplay * pixel_period;
2059 wm.src_width = mode->crtc_hdisplay; 2159 wm_high.blank_time = line_time - wm_high.active_time;
2060 wm.active_time = mode->crtc_hdisplay * pixel_period; 2160 wm_high.interlaced = false;
2061 wm.blank_time = line_time - wm.active_time;
2062 wm.interlaced = false;
2063 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 2161 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2064 wm.interlaced = true; 2162 wm_high.interlaced = true;
2065 wm.vsc = radeon_crtc->vsc; 2163 wm_high.vsc = radeon_crtc->vsc;
2066 wm.vtaps = 1; 2164 wm_high.vtaps = 1;
2067 if (radeon_crtc->rmx_type != RMX_OFF) 2165 if (radeon_crtc->rmx_type != RMX_OFF)
2068 wm.vtaps = 2; 2166 wm_high.vtaps = 2;
2069 wm.bytes_per_pixel = 4; /* XXX: get this from fb config */ 2167 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
2070 wm.lb_size = lb_size; 2168 wm_high.lb_size = lb_size;
2071 wm.dram_channels = evergreen_get_number_of_dram_channels(rdev); 2169 wm_high.dram_channels = dram_channels;
2072 wm.num_heads = num_heads; 2170 wm_high.num_heads = num_heads;
2171
2172 /* watermark for low clocks */
2173 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2174 wm_low.yclk =
2175 radeon_dpm_get_mclk(rdev, true) * 10;
2176 wm_low.sclk =
2177 radeon_dpm_get_sclk(rdev, true) * 10;
2178 } else {
2179 wm_low.yclk = rdev->pm.current_mclk * 10;
2180 wm_low.sclk = rdev->pm.current_sclk * 10;
2181 }
2182
2183 wm_low.disp_clk = mode->clock;
2184 wm_low.src_width = mode->crtc_hdisplay;
2185 wm_low.active_time = mode->crtc_hdisplay * pixel_period;
2186 wm_low.blank_time = line_time - wm_low.active_time;
2187 wm_low.interlaced = false;
2188 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2189 wm_low.interlaced = true;
2190 wm_low.vsc = radeon_crtc->vsc;
2191 wm_low.vtaps = 1;
2192 if (radeon_crtc->rmx_type != RMX_OFF)
2193 wm_low.vtaps = 2;
2194 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
2195 wm_low.lb_size = lb_size;
2196 wm_low.dram_channels = dram_channels;
2197 wm_low.num_heads = num_heads;
2073 2198
2074 /* set for high clocks */ 2199 /* set for high clocks */
2075 latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535); 2200 latency_watermark_a = min(evergreen_latency_watermark(&wm_high), (u32)65535);
2076 /* set for low clocks */ 2201 /* set for low clocks */
2077 /* wm.yclk = low clk; wm.sclk = low clk */ 2202 latency_watermark_b = min(evergreen_latency_watermark(&wm_low), (u32)65535);
2078 latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
2079 2203
2080 /* possibly force display priority to high */ 2204 /* possibly force display priority to high */
2081 /* should really do this at mode validation time... */ 2205 /* should really do this at mode validation time... */
2082 if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) || 2206 if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
2083 !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || 2207 !evergreen_average_bandwidth_vs_available_bandwidth(&wm_high) ||
2084 !evergreen_check_latency_hiding(&wm) || 2208 !evergreen_check_latency_hiding(&wm_high) ||
2085 (rdev->disp_priority == 2)) { 2209 (rdev->disp_priority == 2)) {
2086 DRM_DEBUG_KMS("force priority to high\n"); 2210 DRM_DEBUG_KMS("force priority a to high\n");
2087 priority_a_cnt |= PRIORITY_ALWAYS_ON; 2211 priority_a_cnt |= PRIORITY_ALWAYS_ON;
2212 }
2213 if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
2214 !evergreen_average_bandwidth_vs_available_bandwidth(&wm_low) ||
2215 !evergreen_check_latency_hiding(&wm_low) ||
2216 (rdev->disp_priority == 2)) {
2217 DRM_DEBUG_KMS("force priority b to high\n");
2088 priority_b_cnt |= PRIORITY_ALWAYS_ON; 2218 priority_b_cnt |= PRIORITY_ALWAYS_ON;
2089 } 2219 }
2090 2220
@@ -2137,6 +2267,10 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
2137 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt); 2267 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
2138 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt); 2268 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
2139 2269
2270 /* save values for DPM */
2271 radeon_crtc->line_time = line_time;
2272 radeon_crtc->wm_high = latency_watermark_a;
2273 radeon_crtc->wm_low = latency_watermark_b;
2140} 2274}
2141 2275
2142/** 2276/**
@@ -3120,10 +3254,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
3120 u32 efuse_straps_4; 3254 u32 efuse_straps_4;
3121 u32 efuse_straps_3; 3255 u32 efuse_straps_3;
3122 3256
3123 WREG32(RCU_IND_INDEX, 0x204); 3257 efuse_straps_4 = RREG32_RCU(0x204);
3124 efuse_straps_4 = RREG32(RCU_IND_DATA); 3258 efuse_straps_3 = RREG32_RCU(0x203);
3125 WREG32(RCU_IND_INDEX, 0x203);
3126 efuse_straps_3 = RREG32(RCU_IND_DATA);
3127 tmp = (((efuse_straps_4 & 0xf) << 4) | 3259 tmp = (((efuse_straps_4 & 0xf) << 4) |
3128 ((efuse_straps_3 & 0xf0000000) >> 28)); 3260 ((efuse_straps_3 & 0xf0000000) >> 28));
3129 } else { 3261 } else {
@@ -3727,6 +3859,264 @@ bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
3727 return radeon_ring_test_lockup(rdev, ring); 3859 return radeon_ring_test_lockup(rdev, ring);
3728} 3860}
3729 3861
3862/*
3863 * RLC
3864 */
3865#define RLC_SAVE_RESTORE_LIST_END_MARKER 0x00000000
3866#define RLC_CLEAR_STATE_END_MARKER 0x00000001
3867
3868void sumo_rlc_fini(struct radeon_device *rdev)
3869{
3870 int r;
3871
3872 /* save restore block */
3873 if (rdev->rlc.save_restore_obj) {
3874 r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
3875 if (unlikely(r != 0))
3876 dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
3877 radeon_bo_unpin(rdev->rlc.save_restore_obj);
3878 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
3879
3880 radeon_bo_unref(&rdev->rlc.save_restore_obj);
3881 rdev->rlc.save_restore_obj = NULL;
3882 }
3883
3884 /* clear state block */
3885 if (rdev->rlc.clear_state_obj) {
3886 r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
3887 if (unlikely(r != 0))
3888 dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
3889 radeon_bo_unpin(rdev->rlc.clear_state_obj);
3890 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
3891
3892 radeon_bo_unref(&rdev->rlc.clear_state_obj);
3893 rdev->rlc.clear_state_obj = NULL;
3894 }
3895}
3896
3897int sumo_rlc_init(struct radeon_device *rdev)
3898{
3899 u32 *src_ptr;
3900 volatile u32 *dst_ptr;
3901 u32 dws, data, i, j, k, reg_num;
3902 u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index;
3903 u64 reg_list_mc_addr;
3904 struct cs_section_def *cs_data;
3905 int r;
3906
3907 src_ptr = rdev->rlc.reg_list;
3908 dws = rdev->rlc.reg_list_size;
3909 cs_data = rdev->rlc.cs_data;
3910
3911 /* save restore block */
3912 if (rdev->rlc.save_restore_obj == NULL) {
3913 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
3914 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
3915 if (r) {
3916 dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
3917 return r;
3918 }
3919 }
3920
3921 r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
3922 if (unlikely(r != 0)) {
3923 sumo_rlc_fini(rdev);
3924 return r;
3925 }
3926 r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
3927 &rdev->rlc.save_restore_gpu_addr);
3928 if (r) {
3929 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
3930 dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
3931 sumo_rlc_fini(rdev);
3932 return r;
3933 }
3934 r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
3935 if (r) {
3936 dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
3937 sumo_rlc_fini(rdev);
3938 return r;
3939 }
3940 /* write the sr buffer */
3941 dst_ptr = rdev->rlc.sr_ptr;
3942 /* format:
3943 * dw0: (reg2 << 16) | reg1
3944 * dw1: reg1 save space
3945 * dw2: reg2 save space
3946 */
3947 for (i = 0; i < dws; i++) {
3948 data = src_ptr[i] >> 2;
3949 i++;
3950 if (i < dws)
3951 data |= (src_ptr[i] >> 2) << 16;
3952 j = (((i - 1) * 3) / 2);
3953 dst_ptr[j] = data;
3954 }
3955 j = ((i * 3) / 2);
3956 dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
3957
3958 radeon_bo_kunmap(rdev->rlc.save_restore_obj);
3959 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
3960
3961 /* clear state block */
3962 reg_list_num = 0;
3963 dws = 0;
3964 for (i = 0; cs_data[i].section != NULL; i++) {
3965 for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
3966 reg_list_num++;
3967 dws += cs_data[i].section[j].reg_count;
3968 }
3969 }
3970 reg_list_blk_index = (3 * reg_list_num + 2);
3971 dws += reg_list_blk_index;
3972
3973 if (rdev->rlc.clear_state_obj == NULL) {
3974 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
3975 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
3976 if (r) {
3977 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
3978 sumo_rlc_fini(rdev);
3979 return r;
3980 }
3981 }
3982 r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
3983 if (unlikely(r != 0)) {
3984 sumo_rlc_fini(rdev);
3985 return r;
3986 }
3987 r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
3988 &rdev->rlc.clear_state_gpu_addr);
3989 if (r) {
3990
3991 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
3992 dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
3993 sumo_rlc_fini(rdev);
3994 return r;
3995 }
3996 r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
3997 if (r) {
3998 dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
3999 sumo_rlc_fini(rdev);
4000 return r;
4001 }
4002 /* set up the cs buffer */
4003 dst_ptr = rdev->rlc.cs_ptr;
4004 reg_list_hdr_blk_index = 0;
4005 reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
4006 data = upper_32_bits(reg_list_mc_addr);
4007 dst_ptr[reg_list_hdr_blk_index] = data;
4008 reg_list_hdr_blk_index++;
4009 for (i = 0; cs_data[i].section != NULL; i++) {
4010 for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
4011 reg_num = cs_data[i].section[j].reg_count;
4012 data = reg_list_mc_addr & 0xffffffff;
4013 dst_ptr[reg_list_hdr_blk_index] = data;
4014 reg_list_hdr_blk_index++;
4015
4016 data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
4017 dst_ptr[reg_list_hdr_blk_index] = data;
4018 reg_list_hdr_blk_index++;
4019
4020 data = 0x08000000 | (reg_num * 4);
4021 dst_ptr[reg_list_hdr_blk_index] = data;
4022 reg_list_hdr_blk_index++;
4023
4024 for (k = 0; k < reg_num; k++) {
4025 data = cs_data[i].section[j].extent[k];
4026 dst_ptr[reg_list_blk_index + k] = data;
4027 }
4028 reg_list_mc_addr += reg_num * 4;
4029 reg_list_blk_index += reg_num;
4030 }
4031 }
4032 dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
4033
4034 radeon_bo_kunmap(rdev->rlc.clear_state_obj);
4035 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
4036
4037 return 0;
4038}
4039
4040static void evergreen_rlc_start(struct radeon_device *rdev)
4041{
4042 u32 mask = RLC_ENABLE;
4043
4044 if (rdev->flags & RADEON_IS_IGP) {
4045 mask |= GFX_POWER_GATING_ENABLE | GFX_POWER_GATING_SRC;
4046 if (rdev->family == CHIP_ARUBA)
4047 mask |= DYN_PER_SIMD_PG_ENABLE | LB_CNT_SPIM_ACTIVE | LOAD_BALANCE_ENABLE;
4048 }
4049
4050 WREG32(RLC_CNTL, mask);
4051}
4052
4053int evergreen_rlc_resume(struct radeon_device *rdev)
4054{
4055 u32 i;
4056 const __be32 *fw_data;
4057
4058 if (!rdev->rlc_fw)
4059 return -EINVAL;
4060
4061 r600_rlc_stop(rdev);
4062
4063 WREG32(RLC_HB_CNTL, 0);
4064
4065 if (rdev->flags & RADEON_IS_IGP) {
4066 if (rdev->family == CHIP_ARUBA) {
4067 u32 always_on_bitmap =
4068 3 | (3 << (16 * rdev->config.cayman.max_shader_engines));
4069 /* find out the number of active simds */
4070 u32 tmp = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
4071 tmp |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
4072 tmp = hweight32(~tmp);
4073 if (tmp == rdev->config.cayman.max_simds_per_se) {
4074 WREG32(TN_RLC_LB_ALWAYS_ACTIVE_SIMD_MASK, always_on_bitmap);
4075 WREG32(TN_RLC_LB_PARAMS, 0x00601004);
4076 WREG32(TN_RLC_LB_INIT_SIMD_MASK, 0xffffffff);
4077 WREG32(TN_RLC_LB_CNTR_INIT, 0x00000000);
4078 WREG32(TN_RLC_LB_CNTR_MAX, 0x00002000);
4079 }
4080 } else {
4081 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
4082 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
4083 }
4084 WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
4085 WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
4086 } else {
4087 WREG32(RLC_HB_BASE, 0);
4088 WREG32(RLC_HB_RPTR, 0);
4089 WREG32(RLC_HB_WPTR, 0);
4090 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
4091 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
4092 }
4093 WREG32(RLC_MC_CNTL, 0);
4094 WREG32(RLC_UCODE_CNTL, 0);
4095
4096 fw_data = (const __be32 *)rdev->rlc_fw->data;
4097 if (rdev->family >= CHIP_ARUBA) {
4098 for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
4099 WREG32(RLC_UCODE_ADDR, i);
4100 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
4101 }
4102 } else if (rdev->family >= CHIP_CAYMAN) {
4103 for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
4104 WREG32(RLC_UCODE_ADDR, i);
4105 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
4106 }
4107 } else {
4108 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
4109 WREG32(RLC_UCODE_ADDR, i);
4110 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
4111 }
4112 }
4113 WREG32(RLC_UCODE_ADDR, 0);
4114
4115 evergreen_rlc_start(rdev);
4116
4117 return 0;
4118}
4119
3730/* Interrupts */ 4120/* Interrupts */
3731 4121
3732u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc) 4122u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
@@ -3805,6 +4195,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
3805 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; 4195 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
3806 u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; 4196 u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
3807 u32 dma_cntl, dma_cntl1 = 0; 4197 u32 dma_cntl, dma_cntl1 = 0;
4198 u32 thermal_int = 0;
3808 4199
3809 if (!rdev->irq.installed) { 4200 if (!rdev->irq.installed) {
3810 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 4201 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3824,6 +4215,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
3824 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; 4215 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3825 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; 4216 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3826 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 4217 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
4218 if (rdev->family == CHIP_ARUBA)
4219 thermal_int = RREG32(TN_CG_THERMAL_INT_CTRL) &
4220 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
4221 else
4222 thermal_int = RREG32(CG_THERMAL_INT) &
4223 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3827 4224
3828 afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 4225 afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3829 afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 4226 afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
@@ -3869,6 +4266,11 @@ int evergreen_irq_set(struct radeon_device *rdev)
3869 } 4266 }
3870 } 4267 }
3871 4268
4269 if (rdev->irq.dpm_thermal) {
4270 DRM_DEBUG("dpm thermal\n");
4271 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
4272 }
4273
3872 if (rdev->irq.crtc_vblank_int[0] || 4274 if (rdev->irq.crtc_vblank_int[0] ||
3873 atomic_read(&rdev->irq.pflip[0])) { 4275 atomic_read(&rdev->irq.pflip[0])) {
3874 DRM_DEBUG("evergreen_irq_set: vblank 0\n"); 4276 DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -3990,6 +4392,10 @@ int evergreen_irq_set(struct radeon_device *rdev)
3990 WREG32(DC_HPD4_INT_CONTROL, hpd4); 4392 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3991 WREG32(DC_HPD5_INT_CONTROL, hpd5); 4393 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3992 WREG32(DC_HPD6_INT_CONTROL, hpd6); 4394 WREG32(DC_HPD6_INT_CONTROL, hpd6);
4395 if (rdev->family == CHIP_ARUBA)
4396 WREG32(TN_CG_THERMAL_INT_CTRL, thermal_int);
4397 else
4398 WREG32(CG_THERMAL_INT, thermal_int);
3993 4399
3994 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1); 4400 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
3995 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2); 4401 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
@@ -4181,6 +4587,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
4181 u32 ring_index; 4587 u32 ring_index;
4182 bool queue_hotplug = false; 4588 bool queue_hotplug = false;
4183 bool queue_hdmi = false; 4589 bool queue_hdmi = false;
4590 bool queue_thermal = false;
4184 4591
4185 if (!rdev->ih.enabled || rdev->shutdown) 4592 if (!rdev->ih.enabled || rdev->shutdown)
4186 return IRQ_NONE; 4593 return IRQ_NONE;
@@ -4502,6 +4909,16 @@ restart_ih:
4502 DRM_DEBUG("IH: DMA trap\n"); 4909 DRM_DEBUG("IH: DMA trap\n");
4503 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); 4910 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
4504 break; 4911 break;
4912 case 230: /* thermal low to high */
4913 DRM_DEBUG("IH: thermal low to high\n");
4914 rdev->pm.dpm.thermal.high_to_low = false;
4915 queue_thermal = true;
4916 break;
4917 case 231: /* thermal high to low */
4918 DRM_DEBUG("IH: thermal high to low\n");
4919 rdev->pm.dpm.thermal.high_to_low = true;
4920 queue_thermal = true;
4921 break;
4505 case 233: /* GUI IDLE */ 4922 case 233: /* GUI IDLE */
4506 DRM_DEBUG("IH: GUI idle\n"); 4923 DRM_DEBUG("IH: GUI idle\n");
4507 break; 4924 break;
@@ -4524,6 +4941,8 @@ restart_ih:
4524 schedule_work(&rdev->hotplug_work); 4941 schedule_work(&rdev->hotplug_work);
4525 if (queue_hdmi) 4942 if (queue_hdmi)
4526 schedule_work(&rdev->audio_work); 4943 schedule_work(&rdev->audio_work);
4944 if (queue_thermal && rdev->pm.dpm_enabled)
4945 schedule_work(&rdev->pm.dpm.thermal.work);
4527 rdev->ih.rptr = rptr; 4946 rdev->ih.rptr = rptr;
4528 WREG32(IH_RB_RPTR, rdev->ih.rptr); 4947 WREG32(IH_RB_RPTR, rdev->ih.rptr);
4529 atomic_set(&rdev->ih.lock, 0); 4948 atomic_set(&rdev->ih.lock, 0);
@@ -4680,6 +5099,8 @@ static int evergreen_startup(struct radeon_device *rdev)
4680 5099
4681 /* enable pcie gen2 link */ 5100 /* enable pcie gen2 link */
4682 evergreen_pcie_gen2_enable(rdev); 5101 evergreen_pcie_gen2_enable(rdev);
5102 /* enable aspm */
5103 evergreen_program_aspm(rdev);
4683 5104
4684 if (ASIC_IS_DCE5(rdev)) { 5105 if (ASIC_IS_DCE5(rdev)) {
4685 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { 5106 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
@@ -4725,6 +5146,18 @@ static int evergreen_startup(struct radeon_device *rdev)
4725 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 5146 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
4726 } 5147 }
4727 5148
5149 /* allocate rlc buffers */
5150 if (rdev->flags & RADEON_IS_IGP) {
5151 rdev->rlc.reg_list = sumo_rlc_save_restore_register_list;
5152 rdev->rlc.reg_list_size = sumo_rlc_save_restore_register_list_size;
5153 rdev->rlc.cs_data = evergreen_cs_data;
5154 r = sumo_rlc_init(rdev);
5155 if (r) {
5156 DRM_ERROR("Failed to init rlc BOs!\n");
5157 return r;
5158 }
5159 }
5160
4728 /* allocate wb buffer */ 5161 /* allocate wb buffer */
4729 r = radeon_wb_init(rdev); 5162 r = radeon_wb_init(rdev);
4730 if (r) 5163 if (r)
@@ -4956,6 +5389,8 @@ int evergreen_init(struct radeon_device *rdev)
4956 r700_cp_fini(rdev); 5389 r700_cp_fini(rdev);
4957 r600_dma_fini(rdev); 5390 r600_dma_fini(rdev);
4958 r600_irq_fini(rdev); 5391 r600_irq_fini(rdev);
5392 if (rdev->flags & RADEON_IS_IGP)
5393 sumo_rlc_fini(rdev);
4959 radeon_wb_fini(rdev); 5394 radeon_wb_fini(rdev);
4960 radeon_ib_pool_fini(rdev); 5395 radeon_ib_pool_fini(rdev);
4961 radeon_irq_kms_fini(rdev); 5396 radeon_irq_kms_fini(rdev);
@@ -4984,6 +5419,8 @@ void evergreen_fini(struct radeon_device *rdev)
4984 r700_cp_fini(rdev); 5419 r700_cp_fini(rdev);
4985 r600_dma_fini(rdev); 5420 r600_dma_fini(rdev);
4986 r600_irq_fini(rdev); 5421 r600_irq_fini(rdev);
5422 if (rdev->flags & RADEON_IS_IGP)
5423 sumo_rlc_fini(rdev);
4987 radeon_wb_fini(rdev); 5424 radeon_wb_fini(rdev);
4988 radeon_ib_pool_fini(rdev); 5425 radeon_ib_pool_fini(rdev);
4989 radeon_irq_kms_fini(rdev); 5426 radeon_irq_kms_fini(rdev);
@@ -5061,3 +5498,150 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
5061 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 5498 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
5062 } 5499 }
5063} 5500}
5501
5502void evergreen_program_aspm(struct radeon_device *rdev)
5503{
5504 u32 data, orig;
5505 u32 pcie_lc_cntl, pcie_lc_cntl_old;
5506 bool disable_l0s, disable_l1 = false, disable_plloff_in_l1 = false;
5507 /* fusion_platform = true
5508 * if the system is a fusion system
5509 * (APU or DGPU in a fusion system).
5510 * todo: check if the system is a fusion platform.
5511 */
5512 bool fusion_platform = false;
5513
5514 if (!(rdev->flags & RADEON_IS_PCIE))
5515 return;
5516
5517 switch (rdev->family) {
5518 case CHIP_CYPRESS:
5519 case CHIP_HEMLOCK:
5520 case CHIP_JUNIPER:
5521 case CHIP_REDWOOD:
5522 case CHIP_CEDAR:
5523 case CHIP_SUMO:
5524 case CHIP_SUMO2:
5525 case CHIP_PALM:
5526 case CHIP_ARUBA:
5527 disable_l0s = true;
5528 break;
5529 default:
5530 disable_l0s = false;
5531 break;
5532 }
5533
5534 if (rdev->flags & RADEON_IS_IGP)
5535 fusion_platform = true; /* XXX also dGPUs in a fusion system */
5536
5537 data = orig = RREG32_PIF_PHY0(PB0_PIF_PAIRING);
5538 if (fusion_platform)
5539 data &= ~MULTI_PIF;
5540 else
5541 data |= MULTI_PIF;
5542 if (data != orig)
5543 WREG32_PIF_PHY0(PB0_PIF_PAIRING, data);
5544
5545 data = orig = RREG32_PIF_PHY1(PB1_PIF_PAIRING);
5546 if (fusion_platform)
5547 data &= ~MULTI_PIF;
5548 else
5549 data |= MULTI_PIF;
5550 if (data != orig)
5551 WREG32_PIF_PHY1(PB1_PIF_PAIRING, data);
5552
5553 pcie_lc_cntl = pcie_lc_cntl_old = RREG32_PCIE_PORT(PCIE_LC_CNTL);
5554 pcie_lc_cntl &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
5555 if (!disable_l0s) {
5556 if (rdev->family >= CHIP_BARTS)
5557 pcie_lc_cntl |= LC_L0S_INACTIVITY(7);
5558 else
5559 pcie_lc_cntl |= LC_L0S_INACTIVITY(3);
5560 }
5561
5562 if (!disable_l1) {
5563 if (rdev->family >= CHIP_BARTS)
5564 pcie_lc_cntl |= LC_L1_INACTIVITY(7);
5565 else
5566 pcie_lc_cntl |= LC_L1_INACTIVITY(8);
5567
5568 if (!disable_plloff_in_l1) {
5569 data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
5570 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
5571 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
5572 if (data != orig)
5573 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
5574
5575 data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
5576 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
5577 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
5578 if (data != orig)
5579 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
5580
5581 data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
5582 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
5583 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
5584 if (data != orig)
5585 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
5586
5587 data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
5588 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
5589 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
5590 if (data != orig)
5591 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
5592
5593 if (rdev->family >= CHIP_BARTS) {
5594 data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
5595 data &= ~PLL_RAMP_UP_TIME_0_MASK;
5596 data |= PLL_RAMP_UP_TIME_0(4);
5597 if (data != orig)
5598 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
5599
5600 data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
5601 data &= ~PLL_RAMP_UP_TIME_1_MASK;
5602 data |= PLL_RAMP_UP_TIME_1(4);
5603 if (data != orig)
5604 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
5605
5606 data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
5607 data &= ~PLL_RAMP_UP_TIME_0_MASK;
5608 data |= PLL_RAMP_UP_TIME_0(4);
5609 if (data != orig)
5610 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
5611
5612 data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
5613 data &= ~PLL_RAMP_UP_TIME_1_MASK;
5614 data |= PLL_RAMP_UP_TIME_1(4);
5615 if (data != orig)
5616 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
5617 }
5618
5619 data = orig = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
5620 data &= ~LC_DYN_LANES_PWR_STATE_MASK;
5621 data |= LC_DYN_LANES_PWR_STATE(3);
5622 if (data != orig)
5623 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
5624
5625 if (rdev->family >= CHIP_BARTS) {
5626 data = orig = RREG32_PIF_PHY0(PB0_PIF_CNTL);
5627 data &= ~LS2_EXIT_TIME_MASK;
5628 data |= LS2_EXIT_TIME(1);
5629 if (data != orig)
5630 WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
5631
5632 data = orig = RREG32_PIF_PHY1(PB1_PIF_CNTL);
5633 data &= ~LS2_EXIT_TIME_MASK;
5634 data |= LS2_EXIT_TIME(1);
5635 if (data != orig)
5636 WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
5637 }
5638 }
5639 }
5640
5641 /* evergreen parts only */
5642 if (rdev->family < CHIP_BARTS)
5643 pcie_lc_cntl |= LC_PMI_TO_L1_DIS;
5644
5645 if (pcie_lc_cntl != pcie_lc_cntl_old)
5646 WREG32_PCIE_PORT(PCIE_LC_CNTL, pcie_lc_cntl);
5647}
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index ed7c8a768092..b9c6f7675e59 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -128,14 +128,7 @@ static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
128 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 128 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
129 uint32_t offset = dig->afmt->offset; 129 uint32_t offset = dig->afmt->offset;
130 uint8_t *frame = buffer + 3; 130 uint8_t *frame = buffer + 3;
131 131 uint8_t *header = buffer;
132 /* Our header values (type, version, length) should be alright, Intel
133 * is using the same. Checksum function also seems to be OK, it works
134 * fine for audio infoframe. However calculated value is always lower
135 * by 2 in comparison to fglrx. It breaks displaying anything in case
136 * of TVs that strictly check the checksum. Hack it manually here to
137 * workaround this issue. */
138 frame[0x0] += 2;
139 132
140 WREG32(AFMT_AVI_INFO0 + offset, 133 WREG32(AFMT_AVI_INFO0 + offset,
141 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); 134 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -144,7 +137,7 @@ static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
144 WREG32(AFMT_AVI_INFO2 + offset, 137 WREG32(AFMT_AVI_INFO2 + offset,
145 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); 138 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
146 WREG32(AFMT_AVI_INFO3 + offset, 139 WREG32(AFMT_AVI_INFO3 + offset,
147 frame[0xC] | (frame[0xD] << 8)); 140 frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
148} 141}
149 142
150static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock) 143static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 881aba23c477..8a4e641f0e3c 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -24,7 +24,16 @@
24#ifndef __EVERGREEN_REG_H__ 24#ifndef __EVERGREEN_REG_H__
25#define __EVERGREEN_REG_H__ 25#define __EVERGREEN_REG_H__
26 26
27/* trinity */
28#define TN_SMC_IND_INDEX_0 0x200
29#define TN_SMC_IND_DATA_0 0x204
30
27/* evergreen */ 31/* evergreen */
32#define EVERGREEN_PIF_PHY0_INDEX 0x8
33#define EVERGREEN_PIF_PHY0_DATA 0xc
34#define EVERGREEN_PIF_PHY1_INDEX 0x10
35#define EVERGREEN_PIF_PHY1_DATA 0x14
36
28#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310 37#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310
29#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324 38#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324
30#define EVERGREEN_D3VGA_CONTROL 0x3e0 39#define EVERGREEN_D3VGA_CONTROL 0x3e0
@@ -40,6 +49,9 @@
40#define EVERGREEN_AUDIO_PLL1_DIV 0x5b4 49#define EVERGREEN_AUDIO_PLL1_DIV 0x5b4
41#define EVERGREEN_AUDIO_PLL1_UNK 0x5bc 50#define EVERGREEN_AUDIO_PLL1_UNK 0x5bc
42 51
52#define EVERGREEN_CG_IND_ADDR 0x8f8
53#define EVERGREEN_CG_IND_DATA 0x8fc
54
43#define EVERGREEN_AUDIO_ENABLE 0x5e78 55#define EVERGREEN_AUDIO_ENABLE 0x5e78
44#define EVERGREEN_AUDIO_VENDOR_ID 0x5ec0 56#define EVERGREEN_AUDIO_VENDOR_ID 0x5ec0
45 57
diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
new file mode 100644
index 000000000000..76ada8cfe902
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_smc.h
@@ -0,0 +1,67 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __EVERGREEN_SMC_H__
24#define __EVERGREEN_SMC_H__
25
26#include "rv770_smc.h"
27
28#pragma pack(push, 1)
29
30#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
31
32struct SMC_Evergreen_MCRegisterAddress
33{
34 uint16_t s0;
35 uint16_t s1;
36};
37
38typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
39
40
41struct SMC_Evergreen_MCRegisterSet
42{
43 uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
44};
45
46typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
47
48struct SMC_Evergreen_MCRegisters
49{
50 uint8_t last;
51 uint8_t reserved[3];
52 SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
53 SMC_Evergreen_MCRegisterSet data[5];
54};
55
56typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
57
58#define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
59
60#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0
61#define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC
62#define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
63
64
65#pragma pack(pop)
66
67#endif
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 75c05631146d..a7baf67aef6c 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -48,6 +48,293 @@
48#define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002 48#define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002
49#define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002 49#define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002
50 50
51/* pm registers */
52#define SMC_MSG 0x20c
53#define HOST_SMC_MSG(x) ((x) << 0)
54#define HOST_SMC_MSG_MASK (0xff << 0)
55#define HOST_SMC_MSG_SHIFT 0
56#define HOST_SMC_RESP(x) ((x) << 8)
57#define HOST_SMC_RESP_MASK (0xff << 8)
58#define HOST_SMC_RESP_SHIFT 8
59#define SMC_HOST_MSG(x) ((x) << 16)
60#define SMC_HOST_MSG_MASK (0xff << 16)
61#define SMC_HOST_MSG_SHIFT 16
62#define SMC_HOST_RESP(x) ((x) << 24)
63#define SMC_HOST_RESP_MASK (0xff << 24)
64#define SMC_HOST_RESP_SHIFT 24
65
66#define DCCG_DISP_SLOW_SELECT_REG 0x4fc
67#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
68#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
69#define DCCG_DISP1_SLOW_SELECT_SHIFT 0
70#define DCCG_DISP2_SLOW_SELECT(x) ((x) << 4)
71#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
72#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
73
74#define CG_SPLL_FUNC_CNTL 0x600
75#define SPLL_RESET (1 << 0)
76#define SPLL_SLEEP (1 << 1)
77#define SPLL_BYPASS_EN (1 << 3)
78#define SPLL_REF_DIV(x) ((x) << 4)
79#define SPLL_REF_DIV_MASK (0x3f << 4)
80#define SPLL_PDIV_A(x) ((x) << 20)
81#define SPLL_PDIV_A_MASK (0x7f << 20)
82#define CG_SPLL_FUNC_CNTL_2 0x604
83#define SCLK_MUX_SEL(x) ((x) << 0)
84#define SCLK_MUX_SEL_MASK (0x1ff << 0)
85#define CG_SPLL_FUNC_CNTL_3 0x608
86#define SPLL_FB_DIV(x) ((x) << 0)
87#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
88#define SPLL_DITHEN (1 << 28)
89
90#define MPLL_CNTL_MODE 0x61c
91# define SS_SSEN (1 << 24)
92# define SS_DSMODE_EN (1 << 25)
93
94#define MPLL_AD_FUNC_CNTL 0x624
95#define CLKF(x) ((x) << 0)
96#define CLKF_MASK (0x7f << 0)
97#define CLKR(x) ((x) << 7)
98#define CLKR_MASK (0x1f << 7)
99#define CLKFRAC(x) ((x) << 12)
100#define CLKFRAC_MASK (0x1f << 12)
101#define YCLK_POST_DIV(x) ((x) << 17)
102#define YCLK_POST_DIV_MASK (3 << 17)
103#define IBIAS(x) ((x) << 20)
104#define IBIAS_MASK (0x3ff << 20)
105#define RESET (1 << 30)
106#define PDNB (1 << 31)
107#define MPLL_AD_FUNC_CNTL_2 0x628
108#define BYPASS (1 << 19)
109#define BIAS_GEN_PDNB (1 << 24)
110#define RESET_EN (1 << 25)
111#define VCO_MODE (1 << 29)
112#define MPLL_DQ_FUNC_CNTL 0x62c
113#define MPLL_DQ_FUNC_CNTL_2 0x630
114
115#define GENERAL_PWRMGT 0x63c
116# define GLOBAL_PWRMGT_EN (1 << 0)
117# define STATIC_PM_EN (1 << 1)
118# define THERMAL_PROTECTION_DIS (1 << 2)
119# define THERMAL_PROTECTION_TYPE (1 << 3)
120# define ENABLE_GEN2PCIE (1 << 4)
121# define ENABLE_GEN2XSP (1 << 5)
122# define SW_SMIO_INDEX(x) ((x) << 6)
123# define SW_SMIO_INDEX_MASK (3 << 6)
124# define SW_SMIO_INDEX_SHIFT 6
125# define LOW_VOLT_D2_ACPI (1 << 8)
126# define LOW_VOLT_D3_ACPI (1 << 9)
127# define VOLT_PWRMGT_EN (1 << 10)
128# define BACKBIAS_PAD_EN (1 << 18)
129# define BACKBIAS_VALUE (1 << 19)
130# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
131# define AC_DC_SW (1 << 24)
132
133#define SCLK_PWRMGT_CNTL 0x644
134# define SCLK_PWRMGT_OFF (1 << 0)
135# define SCLK_LOW_D1 (1 << 1)
136# define FIR_RESET (1 << 4)
137# define FIR_FORCE_TREND_SEL (1 << 5)
138# define FIR_TREND_MODE (1 << 6)
139# define DYN_GFX_CLK_OFF_EN (1 << 7)
140# define GFX_CLK_FORCE_ON (1 << 8)
141# define GFX_CLK_REQUEST_OFF (1 << 9)
142# define GFX_CLK_FORCE_OFF (1 << 10)
143# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
144# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
145# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
146# define DYN_LIGHT_SLEEP_EN (1 << 14)
147#define MCLK_PWRMGT_CNTL 0x648
148# define DLL_SPEED(x) ((x) << 0)
149# define DLL_SPEED_MASK (0x1f << 0)
150# define MPLL_PWRMGT_OFF (1 << 5)
151# define DLL_READY (1 << 6)
152# define MC_INT_CNTL (1 << 7)
153# define MRDCKA0_PDNB (1 << 8)
154# define MRDCKA1_PDNB (1 << 9)
155# define MRDCKB0_PDNB (1 << 10)
156# define MRDCKB1_PDNB (1 << 11)
157# define MRDCKC0_PDNB (1 << 12)
158# define MRDCKC1_PDNB (1 << 13)
159# define MRDCKD0_PDNB (1 << 14)
160# define MRDCKD1_PDNB (1 << 15)
161# define MRDCKA0_RESET (1 << 16)
162# define MRDCKA1_RESET (1 << 17)
163# define MRDCKB0_RESET (1 << 18)
164# define MRDCKB1_RESET (1 << 19)
165# define MRDCKC0_RESET (1 << 20)
166# define MRDCKC1_RESET (1 << 21)
167# define MRDCKD0_RESET (1 << 22)
168# define MRDCKD1_RESET (1 << 23)
169# define DLL_READY_READ (1 << 24)
170# define USE_DISPLAY_GAP (1 << 25)
171# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
172# define MPLL_TURNOFF_D2 (1 << 28)
173#define DLL_CNTL 0x64c
174# define MRDCKA0_BYPASS (1 << 24)
175# define MRDCKA1_BYPASS (1 << 25)
176# define MRDCKB0_BYPASS (1 << 26)
177# define MRDCKB1_BYPASS (1 << 27)
178# define MRDCKC0_BYPASS (1 << 28)
179# define MRDCKC1_BYPASS (1 << 29)
180# define MRDCKD0_BYPASS (1 << 30)
181# define MRDCKD1_BYPASS (1 << 31)
182
183#define CG_AT 0x6d4
184# define CG_R(x) ((x) << 0)
185# define CG_R_MASK (0xffff << 0)
186# define CG_L(x) ((x) << 16)
187# define CG_L_MASK (0xffff << 16)
188
189#define CG_DISPLAY_GAP_CNTL 0x714
190# define DISP1_GAP(x) ((x) << 0)
191# define DISP1_GAP_MASK (3 << 0)
192# define DISP2_GAP(x) ((x) << 2)
193# define DISP2_GAP_MASK (3 << 2)
194# define VBI_TIMER_COUNT(x) ((x) << 4)
195# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
196# define VBI_TIMER_UNIT(x) ((x) << 20)
197# define VBI_TIMER_UNIT_MASK (7 << 20)
198# define DISP1_GAP_MCHG(x) ((x) << 24)
199# define DISP1_GAP_MCHG_MASK (3 << 24)
200# define DISP2_GAP_MCHG(x) ((x) << 26)
201# define DISP2_GAP_MCHG_MASK (3 << 26)
202
203#define CG_BIF_REQ_AND_RSP 0x7f4
204#define CG_CLIENT_REQ(x) ((x) << 0)
205#define CG_CLIENT_REQ_MASK (0xff << 0)
206#define CG_CLIENT_REQ_SHIFT 0
207#define CG_CLIENT_RESP(x) ((x) << 8)
208#define CG_CLIENT_RESP_MASK (0xff << 8)
209#define CG_CLIENT_RESP_SHIFT 8
210#define CLIENT_CG_REQ(x) ((x) << 16)
211#define CLIENT_CG_REQ_MASK (0xff << 16)
212#define CLIENT_CG_REQ_SHIFT 16
213#define CLIENT_CG_RESP(x) ((x) << 24)
214#define CLIENT_CG_RESP_MASK (0xff << 24)
215#define CLIENT_CG_RESP_SHIFT 24
216
217#define CG_SPLL_SPREAD_SPECTRUM 0x790
218#define SSEN (1 << 0)
219#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
220
221#define MPLL_SS1 0x85c
222#define CLKV(x) ((x) << 0)
223#define CLKV_MASK (0x3ffffff << 0)
224#define MPLL_SS2 0x860
225#define CLKS(x) ((x) << 0)
226#define CLKS_MASK (0xfff << 0)
227
228#define CG_IND_ADDR 0x8f8
229#define CG_IND_DATA 0x8fc
230/* CGIND regs */
231#define CG_CGTT_LOCAL_0 0x00
232#define CG_CGTT_LOCAL_1 0x01
233#define CG_CGTT_LOCAL_2 0x02
234#define CG_CGTT_LOCAL_3 0x03
235#define CG_CGLS_TILE_0 0x20
236#define CG_CGLS_TILE_1 0x21
237#define CG_CGLS_TILE_2 0x22
238#define CG_CGLS_TILE_3 0x23
239#define CG_CGLS_TILE_4 0x24
240#define CG_CGLS_TILE_5 0x25
241#define CG_CGLS_TILE_6 0x26
242#define CG_CGLS_TILE_7 0x27
243#define CG_CGLS_TILE_8 0x28
244#define CG_CGLS_TILE_9 0x29
245#define CG_CGLS_TILE_10 0x2a
246#define CG_CGLS_TILE_11 0x2b
247
248#define VM_L2_CG 0x15c0
249
250#define MC_CONFIG 0x2000
251
252#define MC_CONFIG_MCD 0x20a0
253#define MC_CG_CONFIG_MCD 0x20a4
254#define MC_RD_ENABLE_MCD(x) ((x) << 8)
255#define MC_RD_ENABLE_MCD_MASK (7 << 8)
256
257#define MC_HUB_MISC_HUB_CG 0x20b8
258#define MC_HUB_MISC_VM_CG 0x20bc
259#define MC_HUB_MISC_SIP_CG 0x20c0
260
261#define MC_XPB_CLK_GAT 0x2478
262
263#define MC_CG_CONFIG 0x25bc
264#define MC_RD_ENABLE(x) ((x) << 4)
265#define MC_RD_ENABLE_MASK (3 << 4)
266
267#define MC_CITF_MISC_RD_CG 0x2648
268#define MC_CITF_MISC_WR_CG 0x264c
269#define MC_CITF_MISC_VM_CG 0x2650
270# define MEM_LS_ENABLE (1 << 19)
271
272#define MC_ARB_BURST_TIME 0x2808
273#define STATE0(x) ((x) << 0)
274#define STATE0_MASK (0x1f << 0)
275#define STATE1(x) ((x) << 5)
276#define STATE1_MASK (0x1f << 5)
277#define STATE2(x) ((x) << 10)
278#define STATE2_MASK (0x1f << 10)
279#define STATE3(x) ((x) << 15)
280#define STATE3_MASK (0x1f << 15)
281
282#define MC_SEQ_RAS_TIMING 0x28a0
283#define MC_SEQ_CAS_TIMING 0x28a4
284#define MC_SEQ_MISC_TIMING 0x28a8
285#define MC_SEQ_MISC_TIMING2 0x28ac
286
287#define MC_SEQ_RD_CTL_D0 0x28b4
288#define MC_SEQ_RD_CTL_D1 0x28b8
289#define MC_SEQ_WR_CTL_D0 0x28bc
290#define MC_SEQ_WR_CTL_D1 0x28c0
291
292#define MC_SEQ_STATUS_M 0x29f4
293# define PMG_PWRSTATE (1 << 16)
294
295#define MC_SEQ_MISC1 0x2a04
296#define MC_SEQ_RESERVE_M 0x2a08
297#define MC_PMG_CMD_EMRS 0x2a0c
298
299#define MC_SEQ_MISC3 0x2a2c
300
301#define MC_SEQ_MISC5 0x2a54
302#define MC_SEQ_MISC6 0x2a58
303
304#define MC_SEQ_MISC7 0x2a64
305
306#define MC_SEQ_CG 0x2a68
307#define CG_SEQ_REQ(x) ((x) << 0)
308#define CG_SEQ_REQ_MASK (0xff << 0)
309#define CG_SEQ_REQ_SHIFT 0
310#define CG_SEQ_RESP(x) ((x) << 8)
311#define CG_SEQ_RESP_MASK (0xff << 8)
312#define CG_SEQ_RESP_SHIFT 8
313#define SEQ_CG_REQ(x) ((x) << 16)
314#define SEQ_CG_REQ_MASK (0xff << 16)
315#define SEQ_CG_REQ_SHIFT 16
316#define SEQ_CG_RESP(x) ((x) << 24)
317#define SEQ_CG_RESP_MASK (0xff << 24)
318#define SEQ_CG_RESP_SHIFT 24
319#define MC_SEQ_RAS_TIMING_LP 0x2a6c
320#define MC_SEQ_CAS_TIMING_LP 0x2a70
321#define MC_SEQ_MISC_TIMING_LP 0x2a74
322#define MC_SEQ_MISC_TIMING2_LP 0x2a78
323#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
324#define MC_SEQ_WR_CTL_D1_LP 0x2a80
325#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
326#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
327
328#define MC_PMG_CMD_MRS 0x2aac
329
330#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
331#define MC_SEQ_RD_CTL_D1_LP 0x2b20
332
333#define MC_PMG_CMD_MRS1 0x2b44
334#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
335
336#define CGTS_SM_CTRL_REG 0x9150
337
51/* Registers */ 338/* Registers */
52 339
53#define RCU_IND_INDEX 0x100 340#define RCU_IND_INDEX 0x100
@@ -90,6 +377,34 @@
90#define CG_VCLK_STATUS 0x61c 377#define CG_VCLK_STATUS 0x61c
91#define CG_SCRATCH1 0x820 378#define CG_SCRATCH1 0x820
92 379
380#define RLC_CNTL 0x3f00
381# define RLC_ENABLE (1 << 0)
382# define GFX_POWER_GATING_ENABLE (1 << 7)
383# define GFX_POWER_GATING_SRC (1 << 8)
384# define DYN_PER_SIMD_PG_ENABLE (1 << 27)
385# define LB_CNT_SPIM_ACTIVE (1 << 30)
386# define LOAD_BALANCE_ENABLE (1 << 31)
387
388#define RLC_HB_BASE 0x3f10
389#define RLC_HB_CNTL 0x3f0c
390#define RLC_HB_RPTR 0x3f20
391#define RLC_HB_WPTR 0x3f1c
392#define RLC_HB_WPTR_LSB_ADDR 0x3f14
393#define RLC_HB_WPTR_MSB_ADDR 0x3f18
394#define RLC_MC_CNTL 0x3f44
395#define RLC_UCODE_CNTL 0x3f48
396#define RLC_UCODE_ADDR 0x3f2c
397#define RLC_UCODE_DATA 0x3f30
398
399/* new for TN */
400#define TN_RLC_SAVE_AND_RESTORE_BASE 0x3f10
401#define TN_RLC_LB_CNTR_MAX 0x3f14
402#define TN_RLC_LB_CNTR_INIT 0x3f18
403#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
404#define TN_RLC_LB_INIT_SIMD_MASK 0x3fe4
405#define TN_RLC_LB_ALWAYS_ACTIVE_SIMD_MASK 0x3fe8
406#define TN_RLC_LB_PARAMS 0x3fec
407
93#define GRBM_GFX_INDEX 0x802C 408#define GRBM_GFX_INDEX 0x802C
94#define INSTANCE_INDEX(x) ((x) << 0) 409#define INSTANCE_INDEX(x) ((x) << 0)
95#define SE_INDEX(x) ((x) << 16) 410#define SE_INDEX(x) ((x) << 16)
@@ -503,6 +818,30 @@
503#define CG_THERMAL_CTRL 0x72c 818#define CG_THERMAL_CTRL 0x72c
504#define TOFFSET_MASK 0x00003FE0 819#define TOFFSET_MASK 0x00003FE0
505#define TOFFSET_SHIFT 5 820#define TOFFSET_SHIFT 5
821#define DIG_THERM_DPM(x) ((x) << 14)
822#define DIG_THERM_DPM_MASK 0x003FC000
823#define DIG_THERM_DPM_SHIFT 14
824
825#define CG_THERMAL_INT 0x734
826#define DIG_THERM_INTH(x) ((x) << 8)
827#define DIG_THERM_INTH_MASK 0x0000FF00
828#define DIG_THERM_INTH_SHIFT 8
829#define DIG_THERM_INTL(x) ((x) << 16)
830#define DIG_THERM_INTL_MASK 0x00FF0000
831#define DIG_THERM_INTL_SHIFT 16
832#define THERM_INT_MASK_HIGH (1 << 24)
833#define THERM_INT_MASK_LOW (1 << 25)
834
835#define TN_CG_THERMAL_INT_CTRL 0x738
836#define TN_DIG_THERM_INTH(x) ((x) << 0)
837#define TN_DIG_THERM_INTH_MASK 0x000000FF
838#define TN_DIG_THERM_INTH_SHIFT 0
839#define TN_DIG_THERM_INTL(x) ((x) << 8)
840#define TN_DIG_THERM_INTL_MASK 0x0000FF00
841#define TN_DIG_THERM_INTL_SHIFT 8
842#define TN_THERM_INT_MASK_HIGH (1 << 24)
843#define TN_THERM_INT_MASK_LOW (1 << 25)
844
506#define CG_MULT_THERMAL_STATUS 0x740 845#define CG_MULT_THERMAL_STATUS 0x740
507#define ASIC_T(x) ((x) << 16) 846#define ASIC_T(x) ((x) << 16)
508#define ASIC_T_MASK 0x07FF0000 847#define ASIC_T_MASK 0x07FF0000
@@ -510,6 +849,7 @@
510#define CG_TS0_STATUS 0x760 849#define CG_TS0_STATUS 0x760
511#define TS0_ADC_DOUT_MASK 0x000003FF 850#define TS0_ADC_DOUT_MASK 0x000003FF
512#define TS0_ADC_DOUT_SHIFT 0 851#define TS0_ADC_DOUT_SHIFT 0
852
513/* APU */ 853/* APU */
514#define CG_THERMAL_STATUS 0x678 854#define CG_THERMAL_STATUS 0x678
515 855
@@ -992,7 +1332,48 @@
992#define DMA_PACKET_CONSTANT_FILL 0xd 1332#define DMA_PACKET_CONSTANT_FILL 0xd
993#define DMA_PACKET_NOP 0xf 1333#define DMA_PACKET_NOP 0xf
994 1334
995/* PCIE link stuff */ 1335/* PIF PHY0 indirect regs */
1336#define PB0_PIF_CNTL 0x10
1337# define LS2_EXIT_TIME(x) ((x) << 17)
1338# define LS2_EXIT_TIME_MASK (0x7 << 17)
1339# define LS2_EXIT_TIME_SHIFT 17
1340#define PB0_PIF_PAIRING 0x11
1341# define MULTI_PIF (1 << 25)
1342#define PB0_PIF_PWRDOWN_0 0x12
1343# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
1344# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
1345# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
1346# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
1347# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
1348# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
1349# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
1350# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
1351# define PLL_RAMP_UP_TIME_0_SHIFT 24
1352#define PB0_PIF_PWRDOWN_1 0x13
1353# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
1354# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
1355# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
1356# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
1357# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
1358# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
1359# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
1360# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
1361# define PLL_RAMP_UP_TIME_1_SHIFT 24
1362/* PIF PHY1 indirect regs */
1363#define PB1_PIF_CNTL 0x10
1364#define PB1_PIF_PAIRING 0x11
1365#define PB1_PIF_PWRDOWN_0 0x12
1366#define PB1_PIF_PWRDOWN_1 0x13
1367/* PCIE PORT indirect regs */
1368#define PCIE_LC_CNTL 0xa0
1369# define LC_L0S_INACTIVITY(x) ((x) << 8)
1370# define LC_L0S_INACTIVITY_MASK (0xf << 8)
1371# define LC_L0S_INACTIVITY_SHIFT 8
1372# define LC_L1_INACTIVITY(x) ((x) << 12)
1373# define LC_L1_INACTIVITY_MASK (0xf << 12)
1374# define LC_L1_INACTIVITY_SHIFT 12
1375# define LC_PMI_TO_L1_DIS (1 << 16)
1376# define LC_ASPM_TO_L1_DIS (1 << 24)
996#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ 1377#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
997#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ 1378#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
998# define LC_LINK_WIDTH_SHIFT 0 1379# define LC_LINK_WIDTH_SHIFT 0
@@ -1012,6 +1393,9 @@
1012# define LC_SHORT_RECONFIG_EN (1 << 11) 1393# define LC_SHORT_RECONFIG_EN (1 << 11)
1013# define LC_UPCONFIGURE_SUPPORT (1 << 12) 1394# define LC_UPCONFIGURE_SUPPORT (1 << 12)
1014# define LC_UPCONFIGURE_DIS (1 << 13) 1395# define LC_UPCONFIGURE_DIS (1 << 13)
1396# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
1397# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
1398# define LC_DYN_LANES_PWR_STATE_SHIFT 21
1015#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */ 1399#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
1016# define LC_GEN2_EN_STRAP (1 << 0) 1400# define LC_GEN2_EN_STRAP (1 << 0)
1017# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1) 1401# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
@@ -1020,6 +1404,9 @@
1020# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) 1404# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
1021# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 1405# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
1022# define LC_CURRENT_DATA_RATE (1 << 11) 1406# define LC_CURRENT_DATA_RATE (1 << 11)
1407# define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12)
1408# define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12)
1409# define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12
1023# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) 1410# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
1024# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) 1411# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
1025# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) 1412# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 84583302b081..f30127cb30ef 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -33,6 +33,135 @@
33#include "atom.h" 33#include "atom.h"
34#include "ni_reg.h" 34#include "ni_reg.h"
35#include "cayman_blit_shaders.h" 35#include "cayman_blit_shaders.h"
36#include "radeon_ucode.h"
37#include "clearstate_cayman.h"
38
39static u32 tn_rlc_save_restore_register_list[] =
40{
41 0x98fc,
42 0x98f0,
43 0x9834,
44 0x9838,
45 0x9870,
46 0x9874,
47 0x8a14,
48 0x8b24,
49 0x8bcc,
50 0x8b10,
51 0x8c30,
52 0x8d00,
53 0x8d04,
54 0x8c00,
55 0x8c04,
56 0x8c10,
57 0x8c14,
58 0x8d8c,
59 0x8cf0,
60 0x8e38,
61 0x9508,
62 0x9688,
63 0x9608,
64 0x960c,
65 0x9610,
66 0x9614,
67 0x88c4,
68 0x8978,
69 0x88d4,
70 0x900c,
71 0x9100,
72 0x913c,
73 0x90e8,
74 0x9354,
75 0xa008,
76 0x98f8,
77 0x9148,
78 0x914c,
79 0x3f94,
80 0x98f4,
81 0x9b7c,
82 0x3f8c,
83 0x8950,
84 0x8954,
85 0x8a18,
86 0x8b28,
87 0x9144,
88 0x3f90,
89 0x915c,
90 0x9160,
91 0x9178,
92 0x917c,
93 0x9180,
94 0x918c,
95 0x9190,
96 0x9194,
97 0x9198,
98 0x919c,
99 0x91a8,
100 0x91ac,
101 0x91b0,
102 0x91b4,
103 0x91b8,
104 0x91c4,
105 0x91c8,
106 0x91cc,
107 0x91d0,
108 0x91d4,
109 0x91e0,
110 0x91e4,
111 0x91ec,
112 0x91f0,
113 0x91f4,
114 0x9200,
115 0x9204,
116 0x929c,
117 0x8030,
118 0x9150,
119 0x9a60,
120 0x920c,
121 0x9210,
122 0x9228,
123 0x922c,
124 0x9244,
125 0x9248,
126 0x91e8,
127 0x9294,
128 0x9208,
129 0x9224,
130 0x9240,
131 0x9220,
132 0x923c,
133 0x9258,
134 0x9744,
135 0xa200,
136 0xa204,
137 0xa208,
138 0xa20c,
139 0x8d58,
140 0x9030,
141 0x9034,
142 0x9038,
143 0x903c,
144 0x9040,
145 0x9654,
146 0x897c,
147 0xa210,
148 0xa214,
149 0x9868,
150 0xa02c,
151 0x9664,
152 0x9698,
153 0x949c,
154 0x8e10,
155 0x8e18,
156 0x8c50,
157 0x8c58,
158 0x8c60,
159 0x8c68,
160 0x89b4,
161 0x9830,
162 0x802c,
163};
164static u32 tn_rlc_save_restore_register_list_size = ARRAY_SIZE(tn_rlc_save_restore_register_list);
36 165
37extern bool evergreen_is_display_hung(struct radeon_device *rdev); 166extern bool evergreen_is_display_hung(struct radeon_device *rdev);
38extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); 167extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
@@ -44,36 +173,29 @@ extern void evergreen_irq_suspend(struct radeon_device *rdev);
44extern int evergreen_mc_init(struct radeon_device *rdev); 173extern int evergreen_mc_init(struct radeon_device *rdev);
45extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); 174extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
46extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev); 175extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
47extern void si_rlc_fini(struct radeon_device *rdev); 176extern void evergreen_program_aspm(struct radeon_device *rdev);
48extern int si_rlc_init(struct radeon_device *rdev); 177extern void sumo_rlc_fini(struct radeon_device *rdev);
49 178extern int sumo_rlc_init(struct radeon_device *rdev);
50#define EVERGREEN_PFP_UCODE_SIZE 1120
51#define EVERGREEN_PM4_UCODE_SIZE 1376
52#define EVERGREEN_RLC_UCODE_SIZE 768
53#define BTC_MC_UCODE_SIZE 6024
54
55#define CAYMAN_PFP_UCODE_SIZE 2176
56#define CAYMAN_PM4_UCODE_SIZE 2176
57#define CAYMAN_RLC_UCODE_SIZE 1024
58#define CAYMAN_MC_UCODE_SIZE 6037
59
60#define ARUBA_RLC_UCODE_SIZE 1536
61 179
62/* Firmware Names */ 180/* Firmware Names */
63MODULE_FIRMWARE("radeon/BARTS_pfp.bin"); 181MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
64MODULE_FIRMWARE("radeon/BARTS_me.bin"); 182MODULE_FIRMWARE("radeon/BARTS_me.bin");
65MODULE_FIRMWARE("radeon/BARTS_mc.bin"); 183MODULE_FIRMWARE("radeon/BARTS_mc.bin");
184MODULE_FIRMWARE("radeon/BARTS_smc.bin");
66MODULE_FIRMWARE("radeon/BTC_rlc.bin"); 185MODULE_FIRMWARE("radeon/BTC_rlc.bin");
67MODULE_FIRMWARE("radeon/TURKS_pfp.bin"); 186MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
68MODULE_FIRMWARE("radeon/TURKS_me.bin"); 187MODULE_FIRMWARE("radeon/TURKS_me.bin");
69MODULE_FIRMWARE("radeon/TURKS_mc.bin"); 188MODULE_FIRMWARE("radeon/TURKS_mc.bin");
189MODULE_FIRMWARE("radeon/TURKS_smc.bin");
70MODULE_FIRMWARE("radeon/CAICOS_pfp.bin"); 190MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
71MODULE_FIRMWARE("radeon/CAICOS_me.bin"); 191MODULE_FIRMWARE("radeon/CAICOS_me.bin");
72MODULE_FIRMWARE("radeon/CAICOS_mc.bin"); 192MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
193MODULE_FIRMWARE("radeon/CAICOS_smc.bin");
73MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin"); 194MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
74MODULE_FIRMWARE("radeon/CAYMAN_me.bin"); 195MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
75MODULE_FIRMWARE("radeon/CAYMAN_mc.bin"); 196MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
76MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin"); 197MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
198MODULE_FIRMWARE("radeon/CAYMAN_smc.bin");
77MODULE_FIRMWARE("radeon/ARUBA_pfp.bin"); 199MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
78MODULE_FIRMWARE("radeon/ARUBA_me.bin"); 200MODULE_FIRMWARE("radeon/ARUBA_me.bin");
79MODULE_FIRMWARE("radeon/ARUBA_rlc.bin"); 201MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
@@ -566,6 +688,7 @@ int ni_init_microcode(struct radeon_device *rdev)
566 const char *chip_name; 688 const char *chip_name;
567 const char *rlc_chip_name; 689 const char *rlc_chip_name;
568 size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size; 690 size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
691 size_t smc_req_size = 0;
569 char fw_name[30]; 692 char fw_name[30];
570 int err; 693 int err;
571 694
@@ -586,6 +709,7 @@ int ni_init_microcode(struct radeon_device *rdev)
586 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; 709 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
587 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; 710 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
588 mc_req_size = BTC_MC_UCODE_SIZE * 4; 711 mc_req_size = BTC_MC_UCODE_SIZE * 4;
712 smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
589 break; 713 break;
590 case CHIP_TURKS: 714 case CHIP_TURKS:
591 chip_name = "TURKS"; 715 chip_name = "TURKS";
@@ -594,6 +718,7 @@ int ni_init_microcode(struct radeon_device *rdev)
594 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; 718 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
595 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; 719 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
596 mc_req_size = BTC_MC_UCODE_SIZE * 4; 720 mc_req_size = BTC_MC_UCODE_SIZE * 4;
721 smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
597 break; 722 break;
598 case CHIP_CAICOS: 723 case CHIP_CAICOS:
599 chip_name = "CAICOS"; 724 chip_name = "CAICOS";
@@ -602,6 +727,7 @@ int ni_init_microcode(struct radeon_device *rdev)
602 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; 727 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
603 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; 728 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
604 mc_req_size = BTC_MC_UCODE_SIZE * 4; 729 mc_req_size = BTC_MC_UCODE_SIZE * 4;
730 smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
605 break; 731 break;
606 case CHIP_CAYMAN: 732 case CHIP_CAYMAN:
607 chip_name = "CAYMAN"; 733 chip_name = "CAYMAN";
@@ -610,6 +736,7 @@ int ni_init_microcode(struct radeon_device *rdev)
610 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4; 736 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
611 rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4; 737 rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
612 mc_req_size = CAYMAN_MC_UCODE_SIZE * 4; 738 mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
739 smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
613 break; 740 break;
614 case CHIP_ARUBA: 741 case CHIP_ARUBA:
615 chip_name = "ARUBA"; 742 chip_name = "ARUBA";
@@ -672,6 +799,20 @@ int ni_init_microcode(struct radeon_device *rdev)
672 err = -EINVAL; 799 err = -EINVAL;
673 } 800 }
674 } 801 }
802
803 if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
804 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
805 err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev);
806 if (err)
807 goto out;
808 if (rdev->smc_fw->size != smc_req_size) {
809 printk(KERN_ERR
810 "ni_mc: Bogus length %zu in firmware \"%s\"\n",
811 rdev->mc_fw->size, fw_name);
812 err = -EINVAL;
813 }
814 }
815
675out: 816out:
676 platform_device_unregister(pdev); 817 platform_device_unregister(pdev);
677 818
@@ -692,6 +833,14 @@ out:
692 return err; 833 return err;
693} 834}
694 835
836int tn_get_temp(struct radeon_device *rdev)
837{
838 u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
839 int actual_temp = (temp / 8) - 49;
840
841 return actual_temp * 1000;
842}
843
695/* 844/*
696 * Core functions 845 * Core functions
697 */ 846 */
@@ -1027,6 +1176,16 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1027 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); 1176 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
1028 1177
1029 udelay(50); 1178 udelay(50);
1179
1180 /* set clockgating golden values on TN */
1181 if (rdev->family == CHIP_ARUBA) {
1182 tmp = RREG32_CG(CG_CGTT_LOCAL_0);
1183 tmp &= ~0x00380000;
1184 WREG32_CG(CG_CGTT_LOCAL_0, tmp);
1185 tmp = RREG32_CG(CG_CGTT_LOCAL_1);
1186 tmp &= ~0x0e000000;
1187 WREG32_CG(CG_CGTT_LOCAL_1, tmp);
1188 }
1030} 1189}
1031 1190
1032/* 1191/*
@@ -1928,6 +2087,8 @@ static int cayman_startup(struct radeon_device *rdev)
1928 2087
1929 /* enable pcie gen2 link */ 2088 /* enable pcie gen2 link */
1930 evergreen_pcie_gen2_enable(rdev); 2089 evergreen_pcie_gen2_enable(rdev);
2090 /* enable aspm */
2091 evergreen_program_aspm(rdev);
1931 2092
1932 if (rdev->flags & RADEON_IS_IGP) { 2093 if (rdev->flags & RADEON_IS_IGP) {
1933 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 2094 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
@@ -1972,7 +2133,10 @@ static int cayman_startup(struct radeon_device *rdev)
1972 2133
1973 /* allocate rlc buffers */ 2134 /* allocate rlc buffers */
1974 if (rdev->flags & RADEON_IS_IGP) { 2135 if (rdev->flags & RADEON_IS_IGP) {
1975 r = si_rlc_init(rdev); 2136 rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
2137 rdev->rlc.reg_list_size = tn_rlc_save_restore_register_list_size;
2138 rdev->rlc.cs_data = cayman_cs_data;
2139 r = sumo_rlc_init(rdev);
1976 if (r) { 2140 if (r) {
1977 DRM_ERROR("Failed to init rlc BOs!\n"); 2141 DRM_ERROR("Failed to init rlc BOs!\n");
1978 return r; 2142 return r;
@@ -2229,7 +2393,7 @@ int cayman_init(struct radeon_device *rdev)
2229 cayman_dma_fini(rdev); 2393 cayman_dma_fini(rdev);
2230 r600_irq_fini(rdev); 2394 r600_irq_fini(rdev);
2231 if (rdev->flags & RADEON_IS_IGP) 2395 if (rdev->flags & RADEON_IS_IGP)
2232 si_rlc_fini(rdev); 2396 sumo_rlc_fini(rdev);
2233 radeon_wb_fini(rdev); 2397 radeon_wb_fini(rdev);
2234 radeon_ib_pool_fini(rdev); 2398 radeon_ib_pool_fini(rdev);
2235 radeon_vm_manager_fini(rdev); 2399 radeon_vm_manager_fini(rdev);
@@ -2260,7 +2424,7 @@ void cayman_fini(struct radeon_device *rdev)
2260 cayman_dma_fini(rdev); 2424 cayman_dma_fini(rdev);
2261 r600_irq_fini(rdev); 2425 r600_irq_fini(rdev);
2262 if (rdev->flags & RADEON_IS_IGP) 2426 if (rdev->flags & RADEON_IS_IGP)
2263 si_rlc_fini(rdev); 2427 sumo_rlc_fini(rdev);
2264 radeon_wb_fini(rdev); 2428 radeon_wb_fini(rdev);
2265 radeon_vm_manager_fini(rdev); 2429 radeon_vm_manager_fini(rdev);
2266 radeon_ib_pool_fini(rdev); 2430 radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
new file mode 100644
index 000000000000..777d17e61312
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -0,0 +1,4316 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "nid.h"
27#include "r600_dpm.h"
28#include "ni_dpm.h"
29#include "atom.h"
30#include <linux/math64.h>
31
32#define MC_CG_ARB_FREQ_F0 0x0a
33#define MC_CG_ARB_FREQ_F1 0x0b
34#define MC_CG_ARB_FREQ_F2 0x0c
35#define MC_CG_ARB_FREQ_F3 0x0d
36
37#define SMC_RAM_END 0xC000
38
39static const struct ni_cac_weights cac_weights_cayman_xt =
40{
41 0x15,
42 0x2,
43 0x19,
44 0x2,
45 0x8,
46 0x14,
47 0x2,
48 0x16,
49 0xE,
50 0x17,
51 0x13,
52 0x2B,
53 0x10,
54 0x7,
55 0x5,
56 0x5,
57 0x5,
58 0x2,
59 0x3,
60 0x9,
61 0x10,
62 0x10,
63 0x2B,
64 0xA,
65 0x9,
66 0x4,
67 0xD,
68 0xD,
69 0x3E,
70 0x18,
71 0x14,
72 0,
73 0x3,
74 0x3,
75 0x5,
76 0,
77 0x2,
78 0,
79 0,
80 0,
81 0,
82 0,
83 0,
84 0,
85 0,
86 0,
87 0x1CC,
88 0,
89 0x164,
90 1,
91 1,
92 1,
93 1,
94 12,
95 12,
96 12,
97 0x12,
98 0x1F,
99 132,
100 5,
101 7,
102 0,
103 { 0, 0, 0, 0, 0, 0, 0, 0 },
104 { 0, 0, 0, 0 },
105 true
106};
107
108static const struct ni_cac_weights cac_weights_cayman_pro =
109{
110 0x16,
111 0x4,
112 0x10,
113 0x2,
114 0xA,
115 0x16,
116 0x2,
117 0x18,
118 0x10,
119 0x1A,
120 0x16,
121 0x2D,
122 0x12,
123 0xA,
124 0x6,
125 0x6,
126 0x6,
127 0x2,
128 0x4,
129 0xB,
130 0x11,
131 0x11,
132 0x2D,
133 0xC,
134 0xC,
135 0x7,
136 0x10,
137 0x10,
138 0x3F,
139 0x1A,
140 0x16,
141 0,
142 0x7,
143 0x4,
144 0x6,
145 1,
146 0x2,
147 0x1,
148 0,
149 0,
150 0,
151 0,
152 0,
153 0,
154 0x30,
155 0,
156 0x1CF,
157 0,
158 0x166,
159 1,
160 1,
161 1,
162 1,
163 12,
164 12,
165 12,
166 0x15,
167 0x1F,
168 132,
169 6,
170 6,
171 0,
172 { 0, 0, 0, 0, 0, 0, 0, 0 },
173 { 0, 0, 0, 0 },
174 true
175};
176
177static const struct ni_cac_weights cac_weights_cayman_le =
178{
179 0x7,
180 0xE,
181 0x1,
182 0xA,
183 0x1,
184 0x3F,
185 0x2,
186 0x18,
187 0x10,
188 0x1A,
189 0x1,
190 0x3F,
191 0x1,
192 0xE,
193 0x6,
194 0x6,
195 0x6,
196 0x2,
197 0x4,
198 0x9,
199 0x1A,
200 0x1A,
201 0x2C,
202 0xA,
203 0x11,
204 0x8,
205 0x19,
206 0x19,
207 0x1,
208 0x1,
209 0x1A,
210 0,
211 0x8,
212 0x5,
213 0x8,
214 0x1,
215 0x3,
216 0x1,
217 0,
218 0,
219 0,
220 0,
221 0,
222 0,
223 0x38,
224 0x38,
225 0x239,
226 0x3,
227 0x18A,
228 1,
229 1,
230 1,
231 1,
232 12,
233 12,
234 12,
235 0x15,
236 0x22,
237 132,
238 6,
239 6,
240 0,
241 { 0, 0, 0, 0, 0, 0, 0, 0 },
242 { 0, 0, 0, 0 },
243 true
244};
245
246#define NISLANDS_MGCG_SEQUENCE 300
247
248static const u32 cayman_cgcg_cgls_default[] =
249{
250 0x000008f8, 0x00000010, 0xffffffff,
251 0x000008fc, 0x00000000, 0xffffffff,
252 0x000008f8, 0x00000011, 0xffffffff,
253 0x000008fc, 0x00000000, 0xffffffff,
254 0x000008f8, 0x00000012, 0xffffffff,
255 0x000008fc, 0x00000000, 0xffffffff,
256 0x000008f8, 0x00000013, 0xffffffff,
257 0x000008fc, 0x00000000, 0xffffffff,
258 0x000008f8, 0x00000014, 0xffffffff,
259 0x000008fc, 0x00000000, 0xffffffff,
260 0x000008f8, 0x00000015, 0xffffffff,
261 0x000008fc, 0x00000000, 0xffffffff,
262 0x000008f8, 0x00000016, 0xffffffff,
263 0x000008fc, 0x00000000, 0xffffffff,
264 0x000008f8, 0x00000017, 0xffffffff,
265 0x000008fc, 0x00000000, 0xffffffff,
266 0x000008f8, 0x00000018, 0xffffffff,
267 0x000008fc, 0x00000000, 0xffffffff,
268 0x000008f8, 0x00000019, 0xffffffff,
269 0x000008fc, 0x00000000, 0xffffffff,
270 0x000008f8, 0x0000001a, 0xffffffff,
271 0x000008fc, 0x00000000, 0xffffffff,
272 0x000008f8, 0x0000001b, 0xffffffff,
273 0x000008fc, 0x00000000, 0xffffffff,
274 0x000008f8, 0x00000020, 0xffffffff,
275 0x000008fc, 0x00000000, 0xffffffff,
276 0x000008f8, 0x00000021, 0xffffffff,
277 0x000008fc, 0x00000000, 0xffffffff,
278 0x000008f8, 0x00000022, 0xffffffff,
279 0x000008fc, 0x00000000, 0xffffffff,
280 0x000008f8, 0x00000023, 0xffffffff,
281 0x000008fc, 0x00000000, 0xffffffff,
282 0x000008f8, 0x00000024, 0xffffffff,
283 0x000008fc, 0x00000000, 0xffffffff,
284 0x000008f8, 0x00000025, 0xffffffff,
285 0x000008fc, 0x00000000, 0xffffffff,
286 0x000008f8, 0x00000026, 0xffffffff,
287 0x000008fc, 0x00000000, 0xffffffff,
288 0x000008f8, 0x00000027, 0xffffffff,
289 0x000008fc, 0x00000000, 0xffffffff,
290 0x000008f8, 0x00000028, 0xffffffff,
291 0x000008fc, 0x00000000, 0xffffffff,
292 0x000008f8, 0x00000029, 0xffffffff,
293 0x000008fc, 0x00000000, 0xffffffff,
294 0x000008f8, 0x0000002a, 0xffffffff,
295 0x000008fc, 0x00000000, 0xffffffff,
296 0x000008f8, 0x0000002b, 0xffffffff,
297 0x000008fc, 0x00000000, 0xffffffff
298};
299#define CAYMAN_CGCG_CGLS_DEFAULT_LENGTH sizeof(cayman_cgcg_cgls_default) / (3 * sizeof(u32))
300
301static const u32 cayman_cgcg_cgls_disable[] =
302{
303 0x000008f8, 0x00000010, 0xffffffff,
304 0x000008fc, 0xffffffff, 0xffffffff,
305 0x000008f8, 0x00000011, 0xffffffff,
306 0x000008fc, 0xffffffff, 0xffffffff,
307 0x000008f8, 0x00000012, 0xffffffff,
308 0x000008fc, 0xffffffff, 0xffffffff,
309 0x000008f8, 0x00000013, 0xffffffff,
310 0x000008fc, 0xffffffff, 0xffffffff,
311 0x000008f8, 0x00000014, 0xffffffff,
312 0x000008fc, 0xffffffff, 0xffffffff,
313 0x000008f8, 0x00000015, 0xffffffff,
314 0x000008fc, 0xffffffff, 0xffffffff,
315 0x000008f8, 0x00000016, 0xffffffff,
316 0x000008fc, 0xffffffff, 0xffffffff,
317 0x000008f8, 0x00000017, 0xffffffff,
318 0x000008fc, 0xffffffff, 0xffffffff,
319 0x000008f8, 0x00000018, 0xffffffff,
320 0x000008fc, 0xffffffff, 0xffffffff,
321 0x000008f8, 0x00000019, 0xffffffff,
322 0x000008fc, 0xffffffff, 0xffffffff,
323 0x000008f8, 0x0000001a, 0xffffffff,
324 0x000008fc, 0xffffffff, 0xffffffff,
325 0x000008f8, 0x0000001b, 0xffffffff,
326 0x000008fc, 0xffffffff, 0xffffffff,
327 0x000008f8, 0x00000020, 0xffffffff,
328 0x000008fc, 0x00000000, 0xffffffff,
329 0x000008f8, 0x00000021, 0xffffffff,
330 0x000008fc, 0x00000000, 0xffffffff,
331 0x000008f8, 0x00000022, 0xffffffff,
332 0x000008fc, 0x00000000, 0xffffffff,
333 0x000008f8, 0x00000023, 0xffffffff,
334 0x000008fc, 0x00000000, 0xffffffff,
335 0x000008f8, 0x00000024, 0xffffffff,
336 0x000008fc, 0x00000000, 0xffffffff,
337 0x000008f8, 0x00000025, 0xffffffff,
338 0x000008fc, 0x00000000, 0xffffffff,
339 0x000008f8, 0x00000026, 0xffffffff,
340 0x000008fc, 0x00000000, 0xffffffff,
341 0x000008f8, 0x00000027, 0xffffffff,
342 0x000008fc, 0x00000000, 0xffffffff,
343 0x000008f8, 0x00000028, 0xffffffff,
344 0x000008fc, 0x00000000, 0xffffffff,
345 0x000008f8, 0x00000029, 0xffffffff,
346 0x000008fc, 0x00000000, 0xffffffff,
347 0x000008f8, 0x0000002a, 0xffffffff,
348 0x000008fc, 0x00000000, 0xffffffff,
349 0x000008f8, 0x0000002b, 0xffffffff,
350 0x000008fc, 0x00000000, 0xffffffff,
351 0x00000644, 0x000f7902, 0x001f4180,
352 0x00000644, 0x000f3802, 0x001f4180
353};
354#define CAYMAN_CGCG_CGLS_DISABLE_LENGTH sizeof(cayman_cgcg_cgls_disable) / (3 * sizeof(u32))
355
356static const u32 cayman_cgcg_cgls_enable[] =
357{
358 0x00000644, 0x000f7882, 0x001f4080,
359 0x000008f8, 0x00000010, 0xffffffff,
360 0x000008fc, 0x00000000, 0xffffffff,
361 0x000008f8, 0x00000011, 0xffffffff,
362 0x000008fc, 0x00000000, 0xffffffff,
363 0x000008f8, 0x00000012, 0xffffffff,
364 0x000008fc, 0x00000000, 0xffffffff,
365 0x000008f8, 0x00000013, 0xffffffff,
366 0x000008fc, 0x00000000, 0xffffffff,
367 0x000008f8, 0x00000014, 0xffffffff,
368 0x000008fc, 0x00000000, 0xffffffff,
369 0x000008f8, 0x00000015, 0xffffffff,
370 0x000008fc, 0x00000000, 0xffffffff,
371 0x000008f8, 0x00000016, 0xffffffff,
372 0x000008fc, 0x00000000, 0xffffffff,
373 0x000008f8, 0x00000017, 0xffffffff,
374 0x000008fc, 0x00000000, 0xffffffff,
375 0x000008f8, 0x00000018, 0xffffffff,
376 0x000008fc, 0x00000000, 0xffffffff,
377 0x000008f8, 0x00000019, 0xffffffff,
378 0x000008fc, 0x00000000, 0xffffffff,
379 0x000008f8, 0x0000001a, 0xffffffff,
380 0x000008fc, 0x00000000, 0xffffffff,
381 0x000008f8, 0x0000001b, 0xffffffff,
382 0x000008fc, 0x00000000, 0xffffffff,
383 0x000008f8, 0x00000020, 0xffffffff,
384 0x000008fc, 0xffffffff, 0xffffffff,
385 0x000008f8, 0x00000021, 0xffffffff,
386 0x000008fc, 0xffffffff, 0xffffffff,
387 0x000008f8, 0x00000022, 0xffffffff,
388 0x000008fc, 0xffffffff, 0xffffffff,
389 0x000008f8, 0x00000023, 0xffffffff,
390 0x000008fc, 0xffffffff, 0xffffffff,
391 0x000008f8, 0x00000024, 0xffffffff,
392 0x000008fc, 0xffffffff, 0xffffffff,
393 0x000008f8, 0x00000025, 0xffffffff,
394 0x000008fc, 0xffffffff, 0xffffffff,
395 0x000008f8, 0x00000026, 0xffffffff,
396 0x000008fc, 0xffffffff, 0xffffffff,
397 0x000008f8, 0x00000027, 0xffffffff,
398 0x000008fc, 0xffffffff, 0xffffffff,
399 0x000008f8, 0x00000028, 0xffffffff,
400 0x000008fc, 0xffffffff, 0xffffffff,
401 0x000008f8, 0x00000029, 0xffffffff,
402 0x000008fc, 0xffffffff, 0xffffffff,
403 0x000008f8, 0x0000002a, 0xffffffff,
404 0x000008fc, 0xffffffff, 0xffffffff,
405 0x000008f8, 0x0000002b, 0xffffffff,
406 0x000008fc, 0xffffffff, 0xffffffff
407};
408#define CAYMAN_CGCG_CGLS_ENABLE_LENGTH sizeof(cayman_cgcg_cgls_enable) / (3 * sizeof(u32))
409
/*
 * Power-on defaults for medium-grain clock gating (MGCG) on Cayman.
 * Entries are {register offset, value, write mask} triples, applied in
 * order by btc_program_mgcg_hw_sequence().  The repeated writes to
 * 0x802c/0x3fc4 with different high bits appear to select an index/bank
 * before the per-clock-domain 0x91xx programming — TODO confirm against
 * the register spec.
 */
static const u32 cayman_mgcg_default[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x00003fc4, 0xc0000000, 0xffffffff,
	0x00005448, 0x00000100, 0xffffffff,
	0x000055e4, 0x00000100, 0xffffffff,
	0x0000160c, 0x00000100, 0xffffffff,
	0x00008984, 0x06000100, 0xffffffff,
	0x0000c164, 0x00000100, 0xffffffff,
	0x00008a18, 0x00000100, 0xffffffff,
	0x0000897c, 0x06000100, 0xffffffff,
	0x00008b28, 0x00000100, 0xffffffff,
	0x00009144, 0x00800200, 0xffffffff,
	0x00009a60, 0x00000100, 0xffffffff,
	0x00009868, 0x00000100, 0xffffffff,
	0x00008d58, 0x00000100, 0xffffffff,
	0x00009510, 0x00000100, 0xffffffff,
	0x0000949c, 0x00000100, 0xffffffff,
	0x00009654, 0x00000100, 0xffffffff,
	0x00009030, 0x00000100, 0xffffffff,
	0x00009034, 0x00000100, 0xffffffff,
	0x00009038, 0x00000100, 0xffffffff,
	0x0000903c, 0x00000100, 0xffffffff,
	0x00009040, 0x00000100, 0xffffffff,
	0x0000a200, 0x00000100, 0xffffffff,
	0x0000a204, 0x00000100, 0xffffffff,
	0x0000a208, 0x00000100, 0xffffffff,
	0x0000a20c, 0x00000100, 0xffffffff,
	0x00009744, 0x00000100, 0xffffffff,
	0x00003f80, 0x00000100, 0xffffffff,
	0x0000a210, 0x00000100, 0xffffffff,
	0x0000a214, 0x00000100, 0xffffffff,
	0x000004d8, 0x00000100, 0xffffffff,
	0x00009664, 0x00000100, 0xffffffff,
	0x00009698, 0x00000100, 0xffffffff,
	0x000004d4, 0x00000200, 0xffffffff,
	0x000004d0, 0x00000000, 0xffffffff,
	0x000030cc, 0x00000104, 0xffffffff,
	0x0000d0c0, 0x00000100, 0xffffffff,
	0x0000d8c0, 0x00000100, 0xffffffff,
	0x0000802c, 0x40000000, 0xffffffff,
	0x00003fc4, 0x40000000, 0xffffffff,
	0x0000915c, 0x00010000, 0xffffffff,
	0x00009160, 0x00030002, 0xffffffff,
	0x00009164, 0x00050004, 0xffffffff,
	0x00009168, 0x00070006, 0xffffffff,
	0x00009178, 0x00070000, 0xffffffff,
	0x0000917c, 0x00030002, 0xffffffff,
	0x00009180, 0x00050004, 0xffffffff,
	0x0000918c, 0x00010006, 0xffffffff,
	0x00009190, 0x00090008, 0xffffffff,
	0x00009194, 0x00070000, 0xffffffff,
	0x00009198, 0x00030002, 0xffffffff,
	0x0000919c, 0x00050004, 0xffffffff,
	0x000091a8, 0x00010006, 0xffffffff,
	0x000091ac, 0x00090008, 0xffffffff,
	0x000091b0, 0x00070000, 0xffffffff,
	0x000091b4, 0x00030002, 0xffffffff,
	0x000091b8, 0x00050004, 0xffffffff,
	0x000091c4, 0x00010006, 0xffffffff,
	0x000091c8, 0x00090008, 0xffffffff,
	0x000091cc, 0x00070000, 0xffffffff,
	0x000091d0, 0x00030002, 0xffffffff,
	0x000091d4, 0x00050004, 0xffffffff,
	0x000091e0, 0x00010006, 0xffffffff,
	0x000091e4, 0x00090008, 0xffffffff,
	0x000091e8, 0x00000000, 0xffffffff,
	0x000091ec, 0x00070000, 0xffffffff,
	0x000091f0, 0x00030002, 0xffffffff,
	0x000091f4, 0x00050004, 0xffffffff,
	0x00009200, 0x00010006, 0xffffffff,
	0x00009204, 0x00090008, 0xffffffff,
	0x00009208, 0x00070000, 0xffffffff,
	0x0000920c, 0x00030002, 0xffffffff,
	0x00009210, 0x00050004, 0xffffffff,
	0x0000921c, 0x00010006, 0xffffffff,
	0x00009220, 0x00090008, 0xffffffff,
	0x00009224, 0x00070000, 0xffffffff,
	0x00009228, 0x00030002, 0xffffffff,
	0x0000922c, 0x00050004, 0xffffffff,
	0x00009238, 0x00010006, 0xffffffff,
	0x0000923c, 0x00090008, 0xffffffff,
	0x00009240, 0x00070000, 0xffffffff,
	0x00009244, 0x00030002, 0xffffffff,
	0x00009248, 0x00050004, 0xffffffff,
	0x00009254, 0x00010006, 0xffffffff,
	0x00009258, 0x00090008, 0xffffffff,
	0x0000925c, 0x00070000, 0xffffffff,
	0x00009260, 0x00030002, 0xffffffff,
	0x00009264, 0x00050004, 0xffffffff,
	0x00009270, 0x00010006, 0xffffffff,
	0x00009274, 0x00090008, 0xffffffff,
	0x00009278, 0x00070000, 0xffffffff,
	0x0000927c, 0x00030002, 0xffffffff,
	0x00009280, 0x00050004, 0xffffffff,
	0x0000928c, 0x00010006, 0xffffffff,
	0x00009290, 0x00090008, 0xffffffff,
	0x000092a8, 0x00070000, 0xffffffff,
	0x000092ac, 0x00030002, 0xffffffff,
	0x000092b0, 0x00050004, 0xffffffff,
	0x000092bc, 0x00010006, 0xffffffff,
	0x000092c0, 0x00090008, 0xffffffff,
	0x000092c4, 0x00070000, 0xffffffff,
	0x000092c8, 0x00030002, 0xffffffff,
	0x000092cc, 0x00050004, 0xffffffff,
	0x000092d8, 0x00010006, 0xffffffff,
	0x000092dc, 0x00090008, 0xffffffff,
	0x00009294, 0x00000000, 0xffffffff,
	/* same 0x91xx sequence again under a different 0x802c/0x3fc4 selector */
	0x0000802c, 0x40010000, 0xffffffff,
	0x00003fc4, 0x40010000, 0xffffffff,
	0x0000915c, 0x00010000, 0xffffffff,
	0x00009160, 0x00030002, 0xffffffff,
	0x00009164, 0x00050004, 0xffffffff,
	0x00009168, 0x00070006, 0xffffffff,
	0x00009178, 0x00070000, 0xffffffff,
	0x0000917c, 0x00030002, 0xffffffff,
	0x00009180, 0x00050004, 0xffffffff,
	0x0000918c, 0x00010006, 0xffffffff,
	0x00009190, 0x00090008, 0xffffffff,
	0x00009194, 0x00070000, 0xffffffff,
	0x00009198, 0x00030002, 0xffffffff,
	0x0000919c, 0x00050004, 0xffffffff,
	0x000091a8, 0x00010006, 0xffffffff,
	0x000091ac, 0x00090008, 0xffffffff,
	0x000091b0, 0x00070000, 0xffffffff,
	0x000091b4, 0x00030002, 0xffffffff,
	0x000091b8, 0x00050004, 0xffffffff,
	0x000091c4, 0x00010006, 0xffffffff,
	0x000091c8, 0x00090008, 0xffffffff,
	0x000091cc, 0x00070000, 0xffffffff,
	0x000091d0, 0x00030002, 0xffffffff,
	0x000091d4, 0x00050004, 0xffffffff,
	0x000091e0, 0x00010006, 0xffffffff,
	0x000091e4, 0x00090008, 0xffffffff,
	0x000091e8, 0x00000000, 0xffffffff,
	0x000091ec, 0x00070000, 0xffffffff,
	0x000091f0, 0x00030002, 0xffffffff,
	0x000091f4, 0x00050004, 0xffffffff,
	0x00009200, 0x00010006, 0xffffffff,
	0x00009204, 0x00090008, 0xffffffff,
	0x00009208, 0x00070000, 0xffffffff,
	0x0000920c, 0x00030002, 0xffffffff,
	0x00009210, 0x00050004, 0xffffffff,
	0x0000921c, 0x00010006, 0xffffffff,
	0x00009220, 0x00090008, 0xffffffff,
	0x00009224, 0x00070000, 0xffffffff,
	0x00009228, 0x00030002, 0xffffffff,
	0x0000922c, 0x00050004, 0xffffffff,
	0x00009238, 0x00010006, 0xffffffff,
	0x0000923c, 0x00090008, 0xffffffff,
	0x00009240, 0x00070000, 0xffffffff,
	0x00009244, 0x00030002, 0xffffffff,
	0x00009248, 0x00050004, 0xffffffff,
	0x00009254, 0x00010006, 0xffffffff,
	0x00009258, 0x00090008, 0xffffffff,
	0x0000925c, 0x00070000, 0xffffffff,
	0x00009260, 0x00030002, 0xffffffff,
	0x00009264, 0x00050004, 0xffffffff,
	0x00009270, 0x00010006, 0xffffffff,
	0x00009274, 0x00090008, 0xffffffff,
	0x00009278, 0x00070000, 0xffffffff,
	0x0000927c, 0x00030002, 0xffffffff,
	0x00009280, 0x00050004, 0xffffffff,
	0x0000928c, 0x00010006, 0xffffffff,
	0x00009290, 0x00090008, 0xffffffff,
	0x000092a8, 0x00070000, 0xffffffff,
	0x000092ac, 0x00030002, 0xffffffff,
	0x000092b0, 0x00050004, 0xffffffff,
	0x000092bc, 0x00010006, 0xffffffff,
	0x000092c0, 0x00090008, 0xffffffff,
	0x000092c4, 0x00070000, 0xffffffff,
	0x000092c8, 0x00030002, 0xffffffff,
	0x000092cc, 0x00050004, 0xffffffff,
	0x000092d8, 0x00010006, 0xffffffff,
	0x000092dc, 0x00090008, 0xffffffff,
	0x00009294, 0x00000000, 0xffffffff,
	/* 0x8f8/0x8fc index/data pairs: clear entries 0x10..0x1b */
	0x0000802c, 0xc0000000, 0xffffffff,
	0x00003fc4, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000010, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000011, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000012, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000013, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000014, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000015, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000016, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000017, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000018, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000019, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001a, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001b, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff
};
/* Number of {reg, value, mask} triples in the table. */
#define CAYMAN_MGCG_DEFAULT_LENGTH sizeof(cayman_mgcg_default) / (3 * sizeof(u32))
614
/*
 * Register writes that disable MGCG on Cayman.  {reg, value, mask}
 * triples; the 0x8f8/0x8fc pairs write 0xffffffff to indexed entries
 * 0..3, and 0x9150 receives the gating-off control value.
 */
static const u32 cayman_mgcg_disable[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000000, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000001, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000002, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000003, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x00009150, 0x00600000, 0xffffffff
};
/* Number of {reg, value, mask} triples in the table. */
#define CAYMAN_MGCG_DISABLE_LENGTH sizeof(cayman_mgcg_disable) / (3 * sizeof(u32))
629
/*
 * Register writes that enable MGCG on Cayman.  Mirrors
 * cayman_mgcg_disable but clears the indexed entries (entry 2 keeps
 * 0x00600000) and programs 0x9150 with the gating-on value.
 */
static const u32 cayman_mgcg_enable[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000000, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000001, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000002, 0xffffffff,
	0x000008fc, 0x00600000, 0xffffffff,
	0x000008f8, 0x00000003, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x00009150, 0x96944200, 0xffffffff
};

/* Number of {reg, value, mask} triples in the table. */
#define CAYMAN_MGCG_ENABLE_LENGTH sizeof(cayman_mgcg_enable) / (3 * sizeof(u32))
645
/* Sequence id used for the system light-sleep programming — TODO confirm
 * consumer of this value. */
#define NISLANDS_SYSLS_SEQUENCE 100

/*
 * Power-on defaults for system light sleep (LS) on Cayman.
 * {reg, value, mask} triples applied by btc_program_mgcg_hw_sequence().
 */
static const u32 cayman_sysls_default[] =
{
	/* Register,   Value,      Mask bits */
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x0000d8bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x00002f50, 0x00000404, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00008dfc, 0x00000000, 0xffffffff
};
/* Number of {reg, value, mask} triples in the table. */
#define CAYMAN_SYSLS_DEFAULT_LENGTH sizeof(cayman_sysls_default) / (3 * sizeof(u32))
670
/*
 * Register writes that disable system light sleep on Cayman.
 * {reg, value, mask} triples applied by btc_program_mgcg_hw_sequence().
 */
static const u32 cayman_sysls_disable[] =
{
	/* Register,   Value,      Mask bits */
	0x0000d0c0, 0x00000000, 0xffffffff,
	0x0000d8c0, 0x00000000, 0xffffffff,
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x0000d8bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x00041401, 0xffffffff,
	0x0000264c, 0x00040400, 0xffffffff,
	0x00002648, 0x00040400, 0xffffffff,
	0x00002650, 0x00040400, 0xffffffff,
	0x000020b8, 0x00040400, 0xffffffff,
	0x000020bc, 0x00040400, 0xffffffff,
	0x000020c0, 0x00040c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680000, 0xffffffff,
	0x00002f50, 0x00000404, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00007ffd, 0xffffffff,
	0x00000c7c, 0x0000ff00, 0xffffffff,
	0x00008dfc, 0x0000007f, 0xffffffff
};
/* Number of {reg, value, mask} triples in the table. */
#define CAYMAN_SYSLS_DISABLE_LENGTH sizeof(cayman_sysls_disable) / (3 * sizeof(u32))
695
/*
 * Register writes that enable system light sleep on Cayman.
 * {reg, value, mask} triples applied by btc_program_mgcg_hw_sequence().
 */
static const u32 cayman_sysls_enable[] =
{
	/* Register,   Value,      Mask bits */
	0x000055e8, 0x00000001, 0xffffffff,
	0x0000d0bc, 0x00000100, 0xffffffff,
	0x0000d8bc, 0x00000100, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x00002f50, 0x00000903, 0xffffffff,
	0x000004c8, 0x00000000, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00008dfc, 0x00000000, 0xffffffff
};
/* Number of {reg, value, mask} triples in the table. */
#define CAYMAN_SYSLS_ENABLE_LENGTH sizeof(cayman_sysls_enable) / (3 * sizeof(u32))
718
/* Forward declarations of power-info accessors defined elsewhere in the driver. */
struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
721
722struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
723{
724 struct ni_power_info *pi = rdev->pm.dpm.priv;
725
726 return pi;
727}
728
729struct ni_ps *ni_get_ps(struct radeon_ps *rps)
730{
731 struct ni_ps *ps = rps->ps_priv;
732
733 return ps;
734}
735
/*
 * Evaluate the leakage-power model in drm 32.32 fixed point:
 *
 *   leakage = I_leak * kt * kv * V
 *   kt = (at/1000) * exp((bt/1000) * T)
 *   kv = (av/1000) * exp((bv/1000) * V)
 *
 * All inputs are divided by 1000 before use and the result is multiplied
 * back by 1000, so v/t/ileakage and the coefficients are presumably in
 * milli-units — TODO confirm units against the callers.
 *
 * @coeff:    leakage model coefficients (at/bt, av/bv)
 * @v:        voltage
 * @t:        temperature
 * @ileakage: leakage current
 * @leakage:  out: computed leakage power
 */
static void ni_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
						     u16 v, s32 t,
						     u32 ileakage,
						     u32 *leakage)
{
	s64 kt, kv, leakage_w, i_leakage, vddc, temperature;

	/* convert integer inputs to fixed point, pre-scaled by 1/1000 */
	i_leakage = div64_s64(drm_int2fixp(ileakage), 1000);
	vddc = div64_s64(drm_int2fixp(v), 1000);
	temperature = div64_s64(drm_int2fixp(t), 1000);

	kt = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->at), 1000),
			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bt), 1000), temperature)));
	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 1000),
			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 1000), vddc)));

	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);

	/* scale back by 1000 and truncate to integer */
	*leakage = drm_fixp2int(leakage_w * 1000);
}
756
/*
 * Thin wrapper around ni_calculate_leakage_for_v_and_t_formula();
 * @rdev is currently unused but kept for interface symmetry with the
 * other per-asic leakage helpers.
 */
static void ni_calculate_leakage_for_v_and_t(struct radeon_device *rdev,
					     const struct ni_leakage_coeffients *coeff,
					     u16 v,
					     s32 t,
					     u32 i_leakage,
					     u32 *leakage)
{
	ni_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
}
766
/*
 * Clamp and massage a requested power state so it satisfies the
 * platform limits and hardware constraints before it is programmed:
 *  - cap clocks/voltages at the AC or DC maximums,
 *  - force a single mclk across all levels when mclk switching must be
 *    disabled (more than one active crtc),
 *  - enforce monotonically non-decreasing clocks/voltages per level,
 *  - apply the btc blacklist and voltage-dependency adjustments,
 *  - recompute the state's DC compatibility and PCIe gen2 flags.
 * The state is modified in place.
 */
static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ni_ps *ps = ni_get_ps(rps);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 mclk, sclk;
	u16 vddc, vddci;
	int i;

	/* mclk switching glitches with multiple displays active */
	if (rdev->pm.dpm.new_active_crtc_count > 1)
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	/* on battery, clamp every level to the DC maximums */
	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
			if (ps->performance_levels[i].vddc > max_limits->vddc)
				ps->performance_levels[i].vddc = max_limits->vddc;
			if (ps->performance_levels[i].vddci > max_limits->vddci)
				ps->performance_levels[i].vddci = max_limits->vddci;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		/* pin mclk/vddci at the highest level's values */
		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
		vddc = ps->performance_levels[0].vddc;
		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
	} else {
		sclk = ps->performance_levels[0].sclk;
		mclk = ps->performance_levels[0].mclk;
		vddc = ps->performance_levels[0].vddc;
		vddci = ps->performance_levels[0].vddci;
	}

	/* adjusted low state */
	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;
	ps->performance_levels[0].vddc = vddc;
	ps->performance_levels[0].vddci = vddci;

	btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
				  &ps->performance_levels[0].sclk,
				  &ps->performance_levels[0].mclk);

	/* sclk/vddc must not decrease from one level to the next */
	for (i = 1; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
			ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
		if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
			ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
	}

	if (disable_mclk_switching) {
		/* use the single highest mclk/vddci for every level */
		mclk = ps->performance_levels[0].mclk;
		for (i = 1; i < ps->performance_level_count; i++) {
			if (mclk < ps->performance_levels[i].mclk)
				mclk = ps->performance_levels[i].mclk;
		}
		for (i = 0; i < ps->performance_level_count; i++) {
			ps->performance_levels[i].mclk = mclk;
			ps->performance_levels[i].vddci = vddci;
		}
	} else {
		/* mclk/vddci must not decrease from one level to the next */
		for (i = 1; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
		}
	}

	for (i = 1; i < ps->performance_level_count; i++)
		btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
					  &ps->performance_levels[i].sclk,
					  &ps->performance_levels[i].mclk);

	for (i = 0; i < ps->performance_level_count; i++)
		btc_adjust_clock_combinations(rdev, max_limits,
					      &ps->performance_levels[i]);

	/* raise voltages to satisfy the clock-dependency tables */
	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
						   ps->performance_levels[i].sclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddci, &ps->performance_levels[i].vddci);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
						   rdev->clock.current_dispclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
	}

	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_delta_rules(rdev,
					      max_limits->vddc, max_limits->vddci,
					      &ps->performance_levels[i].vddc,
					      &ps->performance_levels[i].vddci);
	}

	/* state is DC-compatible only if no level exceeds the DC vddc limit */
	ps->dc_compatible = true;
	for (i = 0; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].vddc > rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
			ps->dc_compatible = false;

		/* pcie gen2 needs a minimum vddc */
		if (ps->performance_levels[i].vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
			ps->performance_levels[i].flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
	}
}
890
891static void ni_cg_clockgating_default(struct radeon_device *rdev)
892{
893 u32 count;
894 const u32 *ps = NULL;
895
896 ps = (const u32 *)&cayman_cgcg_cgls_default;
897 count = CAYMAN_CGCG_CGLS_DEFAULT_LENGTH;
898
899 btc_program_mgcg_hw_sequence(rdev, ps, count);
900}
901
902static void ni_gfx_clockgating_enable(struct radeon_device *rdev,
903 bool enable)
904{
905 u32 count;
906 const u32 *ps = NULL;
907
908 if (enable) {
909 ps = (const u32 *)&cayman_cgcg_cgls_enable;
910 count = CAYMAN_CGCG_CGLS_ENABLE_LENGTH;
911 } else {
912 ps = (const u32 *)&cayman_cgcg_cgls_disable;
913 count = CAYMAN_CGCG_CGLS_DISABLE_LENGTH;
914 }
915
916 btc_program_mgcg_hw_sequence(rdev, ps, count);
917}
918
919static void ni_mg_clockgating_default(struct radeon_device *rdev)
920{
921 u32 count;
922 const u32 *ps = NULL;
923
924 ps = (const u32 *)&cayman_mgcg_default;
925 count = CAYMAN_MGCG_DEFAULT_LENGTH;
926
927 btc_program_mgcg_hw_sequence(rdev, ps, count);
928}
929
930static void ni_mg_clockgating_enable(struct radeon_device *rdev,
931 bool enable)
932{
933 u32 count;
934 const u32 *ps = NULL;
935
936 if (enable) {
937 ps = (const u32 *)&cayman_mgcg_enable;
938 count = CAYMAN_MGCG_ENABLE_LENGTH;
939 } else {
940 ps = (const u32 *)&cayman_mgcg_disable;
941 count = CAYMAN_MGCG_DISABLE_LENGTH;
942 }
943
944 btc_program_mgcg_hw_sequence(rdev, ps, count);
945}
946
947static void ni_ls_clockgating_default(struct radeon_device *rdev)
948{
949 u32 count;
950 const u32 *ps = NULL;
951
952 ps = (const u32 *)&cayman_sysls_default;
953 count = CAYMAN_SYSLS_DEFAULT_LENGTH;
954
955 btc_program_mgcg_hw_sequence(rdev, ps, count);
956}
957
958static void ni_ls_clockgating_enable(struct radeon_device *rdev,
959 bool enable)
960{
961 u32 count;
962 const u32 *ps = NULL;
963
964 if (enable) {
965 ps = (const u32 *)&cayman_sysls_enable;
966 count = CAYMAN_SYSLS_ENABLE_LENGTH;
967 } else {
968 ps = (const u32 *)&cayman_sysls_disable;
969 count = CAYMAN_SYSLS_DISABLE_LENGTH;
970 }
971
972 btc_program_mgcg_hw_sequence(rdev, ps, count);
973
974}
975
976static int ni_patch_single_dependency_table_based_on_leakage(struct radeon_device *rdev,
977 struct radeon_clock_voltage_dependency_table *table)
978{
979 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
980 u32 i;
981
982 if (table) {
983 for (i = 0; i < table->count; i++) {
984 if (0xff01 == table->entries[i].v) {
985 if (pi->max_vddc == 0)
986 return -EINVAL;
987 table->entries[i].v = pi->max_vddc;
988 }
989 }
990 }
991 return 0;
992}
993
994static int ni_patch_dependency_tables_based_on_leakage(struct radeon_device *rdev)
995{
996 int ret = 0;
997
998 ret = ni_patch_single_dependency_table_based_on_leakage(rdev,
999 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
1000
1001 ret = ni_patch_single_dependency_table_based_on_leakage(rdev,
1002 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
1003 return ret;
1004}
1005
/* Disable dynamic power management by clearing GLOBAL_PWRMGT_EN. */
static void ni_stop_dpm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}
1010
#if 0
/*
 * Tell the SMC when we are running on AC power (currently unused;
 * compiled out).  Returns 0 on success, -EINVAL if the SMC rejects
 * the message.  Nothing is sent for DC power.
 */
static int ni_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	if (ac_power)
		return (rv770_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
			0 : -EINVAL;

	return 0;
}
#endif
1022
/*
 * Send @msg to the SMC with a single u32 argument.  The argument is
 * passed through the scratch register before the message is issued.
 * Returns the SMC result code.
 */
static PPSMC_Result ni_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_SCRATCH0, parameter);	/* argument picked up by SMC firmware */
	return rv770_send_msg_to_smc(rdev, msg);
}
1029
1030static int ni_restrict_performance_levels_before_switch(struct radeon_device *rdev)
1031{
1032 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
1033 return -EINVAL;
1034
1035 return (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
1036 0 : -EINVAL;
1037}
1038
#if 0
/*
 * Re-enable all performance levels after a state switch (currently
 * unused; compiled out).  Clears both the forced-level and
 * enabled-level restrictions.  Returns 0 on success, -EINVAL if
 * either SMC message fails.
 */
static int ni_unrestrict_performance_levels_after_switch(struct radeon_device *rdev)
{
	if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
		return -EINVAL;

	return (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
#endif
1049
/*
 * Halt the SMC.  First wait (bounded by rdev->usec_timeout) for
 * LB_SYNC_RESET_SEL to leave state 1 — presumably so we do not stop
 * the SMC mid-operation (TODO confirm the exact handshake) — then
 * settle briefly and perform the common r7xx SMC stop.
 */
static void ni_stop_smc(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(LB_SYNC_RESET_SEL) & LB_SYNC_RESET_SEL_MASK;
		if (tmp != 1)
			break;
		udelay(1);
	}

	udelay(100);

	r7xx_stop_smc(rdev);
}
1066
1067static int ni_process_firmware_header(struct radeon_device *rdev)
1068{
1069 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1070 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1071 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1072 u32 tmp;
1073 int ret;
1074
1075 ret = rv770_read_smc_sram_dword(rdev,
1076 NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
1077 NISLANDS_SMC_FIRMWARE_HEADER_stateTable,
1078 &tmp, pi->sram_end);
1079
1080 if (ret)
1081 return ret;
1082
1083 pi->state_table_start = (u16)tmp;
1084
1085 ret = rv770_read_smc_sram_dword(rdev,
1086 NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
1087 NISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
1088 &tmp, pi->sram_end);
1089
1090 if (ret)
1091 return ret;
1092
1093 pi->soft_regs_start = (u16)tmp;
1094
1095 ret = rv770_read_smc_sram_dword(rdev,
1096 NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
1097 NISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
1098 &tmp, pi->sram_end);
1099
1100 if (ret)
1101 return ret;
1102
1103 eg_pi->mc_reg_table_start = (u16)tmp;
1104
1105 ret = rv770_read_smc_sram_dword(rdev,
1106 NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
1107 NISLANDS_SMC_FIRMWARE_HEADER_fanTable,
1108 &tmp, pi->sram_end);
1109
1110 if (ret)
1111 return ret;
1112
1113 ni_pi->fan_table_start = (u16)tmp;
1114
1115 ret = rv770_read_smc_sram_dword(rdev,
1116 NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
1117 NISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
1118 &tmp, pi->sram_end);
1119
1120 if (ret)
1121 return ret;
1122
1123 ni_pi->arb_table_start = (u16)tmp;
1124
1125 ret = rv770_read_smc_sram_dword(rdev,
1126 NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
1127 NISLANDS_SMC_FIRMWARE_HEADER_cacTable,
1128 &tmp, pi->sram_end);
1129
1130 if (ret)
1131 return ret;
1132
1133 ni_pi->cac_table_start = (u16)tmp;
1134
1135 ret = rv770_read_smc_sram_dword(rdev,
1136 NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
1137 NISLANDS_SMC_FIRMWARE_HEADER_spllTable,
1138 &tmp, pi->sram_end);
1139
1140 if (ret)
1141 return ret;
1142
1143 ni_pi->spll_table_start = (u16)tmp;
1144
1145
1146 return ret;
1147}
1148
/*
 * Snapshot the current SPLL/MPLL clock-control registers into the
 * power-info cache so the driver can restore or base new settings on
 * the boot-time values.
 */
static void ni_read_clock_registers(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);

	ni_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
	ni_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
	ni_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
	ni_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
	ni_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
	ni_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
	ni_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	ni_pi->clock_registers.mpll_ad_func_cntl_2 = RREG32(MPLL_AD_FUNC_CNTL_2);
	ni_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	ni_pi->clock_registers.mpll_dq_func_cntl_2 = RREG32(MPLL_DQ_FUNC_CNTL_2);
	ni_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	ni_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	ni_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	ni_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}
1168
#if 0
/*
 * Enter ultra-low-power state (currently unused; compiled out).
 * Forces the gfx clock on briefly (pulse sequence via
 * SCLK_PWRMGT_CNTL, with a posting read of GB_ADDR_CONFIG) when gfx
 * clock gating is active, then asks the SMC to switch to minimum
 * power and waits 25 ms for it to take effect.
 */
static int ni_enter_ulp_state(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	if (pi->gfx_clock_gating) {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
		RREG32(GB_ADDR_CONFIG);
	}

	WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
		 ~HOST_SMC_MSG_MASK);

	udelay(25000);

	return 0;
}
#endif
1189
1190static void ni_program_response_times(struct radeon_device *rdev)
1191{
1192 u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
1193 u32 vddc_dly, bb_dly, acpi_dly, vbi_dly, mclk_switch_limit;
1194 u32 reference_clock;
1195
1196 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
1197
1198 voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
1199 backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
1200
1201 if (voltage_response_time == 0)
1202 voltage_response_time = 1000;
1203
1204 if (backbias_response_time == 0)
1205 backbias_response_time = 1000;
1206
1207 acpi_delay_time = 15000;
1208 vbi_time_out = 100000;
1209
1210 reference_clock = radeon_get_xclk(rdev);
1211
1212 vddc_dly = (voltage_response_time * reference_clock) / 1600;
1213 bb_dly = (backbias_response_time * reference_clock) / 1600;
1214 acpi_dly = (acpi_delay_time * reference_clock) / 1600;
1215 vbi_dly = (vbi_time_out * reference_clock) / 1600;
1216
1217 mclk_switch_limit = (460 * reference_clock) / 100;
1218
1219 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
1220 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_bbias, bb_dly);
1221 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
1222 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
1223 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
1224 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mclk_switch_lim, mclk_switch_limit);
1225}
1226
1227static void ni_populate_smc_voltage_table(struct radeon_device *rdev,
1228 struct atom_voltage_table *voltage_table,
1229 NISLANDS_SMC_STATETABLE *table)
1230{
1231 unsigned int i;
1232
1233 for (i = 0; i < voltage_table->count; i++) {
1234 table->highSMIO[i] = 0;
1235 table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
1236 }
1237}
1238
1239static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
1240 NISLANDS_SMC_STATETABLE *table)
1241{
1242 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1243 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1244 unsigned char i;
1245
1246 if (eg_pi->vddc_voltage_table.count) {
1247 ni_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
1248 table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] = 0;
1249 table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] =
1250 cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
1251
1252 for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
1253 if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
1254 table->maxVDDCIndexInPPTable = i;
1255 break;
1256 }
1257 }
1258 }
1259
1260 if (eg_pi->vddci_voltage_table.count) {
1261 ni_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
1262
1263 table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
1264 table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
1265 cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
1266 }
1267}
1268
1269static int ni_populate_voltage_value(struct radeon_device *rdev,
1270 struct atom_voltage_table *table,
1271 u16 value,
1272 NISLANDS_SMC_VOLTAGE_VALUE *voltage)
1273{
1274 unsigned int i;
1275
1276 for (i = 0; i < table->count; i++) {
1277 if (value <= table->entries[i].value) {
1278 voltage->index = (u8)i;
1279 voltage->value = cpu_to_be16(table->entries[i].value);
1280 break;
1281 }
1282 }
1283
1284 if (i >= table->count)
1285 return -EINVAL;
1286
1287 return 0;
1288}
1289
1290static void ni_populate_mvdd_value(struct radeon_device *rdev,
1291 u32 mclk,
1292 NISLANDS_SMC_VOLTAGE_VALUE *voltage)
1293{
1294 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1295 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1296
1297 if (!pi->mvdd_control) {
1298 voltage->index = eg_pi->mvdd_high_index;
1299 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
1300 return;
1301 }
1302
1303 if (mclk <= pi->mvdd_split_frequency) {
1304 voltage->index = eg_pi->mvdd_low_index;
1305 voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
1306 } else {
1307 voltage->index = eg_pi->mvdd_high_index;
1308 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
1309 }
1310}
1311
1312static int ni_get_std_voltage_value(struct radeon_device *rdev,
1313 NISLANDS_SMC_VOLTAGE_VALUE *voltage,
1314 u16 *std_voltage)
1315{
1316 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries &&
1317 ((u32)voltage->index < rdev->pm.dpm.dyn_state.cac_leakage_table.count))
1318 *std_voltage = rdev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
1319 else
1320 *std_voltage = be16_to_cpu(voltage->value);
1321
1322 return 0;
1323}
1324
/*
 * Encode an explicit index/value pair into an SMC voltage entry
 * (value stored big-endian).
 */
static void ni_populate_std_voltage_value(struct radeon_device *rdev,
					  u16 value, u8 index,
					  NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	voltage->index = index;
	voltage->value = cpu_to_be16(value);
}
1332
/*
 * Derive the SMC power scaling factor from the CAC TID count and the
 * reference clock period.
 */
static u32 ni_get_smc_power_scaling_factor(struct radeon_device *rdev)
{
	u32 xclk_period;
	u32 xclk = radeon_get_xclk(rdev);
	u32 tmp = RREG32(CG_CAC_CTRL) & TID_CNT_MASK;

	/* Two-step truncating division is deliberate — folding it into
	 * one divide would change the rounding of the result. */
	xclk_period = (1000000000UL / xclk);
	xclk_period /= 10000UL;

	return tmp * xclk_period;
}
1344
1345static u32 ni_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
1346{
1347 return (power_in_watts * scaling_factor) << 2;
1348}
1349
1350static u32 ni_calculate_power_boost_limit(struct radeon_device *rdev,
1351 struct radeon_ps *radeon_state,
1352 u32 near_tdp_limit)
1353{
1354 struct ni_ps *state = ni_get_ps(radeon_state);
1355 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1356 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1357 u32 power_boost_limit = 0;
1358 int ret;
1359
1360 if (ni_pi->enable_power_containment &&
1361 ni_pi->use_power_boost_limit) {
1362 NISLANDS_SMC_VOLTAGE_VALUE vddc;
1363 u16 std_vddc_med;
1364 u16 std_vddc_high;
1365 u64 tmp, n, d;
1366
1367 if (state->performance_level_count < 3)
1368 return 0;
1369
1370 ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
1371 state->performance_levels[state->performance_level_count - 2].vddc,
1372 &vddc);
1373 if (ret)
1374 return 0;
1375
1376 ret = ni_get_std_voltage_value(rdev, &vddc, &std_vddc_med);
1377 if (ret)
1378 return 0;
1379
1380 ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
1381 state->performance_levels[state->performance_level_count - 1].vddc,
1382 &vddc);
1383 if (ret)
1384 return 0;
1385
1386 ret = ni_get_std_voltage_value(rdev, &vddc, &std_vddc_high);
1387 if (ret)
1388 return 0;
1389
1390 n = ((u64)near_tdp_limit * ((u64)std_vddc_med * (u64)std_vddc_med) * 90);
1391 d = ((u64)std_vddc_high * (u64)std_vddc_high * 100);
1392 tmp = div64_u64(n, d);
1393
1394 if (tmp >> 32)
1395 return 0;
1396 power_boost_limit = (u32)tmp;
1397 }
1398
1399 return power_boost_limit;
1400}
1401
1402static int ni_calculate_adjusted_tdp_limits(struct radeon_device *rdev,
1403 bool adjust_polarity,
1404 u32 tdp_adjustment,
1405 u32 *tdp_limit,
1406 u32 *near_tdp_limit)
1407{
1408 if (tdp_adjustment > (u32)rdev->pm.dpm.tdp_od_limit)
1409 return -EINVAL;
1410
1411 if (adjust_polarity) {
1412 *tdp_limit = ((100 + tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
1413 *near_tdp_limit = rdev->pm.dpm.near_tdp_limit + (*tdp_limit - rdev->pm.dpm.tdp_limit);
1414 } else {
1415 *tdp_limit = ((100 - tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
1416 *near_tdp_limit = rdev->pm.dpm.near_tdp_limit - (rdev->pm.dpm.tdp_limit - *tdp_limit);
1417 }
1418
1419 return 0;
1420}
1421
/*
 * ni_populate_smc_tdp_limits - program the DPM2 power limits into the SMC.
 * @rdev: radeon device
 * @radeon_state: power state used to derive the boost limit
 *
 * Computes the adjusted TDP / near-TDP limits, the safe power limit and
 * the power boost limit, stores them (big endian) in the cached state
 * table, and uploads the four consecutive dwords starting at TDPLimit
 * to SMC SRAM.  No-op (returns 0) when power containment is disabled.
 *
 * Returns 0 on success or a negative error code.
 */
static int ni_populate_smc_tdp_limits(struct radeon_device *rdev,
				      struct radeon_ps *radeon_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);

	if (ni_pi->enable_power_containment) {
		NISLANDS_SMC_STATETABLE *smc_table = &ni_pi->smc_statetable;
		u32 scaling_factor = ni_get_smc_power_scaling_factor(rdev);
		u32 tdp_limit;
		u32 near_tdp_limit;
		u32 power_boost_limit;
		int ret;

		if (scaling_factor == 0)
			return -EINVAL;

		memset(smc_table, 0, sizeof(NISLANDS_SMC_STATETABLE));

		/* false = lower the limits by tdp_adjustment percent;
		 * polarity choice carried over from the original code
		 * (hence the "???" below) — TODO confirm. */
		ret = ni_calculate_adjusted_tdp_limits(rdev,
						       false, /* ??? */
						       rdev->pm.dpm.tdp_adjustment,
						       &tdp_limit,
						       &near_tdp_limit);
		if (ret)
			return ret;

		power_boost_limit = ni_calculate_power_boost_limit(rdev, radeon_state,
								   near_tdp_limit);

		/* All table fields are big endian for the SMC. */
		smc_table->dpm2Params.TDPLimit =
			cpu_to_be32(ni_scale_power_for_smc(tdp_limit, scaling_factor));
		smc_table->dpm2Params.NearTDPLimit =
			cpu_to_be32(ni_scale_power_for_smc(near_tdp_limit, scaling_factor));
		smc_table->dpm2Params.SafePowerLimit =
			cpu_to_be32(ni_scale_power_for_smc((near_tdp_limit * NISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100,
							   scaling_factor));
		smc_table->dpm2Params.PowerBoostLimit =
			cpu_to_be32(ni_scale_power_for_smc(power_boost_limit, scaling_factor));

		/* Upload the four consecutive dwords (TDPLimit,
		 * NearTDPLimit, SafePowerLimit, PowerBoostLimit). */
		ret = rv770_copy_bytes_to_smc(rdev,
					      (u16)(pi->state_table_start + offsetof(NISLANDS_SMC_STATETABLE, dpm2Params) +
						    offsetof(PP_NIslands_DPM2Parameters, TDPLimit)),
					      (u8 *)(&smc_table->dpm2Params.TDPLimit),
					      sizeof(u32) * 4, pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}
1473
/*
 * ni_copy_and_switch_arb_sets - copy one MC arbitration register set
 * into another and make the destination the active set.
 * @rdev: radeon device
 * @arb_freq_src: source set index (MC_CG_ARB_FREQ_F0..F3)
 * @arb_freq_dest: destination set index (MC_CG_ARB_FREQ_F0..F3)
 *
 * Reads the DRAM timing registers and the per-state burst time from the
 * source set, writes them into the destination set, then asks the MC
 * clock-gating logic to switch to the destination set.
 *
 * Returns 0 on success, -EINVAL for an unrecognized set index.
 */
int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	/* capture the source set's timing values */
	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F2:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F3:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	/* replay them into the destination set */
	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
		break;
	case MC_CG_ARB_FREQ_F2:
		WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
		break;
	case MC_CG_ARB_FREQ_F3:
		WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* enable arb set switching (low 4 bits) and request the
	 * destination set */
	mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
	WREG32(MC_CG_CONFIG, mc_cg_config);
	WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);

	return 0;
}
1538
1539static int ni_init_arb_table_index(struct radeon_device *rdev)
1540{
1541 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1542 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1543 u32 tmp;
1544 int ret;
1545
1546 ret = rv770_read_smc_sram_dword(rdev, ni_pi->arb_table_start,
1547 &tmp, pi->sram_end);
1548 if (ret)
1549 return ret;
1550
1551 tmp &= 0x00FFFFFF;
1552 tmp |= ((u32)MC_CG_ARB_FREQ_F1) << 24;
1553
1554 return rv770_write_smc_sram_dword(rdev, ni_pi->arb_table_start,
1555 tmp, pi->sram_end);
1556}
1557
1558static int ni_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
1559{
1560 return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
1561}
1562
1563static int ni_force_switch_to_arb_f0(struct radeon_device *rdev)
1564{
1565 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1566 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1567 u32 tmp;
1568 int ret;
1569
1570 ret = rv770_read_smc_sram_dword(rdev, ni_pi->arb_table_start,
1571 &tmp, pi->sram_end);
1572 if (ret)
1573 return ret;
1574
1575 tmp = (tmp >> 24) & 0xff;
1576
1577 if (tmp == MC_CG_ARB_FREQ_F0)
1578 return 0;
1579
1580 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
1581}
1582
1583static int ni_populate_memory_timing_parameters(struct radeon_device *rdev,
1584 struct rv7xx_pl *pl,
1585 SMC_NIslands_MCArbDramTimingRegisterSet *arb_regs)
1586{
1587 u32 dram_timing;
1588 u32 dram_timing2;
1589
1590 arb_regs->mc_arb_rfsh_rate =
1591 (u8)rv770_calculate_memory_refresh_rate(rdev, pl->sclk);
1592
1593
1594 radeon_atom_set_engine_dram_timings(rdev,
1595 pl->sclk,
1596 pl->mclk);
1597
1598 dram_timing = RREG32(MC_ARB_DRAM_TIMING);
1599 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
1600
1601 arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing);
1602 arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
1603
1604 return 0;
1605}
1606
1607static int ni_do_program_memory_timing_parameters(struct radeon_device *rdev,
1608 struct radeon_ps *radeon_state,
1609 unsigned int first_arb_set)
1610{
1611 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1612 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1613 struct ni_ps *state = ni_get_ps(radeon_state);
1614 SMC_NIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
1615 int i, ret = 0;
1616
1617 for (i = 0; i < state->performance_level_count; i++) {
1618 ret = ni_populate_memory_timing_parameters(rdev, &state->performance_levels[i], &arb_regs);
1619 if (ret)
1620 break;
1621
1622 ret = rv770_copy_bytes_to_smc(rdev,
1623 (u16)(ni_pi->arb_table_start +
1624 offsetof(SMC_NIslands_MCArbDramTimingRegisters, data) +
1625 sizeof(SMC_NIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i)),
1626 (u8 *)&arb_regs,
1627 (u16)sizeof(SMC_NIslands_MCArbDramTimingRegisterSet),
1628 pi->sram_end);
1629 if (ret)
1630 break;
1631 }
1632 return ret;
1633}
1634
1635static int ni_program_memory_timing_parameters(struct radeon_device *rdev,
1636 struct radeon_ps *radeon_new_state)
1637{
1638 return ni_do_program_memory_timing_parameters(rdev, radeon_new_state,
1639 NISLANDS_DRIVER_STATE_ARB_INDEX);
1640}
1641
1642static void ni_populate_initial_mvdd_value(struct radeon_device *rdev,
1643 struct NISLANDS_SMC_VOLTAGE_VALUE *voltage)
1644{
1645 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1646
1647 voltage->index = eg_pi->mvdd_high_index;
1648 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
1649}
1650
/*
 * ni_populate_smc_initial_state - fill in the SMC state-table entry for
 * the boot (initial) state.
 * @rdev: radeon device
 * @radeon_initial_state: the driver's boot power state
 * @table: SMC state table being built
 *
 * Copies the cached MPLL/SPLL register values and the boot state's
 * clocks, voltages, PCIE gen and memory flags into level 0 of the
 * table's initialState.  Multi-byte values are stored big endian for
 * the SMC.  Voltage lookup failures are not fatal: they only leave the
 * corresponding std_vddc field unpopulated.
 *
 * Always returns 0.
 */
static int ni_populate_smc_initial_state(struct radeon_device *rdev,
					 struct radeon_ps *radeon_initial_state,
					 NISLANDS_SMC_STATETABLE *table)
{
	struct ni_ps *initial_state = ni_get_ps(radeon_initial_state);
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 reg;
	int ret;

	/* memory PLL registers captured at init time */
	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
		cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl);
	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 =
		cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl_2);
	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
		cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl);
	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 =
		cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl_2);
	table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
		cpu_to_be32(ni_pi->clock_registers.mclk_pwrmgt_cntl);
	table->initialState.levels[0].mclk.vDLL_CNTL =
		cpu_to_be32(ni_pi->clock_registers.dll_cntl);
	table->initialState.levels[0].mclk.vMPLL_SS =
		cpu_to_be32(ni_pi->clock_registers.mpll_ss1);
	table->initialState.levels[0].mclk.vMPLL_SS2 =
		cpu_to_be32(ni_pi->clock_registers.mpll_ss2);
	table->initialState.levels[0].mclk.mclk_value =
		cpu_to_be32(initial_state->performance_levels[0].mclk);

	/* engine PLL registers captured at init time */
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_2);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_3);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_4);
	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum);
	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum_2);
	table->initialState.levels[0].sclk.sclk_value =
		cpu_to_be32(initial_state->performance_levels[0].sclk);
	table->initialState.levels[0].arbRefreshState =
		NISLANDS_INITIAL_STATE_ARB_INDEX;

	table->initialState.levels[0].ACIndex = 0;

	/* vddc plus its standardized (leakage-corrected) value */
	ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
					initial_state->performance_levels[0].vddc,
					&table->initialState.levels[0].vddc);
	if (!ret) {
		u16 std_vddc;

		ret = ni_get_std_voltage_value(rdev,
					       &table->initialState.levels[0].vddc,
					       &std_vddc);
		if (!ret)
			ni_populate_std_voltage_value(rdev, std_vddc,
						      table->initialState.levels[0].vddc.index,
						      &table->initialState.levels[0].std_vddc);
	}

	if (eg_pi->vddci_control)
		ni_populate_voltage_value(rdev,
					  &eg_pi->vddci_voltage_table,
					  initial_state->performance_levels[0].vddci,
					  &table->initialState.levels[0].vddci);

	ni_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);

	reg = CG_R(0xffff) | CG_L(0);
	table->initialState.levels[0].aT = cpu_to_be32(reg);

	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);

	if (pi->boot_in_gen2)
		table->initialState.levels[0].gen2PCIE = 1;
	else
		table->initialState.levels[0].gen2PCIE = 0;

	/* GDDR5: strobe mode, and EDC above the enable threshold */
	if (pi->mem_gddr5) {
		table->initialState.levels[0].strobeMode =
			cypress_get_strobe_mode_settings(rdev,
							 initial_state->performance_levels[0].mclk);

		if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
			table->initialState.levels[0].mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG;
		else
			table->initialState.levels[0].mcFlags = 0;
	}

	table->initialState.levelCount = 1;

	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;

	/* DPM2 parameters are unused for the initial state */
	table->initialState.levels[0].dpm2.MaxPS = 0;
	table->initialState.levels[0].dpm2.NearTDPDec = 0;
	table->initialState.levels[0].dpm2.AboveSafeInc = 0;
	table->initialState.levels[0].dpm2.BelowSafeInc = 0;

	reg = MIN_POWER_MASK | MAX_POWER_MASK;
	table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);

	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
	table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);

	return 0;
}
1761
/*
 * ni_populate_smc_acpi_state - fill in the SMC state-table entry used
 * while in the ACPI (lowest-power) state.
 * @rdev: radeon device
 * @table: SMC state table being built (initialState must already be
 *         populated, as ACPIState starts as a copy of it)
 *
 * Selects the ACPI vddc/vddci, powers down the memory PLLs and DLLs,
 * bypasses the engine PLL, and zeroes the clock values so the SMC runs
 * the chip as slow as possible.  Values are stored big endian.
 *
 * Always returns 0.
 */
static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
				      NISLANDS_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 mpll_ad_func_cntl = ni_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2 = ni_pi->clock_registers.mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl = ni_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2 = ni_pi->clock_registers.mpll_dq_func_cntl_2;
	u32 spll_func_cntl = ni_pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = ni_pi->clock_registers.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = ni_pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = ni_pi->clock_registers.cg_spll_func_cntl_4;
	u32 mclk_pwrmgt_cntl = ni_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 dll_cntl = ni_pi->clock_registers.dll_cntl;
	u32 reg;
	int ret;

	/* start from the initial state and strip the DC flag */
	table->ACPIState = table->initialState;

	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc) {
		/* board provides a dedicated ACPI vddc */
		ret = ni_populate_voltage_value(rdev,
						&eg_pi->vddc_voltage_table,
						pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
		if (!ret) {
			u16 std_vddc;

			ret = ni_get_std_voltage_value(rdev,
						       &table->ACPIState.levels[0].vddc, &std_vddc);
			if (!ret)
				ni_populate_std_voltage_value(rdev, std_vddc,
							      table->ACPIState.levels[0].vddc.index,
							      &table->ACPIState.levels[0].std_vddc);
		}

		if (pi->pcie_gen2) {
			if (pi->acpi_pcie_gen2)
				table->ACPIState.levels[0].gen2PCIE = 1;
			else
				table->ACPIState.levels[0].gen2PCIE = 0;
		} else {
			table->ACPIState.levels[0].gen2PCIE = 0;
		}
	} else {
		/* no ACPI vddc: fall back to the table minimum */
		ret = ni_populate_voltage_value(rdev,
						&eg_pi->vddc_voltage_table,
						pi->min_vddc_in_table,
						&table->ACPIState.levels[0].vddc);
		if (!ret) {
			u16 std_vddc;

			ret = ni_get_std_voltage_value(rdev,
						       &table->ACPIState.levels[0].vddc,
						       &std_vddc);
			if (!ret)
				ni_populate_std_voltage_value(rdev, std_vddc,
							      table->ACPIState.levels[0].vddc.index,
							      &table->ACPIState.levels[0].std_vddc);
		}
		table->ACPIState.levels[0].gen2PCIE = 0;
	}

	if (eg_pi->acpi_vddci) {
		if (eg_pi->vddci_control)
			ni_populate_voltage_value(rdev,
						  &eg_pi->vddci_voltage_table,
						  eg_pi->acpi_vddci,
						  &table->ACPIState.levels[0].vddci);
	}

	/* power down the memory PLL (AD side; DQ side only for GDDR5) */
	mpll_ad_func_cntl &= ~PDNB;

	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;

	if (pi->mem_gddr5)
		mpll_dq_func_cntl &= ~PDNB;
	mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;

	/* hold all memory DLLs in reset and powered down ... */
	mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
			     MRDCKA1_RESET |
			     MRDCKB0_RESET |
			     MRDCKB1_RESET |
			     MRDCKC0_RESET |
			     MRDCKC1_RESET |
			     MRDCKD0_RESET |
			     MRDCKD1_RESET);

	mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
			      MRDCKA1_PDNB |
			      MRDCKB0_PDNB |
			      MRDCKB1_PDNB |
			      MRDCKC0_PDNB |
			      MRDCKC1_PDNB |
			      MRDCKD0_PDNB |
			      MRDCKD1_PDNB);

	/* ... and bypass them */
	dll_cntl |= (MRDCKA0_BYPASS |
		     MRDCKA1_BYPASS |
		     MRDCKB0_BYPASS |
		     MRDCKB1_BYPASS |
		     MRDCKC0_BYPASS |
		     MRDCKC1_BYPASS |
		     MRDCKD0_BYPASS |
		     MRDCKD1_BYPASS);

	/* run sclk from mux input 4 (bypassing the SPLL) */
	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
	table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	table->ACPIState.levels[0].mclk.vDLL_CNTL = cpu_to_be32(dll_cntl);

	table->ACPIState.levels[0].mclk.mclk_value = 0;

	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4);

	table->ACPIState.levels[0].sclk.sclk_value = 0;

	ni_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);

	if (eg_pi->dynamic_ac_timing)
		table->ACPIState.levels[0].ACIndex = 1;

	/* DPM2 parameters are unused for the ACPI state */
	table->ACPIState.levels[0].dpm2.MaxPS = 0;
	table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
	table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
	table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;

	reg = MIN_POWER_MASK | MAX_POWER_MASK;
	table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);

	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
	table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);

	return 0;
}
1909
/*
 * ni_init_smc_table - build the complete SMC state table and upload it.
 * @rdev: radeon device
 *
 * Zeroes the cached state table, then fills in the voltage tables,
 * thermal protection type, platform capability flags, the initial and
 * ACPI states (driver and ULV states start as copies of the initial
 * state), programs the boot state's MC arb timings, and finally copies
 * the whole table into SMC SRAM.
 *
 * Returns 0 on success or a negative error code.
 */
static int ni_init_smc_table(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	int ret;
	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
	NISLANDS_SMC_STATETABLE *table = &ni_pi->smc_statetable;

	memset(table, 0, sizeof(NISLANDS_SMC_STATETABLE));

	ni_populate_smc_voltage_tables(rdev, table);

	/* pick the thermal protection source based on the sensor type */
	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
		break;
	case THERMAL_TYPE_NONE:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
		break;
	default:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
		break;
	}

	/* translate power-table platform caps into SMC system flags */
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (pi->mem_gddr5)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	ret = ni_populate_smc_initial_state(rdev, radeon_boot_state, table);
	if (ret)
		return ret;

	ret = ni_populate_smc_acpi_state(rdev, table);
	if (ret)
		return ret;

	table->driverState = table->initialState;

	table->ULVState = table->initialState;

	ret = ni_do_program_memory_timing_parameters(rdev, radeon_boot_state,
						     NISLANDS_INITIAL_STATE_ARB_INDEX);
	if (ret)
		return ret;

	return rv770_copy_bytes_to_smc(rdev, pi->state_table_start, (u8 *)table,
				       sizeof(NISLANDS_SMC_STATETABLE), pi->sram_end);
}
1967
1968static int ni_calculate_sclk_params(struct radeon_device *rdev,
1969 u32 engine_clock,
1970 NISLANDS_SMC_SCLK_VALUE *sclk)
1971{
1972 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1973 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1974 struct atom_clock_dividers dividers;
1975 u32 spll_func_cntl = ni_pi->clock_registers.cg_spll_func_cntl;
1976 u32 spll_func_cntl_2 = ni_pi->clock_registers.cg_spll_func_cntl_2;
1977 u32 spll_func_cntl_3 = ni_pi->clock_registers.cg_spll_func_cntl_3;
1978 u32 spll_func_cntl_4 = ni_pi->clock_registers.cg_spll_func_cntl_4;
1979 u32 cg_spll_spread_spectrum = ni_pi->clock_registers.cg_spll_spread_spectrum;
1980 u32 cg_spll_spread_spectrum_2 = ni_pi->clock_registers.cg_spll_spread_spectrum_2;
1981 u64 tmp;
1982 u32 reference_clock = rdev->clock.spll.reference_freq;
1983 u32 reference_divider;
1984 u32 fbdiv;
1985 int ret;
1986
1987 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
1988 engine_clock, false, &dividers);
1989 if (ret)
1990 return ret;
1991
1992 reference_divider = 1 + dividers.ref_div;
1993
1994
1995 tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16834;
1996 do_div(tmp, reference_clock);
1997 fbdiv = (u32) tmp;
1998
1999 spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
2000 spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
2001 spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
2002
2003 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2004 spll_func_cntl_2 |= SCLK_MUX_SEL(2);
2005
2006 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
2007 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
2008 spll_func_cntl_3 |= SPLL_DITHEN;
2009
2010 if (pi->sclk_ss) {
2011 struct radeon_atom_ss ss;
2012 u32 vco_freq = engine_clock * dividers.post_div;
2013
2014 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2015 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
2016 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
2017 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
2018
2019 cg_spll_spread_spectrum &= ~CLK_S_MASK;
2020 cg_spll_spread_spectrum |= CLK_S(clk_s);
2021 cg_spll_spread_spectrum |= SSEN;
2022
2023 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
2024 cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
2025 }
2026 }
2027
2028 sclk->sclk_value = engine_clock;
2029 sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
2030 sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
2031 sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
2032 sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
2033 sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
2034 sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
2035
2036 return 0;
2037}
2038
2039static int ni_populate_sclk_value(struct radeon_device *rdev,
2040 u32 engine_clock,
2041 NISLANDS_SMC_SCLK_VALUE *sclk)
2042{
2043 NISLANDS_SMC_SCLK_VALUE sclk_tmp;
2044 int ret;
2045
2046 ret = ni_calculate_sclk_params(rdev, engine_clock, &sclk_tmp);
2047 if (!ret) {
2048 sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
2049 sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
2050 sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
2051 sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
2052 sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
2053 sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
2054 sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
2055 }
2056
2057 return ret;
2058}
2059
2060static int ni_init_smc_spll_table(struct radeon_device *rdev)
2061{
2062 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2063 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2064 SMC_NISLANDS_SPLL_DIV_TABLE *spll_table;
2065 NISLANDS_SMC_SCLK_VALUE sclk_params;
2066 u32 fb_div;
2067 u32 p_div;
2068 u32 clk_s;
2069 u32 clk_v;
2070 u32 sclk = 0;
2071 int i, ret;
2072 u32 tmp;
2073
2074 if (ni_pi->spll_table_start == 0)
2075 return -EINVAL;
2076
2077 spll_table = kzalloc(sizeof(SMC_NISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
2078 if (spll_table == NULL)
2079 return -ENOMEM;
2080
2081 for (i = 0; i < 256; i++) {
2082 ret = ni_calculate_sclk_params(rdev, sclk, &sclk_params);
2083 if (ret)
2084 break;
2085
2086 p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
2087 fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
2088 clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
2089 clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
2090
2091 fb_div &= ~0x00001FFF;
2092 fb_div >>= 1;
2093 clk_v >>= 6;
2094
2095 if (p_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
2096 ret = -EINVAL;
2097
2098 if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
2099 ret = -EINVAL;
2100
2101 if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
2102 ret = -EINVAL;
2103
2104 if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
2105 ret = -EINVAL;
2106
2107 if (ret)
2108 break;
2109
2110 tmp = ((fb_div << SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
2111 ((p_div << SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
2112 spll_table->freq[i] = cpu_to_be32(tmp);
2113
2114 tmp = ((clk_v << SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
2115 ((clk_s << SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
2116 spll_table->ss[i] = cpu_to_be32(tmp);
2117
2118 sclk += 512;
2119 }
2120
2121 if (!ret)
2122 ret = rv770_copy_bytes_to_smc(rdev, ni_pi->spll_table_start, (u8 *)spll_table,
2123 sizeof(SMC_NISLANDS_SPLL_DIV_TABLE), pi->sram_end);
2124
2125 kfree(spll_table);
2126
2127 return ret;
2128}
2129
/*
 * ni_populate_mclk_value - compute the MPLL/DLL register settings for a
 * memory clock and store them (big endian) in @mclk.
 * @rdev: radeon device
 * @engine_clock: engine clock (unused in the computation; kept for the
 *                common populate-value signature)
 * @memory_clock: target memory clock (10 kHz units)
 * @mclk: output register set
 * @strobe_mode: whether the MC runs in strobe mode at this clock
 * @dll_state_on: whether the memory DLLs should be powered
 *
 * Returns 0 on success or the error from the divider lookup.
 */
static int ni_populate_mclk_value(struct radeon_device *rdev,
				  u32 engine_clock,
				  u32 memory_clock,
				  NISLANDS_SMC_MCLK_VALUE *mclk,
				  bool strobe_mode,
				  bool dll_state_on)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 mpll_ad_func_cntl = ni_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2 = ni_pi->clock_registers.mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl = ni_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2 = ni_pi->clock_registers.mpll_dq_func_cntl_2;
	u32 mclk_pwrmgt_cntl = ni_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 dll_cntl = ni_pi->clock_registers.dll_cntl;
	u32 mpll_ss1 = ni_pi->clock_registers.mpll_ss1;
	u32 mpll_ss2 = ni_pi->clock_registers.mpll_ss2;
	struct atom_clock_dividers dividers;
	u32 ibias;
	u32 dll_speed;
	int ret;
	u32 mc_seq_misc7;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
					     memory_clock, strobe_mode, &dividers);
	if (ret)
		return ret;

	if (!strobe_mode) {
		/* NOTE(review): bit 27 of MC_SEQ_MISC7 forces post_div
		 * to 1 here — meaning taken from the original code. */
		mc_seq_misc7 = RREG32(MC_SEQ_MISC7);

		if (mc_seq_misc7 & 0x8000000)
			dividers.post_div = 1;
	}

	ibias = cypress_map_clkf_to_ibias(rdev, dividers.whole_fb_div);

	/* AD-side MPLL divider and bias programming */
	mpll_ad_func_cntl &= ~(CLKR_MASK |
			       YCLK_POST_DIV_MASK |
			       CLKF_MASK |
			       CLKFRAC_MASK |
			       IBIAS_MASK);
	mpll_ad_func_cntl |= CLKR(dividers.ref_div);
	mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
	mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
	mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
	mpll_ad_func_cntl |= IBIAS(ibias);

	if (dividers.vco_mode)
		mpll_ad_func_cntl_2 |= VCO_MODE;
	else
		mpll_ad_func_cntl_2 &= ~VCO_MODE;

	/* DQ side is only programmed for GDDR5 */
	if (pi->mem_gddr5) {
		mpll_dq_func_cntl &= ~(CLKR_MASK |
				       YCLK_POST_DIV_MASK |
				       CLKF_MASK |
				       CLKFRAC_MASK |
				       IBIAS_MASK);
		mpll_dq_func_cntl |= CLKR(dividers.ref_div);
		mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
		mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
		mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
		mpll_dq_func_cntl |= IBIAS(ibias);

		if (strobe_mode)
			mpll_dq_func_cntl &= ~PDNB;
		else
			mpll_dq_func_cntl |= PDNB;

		if (dividers.vco_mode)
			mpll_dq_func_cntl_2 |= VCO_MODE;
		else
			mpll_dq_func_cntl_2 &= ~VCO_MODE;
	}

	/* memory spread spectrum, if configured for this VCO frequency */
	if (pi->mclk_ss) {
		struct radeon_atom_ss ss;
		u32 vco_freq = memory_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
			u32 reference_clock = rdev->clock.mpll.reference_freq;
			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
			u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
			u32 clk_v = ss.percentage *
				(0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clk_v);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clk_s);
		}
	}

	dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
					memory_clock);

	/* set DLL speed and power all memory DLLs up or down */
	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);
	if (dll_state_on)
		mclk_pwrmgt_cntl |= (MRDCKA0_PDNB |
				     MRDCKA1_PDNB |
				     MRDCKB0_PDNB |
				     MRDCKB1_PDNB |
				     MRDCKC0_PDNB |
				     MRDCKC1_PDNB |
				     MRDCKD0_PDNB |
				     MRDCKD1_PDNB);
	else
		mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
				      MRDCKA1_PDNB |
				      MRDCKB0_PDNB |
				      MRDCKB1_PDNB |
				      MRDCKC0_PDNB |
				      MRDCKC1_PDNB |
				      MRDCKD0_PDNB |
				      MRDCKD1_PDNB);


	/* everything is stored big endian for the SMC */
	mclk->mclk_value = cpu_to_be32(memory_clock);
	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	mclk->vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	mclk->vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);

	return 0;
}
2263
2264static void ni_populate_smc_sp(struct radeon_device *rdev,
2265 struct radeon_ps *radeon_state,
2266 NISLANDS_SMC_SWSTATE *smc_state)
2267{
2268 struct ni_ps *ps = ni_get_ps(radeon_state);
2269 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2270 int i;
2271
2272 for (i = 0; i < ps->performance_level_count - 1; i++)
2273 smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
2274
2275 smc_state->levels[ps->performance_level_count - 1].bSP =
2276 cpu_to_be32(pi->psp);
2277}
2278
/*
 * ni_convert_power_level_to_smc - translate one driver performance level
 * into an SMC hardware performance level.
 *
 * Fills in the PCIE gen2 flag, the sclk/mclk programming, the memory
 * controller flags (stutter, EDC read/write, strobe/RTT) and the
 * VDDC/VDDCI/MVDD voltage entries of @level from @pl.
 *
 * Returns 0 on success or the negative error code of the first populate
 * helper that fails.
 */
static int ni_convert_power_level_to_smc(struct radeon_device *rdev,
					 struct rv7xx_pl *pl,
					 NISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	int ret;
	bool dll_state_on;
	u16 std_vddc;
	u32 tmp = RREG32(DC_STUTTER_CNTL);

	/* gen2 only if both the board (pi->pcie_gen2) and this state allow it */
	level->gen2PCIE = pi->pcie_gen2 ?
		((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;

	ret = ni_populate_sclk_value(rdev, pl->sclk, &level->sclk);
	if (ret)
		return ret;

	level->mcFlags =  0;
	/* memory stutter: only below the configured mclk threshold, with UVD
	 * idle and both display controllers' stutter enables set */
	if (pi->mclk_stutter_mode_threshold &&
	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
	    !eg_pi->uvd_enabled &&
	    (tmp & DC_STUTTER_ENABLE_A) &&
	    (tmp & DC_STUTTER_ENABLE_B))
		level->mcFlags |= NISLANDS_SMC_MC_STUTTER_EN;

	if (pi->mem_gddr5) {
		/* EDC read/write above their respective mclk thresholds */
		if (pl->mclk > pi->mclk_edc_enable_threshold)
			level->mcFlags |= NISLANDS_SMC_MC_EDC_RD_FLAG;
		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
			level->mcFlags |= NISLANDS_SMC_MC_EDC_WR_FLAG;

		level->strobeMode = cypress_get_strobe_mode_settings(rdev, pl->mclk);

		if (level->strobeMode & NISLANDS_SMC_STROBE_ENABLE) {
			/* DLL on/off comes from MC_SEQ_MISC5 or MISC6 bit 1,
			 * selected by comparing the mclk frequency ratio
			 * against the field in MC_SEQ_MISC7[19:16] */
			if (cypress_get_mclk_frequency_ratio(rdev, pl->mclk, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = false;
			if (pl->mclk > ni_pi->mclk_rtt_mode_threshold)
				level->mcFlags |= NISLANDS_SMC_MC_RTT_ENABLE;
		}

		ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk,
					     &level->mclk,
					     (level->strobeMode & NISLANDS_SMC_STROBE_ENABLE) != 0,
					     dll_state_on);
	} else
		/* non-GDDR5: strobe mode and DLL are unconditionally on */
		ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk, &level->mclk, 1, 1);

	if (ret)
		return ret;

	ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
					pl->vddc, &level->vddc);
	if (ret)
		return ret;

	/* derive the standardized vddc used for leakage lookups */
	ret = ni_get_std_voltage_value(rdev, &level->vddc, &std_vddc);
	if (ret)
		return ret;

	ni_populate_std_voltage_value(rdev, std_vddc,
				      level->vddc.index, &level->std_vddc);

	/* VDDCI only when it is controlled separately on this board */
	if (eg_pi->vddci_control) {
		ret = ni_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
						pl->vddci, &level->vddci);
		if (ret)
			return ret;
	}

	ni_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);

	return ret;
}
2359
/*
 * ni_populate_smc_t - program the per-level aT words of an SMC state.
 *
 * For each adjacent pair of levels, r600_calculate_at() derives a low
 * (t_l) and high (t_h) time; the lower level's CG_R field gets t_l
 * scaled by bsp, the upper level's CG_L field gets t_h scaled by bsp
 * (pbsp for the topmost transition).  UVD states feed the calculation
 * a different pacing sequence than ordinary states.
 *
 * Returns 0 on success, -EINVAL if the state has 9 or more levels
 * (more than this table format supports).
 */
static int ni_populate_smc_t(struct radeon_device *rdev,
			     struct radeon_ps *radeon_state,
			     NISLANDS_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	u32 a_t;
	u32 t_l, t_h;
	u32 high_bsp;
	int i, ret;

	if (state->performance_level_count >= 9)
		return -EINVAL;

	/* a single level has no transition: stay forever (R=0xffff, L=0) */
	if (state->performance_level_count < 2) {
		a_t = CG_R(0xffff) | CG_L(0);
		smc_state->levels[0].aT = cpu_to_be32(a_t);
		return 0;
	}

	smc_state->levels[0].aT = cpu_to_be32(0);

	for (i = 0; i <= state->performance_level_count - 2; i++) {
		if (eg_pi->uvd_enabled)
			ret = r600_calculate_at(
				1000 * (i * (eg_pi->smu_uvd_hs ? 2 : 8) + 2),
				100 * R600_AH_DFLT,
				state->performance_levels[i + 1].sclk,
				state->performance_levels[i].sclk,
				&t_l,
				&t_h);
		else
			ret = r600_calculate_at(
				1000 * (i + 1),
				100 * R600_AH_DFLT,
				state->performance_levels[i + 1].sclk,
				state->performance_levels[i].sclk,
				&t_l,
				&t_h);

		/* fall back to fixed times around the nominal point */
		if (ret) {
			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
		}

		/* merge t_l into this level's CG_R, keeping its CG_L intact */
		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
		a_t |= CG_R(t_l * pi->bsp / 20000);
		smc_state->levels[i].aT = cpu_to_be32(a_t);

		/* the topmost transition uses pbsp instead of bsp */
		high_bsp = (i == state->performance_level_count - 2) ?
			pi->pbsp : pi->bsp;

		a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
	}

	return 0;
}
2419
/*
 * ni_populate_power_containment_values - fill the DPM2 power containment
 * fields of an SMC state.
 *
 * Writes the scaled power boost limit into the SMC's DPM2 parameter
 * block, then programs each level's pulse-skip (MaxPS) and TDP
 * inc/dec step fields.  Level 0 is left neutral; higher levels derive
 * a minimum sclk from the previous level and the per-position MaxPS
 * percentage.  Power boost is flagged on every level except the
 * highest, and only if the boost limit was written successfully.
 *
 * Returns 0 on success (also when containment is disabled), a negative
 * error code on invalid state layout or TDP calculation failure.
 */
static int ni_populate_power_containment_values(struct radeon_device *rdev,
						struct radeon_ps *radeon_state,
						NISLANDS_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	u32 prev_sclk;
	u32 max_sclk;
	u32 min_sclk;
	int i, ret;
	u32 tdp_limit;
	u32 near_tdp_limit;
	u32 power_boost_limit;
	u8 max_ps_percent;

	if (ni_pi->enable_power_containment == false)
		return 0;

	if (state->performance_level_count == 0)
		return -EINVAL;

	/* levels must already have been converted by the caller */
	if (smc_state->levelCount != state->performance_level_count)
		return -EINVAL;

	ret = ni_calculate_adjusted_tdp_limits(rdev,
					       false, /* ??? — meaning of this flag unconfirmed here */
					       rdev->pm.dpm.tdp_adjustment,
					       &tdp_limit,
					       &near_tdp_limit);
	if (ret)
		return ret;

	power_boost_limit = ni_calculate_power_boost_limit(rdev, radeon_state, near_tdp_limit);

	ret = rv770_write_smc_sram_dword(rdev,
					 pi->state_table_start +
					 offsetof(NISLANDS_SMC_STATETABLE, dpm2Params) +
					 offsetof(PP_NIslands_DPM2Parameters, PowerBoostLimit),
					 ni_scale_power_for_smc(power_boost_limit, ni_get_smc_power_scaling_factor(rdev)),
					 pi->sram_end);
	/* if the limit could not be written, behave as if boost is off */
	if (ret)
		power_boost_limit = 0;

	smc_state->levels[0].dpm2.MaxPS = 0;
	smc_state->levels[0].dpm2.NearTDPDec = 0;
	smc_state->levels[0].dpm2.AboveSafeInc = 0;
	smc_state->levels[0].dpm2.BelowSafeInc = 0;
	smc_state->levels[0].stateFlags |= power_boost_limit ? PPSMC_STATEFLAG_POWERBOOST : 0;

	for (i = 1; i < state->performance_level_count; i++) {
		prev_sclk = state->performance_levels[i-1].sclk;
		max_sclk = state->performance_levels[i].sclk;
		/* middle levels use the _M percentage, the top level _H */
		max_ps_percent = (i != (state->performance_level_count - 1)) ?
			NISLANDS_DPM2_MAXPS_PERCENT_M : NISLANDS_DPM2_MAXPS_PERCENT_H;

		/* levels must be sorted by ascending sclk */
		if (max_sclk < prev_sclk)
			return -EINVAL;

		if ((max_ps_percent == 0) || (prev_sclk == max_sclk) || eg_pi->uvd_enabled)
			min_sclk = max_sclk;
		else if (1 == i)
			min_sclk = prev_sclk;
		else
			min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;

		/* never throttle below the lowest level's sclk */
		if (min_sclk < state->performance_levels[0].sclk)
			min_sclk = state->performance_levels[0].sclk;

		if (min_sclk == 0)
			return -EINVAL;

		/* MaxPS: fraction of the sclk range expressed in pulse skips */
		smc_state->levels[i].dpm2.MaxPS =
			(u8)((NISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
		smc_state->levels[i].dpm2.NearTDPDec = NISLANDS_DPM2_NEAR_TDP_DEC;
		smc_state->levels[i].dpm2.AboveSafeInc = NISLANDS_DPM2_ABOVE_SAFE_INC;
		smc_state->levels[i].dpm2.BelowSafeInc = NISLANDS_DPM2_BELOW_SAFE_INC;
		smc_state->levels[i].stateFlags |=
			((i != (state->performance_level_count - 1)) && power_boost_limit) ?
			PPSMC_STATEFLAG_POWERBOOST : 0;
	}

	return 0;
}
2505
2506static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
2507 struct radeon_ps *radeon_state,
2508 NISLANDS_SMC_SWSTATE *smc_state)
2509{
2510 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2511 struct ni_ps *state = ni_get_ps(radeon_state);
2512 u32 sq_power_throttle;
2513 u32 sq_power_throttle2;
2514 bool enable_sq_ramping = ni_pi->enable_sq_ramping;
2515 int i;
2516
2517 if (state->performance_level_count == 0)
2518 return -EINVAL;
2519
2520 if (smc_state->levelCount != state->performance_level_count)
2521 return -EINVAL;
2522
2523 if (rdev->pm.dpm.sq_ramping_threshold == 0)
2524 return -EINVAL;
2525
2526 if (NISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
2527 enable_sq_ramping = false;
2528
2529 if (NISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
2530 enable_sq_ramping = false;
2531
2532 if (NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
2533 enable_sq_ramping = false;
2534
2535 if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
2536 enable_sq_ramping = false;
2537
2538 if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
2539 enable_sq_ramping = false;
2540
2541 for (i = 0; i < state->performance_level_count; i++) {
2542 sq_power_throttle = 0;
2543 sq_power_throttle2 = 0;
2544
2545 if ((state->performance_levels[i].sclk >= rdev->pm.dpm.sq_ramping_threshold) &&
2546 enable_sq_ramping) {
2547 sq_power_throttle |= MAX_POWER(NISLANDS_DPM2_SQ_RAMP_MAX_POWER);
2548 sq_power_throttle |= MIN_POWER(NISLANDS_DPM2_SQ_RAMP_MIN_POWER);
2549 sq_power_throttle2 |= MAX_POWER_DELTA(NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
2550 sq_power_throttle2 |= STI_SIZE(NISLANDS_DPM2_SQ_RAMP_STI_SIZE);
2551 sq_power_throttle2 |= LTI_RATIO(NISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
2552 } else {
2553 sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
2554 sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
2555 }
2556
2557 smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
2558 smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
2559 }
2560
2561 return 0;
2562}
2563
2564static int ni_enable_power_containment(struct radeon_device *rdev,
2565 struct radeon_ps *radeon_new_state,
2566 bool enable)
2567{
2568 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2569 PPSMC_Result smc_result;
2570 int ret = 0;
2571
2572 if (ni_pi->enable_power_containment) {
2573 if (enable) {
2574 if (!r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
2575 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_TDPClampingActive);
2576 if (smc_result != PPSMC_Result_OK) {
2577 ret = -EINVAL;
2578 ni_pi->pc_enabled = false;
2579 } else {
2580 ni_pi->pc_enabled = true;
2581 }
2582 }
2583 } else {
2584 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_TDPClampingInactive);
2585 if (smc_result != PPSMC_Result_OK)
2586 ret = -EINVAL;
2587 ni_pi->pc_enabled = false;
2588 }
2589 }
2590
2591 return ret;
2592}
2593
/*
 * ni_convert_power_state_to_smc - translate a full driver power state
 * into an SMC software state.
 *
 * Converts every performance level, assigns the arbitration/AC-timing
 * indices and display watermarks, writes the watermark threshold soft
 * register, then fills in the sp, power containment, SQ ramping and aT
 * sections.  Containment/ramping failures are not fatal: they just
 * disable the respective feature.
 *
 * Returns 0 on success or a negative error code.
 */
static int ni_convert_power_state_to_smc(struct radeon_device *rdev,
					 struct radeon_ps *radeon_state,
					 NISLANDS_SMC_SWSTATE *smc_state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	int i, ret;
	/* 100% of the top level's sclk (the * 100 / 100 keeps the
	 * percentage form visible) */
	u32 threshold = state->performance_levels[state->performance_level_count - 1].sclk * 100 / 100;

	if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;

	smc_state->levelCount = 0;

	if (state->performance_level_count > NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE)
		return -EINVAL;

	for (i = 0; i < state->performance_level_count; i++) {
		ret = ni_convert_power_level_to_smc(rdev, &state->performance_levels[i],
						    &smc_state->levels[i]);
		smc_state->levels[i].arbRefreshState =
			(u8)(NISLANDS_DRIVER_STATE_ARB_INDEX + i);

		if (ret)
			return ret;

		/* with containment: watermark by sclk vs threshold;
		 * without: the two lowest levels get the low watermark */
		if (ni_pi->enable_power_containment)
			smc_state->levels[i].displayWatermark =
				(state->performance_levels[i].sclk < threshold) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
		else
			smc_state->levels[i].displayWatermark = (i < 2) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;

		/* AC timing: per-level driver-state slot, or slot 0 when
		 * dynamic AC timing is off */
		if (eg_pi->dynamic_ac_timing)
			smc_state->levels[i].ACIndex = NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
		else
			smc_state->levels[i].ACIndex = 0;

		smc_state->levelCount++;
	}

	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_watermark_threshold,
				      cpu_to_be32(threshold / 512));

	ni_populate_smc_sp(rdev, radeon_state, smc_state);

	ret = ni_populate_power_containment_values(rdev, radeon_state, smc_state);
	if (ret)
		ni_pi->enable_power_containment = false;

	ret = ni_populate_sq_ramping_values(rdev, radeon_state, smc_state);
	if (ret)
		ni_pi->enable_sq_ramping = false;

	return ni_populate_smc_t(rdev, radeon_state, smc_state);
}
2652
2653static int ni_upload_sw_state(struct radeon_device *rdev,
2654 struct radeon_ps *radeon_new_state)
2655{
2656 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2657 u16 address = pi->state_table_start +
2658 offsetof(NISLANDS_SMC_STATETABLE, driverState);
2659 u16 state_size = sizeof(NISLANDS_SMC_SWSTATE) +
2660 ((NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1) * sizeof(NISLANDS_SMC_HW_PERFORMANCE_LEVEL));
2661 int ret;
2662 NISLANDS_SMC_SWSTATE *smc_state = kzalloc(state_size, GFP_KERNEL);
2663
2664 if (smc_state == NULL)
2665 return -ENOMEM;
2666
2667 ret = ni_convert_power_state_to_smc(rdev, radeon_new_state, smc_state);
2668 if (ret)
2669 goto done;
2670
2671 ret = rv770_copy_bytes_to_smc(rdev, address, (u8 *)smc_state, state_size, pi->sram_end);
2672
2673done:
2674 kfree(smc_state);
2675
2676 return ret;
2677}
2678
2679static int ni_set_mc_special_registers(struct radeon_device *rdev,
2680 struct ni_mc_reg_table *table)
2681{
2682 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2683 u8 i, j, k;
2684 u32 temp_reg;
2685
2686 for (i = 0, j = table->last; i < table->last; i++) {
2687 switch (table->mc_reg_address[i].s1) {
2688 case MC_SEQ_MISC1 >> 2:
2689 if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2690 return -EINVAL;
2691 temp_reg = RREG32(MC_PMG_CMD_EMRS);
2692 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
2693 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
2694 for (k = 0; k < table->num_entries; k++)
2695 table->mc_reg_table_entry[k].mc_data[j] =
2696 ((temp_reg & 0xffff0000)) |
2697 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
2698 j++;
2699 if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2700 return -EINVAL;
2701
2702 temp_reg = RREG32(MC_PMG_CMD_MRS);
2703 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
2704 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
2705 for(k = 0; k < table->num_entries; k++) {
2706 table->mc_reg_table_entry[k].mc_data[j] =
2707 (temp_reg & 0xffff0000) |
2708 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2709 if (!pi->mem_gddr5)
2710 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
2711 }
2712 j++;
2713 if (j > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2714 return -EINVAL;
2715 break;
2716 case MC_SEQ_RESERVE_M >> 2:
2717 temp_reg = RREG32(MC_PMG_CMD_MRS1);
2718 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
2719 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
2720 for (k = 0; k < table->num_entries; k++)
2721 table->mc_reg_table_entry[k].mc_data[j] =
2722 (temp_reg & 0xffff0000) |
2723 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2724 j++;
2725 if (j > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2726 return -EINVAL;
2727 break;
2728 default:
2729 break;
2730 }
2731 }
2732
2733 table->last = j;
2734
2735 return 0;
2736}
2737
2738static bool ni_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
2739{
2740 bool result = true;
2741
2742 switch (in_reg) {
2743 case MC_SEQ_RAS_TIMING >> 2:
2744 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
2745 break;
2746 case MC_SEQ_CAS_TIMING >> 2:
2747 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
2748 break;
2749 case MC_SEQ_MISC_TIMING >> 2:
2750 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
2751 break;
2752 case MC_SEQ_MISC_TIMING2 >> 2:
2753 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
2754 break;
2755 case MC_SEQ_RD_CTL_D0 >> 2:
2756 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
2757 break;
2758 case MC_SEQ_RD_CTL_D1 >> 2:
2759 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
2760 break;
2761 case MC_SEQ_WR_CTL_D0 >> 2:
2762 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
2763 break;
2764 case MC_SEQ_WR_CTL_D1 >> 2:
2765 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
2766 break;
2767 case MC_PMG_CMD_EMRS >> 2:
2768 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
2769 break;
2770 case MC_PMG_CMD_MRS >> 2:
2771 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
2772 break;
2773 case MC_PMG_CMD_MRS1 >> 2:
2774 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
2775 break;
2776 case MC_SEQ_PMG_TIMING >> 2:
2777 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
2778 break;
2779 case MC_PMG_CMD_MRS2 >> 2:
2780 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
2781 break;
2782 default:
2783 result = false;
2784 break;
2785 }
2786
2787 return result;
2788}
2789
2790static void ni_set_valid_flag(struct ni_mc_reg_table *table)
2791{
2792 u8 i, j;
2793
2794 for (i = 0; i < table->last; i++) {
2795 for (j = 1; j < table->num_entries; j++) {
2796 if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
2797 table->valid_flag |= 1 << i;
2798 break;
2799 }
2800 }
2801 }
2802}
2803
2804static void ni_set_s0_mc_reg_index(struct ni_mc_reg_table *table)
2805{
2806 u32 i;
2807 u16 address;
2808
2809 for (i = 0; i < table->last; i++)
2810 table->mc_reg_address[i].s0 =
2811 ni_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
2812 address : table->mc_reg_address[i].s1;
2813}
2814
2815static int ni_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
2816 struct ni_mc_reg_table *ni_table)
2817{
2818 u8 i, j;
2819
2820 if (table->last > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2821 return -EINVAL;
2822 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
2823 return -EINVAL;
2824
2825 for (i = 0; i < table->last; i++)
2826 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2827 ni_table->last = table->last;
2828
2829 for (i = 0; i < table->num_entries; i++) {
2830 ni_table->mc_reg_table_entry[i].mclk_max =
2831 table->mc_reg_table_entry[i].mclk_max;
2832 for (j = 0; j < table->last; j++)
2833 ni_table->mc_reg_table_entry[i].mc_data[j] =
2834 table->mc_reg_table_entry[i].mc_data[j];
2835 }
2836 ni_table->num_entries = table->num_entries;
2837
2838 return 0;
2839}
2840
/*
 * ni_initialize_mc_reg_table - build the driver's MC register table.
 *
 * Seeds the *_LP shadow registers from their live counterparts, reads
 * the AC-timing table for this memory module from the vbios, copies it
 * into ni_pi->mc_reg_table, fills in the low-power (s0) register
 * indices, appends the derived special-command registers, and marks
 * which columns actually vary between entries.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or an error
 * from the atom/table helpers.
 */
static int ni_initialize_mc_reg_table(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	int ret;
	struct atom_mc_reg_table *table;
	struct ni_mc_reg_table *ni_table = &ni_pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(rdev);

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* mirror the live MC sequencer registers into their LP shadows */
	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));

	/* fetch the vbios AC-timing table for the installed memory module */
	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);

	if (ret)
		goto init_mc_done;

	ret = ni_copy_vbios_mc_reg_table(table, ni_table);

	if (ret)
		goto init_mc_done;

	ni_set_s0_mc_reg_index(ni_table);

	ret = ni_set_mc_special_registers(rdev, ni_table);

	if (ret)
		goto init_mc_done;

	ni_set_valid_flag(ni_table);

init_mc_done:
	kfree(table);

	return ret;
}
2891
2892static void ni_populate_mc_reg_addresses(struct radeon_device *rdev,
2893 SMC_NIslands_MCRegisters *mc_reg_table)
2894{
2895 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2896 u32 i, j;
2897
2898 for (i = 0, j = 0; j < ni_pi->mc_reg_table.last; j++) {
2899 if (ni_pi->mc_reg_table.valid_flag & (1 << j)) {
2900 if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2901 break;
2902 mc_reg_table->address[i].s0 =
2903 cpu_to_be16(ni_pi->mc_reg_table.mc_reg_address[j].s0);
2904 mc_reg_table->address[i].s1 =
2905 cpu_to_be16(ni_pi->mc_reg_table.mc_reg_address[j].s1);
2906 i++;
2907 }
2908 }
2909 mc_reg_table->last = (u8)i;
2910}
2911
2912
2913static void ni_convert_mc_registers(struct ni_mc_reg_entry *entry,
2914 SMC_NIslands_MCRegisterSet *data,
2915 u32 num_entries, u32 valid_flag)
2916{
2917 u32 i, j;
2918
2919 for (i = 0, j = 0; j < num_entries; j++) {
2920 if (valid_flag & (1 << j)) {
2921 data->value[i] = cpu_to_be32(entry->mc_data[j]);
2922 i++;
2923 }
2924 }
2925}
2926
2927static void ni_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
2928 struct rv7xx_pl *pl,
2929 SMC_NIslands_MCRegisterSet *mc_reg_table_data)
2930{
2931 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2932 u32 i = 0;
2933
2934 for (i = 0; i < ni_pi->mc_reg_table.num_entries; i++) {
2935 if (pl->mclk <= ni_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
2936 break;
2937 }
2938
2939 if ((i == ni_pi->mc_reg_table.num_entries) && (i > 0))
2940 --i;
2941
2942 ni_convert_mc_registers(&ni_pi->mc_reg_table.mc_reg_table_entry[i],
2943 mc_reg_table_data,
2944 ni_pi->mc_reg_table.last,
2945 ni_pi->mc_reg_table.valid_flag);
2946}
2947
2948static void ni_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
2949 struct radeon_ps *radeon_state,
2950 SMC_NIslands_MCRegisters *mc_reg_table)
2951{
2952 struct ni_ps *state = ni_get_ps(radeon_state);
2953 int i;
2954
2955 for (i = 0; i < state->performance_level_count; i++) {
2956 ni_convert_mc_reg_table_entry_to_smc(rdev,
2957 &state->performance_levels[i],
2958 &mc_reg_table->data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
2959 }
2960}
2961
/*
 * ni_populate_mc_reg_table - upload the initial MC register table to
 * the SMC.
 *
 * Slot 0 gets the register set for the boot state's first performance
 * level, slot 1 a copy of AC-timing entry 0, and the driver-state
 * slots are filled from the boot state's levels.  The complete table
 * is then copied into SMC sram at eg_pi->mc_reg_table_start.
 *
 * Returns 0 on success or an error from the SMC copy.
 */
static int ni_populate_mc_reg_table(struct radeon_device *rdev,
				    struct radeon_ps *radeon_boot_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *boot_state = ni_get_ps(radeon_boot_state);
	SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;

	memset(mc_reg_table, 0, sizeof(SMC_NIslands_MCRegisters));

	/* tell the SMC which sequence index to start from */
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_seq_index, 1);

	ni_populate_mc_reg_addresses(rdev, mc_reg_table);

	/* slot 0: boot state's first level */
	ni_convert_mc_reg_table_entry_to_smc(rdev, &boot_state->performance_levels[0],
					     &mc_reg_table->data[0]);

	/* slot 1: raw AC-timing entry 0 */
	ni_convert_mc_registers(&ni_pi->mc_reg_table.mc_reg_table_entry[0],
				&mc_reg_table->data[1],
				ni_pi->mc_reg_table.last,
				ni_pi->mc_reg_table.valid_flag);

	/* driver-state slots: one per boot-state performance level */
	ni_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, mc_reg_table);

	return rv770_copy_bytes_to_smc(rdev, eg_pi->mc_reg_table_start,
				       (u8 *)mc_reg_table,
				       sizeof(SMC_NIslands_MCRegisters),
				       pi->sram_end);
}
2992
/*
 * ni_upload_mc_reg_table - refresh the driver-state MC register slots.
 *
 * Rebuilds the per-performance-level register sets for
 * @radeon_new_state and copies only the driver-state portion of the
 * table into SMC sram, leaving the boot/initial slots untouched.
 *
 * Returns 0 on success or an error from the SMC copy.
 */
static int ni_upload_mc_reg_table(struct radeon_device *rdev,
				  struct radeon_ps *radeon_new_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *ni_new_state = ni_get_ps(radeon_new_state);
	SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
	u16 address;

	memset(mc_reg_table, 0, sizeof(SMC_NIslands_MCRegisters));

	ni_convert_mc_reg_table_to_smc(rdev, radeon_new_state, mc_reg_table);

	/* upload starts at the first driver-state slot */
	address = eg_pi->mc_reg_table_start +
		(u16)offsetof(SMC_NIslands_MCRegisters, data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);

	return rv770_copy_bytes_to_smc(rdev, address,
				       (u8 *)&mc_reg_table->data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
				       sizeof(SMC_NIslands_MCRegisterSet) * ni_new_state->performance_level_count,
				       pi->sram_end);
}
3015
3016static int ni_init_driver_calculated_leakage_table(struct radeon_device *rdev,
3017 PP_NIslands_CACTABLES *cac_tables)
3018{
3019 struct ni_power_info *ni_pi = ni_get_pi(rdev);
3020 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3021 u32 leakage = 0;
3022 unsigned int i, j, table_size;
3023 s32 t;
3024 u32 smc_leakage, max_leakage = 0;
3025 u32 scaling_factor;
3026
3027 table_size = eg_pi->vddc_voltage_table.count;
3028
3029 if (SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES < table_size)
3030 table_size = SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
3031
3032 scaling_factor = ni_get_smc_power_scaling_factor(rdev);
3033
3034 for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
3035 for (j = 0; j < table_size; j++) {
3036 t = (1000 * ((i + 1) * 8));
3037
3038 if (t < ni_pi->cac_data.leakage_minimum_temperature)
3039 t = ni_pi->cac_data.leakage_minimum_temperature;
3040
3041 ni_calculate_leakage_for_v_and_t(rdev,
3042 &ni_pi->cac_data.leakage_coefficients,
3043 eg_pi->vddc_voltage_table.entries[j].value,
3044 t,
3045 ni_pi->cac_data.i_leakage,
3046 &leakage);
3047
3048 smc_leakage = ni_scale_power_for_smc(leakage, scaling_factor) / 1000;
3049 if (smc_leakage > max_leakage)
3050 max_leakage = smc_leakage;
3051
3052 cac_tables->cac_lkge_lut[i][j] = cpu_to_be32(smc_leakage);
3053 }
3054 }
3055
3056 for (j = table_size; j < SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
3057 for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
3058 cac_tables->cac_lkge_lut[i][j] = cpu_to_be32(max_leakage);
3059 }
3060 return 0;
3061}
3062
3063static int ni_init_simplified_leakage_table(struct radeon_device *rdev,
3064 PP_NIslands_CACTABLES *cac_tables)
3065{
3066 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3067 struct radeon_cac_leakage_table *leakage_table =
3068 &rdev->pm.dpm.dyn_state.cac_leakage_table;
3069 u32 i, j, table_size;
3070 u32 smc_leakage, max_leakage = 0;
3071 u32 scaling_factor;
3072
3073 if (!leakage_table)
3074 return -EINVAL;
3075
3076 table_size = leakage_table->count;
3077
3078 if (eg_pi->vddc_voltage_table.count != table_size)
3079 table_size = (eg_pi->vddc_voltage_table.count < leakage_table->count) ?
3080 eg_pi->vddc_voltage_table.count : leakage_table->count;
3081
3082 if (SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES < table_size)
3083 table_size = SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
3084
3085 if (table_size == 0)
3086 return -EINVAL;
3087
3088 scaling_factor = ni_get_smc_power_scaling_factor(rdev);
3089
3090 for (j = 0; j < table_size; j++) {
3091 smc_leakage = leakage_table->entries[j].leakage;
3092
3093 if (smc_leakage > max_leakage)
3094 max_leakage = smc_leakage;
3095
3096 for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
3097 cac_tables->cac_lkge_lut[i][j] =
3098 cpu_to_be32(ni_scale_power_for_smc(smc_leakage, scaling_factor));
3099 }
3100
3101 for (j = table_size; j < SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
3102 for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
3103 cac_tables->cac_lkge_lut[i][j] =
3104 cpu_to_be32(ni_scale_power_for_smc(max_leakage, scaling_factor));
3105 }
3106 return 0;
3107}
3108
/*
 * ni_initialize_smc_cac_tables - build and upload the SMC CAC tables.
 *
 * Programs the CAC control TID fields, copies the per-level dc_cac and
 * BIF weights, seeds the cac_data block from the weight table, builds
 * the leakage LUT (driver-calculated or vbios-simplified), and copies
 * the whole structure into SMC sram.
 *
 * Always returns 0 once the work buffer is allocated: a failure along
 * the way just disables CAC and power containment rather than failing
 * dpm setup (-ENOMEM is the only hard error).
 */
static int ni_initialize_smc_cac_tables(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	PP_NIslands_CACTABLES *cac_tables = NULL;
	int i, ret;
	u32 reg;

	if (ni_pi->enable_cac == false)
		return 0;

	cac_tables = kzalloc(sizeof(PP_NIslands_CACTABLES), GFP_KERNEL);
	if (!cac_tables)
		return -ENOMEM;

	/* program the TID count/unit from the asic's weight table */
	reg = RREG32(CG_CAC_CTRL) & ~(TID_CNT_MASK | TID_UNIT_MASK);
	reg |= (TID_CNT(ni_pi->cac_weights->tid_cnt) |
		TID_UNIT(ni_pi->cac_weights->tid_unit));
	WREG32(CG_CAC_CTRL, reg);

	for (i = 0; i < NISLANDS_DCCAC_MAX_LEVELS; i++)
		ni_pi->dc_cac_table[i] = ni_pi->cac_weights->dc_cac[i];

	for (i = 0; i < SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES; i++)
		cac_tables->cac_bif_lut[i] = ni_pi->cac_weights->pcie_cac[i];

	ni_pi->cac_data.i_leakage = rdev->pm.dpm.cac_leakage;
	ni_pi->cac_data.pwr_const = 0;
	ni_pi->cac_data.dc_cac_value = ni_pi->dc_cac_table[NISLANDS_DCCAC_LEVEL_0];
	ni_pi->cac_data.bif_cac_value = 0;
	ni_pi->cac_data.mc_wr_weight = ni_pi->cac_weights->mc_write_weight;
	ni_pi->cac_data.mc_rd_weight = ni_pi->cac_weights->mc_read_weight;
	ni_pi->cac_data.allow_ovrflw = 0;
	ni_pi->cac_data.l2num_win_tdp = ni_pi->lta_window_size;
	ni_pi->cac_data.num_win_tdp = 0;
	ni_pi->cac_data.lts_truncate_n = ni_pi->lts_truncate;

	/* build the leakage LUT with whichever model this asic uses */
	if (ni_pi->driver_calculate_cac_leakage)
		ret = ni_init_driver_calculated_leakage_table(rdev, cac_tables);
	else
		ret = ni_init_simplified_leakage_table(rdev, cac_tables);

	if (ret)
		goto done_free;

	cac_tables->pwr_const = cpu_to_be32(ni_pi->cac_data.pwr_const);
	cac_tables->dc_cacValue = cpu_to_be32(ni_pi->cac_data.dc_cac_value);
	cac_tables->bif_cacValue = cpu_to_be32(ni_pi->cac_data.bif_cac_value);
	cac_tables->AllowOvrflw = ni_pi->cac_data.allow_ovrflw;
	cac_tables->MCWrWeight = ni_pi->cac_data.mc_wr_weight;
	cac_tables->MCRdWeight = ni_pi->cac_data.mc_rd_weight;
	cac_tables->numWin_TDP = ni_pi->cac_data.num_win_tdp;
	cac_tables->l2numWin_TDP = ni_pi->cac_data.l2num_win_tdp;
	cac_tables->lts_truncate_n = ni_pi->cac_data.lts_truncate_n;

	ret = rv770_copy_bytes_to_smc(rdev, ni_pi->cac_table_start, (u8 *)cac_tables,
				      sizeof(PP_NIslands_CACTABLES), pi->sram_end);

done_free:
	/* on any failure, fall back to running without cac/containment */
	if (ret) {
		ni_pi->enable_cac = false;
		ni_pi->enable_power_containment = false;
	}

	kfree(cac_tables);

	return 0;
}
3177
/*
 * ni_initialize_hardware_cac_manager - program the per-block CAC weights
 * into the CG_CAC_REGION_* registers from the asic-specific weight table
 * (ni_pi->cac_weights, selected in ni_dpm_init by PCI device id).
 *
 * Every register update is a read-modify-write that clears only the
 * weight fields being set, so any other fields in the same register keep
 * their existing values.
 *
 * Returns 0 on success or when no programming is needed; -EINVAL if CAC
 * is enabled but no weight table was selected.
 */
static int ni_initialize_hardware_cac_manager(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 reg;

	/* Nothing to do unless CAC is on and the driver (not the
	 * hardware defaults) must supply the configuration. */
	if (!ni_pi->enable_cac ||
	    !ni_pi->cac_configuration_required)
		return 0;

	if (ni_pi->cac_weights == NULL)
		return -EINVAL;

	/* Region 1: TCP/TA (texture cache / texture addresser) weights */
	reg = RREG32_CG(CG_CAC_REGION_1_WEIGHT_0) & ~(WEIGHT_TCP_SIG0_MASK |
						      WEIGHT_TCP_SIG1_MASK |
						      WEIGHT_TA_SIG_MASK);
	reg |= (WEIGHT_TCP_SIG0(ni_pi->cac_weights->weight_tcp_sig0) |
		WEIGHT_TCP_SIG1(ni_pi->cac_weights->weight_tcp_sig1) |
		WEIGHT_TA_SIG(ni_pi->cac_weights->weight_ta_sig));
	WREG32_CG(CG_CAC_REGION_1_WEIGHT_0, reg);

	/* Region 1: TCC (texture cache, per-channel) weights */
	reg = RREG32_CG(CG_CAC_REGION_1_WEIGHT_1) & ~(WEIGHT_TCC_EN0_MASK |
						      WEIGHT_TCC_EN1_MASK |
						      WEIGHT_TCC_EN2_MASK);
	reg |= (WEIGHT_TCC_EN0(ni_pi->cac_weights->weight_tcc_en0) |
		WEIGHT_TCC_EN1(ni_pi->cac_weights->weight_tcc_en1) |
		WEIGHT_TCC_EN2(ni_pi->cac_weights->weight_tcc_en2));
	WREG32_CG(CG_CAC_REGION_1_WEIGHT_1, reg);

	/* Region 2: CB (color block) weights */
	reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_0) & ~(WEIGHT_CB_EN0_MASK |
						      WEIGHT_CB_EN1_MASK |
						      WEIGHT_CB_EN2_MASK |
						      WEIGHT_CB_EN3_MASK);
	reg |= (WEIGHT_CB_EN0(ni_pi->cac_weights->weight_cb_en0) |
		WEIGHT_CB_EN1(ni_pi->cac_weights->weight_cb_en1) |
		WEIGHT_CB_EN2(ni_pi->cac_weights->weight_cb_en2) |
		WEIGHT_CB_EN3(ni_pi->cac_weights->weight_cb_en3));
	WREG32_CG(CG_CAC_REGION_2_WEIGHT_0, reg);

	/* Region 2: DB (depth block) weights */
	reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_1) & ~(WEIGHT_DB_SIG0_MASK |
						      WEIGHT_DB_SIG1_MASK |
						      WEIGHT_DB_SIG2_MASK |
						      WEIGHT_DB_SIG3_MASK);
	reg |= (WEIGHT_DB_SIG0(ni_pi->cac_weights->weight_db_sig0) |
		WEIGHT_DB_SIG1(ni_pi->cac_weights->weight_db_sig1) |
		WEIGHT_DB_SIG2(ni_pi->cac_weights->weight_db_sig2) |
		WEIGHT_DB_SIG3(ni_pi->cac_weights->weight_db_sig3));
	WREG32_CG(CG_CAC_REGION_2_WEIGHT_1, reg);

	/* Region 2: SXM/SXS (shader export) weights */
	reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_2) & ~(WEIGHT_SXM_SIG0_MASK |
						      WEIGHT_SXM_SIG1_MASK |
						      WEIGHT_SXM_SIG2_MASK |
						      WEIGHT_SXS_SIG0_MASK |
						      WEIGHT_SXS_SIG1_MASK);
	reg |= (WEIGHT_SXM_SIG0(ni_pi->cac_weights->weight_sxm_sig0) |
		WEIGHT_SXM_SIG1(ni_pi->cac_weights->weight_sxm_sig1) |
		WEIGHT_SXM_SIG2(ni_pi->cac_weights->weight_sxm_sig2) |
		WEIGHT_SXS_SIG0(ni_pi->cac_weights->weight_sxs_sig0) |
		WEIGHT_SXS_SIG1(ni_pi->cac_weights->weight_sxs_sig1));
	WREG32_CG(CG_CAC_REGION_2_WEIGHT_2, reg);

	/* Region 3: crossbar and first SPI weight */
	reg = RREG32_CG(CG_CAC_REGION_3_WEIGHT_0) & ~(WEIGHT_XBR_0_MASK |
						      WEIGHT_XBR_1_MASK |
						      WEIGHT_XBR_2_MASK |
						      WEIGHT_SPI_SIG0_MASK);
	reg |= (WEIGHT_XBR_0(ni_pi->cac_weights->weight_xbr_0) |
		WEIGHT_XBR_1(ni_pi->cac_weights->weight_xbr_1) |
		WEIGHT_XBR_2(ni_pi->cac_weights->weight_xbr_2) |
		WEIGHT_SPI_SIG0(ni_pi->cac_weights->weight_spi_sig0));
	WREG32_CG(CG_CAC_REGION_3_WEIGHT_0, reg);

	/* Region 3: remaining SPI (shader processor interpolator) weights */
	reg = RREG32_CG(CG_CAC_REGION_3_WEIGHT_1) & ~(WEIGHT_SPI_SIG1_MASK |
						      WEIGHT_SPI_SIG2_MASK |
						      WEIGHT_SPI_SIG3_MASK |
						      WEIGHT_SPI_SIG4_MASK |
						      WEIGHT_SPI_SIG5_MASK);
	reg |= (WEIGHT_SPI_SIG1(ni_pi->cac_weights->weight_spi_sig1) |
		WEIGHT_SPI_SIG2(ni_pi->cac_weights->weight_spi_sig2) |
		WEIGHT_SPI_SIG3(ni_pi->cac_weights->weight_spi_sig3) |
		WEIGHT_SPI_SIG4(ni_pi->cac_weights->weight_spi_sig4) |
		WEIGHT_SPI_SIG5(ni_pi->cac_weights->weight_spi_sig5));
	WREG32_CG(CG_CAC_REGION_3_WEIGHT_1, reg);

	/* Region 4: LDS and SC weights */
	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_0) & ~(WEIGHT_LDS_SIG0_MASK |
						      WEIGHT_LDS_SIG1_MASK |
						      WEIGHT_SC_MASK);
	reg |= (WEIGHT_LDS_SIG0(ni_pi->cac_weights->weight_lds_sig0) |
		WEIGHT_LDS_SIG1(ni_pi->cac_weights->weight_lds_sig1) |
		WEIGHT_SC(ni_pi->cac_weights->weight_sc));
	WREG32_CG(CG_CAC_REGION_4_WEIGHT_0, reg);

	/* Region 4: BIF/CP/PA/VGT weights */
	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_1) & ~(WEIGHT_BIF_MASK |
						      WEIGHT_CP_MASK |
						      WEIGHT_PA_SIG0_MASK |
						      WEIGHT_PA_SIG1_MASK |
						      WEIGHT_VGT_SIG0_MASK);
	reg |= (WEIGHT_BIF(ni_pi->cac_weights->weight_bif) |
		WEIGHT_CP(ni_pi->cac_weights->weight_cp) |
		WEIGHT_PA_SIG0(ni_pi->cac_weights->weight_pa_sig0) |
		WEIGHT_PA_SIG1(ni_pi->cac_weights->weight_pa_sig1) |
		WEIGHT_VGT_SIG0(ni_pi->cac_weights->weight_vgt_sig0));
	WREG32_CG(CG_CAC_REGION_4_WEIGHT_1, reg);

	/* Region 4: remaining VGT and DC weights */
	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_2) & ~(WEIGHT_VGT_SIG1_MASK |
						      WEIGHT_VGT_SIG2_MASK |
						      WEIGHT_DC_SIG0_MASK |
						      WEIGHT_DC_SIG1_MASK |
						      WEIGHT_DC_SIG2_MASK);
	reg |= (WEIGHT_VGT_SIG1(ni_pi->cac_weights->weight_vgt_sig1) |
		WEIGHT_VGT_SIG2(ni_pi->cac_weights->weight_vgt_sig2) |
		WEIGHT_DC_SIG0(ni_pi->cac_weights->weight_dc_sig0) |
		WEIGHT_DC_SIG1(ni_pi->cac_weights->weight_dc_sig1) |
		WEIGHT_DC_SIG2(ni_pi->cac_weights->weight_dc_sig2));
	WREG32_CG(CG_CAC_REGION_4_WEIGHT_2, reg);

	/* Region 4: last DC weight, UVD and spare weights */
	reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_3) & ~(WEIGHT_DC_SIG3_MASK |
						      WEIGHT_UVD_SIG0_MASK |
						      WEIGHT_UVD_SIG1_MASK |
						      WEIGHT_SPARE0_MASK |
						      WEIGHT_SPARE1_MASK);
	reg |= (WEIGHT_DC_SIG3(ni_pi->cac_weights->weight_dc_sig3) |
		WEIGHT_UVD_SIG0(ni_pi->cac_weights->weight_uvd_sig0) |
		WEIGHT_UVD_SIG1(ni_pi->cac_weights->weight_uvd_sig1) |
		WEIGHT_SPARE0(ni_pi->cac_weights->weight_spare0) |
		WEIGHT_SPARE1(ni_pi->cac_weights->weight_spare1));
	WREG32_CG(CG_CAC_REGION_4_WEIGHT_3, reg);

	/* Region 5: SQ (sequencer) VSP weights */
	reg = RREG32_CG(CG_CAC_REGION_5_WEIGHT_0) & ~(WEIGHT_SQ_VSP_MASK |
						      WEIGHT_SQ_VSP0_MASK);
	reg |= (WEIGHT_SQ_VSP(ni_pi->cac_weights->weight_sq_vsp) |
		WEIGHT_SQ_VSP0(ni_pi->cac_weights->weight_sq_vsp0));
	WREG32_CG(CG_CAC_REGION_5_WEIGHT_0, reg);

	/* Region 5: SQ GPR weight */
	reg = RREG32_CG(CG_CAC_REGION_5_WEIGHT_1) & ~(WEIGHT_SQ_GPR_MASK);
	reg |= WEIGHT_SQ_GPR(ni_pi->cac_weights->weight_sq_gpr);
	WREG32_CG(CG_CAC_REGION_5_WEIGHT_1, reg);

	/* Region 4 override fields for the spare weights */
	reg = RREG32_CG(CG_CAC_REGION_4_OVERRIDE_4) & ~(OVR_MODE_SPARE_0_MASK |
							OVR_VAL_SPARE_0_MASK |
							OVR_MODE_SPARE_1_MASK |
							OVR_VAL_SPARE_1_MASK);
	reg |= (OVR_MODE_SPARE_0(ni_pi->cac_weights->ovr_mode_spare_0) |
		OVR_VAL_SPARE_0(ni_pi->cac_weights->ovr_val_spare_0) |
		OVR_MODE_SPARE_1(ni_pi->cac_weights->ovr_mode_spare_1) |
		OVR_VAL_SPARE_1(ni_pi->cac_weights->ovr_val_spare_1));
	WREG32_CG(CG_CAC_REGION_4_OVERRIDE_4, reg);

	/* SQ thresholds (plain MMIO register, not in the CG block) */
	reg = RREG32(SQ_CAC_THRESHOLD) & ~(VSP_MASK |
					   VSP0_MASK |
					   GPR_MASK);
	reg |= (VSP(ni_pi->cac_weights->vsp) |
		VSP0(ni_pi->cac_weights->vsp0) |
		GPR(ni_pi->cac_weights->gpr));
	WREG32(SQ_CAC_THRESHOLD, reg);

	/* Memory controller: select index 0x09D4 with write-enable for
	 * all four MCD channels, then write the read/write weights
	 * through the dataport.
	 * NOTE(review): 0x09D4 is an undocumented MC index here — taken
	 * as-is from the original; confirm against MC register docs. */
	reg = (MCDW_WR_ENABLE |
	       MCDX_WR_ENABLE |
	       MCDY_WR_ENABLE |
	       MCDZ_WR_ENABLE |
	       INDEX(0x09D4));
	WREG32(MC_CG_CONFIG, reg);

	reg = (READ_WEIGHT(ni_pi->cac_weights->mc_read_weight) |
	       WRITE_WEIGHT(ni_pi->cac_weights->mc_write_weight) |
	       ALLOW_OVERFLOW);
	WREG32(MC_CG_DATAPORT, reg);

	return 0;
}
3346
3347static int ni_enable_smc_cac(struct radeon_device *rdev,
3348 struct radeon_ps *radeon_new_state,
3349 bool enable)
3350{
3351 struct ni_power_info *ni_pi = ni_get_pi(rdev);
3352 int ret = 0;
3353 PPSMC_Result smc_result;
3354
3355 if (ni_pi->enable_cac) {
3356 if (enable) {
3357 if (!r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
3358 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_CollectCAC_PowerCorreln);
3359
3360 if (ni_pi->support_cac_long_term_average) {
3361 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgEnable);
3362 if (PPSMC_Result_OK != smc_result)
3363 ni_pi->support_cac_long_term_average = false;
3364 }
3365
3366 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
3367 if (PPSMC_Result_OK != smc_result)
3368 ret = -EINVAL;
3369
3370 ni_pi->cac_enabled = (PPSMC_Result_OK == smc_result) ? true : false;
3371 }
3372 } else if (ni_pi->cac_enabled) {
3373 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
3374
3375 ni_pi->cac_enabled = false;
3376
3377 if (ni_pi->support_cac_long_term_average) {
3378 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgDisable);
3379 if (PPSMC_Result_OK != smc_result)
3380 ni_pi->support_cac_long_term_average = false;
3381 }
3382 }
3383 }
3384
3385 return ret;
3386}
3387
3388static int ni_pcie_performance_request(struct radeon_device *rdev,
3389 u8 perf_req, bool advertise)
3390{
3391 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3392
3393#if defined(CONFIG_ACPI)
3394 if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
3395 (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
3396 if (eg_pi->pcie_performance_request_registered == false)
3397 radeon_acpi_pcie_notify_device_ready(rdev);
3398 eg_pi->pcie_performance_request_registered = true;
3399 return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
3400 } else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
3401 eg_pi->pcie_performance_request_registered) {
3402 eg_pi->pcie_performance_request_registered = false;
3403 return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
3404 }
3405#endif
3406 return 0;
3407}
3408
3409static int ni_advertise_gen2_capability(struct radeon_device *rdev)
3410{
3411 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3412 u32 tmp;
3413
3414 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
3415
3416 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
3417 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
3418 pi->pcie_gen2 = true;
3419 else
3420 pi->pcie_gen2 = false;
3421
3422 if (!pi->pcie_gen2)
3423 ni_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, true);
3424
3425 return 0;
3426}
3427
/*
 * ni_enable_bif_dynamic_pcie_gen2 - enable/disable hardware-driven PCIe
 * gen2 speed switching in the BIF (bus interface).
 *
 * Only acts when the link peer has both sent and supports gen2
 * (LC_OTHER_SIDE_* bits in PCIE_LC_SPEED_CNTL); otherwise the register
 * is left untouched.
 */
static void ni_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
					    bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 tmp, bif;

	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);

	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
		if (enable) {
			/* if the board didn't boot in gen2, point the BIF
			 * clock-gating client request at value 0xd first */
			if (!pi->boot_in_gen2) {
				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
				bif |= CG_CLIENT_REQ(0xd);
				WREG32(CG_BIF_REQ_AND_RSP, bif);
			}
			/* let hardware control the voltage interface and
			 * strap the link to gen2 */
			tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
			tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
			tmp |= LC_GEN2_EN_STRAP;

			/* pulse the failed-speed-change counter clear:
			 * set, write, wait, clear, write */
			tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
			udelay(10);
			tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
		} else {
			/* NOTE(review): in the disable path the
			 * HW_VOLTAGE_IF_CONTROL / GEN2_EN_STRAP clears sit
			 * inside the !boot_in_gen2 branch, unlike the
			 * enable path where they are unconditional — kept
			 * as-is; confirm against BIF programming docs. */
			if (!pi->boot_in_gen2) {
				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
				bif |= CG_CLIENT_REQ(0xd);
				WREG32(CG_BIF_REQ_AND_RSP, bif);

				tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
				tmp &= ~LC_GEN2_EN_STRAP;
			}
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
		}
	}
}
3466
3467static void ni_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
3468 bool enable)
3469{
3470 ni_enable_bif_dynamic_pcie_gen2(rdev, enable);
3471
3472 if (enable)
3473 WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
3474 else
3475 WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
3476}
3477
3478void ni_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
3479 struct radeon_ps *new_ps,
3480 struct radeon_ps *old_ps)
3481{
3482 struct ni_ps *new_state = ni_get_ps(new_ps);
3483 struct ni_ps *current_state = ni_get_ps(old_ps);
3484
3485 if ((new_ps->vclk == old_ps->vclk) &&
3486 (new_ps->dclk == old_ps->dclk))
3487 return;
3488
3489 if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
3490 current_state->performance_levels[current_state->performance_level_count - 1].sclk)
3491 return;
3492
3493 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
3494}
3495
3496void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
3497 struct radeon_ps *new_ps,
3498 struct radeon_ps *old_ps)
3499{
3500 struct ni_ps *new_state = ni_get_ps(new_ps);
3501 struct ni_ps *current_state = ni_get_ps(old_ps);
3502
3503 if ((new_ps->vclk == old_ps->vclk) &&
3504 (new_ps->dclk == old_ps->dclk))
3505 return;
3506
3507 if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
3508 current_state->performance_levels[current_state->performance_level_count - 1].sclk)
3509 return;
3510
3511 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
3512}
3513
/*
 * ni_dpm_setup_asic - one-time asic-level DPM setup: snapshot clock and
 * arbiter registers, detect memory type and PCIe gen2 status, advertise
 * gen2 via ACPI when the platform supports performance requests, and
 * enable ACPI power management.
 */
void ni_dpm_setup_asic(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	ni_read_clock_registers(rdev);
	btc_read_arb_registers(rdev);
	rv770_get_memory_type(rdev);
	if (eg_pi->pcie_performance_request)
		ni_advertise_gen2_capability(rdev);
	rv770_get_pcie_gen2_status(rdev);
	rv770_enable_acpi_pm(rdev);
}
3526
3527void ni_update_current_ps(struct radeon_device *rdev,
3528 struct radeon_ps *rps)
3529{
3530 struct ni_ps *new_ps = ni_get_ps(rps);
3531 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3532 struct ni_power_info *ni_pi = ni_get_pi(rdev);
3533
3534 eg_pi->current_rps = *rps;
3535 ni_pi->current_ps = *new_ps;
3536 eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
3537}
3538
3539void ni_update_requested_ps(struct radeon_device *rdev,
3540 struct radeon_ps *rps)
3541{
3542 struct ni_ps *new_ps = ni_get_ps(rps);
3543 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3544 struct ni_power_info *ni_pi = ni_get_pi(rdev);
3545
3546 eg_pi->requested_rps = *rps;
3547 ni_pi->requested_ps = *new_ps;
3548 eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
3549}
3550
/*
 * ni_dpm_enable - full DPM bring-up sequence for Northern Islands.
 *
 * Order matters throughout: clock-gating defaults and voltage/SS/thermal
 * setup come first, then firmware upload and SMC table initialization,
 * then CAC/TDP programming, and only then is the SMC started and DPM
 * switched on. Features that fail non-fatally (e.g. dynamic AC timing)
 * are disabled rather than aborting.
 *
 * Returns 0 on success or a negative error code; -EINVAL if DPM is
 * already enabled.
 */
int ni_dpm_enable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	if (pi->gfx_clock_gating)
		ni_cg_clockgating_default(rdev);
	/* NOTE(review): the already-enabled check comes after the CG
	 * default write above — kept as in the original; confirm the
	 * intended ordering. */
	if (btc_dpm_enabled(rdev))
		return -EINVAL;
	if (pi->mg_clock_gating)
		ni_mg_clockgating_default(rdev);
	if (eg_pi->ls_clock_gating)
		ni_ls_clockgating_default(rdev);
	if (pi->voltage_control) {
		rv770_enable_voltage_control(rdev, true);
		ret = cypress_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("cypress_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (eg_pi->dynamic_ac_timing) {
		/* non-fatal: fall back to static AC timing on failure */
		ret = ni_initialize_mc_reg_table(rdev);
		if (ret)
			eg_pi->dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		cypress_enable_spread_spectrum(rdev, true);
	if (pi->thermal_protection)
		rv770_enable_thermal_protection(rdev, true);
	/* program the various sclk/timing parameters */
	rv770_setup_bsp(rdev);
	rv770_program_git(rdev);
	rv770_program_tp(rdev);
	rv770_program_tpp(rdev);
	rv770_program_sstp(rdev);
	cypress_enable_display_gap(rdev);
	rv770_program_vc(rdev);
	if (pi->dynamic_pcie_gen2)
		ni_enable_dynamic_pcie_gen2(rdev, true);
	/* firmware and SMC table setup */
	ret = rv770_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("rv770_upload_firmware failed\n");
		return ret;
	}
	ret = ni_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("ni_process_firmware_header failed\n");
		return ret;
	}
	ret = ni_initial_switch_from_arb_f0_to_f1(rdev);
	if (ret) {
		DRM_ERROR("ni_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ni_init_smc_table(rdev);
	if (ret) {
		DRM_ERROR("ni_init_smc_table failed\n");
		return ret;
	}
	ret = ni_init_smc_spll_table(rdev);
	if (ret) {
		DRM_ERROR("ni_init_smc_spll_table failed\n");
		return ret;
	}
	ret = ni_init_arb_table_index(rdev);
	if (ret) {
		DRM_ERROR("ni_init_arb_table_index failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = ni_populate_mc_reg_table(rdev, boot_ps);
		if (ret) {
			DRM_ERROR("ni_populate_mc_reg_table failed\n");
			return ret;
		}
	}
	/* CAC and TDP programming */
	ret = ni_initialize_smc_cac_tables(rdev);
	if (ret) {
		DRM_ERROR("ni_initialize_smc_cac_tables failed\n");
		return ret;
	}
	ret = ni_initialize_hardware_cac_manager(rdev);
	if (ret) {
		DRM_ERROR("ni_initialize_hardware_cac_manager failed\n");
		return ret;
	}
	ret = ni_populate_smc_tdp_limits(rdev, boot_ps);
	if (ret) {
		DRM_ERROR("ni_populate_smc_tdp_limits failed\n");
		return ret;
	}
	ni_program_response_times(rdev);
	/* start the SMC and hand it clock control */
	r7xx_start_smc(rdev);
	ret = cypress_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("cypress_notify_smc_display_change failed\n");
		return ret;
	}
	cypress_enable_sclk_control(rdev, true);
	if (eg_pi->memory_transition)
		cypress_enable_mclk_control(rdev, true);
	cypress_start_dpm(rdev);
	if (pi->gfx_clock_gating)
		ni_gfx_clockgating_enable(rdev, true);
	if (pi->mg_clock_gating)
		ni_mg_clockgating_enable(rdev, true);
	if (eg_pi->ls_clock_gating)
		ni_ls_clockgating_enable(rdev, true);

	/* hook up the thermal interrupt if we have an internal sensor */
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		PPSMC_Result result;

		ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, 0xff * 1000);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
		result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);

		if (result != PPSMC_Result_OK)
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
	}

	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	/* the boot state is now the current state */
	ni_update_current_ps(rdev, boot_ps);

	return 0;
}
3683
/*
 * ni_dpm_disable - tear down DPM, roughly reversing ni_dpm_enable:
 * stop the voltage controller and feature blocks (thermal protection,
 * power containment, CAC, spread spectrum, throttling, gen2), detach
 * the thermal interrupt, undo clock gating, then stop DPM and the SMC
 * and restore the default/boot configuration.
 */
void ni_dpm_disable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	/* nothing to do if DPM never came up */
	if (!btc_dpm_enabled(rdev))
		return;
	rv770_clear_vc(rdev);
	if (pi->thermal_protection)
		rv770_enable_thermal_protection(rdev, false);
	ni_enable_power_containment(rdev, boot_ps, false);
	ni_enable_smc_cac(rdev, boot_ps, false);
	cypress_enable_spread_spectrum(rdev, false);
	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	if (pi->dynamic_pcie_gen2)
		ni_enable_dynamic_pcie_gen2(rdev, false);

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		rdev->irq.dpm_thermal = false;
		radeon_irq_set(rdev);
	}

	if (pi->gfx_clock_gating)
		ni_gfx_clockgating_enable(rdev, false);
	if (pi->mg_clock_gating)
		ni_mg_clockgating_enable(rdev, false);
	if (eg_pi->ls_clock_gating)
		ni_ls_clockgating_enable(rdev, false);
	ni_stop_dpm(rdev);
	btc_reset_to_default(rdev);
	ni_stop_smc(rdev);
	ni_force_switch_to_arb_f0(rdev);

	/* back at the boot state now */
	ni_update_current_ps(rdev, boot_ps);
}
3721
3722static int ni_power_control_set_level(struct radeon_device *rdev)
3723{
3724 struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
3725 int ret;
3726
3727 ret = ni_restrict_performance_levels_before_switch(rdev);
3728 if (ret)
3729 return ret;
3730 ret = rv770_halt_smc(rdev);
3731 if (ret)
3732 return ret;
3733 ret = ni_populate_smc_tdp_limits(rdev, new_ps);
3734 if (ret)
3735 return ret;
3736 ret = rv770_resume_smc(rdev);
3737 if (ret)
3738 return ret;
3739 ret = rv770_set_sw_state(rdev);
3740 if (ret)
3741 return ret;
3742
3743 return 0;
3744}
3745
3746int ni_dpm_pre_set_power_state(struct radeon_device *rdev)
3747{
3748 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3749 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
3750 struct radeon_ps *new_ps = &requested_ps;
3751
3752 ni_update_requested_ps(rdev, new_ps);
3753
3754 ni_apply_state_adjust_rules(rdev, &eg_pi->requested_rps);
3755
3756 return 0;
3757}
3758
/*
 * ni_dpm_set_power_state - switch the SMC from the current power state
 * to the (already adjusted) requested state.
 *
 * Sequence: cap performance levels, pre-switch UVD clock handling,
 * temporarily drop power containment and CAC, halt the SMC while the
 * new state/MC tables are uploaded, resume and request the sw state,
 * then post-switch UVD clocks, re-enable CAC and power containment,
 * and finally refresh the TDP limits.
 *
 * Returns 0 on success or the first failing step's error code.
 */
int ni_dpm_set_power_state(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *new_ps = &eg_pi->requested_rps;
	struct radeon_ps *old_ps = &eg_pi->current_rps;
	int ret;

	ret = ni_restrict_performance_levels_before_switch(rdev);
	if (ret) {
		DRM_ERROR("ni_restrict_performance_levels_before_switch failed\n");
		return ret;
	}
	/* clock UVD down before the engine switch if sclk is dropping */
	ni_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
	/* disable containment/CAC across the switch */
	ret = ni_enable_power_containment(rdev, new_ps, false);
	if (ret) {
		DRM_ERROR("ni_enable_power_containment failed\n");
		return ret;
	}
	ret = ni_enable_smc_cac(rdev, new_ps, false);
	if (ret) {
		DRM_ERROR("ni_enable_smc_cac failed\n");
		return ret;
	}
	ret = rv770_halt_smc(rdev);
	if (ret) {
		DRM_ERROR("rv770_halt_smc failed\n");
		return ret;
	}
	if (eg_pi->smu_uvd_hs)
		btc_notify_uvd_to_smc(rdev, new_ps);
	ret = ni_upload_sw_state(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ni_upload_sw_state failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = ni_upload_mc_reg_table(rdev, new_ps);
		if (ret) {
			DRM_ERROR("ni_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ni_program_memory_timing_parameters(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ni_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = rv770_resume_smc(rdev);
	if (ret) {
		DRM_ERROR("rv770_resume_smc failed\n");
		return ret;
	}
	ret = rv770_set_sw_state(rdev);
	if (ret) {
		DRM_ERROR("rv770_set_sw_state failed\n");
		return ret;
	}
	/* clock UVD up after the engine switch if sclk rose */
	ni_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
	ret = ni_enable_smc_cac(rdev, new_ps, true);
	if (ret) {
		DRM_ERROR("ni_enable_smc_cac failed\n");
		return ret;
	}
	ret = ni_enable_power_containment(rdev, new_ps, true);
	if (ret) {
		DRM_ERROR("ni_enable_power_containment failed\n");
		return ret;
	}

	/* update tdp */
	ret = ni_power_control_set_level(rdev);
	if (ret) {
		DRM_ERROR("ni_power_control_set_level failed\n");
		return ret;
	}

#if 0
	/* XXX */
	ret = ni_unrestrict_performance_levels_after_switch(rdev);
	if (ret) {
		DRM_ERROR("ni_unrestrict_performance_levels_after_switch failed\n");
		return ret;
	}
#endif

	return 0;
}
3846
3847void ni_dpm_post_set_power_state(struct radeon_device *rdev)
3848{
3849 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3850 struct radeon_ps *new_ps = &eg_pi->requested_rps;
3851
3852 ni_update_current_ps(rdev, new_ps);
3853}
3854
/*
 * ni_dpm_reset_asic - drop the asic back to a safe state: cap the
 * performance levels, then force the boot power state.
 */
void ni_dpm_reset_asic(struct radeon_device *rdev)
{
	ni_restrict_performance_levels_before_switch(rdev);
	rv770_set_boot_state(rdev);
}
3860
/* Overlays for the ATOM PowerPlay table revisions; the actual layout is
 * selected at parse time from the table header. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Per-asic-family clock info entry layouts; NI uses the evergreen form. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

/* Power state entry layouts (v1 is the form indexed in ni_parse_power_table). */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
3881
3882static void ni_parse_pplib_non_clock_info(struct radeon_device *rdev,
3883 struct radeon_ps *rps,
3884 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
3885 u8 table_rev)
3886{
3887 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
3888 rps->class = le16_to_cpu(non_clock_info->usClassification);
3889 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
3890
3891 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
3892 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
3893 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
3894 } else if (r600_is_uvd_state(rps->class, rps->class2)) {
3895 rps->vclk = RV770_DEFAULT_VCLK_FREQ;
3896 rps->dclk = RV770_DEFAULT_DCLK_FREQ;
3897 } else {
3898 rps->vclk = 0;
3899 rps->dclk = 0;
3900 }
3901
3902 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
3903 rdev->pm.dpm.boot_ps = rps;
3904 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
3905 rdev->pm.dpm.uvd_ps = rps;
3906}
3907
/*
 * ni_parse_pplib_clock_info - decode one evergreen-layout clock info
 * entry into performance level @index of the state, and update the
 * driver-wide values derived from it (ACPI state voltages, ULV level,
 * min/max vddc, boot state clocks, max AC clocks).
 */
static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_ps *ps = ni_get_ps(rps);
	u16 vddc;
	struct rv7xx_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	/* sclk/mclk are split low-16 / high-8 in the table */
	pl->sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
	pl->sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
	pl->mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;

	pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC);
	pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI);
	pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags);

	/* patch up vddc if necessary: 0xff01 is a sentinel meaning
	 * "use the board's max vddc" */
	if (pl->vddc == 0xff01) {
		if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
			pl->vddc = vddc;
	}

	/* remember the ACPI state's voltages and gen2 capability */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_vddc = pl->vddc;
		eg_pi->acpi_vddci = pl->vddci;
		if (ps->performance_levels[0].flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
			pi->acpi_pcie_gen2 = true;
		else
			pi->acpi_pcie_gen2 = false;
	}

	/* a ULV-classified level becomes the ultra-low-voltage level */
	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		eg_pi->ulv.supported = true;
		eg_pi->ulv.pl = pl;
	}

	if (pi->min_vddc_in_table > pl->vddc)
		pi->min_vddc_in_table = pl->vddc;

	if (pi->max_vddc_in_table < pl->vddc)
		pi->max_vddc_in_table = pl->vddc;

	/* patch up boot state: use the firmware defaults instead of
	 * whatever the table entry says */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		u16 vddc, vddci, mvdd;
		radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
		pl->mclk = rdev->clock.default_mclk;
		pl->sclk = rdev->clock.default_sclk;
		pl->vddc = vddc;
		pl->vddci = vddci;
	}

	/* the "performance" UI state defines the max clocks on AC power */
	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
	}
}
3973
3974static int ni_parse_power_table(struct radeon_device *rdev)
3975{
3976 struct radeon_mode_info *mode_info = &rdev->mode_info;
3977 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
3978 union pplib_power_state *power_state;
3979 int i, j;
3980 union pplib_clock_info *clock_info;
3981 union power_info *power_info;
3982 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
3983 u16 data_offset;
3984 u8 frev, crev;
3985 struct ni_ps *ps;
3986
3987 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
3988 &frev, &crev, &data_offset))
3989 return -EINVAL;
3990 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
3991
3992 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
3993 power_info->pplib.ucNumStates, GFP_KERNEL);
3994 if (!rdev->pm.dpm.ps)
3995 return -ENOMEM;
3996 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
3997 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
3998 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
3999
4000 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
4001 power_state = (union pplib_power_state *)
4002 (mode_info->atom_context->bios + data_offset +
4003 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
4004 i * power_info->pplib.ucStateEntrySize);
4005 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
4006 (mode_info->atom_context->bios + data_offset +
4007 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
4008 (power_state->v1.ucNonClockStateIndex *
4009 power_info->pplib.ucNonClockSize));
4010 if (power_info->pplib.ucStateEntrySize - 1) {
4011 ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL);
4012 if (ps == NULL) {
4013 kfree(rdev->pm.dpm.ps);
4014 return -ENOMEM;
4015 }
4016 rdev->pm.dpm.ps[i].ps_priv = ps;
4017 ni_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4018 non_clock_info,
4019 power_info->pplib.ucNonClockSize);
4020 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
4021 clock_info = (union pplib_clock_info *)
4022 (mode_info->atom_context->bios + data_offset +
4023 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
4024 (power_state->v1.ucClockStateIndices[j] *
4025 power_info->pplib.ucClockInfoSize));
4026 ni_parse_pplib_clock_info(rdev,
4027 &rdev->pm.dpm.ps[i], j,
4028 clock_info);
4029 }
4030 }
4031 }
4032 rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
4033 return 0;
4034}
4035
4036int ni_dpm_init(struct radeon_device *rdev)
4037{
4038 struct rv7xx_power_info *pi;
4039 struct evergreen_power_info *eg_pi;
4040 struct ni_power_info *ni_pi;
4041 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
4042 u16 data_offset, size;
4043 u8 frev, crev;
4044 struct atom_clock_dividers dividers;
4045 int ret;
4046
4047 ni_pi = kzalloc(sizeof(struct ni_power_info), GFP_KERNEL);
4048 if (ni_pi == NULL)
4049 return -ENOMEM;
4050 rdev->pm.dpm.priv = ni_pi;
4051 eg_pi = &ni_pi->eg;
4052 pi = &eg_pi->rv7xx;
4053
4054 rv770_get_max_vddc(rdev);
4055
4056 eg_pi->ulv.supported = false;
4057 pi->acpi_vddc = 0;
4058 eg_pi->acpi_vddci = 0;
4059 pi->min_vddc_in_table = 0;
4060 pi->max_vddc_in_table = 0;
4061
4062 ret = ni_parse_power_table(rdev);
4063 if (ret)
4064 return ret;
4065 ret = r600_parse_extended_power_table(rdev);
4066 if (ret)
4067 return ret;
4068
4069 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
4070 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
4071 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
4072 r600_free_extended_power_table(rdev);
4073 return -ENOMEM;
4074 }
4075 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
4076 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
4077 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
4078 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
4079 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
4080 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
4081 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
4082 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
4083 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
4084
4085 ni_patch_dependency_tables_based_on_leakage(rdev);
4086
4087 if (rdev->pm.dpm.voltage_response_time == 0)
4088 rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
4089 if (rdev->pm.dpm.backbias_response_time == 0)
4090 rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
4091
4092 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
4093 0, false, &dividers);
4094 if (ret)
4095 pi->ref_div = dividers.ref_div + 1;
4096 else
4097 pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
4098
4099 pi->rlp = RV770_RLP_DFLT;
4100 pi->rmp = RV770_RMP_DFLT;
4101 pi->lhp = RV770_LHP_DFLT;
4102 pi->lmp = RV770_LMP_DFLT;
4103
4104 eg_pi->ats[0].rlp = RV770_RLP_DFLT;
4105 eg_pi->ats[0].rmp = RV770_RMP_DFLT;
4106 eg_pi->ats[0].lhp = RV770_LHP_DFLT;
4107 eg_pi->ats[0].lmp = RV770_LMP_DFLT;
4108
4109 eg_pi->ats[1].rlp = BTC_RLP_UVD_DFLT;
4110 eg_pi->ats[1].rmp = BTC_RMP_UVD_DFLT;
4111 eg_pi->ats[1].lhp = BTC_LHP_UVD_DFLT;
4112 eg_pi->ats[1].lmp = BTC_LMP_UVD_DFLT;
4113
4114 eg_pi->smu_uvd_hs = true;
4115
4116 if (rdev->pdev->device == 0x6707) {
4117 pi->mclk_strobe_mode_threshold = 55000;
4118 pi->mclk_edc_enable_threshold = 55000;
4119 eg_pi->mclk_edc_wr_enable_threshold = 55000;
4120 } else {
4121 pi->mclk_strobe_mode_threshold = 40000;
4122 pi->mclk_edc_enable_threshold = 40000;
4123 eg_pi->mclk_edc_wr_enable_threshold = 40000;
4124 }
4125 ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
4126
4127 pi->voltage_control =
4128 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
4129
4130 pi->mvdd_control =
4131 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
4132
4133 eg_pi->vddci_control =
4134 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
4135
4136 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
4137 &frev, &crev, &data_offset)) {
4138 pi->sclk_ss = true;
4139 pi->mclk_ss = true;
4140 pi->dynamic_ss = true;
4141 } else {
4142 pi->sclk_ss = false;
4143 pi->mclk_ss = false;
4144 pi->dynamic_ss = true;
4145 }
4146
4147 pi->asi = RV770_ASI_DFLT;
4148 pi->pasi = CYPRESS_HASI_DFLT;
4149 pi->vrc = CYPRESS_VRC_DFLT;
4150
4151 pi->power_gating = false;
4152
4153 pi->gfx_clock_gating = true;
4154
4155 pi->mg_clock_gating = true;
4156 pi->mgcgtssm = true;
4157 eg_pi->ls_clock_gating = false;
4158 eg_pi->sclk_deep_sleep = false;
4159
4160 pi->dynamic_pcie_gen2 = true;
4161
4162 if (pi->gfx_clock_gating &&
4163 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
4164 pi->thermal_protection = true;
4165 else
4166 pi->thermal_protection = false;
4167
4168 pi->display_gap = true;
4169
4170 pi->dcodt = true;
4171
4172 pi->ulps = true;
4173
4174 eg_pi->dynamic_ac_timing = true;
4175 eg_pi->abm = true;
4176 eg_pi->mcls = true;
4177 eg_pi->light_sleep = true;
4178 eg_pi->memory_transition = true;
4179#if defined(CONFIG_ACPI)
4180 eg_pi->pcie_performance_request =
4181 radeon_acpi_is_pcie_performance_request_supported(rdev);
4182#else
4183 eg_pi->pcie_performance_request = false;
4184#endif
4185
4186 eg_pi->dll_default_on = false;
4187
4188 eg_pi->sclk_deep_sleep = false;
4189
4190 pi->mclk_stutter_mode_threshold = 0;
4191
4192 pi->sram_end = SMC_RAM_END;
4193
4194 rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 3;
4195 rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
4196 rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2 = 900;
4197 rdev->pm.dpm.dyn_state.valid_sclk_values.count = ARRAY_SIZE(btc_valid_sclk);
4198 rdev->pm.dpm.dyn_state.valid_sclk_values.values = btc_valid_sclk;
4199 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
4200 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
4201 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 12500;
4202
4203 ni_pi->cac_data.leakage_coefficients.at = 516;
4204 ni_pi->cac_data.leakage_coefficients.bt = 18;
4205 ni_pi->cac_data.leakage_coefficients.av = 51;
4206 ni_pi->cac_data.leakage_coefficients.bv = 2957;
4207
4208 switch (rdev->pdev->device) {
4209 case 0x6700:
4210 case 0x6701:
4211 case 0x6702:
4212 case 0x6703:
4213 case 0x6718:
4214 ni_pi->cac_weights = &cac_weights_cayman_xt;
4215 break;
4216 case 0x6705:
4217 case 0x6719:
4218 case 0x671D:
4219 case 0x671C:
4220 default:
4221 ni_pi->cac_weights = &cac_weights_cayman_pro;
4222 break;
4223 case 0x6704:
4224 case 0x6706:
4225 case 0x6707:
4226 case 0x6708:
4227 case 0x6709:
4228 ni_pi->cac_weights = &cac_weights_cayman_le;
4229 break;
4230 }
4231
4232 if (ni_pi->cac_weights->enable_power_containment_by_default) {
4233 ni_pi->enable_power_containment = true;
4234 ni_pi->enable_cac = true;
4235 ni_pi->enable_sq_ramping = true;
4236 } else {
4237 ni_pi->enable_power_containment = false;
4238 ni_pi->enable_cac = false;
4239 ni_pi->enable_sq_ramping = false;
4240 }
4241
4242 ni_pi->driver_calculate_cac_leakage = false;
4243 ni_pi->cac_configuration_required = true;
4244
4245 if (ni_pi->cac_configuration_required) {
4246 ni_pi->support_cac_long_term_average = true;
4247 ni_pi->lta_window_size = ni_pi->cac_weights->l2_lta_window_size;
4248 ni_pi->lts_truncate = ni_pi->cac_weights->lts_truncate;
4249 } else {
4250 ni_pi->support_cac_long_term_average = false;
4251 ni_pi->lta_window_size = 0;
4252 ni_pi->lts_truncate = 0;
4253 }
4254
4255 ni_pi->use_power_boost_limit = true;
4256
4257 return 0;
4258}
4259
/*
 * ni_dpm_fini - release all DPM state for a Cayman/Northern Islands device.
 * Frees the per-power-state private data, the power-state array, the
 * hardware-specific private struct, and the dispclk-dependency table, then
 * hands the extended power tables back to the shared r600 helper.
 * Mirror of the allocations done in ni_dpm_init (partially visible above).
 */
4260void ni_dpm_fini(struct radeon_device *rdev)
4261{
4262	int i;
4263
/* Free each state's driver-private struct before freeing the array itself. */
4264	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
4265		kfree(rdev->pm.dpm.ps[i].ps_priv);
4266	}
4267	kfree(rdev->pm.dpm.ps);
4268	kfree(rdev->pm.dpm.priv);
4269	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
/* Extended tables were parsed by common r600 code; free them the same way. */
4270	r600_free_extended_power_table(rdev);
4271}
4272
/*
 * ni_dpm_print_power_state - dump a power state to the kernel log.
 * Prints class/caps info via shared r600 helpers, the UVD clocks, and one
 * line per performance level (sclk/mclk/vddc/vddci).
 */
4273void ni_dpm_print_power_state(struct radeon_device *rdev,
4274				 struct radeon_ps *rps)
4275{
4276	struct ni_ps *ps = ni_get_ps(rps);
4277	struct rv7xx_pl *pl;
4278	int i;
4279
4280	r600_dpm_print_class_info(rps->class, rps->class2);
4281	r600_dpm_print_cap_info(rps->caps);
4282	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
4283	for (i = 0; i < ps->performance_level_count; i++) {
4284		pl = &ps->performance_levels[i];
/*
 * SI (Tahiti and newer) levels also carry a PCIe gen field; pcie_gen is
 * zero-based, hence the "+ 1" for human-readable output.  NOTE(review):
 * this NI-named helper apparently serves SI too -- confirm callers.
 */
4285		if (rdev->family >= CHIP_TAHITI)
4286			printk("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
4287			       i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
4288		else
4289			printk("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
4290			       i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
4291	}
4292	r600_dpm_print_ps_status(rdev, rps);
4293}
4294
/*
 * ni_dpm_get_sclk - report the requested engine clock.
 * @low: true returns the lowest performance level's sclk, false the highest.
 * Reads from the currently *requested* power state, not the active hardware
 * state.
 */
4295u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low)
4296{
4297	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
4298	struct ni_ps *requested_state = ni_get_ps(&eg_pi->requested_rps);
4299
4300	if (low)
4301		return requested_state->performance_levels[0].sclk;
4302	else
/* Highest level is the last entry; count is assumed to be >= 1 here. */
4303		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
4304}
4305
/*
 * ni_dpm_get_mclk - report the requested memory clock.
 * @low: true returns the lowest performance level's mclk, false the highest.
 * Companion to ni_dpm_get_sclk; same requested-state semantics.
 */
4306u32 ni_dpm_get_mclk(struct radeon_device *rdev, bool low)
4307{
4308	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
4309	struct ni_ps *requested_state = ni_get_ps(&eg_pi->requested_rps);
4310
4311	if (low)
4312		return requested_state->performance_levels[0].mclk;
4313	else
/* Highest level is the last entry; count is assumed to be >= 1 here. */
4314		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
4315}
4316
diff --git a/drivers/gpu/drm/radeon/ni_dpm.h b/drivers/gpu/drm/radeon/ni_dpm.h
new file mode 100644
index 000000000000..ac1c7abf2c67
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_dpm.h
@@ -0,0 +1,248 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __NI_DPM_H__
24#define __NI_DPM_H__
25
26#include "cypress_dpm.h"
27#include "btc_dpm.h"
28#include "nislands_smc.h"
29
/*
 * Saved values of the clock-control registers (names match the SPLL/MPLL/
 * DLL register #defines in nid.h).  Presumably captured at init so they can
 * be fed into the SMC state tables -- TODO confirm against ni_dpm.c.
 */
30struct ni_clock_registers {
31	u32 cg_spll_func_cntl;
32	u32 cg_spll_func_cntl_2;
33	u32 cg_spll_func_cntl_3;
34	u32 cg_spll_func_cntl_4;
35	u32 cg_spll_spread_spectrum;
36	u32 cg_spll_spread_spectrum_2;
37	u32 mclk_pwrmgt_cntl;
38	u32 dll_cntl;
39	u32 mpll_ad_func_cntl;
40	u32 mpll_ad_func_cntl_2;
41	u32 mpll_dq_func_cntl;
42	u32 mpll_dq_func_cntl_2;
43	u32 mpll_ss1;
44	u32 mpll_ss2;
45};
46
/*
 * One row of memory-controller register values, valid for memory clocks up
 * to mclk_max.  mc_data[] is indexed in step with ni_mc_reg_table's
 * mc_reg_address[] array.
 */
47struct ni_mc_reg_entry {
48	u32 mclk_max;
49	u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
50};
51
/*
 * Memory-controller register table: per-mclk value rows plus the register
 * address list they correspond to.
 * @last:        number of valid registers per row
 * @num_entries: number of valid rows in mc_reg_table_entry[]
 * @valid_flag:  bitmask -- per-register validity, presumably; TODO confirm
 */
52struct ni_mc_reg_table {
53	u8 last;
54	u8 num_entries;
55	u16 valid_flag;
56	struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
57	SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
58};
59
60#define NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 2
61
/*
 * Indices for the DC (display-controller) CAC level tables -- used to size
 * and index ni_cac_weights.dc_cac[] and ni_power_info.dc_cac_table[].
 */
62enum ni_dc_cac_level
63{
64	NISLANDS_DCCAC_LEVEL_0 = 0,
65	NISLANDS_DCCAC_LEVEL_1,
66	NISLANDS_DCCAC_LEVEL_2,
67	NISLANDS_DCCAC_LEVEL_3,
68	NISLANDS_DCCAC_LEVEL_4,
69	NISLANDS_DCCAC_LEVEL_5,
70	NISLANDS_DCCAC_LEVEL_6,
71	NISLANDS_DCCAC_LEVEL_7,
72	NISLANDS_DCCAC_MAX_LEVELS
73};
74
/*
 * Leakage-current model coefficients (at/bt vs temperature, av/bv vs
 * voltage); ni_dpm_init (above) fills at/bt/av/bv with fixed constants.
 * Note: "coeffients" spelling is kept as-is -- it is part of the public
 * type name used elsewhere in the driver.
 */
75struct ni_leakage_coeffients
76{
77	u32 at;
78	u32 bt;
79	u32 av;
80	u32 bv;
81	s32 t_slope;
82	s32 t_intercept;
83	u32 t_ref;
84};
85
/*
 * Working data for CAC (capacitance * activity, i.e. dynamic/leakage power)
 * calculations: the leakage model coefficients plus assorted intermediate
 * values and MC read/write weighting bytes fed to the SMC.
 */
86struct ni_cac_data
87{
88	struct ni_leakage_coeffients leakage_coefficients;
89	u32 i_leakage;
90	s32 leakage_minimum_temperature;
91	u32 pwr_const;
92	u32 dc_cac_value;
93	u32 bif_cac_value;
94	u32 lkge_pwr;
95	u8 mc_wr_weight;
96	u8 mc_rd_weight;
97	u8 allow_ovrflw;
98	u8 num_win_tdp;
99	u8 l2num_win_tdp;
100	u8 lts_truncate_n;
101};
102
/*
 * Per-ASIC-variant CAC weight table.  ni_dpm_init (above) selects one of
 * cac_weights_cayman_{xt,pro,le} by PCI device ID and stores it in
 * ni_power_info.cac_weights.  Field names track the CG_CAC_REGION_*_WEIGHT_*
 * bitfields defined in nid.h; enable_power_containment_by_default gates
 * power containment / CAC / SQ ramping in ni_dpm_init.
 */
103struct ni_cac_weights
104{
105	u32 weight_tcp_sig0;
106	u32 weight_tcp_sig1;
107	u32 weight_ta_sig;
108	u32 weight_tcc_en0;
109	u32 weight_tcc_en1;
110	u32 weight_tcc_en2;
111	u32 weight_cb_en0;
112	u32 weight_cb_en1;
113	u32 weight_cb_en2;
114	u32 weight_cb_en3;
115	u32 weight_db_sig0;
116	u32 weight_db_sig1;
117	u32 weight_db_sig2;
118	u32 weight_db_sig3;
119	u32 weight_sxm_sig0;
120	u32 weight_sxm_sig1;
121	u32 weight_sxm_sig2;
122	u32 weight_sxs_sig0;
123	u32 weight_sxs_sig1;
124	u32 weight_xbr_0;
125	u32 weight_xbr_1;
126	u32 weight_xbr_2;
127	u32 weight_spi_sig0;
128	u32 weight_spi_sig1;
129	u32 weight_spi_sig2;
130	u32 weight_spi_sig3;
131	u32 weight_spi_sig4;
132	u32 weight_spi_sig5;
133	u32 weight_lds_sig0;
134	u32 weight_lds_sig1;
135	u32 weight_sc;
136	u32 weight_bif;
137	u32 weight_cp;
138	u32 weight_pa_sig0;
139	u32 weight_pa_sig1;
140	u32 weight_vgt_sig0;
141	u32 weight_vgt_sig1;
142	u32 weight_vgt_sig2;
143	u32 weight_dc_sig0;
144	u32 weight_dc_sig1;
145	u32 weight_dc_sig2;
146	u32 weight_dc_sig3;
147	u32 weight_uvd_sig0;
148	u32 weight_uvd_sig1;
149	u32 weight_spare0;
150	u32 weight_spare1;
151	u32 weight_sq_vsp;
152	u32 weight_sq_vsp0;
153	u32 weight_sq_gpr;
154	u32 ovr_mode_spare_0;
155	u32 ovr_val_spare_0;
156	u32 ovr_mode_spare_1;
157	u32 ovr_val_spare_1;
158	u32 vsp;
159	u32 vsp0;
160	u32 gpr;
161	u8 mc_read_weight;
162	u8 mc_write_weight;
163	u32 tid_cnt;
164	u32 tid_unit;
165	u32 l2_lta_window_size;
166	u32 lts_truncate;
167	u32 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
168	u32 pcie_cac[SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES];
169	bool enable_power_containment_by_default;
170};
171
/*
 * NI driver-side power state: a set of performance levels (shared rv7xx
 * level struct) plus a DC-compatibility flag.  Stored in radeon_ps.ps_priv
 * and retrieved via ni_get_ps(); freed in ni_dpm_fini.
 */
172struct ni_ps {
173	u16 performance_level_count;
174	bool dc_compatible;
175	struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
176};
177
/*
 * Top-level NI DPM private struct, stored in rdev->pm.dpm.priv.
 * Embeds the evergreen struct first so the evergreen/rv7xx helpers
 * (e.g. evergreen_get_pi(), used by ni_dpm_get_sclk above) can cast the
 * same pointer to their own type.
 */
178struct ni_power_info {
179	/* must be first! */
180	struct evergreen_power_info eg;
181	struct ni_clock_registers clock_registers;
182	struct ni_mc_reg_table mc_reg_table;
183	u32 mclk_rtt_mode_threshold;
184	/* flags */
185	bool use_power_boost_limit;
186	bool support_cac_long_term_average;
187	bool cac_enabled;
188	bool cac_configuration_required;
189	bool driver_calculate_cac_leakage;
190	bool pc_enabled;
191	bool enable_power_containment;
192	bool enable_cac;
193	bool enable_sq_ramping;
	/* SMC RAM offsets of the uploaded tables */
194	/* smc offsets */
195	u16 arb_table_start;
196	u16 fan_table_start;
197	u16 cac_table_start;
198	u16 spll_table_start;
199	/* CAC stuff */
200	struct ni_cac_data cac_data;
201	u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
202	const struct ni_cac_weights *cac_weights;
203	u8 lta_window_size;
204	u8 lts_truncate;
205	struct ni_ps current_ps;
206	struct ni_ps requested_ps;
	/* scratch copies of the SMC-format tables built before upload */
207	/* scratch structs */
208	SMC_NIslands_MCRegisters smc_mc_reg_table;
209	NISLANDS_SMC_STATETABLE smc_statetable;
210};
211
212#define NISLANDS_INITIAL_STATE_ARB_INDEX 0
213#define NISLANDS_ACPI_STATE_ARB_INDEX 1
214#define NISLANDS_ULV_STATE_ARB_INDEX 2
215#define NISLANDS_DRIVER_STATE_ARB_INDEX 3
216
217#define NISLANDS_DPM2_MAX_PULSE_SKIP 256
218
219#define NISLANDS_DPM2_NEAR_TDP_DEC 10
220#define NISLANDS_DPM2_ABOVE_SAFE_INC 5
221#define NISLANDS_DPM2_BELOW_SAFE_INC 20
222
223#define NISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT 80
224
225#define NISLANDS_DPM2_MAXPS_PERCENT_H 90
226#define NISLANDS_DPM2_MAXPS_PERCENT_M 0
227
228#define NISLANDS_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
229#define NISLANDS_DPM2_SQ_RAMP_MIN_POWER 0x12
230#define NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
231#define NISLANDS_DPM2_SQ_RAMP_STI_SIZE 0x1E
232#define NISLANDS_DPM2_SQ_RAMP_LTI_RATIO 0xF
233
234int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
235 u32 arb_freq_src, u32 arb_freq_dest);
236void ni_update_current_ps(struct radeon_device *rdev,
237 struct radeon_ps *rps);
238void ni_update_requested_ps(struct radeon_device *rdev,
239 struct radeon_ps *rps);
240
241void ni_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
242 struct radeon_ps *new_ps,
243 struct radeon_ps *old_ps);
244void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
245 struct radeon_ps *new_ps,
246 struct radeon_ps *old_ps);
247
248#endif
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index e226faf16fea..95693c77351d 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -489,6 +489,567 @@
489# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0) 489# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
490# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0) 490# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
491 491
492/* TN SMU registers */
493#define TN_CURRENT_GNB_TEMP 0x1F390
494
495/* pm registers */
496#define SMC_MSG 0x20c
497#define HOST_SMC_MSG(x) ((x) << 0)
498#define HOST_SMC_MSG_MASK (0xff << 0)
499#define HOST_SMC_MSG_SHIFT 0
500#define HOST_SMC_RESP(x) ((x) << 8)
501#define HOST_SMC_RESP_MASK (0xff << 8)
502#define HOST_SMC_RESP_SHIFT 8
503#define SMC_HOST_MSG(x) ((x) << 16)
504#define SMC_HOST_MSG_MASK (0xff << 16)
505#define SMC_HOST_MSG_SHIFT 16
506#define SMC_HOST_RESP(x) ((x) << 24)
507#define SMC_HOST_RESP_MASK (0xff << 24)
508#define SMC_HOST_RESP_SHIFT 24
509
510#define CG_SPLL_FUNC_CNTL 0x600
511#define SPLL_RESET (1 << 0)
512#define SPLL_SLEEP (1 << 1)
513#define SPLL_BYPASS_EN (1 << 3)
514#define SPLL_REF_DIV(x) ((x) << 4)
515#define SPLL_REF_DIV_MASK (0x3f << 4)
516#define SPLL_PDIV_A(x) ((x) << 20)
517#define SPLL_PDIV_A_MASK (0x7f << 20)
518#define SPLL_PDIV_A_SHIFT 20
519#define CG_SPLL_FUNC_CNTL_2 0x604
520#define SCLK_MUX_SEL(x) ((x) << 0)
521#define SCLK_MUX_SEL_MASK (0x1ff << 0)
522#define CG_SPLL_FUNC_CNTL_3 0x608
523#define SPLL_FB_DIV(x) ((x) << 0)
524#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
525#define SPLL_FB_DIV_SHIFT 0
526#define SPLL_DITHEN (1 << 28)
527
528#define MPLL_CNTL_MODE 0x61c
529# define SS_SSEN (1 << 24)
530# define SS_DSMODE_EN (1 << 25)
531
532#define MPLL_AD_FUNC_CNTL 0x624
533#define CLKF(x) ((x) << 0)
534#define CLKF_MASK (0x7f << 0)
535#define CLKR(x) ((x) << 7)
536#define CLKR_MASK (0x1f << 7)
537#define CLKFRAC(x) ((x) << 12)
538#define CLKFRAC_MASK (0x1f << 12)
539#define YCLK_POST_DIV(x) ((x) << 17)
540#define YCLK_POST_DIV_MASK (3 << 17)
541#define IBIAS(x) ((x) << 20)
542#define IBIAS_MASK (0x3ff << 20)
543#define RESET (1 << 30)
544#define PDNB (1 << 31)
545#define MPLL_AD_FUNC_CNTL_2 0x628
546#define BYPASS (1 << 19)
547#define BIAS_GEN_PDNB (1 << 24)
548#define RESET_EN (1 << 25)
549#define VCO_MODE (1 << 29)
550#define MPLL_DQ_FUNC_CNTL 0x62c
551#define MPLL_DQ_FUNC_CNTL_2 0x630
552
553#define GENERAL_PWRMGT 0x63c
554# define GLOBAL_PWRMGT_EN (1 << 0)
555# define STATIC_PM_EN (1 << 1)
556# define THERMAL_PROTECTION_DIS (1 << 2)
557# define THERMAL_PROTECTION_TYPE (1 << 3)
558# define ENABLE_GEN2PCIE (1 << 4)
559# define ENABLE_GEN2XSP (1 << 5)
560# define SW_SMIO_INDEX(x) ((x) << 6)
561# define SW_SMIO_INDEX_MASK (3 << 6)
562# define SW_SMIO_INDEX_SHIFT 6
563# define LOW_VOLT_D2_ACPI (1 << 8)
564# define LOW_VOLT_D3_ACPI (1 << 9)
565# define VOLT_PWRMGT_EN (1 << 10)
566# define BACKBIAS_PAD_EN (1 << 18)
567# define BACKBIAS_VALUE (1 << 19)
568# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
569# define AC_DC_SW (1 << 24)
570
571#define SCLK_PWRMGT_CNTL 0x644
572# define SCLK_PWRMGT_OFF (1 << 0)
573# define SCLK_LOW_D1 (1 << 1)
574# define FIR_RESET (1 << 4)
575# define FIR_FORCE_TREND_SEL (1 << 5)
576# define FIR_TREND_MODE (1 << 6)
577# define DYN_GFX_CLK_OFF_EN (1 << 7)
578# define GFX_CLK_FORCE_ON (1 << 8)
579# define GFX_CLK_REQUEST_OFF (1 << 9)
580# define GFX_CLK_FORCE_OFF (1 << 10)
581# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
582# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
583# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
584# define DYN_LIGHT_SLEEP_EN (1 << 14)
585#define MCLK_PWRMGT_CNTL 0x648
586# define DLL_SPEED(x) ((x) << 0)
587# define DLL_SPEED_MASK (0x1f << 0)
588# define MPLL_PWRMGT_OFF (1 << 5)
589# define DLL_READY (1 << 6)
590# define MC_INT_CNTL (1 << 7)
591# define MRDCKA0_PDNB (1 << 8)
592# define MRDCKA1_PDNB (1 << 9)
593# define MRDCKB0_PDNB (1 << 10)
594# define MRDCKB1_PDNB (1 << 11)
595# define MRDCKC0_PDNB (1 << 12)
596# define MRDCKC1_PDNB (1 << 13)
597# define MRDCKD0_PDNB (1 << 14)
598# define MRDCKD1_PDNB (1 << 15)
599# define MRDCKA0_RESET (1 << 16)
600# define MRDCKA1_RESET (1 << 17)
601# define MRDCKB0_RESET (1 << 18)
602# define MRDCKB1_RESET (1 << 19)
603# define MRDCKC0_RESET (1 << 20)
604# define MRDCKC1_RESET (1 << 21)
605# define MRDCKD0_RESET (1 << 22)
606# define MRDCKD1_RESET (1 << 23)
607# define DLL_READY_READ (1 << 24)
608# define USE_DISPLAY_GAP (1 << 25)
609# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
610# define MPLL_TURNOFF_D2 (1 << 28)
611#define DLL_CNTL 0x64c
612# define MRDCKA0_BYPASS (1 << 24)
613# define MRDCKA1_BYPASS (1 << 25)
614# define MRDCKB0_BYPASS (1 << 26)
615# define MRDCKB1_BYPASS (1 << 27)
616# define MRDCKC0_BYPASS (1 << 28)
617# define MRDCKC1_BYPASS (1 << 29)
618# define MRDCKD0_BYPASS (1 << 30)
619# define MRDCKD1_BYPASS (1 << 31)
620
621#define CG_AT 0x6d4
622# define CG_R(x) ((x) << 0)
623# define CG_R_MASK (0xffff << 0)
624# define CG_L(x) ((x) << 16)
625# define CG_L_MASK (0xffff << 16)
626
627#define CG_BIF_REQ_AND_RSP 0x7f4
628#define CG_CLIENT_REQ(x) ((x) << 0)
629#define CG_CLIENT_REQ_MASK (0xff << 0)
630#define CG_CLIENT_REQ_SHIFT 0
631#define CG_CLIENT_RESP(x) ((x) << 8)
632#define CG_CLIENT_RESP_MASK (0xff << 8)
633#define CG_CLIENT_RESP_SHIFT 8
634#define CLIENT_CG_REQ(x) ((x) << 16)
635#define CLIENT_CG_REQ_MASK (0xff << 16)
636#define CLIENT_CG_REQ_SHIFT 16
637#define CLIENT_CG_RESP(x) ((x) << 24)
638#define CLIENT_CG_RESP_MASK (0xff << 24)
639#define CLIENT_CG_RESP_SHIFT 24
640
641#define CG_SPLL_SPREAD_SPECTRUM 0x790
642#define SSEN (1 << 0)
643#define CLK_S(x) ((x) << 4)
644#define CLK_S_MASK (0xfff << 4)
645#define CLK_S_SHIFT 4
646#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
647#define CLK_V(x) ((x) << 0)
648#define CLK_V_MASK (0x3ffffff << 0)
649#define CLK_V_SHIFT 0
650
651#define SMC_SCRATCH0 0x81c
652
653#define CG_SPLL_FUNC_CNTL_4 0x850
654
655#define MPLL_SS1 0x85c
656#define CLKV(x) ((x) << 0)
657#define CLKV_MASK (0x3ffffff << 0)
658#define MPLL_SS2 0x860
659#define CLKS(x) ((x) << 0)
660#define CLKS_MASK (0xfff << 0)
661
662#define CG_CAC_CTRL 0x88c
663#define TID_CNT(x) ((x) << 0)
664#define TID_CNT_MASK (0x3fff << 0)
665#define TID_UNIT(x) ((x) << 14)
666#define TID_UNIT_MASK (0xf << 14)
667
668#define CG_IND_ADDR 0x8f8
669#define CG_IND_DATA 0x8fc
670/* CGIND regs */
671#define CG_CGTT_LOCAL_0 0x00
672#define CG_CGTT_LOCAL_1 0x01
673
674#define MC_CG_CONFIG 0x25bc
675#define MCDW_WR_ENABLE (1 << 0)
676#define MCDX_WR_ENABLE (1 << 1)
677#define MCDY_WR_ENABLE (1 << 2)
678#define MCDZ_WR_ENABLE (1 << 3)
679#define MC_RD_ENABLE(x) ((x) << 4)
680#define MC_RD_ENABLE_MASK (3 << 4)
681#define INDEX(x) ((x) << 6)
682#define INDEX_MASK (0xfff << 6)
683#define INDEX_SHIFT 6
684
685#define MC_ARB_CAC_CNTL 0x2750
686#define ENABLE (1 << 0)
687#define READ_WEIGHT(x) ((x) << 1)
688#define READ_WEIGHT_MASK (0x3f << 1)
689#define READ_WEIGHT_SHIFT 1
690#define WRITE_WEIGHT(x) ((x) << 7)
691#define WRITE_WEIGHT_MASK (0x3f << 7)
692#define WRITE_WEIGHT_SHIFT 7
693#define ALLOW_OVERFLOW (1 << 13)
694
695#define MC_ARB_DRAM_TIMING 0x2774
696#define MC_ARB_DRAM_TIMING2 0x2778
697
698#define MC_ARB_RFSH_RATE 0x27b0
699#define POWERMODE0(x) ((x) << 0)
700#define POWERMODE0_MASK (0xff << 0)
701#define POWERMODE0_SHIFT 0
702#define POWERMODE1(x) ((x) << 8)
703#define POWERMODE1_MASK (0xff << 8)
704#define POWERMODE1_SHIFT 8
705#define POWERMODE2(x) ((x) << 16)
706#define POWERMODE2_MASK (0xff << 16)
707#define POWERMODE2_SHIFT 16
708#define POWERMODE3(x) ((x) << 24)
709#define POWERMODE3_MASK (0xff << 24)
710#define POWERMODE3_SHIFT 24
711
712#define MC_ARB_CG 0x27e8
713#define CG_ARB_REQ(x) ((x) << 0)
714#define CG_ARB_REQ_MASK (0xff << 0)
715#define CG_ARB_REQ_SHIFT 0
716#define CG_ARB_RESP(x) ((x) << 8)
717#define CG_ARB_RESP_MASK (0xff << 8)
718#define CG_ARB_RESP_SHIFT 8
719#define ARB_CG_REQ(x) ((x) << 16)
720#define ARB_CG_REQ_MASK (0xff << 16)
721#define ARB_CG_REQ_SHIFT 16
722#define ARB_CG_RESP(x) ((x) << 24)
723#define ARB_CG_RESP_MASK (0xff << 24)
724#define ARB_CG_RESP_SHIFT 24
725
726#define MC_ARB_DRAM_TIMING_1 0x27f0
727#define MC_ARB_DRAM_TIMING_2 0x27f4
728#define MC_ARB_DRAM_TIMING_3 0x27f8
729#define MC_ARB_DRAM_TIMING2_1 0x27fc
730#define MC_ARB_DRAM_TIMING2_2 0x2800
731#define MC_ARB_DRAM_TIMING2_3 0x2804
732#define MC_ARB_BURST_TIME 0x2808
733#define STATE0(x) ((x) << 0)
734#define STATE0_MASK (0x1f << 0)
735#define STATE0_SHIFT 0
736#define STATE1(x) ((x) << 5)
737#define STATE1_MASK (0x1f << 5)
738#define STATE1_SHIFT 5
739#define STATE2(x) ((x) << 10)
740#define STATE2_MASK (0x1f << 10)
741#define STATE2_SHIFT 10
742#define STATE3(x) ((x) << 15)
743#define STATE3_MASK (0x1f << 15)
744#define STATE3_SHIFT 15
745
746#define MC_CG_DATAPORT 0x2884
747
748#define MC_SEQ_RAS_TIMING 0x28a0
749#define MC_SEQ_CAS_TIMING 0x28a4
750#define MC_SEQ_MISC_TIMING 0x28a8
751#define MC_SEQ_MISC_TIMING2 0x28ac
752#define MC_SEQ_PMG_TIMING 0x28b0
753#define MC_SEQ_RD_CTL_D0 0x28b4
754#define MC_SEQ_RD_CTL_D1 0x28b8
755#define MC_SEQ_WR_CTL_D0 0x28bc
756#define MC_SEQ_WR_CTL_D1 0x28c0
757
758#define MC_SEQ_MISC0 0x2a00
759#define MC_SEQ_MISC0_GDDR5_SHIFT 28
760#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
761#define MC_SEQ_MISC0_GDDR5_VALUE 5
762#define MC_SEQ_MISC1 0x2a04
763#define MC_SEQ_RESERVE_M 0x2a08
764#define MC_PMG_CMD_EMRS 0x2a0c
765
766#define MC_SEQ_MISC3 0x2a2c
767
768#define MC_SEQ_MISC5 0x2a54
769#define MC_SEQ_MISC6 0x2a58
770
771#define MC_SEQ_MISC7 0x2a64
772
773#define MC_SEQ_RAS_TIMING_LP 0x2a6c
774#define MC_SEQ_CAS_TIMING_LP 0x2a70
775#define MC_SEQ_MISC_TIMING_LP 0x2a74
776#define MC_SEQ_MISC_TIMING2_LP 0x2a78
777#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
778#define MC_SEQ_WR_CTL_D1_LP 0x2a80
779#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
780#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
781
782#define MC_PMG_CMD_MRS 0x2aac
783
784#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
785#define MC_SEQ_RD_CTL_D1_LP 0x2b20
786
787#define MC_PMG_CMD_MRS1 0x2b44
788#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
789#define MC_SEQ_PMG_TIMING_LP 0x2b4c
790
791#define MC_PMG_CMD_MRS2 0x2b5c
792#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
793
794#define LB_SYNC_RESET_SEL 0x6b28
795#define LB_SYNC_RESET_SEL_MASK (3 << 0)
796#define LB_SYNC_RESET_SEL_SHIFT 0
797
798#define DC_STUTTER_CNTL 0x6b30
799#define DC_STUTTER_ENABLE_A (1 << 0)
800#define DC_STUTTER_ENABLE_B (1 << 1)
801
802#define SQ_CAC_THRESHOLD 0x8e4c
803#define VSP(x) ((x) << 0)
804#define VSP_MASK (0xff << 0)
805#define VSP_SHIFT 0
806#define VSP0(x) ((x) << 8)
807#define VSP0_MASK (0xff << 8)
808#define VSP0_SHIFT 8
809#define GPR(x) ((x) << 16)
810#define GPR_MASK (0xff << 16)
811#define GPR_SHIFT 16
812
/*
 * SQ_POWER_THROTTLE: MIN_POWER in bits [13:0], MAX_POWER in bits [29:16]
 * (both 14-bit fields, per the 0x3fff masks below).
 */
813#define SQ_POWER_THROTTLE                               0x8e58
814#define         MIN_POWER(x)                            ((x) << 0)
815#define         MIN_POWER_MASK                          (0x3fff << 0)
816#define         MIN_POWER_SHIFT                         0
817#define         MAX_POWER(x)                            ((x) << 16)
818#define         MAX_POWER_MASK                          (0x3fff << 16)
/* Fix: shift was 0, contradicting MAX_POWER(x) ((x) << 16) and its mask. */
819#define         MAX_POWER_SHIFT                         16
820#define SQ_POWER_THROTTLE2 0x8e5c
821#define MAX_POWER_DELTA(x) ((x) << 0)
822#define MAX_POWER_DELTA_MASK (0x3fff << 0)
823#define MAX_POWER_DELTA_SHIFT 0
824#define STI_SIZE(x) ((x) << 16)
825#define STI_SIZE_MASK (0x3ff << 16)
826#define STI_SIZE_SHIFT 16
827#define LTI_RATIO(x) ((x) << 27)
828#define LTI_RATIO_MASK (0xf << 27)
829#define LTI_RATIO_SHIFT 27
830
831/* CG indirect registers */
832#define CG_CAC_REGION_1_WEIGHT_0 0x83
833#define WEIGHT_TCP_SIG0(x) ((x) << 0)
834#define WEIGHT_TCP_SIG0_MASK (0x3f << 0)
835#define WEIGHT_TCP_SIG0_SHIFT 0
836#define WEIGHT_TCP_SIG1(x) ((x) << 6)
837#define WEIGHT_TCP_SIG1_MASK (0x3f << 6)
838#define WEIGHT_TCP_SIG1_SHIFT 6
839#define WEIGHT_TA_SIG(x) ((x) << 12)
840#define WEIGHT_TA_SIG_MASK (0x3f << 12)
841#define WEIGHT_TA_SIG_SHIFT 12
842#define CG_CAC_REGION_1_WEIGHT_1 0x84
843#define WEIGHT_TCC_EN0(x) ((x) << 0)
844#define WEIGHT_TCC_EN0_MASK (0x3f << 0)
845#define WEIGHT_TCC_EN0_SHIFT 0
846#define WEIGHT_TCC_EN1(x) ((x) << 6)
847#define WEIGHT_TCC_EN1_MASK (0x3f << 6)
848#define WEIGHT_TCC_EN1_SHIFT 6
849#define WEIGHT_TCC_EN2(x) ((x) << 12)
850#define WEIGHT_TCC_EN2_MASK (0x3f << 12)
851#define WEIGHT_TCC_EN2_SHIFT 12
852#define WEIGHT_TCC_EN3(x) ((x) << 18)
853#define WEIGHT_TCC_EN3_MASK (0x3f << 18)
854#define WEIGHT_TCC_EN3_SHIFT 18
855#define CG_CAC_REGION_2_WEIGHT_0 0x85
856#define WEIGHT_CB_EN0(x) ((x) << 0)
857#define WEIGHT_CB_EN0_MASK (0x3f << 0)
858#define WEIGHT_CB_EN0_SHIFT 0
859#define WEIGHT_CB_EN1(x) ((x) << 6)
860#define WEIGHT_CB_EN1_MASK (0x3f << 6)
861#define WEIGHT_CB_EN1_SHIFT 6
862#define WEIGHT_CB_EN2(x) ((x) << 12)
863#define WEIGHT_CB_EN2_MASK (0x3f << 12)
864#define WEIGHT_CB_EN2_SHIFT 12
865#define WEIGHT_CB_EN3(x) ((x) << 18)
866#define WEIGHT_CB_EN3_MASK (0x3f << 18)
867#define WEIGHT_CB_EN3_SHIFT 18
868#define CG_CAC_REGION_2_WEIGHT_1 0x86
869#define WEIGHT_DB_SIG0(x) ((x) << 0)
870#define WEIGHT_DB_SIG0_MASK (0x3f << 0)
871#define WEIGHT_DB_SIG0_SHIFT 0
872#define WEIGHT_DB_SIG1(x) ((x) << 6)
873#define WEIGHT_DB_SIG1_MASK (0x3f << 6)
874#define WEIGHT_DB_SIG1_SHIFT 6
875#define WEIGHT_DB_SIG2(x) ((x) << 12)
876#define WEIGHT_DB_SIG2_MASK (0x3f << 12)
877#define WEIGHT_DB_SIG2_SHIFT 12
878#define WEIGHT_DB_SIG3(x) ((x) << 18)
879#define WEIGHT_DB_SIG3_MASK (0x3f << 18)
880#define WEIGHT_DB_SIG3_SHIFT 18
881#define CG_CAC_REGION_2_WEIGHT_2 0x87
882#define WEIGHT_SXM_SIG0(x) ((x) << 0)
883#define WEIGHT_SXM_SIG0_MASK (0x3f << 0)
884#define WEIGHT_SXM_SIG0_SHIFT 0
885#define WEIGHT_SXM_SIG1(x) ((x) << 6)
886#define WEIGHT_SXM_SIG1_MASK (0x3f << 6)
887#define WEIGHT_SXM_SIG1_SHIFT 6
888#define WEIGHT_SXM_SIG2(x) ((x) << 12)
889#define WEIGHT_SXM_SIG2_MASK (0x3f << 12)
890#define WEIGHT_SXM_SIG2_SHIFT 12
891#define WEIGHT_SXS_SIG0(x) ((x) << 18)
892#define WEIGHT_SXS_SIG0_MASK (0x3f << 18)
893#define WEIGHT_SXS_SIG0_SHIFT 18
894#define WEIGHT_SXS_SIG1(x) ((x) << 24)
895#define WEIGHT_SXS_SIG1_MASK (0x3f << 24)
896#define WEIGHT_SXS_SIG1_SHIFT 24
897#define CG_CAC_REGION_3_WEIGHT_0 0x88
898#define WEIGHT_XBR_0(x) ((x) << 0)
899#define WEIGHT_XBR_0_MASK (0x3f << 0)
900#define WEIGHT_XBR_0_SHIFT 0
901#define WEIGHT_XBR_1(x) ((x) << 6)
902#define WEIGHT_XBR_1_MASK (0x3f << 6)
903#define WEIGHT_XBR_1_SHIFT 6
904#define WEIGHT_XBR_2(x) ((x) << 12)
905#define WEIGHT_XBR_2_MASK (0x3f << 12)
906#define WEIGHT_XBR_2_SHIFT 12
907#define WEIGHT_SPI_SIG0(x) ((x) << 18)
908#define WEIGHT_SPI_SIG0_MASK (0x3f << 18)
909#define WEIGHT_SPI_SIG0_SHIFT 18
910#define CG_CAC_REGION_3_WEIGHT_1 0x89
911#define WEIGHT_SPI_SIG1(x) ((x) << 0)
912#define WEIGHT_SPI_SIG1_MASK (0x3f << 0)
913#define WEIGHT_SPI_SIG1_SHIFT 0
914#define WEIGHT_SPI_SIG2(x) ((x) << 6)
915#define WEIGHT_SPI_SIG2_MASK (0x3f << 6)
916#define WEIGHT_SPI_SIG2_SHIFT 6
917#define WEIGHT_SPI_SIG3(x) ((x) << 12)
918#define WEIGHT_SPI_SIG3_MASK (0x3f << 12)
919#define WEIGHT_SPI_SIG3_SHIFT 12
920#define WEIGHT_SPI_SIG4(x) ((x) << 18)
921#define WEIGHT_SPI_SIG4_MASK (0x3f << 18)
922#define WEIGHT_SPI_SIG4_SHIFT 18
923#define WEIGHT_SPI_SIG5(x) ((x) << 24)
924#define WEIGHT_SPI_SIG5_MASK (0x3f << 24)
925#define WEIGHT_SPI_SIG5_SHIFT 24
926#define CG_CAC_REGION_4_WEIGHT_0 0x8a
927#define WEIGHT_LDS_SIG0(x) ((x) << 0)
928#define WEIGHT_LDS_SIG0_MASK (0x3f << 0)
929#define WEIGHT_LDS_SIG0_SHIFT 0
930#define WEIGHT_LDS_SIG1(x) ((x) << 6)
931#define WEIGHT_LDS_SIG1_MASK (0x3f << 6)
932#define WEIGHT_LDS_SIG1_SHIFT 6
933#define WEIGHT_SC(x) ((x) << 24)
934#define WEIGHT_SC_MASK (0x3f << 24)
935#define WEIGHT_SC_SHIFT 24
936#define CG_CAC_REGION_4_WEIGHT_1 0x8b
937#define WEIGHT_BIF(x) ((x) << 0)
938#define WEIGHT_BIF_MASK (0x3f << 0)
939#define WEIGHT_BIF_SHIFT 0
940#define WEIGHT_CP(x) ((x) << 6)
941#define WEIGHT_CP_MASK (0x3f << 6)
942#define WEIGHT_CP_SHIFT 6
943#define WEIGHT_PA_SIG0(x) ((x) << 12)
944#define WEIGHT_PA_SIG0_MASK (0x3f << 12)
945#define WEIGHT_PA_SIG0_SHIFT 12
946#define WEIGHT_PA_SIG1(x) ((x) << 18)
947#define WEIGHT_PA_SIG1_MASK (0x3f << 18)
948#define WEIGHT_PA_SIG1_SHIFT 18
949#define WEIGHT_VGT_SIG0(x) ((x) << 24)
950#define WEIGHT_VGT_SIG0_MASK (0x3f << 24)
951#define WEIGHT_VGT_SIG0_SHIFT 24
952#define CG_CAC_REGION_4_WEIGHT_2 0x8c
953#define WEIGHT_VGT_SIG1(x) ((x) << 0)
954#define WEIGHT_VGT_SIG1_MASK (0x3f << 0)
955#define WEIGHT_VGT_SIG1_SHIFT 0
956#define WEIGHT_VGT_SIG2(x) ((x) << 6)
957#define WEIGHT_VGT_SIG2_MASK (0x3f << 6)
958#define WEIGHT_VGT_SIG2_SHIFT 6
959#define WEIGHT_DC_SIG0(x) ((x) << 12)
960#define WEIGHT_DC_SIG0_MASK (0x3f << 12)
961#define WEIGHT_DC_SIG0_SHIFT 12
962#define WEIGHT_DC_SIG1(x) ((x) << 18)
963#define WEIGHT_DC_SIG1_MASK (0x3f << 18)
964#define WEIGHT_DC_SIG1_SHIFT 18
965#define WEIGHT_DC_SIG2(x) ((x) << 24)
966#define WEIGHT_DC_SIG2_MASK (0x3f << 24)
967#define WEIGHT_DC_SIG2_SHIFT 24
968#define CG_CAC_REGION_4_WEIGHT_3 0x8d
969#define WEIGHT_DC_SIG3(x) ((x) << 0)
970#define WEIGHT_DC_SIG3_MASK (0x3f << 0)
971#define WEIGHT_DC_SIG3_SHIFT 0
972#define WEIGHT_UVD_SIG0(x) ((x) << 6)
973#define WEIGHT_UVD_SIG0_MASK (0x3f << 6)
974#define WEIGHT_UVD_SIG0_SHIFT 6
975#define WEIGHT_UVD_SIG1(x) ((x) << 12)
976#define WEIGHT_UVD_SIG1_MASK (0x3f << 12)
977#define WEIGHT_UVD_SIG1_SHIFT 12
978#define WEIGHT_SPARE0(x) ((x) << 18)
979#define WEIGHT_SPARE0_MASK (0x3f << 18)
980#define WEIGHT_SPARE0_SHIFT 18
981#define WEIGHT_SPARE1(x) ((x) << 24)
982#define WEIGHT_SPARE1_MASK (0x3f << 24)
983#define WEIGHT_SPARE1_SHIFT 24
984#define CG_CAC_REGION_5_WEIGHT_0 0x8e
985#define WEIGHT_SQ_VSP(x) ((x) << 0)
986#define WEIGHT_SQ_VSP_MASK (0x3fff << 0)
987#define WEIGHT_SQ_VSP_SHIFT 0
988#define WEIGHT_SQ_VSP0(x) ((x) << 14)
989#define WEIGHT_SQ_VSP0_MASK (0x3fff << 14)
990#define WEIGHT_SQ_VSP0_SHIFT 14
991#define CG_CAC_REGION_4_OVERRIDE_4 0xab
992#define OVR_MODE_SPARE_0(x) ((x) << 16)
993#define OVR_MODE_SPARE_0_MASK (0x1 << 16)
994#define OVR_MODE_SPARE_0_SHIFT 16
995#define OVR_VAL_SPARE_0(x) ((x) << 17)
996#define OVR_VAL_SPARE_0_MASK (0x1 << 17)
997#define OVR_VAL_SPARE_0_SHIFT 17
998#define OVR_MODE_SPARE_1(x) ((x) << 18)
999#define OVR_MODE_SPARE_1_MASK (0x3f << 18)
1000#define OVR_MODE_SPARE_1_SHIFT 18
1001#define OVR_VAL_SPARE_1(x) ((x) << 19)
1002#define OVR_VAL_SPARE_1_MASK (0x3f << 19)
1003#define OVR_VAL_SPARE_1_SHIFT 19
1004#define CG_CAC_REGION_5_WEIGHT_1 0xb7
1005#define WEIGHT_SQ_GPR(x) ((x) << 0)
1006#define WEIGHT_SQ_GPR_MASK (0x3fff << 0)
1007#define WEIGHT_SQ_GPR_SHIFT 0
1008#define WEIGHT_SQ_LDS(x) ((x) << 14)
1009#define WEIGHT_SQ_LDS_MASK (0x3fff << 14)
1010#define WEIGHT_SQ_LDS_SHIFT 14
1011
1012/* PCIE link stuff */
1013#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
1014#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
1015# define LC_LINK_WIDTH_SHIFT 0
1016# define LC_LINK_WIDTH_MASK 0x7
1017# define LC_LINK_WIDTH_X0 0
1018# define LC_LINK_WIDTH_X1 1
1019# define LC_LINK_WIDTH_X2 2
1020# define LC_LINK_WIDTH_X4 3
1021# define LC_LINK_WIDTH_X8 4
1022# define LC_LINK_WIDTH_X16 6
1023# define LC_LINK_WIDTH_RD_SHIFT 4
1024# define LC_LINK_WIDTH_RD_MASK 0x70
1025# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
1026# define LC_RECONFIG_NOW (1 << 8)
1027# define LC_RENEGOTIATION_SUPPORT (1 << 9)
1028# define LC_RENEGOTIATE_EN (1 << 10)
1029# define LC_SHORT_RECONFIG_EN (1 << 11)
1030# define LC_UPCONFIGURE_SUPPORT (1 << 12)
1031# define LC_UPCONFIGURE_DIS (1 << 13)
1032#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
1033# define LC_GEN2_EN_STRAP (1 << 0)
1034# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
1035# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
1036# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
1037# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
1038# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
1039# define LC_CURRENT_DATA_RATE (1 << 11)
1040# define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12)
1041# define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12)
1042# define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12
1043# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
1044# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
1045# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
1046# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
1047#define MM_CFGREGS_CNTL 0x544c
1048# define MM_WR_TO_CFG_EN (1 << 3)
1049#define LINK_CNTL2 0x88 /* F0 */
1050# define TARGET_LINK_SPEED_MASK (0xf << 0)
1051# define SELECTABLE_DEEMPHASIS (1 << 6)
1052
492/* 1053/*
493 * UVD 1054 * UVD
494 */ 1055 */
diff --git a/drivers/gpu/drm/radeon/nislands_smc.h b/drivers/gpu/drm/radeon/nislands_smc.h
new file mode 100644
index 000000000000..3cf8fc0d83f4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/nislands_smc.h
@@ -0,0 +1,329 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __NISLANDS_SMC_H__
24#define __NISLANDS_SMC_H__
25
26#pragma pack(push, 1)
27
28#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
29
/* Per-performance-level DPM2 throttle parameters handed to the SMC.
 * Byte-packed (file-wide #pragma pack(1)); 10 used bytes + 6 reserved = 16 bytes.
 * Field semantics are not evident from this header — presumably power-state
 * step/throttle tuning knobs; confirm against the SMC firmware interface docs. */
30struct PP_NIslands_Dpm2PerfLevel
31{
32 uint8_t MaxPS;
33 uint8_t TgtAct;
34 uint8_t MaxPS_StepInc;
35 uint8_t MaxPS_StepDec;
36 uint8_t PSST;
37 uint8_t NearTDPDec;
38 uint8_t AboveSafeInc;
39 uint8_t BelowSafeInc;
40 uint8_t PSDeltaLimit;
41 uint8_t PSDeltaWin;
42 uint8_t Reserved[6]; /* pads the level record to 16 bytes */
43};
44
45typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
46
/* Global DPM2 power limits. Units are not stated here — TODO confirm
 * (likely milliwatts or a firmware-internal unit) against the setters. */
47struct PP_NIslands_DPM2Parameters
48{
49 uint32_t TDPLimit;
50 uint32_t NearTDPLimit;
51 uint32_t SafePowerLimit;
52 uint32_t PowerBoostLimit;
53};
54typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
55
/* Engine-clock programming block: the v-prefixed fields mirror the SPLL
 * control/spread-spectrum registers of the same names (shadow values the SMC
 * writes to hardware — presumably; confirm with the command-path users),
 * plus the resulting target sclk in sclk_value (units not stated here). */
56struct NISLANDS_SMC_SCLK_VALUE
57{
58 uint32_t vCG_SPLL_FUNC_CNTL;
59 uint32_t vCG_SPLL_FUNC_CNTL_2;
60 uint32_t vCG_SPLL_FUNC_CNTL_3;
61 uint32_t vCG_SPLL_FUNC_CNTL_4;
62 uint32_t vCG_SPLL_SPREAD_SPECTRUM;
63 uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
64 uint32_t sclk_value;
65};
66
67typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
68
/* Memory-clock programming block: MPLL/DLL register shadow values plus the
 * target mclk in mclk_value (same caveat on units as sclk_value). */
69struct NISLANDS_SMC_MCLK_VALUE
70{
71 uint32_t vMPLL_FUNC_CNTL;
72 uint32_t vMPLL_FUNC_CNTL_1;
73 uint32_t vMPLL_FUNC_CNTL_2;
74 uint32_t vMPLL_AD_FUNC_CNTL;
75 uint32_t vMPLL_AD_FUNC_CNTL_2;
76 uint32_t vMPLL_DQ_FUNC_CNTL;
77 uint32_t vMPLL_DQ_FUNC_CNTL_2;
78 uint32_t vMCLK_PWRMGT_CNTL;
79 uint32_t vDLL_CNTL;
80 uint32_t vMPLL_SS;
81 uint32_t vMPLL_SS2;
82 uint32_t mclk_value;
83};
84
85typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
86
/* One voltage setting: raw value plus a table index; padded to 4 bytes. */
87struct NISLANDS_SMC_VOLTAGE_VALUE
88{
89 uint16_t value; /* presumably in mV or VID units — confirm with callers */
90 uint8_t index;
91 uint8_t padding;
92};
93
94typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
95
/* One hardware performance level as consumed by the SMC: MC arbitration,
 * display watermark, PCIe gen selection, clock and voltage programming,
 * SQ power throttles, and the embedded DPM2 throttle parameters. */
96struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
97{
98 uint8_t arbValue;
99 uint8_t ACIndex;
100 uint8_t displayWatermark; /* PPSMC_DISPLAY_WATERMARK_LOW/HIGH, presumably — confirm */
101 uint8_t gen2PCIE;
102 uint8_t reserved1;
103 uint8_t reserved2;
104 uint8_t strobeMode; /* NISLANDS_SMC_STROBE_* bits below */
105 uint8_t mcFlags; /* NISLANDS_SMC_MC_* bits below */
106 uint32_t aT;
107 uint32_t bSP;
108 NISLANDS_SMC_SCLK_VALUE sclk;
109 NISLANDS_SMC_MCLK_VALUE mclk;
110 NISLANDS_SMC_VOLTAGE_VALUE vddc;
111 NISLANDS_SMC_VOLTAGE_VALUE mvdd;
112 NISLANDS_SMC_VOLTAGE_VALUE vddci;
113 NISLANDS_SMC_VOLTAGE_VALUE std_vddc;
114 uint32_t powergate_en;
115 uint8_t hUp;
116 uint8_t hDown;
117 uint8_t stateFlags; /* PPSMC_STATEFLAG_* values, presumably — confirm */
118 uint8_t arbRefreshState;
119 uint32_t SQPowerThrottle;
120 uint32_t SQPowerThrottle_2;
121 uint32_t reserved[2];
122 PP_NIslands_Dpm2PerfLevel dpm2;
123};
124
/* strobeMode encoding: low 4 bits are the ratio, bit 4 enables strobe mode. */
125#define NISLANDS_SMC_STROBE_RATIO 0x0F
126#define NISLANDS_SMC_STROBE_ENABLE 0x10
127
/* mcFlags bits. */
128#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01
129#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02
130#define NISLANDS_SMC_MC_RTT_ENABLE 0x04
131#define NISLANDS_SMC_MC_STUTTER_EN 0x08
132
133typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
134
/* A software power state: flags + a variable number of performance levels.
 * levels[1] is the C89 flexible-array idiom — the real element count is in
 * levelCount (presumably up to NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE;
 * confirm with the state-table layout below). */
135struct NISLANDS_SMC_SWSTATE
136{
137 uint8_t flags; /* PPSMC_SWSTATE_FLAG_* values, presumably — confirm */
138 uint8_t levelCount;
139 uint8_t padding2;
140 uint8_t padding3;
141 NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1];
142};
143
144typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
145
/* Indices into the voltage mask table. Only three rails are named but MAX
 * is 4 — index 3 is apparently unused/reserved padding in the arrays. */
146#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0
147#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1
148#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
149#define NISLANDS_SMC_VOLTAGEMASK_MAX 4
150
151struct NISLANDS_SMC_VOLTAGEMASKTABLE
152{
153 uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
154 uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
155};
156
157typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
158
159#define NISLANDS_MAX_NO_VREG_STEPS 32
160
/* Top-level state table uploaded to the SMC: global flags, SMIO voltage
 * steps, the fixed states (initial/ACPI/ULV) and the driver state.
 * NOTE(review): dpmLevels[MAX - 1] sits directly after driverState, whose
 * levels[1] provides the first entry — together they presumably form the
 * driver state's full array of up to 16 levels; confirm with the code that
 * fills this table before relying on the layout. */
161struct NISLANDS_SMC_STATETABLE
162{
163 uint8_t thermalProtectType; /* PPSMC_THERMAL_PROTECT_TYPE_*, presumably — confirm */
164 uint8_t systemFlags; /* PPSMC_SYSTEMFLAG_*, presumably — confirm */
165 uint8_t maxVDDCIndexInPPTable;
166 uint8_t extraFlags;
167 uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
168 uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
169 NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
170 PP_NIslands_DPM2Parameters dpm2Params;
171 NISLANDS_SMC_SWSTATE initialState;
172 NISLANDS_SMC_SWSTATE ACPIState;
173 NISLANDS_SMC_SWSTATE ULVState;
174 NISLANDS_SMC_SWSTATE driverState;
175 NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
176};
177
178typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
179
/* SMC "soft register" scratch area. The per-register values below are byte
 * offsets — presumably relative to NI_SMC_SOFT_REGISTERS_START; confirm with
 * the read/write helpers that consume them. */
180#define NI_SMC_SOFT_REGISTERS_START 0x108
181
182#define NI_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0
183#define NI_SMC_SOFT_REGISTER_delay_bbias 0xC
184#define NI_SMC_SOFT_REGISTER_delay_vreg 0x10
185#define NI_SMC_SOFT_REGISTER_delay_acpi 0x2C
186#define NI_SMC_SOFT_REGISTER_seq_index 0x64
187#define NI_SMC_SOFT_REGISTER_mvdd_chg_time 0x68
188#define NI_SMC_SOFT_REGISTER_mclk_switch_lim 0x78
189#define NI_SMC_SOFT_REGISTER_watermark_threshold 0x80
190#define NI_SMC_SOFT_REGISTER_mc_block_delay 0x84
191#define NI_SMC_SOFT_REGISTER_uvd_enabled 0x98
192
/* Entry counts for the CAC lookup tables defined below. */
193#define SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES 16
194#define SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
195#define SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
196#define SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES 4
197
/* Memory-controller TPP/CAC lookup table (16 parallel tpp/cacValue pairs). */
198struct SMC_NISLANDS_MC_TPP_CAC_TABLE
199{
200 uint32_t tpp[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES];
201 uint32_t cacValue[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES];
202};
203
204typedef struct SMC_NISLANDS_MC_TPP_CAC_TABLE SMC_NISLANDS_MC_TPP_CAC_TABLE;
205
206
/* CAC (power estimation) configuration: BIF and leakage LUTs, window-average
 * settings, and running/averaged power accumulators the SMC maintains.
 * The lkge LUT is indexed [temperature][voltage], judging by the entry-count
 * names — confirm with the table-fill code. */
207struct PP_NIslands_CACTABLES
208{
209 uint32_t cac_bif_lut[SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES];
210 uint32_t cac_lkge_lut[SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
211
212 uint32_t pwr_const;
213
214 uint32_t dc_cacValue;
215 uint32_t bif_cacValue;
216 uint32_t lkge_pwr;
217
218 uint8_t cac_width;
219 uint8_t window_size_p2; /* window size, presumably as a power of two — confirm */
220
221 uint8_t num_drop_lsb;
222 uint8_t padding_0;
223
224 uint32_t last_power;
225
226 uint8_t AllowOvrflw;
227 uint8_t MCWrWeight;
228 uint8_t MCRdWeight;
229 uint8_t padding_1[9];
230
231 uint8_t enableWinAvg;
232 uint8_t numWin_TDP;
233 uint8_t l2numWin_TDP;
234 uint8_t WinIndex;
235
236 uint32_t dynPwr_TDP[4];
237 uint32_t lkgePwr_TDP[4];
238 uint32_t power_TDP[4];
239 uint32_t avg_dynPwr_TDP;
240 uint32_t avg_lkgePwr_TDP;
241 uint32_t avg_power_TDP;
242 uint32_t lts_power_TDP;
243 uint8_t lts_truncate_n;
244 uint8_t padding_2[7];
245};
246
247typedef struct PP_NIslands_CACTABLES PP_NIslands_CACTABLES;
248
249#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
250#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
251
/* One MC register address; two 16-bit halves (s0/s1) — their exact meaning
 * is not evident from this header, TODO confirm with the users. */
252struct SMC_NIslands_MCRegisterAddress
253{
254 uint16_t s0;
255 uint16_t s1;
256};
257
258typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
259
260
/* One set of values, parallel to the address[] array below. */
261struct SMC_NIslands_MCRegisterSet
262{
263 uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
264};
265
266typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
267
/* MC register save table: up to 32 addresses ('last' = number in use,
 * presumably — confirm) and 20 value sets. */
268struct SMC_NIslands_MCRegisters
269{
270 uint8_t last;
271 uint8_t reserved[3];
272 SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
273 SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
274};
275
276typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
277
/* One DRAM arbiter timing set: the two MC_ARB_DRAM_TIMING registers plus
 * the refresh rate, padded to a 12-byte record. */
278struct SMC_NIslands_MCArbDramTimingRegisterSet
279{
280 uint32_t mc_arb_dram_timing;
281 uint32_t mc_arb_dram_timing2;
282 uint8_t mc_arb_rfsh_rate;
283 uint8_t padding[3];
284};
285
286typedef struct SMC_NIslands_MCArbDramTimingRegisterSet SMC_NIslands_MCArbDramTimingRegisterSet;
287
/* NOTE(review): data[20] hard-codes the same count that
 * SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT names — keep the two in sync. */
288struct SMC_NIslands_MCArbDramTimingRegisters
289{
290 uint8_t arb_current;
291 uint8_t reserved[3];
292 SMC_NIslands_MCArbDramTimingRegisterSet data[20];
293};
294
295typedef struct SMC_NIslands_MCArbDramTimingRegisters SMC_NIslands_MCArbDramTimingRegisters;
296
/* SPLL divider table: 256 packed entries. Per the masks below, each freq
 * word packs fbdiv in bits 0-24 and pdiv in bits 25-31; each ss word packs
 * clkv in bits 0-19 and clks in bits 20-31. */
297struct SMC_NISLANDS_SPLL_DIV_TABLE
298{
299 uint32_t freq[256];
300 uint32_t ss[256];
301};
302
303#define SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK 0x01ffffff
304#define SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
305#define SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK 0xfe000000
306#define SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT 25
307#define SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK 0x000fffff
308#define SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT 0
309#define SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK 0xfff00000
310#define SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT 20
311
312typedef struct SMC_NISLANDS_SPLL_DIV_TABLE SMC_NISLANDS_SPLL_DIV_TABLE;
313
/* Firmware header: table-pointer slots at fixed offsets from the header
 * location. Offsets 0x18/0x1C/0x24/0x28 are not named here — presumably
 * reserved or unused on this asic family; confirm before reusing them. */
314#define NISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x100
315
316#define NISLANDS_SMC_FIRMWARE_HEADER_version 0x0
317#define NISLANDS_SMC_FIRMWARE_HEADER_flags 0x4
318#define NISLANDS_SMC_FIRMWARE_HEADER_softRegisters 0x8
319#define NISLANDS_SMC_FIRMWARE_HEADER_stateTable 0xC
320#define NISLANDS_SMC_FIRMWARE_HEADER_fanTable 0x10
321#define NISLANDS_SMC_FIRMWARE_HEADER_cacTable 0x14
322#define NISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
323#define NISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x2C
324#define NISLANDS_SMC_FIRMWARE_HEADER_spllTable 0x30
325
326#pragma pack(pop)
327
328#endif
329
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
new file mode 100644
index 000000000000..8fb1113a8fd7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef PP_SMC_H
24#define PP_SMC_H
25
26#pragma pack(push, 1)
27
/* Flags for a software power state (SWSTATE.flags). */
28#define PPSMC_SWSTATE_FLAG_DC 0x01
29#define PPSMC_SWSTATE_FLAG_UVD 0x02
30#define PPSMC_SWSTATE_FLAG_VCE 0x04
31#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08
32
/* Thermal protection source selection. */
33#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
34#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
35#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
36
/* Board/system capability flags. */
37#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
38#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
39#define PPSMC_SYSTEMFLAG_GDDR5 0x04
40#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
41#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
42#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
43#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO 0x40
44
/* AC->DC transition behavior: low 3 bits select the action. */
45#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
46#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
47#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
48#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
49#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x02
50
51#define PPSMC_DISPLAY_WATERMARK_LOW 0
52#define PPSMC_DISPLAY_WATERMARK_HIGH 1
53
/* Per-performance-level state flags (note the gap: 0x04/0x08/0x10 unused here). */
54#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
55#define PPSMC_STATEFLAG_POWERBOOST 0x02
56#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
57#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
58
/* SMC command result codes. */
59#define PPSMC_Result_OK ((uint8_t)0x01)
60#define PPSMC_Result_Failed ((uint8_t)0xFF)
61
62typedef uint8_t PPSMC_Result;
63
/* Driver->SMC message IDs. */
64#define PPSMC_MSG_Halt ((uint8_t)0x10)
65#define PPSMC_MSG_Resume ((uint8_t)0x11)
66#define PPSMC_MSG_ZeroLevelsDisabled ((uint8_t)0x13)
67#define PPSMC_MSG_OneLevelsDisabled ((uint8_t)0x14)
68#define PPSMC_MSG_TwoLevelsDisabled ((uint8_t)0x15)
69#define PPSMC_MSG_EnableThermalInterrupt ((uint8_t)0x16)
70#define PPSMC_MSG_RunningOnAC ((uint8_t)0x17)
71#define PPSMC_MSG_SwitchToSwState ((uint8_t)0x20)
72#define PPSMC_MSG_SwitchToInitialState ((uint8_t)0x40)
73#define PPSMC_MSG_NoForcedLevel ((uint8_t)0x41)
74#define PPSMC_MSG_SwitchToMinimumPower ((uint8_t)0x51)
75#define PPSMC_MSG_ResumeFromMinimumPower ((uint8_t)0x52)
76#define PPSMC_MSG_EnableCac ((uint8_t)0x53)
77#define PPSMC_MSG_DisableCac ((uint8_t)0x54)
78#define PPSMC_TDPClampingActive ((uint8_t)0x59)
79#define PPSMC_TDPClampingInactive ((uint8_t)0x5A)
80#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D)
81#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E)
82#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60)
83#define PPSMC_MSG_UVDPowerON ((uint8_t)0x61)
84#define PPSMC_MSG_EnableULV ((uint8_t)0x62)
85#define PPSMC_MSG_DisableULV ((uint8_t)0x63)
86#define PPSMC_MSG_EnterULV ((uint8_t)0x64)
87#define PPSMC_MSG_ExitULV ((uint8_t)0x65)
88#define PPSMC_CACLongTermAvgEnable ((uint8_t)0x6E)
89#define PPSMC_CACLongTermAvgDisable ((uint8_t)0x6F)
90#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint8_t)0x7A)
91#define PPSMC_FlushDataCache ((uint8_t)0x80)
92#define PPSMC_MSG_SetEnabledLevels ((uint8_t)0x82)
93#define PPSMC_MSG_SetForcedLevels ((uint8_t)0x83)
94#define PPSMC_MSG_ResetToDefaults ((uint8_t)0x84)
95#define PPSMC_MSG_EnableDTE ((uint8_t)0x87)
96#define PPSMC_MSG_DisableDTE ((uint8_t)0x88)
97#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96)
98#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
99
100/* TN */
/* NOTE(review): the TN (Trinity) message IDs exceed 8 bits, hence the
 * uint32_t casts, while PPSMC_Msg itself is uint16_t — the mixed widths
 * look intentional but are worth confirming against the message writers. */
101#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
102#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104)
103#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108)
104#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
105#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
106#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
107
108
109typedef uint16_t PPSMC_Msg;
110
111#pragma pack(pop)
112
113#endif
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6948eb88c2b7..2d3655f7f41e 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -38,18 +38,7 @@
38#include "r600d.h" 38#include "r600d.h"
39#include "atom.h" 39#include "atom.h"
40#include "avivod.h" 40#include "avivod.h"
41 41#include "radeon_ucode.h"
42#define PFP_UCODE_SIZE 576
43#define PM4_UCODE_SIZE 1792
44#define RLC_UCODE_SIZE 768
45#define R700_PFP_UCODE_SIZE 848
46#define R700_PM4_UCODE_SIZE 1360
47#define R700_RLC_UCODE_SIZE 1024
48#define EVERGREEN_PFP_UCODE_SIZE 1120
49#define EVERGREEN_PM4_UCODE_SIZE 1376
50#define EVERGREEN_RLC_UCODE_SIZE 768
51#define CAYMAN_RLC_UCODE_SIZE 1024
52#define ARUBA_RLC_UCODE_SIZE 1536
53 42
54/* Firmware Names */ 43/* Firmware Names */
55MODULE_FIRMWARE("radeon/R600_pfp.bin"); 44MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -68,24 +57,32 @@ MODULE_FIRMWARE("radeon/RS780_pfp.bin");
68MODULE_FIRMWARE("radeon/RS780_me.bin"); 57MODULE_FIRMWARE("radeon/RS780_me.bin");
69MODULE_FIRMWARE("radeon/RV770_pfp.bin"); 58MODULE_FIRMWARE("radeon/RV770_pfp.bin");
70MODULE_FIRMWARE("radeon/RV770_me.bin"); 59MODULE_FIRMWARE("radeon/RV770_me.bin");
60MODULE_FIRMWARE("radeon/RV770_smc.bin");
71MODULE_FIRMWARE("radeon/RV730_pfp.bin"); 61MODULE_FIRMWARE("radeon/RV730_pfp.bin");
72MODULE_FIRMWARE("radeon/RV730_me.bin"); 62MODULE_FIRMWARE("radeon/RV730_me.bin");
63MODULE_FIRMWARE("radeon/RV730_smc.bin");
64MODULE_FIRMWARE("radeon/RV740_smc.bin");
73MODULE_FIRMWARE("radeon/RV710_pfp.bin"); 65MODULE_FIRMWARE("radeon/RV710_pfp.bin");
74MODULE_FIRMWARE("radeon/RV710_me.bin"); 66MODULE_FIRMWARE("radeon/RV710_me.bin");
67MODULE_FIRMWARE("radeon/RV710_smc.bin");
75MODULE_FIRMWARE("radeon/R600_rlc.bin"); 68MODULE_FIRMWARE("radeon/R600_rlc.bin");
76MODULE_FIRMWARE("radeon/R700_rlc.bin"); 69MODULE_FIRMWARE("radeon/R700_rlc.bin");
77MODULE_FIRMWARE("radeon/CEDAR_pfp.bin"); 70MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
78MODULE_FIRMWARE("radeon/CEDAR_me.bin"); 71MODULE_FIRMWARE("radeon/CEDAR_me.bin");
79MODULE_FIRMWARE("radeon/CEDAR_rlc.bin"); 72MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
73MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
80MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin"); 74MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
81MODULE_FIRMWARE("radeon/REDWOOD_me.bin"); 75MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
82MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin"); 76MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
77MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
83MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin"); 78MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
84MODULE_FIRMWARE("radeon/JUNIPER_me.bin"); 79MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
85MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin"); 80MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
81MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
86MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); 82MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
87MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); 83MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
88MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); 84MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
85MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
89MODULE_FIRMWARE("radeon/PALM_pfp.bin"); 86MODULE_FIRMWARE("radeon/PALM_pfp.bin");
90MODULE_FIRMWARE("radeon/PALM_me.bin"); 87MODULE_FIRMWARE("radeon/PALM_me.bin");
91MODULE_FIRMWARE("radeon/SUMO_rlc.bin"); 88MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
@@ -108,6 +105,7 @@ static void r600_gpu_init(struct radeon_device *rdev);
108void r600_fini(struct radeon_device *rdev); 105void r600_fini(struct radeon_device *rdev);
109void r600_irq_disable(struct radeon_device *rdev); 106void r600_irq_disable(struct radeon_device *rdev);
110static void r600_pcie_gen2_enable(struct radeon_device *rdev); 107static void r600_pcie_gen2_enable(struct radeon_device *rdev);
108extern int evergreen_rlc_resume(struct radeon_device *rdev);
111 109
112/** 110/**
113 * r600_get_xclk - get the xclk 111 * r600_get_xclk - get the xclk
@@ -2149,7 +2147,8 @@ int r600_init_microcode(struct radeon_device *rdev)
2149 struct platform_device *pdev; 2147 struct platform_device *pdev;
2150 const char *chip_name; 2148 const char *chip_name;
2151 const char *rlc_chip_name; 2149 const char *rlc_chip_name;
2152 size_t pfp_req_size, me_req_size, rlc_req_size; 2150 const char *smc_chip_name = "RV770";
2151 size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
2153 char fw_name[30]; 2152 char fw_name[30];
2154 int err; 2153 int err;
2155 2154
@@ -2195,32 +2194,51 @@ int r600_init_microcode(struct radeon_device *rdev)
2195 case CHIP_RV770: 2194 case CHIP_RV770:
2196 chip_name = "RV770"; 2195 chip_name = "RV770";
2197 rlc_chip_name = "R700"; 2196 rlc_chip_name = "R700";
2197 smc_chip_name = "RV770";
2198 smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
2198 break; 2199 break;
2199 case CHIP_RV730: 2200 case CHIP_RV730:
2200 case CHIP_RV740:
2201 chip_name = "RV730"; 2201 chip_name = "RV730";
2202 rlc_chip_name = "R700"; 2202 rlc_chip_name = "R700";
2203 smc_chip_name = "RV730";
2204 smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
2203 break; 2205 break;
2204 case CHIP_RV710: 2206 case CHIP_RV710:
2205 chip_name = "RV710"; 2207 chip_name = "RV710";
2206 rlc_chip_name = "R700"; 2208 rlc_chip_name = "R700";
2209 smc_chip_name = "RV710";
2210 smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
2211 break;
2212 case CHIP_RV740:
2213 chip_name = "RV730";
2214 rlc_chip_name = "R700";
2215 smc_chip_name = "RV740";
2216 smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
2207 break; 2217 break;
2208 case CHIP_CEDAR: 2218 case CHIP_CEDAR:
2209 chip_name = "CEDAR"; 2219 chip_name = "CEDAR";
2210 rlc_chip_name = "CEDAR"; 2220 rlc_chip_name = "CEDAR";
2221 smc_chip_name = "CEDAR";
2222 smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
2211 break; 2223 break;
2212 case CHIP_REDWOOD: 2224 case CHIP_REDWOOD:
2213 chip_name = "REDWOOD"; 2225 chip_name = "REDWOOD";
2214 rlc_chip_name = "REDWOOD"; 2226 rlc_chip_name = "REDWOOD";
2227 smc_chip_name = "REDWOOD";
2228 smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
2215 break; 2229 break;
2216 case CHIP_JUNIPER: 2230 case CHIP_JUNIPER:
2217 chip_name = "JUNIPER"; 2231 chip_name = "JUNIPER";
2218 rlc_chip_name = "JUNIPER"; 2232 rlc_chip_name = "JUNIPER";
2233 smc_chip_name = "JUNIPER";
2234 smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
2219 break; 2235 break;
2220 case CHIP_CYPRESS: 2236 case CHIP_CYPRESS:
2221 case CHIP_HEMLOCK: 2237 case CHIP_HEMLOCK:
2222 chip_name = "CYPRESS"; 2238 chip_name = "CYPRESS";
2223 rlc_chip_name = "CYPRESS"; 2239 rlc_chip_name = "CYPRESS";
2240 smc_chip_name = "CYPRESS";
2241 smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
2224 break; 2242 break;
2225 case CHIP_PALM: 2243 case CHIP_PALM:
2226 chip_name = "PALM"; 2244 chip_name = "PALM";
@@ -2246,9 +2264,9 @@ int r600_init_microcode(struct radeon_device *rdev)
2246 me_req_size = R700_PM4_UCODE_SIZE * 4; 2264 me_req_size = R700_PM4_UCODE_SIZE * 4;
2247 rlc_req_size = R700_RLC_UCODE_SIZE * 4; 2265 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2248 } else { 2266 } else {
2249 pfp_req_size = PFP_UCODE_SIZE * 4; 2267 pfp_req_size = R600_PFP_UCODE_SIZE * 4;
2250 me_req_size = PM4_UCODE_SIZE * 12; 2268 me_req_size = R600_PM4_UCODE_SIZE * 12;
2251 rlc_req_size = RLC_UCODE_SIZE * 4; 2269 rlc_req_size = R600_RLC_UCODE_SIZE * 4;
2252 } 2270 }
2253 2271
2254 DRM_INFO("Loading %s Microcode\n", chip_name); 2272 DRM_INFO("Loading %s Microcode\n", chip_name);
@@ -2287,6 +2305,19 @@ int r600_init_microcode(struct radeon_device *rdev)
2287 err = -EINVAL; 2305 err = -EINVAL;
2288 } 2306 }
2289 2307
2308 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2309 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
2310 err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev);
2311 if (err)
2312 goto out;
2313 if (rdev->smc_fw->size != smc_req_size) {
2314 printk(KERN_ERR
2315 "smc: Bogus length %zu in firmware \"%s\"\n",
2316 rdev->smc_fw->size, fw_name);
2317 err = -EINVAL;
2318 }
2319 }
2320
2290out: 2321out:
2291 platform_device_unregister(pdev); 2322 platform_device_unregister(pdev);
2292 2323
@@ -2301,6 +2332,8 @@ out:
2301 rdev->me_fw = NULL; 2332 rdev->me_fw = NULL;
2302 release_firmware(rdev->rlc_fw); 2333 release_firmware(rdev->rlc_fw);
2303 rdev->rlc_fw = NULL; 2334 rdev->rlc_fw = NULL;
2335 release_firmware(rdev->smc_fw);
2336 rdev->smc_fw = NULL;
2304 } 2337 }
2305 return err; 2338 return err;
2306} 2339}
@@ -2331,13 +2364,13 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
2331 2364
2332 fw_data = (const __be32 *)rdev->me_fw->data; 2365 fw_data = (const __be32 *)rdev->me_fw->data;
2333 WREG32(CP_ME_RAM_WADDR, 0); 2366 WREG32(CP_ME_RAM_WADDR, 0);
2334 for (i = 0; i < PM4_UCODE_SIZE * 3; i++) 2367 for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
2335 WREG32(CP_ME_RAM_DATA, 2368 WREG32(CP_ME_RAM_DATA,
2336 be32_to_cpup(fw_data++)); 2369 be32_to_cpup(fw_data++));
2337 2370
2338 fw_data = (const __be32 *)rdev->pfp_fw->data; 2371 fw_data = (const __be32 *)rdev->pfp_fw->data;
2339 WREG32(CP_PFP_UCODE_ADDR, 0); 2372 WREG32(CP_PFP_UCODE_ADDR, 0);
2340 for (i = 0; i < PFP_UCODE_SIZE; i++) 2373 for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
2341 WREG32(CP_PFP_UCODE_DATA, 2374 WREG32(CP_PFP_UCODE_DATA,
2342 be32_to_cpup(fw_data++)); 2375 be32_to_cpup(fw_data++));
2343 2376
@@ -3789,7 +3822,7 @@ static void r600_rlc_start(struct radeon_device *rdev)
3789 WREG32(RLC_CNTL, RLC_ENABLE); 3822 WREG32(RLC_CNTL, RLC_ENABLE);
3790} 3823}
3791 3824
3792static int r600_rlc_init(struct radeon_device *rdev) 3825static int r600_rlc_resume(struct radeon_device *rdev)
3793{ 3826{
3794 u32 i; 3827 u32 i;
3795 const __be32 *fw_data; 3828 const __be32 *fw_data;
@@ -3801,45 +3834,22 @@ static int r600_rlc_init(struct radeon_device *rdev)
3801 3834
3802 WREG32(RLC_HB_CNTL, 0); 3835 WREG32(RLC_HB_CNTL, 0);
3803 3836
3804 if (rdev->family == CHIP_ARUBA) { 3837 WREG32(RLC_HB_BASE, 0);
3805 WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); 3838 WREG32(RLC_HB_RPTR, 0);
3806 WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); 3839 WREG32(RLC_HB_WPTR, 0);
3807 } 3840 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
3808 if (rdev->family <= CHIP_CAYMAN) { 3841 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
3809 WREG32(RLC_HB_BASE, 0);
3810 WREG32(RLC_HB_RPTR, 0);
3811 WREG32(RLC_HB_WPTR, 0);
3812 }
3813 if (rdev->family <= CHIP_CAICOS) {
3814 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
3815 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
3816 }
3817 WREG32(RLC_MC_CNTL, 0); 3842 WREG32(RLC_MC_CNTL, 0);
3818 WREG32(RLC_UCODE_CNTL, 0); 3843 WREG32(RLC_UCODE_CNTL, 0);
3819 3844
3820 fw_data = (const __be32 *)rdev->rlc_fw->data; 3845 fw_data = (const __be32 *)rdev->rlc_fw->data;
3821 if (rdev->family >= CHIP_ARUBA) { 3846 if (rdev->family >= CHIP_RV770) {
3822 for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
3823 WREG32(RLC_UCODE_ADDR, i);
3824 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3825 }
3826 } else if (rdev->family >= CHIP_CAYMAN) {
3827 for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
3828 WREG32(RLC_UCODE_ADDR, i);
3829 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3830 }
3831 } else if (rdev->family >= CHIP_CEDAR) {
3832 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
3833 WREG32(RLC_UCODE_ADDR, i);
3834 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3835 }
3836 } else if (rdev->family >= CHIP_RV770) {
3837 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { 3847 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
3838 WREG32(RLC_UCODE_ADDR, i); 3848 WREG32(RLC_UCODE_ADDR, i);
3839 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3849 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3840 } 3850 }
3841 } else { 3851 } else {
3842 for (i = 0; i < RLC_UCODE_SIZE; i++) { 3852 for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
3843 WREG32(RLC_UCODE_ADDR, i); 3853 WREG32(RLC_UCODE_ADDR, i);
3844 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3854 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3845 } 3855 }
@@ -3947,7 +3957,10 @@ int r600_irq_init(struct radeon_device *rdev)
3947 r600_disable_interrupts(rdev); 3957 r600_disable_interrupts(rdev);
3948 3958
3949 /* init rlc */ 3959 /* init rlc */
3950 ret = r600_rlc_init(rdev); 3960 if (rdev->family >= CHIP_CEDAR)
3961 ret = evergreen_rlc_resume(rdev);
3962 else
3963 ret = r600_rlc_resume(rdev);
3951 if (ret) { 3964 if (ret) {
3952 r600_ih_ring_fini(rdev); 3965 r600_ih_ring_fini(rdev);
3953 return ret; 3966 return ret;
@@ -4028,6 +4041,7 @@ int r600_irq_set(struct radeon_device *rdev)
4028 u32 hdmi0, hdmi1; 4041 u32 hdmi0, hdmi1;
4029 u32 d1grph = 0, d2grph = 0; 4042 u32 d1grph = 0, d2grph = 0;
4030 u32 dma_cntl; 4043 u32 dma_cntl;
4044 u32 thermal_int = 0;
4031 4045
4032 if (!rdev->irq.installed) { 4046 if (!rdev->irq.installed) {
4033 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 4047 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -4062,8 +4076,21 @@ int r600_irq_set(struct radeon_device *rdev)
4062 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 4076 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
4063 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 4077 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
4064 } 4078 }
4079
4065 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE; 4080 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
4066 4081
4082 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
4083 thermal_int = RREG32(CG_THERMAL_INT) &
4084 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
4085 } else if (rdev->family >= CHIP_RV770) {
4086 thermal_int = RREG32(RV770_CG_THERMAL_INT) &
4087 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
4088 }
4089 if (rdev->irq.dpm_thermal) {
4090 DRM_DEBUG("dpm thermal\n");
4091 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
4092 }
4093
4067 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 4094 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
4068 DRM_DEBUG("r600_irq_set: sw int\n"); 4095 DRM_DEBUG("r600_irq_set: sw int\n");
4069 cp_int_cntl |= RB_INT_ENABLE; 4096 cp_int_cntl |= RB_INT_ENABLE;
@@ -4145,6 +4172,11 @@ int r600_irq_set(struct radeon_device *rdev)
4145 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0); 4172 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
4146 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1); 4173 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
4147 } 4174 }
4175 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
4176 WREG32(CG_THERMAL_INT, thermal_int);
4177 } else if (rdev->family >= CHIP_RV770) {
4178 WREG32(RV770_CG_THERMAL_INT, thermal_int);
4179 }
4148 4180
4149 return 0; 4181 return 0;
4150} 4182}
@@ -4336,6 +4368,7 @@ int r600_irq_process(struct radeon_device *rdev)
4336 u32 ring_index; 4368 u32 ring_index;
4337 bool queue_hotplug = false; 4369 bool queue_hotplug = false;
4338 bool queue_hdmi = false; 4370 bool queue_hdmi = false;
4371 bool queue_thermal = false;
4339 4372
4340 if (!rdev->ih.enabled || rdev->shutdown) 4373 if (!rdev->ih.enabled || rdev->shutdown)
4341 return IRQ_NONE; 4374 return IRQ_NONE;
@@ -4503,6 +4536,16 @@ restart_ih:
4503 DRM_DEBUG("IH: DMA trap\n"); 4536 DRM_DEBUG("IH: DMA trap\n");
4504 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); 4537 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
4505 break; 4538 break;
4539 case 230: /* thermal low to high */
4540 DRM_DEBUG("IH: thermal low to high\n");
4541 rdev->pm.dpm.thermal.high_to_low = false;
4542 queue_thermal = true;
4543 break;
4544 case 231: /* thermal high to low */
4545 DRM_DEBUG("IH: thermal high to low\n");
4546 rdev->pm.dpm.thermal.high_to_low = true;
4547 queue_thermal = true;
4548 break;
4506 case 233: /* GUI IDLE */ 4549 case 233: /* GUI IDLE */
4507 DRM_DEBUG("IH: GUI idle\n"); 4550 DRM_DEBUG("IH: GUI idle\n");
4508 break; 4551 break;
@@ -4519,6 +4562,8 @@ restart_ih:
4519 schedule_work(&rdev->hotplug_work); 4562 schedule_work(&rdev->hotplug_work);
4520 if (queue_hdmi) 4563 if (queue_hdmi)
4521 schedule_work(&rdev->audio_work); 4564 schedule_work(&rdev->audio_work);
4565 if (queue_thermal && rdev->pm.dpm_enabled)
4566 schedule_work(&rdev->pm.dpm.thermal.work);
4522 rdev->ih.rptr = rptr; 4567 rdev->ih.rptr = rptr;
4523 WREG32(IH_RB_RPTR, rdev->ih.rptr); 4568 WREG32(IH_RB_RPTR, rdev->ih.rptr);
4524 atomic_set(&rdev->ih.lock, 0); 4569 atomic_set(&rdev->ih.lock, 0);
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
new file mode 100644
index 000000000000..76368c04f809
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -0,0 +1,1024 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "r600d.h"
28#include "r600_dpm.h"
29#include "atom.h"
30
/* Default up-trend time constants, one per time-constant slot
 * (R600_PM_NUMBER_OF_TC); written into the CG_FFCT_n UTC fields
 * by r600_set_tc(). */
const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
	R600_UTC_DFLT_00,
	R600_UTC_DFLT_01,
	R600_UTC_DFLT_02,
	R600_UTC_DFLT_03,
	R600_UTC_DFLT_04,
	R600_UTC_DFLT_05,
	R600_UTC_DFLT_06,
	R600_UTC_DFLT_07,
	R600_UTC_DFLT_08,
	R600_UTC_DFLT_09,
	R600_UTC_DFLT_10,
	R600_UTC_DFLT_11,
	R600_UTC_DFLT_12,
	R600_UTC_DFLT_13,
	R600_UTC_DFLT_14,
};
49
/* Default down-trend time constants, one per time-constant slot
 * (R600_PM_NUMBER_OF_TC); written into the CG_FFCT_n DTC fields
 * by r600_set_tc(). */
const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
	R600_DTC_DFLT_00,
	R600_DTC_DFLT_01,
	R600_DTC_DFLT_02,
	R600_DTC_DFLT_03,
	R600_DTC_DFLT_04,
	R600_DTC_DFLT_05,
	R600_DTC_DFLT_06,
	R600_DTC_DFLT_07,
	R600_DTC_DFLT_08,
	R600_DTC_DFLT_09,
	R600_DTC_DFLT_10,
	R600_DTC_DFLT_11,
	R600_DTC_DFLT_12,
	R600_DTC_DFLT_13,
	R600_DTC_DFLT_14,
};
68
69void r600_dpm_print_class_info(u32 class, u32 class2)
70{
71 printk("\tui class: ");
72 switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
73 case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
74 default:
75 printk("none\n");
76 break;
77 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
78 printk("battery\n");
79 break;
80 case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
81 printk("balanced\n");
82 break;
83 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
84 printk("performance\n");
85 break;
86 }
87 printk("\tinternal class: ");
88 if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
89 (class2 == 0))
90 printk("none");
91 else {
92 if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
93 printk("boot ");
94 if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
95 printk("thermal ");
96 if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
97 printk("limited_pwr ");
98 if (class & ATOM_PPLIB_CLASSIFICATION_REST)
99 printk("rest ");
100 if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
101 printk("forced ");
102 if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
103 printk("3d_perf ");
104 if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
105 printk("ovrdrv ");
106 if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
107 printk("uvd ");
108 if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
109 printk("3d_low ");
110 if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
111 printk("acpi ");
112 if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
113 printk("uvd_hd2 ");
114 if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
115 printk("uvd_hd ");
116 if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
117 printk("uvd_sd ");
118 if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
119 printk("limited_pwr2 ");
120 if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
121 printk("ulv ");
122 if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
123 printk("uvd_mvc ");
124 }
125 printk("\n");
126}
127
128void r600_dpm_print_cap_info(u32 caps)
129{
130 printk("\tcaps: ");
131 if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
132 printk("single_disp ");
133 if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
134 printk("video ");
135 if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
136 printk("no_dc ");
137 printk("\n");
138}
139
/* Print which of the driver's bookkeeping slots this power state
 * currently occupies: (c)urrent, (r)equested and/or (b)oot. */
void r600_dpm_print_ps_status(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	printk("\tstatus: ");
	if (rps == rdev->pm.dpm.current_ps)
		printk("c ");
	if (rps == rdev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == rdev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}
152
153void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
154 u32 *p, u32 *u)
155{
156 u32 b_c = 0;
157 u32 i_c;
158 u32 tmp;
159
160 i_c = (i * r_c) / 100;
161 tmp = i_c >> p_b;
162
163 while (tmp) {
164 b_c++;
165 tmp >>= 1;
166 }
167
168 *u = (b_c + 1) / 2;
169 *p = i_c / (1 << (2 * (*u)));
170}
171
/*
 * Derive the low/high activity thresholds (*tl / *th) around target t
 * from hysteresis h and the frequency pair fl (low) / fh (high).
 * All arithmetic is fixed-point integer; the 10/100/1000/10000 scale
 * factors preserve precision, so the expression order matters.
 * Returns 0 on success, -EINVAL for a zero or inverted frequency range.
 */
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;	/* frequency ratio, in percent */
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;	/* round to nearest */
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;		/* split a into above/below-target parts */

	*th = t - ah;
	*tl = t + al;

	return 0;
}
192
/*
 * Enable/disable dynamic gfx clock gating.  Disabling performs a
 * request/response handshake with the RLC before forcing graphics
 * power on; the register sequence is order-sensitive.
 */
void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	int i;

	if (enable) {
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

		/* ask the RLC to stop, then poll for its acknowledgement */
		WREG32(CG_RLC_REQ_AND_RSP, 0x2);

		for (i = 0; i < rdev->usec_timeout; i++) {
			if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
				break;
			udelay(1);
		}

		WREG32(CG_RLC_REQ_AND_RSP, 0x0);

		WREG32(GRBM_PWR_CNTL, 0x1);
		RREG32(GRBM_PWR_CNTL);	/* posting read to flush the write */
	}
}
216
217void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
218{
219 if (enable)
220 WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
221 else
222 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
223}
224
225void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
226{
227 if (enable)
228 WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
229 else
230 WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
231}
232
233void r600_enable_acpi_pm(struct radeon_device *rdev)
234{
235 WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
236}
237
238void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
239{
240 if (enable)
241 WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
242 else
243 WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
244}
245
246bool r600_dynamicpm_enabled(struct radeon_device *rdev)
247{
248 if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
249 return true;
250 else
251 return false;
252}
253
254void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
255{
256 if (enable)
257 WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF);
258 else
259 WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
260}
261
262void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
263{
264 if (enable)
265 WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
266 else
267 WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
268}
269
270void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
271{
272 if (enable)
273 WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
274 else
275 WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
276}
277
278void r600_wait_for_spll_change(struct radeon_device *rdev)
279{
280 int i;
281
282 for (i = 0; i < rdev->usec_timeout; i++) {
283 if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
284 break;
285 udelay(1);
286 }
287}
288
289void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
290{
291 WREG32(CG_BSP, BSP(p) | BSU(u));
292}
293
294void r600_set_at(struct radeon_device *rdev,
295 u32 l_to_m, u32 m_to_h,
296 u32 h_to_m, u32 m_to_l)
297{
298 WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
299 WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
300}
301
302void r600_set_tc(struct radeon_device *rdev,
303 u32 index, u32 u_t, u32 d_t)
304{
305 WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
306}
307
308void r600_select_td(struct radeon_device *rdev,
309 enum r600_td td)
310{
311 if (td == R600_TD_AUTO)
312 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
313 else
314 WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
315 if (td == R600_TD_UP)
316 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
317 if (td == R600_TD_DOWN)
318 WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
319}
320
/* Program the voltage response/control value register. */
void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
	WREG32(CG_FTV, vrv);
}

/* Program the TPU field of CG_TPC. */
void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}

/* Program the TPCC field of CG_TPC. */
void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}

/* Program the SSTU field of CG_SSP. */
void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}

/* Program the SST field of CG_SSP. */
void r600_set_sst(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}

/* Program the GICST field of CG_GIT. */
void r600_set_git(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}

/* Program the FC_TU field of CG_FC_T. */
void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}

/* Program the FC_T field of CG_FC_T. */
void r600_set_fct(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}

/* Program the PHC field of CG_CTX_CGTT3D_R. */
void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}

/* Program the SDC field of CG_CTX_CGTT3D_R. */
void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}

/* Program the SU field of CG_VDDC3D_OOR. */
void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}

/* Program the PHC field of CG_VDDC3D_OOR. */
void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}

/* Program the SDC field of CG_VDDC3D_OOR. */
void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}

/* Program the memory PLL lock time field of MPLL_TIME. */
void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}

/* Program the memory PLL reset time field of MPLL_TIME. */
void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}
395
396void r600_engine_clock_entry_enable(struct radeon_device *rdev,
397 u32 index, bool enable)
398{
399 if (enable)
400 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
401 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
402 else
403 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
404 0, ~STEP_0_SPLL_ENTRY_VALID);
405}
406
407void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
408 u32 index, bool enable)
409{
410 if (enable)
411 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
412 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
413 else
414 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
415 0, ~STEP_0_SPLL_STEP_ENABLE);
416}
417
418void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
419 u32 index, bool enable)
420{
421 if (enable)
422 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
423 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
424 else
425 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
426 0, ~STEP_0_POST_DIV_EN);
427}
428
429void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
430 u32 index, u32 divider)
431{
432 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
433 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
434}
435
436void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
437 u32 index, u32 divider)
438{
439 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
440 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
441}
442
443void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
444 u32 index, u32 divider)
445{
446 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
447 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
448}
449
450void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
451 u32 index, u32 step_time)
452{
453 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
454 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
455}
456
/* Program the SSTU field of VID_RT. */
void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}

/* Program the VID_CRTU field of VID_RT. */
void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}

/* Program the VID_CRT field of VID_RT. */
void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}
471
/* Enable the GPIO pins selected by the 64-bit mask: low 32 bits go to
 * LOWER_GPIO_ENABLE, high 32 bits to UPPER_GPIO_ENABLE. */
void r600_voltage_control_enable_pins(struct radeon_device *rdev,
				      u64 mask)
{
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}
478
479
/* Program the GPIO pin pattern for one power level.  Levels are indexed
 * in reverse (ix = 3 - index); the low 32 pin bits go to the per-level
 * CTXSW_VID_LOWER_GPIO_CNTL dword, while 3 upper-pin bits per level are
 * packed into VID_UPPER_GPIO_CNTL at bit position 3*ix. */
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	/* read-modify-write only this level's 3-bit field in the upper reg */
	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}
493
494void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
495 u64 mask)
496{
497 u32 gpio;
498
499 gpio = RREG32(GPIOPAD_MASK);
500 gpio &= ~mask;
501 WREG32(GPIOPAD_MASK, gpio);
502
503 gpio = RREG32(GPIOPAD_EN);
504 gpio &= ~mask;
505 WREG32(GPIOPAD_EN, gpio);
506
507 gpio = RREG32(GPIOPAD_A);
508 gpio &= ~mask;
509 WREG32(GPIOPAD_A, gpio);
510}
511
512void r600_power_level_enable(struct radeon_device *rdev,
513 enum r600_power_level index, bool enable)
514{
515 u32 ix = 3 - (3 & index);
516
517 if (enable)
518 WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
519 ~CTXSW_FREQ_STATE_ENABLE);
520 else
521 WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
522 ~CTXSW_FREQ_STATE_ENABLE);
523}
524
/* Select which voltage table entry the given power level uses.
 * Levels are indexed in reverse order in hardware (ix = 3 - index). */
void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

/* Select which memory clock entry the given power level uses. */
void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

/* Select which engine clock entry the given power level uses. */
void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}

/* Set the display watermark (high or low) for the given power level. */
void r600_power_level_set_watermark_id(struct radeon_device *rdev,
				       enum r600_power_level index,
				       enum r600_display_watermark watermark_id)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

/* Mark whether the given power level's voltage is pcie gen2 compatible. */
void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
				    enum r600_power_level index, bool compatible)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (compatible)
		tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}
574
575enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
576{
577 u32 tmp;
578
579 tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
580 tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
581 return tmp;
582}
583
584enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
585{
586 u32 tmp;
587
588 tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
589 tmp >>= TARGET_PROFILE_INDEX_SHIFT;
590 return tmp;
591}
592
593void r600_power_level_set_enter_index(struct radeon_device *rdev,
594 enum r600_power_level index)
595{
596 WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
597 ~DYN_PWR_ENTER_INDEX_MASK);
598}
599
600void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
601 enum r600_power_level index)
602{
603 int i;
604
605 for (i = 0; i < rdev->usec_timeout; i++) {
606 if (r600_power_level_get_target_index(rdev) != index)
607 break;
608 udelay(1);
609 }
610
611 for (i = 0; i < rdev->usec_timeout; i++) {
612 if (r600_power_level_get_current_index(rdev) != index)
613 break;
614 udelay(1);
615 }
616}
617
618void r600_wait_for_power_level(struct radeon_device *rdev,
619 enum r600_power_level index)
620{
621 int i;
622
623 for (i = 0; i < rdev->usec_timeout; i++) {
624 if (r600_power_level_get_target_index(rdev) == index)
625 break;
626 udelay(1);
627 }
628
629 for (i = 0; i < rdev->usec_timeout; i++) {
630 if (r600_power_level_get_current_index(rdev) == index)
631 break;
632 udelay(1);
633 }
634}
635
/*
 * Kick off dynamic power management.  Clock controls are parked off
 * while the global enable is flipped, the SPLL is cycled through
 * bypass twice (synchronized to vblank on both crtcs), then clock
 * controls are handed back to the hardware.  The sequence is
 * order-sensitive.
 */
void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	/* second bypass cycle — intentional repetition of the sequence */
	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}

/* Stop dynamic power management by clearing the global enable. */
void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}
664
/* Hook called before a power state change; nothing to do on r6xx. */
int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}

/* Hook called after a power state change; nothing to do on r6xx. */
void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{

}
674
675bool r600_is_uvd_state(u32 class, u32 class2)
676{
677 if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
678 return true;
679 if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
680 return true;
681 if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
682 return true;
683 if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
684 return true;
685 if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
686 return true;
687 return false;
688}
689
690int r600_set_thermal_temperature_range(struct radeon_device *rdev,
691 int min_temp, int max_temp)
692{
693 int low_temp = 0 * 1000;
694 int high_temp = 255 * 1000;
695
696 if (low_temp < min_temp)
697 low_temp = min_temp;
698 if (high_temp > max_temp)
699 high_temp = max_temp;
700 if (high_temp < low_temp) {
701 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
702 return -EINVAL;
703 }
704
705 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
706 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
707 WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
708
709 rdev->pm.dpm.thermal.min_temp = low_temp;
710 rdev->pm.dpm.thermal.max_temp = high_temp;
711
712 return 0;
713}
714
/* Report whether the given thermal sensor type is an on-die sensor the
 * driver can read directly (vs. an external/i2c controller). */
bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}
735
/* Overlay of every PowerPlay table revision found in the vbios; the
 * actual layout is selected by the table's size/revision fields. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

/* Overlay of the two fan table revisions. */
union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
};
751
752static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
753 ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
754{
755 u32 size = atom_table->ucNumEntries *
756 sizeof(struct radeon_clock_voltage_dependency_entry);
757 int i;
758
759 radeon_table->entries = kzalloc(size, GFP_KERNEL);
760 if (!radeon_table->entries)
761 return -ENOMEM;
762
763 for (i = 0; i < atom_table->ucNumEntries; i++) {
764 radeon_table->entries[i].clk = le16_to_cpu(atom_table->entries[i].usClockLow) |
765 (atom_table->entries[i].ucClockHigh << 16);
766 radeon_table->entries[i].v = le16_to_cpu(atom_table->entries[i].usVoltage);
767 }
768 radeon_table->count = atom_table->ucNumEntries;
769
770 return 0;
771}
772
/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) for each header revision; used to
 * decide which optional sub-table offsets are present in the header. */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
780
781int r600_parse_extended_power_table(struct radeon_device *rdev)
782{
783 struct radeon_mode_info *mode_info = &rdev->mode_info;
784 union power_info *power_info;
785 union fan_info *fan_info;
786 ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
787 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
788 u16 data_offset;
789 u8 frev, crev;
790 int ret, i;
791
792 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
793 &frev, &crev, &data_offset))
794 return -EINVAL;
795 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
796
797 /* fan table */
798 if (le16_to_cpu(power_info->pplib.usTableSize) >=
799 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
800 if (power_info->pplib3.usFanTableOffset) {
801 fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
802 le16_to_cpu(power_info->pplib3.usFanTableOffset));
803 rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
804 rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
805 rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
806 rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
807 rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
808 rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
809 rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
810 if (fan_info->fan.ucFanTableFormat >= 2)
811 rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
812 else
813 rdev->pm.dpm.fan.t_max = 10900;
814 rdev->pm.dpm.fan.cycle_delay = 100000;
815 rdev->pm.dpm.fan.ucode_fan_control = true;
816 }
817 }
818
819 /* clock dependancy tables, shedding tables */
820 if (le16_to_cpu(power_info->pplib.usTableSize) >=
821 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
822 if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
823 dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
824 (mode_info->atom_context->bios + data_offset +
825 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
826 ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
827 dep_table);
828 if (ret)
829 return ret;
830 }
831 if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
832 dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
833 (mode_info->atom_context->bios + data_offset +
834 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
835 ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
836 dep_table);
837 if (ret) {
838 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
839 return ret;
840 }
841 }
842 if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
843 dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
844 (mode_info->atom_context->bios + data_offset +
845 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
846 ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
847 dep_table);
848 if (ret) {
849 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
850 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
851 return ret;
852 }
853 }
854 if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
855 ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
856 (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
857 (mode_info->atom_context->bios + data_offset +
858 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
859 if (clk_v->ucNumEntries) {
860 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
861 le16_to_cpu(clk_v->entries[0].usSclkLow) |
862 (clk_v->entries[0].ucSclkHigh << 16);
863 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
864 le16_to_cpu(clk_v->entries[0].usMclkLow) |
865 (clk_v->entries[0].ucMclkHigh << 16);
866 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
867 le16_to_cpu(clk_v->entries[0].usVddc);
868 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
869 le16_to_cpu(clk_v->entries[0].usVddci);
870 }
871 }
872 if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
873 ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
874 (ATOM_PPLIB_PhaseSheddingLimits_Table *)
875 (mode_info->atom_context->bios + data_offset +
876 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
877
878 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
879 kzalloc(psl->ucNumEntries *
880 sizeof(struct radeon_phase_shedding_limits_entry),
881 GFP_KERNEL);
882 if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
883 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
884 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
885 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
886 return -ENOMEM;
887 }
888
889 for (i = 0; i < psl->ucNumEntries; i++) {
890 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
891 le16_to_cpu(psl->entries[i].usSclkLow) |
892 (psl->entries[i].ucSclkHigh << 16);
893 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
894 le16_to_cpu(psl->entries[i].usMclkLow) |
895 (psl->entries[i].ucMclkHigh << 16);
896 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
897 le16_to_cpu(psl->entries[i].usVoltage);
898 }
899 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
900 psl->ucNumEntries;
901 }
902 }
903
904 /* cac data */
905 if (le16_to_cpu(power_info->pplib.usTableSize) >=
906 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
907 rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
908 rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
909 rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
910 rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
911 if (rdev->pm.dpm.tdp_od_limit)
912 rdev->pm.dpm.power_control = true;
913 else
914 rdev->pm.dpm.power_control = false;
915 rdev->pm.dpm.tdp_adjustment = 0;
916 rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
917 rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
918 rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
919 if (power_info->pplib5.usCACLeakageTableOffset) {
920 ATOM_PPLIB_CAC_Leakage_Table *cac_table =
921 (ATOM_PPLIB_CAC_Leakage_Table *)
922 (mode_info->atom_context->bios + data_offset +
923 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
924 u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
925 rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
926 if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
927 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
928 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
929 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
930 return -ENOMEM;
931 }
932 for (i = 0; i < cac_table->ucNumEntries; i++) {
933 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
934 le16_to_cpu(cac_table->entries[i].usVddc);
935 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
936 le32_to_cpu(cac_table->entries[i].ulLeakageValue);
937 }
938 rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
939 }
940 }
941
942 /* ppm table */
943 if (le16_to_cpu(power_info->pplib.usTableSize) >=
944 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
945 ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
946 (mode_info->atom_context->bios + data_offset +
947 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
948 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
949 ext_hdr->usPPMTableOffset) {
950 ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
951 (mode_info->atom_context->bios + data_offset +
952 le16_to_cpu(ext_hdr->usPPMTableOffset));
953 rdev->pm.dpm.dyn_state.ppm_table =
954 kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
955 if (!rdev->pm.dpm.dyn_state.ppm_table) {
956 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
957 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
958 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
959 kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
960 return -ENOMEM;
961 }
962 rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
963 rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
964 le16_to_cpu(ppm->usCpuCoreNumber);
965 rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
966 le32_to_cpu(ppm->ulPlatformTDP);
967 rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
968 le32_to_cpu(ppm->ulSmallACPlatformTDP);
969 rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
970 le32_to_cpu(ppm->ulPlatformTDC);
971 rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
972 le32_to_cpu(ppm->ulSmallACPlatformTDC);
973 rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
974 le32_to_cpu(ppm->ulApuTDP);
975 rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
976 le32_to_cpu(ppm->ulDGpuTDP);
977 rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
978 le32_to_cpu(ppm->ulDGpuUlvPower);
979 rdev->pm.dpm.dyn_state.ppm_table->tj_max =
980 le32_to_cpu(ppm->ulTjmax);
981 }
982 }
983
984 return 0;
985}
986
987void r600_free_extended_power_table(struct radeon_device *rdev)
988{
989 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries)
990 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
991 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries)
992 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
993 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
994 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
995 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
996 kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
997 if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
998 kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
999 if (rdev->pm.dpm.dyn_state.ppm_table)
1000 kfree(rdev->pm.dpm.dyn_state.ppm_table);
1001}
1002
1003enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
1004 u32 sys_mask,
1005 enum radeon_pcie_gen asic_gen,
1006 enum radeon_pcie_gen default_gen)
1007{
1008 switch (asic_gen) {
1009 case RADEON_PCIE_GEN1:
1010 return RADEON_PCIE_GEN1;
1011 case RADEON_PCIE_GEN2:
1012 return RADEON_PCIE_GEN2;
1013 case RADEON_PCIE_GEN3:
1014 return RADEON_PCIE_GEN3;
1015 default:
1016 if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
1017 return RADEON_PCIE_GEN3;
1018 else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
1019 return RADEON_PCIE_GEN2;
1020 else
1021 return RADEON_PCIE_GEN1;
1022 }
1023 return RADEON_PCIE_GEN1;
1024}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
new file mode 100644
index 000000000000..a95ab214289b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -0,0 +1,226 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __R600_DPM_H__
24#define __R600_DPM_H__
25
26#define R600_ASI_DFLT 10000
27#define R600_BSP_DFLT 0x41EB
28#define R600_BSU_DFLT 0x2
29#define R600_AH_DFLT 5
30#define R600_RLP_DFLT 25
31#define R600_RMP_DFLT 65
32#define R600_LHP_DFLT 40
33#define R600_LMP_DFLT 15
34#define R600_TD_DFLT 0
35#define R600_UTC_DFLT_00 0x24
36#define R600_UTC_DFLT_01 0x22
37#define R600_UTC_DFLT_02 0x22
38#define R600_UTC_DFLT_03 0x22
39#define R600_UTC_DFLT_04 0x22
40#define R600_UTC_DFLT_05 0x22
41#define R600_UTC_DFLT_06 0x22
42#define R600_UTC_DFLT_07 0x22
43#define R600_UTC_DFLT_08 0x22
44#define R600_UTC_DFLT_09 0x22
45#define R600_UTC_DFLT_10 0x22
46#define R600_UTC_DFLT_11 0x22
47#define R600_UTC_DFLT_12 0x22
48#define R600_UTC_DFLT_13 0x22
49#define R600_UTC_DFLT_14 0x22
50#define R600_DTC_DFLT_00 0x24
51#define R600_DTC_DFLT_01 0x22
52#define R600_DTC_DFLT_02 0x22
53#define R600_DTC_DFLT_03 0x22
54#define R600_DTC_DFLT_04 0x22
55#define R600_DTC_DFLT_05 0x22
56#define R600_DTC_DFLT_06 0x22
57#define R600_DTC_DFLT_07 0x22
58#define R600_DTC_DFLT_08 0x22
59#define R600_DTC_DFLT_09 0x22
60#define R600_DTC_DFLT_10 0x22
61#define R600_DTC_DFLT_11 0x22
62#define R600_DTC_DFLT_12 0x22
63#define R600_DTC_DFLT_13 0x22
64#define R600_DTC_DFLT_14 0x22
65#define R600_VRC_DFLT 0x0000C003
66#define R600_VOLTAGERESPONSETIME_DFLT 1000
67#define R600_BACKBIASRESPONSETIME_DFLT 1000
68#define R600_VRU_DFLT 0x3
69#define R600_SPLLSTEPTIME_DFLT 0x1000
70#define R600_SPLLSTEPUNIT_DFLT 0x3
71#define R600_TPU_DFLT 0
72#define R600_TPC_DFLT 0x200
73#define R600_SSTU_DFLT 0
74#define R600_SST_DFLT 0x00C8
75#define R600_GICST_DFLT 0x200
76#define R600_FCT_DFLT 0x0400
77#define R600_FCTU_DFLT 0
78#define R600_CTXCGTT3DRPHC_DFLT 0x20
79#define R600_CTXCGTT3DRSDC_DFLT 0x40
80#define R600_VDDC3DOORPHC_DFLT 0x100
81#define R600_VDDC3DOORSDC_DFLT 0x7
82#define R600_VDDC3DOORSU_DFLT 0
83#define R600_MPLLLOCKTIME_DFLT 100
84#define R600_MPLLRESETTIME_DFLT 150
85#define R600_VCOSTEPPCT_DFLT 20
86#define R600_ENDINGVCOSTEPPCT_DFLT 5
87#define R600_REFERENCEDIVIDER_DFLT 4
88
89#define R600_PM_NUMBER_OF_TC 15
90#define R600_PM_NUMBER_OF_SCLKS 20
91#define R600_PM_NUMBER_OF_MCLKS 4
92#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4
93#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3
94
95/* XXX are these ok? */
96#define R600_TEMP_RANGE_MIN (90 * 1000)
97#define R600_TEMP_RANGE_MAX (120 * 1000)
98
99enum r600_power_level {
100 R600_POWER_LEVEL_LOW = 0,
101 R600_POWER_LEVEL_MEDIUM = 1,
102 R600_POWER_LEVEL_HIGH = 2,
103 R600_POWER_LEVEL_CTXSW = 3,
104};
105
106enum r600_td {
107 R600_TD_AUTO,
108 R600_TD_UP,
109 R600_TD_DOWN,
110};
111
112enum r600_display_watermark {
113 R600_DISPLAY_WATERMARK_LOW = 0,
114 R600_DISPLAY_WATERMARK_HIGH = 1,
115};
116
117enum r600_display_gap
118{
119 R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
120 R600_PM_DISPLAY_GAP_VBLANK = 1,
121 R600_PM_DISPLAY_GAP_WATERMARK = 2,
122 R600_PM_DISPLAY_GAP_IGNORE = 3,
123};
124
125extern const u32 r600_utc[R600_PM_NUMBER_OF_TC];
126extern const u32 r600_dtc[R600_PM_NUMBER_OF_TC];
127
128void r600_dpm_print_class_info(u32 class, u32 class2);
129void r600_dpm_print_cap_info(u32 caps);
130void r600_dpm_print_ps_status(struct radeon_device *rdev,
131 struct radeon_ps *rps);
132bool r600_is_uvd_state(u32 class, u32 class2);
133void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
134 u32 *p, u32 *u);
135int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th);
136void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable);
137void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable);
138void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable);
139void r600_enable_acpi_pm(struct radeon_device *rdev);
140void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable);
141bool r600_dynamicpm_enabled(struct radeon_device *rdev);
142void r600_enable_sclk_control(struct radeon_device *rdev, bool enable);
143void r600_enable_mclk_control(struct radeon_device *rdev, bool enable);
144void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable);
145void r600_wait_for_spll_change(struct radeon_device *rdev);
146void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p);
147void r600_set_at(struct radeon_device *rdev,
148 u32 l_to_m, u32 m_to_h,
149 u32 h_to_m, u32 m_to_l);
150void r600_set_tc(struct radeon_device *rdev, u32 index, u32 u_t, u32 d_t);
151void r600_select_td(struct radeon_device *rdev, enum r600_td td);
152void r600_set_vrc(struct radeon_device *rdev, u32 vrv);
153void r600_set_tpu(struct radeon_device *rdev, u32 u);
154void r600_set_tpc(struct radeon_device *rdev, u32 c);
155void r600_set_sstu(struct radeon_device *rdev, u32 u);
156void r600_set_sst(struct radeon_device *rdev, u32 t);
157void r600_set_git(struct radeon_device *rdev, u32 t);
158void r600_set_fctu(struct radeon_device *rdev, u32 u);
159void r600_set_fct(struct radeon_device *rdev, u32 t);
160void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p);
161void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s);
162void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u);
163void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p);
164void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s);
165void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time);
166void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time);
167void r600_engine_clock_entry_enable(struct radeon_device *rdev,
168 u32 index, bool enable);
169void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
170 u32 index, bool enable);
171void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
172 u32 index, bool enable);
173void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
174 u32 index, u32 divider);
175void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
176 u32 index, u32 divider);
177void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
178 u32 index, u32 divider);
179void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
180 u32 index, u32 step_time);
181void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u);
182void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u);
183void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt);
184void r600_voltage_control_enable_pins(struct radeon_device *rdev,
185 u64 mask);
186void r600_voltage_control_program_voltages(struct radeon_device *rdev,
187 enum r600_power_level index, u64 pins);
188void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
189 u64 mask);
190void r600_power_level_enable(struct radeon_device *rdev,
191 enum r600_power_level index, bool enable);
192void r600_power_level_set_voltage_index(struct radeon_device *rdev,
193 enum r600_power_level index, u32 voltage_index);
194void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
195 enum r600_power_level index, u32 mem_clock_index);
196void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
197 enum r600_power_level index, u32 eng_clock_index);
198void r600_power_level_set_watermark_id(struct radeon_device *rdev,
199 enum r600_power_level index,
200 enum r600_display_watermark watermark_id);
201void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
202 enum r600_power_level index, bool compatible);
203enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev);
204enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev);
205void r600_power_level_set_enter_index(struct radeon_device *rdev,
206 enum r600_power_level index);
207void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
208 enum r600_power_level index);
209void r600_wait_for_power_level(struct radeon_device *rdev,
210 enum r600_power_level index);
211void r600_start_dpm(struct radeon_device *rdev);
212void r600_stop_dpm(struct radeon_device *rdev);
213
214int r600_set_thermal_temperature_range(struct radeon_device *rdev,
215 int min_temp, int max_temp);
216bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor);
217
218int r600_parse_extended_power_table(struct radeon_device *rdev);
219void r600_free_extended_power_table(struct radeon_device *rdev);
220
221enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
222 u32 sys_mask,
223 enum radeon_pcie_gen asic_gen,
224 enum radeon_pcie_gen default_gen);
225
226#endif
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 456750a0daa5..e73b2a73494a 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -133,14 +133,7 @@ static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
133 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 133 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
134 uint32_t offset = dig->afmt->offset; 134 uint32_t offset = dig->afmt->offset;
135 uint8_t *frame = buffer + 3; 135 uint8_t *frame = buffer + 3;
136 136 uint8_t *header = buffer;
137 /* Our header values (type, version, length) should be alright, Intel
138 * is using the same. Checksum function also seems to be OK, it works
139 * fine for audio infoframe. However calculated value is always lower
140 * by 2 in comparison to fglrx. It breaks displaying anything in case
141 * of TVs that strictly check the checksum. Hack it manually here to
142 * workaround this issue. */
143 frame[0x0] += 2;
144 137
145 WREG32(HDMI0_AVI_INFO0 + offset, 138 WREG32(HDMI0_AVI_INFO0 + offset,
146 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); 139 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -149,7 +142,7 @@ static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
149 WREG32(HDMI0_AVI_INFO2 + offset, 142 WREG32(HDMI0_AVI_INFO2 + offset,
150 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); 143 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
151 WREG32(HDMI0_AVI_INFO3 + offset, 144 WREG32(HDMI0_AVI_INFO3 + offset,
152 frame[0xC] | (frame[0xD] << 8)); 145 frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
153} 146}
154 147
155/* 148/*
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 909219b1bf80..3ef202629e7e 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -31,6 +31,12 @@
31#define R600_PCIE_PORT_INDEX 0x0038 31#define R600_PCIE_PORT_INDEX 0x0038
32#define R600_PCIE_PORT_DATA 0x003c 32#define R600_PCIE_PORT_DATA 0x003c
33 33
34#define R600_RCU_INDEX 0x0100
35#define R600_RCU_DATA 0x0104
36
37#define R600_UVD_CTX_INDEX 0xf4a0
38#define R600_UVD_CTX_DATA 0xf4a4
39
34#define R600_MC_VM_FB_LOCATION 0x2180 40#define R600_MC_VM_FB_LOCATION 0x2180
35#define R600_MC_FB_BASE_MASK 0x0000FFFF 41#define R600_MC_FB_BASE_MASK 0x0000FFFF
36#define R600_MC_FB_BASE_SHIFT 0 42#define R600_MC_FB_BASE_SHIFT 0
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 79df558f8c40..f1b3084d8f51 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -302,10 +302,25 @@
302#define GRBM_SOFT_RESET 0x8020 302#define GRBM_SOFT_RESET 0x8020
303#define SOFT_RESET_CP (1<<0) 303#define SOFT_RESET_CP (1<<0)
304 304
305#define CG_THERMAL_CTRL 0x7F0
306#define DIG_THERM_DPM(x) ((x) << 12)
307#define DIG_THERM_DPM_MASK 0x000FF000
308#define DIG_THERM_DPM_SHIFT 12
305#define CG_THERMAL_STATUS 0x7F4 309#define CG_THERMAL_STATUS 0x7F4
306#define ASIC_T(x) ((x) << 0) 310#define ASIC_T(x) ((x) << 0)
307#define ASIC_T_MASK 0x1FF 311#define ASIC_T_MASK 0x1FF
308#define ASIC_T_SHIFT 0 312#define ASIC_T_SHIFT 0
313#define CG_THERMAL_INT 0x7F8
314#define DIG_THERM_INTH(x) ((x) << 8)
315#define DIG_THERM_INTH_MASK 0x0000FF00
316#define DIG_THERM_INTH_SHIFT 8
317#define DIG_THERM_INTL(x) ((x) << 16)
318#define DIG_THERM_INTL_MASK 0x00FF0000
319#define DIG_THERM_INTL_SHIFT 16
320#define THERM_INT_MASK_HIGH (1 << 24)
321#define THERM_INT_MASK_LOW (1 << 25)
322
323#define RV770_CG_THERMAL_INT 0x734
309 324
310#define HDP_HOST_PATH_CNTL 0x2C00 325#define HDP_HOST_PATH_CNTL 0x2C00
311#define HDP_NONSURFACE_BASE 0x2C04 326#define HDP_NONSURFACE_BASE 0x2C04
@@ -684,10 +699,6 @@
684#define RLC_UCODE_ADDR 0x3f2c 699#define RLC_UCODE_ADDR 0x3f2c
685#define RLC_UCODE_DATA 0x3f30 700#define RLC_UCODE_DATA 0x3f30
686 701
687/* new for TN */
688#define TN_RLC_SAVE_AND_RESTORE_BASE 0x3f10
689#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
690
691#define SRBM_SOFT_RESET 0xe60 702#define SRBM_SOFT_RESET 0xe60
692# define SOFT_RESET_DMA (1 << 12) 703# define SOFT_RESET_DMA (1 << 12)
693# define SOFT_RESET_RLC (1 << 13) 704# define SOFT_RESET_RLC (1 << 13)
@@ -1148,6 +1159,219 @@
1148# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29) 1159# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
1149# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30) 1160# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
1150 1161
1162/* Power management */
1163#define CG_SPLL_FUNC_CNTL 0x600
1164# define SPLL_RESET (1 << 0)
1165# define SPLL_SLEEP (1 << 1)
1166# define SPLL_REF_DIV(x) ((x) << 2)
1167# define SPLL_REF_DIV_MASK (7 << 2)
1168# define SPLL_FB_DIV(x) ((x) << 5)
1169# define SPLL_FB_DIV_MASK (0xff << 5)
1170# define SPLL_PULSEEN (1 << 13)
1171# define SPLL_PULSENUM(x) ((x) << 14)
1172# define SPLL_PULSENUM_MASK (3 << 14)
1173# define SPLL_SW_HILEN(x) ((x) << 16)
1174# define SPLL_SW_HILEN_MASK (0xf << 16)
1175# define SPLL_SW_LOLEN(x) ((x) << 20)
1176# define SPLL_SW_LOLEN_MASK (0xf << 20)
1177# define SPLL_DIVEN (1 << 24)
1178# define SPLL_BYPASS_EN (1 << 25)
1179# define SPLL_CHG_STATUS (1 << 29)
1180# define SPLL_CTLREQ (1 << 30)
1181# define SPLL_CTLACK (1 << 31)
1182
1183#define GENERAL_PWRMGT 0x618
1184# define GLOBAL_PWRMGT_EN (1 << 0)
1185# define STATIC_PM_EN (1 << 1)
1186# define MOBILE_SU (1 << 2)
1187# define THERMAL_PROTECTION_DIS (1 << 3)
1188# define THERMAL_PROTECTION_TYPE (1 << 4)
1189# define ENABLE_GEN2PCIE (1 << 5)
1190# define SW_GPIO_INDEX(x) ((x) << 6)
1191# define SW_GPIO_INDEX_MASK (3 << 6)
1192# define LOW_VOLT_D2_ACPI (1 << 8)
1193# define LOW_VOLT_D3_ACPI (1 << 9)
1194# define VOLT_PWRMGT_EN (1 << 10)
1195#define CG_TPC 0x61c
1196# define TPCC(x) ((x) << 0)
1197# define TPCC_MASK (0x7fffff << 0)
1198# define TPU(x) ((x) << 23)
1199# define TPU_MASK (0x1f << 23)
1200#define SCLK_PWRMGT_CNTL 0x620
1201# define SCLK_PWRMGT_OFF (1 << 0)
1202# define SCLK_TURNOFF (1 << 1)
1203# define SPLL_TURNOFF (1 << 2)
1204# define SU_SCLK_USE_BCLK (1 << 3)
1205# define DYNAMIC_GFX_ISLAND_PWR_DOWN (1 << 4)
1206# define DYNAMIC_GFX_ISLAND_PWR_LP (1 << 5)
1207# define CLK_TURN_ON_STAGGER (1 << 6)
1208# define CLK_TURN_OFF_STAGGER (1 << 7)
1209# define FIR_FORCE_TREND_SEL (1 << 8)
1210# define FIR_TREND_MODE (1 << 9)
1211# define DYN_GFX_CLK_OFF_EN (1 << 10)
1212# define VDDC3D_TURNOFF_D1 (1 << 11)
1213# define VDDC3D_TURNOFF_D2 (1 << 12)
1214# define VDDC3D_TURNOFF_D3 (1 << 13)
1215# define SPLL_TURNOFF_D2 (1 << 14)
1216# define SCLK_LOW_D1 (1 << 15)
1217# define DYN_GFX_CLK_OFF_MC_EN (1 << 16)
1218#define MCLK_PWRMGT_CNTL 0x624
1219# define MPLL_PWRMGT_OFF (1 << 0)
1220# define YCLK_TURNOFF (1 << 1)
1221# define MPLL_TURNOFF (1 << 2)
1222# define SU_MCLK_USE_BCLK (1 << 3)
1223# define DLL_READY (1 << 4)
1224# define MC_BUSY (1 << 5)
1225# define MC_INT_CNTL (1 << 7)
1226# define MRDCKA_SLEEP (1 << 8)
1227# define MRDCKB_SLEEP (1 << 9)
1228# define MRDCKC_SLEEP (1 << 10)
1229# define MRDCKD_SLEEP (1 << 11)
1230# define MRDCKE_SLEEP (1 << 12)
1231# define MRDCKF_SLEEP (1 << 13)
1232# define MRDCKG_SLEEP (1 << 14)
1233# define MRDCKH_SLEEP (1 << 15)
1234# define MRDCKA_RESET (1 << 16)
1235# define MRDCKB_RESET (1 << 17)
1236# define MRDCKC_RESET (1 << 18)
1237# define MRDCKD_RESET (1 << 19)
1238# define MRDCKE_RESET (1 << 20)
1239# define MRDCKF_RESET (1 << 21)
1240# define MRDCKG_RESET (1 << 22)
1241# define MRDCKH_RESET (1 << 23)
1242# define DLL_READY_READ (1 << 24)
1243# define USE_DISPLAY_GAP (1 << 25)
1244# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
1245# define USE_DISPLAY_GAP_CTXSW (1 << 27)
1246# define MPLL_TURNOFF_D2 (1 << 28)
1247# define USE_DISPLAY_URGENT_CTXSW (1 << 29)
1248
1249#define MPLL_TIME 0x634
1250# define MPLL_LOCK_TIME(x) ((x) << 0)
1251# define MPLL_LOCK_TIME_MASK (0xffff << 0)
1252# define MPLL_RESET_TIME(x) ((x) << 16)
1253# define MPLL_RESET_TIME_MASK (0xffff << 16)
1254
1255#define SCLK_FREQ_SETTING_STEP_0_PART1 0x648
1256# define STEP_0_SPLL_POST_DIV(x) ((x) << 0)
1257# define STEP_0_SPLL_POST_DIV_MASK (0xff << 0)
1258# define STEP_0_SPLL_FB_DIV(x) ((x) << 8)
1259# define STEP_0_SPLL_FB_DIV_MASK (0xff << 8)
1260# define STEP_0_SPLL_REF_DIV(x) ((x) << 16)
1261# define STEP_0_SPLL_REF_DIV_MASK (7 << 16)
1262# define STEP_0_SPLL_STEP_TIME(x) ((x) << 19)
1263# define STEP_0_SPLL_STEP_TIME_MASK (0x1fff << 19)
1264#define SCLK_FREQ_SETTING_STEP_0_PART2 0x64c
1265# define STEP_0_PULSE_HIGH_CNT(x) ((x) << 0)
1266# define STEP_0_PULSE_HIGH_CNT_MASK (0x1ff << 0)
1267# define STEP_0_POST_DIV_EN (1 << 9)
1268# define STEP_0_SPLL_STEP_ENABLE (1 << 30)
1269# define STEP_0_SPLL_ENTRY_VALID (1 << 31)
1270
1271#define VID_RT 0x6f8
1272# define VID_CRT(x) ((x) << 0)
1273# define VID_CRT_MASK (0x1fff << 0)
1274# define VID_CRTU(x) ((x) << 13)
1275# define VID_CRTU_MASK (7 << 13)
1276# define SSTU(x) ((x) << 16)
1277# define SSTU_MASK (7 << 16)
1278#define CTXSW_PROFILE_INDEX 0x6fc
1279# define CTXSW_FREQ_VIDS_CFG_INDEX(x) ((x) << 0)
1280# define CTXSW_FREQ_VIDS_CFG_INDEX_MASK (3 << 0)
1281# define CTXSW_FREQ_VIDS_CFG_INDEX_SHIFT 0
1282# define CTXSW_FREQ_MCLK_CFG_INDEX(x) ((x) << 2)
1283# define CTXSW_FREQ_MCLK_CFG_INDEX_MASK (3 << 2)
1284# define CTXSW_FREQ_MCLK_CFG_INDEX_SHIFT 2
1285# define CTXSW_FREQ_SCLK_CFG_INDEX(x) ((x) << 4)
1286# define CTXSW_FREQ_SCLK_CFG_INDEX_MASK (0x1f << 4)
1287# define CTXSW_FREQ_SCLK_CFG_INDEX_SHIFT 4
1288# define CTXSW_FREQ_STATE_SPLL_RESET_EN (1 << 9)
1289# define CTXSW_FREQ_STATE_ENABLE (1 << 10)
1290# define CTXSW_FREQ_DISPLAY_WATERMARK (1 << 11)
1291# define CTXSW_FREQ_GEN2PCIE_VOLT (1 << 12)
1292
1293#define TARGET_AND_CURRENT_PROFILE_INDEX 0x70c
1294# define TARGET_PROFILE_INDEX_MASK (3 << 0)
1295# define TARGET_PROFILE_INDEX_SHIFT 0
1296# define CURRENT_PROFILE_INDEX_MASK (3 << 2)
1297# define CURRENT_PROFILE_INDEX_SHIFT 2
1298# define DYN_PWR_ENTER_INDEX(x) ((x) << 4)
1299# define DYN_PWR_ENTER_INDEX_MASK (3 << 4)
1300# define DYN_PWR_ENTER_INDEX_SHIFT 4
1301# define CURR_MCLK_INDEX_MASK (3 << 6)
1302# define CURR_MCLK_INDEX_SHIFT 6
1303# define CURR_SCLK_INDEX_MASK (0x1f << 8)
1304# define CURR_SCLK_INDEX_SHIFT 8
1305# define CURR_VID_INDEX_MASK (3 << 13)
1306# define CURR_VID_INDEX_SHIFT 13
1307
1308#define LOWER_GPIO_ENABLE 0x710
1309#define UPPER_GPIO_ENABLE 0x714
1310#define CTXSW_VID_LOWER_GPIO_CNTL 0x718
1311
1312#define VID_UPPER_GPIO_CNTL 0x740
1313#define CG_CTX_CGTT3D_R 0x744
1314# define PHC(x) ((x) << 0)
1315# define PHC_MASK (0x1ff << 0)
1316# define SDC(x) ((x) << 9)
1317# define SDC_MASK (0x3fff << 9)
1318#define CG_VDDC3D_OOR 0x748
1319# define SU(x) ((x) << 23)
1320# define SU_MASK (0xf << 23)
1321#define CG_FTV 0x74c
1322#define CG_FFCT_0 0x750
1323# define UTC_0(x) ((x) << 0)
1324# define UTC_0_MASK (0x3ff << 0)
1325# define DTC_0(x) ((x) << 10)
1326# define DTC_0_MASK (0x3ff << 10)
1327
1328#define CG_BSP 0x78c
1329# define BSP(x) ((x) << 0)
1330# define BSP_MASK (0xffff << 0)
1331# define BSU(x) ((x) << 16)
1332# define BSU_MASK (0xf << 16)
1333#define CG_RT 0x790
1334# define FLS(x) ((x) << 0)
1335# define FLS_MASK (0xffff << 0)
1336# define FMS(x) ((x) << 16)
1337# define FMS_MASK (0xffff << 16)
1338#define CG_LT 0x794
1339# define FHS(x) ((x) << 0)
1340# define FHS_MASK (0xffff << 0)
1341#define CG_GIT 0x798
1342# define CG_GICST(x) ((x) << 0)
1343# define CG_GICST_MASK (0xffff << 0)
1344# define CG_GIPOT(x) ((x) << 16)
1345# define CG_GIPOT_MASK (0xffff << 16)
1346
1347#define CG_SSP 0x7a8
1348# define CG_SST(x) ((x) << 0)
1349# define CG_SST_MASK (0xffff << 0)
1350# define CG_SSTU(x) ((x) << 16)
1351# define CG_SSTU_MASK (0xf << 16)
1352
1353#define CG_RLC_REQ_AND_RSP 0x7c4
1354# define RLC_CG_REQ_TYPE_MASK 0xf
1355# define RLC_CG_REQ_TYPE_SHIFT 0
1356# define CG_RLC_RSP_TYPE_MASK 0xf0
1357# define CG_RLC_RSP_TYPE_SHIFT 4
1358
1359#define CG_FC_T 0x7cc
1360# define FC_T(x) ((x) << 0)
1361# define FC_T_MASK (0xffff << 0)
1362# define FC_TU(x) ((x) << 16)
1363# define FC_TU_MASK (0x1f << 16)
1364
1365#define GPIOPAD_MASK 0x1798
1366#define GPIOPAD_A 0x179c
1367#define GPIOPAD_EN 0x17a0
1368
1369#define GRBM_PWR_CNTL 0x800c
1370# define REQ_TYPE_MASK 0xf
1371# define REQ_TYPE_SHIFT 0
1372# define RSP_TYPE_MASK 0xf0
1373# define RSP_TYPE_SHIFT 4
1374
1151/* 1375/*
1152 * UVD 1376 * UVD
1153 */ 1377 */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 142ce6cc69f5..a424949005c9 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -96,6 +96,7 @@ extern int radeon_pcie_gen2;
96extern int radeon_msi; 96extern int radeon_msi;
97extern int radeon_lockup_timeout; 97extern int radeon_lockup_timeout;
98extern int radeon_fastfb; 98extern int radeon_fastfb;
99extern int radeon_dpm;
99 100
100/* 101/*
101 * Copy from radeon_drv.h so we don't have to include both and have conflicting 102 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -150,6 +151,13 @@ extern int radeon_fastfb;
150#define RADEON_RESET_MC (1 << 10) 151#define RADEON_RESET_MC (1 << 10)
151#define RADEON_RESET_DISPLAY (1 << 11) 152#define RADEON_RESET_DISPLAY (1 << 11)
152 153
154/* max cursor sizes (in pixels) */
155#define CURSOR_WIDTH 64
156#define CURSOR_HEIGHT 64
157
158#define CIK_CURSOR_WIDTH 128
159#define CIK_CURSOR_HEIGHT 128
160
153/* 161/*
154 * Errata workarounds. 162 * Errata workarounds.
155 */ 163 */
@@ -192,6 +200,7 @@ struct radeon_clock {
192 uint32_t default_mclk; 200 uint32_t default_mclk;
193 uint32_t default_sclk; 201 uint32_t default_sclk;
194 uint32_t default_dispclk; 202 uint32_t default_dispclk;
203 uint32_t current_dispclk;
195 uint32_t dp_extclk; 204 uint32_t dp_extclk;
196 uint32_t max_pixel_clock; 205 uint32_t max_pixel_clock;
197}; 206};
@@ -211,13 +220,51 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
211 u32 clock, 220 u32 clock,
212 bool strobe_mode, 221 bool strobe_mode,
213 struct atom_clock_dividers *dividers); 222 struct atom_clock_dividers *dividers);
223int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
224 u32 clock,
225 bool strobe_mode,
226 struct atom_mpll_param *mpll_param);
214void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); 227void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
228int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
229 u16 voltage_level, u8 voltage_type,
230 u32 *gpio_value, u32 *gpio_mask);
231void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
232 u32 eng_clock, u32 mem_clock);
233int radeon_atom_get_voltage_step(struct radeon_device *rdev,
234 u8 voltage_type, u16 *voltage_step);
235int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
236 u16 voltage_id, u16 *voltage);
237int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
238 u16 *voltage,
239 u16 leakage_idx);
240int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
241 u8 voltage_type,
242 u16 nominal_voltage,
243 u16 *true_voltage);
244int radeon_atom_get_min_voltage(struct radeon_device *rdev,
245 u8 voltage_type, u16 *min_voltage);
246int radeon_atom_get_max_voltage(struct radeon_device *rdev,
247 u8 voltage_type, u16 *max_voltage);
248int radeon_atom_get_voltage_table(struct radeon_device *rdev,
249 u8 voltage_type, u8 voltage_mode,
250 struct atom_voltage_table *voltage_table);
251bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
252 u8 voltage_type, u8 voltage_mode);
253void radeon_atom_update_memory_dll(struct radeon_device *rdev,
254 u32 mem_clock);
255void radeon_atom_set_ac_timing(struct radeon_device *rdev,
256 u32 mem_clock);
257int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
258 u8 module_index,
259 struct atom_mc_reg_table *reg_table);
260int radeon_atom_get_memory_info(struct radeon_device *rdev,
261 u8 module_index, struct atom_memory_info *mem_info);
262int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
263 bool gddr5, u8 module_index,
264 struct atom_memory_clock_range_table *mclk_range_table);
265int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
266 u16 voltage_id, u16 *voltage);
215void rs690_pm_info(struct radeon_device *rdev); 267void rs690_pm_info(struct radeon_device *rdev);
216extern int rv6xx_get_temp(struct radeon_device *rdev);
217extern int rv770_get_temp(struct radeon_device *rdev);
218extern int evergreen_get_temp(struct radeon_device *rdev);
219extern int sumo_get_temp(struct radeon_device *rdev);
220extern int si_get_temp(struct radeon_device *rdev);
221extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw, 268extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
222 unsigned *bankh, unsigned *mtaspect, 269 unsigned *bankh, unsigned *mtaspect,
223 unsigned *tile_split); 270 unsigned *tile_split);
@@ -549,6 +596,20 @@ struct radeon_scratch {
549int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg); 596int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
550void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg); 597void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
551 598
599/*
600 * GPU doorbell structures, functions & helpers
601 */
602struct radeon_doorbell {
603 u32 num_pages;
604 bool free[1024];
605 /* doorbell mmio */
606 resource_size_t base;
607 resource_size_t size;
608 void __iomem *ptr;
609};
610
611int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
612void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
552 613
553/* 614/*
554 * IRQS. 615 * IRQS.
@@ -600,10 +661,21 @@ struct evergreen_irq_stat_regs {
600 u32 afmt_status6; 661 u32 afmt_status6;
601}; 662};
602 663
664struct cik_irq_stat_regs {
665 u32 disp_int;
666 u32 disp_int_cont;
667 u32 disp_int_cont2;
668 u32 disp_int_cont3;
669 u32 disp_int_cont4;
670 u32 disp_int_cont5;
671 u32 disp_int_cont6;
672};
673
603union radeon_irq_stat_regs { 674union radeon_irq_stat_regs {
604 struct r500_irq_stat_regs r500; 675 struct r500_irq_stat_regs r500;
605 struct r600_irq_stat_regs r600; 676 struct r600_irq_stat_regs r600;
606 struct evergreen_irq_stat_regs evergreen; 677 struct evergreen_irq_stat_regs evergreen;
678 struct cik_irq_stat_regs cik;
607}; 679};
608 680
609#define RADEON_MAX_HPD_PINS 6 681#define RADEON_MAX_HPD_PINS 6
@@ -620,6 +692,7 @@ struct radeon_irq {
620 bool hpd[RADEON_MAX_HPD_PINS]; 692 bool hpd[RADEON_MAX_HPD_PINS];
621 bool afmt[RADEON_MAX_AFMT_BLOCKS]; 693 bool afmt[RADEON_MAX_AFMT_BLOCKS];
622 union radeon_irq_stat_regs stat_regs; 694 union radeon_irq_stat_regs stat_regs;
695 bool dpm_thermal;
623}; 696};
624 697
625int radeon_irq_kms_init(struct radeon_device *rdev); 698int radeon_irq_kms_init(struct radeon_device *rdev);
@@ -677,6 +750,22 @@ struct radeon_ring {
677 u32 idx; 750 u32 idx;
678 u64 last_semaphore_signal_addr; 751 u64 last_semaphore_signal_addr;
679 u64 last_semaphore_wait_addr; 752 u64 last_semaphore_wait_addr;
753 /* for CIK queues */
754 u32 me;
755 u32 pipe;
756 u32 queue;
757 struct radeon_bo *mqd_obj;
758 u32 doorbell_page_num;
759 u32 doorbell_offset;
760 unsigned wptr_offs;
761};
762
763struct radeon_mec {
764 struct radeon_bo *hpd_eop_obj;
765 u64 hpd_eop_gpu_addr;
766 u32 num_pipe;
767 u32 num_mec;
768 u32 num_queue;
680}; 769};
681 770
682/* 771/*
@@ -778,15 +867,22 @@ struct r600_blit {
778}; 867};
779 868
780/* 869/*
781 * SI RLC stuff 870 * RLC stuff
782 */ 871 */
783struct si_rlc { 872#include "clearstate_defs.h"
873
874struct radeon_rlc {
784 /* for power gating */ 875 /* for power gating */
785 struct radeon_bo *save_restore_obj; 876 struct radeon_bo *save_restore_obj;
786 uint64_t save_restore_gpu_addr; 877 uint64_t save_restore_gpu_addr;
878 volatile uint32_t *sr_ptr;
879 u32 *reg_list;
880 u32 reg_list_size;
787 /* for clear state */ 881 /* for clear state */
788 struct radeon_bo *clear_state_obj; 882 struct radeon_bo *clear_state_obj;
789 uint64_t clear_state_gpu_addr; 883 uint64_t clear_state_gpu_addr;
884 volatile uint32_t *cs_ptr;
885 struct cs_section_def *cs_data;
790}; 886};
791 887
792int radeon_ib_get(struct radeon_device *rdev, int ring, 888int radeon_ib_get(struct radeon_device *rdev, int ring,
@@ -934,6 +1030,8 @@ struct radeon_wb {
934#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304 1030#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
935#define R600_WB_UVD_RPTR_OFFSET 2560 1031#define R600_WB_UVD_RPTR_OFFSET 2560
936#define R600_WB_EVENT_OFFSET 3072 1032#define R600_WB_EVENT_OFFSET 3072
1033#define CIK_WB_CP1_WPTR_OFFSET 3328
1034#define CIK_WB_CP2_WPTR_OFFSET 3584
937 1035
938/** 1036/**
939 * struct radeon_pm - power management datas 1037 * struct radeon_pm - power management datas
@@ -958,6 +1056,7 @@ struct radeon_wb {
958enum radeon_pm_method { 1056enum radeon_pm_method {
959 PM_METHOD_PROFILE, 1057 PM_METHOD_PROFILE,
960 PM_METHOD_DYNPM, 1058 PM_METHOD_DYNPM,
1059 PM_METHOD_DPM,
961}; 1060};
962 1061
963enum radeon_dynpm_state { 1062enum radeon_dynpm_state {
@@ -983,11 +1082,23 @@ enum radeon_voltage_type {
983}; 1082};
984 1083
985enum radeon_pm_state_type { 1084enum radeon_pm_state_type {
1085 /* not used for dpm */
986 POWER_STATE_TYPE_DEFAULT, 1086 POWER_STATE_TYPE_DEFAULT,
987 POWER_STATE_TYPE_POWERSAVE, 1087 POWER_STATE_TYPE_POWERSAVE,
1088 /* user selectable states */
988 POWER_STATE_TYPE_BATTERY, 1089 POWER_STATE_TYPE_BATTERY,
989 POWER_STATE_TYPE_BALANCED, 1090 POWER_STATE_TYPE_BALANCED,
990 POWER_STATE_TYPE_PERFORMANCE, 1091 POWER_STATE_TYPE_PERFORMANCE,
1092 /* internal states */
1093 POWER_STATE_TYPE_INTERNAL_UVD,
1094 POWER_STATE_TYPE_INTERNAL_UVD_SD,
1095 POWER_STATE_TYPE_INTERNAL_UVD_HD,
1096 POWER_STATE_TYPE_INTERNAL_UVD_HD2,
1097 POWER_STATE_TYPE_INTERNAL_UVD_MVC,
1098 POWER_STATE_TYPE_INTERNAL_BOOT,
1099 POWER_STATE_TYPE_INTERNAL_THERMAL,
1100 POWER_STATE_TYPE_INTERNAL_ACPI,
1101 POWER_STATE_TYPE_INTERNAL_ULV,
991}; 1102};
992 1103
993enum radeon_pm_profile_type { 1104enum radeon_pm_profile_type {
@@ -1016,12 +1127,17 @@ struct radeon_pm_profile {
1016 1127
1017enum radeon_int_thermal_type { 1128enum radeon_int_thermal_type {
1018 THERMAL_TYPE_NONE, 1129 THERMAL_TYPE_NONE,
1130 THERMAL_TYPE_EXTERNAL,
1131 THERMAL_TYPE_EXTERNAL_GPIO,
1019 THERMAL_TYPE_RV6XX, 1132 THERMAL_TYPE_RV6XX,
1020 THERMAL_TYPE_RV770, 1133 THERMAL_TYPE_RV770,
1134 THERMAL_TYPE_ADT7473_WITH_INTERNAL,
1021 THERMAL_TYPE_EVERGREEN, 1135 THERMAL_TYPE_EVERGREEN,
1022 THERMAL_TYPE_SUMO, 1136 THERMAL_TYPE_SUMO,
1023 THERMAL_TYPE_NI, 1137 THERMAL_TYPE_NI,
1024 THERMAL_TYPE_SI, 1138 THERMAL_TYPE_SI,
1139 THERMAL_TYPE_EMC2103_WITH_INTERNAL,
1140 THERMAL_TYPE_CI,
1025}; 1141};
1026 1142
1027struct radeon_voltage { 1143struct radeon_voltage {
@@ -1075,6 +1191,193 @@ struct radeon_power_state {
1075 */ 1191 */
1076#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */ 1192#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
1077 1193
1194enum radeon_dpm_auto_throttle_src {
1195 RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL,
1196 RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL
1197};
1198
1199enum radeon_dpm_event_src {
1200 RADEON_DPM_EVENT_SRC_ANALOG = 0,
1201 RADEON_DPM_EVENT_SRC_EXTERNAL = 1,
1202 RADEON_DPM_EVENT_SRC_DIGITAL = 2,
1203 RADEON_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
1204 RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
1205};
1206
1207struct radeon_ps {
1208 u32 caps; /* vbios flags */
1209 u32 class; /* vbios flags */
1210 u32 class2; /* vbios flags */
1211 /* UVD clocks */
1212 u32 vclk;
1213 u32 dclk;
1214 /* asic priv */
1215 void *ps_priv;
1216};
1217
1218struct radeon_dpm_thermal {
1219 /* thermal interrupt work */
1220 struct work_struct work;
1221 /* low temperature threshold */
1222 int min_temp;
1223 /* high temperature threshold */
1224 int max_temp;
1225 /* was interrupt low to high or high to low */
1226 bool high_to_low;
1227};
1228
1229enum radeon_clk_action
1230{
1231 RADEON_SCLK_UP = 1,
1232 RADEON_SCLK_DOWN
1233};
1234
1235struct radeon_blacklist_clocks
1236{
1237 u32 sclk;
1238 u32 mclk;
1239 enum radeon_clk_action action;
1240};
1241
1242struct radeon_clock_and_voltage_limits {
1243 u32 sclk;
1244 u32 mclk;
1245 u32 vddc;
1246 u32 vddci;
1247};
1248
1249struct radeon_clock_array {
1250 u32 count;
1251 u32 *values;
1252};
1253
1254struct radeon_clock_voltage_dependency_entry {
1255 u32 clk;
1256 u16 v;
1257};
1258
1259struct radeon_clock_voltage_dependency_table {
1260 u32 count;
1261 struct radeon_clock_voltage_dependency_entry *entries;
1262};
1263
1264struct radeon_cac_leakage_entry {
1265 u16 vddc;
1266 u32 leakage;
1267};
1268
1269struct radeon_cac_leakage_table {
1270 u32 count;
1271 struct radeon_cac_leakage_entry *entries;
1272};
1273
1274struct radeon_phase_shedding_limits_entry {
1275 u16 voltage;
1276 u32 sclk;
1277 u32 mclk;
1278};
1279
1280struct radeon_phase_shedding_limits_table {
1281 u32 count;
1282 struct radeon_phase_shedding_limits_entry *entries;
1283};
1284
1285struct radeon_ppm_table {
1286 u8 ppm_design;
1287 u16 cpu_core_number;
1288 u32 platform_tdp;
1289 u32 small_ac_platform_tdp;
1290 u32 platform_tdc;
1291 u32 small_ac_platform_tdc;
1292 u32 apu_tdp;
1293 u32 dgpu_tdp;
1294 u32 dgpu_ulv_power;
1295 u32 tj_max;
1296};
1297
1298struct radeon_dpm_dynamic_state {
1299 struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
1300 struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
1301 struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
1302 struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
1303 struct radeon_clock_array valid_sclk_values;
1304 struct radeon_clock_array valid_mclk_values;
1305 struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
1306 struct radeon_clock_and_voltage_limits max_clock_voltage_on_ac;
1307 u32 mclk_sclk_ratio;
1308 u32 sclk_mclk_delta;
1309 u16 vddc_vddci_delta;
1310 u16 min_vddc_for_pcie_gen2;
1311 struct radeon_cac_leakage_table cac_leakage_table;
1312 struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
1313 struct radeon_ppm_table *ppm_table;
1314};
1315
1316struct radeon_dpm_fan {
1317 u16 t_min;
1318 u16 t_med;
1319 u16 t_high;
1320 u16 pwm_min;
1321 u16 pwm_med;
1322 u16 pwm_high;
1323 u8 t_hyst;
1324 u32 cycle_delay;
1325 u16 t_max;
1326 bool ucode_fan_control;
1327};
1328
1329enum radeon_pcie_gen {
1330 RADEON_PCIE_GEN1 = 0,
1331 RADEON_PCIE_GEN2 = 1,
1332 RADEON_PCIE_GEN3 = 2,
1333 RADEON_PCIE_GEN_INVALID = 0xffff
1334};
1335
1336struct radeon_dpm {
1337 struct radeon_ps *ps;
1338 /* number of valid power states */
1339 int num_ps;
1340 /* current power state that is active */
1341 struct radeon_ps *current_ps;
1342 /* requested power state */
1343 struct radeon_ps *requested_ps;
1344 /* boot up power state */
1345 struct radeon_ps *boot_ps;
1346 /* default uvd power state */
1347 struct radeon_ps *uvd_ps;
1348 enum radeon_pm_state_type state;
1349 enum radeon_pm_state_type user_state;
1350 u32 platform_caps;
1351 u32 voltage_response_time;
1352 u32 backbias_response_time;
1353 void *priv;
1354 u32 new_active_crtcs;
1355 int new_active_crtc_count;
1356 u32 current_active_crtcs;
1357 int current_active_crtc_count;
1358 struct radeon_dpm_dynamic_state dyn_state;
1359 struct radeon_dpm_fan fan;
1360 u32 tdp_limit;
1361 u32 near_tdp_limit;
1362 u32 near_tdp_limit_adjusted;
1363 u32 sq_ramping_threshold;
1364 u32 cac_leakage;
1365 u16 tdp_od_limit;
1366 u32 tdp_adjustment;
1367 u16 load_line_slope;
1368 bool power_control;
1369 bool ac_power;
1370 /* special states active */
1371 bool thermal_active;
1372 bool uvd_active;
1373 /* thermal handling */
1374 struct radeon_dpm_thermal thermal;
1375};
1376
1377void radeon_dpm_enable_power_state(struct radeon_device *rdev,
1378 enum radeon_pm_state_type dpm_state);
1379
1380
1078struct radeon_pm { 1381struct radeon_pm {
1079 struct mutex mutex; 1382 struct mutex mutex;
1080 /* write locked while reprogramming mclk */ 1383 /* write locked while reprogramming mclk */
@@ -1128,6 +1431,9 @@ struct radeon_pm {
1128 /* internal thermal controller on rv6xx+ */ 1431 /* internal thermal controller on rv6xx+ */
1129 enum radeon_int_thermal_type int_thermal_type; 1432 enum radeon_int_thermal_type int_thermal_type;
1130 struct device *int_hwmon_dev; 1433 struct device *int_hwmon_dev;
1434 /* dpm */
1435 bool dpm_enabled;
1436 struct radeon_dpm dpm;
1131}; 1437};
1132 1438
1133int radeon_pm_get_type_index(struct radeon_device *rdev, 1439int radeon_pm_get_type_index(struct radeon_device *rdev,
@@ -1266,6 +1572,10 @@ struct radeon_asic {
1266 int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp); 1572 int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1267 bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp); 1573 bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
1268 void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 1574 void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
1575
1576 u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1577 u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1578 void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1269 } ring[RADEON_NUM_RINGS]; 1579 } ring[RADEON_NUM_RINGS];
1270 /* irqs */ 1580 /* irqs */
1271 struct { 1581 struct {
@@ -1325,7 +1635,7 @@ struct radeon_asic {
1325 bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 1635 bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
1326 void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 1636 void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
1327 } hpd; 1637 } hpd;
1328 /* power management */ 1638 /* static power management */
1329 struct { 1639 struct {
1330 void (*misc)(struct radeon_device *rdev); 1640 void (*misc)(struct radeon_device *rdev);
1331 void (*prepare)(struct radeon_device *rdev); 1641 void (*prepare)(struct radeon_device *rdev);
@@ -1340,7 +1650,23 @@ struct radeon_asic {
1340 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); 1650 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
1341 void (*set_clock_gating)(struct radeon_device *rdev, int enable); 1651 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
1342 int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk); 1652 int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
1653 int (*get_temperature)(struct radeon_device *rdev);
1343 } pm; 1654 } pm;
1655 /* dynamic power management */
1656 struct {
1657 int (*init)(struct radeon_device *rdev);
1658 void (*setup_asic)(struct radeon_device *rdev);
1659 int (*enable)(struct radeon_device *rdev);
1660 void (*disable)(struct radeon_device *rdev);
1661 int (*pre_set_power_state)(struct radeon_device *rdev);
1662 int (*set_power_state)(struct radeon_device *rdev);
1663 void (*post_set_power_state)(struct radeon_device *rdev);
1664 void (*display_configuration_changed)(struct radeon_device *rdev);
1665 void (*fini)(struct radeon_device *rdev);
1666 u32 (*get_sclk)(struct radeon_device *rdev, bool low);
1667 u32 (*get_mclk)(struct radeon_device *rdev, bool low);
1668 void (*print_power_state)(struct radeon_device *rdev, struct radeon_ps *ps);
1669 } dpm;
1344 /* pageflipping */ 1670 /* pageflipping */
1345 struct { 1671 struct {
1346 void (*pre_page_flip)(struct radeon_device *rdev, int crtc); 1672 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
@@ -1505,6 +1831,36 @@ struct si_asic {
1505 uint32_t tile_mode_array[32]; 1831 uint32_t tile_mode_array[32];
1506}; 1832};
1507 1833
1834struct cik_asic {
1835 unsigned max_shader_engines;
1836 unsigned max_tile_pipes;
1837 unsigned max_cu_per_sh;
1838 unsigned max_sh_per_se;
1839 unsigned max_backends_per_se;
1840 unsigned max_texture_channel_caches;
1841 unsigned max_gprs;
1842 unsigned max_gs_threads;
1843 unsigned max_hw_contexts;
1844 unsigned sc_prim_fifo_size_frontend;
1845 unsigned sc_prim_fifo_size_backend;
1846 unsigned sc_hiz_tile_fifo_size;
1847 unsigned sc_earlyz_tile_fifo_size;
1848
1849 unsigned num_tile_pipes;
1850 unsigned num_backends_per_se;
1851 unsigned backend_disable_mask_per_asic;
1852 unsigned backend_map;
1853 unsigned num_texture_channel_caches;
1854 unsigned mem_max_burst_length_bytes;
1855 unsigned mem_row_size_in_kb;
1856 unsigned shader_engine_tile_size;
1857 unsigned num_gpus;
1858 unsigned multi_gpu_tile_size;
1859
1860 unsigned tile_config;
1861 uint32_t tile_mode_array[32];
1862};
1863
1508union radeon_asic_config { 1864union radeon_asic_config {
1509 struct r300_asic r300; 1865 struct r300_asic r300;
1510 struct r100_asic r100; 1866 struct r100_asic r100;
@@ -1513,6 +1869,7 @@ union radeon_asic_config {
1513 struct evergreen_asic evergreen; 1869 struct evergreen_asic evergreen;
1514 struct cayman_asic cayman; 1870 struct cayman_asic cayman;
1515 struct si_asic si; 1871 struct si_asic si;
1872 struct cik_asic cik;
1516}; 1873};
1517 1874
1518/* 1875/*
@@ -1657,6 +2014,7 @@ struct radeon_device {
1657 struct radeon_gart gart; 2014 struct radeon_gart gart;
1658 struct radeon_mode_info mode_info; 2015 struct radeon_mode_info mode_info;
1659 struct radeon_scratch scratch; 2016 struct radeon_scratch scratch;
2017 struct radeon_doorbell doorbell;
1660 struct radeon_mman mman; 2018 struct radeon_mman mman;
1661 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; 2019 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
1662 wait_queue_head_t fence_queue; 2020 wait_queue_head_t fence_queue;
@@ -1684,13 +2042,18 @@ struct radeon_device {
1684 const struct firmware *mc_fw; /* NI MC firmware */ 2042 const struct firmware *mc_fw; /* NI MC firmware */
1685 const struct firmware *ce_fw; /* SI CE firmware */ 2043 const struct firmware *ce_fw; /* SI CE firmware */
1686 const struct firmware *uvd_fw; /* UVD firmware */ 2044 const struct firmware *uvd_fw; /* UVD firmware */
2045 const struct firmware *mec_fw; /* CIK MEC firmware */
2046 const struct firmware *sdma_fw; /* CIK SDMA firmware */
2047 const struct firmware *smc_fw; /* SMC firmware */
1687 struct r600_blit r600_blit; 2048 struct r600_blit r600_blit;
1688 struct r600_vram_scratch vram_scratch; 2049 struct r600_vram_scratch vram_scratch;
1689 int msi_enabled; /* msi enabled */ 2050 int msi_enabled; /* msi enabled */
1690 struct r600_ih ih; /* r6/700 interrupt ring */ 2051 struct r600_ih ih; /* r6/700 interrupt ring */
1691 struct si_rlc rlc; 2052 struct radeon_rlc rlc;
2053 struct radeon_mec mec;
1692 struct work_struct hotplug_work; 2054 struct work_struct hotplug_work;
1693 struct work_struct audio_work; 2055 struct work_struct audio_work;
2056 struct work_struct reset_work;
1694 int num_crtc; /* number of crtcs */ 2057 int num_crtc; /* number of crtcs */
1695 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ 2058 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
1696 bool audio_enabled; 2059 bool audio_enabled;
@@ -1727,6 +2090,9 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
1727u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); 2090u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
1728void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); 2091void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1729 2092
2093u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset);
2094void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
2095
1730/* 2096/*
1731 * Cast helper 2097 * Cast helper
1732 */ 2098 */
@@ -1754,6 +2120,18 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1754#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v)) 2120#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
1755#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg)) 2121#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
1756#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v)) 2122#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
2123#define RREG32_SMC(reg) tn_smc_rreg(rdev, (reg))
2124#define WREG32_SMC(reg, v) tn_smc_wreg(rdev, (reg), (v))
2125#define RREG32_RCU(reg) r600_rcu_rreg(rdev, (reg))
2126#define WREG32_RCU(reg, v) r600_rcu_wreg(rdev, (reg), (v))
2127#define RREG32_CG(reg) eg_cg_rreg(rdev, (reg))
2128#define WREG32_CG(reg, v) eg_cg_wreg(rdev, (reg), (v))
2129#define RREG32_PIF_PHY0(reg) eg_pif_phy0_rreg(rdev, (reg))
2130#define WREG32_PIF_PHY0(reg, v) eg_pif_phy0_wreg(rdev, (reg), (v))
2131#define RREG32_PIF_PHY1(reg) eg_pif_phy1_rreg(rdev, (reg))
2132#define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
2133#define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
2134#define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
1757#define WREG32_P(reg, val, mask) \ 2135#define WREG32_P(reg, val, mask) \
1758 do { \ 2136 do { \
1759 uint32_t tmp_ = RREG32(reg); \ 2137 uint32_t tmp_ = RREG32(reg); \
@@ -1774,6 +2152,9 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1774#define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) 2152#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
1775#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) 2153#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
1776 2154
2155#define RDOORBELL32(offset) cik_mm_rdoorbell(rdev, (offset))
2156#define WDOORBELL32(offset, v) cik_mm_wdoorbell(rdev, (offset), (v))
2157
1777/* 2158/*
1778 * Indirect registers accessor 2159 * Indirect registers accessor
1779 */ 2160 */
@@ -1792,6 +2173,96 @@ static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uin
1792 WREG32(RADEON_PCIE_DATA, (v)); 2173 WREG32(RADEON_PCIE_DATA, (v));
1793} 2174}
1794 2175
2176static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
2177{
2178 u32 r;
2179
2180 WREG32(TN_SMC_IND_INDEX_0, (reg));
2181 r = RREG32(TN_SMC_IND_DATA_0);
2182 return r;
2183}
2184
2185static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2186{
2187 WREG32(TN_SMC_IND_INDEX_0, (reg));
2188 WREG32(TN_SMC_IND_DATA_0, (v));
2189}
2190
2191static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
2192{
2193 u32 r;
2194
2195 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
2196 r = RREG32(R600_RCU_DATA);
2197 return r;
2198}
2199
2200static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2201{
2202 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
2203 WREG32(R600_RCU_DATA, (v));
2204}
2205
2206static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
2207{
2208 u32 r;
2209
2210 WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
2211 r = RREG32(EVERGREEN_CG_IND_DATA);
2212 return r;
2213}
2214
2215static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2216{
2217 WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
2218 WREG32(EVERGREEN_CG_IND_DATA, (v));
2219}
2220
2221static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
2222{
2223 u32 r;
2224
2225 WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
2226 r = RREG32(EVERGREEN_PIF_PHY0_DATA);
2227 return r;
2228}
2229
2230static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2231{
2232 WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
2233 WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
2234}
2235
2236static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
2237{
2238 u32 r;
2239
2240 WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
2241 r = RREG32(EVERGREEN_PIF_PHY1_DATA);
2242 return r;
2243}
2244
2245static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2246{
2247 WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
2248 WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
2249}
2250
2251static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
2252{
2253 u32 r;
2254
2255 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
2256 r = RREG32(R600_UVD_CTX_DATA);
2257 return r;
2258}
2259
2260static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2261{
2262 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
2263 WREG32(R600_UVD_CTX_DATA, (v));
2264}
2265
1795void r100_pll_errata_after_index(struct radeon_device *rdev); 2266void r100_pll_errata_after_index(struct radeon_device *rdev);
1796 2267
1797 2268
@@ -1840,6 +2311,16 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
1840 (rdev->flags & RADEON_IS_IGP)) 2311 (rdev->flags & RADEON_IS_IGP))
1841#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND)) 2312#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
1842#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN)) 2313#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
2314#define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
2315
2316#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
2317 (rdev->ddev->pdev->device == 0x6850) || \
2318 (rdev->ddev->pdev->device == 0x6858) || \
2319 (rdev->ddev->pdev->device == 0x6859) || \
2320 (rdev->ddev->pdev->device == 0x6840) || \
2321 (rdev->ddev->pdev->device == 0x6841) || \
2322 (rdev->ddev->pdev->device == 0x6842) || \
2323 (rdev->ddev->pdev->device == 0x6843))
1843 2324
1844/* 2325/*
1845 * BIOS helpers. 2326 * BIOS helpers.
@@ -1892,6 +2373,9 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1892#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib)) 2373#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
1893#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp)) 2374#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
1894#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm)) 2375#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
2376#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_rptr((rdev), (r))
2377#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_wptr((rdev), (r))
2378#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].set_wptr((rdev), (r))
1895#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev)) 2379#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
1896#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev)) 2380#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
1897#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc)) 2381#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
@@ -1915,6 +2399,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1915#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l)) 2399#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
1916#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e)) 2400#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
1917#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d)) 2401#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
2402#define radeon_get_temperature(rdev) (rdev)->asic->pm.get_temperature((rdev))
1918#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s))) 2403#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
1919#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r))) 2404#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
1920#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev)) 2405#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
@@ -1935,6 +2420,18 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1935#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) 2420#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
1936#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev)) 2421#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
1937#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev)) 2422#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
2423#define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
2424#define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
2425#define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
2426#define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev))
2427#define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev))
2428#define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev))
2429#define radeon_dpm_post_set_power_state(rdev) rdev->asic->dpm.post_set_power_state((rdev))
2430#define radeon_dpm_display_configuration_changed(rdev) rdev->asic->dpm.display_configuration_changed((rdev))
2431#define radeon_dpm_fini(rdev) rdev->asic->dpm.fini((rdev))
2432#define radeon_dpm_get_sclk(rdev, l) rdev->asic->dpm.get_sclk((rdev), (l))
2433#define radeon_dpm_get_mclk(rdev, l) rdev->asic->dpm.get_mclk((rdev), (l))
2434#define radeon_dpm_print_power_state(rdev, ps) rdev->asic->dpm.print_power_state((rdev), (ps))
1938 2435
1939/* Common functions */ 2436/* Common functions */
1940/* AGP */ 2437/* AGP */
@@ -2054,6 +2551,10 @@ extern int ni_mc_load_microcode(struct radeon_device *rdev);
2054#if defined(CONFIG_ACPI) 2551#if defined(CONFIG_ACPI)
2055extern int radeon_acpi_init(struct radeon_device *rdev); 2552extern int radeon_acpi_init(struct radeon_device *rdev);
2056extern void radeon_acpi_fini(struct radeon_device *rdev); 2553extern void radeon_acpi_fini(struct radeon_device *rdev);
2554extern bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev);
2555extern int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
2556 u8 perf_req, bool advertise);
2557extern int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev);
2057#else 2558#else
2058static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; } 2559static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
2059static inline void radeon_acpi_fini(struct radeon_device *rdev) { } 2560static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 196d28d99570..10f98c7742d8 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -78,6 +78,22 @@ struct atcs_verify_interface {
78 u32 function_bits; /* supported functions bit vector */ 78 u32 function_bits; /* supported functions bit vector */
79} __packed; 79} __packed;
80 80
81#define ATCS_VALID_FLAGS_MASK 0x3
82
83struct atcs_pref_req_input {
84 u16 size; /* structure size in bytes (includes size field) */
85 u16 client_id; /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
86 u16 valid_flags_mask; /* valid flags mask */
87 u16 flags; /* flags */
88 u8 req_type; /* request type */
89 u8 perf_req; /* performance request */
90} __packed;
91
92struct atcs_pref_req_output {
93 u16 size; /* structure size in bytes (includes size field) */
94 u8 ret_val; /* return value */
95} __packed;
96
81/* Call the ATIF method 97/* Call the ATIF method
82 */ 98 */
83/** 99/**
@@ -506,6 +522,135 @@ out:
506} 522}
507 523
508/** 524/**
525 * radeon_acpi_is_pcie_performance_request_supported
526 *
527 * @rdev: radeon_device pointer
528 *
529 * Check if the ATCS pcie_perf_req and pcie_dev_rdy methods
530 * are supported (all asics).
531 * returns true if supported, false if not.
532 */
533bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev)
534{
535 struct radeon_atcs *atcs = &rdev->atcs;
536
537 if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy)
538 return true;
539
540 return false;
541}
542
543/**
544 * radeon_acpi_pcie_notify_device_ready
545 *
546 * @rdev: radeon_device pointer
547 *
548 * Executes the PCIE_DEVICE_READY_NOTIFICATION method
549 * (all asics).
550 * returns 0 on success, error on failure.
551 */
552int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev)
553{
554 acpi_handle handle;
555 union acpi_object *info;
556 struct radeon_atcs *atcs = &rdev->atcs;
557
558 /* Get the device handle */
559 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
560 if (!handle)
561 return -EINVAL;
562
563 if (!atcs->functions.pcie_dev_rdy)
564 return -EINVAL;
565
566 info = radeon_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
567 if (!info)
568 return -EIO;
569
570 kfree(info);
571
572 return 0;
573}
574
575/**
576 * radeon_acpi_pcie_performance_request
577 *
578 * @rdev: radeon_device pointer
579 * @perf_req: requested perf level (pcie gen speed)
580 * @advertise: set advertise caps flag if set
581 *
582 * Executes the PCIE_PERFORMANCE_REQUEST method to
583 * change the pcie gen speed (all asics).
584 * returns 0 on success, error on failure.
585 */
586int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
587 u8 perf_req, bool advertise)
588{
589 acpi_handle handle;
590 union acpi_object *info;
591 struct radeon_atcs *atcs = &rdev->atcs;
592 struct atcs_pref_req_input atcs_input;
593 struct atcs_pref_req_output atcs_output;
594 struct acpi_buffer params;
595 size_t size;
596 u32 retry = 3;
597
598 /* Get the device handle */
599 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
600 if (!handle)
601 return -EINVAL;
602
603 if (!atcs->functions.pcie_perf_req)
604 return -EINVAL;
605
606 atcs_input.size = sizeof(struct atcs_pref_req_input);
607 /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
608 atcs_input.client_id = rdev->pdev->devfn | (rdev->pdev->bus->number << 8);
609 atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
610 atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
611 if (advertise)
612 atcs_input.flags |= ATCS_ADVERTISE_CAPS;
613 atcs_input.req_type = ATCS_PCIE_LINK_SPEED;
614 atcs_input.perf_req = perf_req;
615
616 params.length = sizeof(struct atcs_pref_req_input);
617 params.pointer = &atcs_input;
618
619 while (retry--) {
620 info = radeon_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
621 if (!info)
622 return -EIO;
623
624 memset(&atcs_output, 0, sizeof(atcs_output));
625
626 size = *(u16 *) info->buffer.pointer;
627 if (size < 3) {
628 DRM_INFO("ATCS buffer is too small: %zu\n", size);
629 kfree(info);
630 return -EINVAL;
631 }
632 size = min(sizeof(atcs_output), size);
633
634 memcpy(&atcs_output, info->buffer.pointer, size);
635
636 kfree(info);
637
638 switch (atcs_output.ret_val) {
639 case ATCS_REQUEST_REFUSED:
640 default:
641 return -EINVAL;
642 case ATCS_REQUEST_COMPLETE:
643 return 0;
644 case ATCS_REQUEST_IN_PROGRESS:
645 udelay(10);
646 break;
647 }
648 }
649
650 return 0;
651}
652
653/**
509 * radeon_acpi_event - handle notify events 654 * radeon_acpi_event - handle notify events
510 * 655 *
511 * @nb: notifier block 656 * @nb: notifier block
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a2802b47ee95..c3df589715a0 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -126,7 +126,11 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
126 rdev->mc_rreg = &rs780_mc_rreg; 126 rdev->mc_rreg = &rs780_mc_rreg;
127 rdev->mc_wreg = &rs780_mc_wreg; 127 rdev->mc_wreg = &rs780_mc_wreg;
128 } 128 }
129 if (rdev->family >= CHIP_R600) { 129
130 if (rdev->family >= CHIP_BONAIRE) {
131 rdev->pciep_rreg = &cik_pciep_rreg;
132 rdev->pciep_wreg = &cik_pciep_wreg;
133 } else if (rdev->family >= CHIP_R600) {
130 rdev->pciep_rreg = &r600_pciep_rreg; 134 rdev->pciep_rreg = &r600_pciep_rreg;
131 rdev->pciep_wreg = &r600_pciep_wreg; 135 rdev->pciep_wreg = &r600_pciep_wreg;
132 } 136 }
@@ -192,6 +196,9 @@ static struct radeon_asic r100_asic = {
192 .ring_test = &r100_ring_test, 196 .ring_test = &r100_ring_test,
193 .ib_test = &r100_ib_test, 197 .ib_test = &r100_ib_test,
194 .is_lockup = &r100_gpu_is_lockup, 198 .is_lockup = &r100_gpu_is_lockup,
199 .get_rptr = &radeon_ring_generic_get_rptr,
200 .get_wptr = &radeon_ring_generic_get_wptr,
201 .set_wptr = &radeon_ring_generic_set_wptr,
195 } 202 }
196 }, 203 },
197 .irq = { 204 .irq = {
@@ -268,6 +275,9 @@ static struct radeon_asic r200_asic = {
268 .ring_test = &r100_ring_test, 275 .ring_test = &r100_ring_test,
269 .ib_test = &r100_ib_test, 276 .ib_test = &r100_ib_test,
270 .is_lockup = &r100_gpu_is_lockup, 277 .is_lockup = &r100_gpu_is_lockup,
278 .get_rptr = &radeon_ring_generic_get_rptr,
279 .get_wptr = &radeon_ring_generic_get_wptr,
280 .set_wptr = &radeon_ring_generic_set_wptr,
271 } 281 }
272 }, 282 },
273 .irq = { 283 .irq = {
@@ -344,6 +354,9 @@ static struct radeon_asic r300_asic = {
344 .ring_test = &r100_ring_test, 354 .ring_test = &r100_ring_test,
345 .ib_test = &r100_ib_test, 355 .ib_test = &r100_ib_test,
346 .is_lockup = &r100_gpu_is_lockup, 356 .is_lockup = &r100_gpu_is_lockup,
357 .get_rptr = &radeon_ring_generic_get_rptr,
358 .get_wptr = &radeon_ring_generic_get_wptr,
359 .set_wptr = &radeon_ring_generic_set_wptr,
347 } 360 }
348 }, 361 },
349 .irq = { 362 .irq = {
@@ -420,6 +433,9 @@ static struct radeon_asic r300_asic_pcie = {
420 .ring_test = &r100_ring_test, 433 .ring_test = &r100_ring_test,
421 .ib_test = &r100_ib_test, 434 .ib_test = &r100_ib_test,
422 .is_lockup = &r100_gpu_is_lockup, 435 .is_lockup = &r100_gpu_is_lockup,
436 .get_rptr = &radeon_ring_generic_get_rptr,
437 .get_wptr = &radeon_ring_generic_get_wptr,
438 .set_wptr = &radeon_ring_generic_set_wptr,
423 } 439 }
424 }, 440 },
425 .irq = { 441 .irq = {
@@ -496,6 +512,9 @@ static struct radeon_asic r420_asic = {
496 .ring_test = &r100_ring_test, 512 .ring_test = &r100_ring_test,
497 .ib_test = &r100_ib_test, 513 .ib_test = &r100_ib_test,
498 .is_lockup = &r100_gpu_is_lockup, 514 .is_lockup = &r100_gpu_is_lockup,
515 .get_rptr = &radeon_ring_generic_get_rptr,
516 .get_wptr = &radeon_ring_generic_get_wptr,
517 .set_wptr = &radeon_ring_generic_set_wptr,
499 } 518 }
500 }, 519 },
501 .irq = { 520 .irq = {
@@ -572,6 +591,9 @@ static struct radeon_asic rs400_asic = {
572 .ring_test = &r100_ring_test, 591 .ring_test = &r100_ring_test,
573 .ib_test = &r100_ib_test, 592 .ib_test = &r100_ib_test,
574 .is_lockup = &r100_gpu_is_lockup, 593 .is_lockup = &r100_gpu_is_lockup,
594 .get_rptr = &radeon_ring_generic_get_rptr,
595 .get_wptr = &radeon_ring_generic_get_wptr,
596 .set_wptr = &radeon_ring_generic_set_wptr,
575 } 597 }
576 }, 598 },
577 .irq = { 599 .irq = {
@@ -648,6 +670,9 @@ static struct radeon_asic rs600_asic = {
648 .ring_test = &r100_ring_test, 670 .ring_test = &r100_ring_test,
649 .ib_test = &r100_ib_test, 671 .ib_test = &r100_ib_test,
650 .is_lockup = &r100_gpu_is_lockup, 672 .is_lockup = &r100_gpu_is_lockup,
673 .get_rptr = &radeon_ring_generic_get_rptr,
674 .get_wptr = &radeon_ring_generic_get_wptr,
675 .set_wptr = &radeon_ring_generic_set_wptr,
651 } 676 }
652 }, 677 },
653 .irq = { 678 .irq = {
@@ -726,6 +751,9 @@ static struct radeon_asic rs690_asic = {
726 .ring_test = &r100_ring_test, 751 .ring_test = &r100_ring_test,
727 .ib_test = &r100_ib_test, 752 .ib_test = &r100_ib_test,
728 .is_lockup = &r100_gpu_is_lockup, 753 .is_lockup = &r100_gpu_is_lockup,
754 .get_rptr = &radeon_ring_generic_get_rptr,
755 .get_wptr = &radeon_ring_generic_get_wptr,
756 .set_wptr = &radeon_ring_generic_set_wptr,
729 } 757 }
730 }, 758 },
731 .irq = { 759 .irq = {
@@ -804,6 +832,9 @@ static struct radeon_asic rv515_asic = {
804 .ring_test = &r100_ring_test, 832 .ring_test = &r100_ring_test,
805 .ib_test = &r100_ib_test, 833 .ib_test = &r100_ib_test,
806 .is_lockup = &r100_gpu_is_lockup, 834 .is_lockup = &r100_gpu_is_lockup,
835 .get_rptr = &radeon_ring_generic_get_rptr,
836 .get_wptr = &radeon_ring_generic_get_wptr,
837 .set_wptr = &radeon_ring_generic_set_wptr,
807 } 838 }
808 }, 839 },
809 .irq = { 840 .irq = {
@@ -880,6 +911,9 @@ static struct radeon_asic r520_asic = {
880 .ring_test = &r100_ring_test, 911 .ring_test = &r100_ring_test,
881 .ib_test = &r100_ib_test, 912 .ib_test = &r100_ib_test,
882 .is_lockup = &r100_gpu_is_lockup, 913 .is_lockup = &r100_gpu_is_lockup,
914 .get_rptr = &radeon_ring_generic_get_rptr,
915 .get_wptr = &radeon_ring_generic_get_wptr,
916 .set_wptr = &radeon_ring_generic_set_wptr,
883 } 917 }
884 }, 918 },
885 .irq = { 919 .irq = {
@@ -957,6 +991,9 @@ static struct radeon_asic r600_asic = {
957 .ring_test = &r600_ring_test, 991 .ring_test = &r600_ring_test,
958 .ib_test = &r600_ib_test, 992 .ib_test = &r600_ib_test,
959 .is_lockup = &r600_gfx_is_lockup, 993 .is_lockup = &r600_gfx_is_lockup,
994 .get_rptr = &radeon_ring_generic_get_rptr,
995 .get_wptr = &radeon_ring_generic_get_wptr,
996 .set_wptr = &radeon_ring_generic_set_wptr,
960 }, 997 },
961 [R600_RING_TYPE_DMA_INDEX] = { 998 [R600_RING_TYPE_DMA_INDEX] = {
962 .ib_execute = &r600_dma_ring_ib_execute, 999 .ib_execute = &r600_dma_ring_ib_execute,
@@ -966,6 +1003,9 @@ static struct radeon_asic r600_asic = {
966 .ring_test = &r600_dma_ring_test, 1003 .ring_test = &r600_dma_ring_test,
967 .ib_test = &r600_dma_ib_test, 1004 .ib_test = &r600_dma_ib_test,
968 .is_lockup = &r600_dma_is_lockup, 1005 .is_lockup = &r600_dma_is_lockup,
1006 .get_rptr = &radeon_ring_generic_get_rptr,
1007 .get_wptr = &radeon_ring_generic_get_wptr,
1008 .set_wptr = &radeon_ring_generic_set_wptr,
969 } 1009 }
970 }, 1010 },
971 .irq = { 1011 .irq = {
@@ -1012,6 +1052,114 @@ static struct radeon_asic r600_asic = {
1012 .get_pcie_lanes = &r600_get_pcie_lanes, 1052 .get_pcie_lanes = &r600_get_pcie_lanes,
1013 .set_pcie_lanes = &r600_set_pcie_lanes, 1053 .set_pcie_lanes = &r600_set_pcie_lanes,
1014 .set_clock_gating = NULL, 1054 .set_clock_gating = NULL,
1055 .get_temperature = &rv6xx_get_temp,
1056 },
1057 .pflip = {
1058 .pre_page_flip = &rs600_pre_page_flip,
1059 .page_flip = &rs600_page_flip,
1060 .post_page_flip = &rs600_post_page_flip,
1061 },
1062};
1063
1064static struct radeon_asic rv6xx_asic = {
1065 .init = &r600_init,
1066 .fini = &r600_fini,
1067 .suspend = &r600_suspend,
1068 .resume = &r600_resume,
1069 .vga_set_state = &r600_vga_set_state,
1070 .asic_reset = &r600_asic_reset,
1071 .ioctl_wait_idle = r600_ioctl_wait_idle,
1072 .gui_idle = &r600_gui_idle,
1073 .mc_wait_for_idle = &r600_mc_wait_for_idle,
1074 .get_xclk = &r600_get_xclk,
1075 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1076 .gart = {
1077 .tlb_flush = &r600_pcie_gart_tlb_flush,
1078 .set_page = &rs600_gart_set_page,
1079 },
1080 .ring = {
1081 [RADEON_RING_TYPE_GFX_INDEX] = {
1082 .ib_execute = &r600_ring_ib_execute,
1083 .emit_fence = &r600_fence_ring_emit,
1084 .emit_semaphore = &r600_semaphore_ring_emit,
1085 .cs_parse = &r600_cs_parse,
1086 .ring_test = &r600_ring_test,
1087 .ib_test = &r600_ib_test,
1088 .is_lockup = &r600_gfx_is_lockup,
1089 .get_rptr = &radeon_ring_generic_get_rptr,
1090 .get_wptr = &radeon_ring_generic_get_wptr,
1091 .set_wptr = &radeon_ring_generic_set_wptr,
1092 },
1093 [R600_RING_TYPE_DMA_INDEX] = {
1094 .ib_execute = &r600_dma_ring_ib_execute,
1095 .emit_fence = &r600_dma_fence_ring_emit,
1096 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1097 .cs_parse = &r600_dma_cs_parse,
1098 .ring_test = &r600_dma_ring_test,
1099 .ib_test = &r600_dma_ib_test,
1100 .is_lockup = &r600_dma_is_lockup,
1101 .get_rptr = &radeon_ring_generic_get_rptr,
1102 .get_wptr = &radeon_ring_generic_get_wptr,
1103 .set_wptr = &radeon_ring_generic_set_wptr,
1104 }
1105 },
1106 .irq = {
1107 .set = &r600_irq_set,
1108 .process = &r600_irq_process,
1109 },
1110 .display = {
1111 .bandwidth_update = &rv515_bandwidth_update,
1112 .get_vblank_counter = &rs600_get_vblank_counter,
1113 .wait_for_vblank = &avivo_wait_for_vblank,
1114 .set_backlight_level = &atombios_set_backlight_level,
1115 .get_backlight_level = &atombios_get_backlight_level,
1116 },
1117 .copy = {
1118 .blit = &r600_copy_blit,
1119 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1120 .dma = &r600_copy_dma,
1121 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1122 .copy = &r600_copy_dma,
1123 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1124 },
1125 .surface = {
1126 .set_reg = r600_set_surface_reg,
1127 .clear_reg = r600_clear_surface_reg,
1128 },
1129 .hpd = {
1130 .init = &r600_hpd_init,
1131 .fini = &r600_hpd_fini,
1132 .sense = &r600_hpd_sense,
1133 .set_polarity = &r600_hpd_set_polarity,
1134 },
1135 .pm = {
1136 .misc = &r600_pm_misc,
1137 .prepare = &rs600_pm_prepare,
1138 .finish = &rs600_pm_finish,
1139 .init_profile = &r600_pm_init_profile,
1140 .get_dynpm_state = &r600_pm_get_dynpm_state,
1141 .get_engine_clock = &radeon_atom_get_engine_clock,
1142 .set_engine_clock = &radeon_atom_set_engine_clock,
1143 .get_memory_clock = &radeon_atom_get_memory_clock,
1144 .set_memory_clock = &radeon_atom_set_memory_clock,
1145 .get_pcie_lanes = &r600_get_pcie_lanes,
1146 .set_pcie_lanes = &r600_set_pcie_lanes,
1147 .set_clock_gating = NULL,
1148 .get_temperature = &rv6xx_get_temp,
1149 },
1150 .dpm = {
1151 .init = &rv6xx_dpm_init,
1152 .setup_asic = &rv6xx_setup_asic,
1153 .enable = &rv6xx_dpm_enable,
1154 .disable = &rv6xx_dpm_disable,
1155 .pre_set_power_state = &r600_dpm_pre_set_power_state,
1156 .set_power_state = &rv6xx_dpm_set_power_state,
1157 .post_set_power_state = &r600_dpm_post_set_power_state,
1158 .display_configuration_changed = &rv6xx_dpm_display_configuration_changed,
1159 .fini = &rv6xx_dpm_fini,
1160 .get_sclk = &rv6xx_dpm_get_sclk,
1161 .get_mclk = &rv6xx_dpm_get_mclk,
1162 .print_power_state = &rv6xx_dpm_print_power_state,
1015 }, 1163 },
1016 .pflip = { 1164 .pflip = {
1017 .pre_page_flip = &rs600_pre_page_flip, 1165 .pre_page_flip = &rs600_pre_page_flip,
@@ -1045,6 +1193,9 @@ static struct radeon_asic rs780_asic = {
1045 .ring_test = &r600_ring_test, 1193 .ring_test = &r600_ring_test,
1046 .ib_test = &r600_ib_test, 1194 .ib_test = &r600_ib_test,
1047 .is_lockup = &r600_gfx_is_lockup, 1195 .is_lockup = &r600_gfx_is_lockup,
1196 .get_rptr = &radeon_ring_generic_get_rptr,
1197 .get_wptr = &radeon_ring_generic_get_wptr,
1198 .set_wptr = &radeon_ring_generic_set_wptr,
1048 }, 1199 },
1049 [R600_RING_TYPE_DMA_INDEX] = { 1200 [R600_RING_TYPE_DMA_INDEX] = {
1050 .ib_execute = &r600_dma_ring_ib_execute, 1201 .ib_execute = &r600_dma_ring_ib_execute,
@@ -1054,6 +1205,9 @@ static struct radeon_asic rs780_asic = {
1054 .ring_test = &r600_dma_ring_test, 1205 .ring_test = &r600_dma_ring_test,
1055 .ib_test = &r600_dma_ib_test, 1206 .ib_test = &r600_dma_ib_test,
1056 .is_lockup = &r600_dma_is_lockup, 1207 .is_lockup = &r600_dma_is_lockup,
1208 .get_rptr = &radeon_ring_generic_get_rptr,
1209 .get_wptr = &radeon_ring_generic_get_wptr,
1210 .set_wptr = &radeon_ring_generic_set_wptr,
1057 } 1211 }
1058 }, 1212 },
1059 .irq = { 1213 .irq = {
@@ -1100,6 +1254,21 @@ static struct radeon_asic rs780_asic = {
1100 .get_pcie_lanes = NULL, 1254 .get_pcie_lanes = NULL,
1101 .set_pcie_lanes = NULL, 1255 .set_pcie_lanes = NULL,
1102 .set_clock_gating = NULL, 1256 .set_clock_gating = NULL,
1257 .get_temperature = &rv6xx_get_temp,
1258 },
1259 .dpm = {
1260 .init = &rs780_dpm_init,
1261 .setup_asic = &rs780_dpm_setup_asic,
1262 .enable = &rs780_dpm_enable,
1263 .disable = &rs780_dpm_disable,
1264 .pre_set_power_state = &r600_dpm_pre_set_power_state,
1265 .set_power_state = &rs780_dpm_set_power_state,
1266 .post_set_power_state = &r600_dpm_post_set_power_state,
1267 .display_configuration_changed = &rs780_dpm_display_configuration_changed,
1268 .fini = &rs780_dpm_fini,
1269 .get_sclk = &rs780_dpm_get_sclk,
1270 .get_mclk = &rs780_dpm_get_mclk,
1271 .print_power_state = &rs780_dpm_print_power_state,
1103 }, 1272 },
1104 .pflip = { 1273 .pflip = {
1105 .pre_page_flip = &rs600_pre_page_flip, 1274 .pre_page_flip = &rs600_pre_page_flip,
@@ -1133,6 +1302,9 @@ static struct radeon_asic rv770_asic = {
1133 .ring_test = &r600_ring_test, 1302 .ring_test = &r600_ring_test,
1134 .ib_test = &r600_ib_test, 1303 .ib_test = &r600_ib_test,
1135 .is_lockup = &r600_gfx_is_lockup, 1304 .is_lockup = &r600_gfx_is_lockup,
1305 .get_rptr = &radeon_ring_generic_get_rptr,
1306 .get_wptr = &radeon_ring_generic_get_wptr,
1307 .set_wptr = &radeon_ring_generic_set_wptr,
1136 }, 1308 },
1137 [R600_RING_TYPE_DMA_INDEX] = { 1309 [R600_RING_TYPE_DMA_INDEX] = {
1138 .ib_execute = &r600_dma_ring_ib_execute, 1310 .ib_execute = &r600_dma_ring_ib_execute,
@@ -1142,6 +1314,9 @@ static struct radeon_asic rv770_asic = {
1142 .ring_test = &r600_dma_ring_test, 1314 .ring_test = &r600_dma_ring_test,
1143 .ib_test = &r600_dma_ib_test, 1315 .ib_test = &r600_dma_ib_test,
1144 .is_lockup = &r600_dma_is_lockup, 1316 .is_lockup = &r600_dma_is_lockup,
1317 .get_rptr = &radeon_ring_generic_get_rptr,
1318 .get_wptr = &radeon_ring_generic_get_wptr,
1319 .set_wptr = &radeon_ring_generic_set_wptr,
1145 }, 1320 },
1146 [R600_RING_TYPE_UVD_INDEX] = { 1321 [R600_RING_TYPE_UVD_INDEX] = {
1147 .ib_execute = &r600_uvd_ib_execute, 1322 .ib_execute = &r600_uvd_ib_execute,
@@ -1151,6 +1326,9 @@ static struct radeon_asic rv770_asic = {
1151 .ring_test = &r600_uvd_ring_test, 1326 .ring_test = &r600_uvd_ring_test,
1152 .ib_test = &r600_uvd_ib_test, 1327 .ib_test = &r600_uvd_ib_test,
1153 .is_lockup = &radeon_ring_test_lockup, 1328 .is_lockup = &radeon_ring_test_lockup,
1329 .get_rptr = &radeon_ring_generic_get_rptr,
1330 .get_wptr = &radeon_ring_generic_get_wptr,
1331 .set_wptr = &radeon_ring_generic_set_wptr,
1154 } 1332 }
1155 }, 1333 },
1156 .irq = { 1334 .irq = {
@@ -1198,6 +1376,21 @@ static struct radeon_asic rv770_asic = {
1198 .set_pcie_lanes = &r600_set_pcie_lanes, 1376 .set_pcie_lanes = &r600_set_pcie_lanes,
1199 .set_clock_gating = &radeon_atom_set_clock_gating, 1377 .set_clock_gating = &radeon_atom_set_clock_gating,
1200 .set_uvd_clocks = &rv770_set_uvd_clocks, 1378 .set_uvd_clocks = &rv770_set_uvd_clocks,
1379 .get_temperature = &rv770_get_temp,
1380 },
1381 .dpm = {
1382 .init = &rv770_dpm_init,
1383 .setup_asic = &rv770_dpm_setup_asic,
1384 .enable = &rv770_dpm_enable,
1385 .disable = &rv770_dpm_disable,
1386 .pre_set_power_state = &r600_dpm_pre_set_power_state,
1387 .set_power_state = &rv770_dpm_set_power_state,
1388 .post_set_power_state = &r600_dpm_post_set_power_state,
1389 .display_configuration_changed = &rv770_dpm_display_configuration_changed,
1390 .fini = &rv770_dpm_fini,
1391 .get_sclk = &rv770_dpm_get_sclk,
1392 .get_mclk = &rv770_dpm_get_mclk,
1393 .print_power_state = &rv770_dpm_print_power_state,
1201 }, 1394 },
1202 .pflip = { 1395 .pflip = {
1203 .pre_page_flip = &rs600_pre_page_flip, 1396 .pre_page_flip = &rs600_pre_page_flip,
@@ -1231,6 +1424,9 @@ static struct radeon_asic evergreen_asic = {
1231 .ring_test = &r600_ring_test, 1424 .ring_test = &r600_ring_test,
1232 .ib_test = &r600_ib_test, 1425 .ib_test = &r600_ib_test,
1233 .is_lockup = &evergreen_gfx_is_lockup, 1426 .is_lockup = &evergreen_gfx_is_lockup,
1427 .get_rptr = &radeon_ring_generic_get_rptr,
1428 .get_wptr = &radeon_ring_generic_get_wptr,
1429 .set_wptr = &radeon_ring_generic_set_wptr,
1234 }, 1430 },
1235 [R600_RING_TYPE_DMA_INDEX] = { 1431 [R600_RING_TYPE_DMA_INDEX] = {
1236 .ib_execute = &evergreen_dma_ring_ib_execute, 1432 .ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1240,6 +1436,9 @@ static struct radeon_asic evergreen_asic = {
1240 .ring_test = &r600_dma_ring_test, 1436 .ring_test = &r600_dma_ring_test,
1241 .ib_test = &r600_dma_ib_test, 1437 .ib_test = &r600_dma_ib_test,
1242 .is_lockup = &evergreen_dma_is_lockup, 1438 .is_lockup = &evergreen_dma_is_lockup,
1439 .get_rptr = &radeon_ring_generic_get_rptr,
1440 .get_wptr = &radeon_ring_generic_get_wptr,
1441 .set_wptr = &radeon_ring_generic_set_wptr,
1243 }, 1442 },
1244 [R600_RING_TYPE_UVD_INDEX] = { 1443 [R600_RING_TYPE_UVD_INDEX] = {
1245 .ib_execute = &r600_uvd_ib_execute, 1444 .ib_execute = &r600_uvd_ib_execute,
@@ -1249,6 +1448,9 @@ static struct radeon_asic evergreen_asic = {
1249 .ring_test = &r600_uvd_ring_test, 1448 .ring_test = &r600_uvd_ring_test,
1250 .ib_test = &r600_uvd_ib_test, 1449 .ib_test = &r600_uvd_ib_test,
1251 .is_lockup = &radeon_ring_test_lockup, 1450 .is_lockup = &radeon_ring_test_lockup,
1451 .get_rptr = &radeon_ring_generic_get_rptr,
1452 .get_wptr = &radeon_ring_generic_get_wptr,
1453 .set_wptr = &radeon_ring_generic_set_wptr,
1252 } 1454 }
1253 }, 1455 },
1254 .irq = { 1456 .irq = {
@@ -1296,6 +1498,21 @@ static struct radeon_asic evergreen_asic = {
1296 .set_pcie_lanes = &r600_set_pcie_lanes, 1498 .set_pcie_lanes = &r600_set_pcie_lanes,
1297 .set_clock_gating = NULL, 1499 .set_clock_gating = NULL,
1298 .set_uvd_clocks = &evergreen_set_uvd_clocks, 1500 .set_uvd_clocks = &evergreen_set_uvd_clocks,
1501 .get_temperature = &evergreen_get_temp,
1502 },
1503 .dpm = {
1504 .init = &cypress_dpm_init,
1505 .setup_asic = &cypress_dpm_setup_asic,
1506 .enable = &cypress_dpm_enable,
1507 .disable = &cypress_dpm_disable,
1508 .pre_set_power_state = &r600_dpm_pre_set_power_state,
1509 .set_power_state = &cypress_dpm_set_power_state,
1510 .post_set_power_state = &r600_dpm_post_set_power_state,
1511 .display_configuration_changed = &cypress_dpm_display_configuration_changed,
1512 .fini = &cypress_dpm_fini,
1513 .get_sclk = &rv770_dpm_get_sclk,
1514 .get_mclk = &rv770_dpm_get_mclk,
1515 .print_power_state = &rv770_dpm_print_power_state,
1299 }, 1516 },
1300 .pflip = { 1517 .pflip = {
1301 .pre_page_flip = &evergreen_pre_page_flip, 1518 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1329,6 +1546,9 @@ static struct radeon_asic sumo_asic = {
1329 .ring_test = &r600_ring_test, 1546 .ring_test = &r600_ring_test,
1330 .ib_test = &r600_ib_test, 1547 .ib_test = &r600_ib_test,
1331 .is_lockup = &evergreen_gfx_is_lockup, 1548 .is_lockup = &evergreen_gfx_is_lockup,
1549 .get_rptr = &radeon_ring_generic_get_rptr,
1550 .get_wptr = &radeon_ring_generic_get_wptr,
1551 .set_wptr = &radeon_ring_generic_set_wptr,
1332 }, 1552 },
1333 [R600_RING_TYPE_DMA_INDEX] = { 1553 [R600_RING_TYPE_DMA_INDEX] = {
1334 .ib_execute = &evergreen_dma_ring_ib_execute, 1554 .ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1338,6 +1558,9 @@ static struct radeon_asic sumo_asic = {
1338 .ring_test = &r600_dma_ring_test, 1558 .ring_test = &r600_dma_ring_test,
1339 .ib_test = &r600_dma_ib_test, 1559 .ib_test = &r600_dma_ib_test,
1340 .is_lockup = &evergreen_dma_is_lockup, 1560 .is_lockup = &evergreen_dma_is_lockup,
1561 .get_rptr = &radeon_ring_generic_get_rptr,
1562 .get_wptr = &radeon_ring_generic_get_wptr,
1563 .set_wptr = &radeon_ring_generic_set_wptr,
1341 }, 1564 },
1342 [R600_RING_TYPE_UVD_INDEX] = { 1565 [R600_RING_TYPE_UVD_INDEX] = {
1343 .ib_execute = &r600_uvd_ib_execute, 1566 .ib_execute = &r600_uvd_ib_execute,
@@ -1347,6 +1570,9 @@ static struct radeon_asic sumo_asic = {
1347 .ring_test = &r600_uvd_ring_test, 1570 .ring_test = &r600_uvd_ring_test,
1348 .ib_test = &r600_uvd_ib_test, 1571 .ib_test = &r600_uvd_ib_test,
1349 .is_lockup = &radeon_ring_test_lockup, 1572 .is_lockup = &radeon_ring_test_lockup,
1573 .get_rptr = &radeon_ring_generic_get_rptr,
1574 .get_wptr = &radeon_ring_generic_get_wptr,
1575 .set_wptr = &radeon_ring_generic_set_wptr,
1350 } 1576 }
1351 }, 1577 },
1352 .irq = { 1578 .irq = {
@@ -1394,6 +1620,21 @@ static struct radeon_asic sumo_asic = {
1394 .set_pcie_lanes = NULL, 1620 .set_pcie_lanes = NULL,
1395 .set_clock_gating = NULL, 1621 .set_clock_gating = NULL,
1396 .set_uvd_clocks = &sumo_set_uvd_clocks, 1622 .set_uvd_clocks = &sumo_set_uvd_clocks,
1623 .get_temperature = &sumo_get_temp,
1624 },
1625 .dpm = {
1626 .init = &sumo_dpm_init,
1627 .setup_asic = &sumo_dpm_setup_asic,
1628 .enable = &sumo_dpm_enable,
1629 .disable = &sumo_dpm_disable,
1630 .pre_set_power_state = &sumo_dpm_pre_set_power_state,
1631 .set_power_state = &sumo_dpm_set_power_state,
1632 .post_set_power_state = &sumo_dpm_post_set_power_state,
1633 .display_configuration_changed = &sumo_dpm_display_configuration_changed,
1634 .fini = &sumo_dpm_fini,
1635 .get_sclk = &sumo_dpm_get_sclk,
1636 .get_mclk = &sumo_dpm_get_mclk,
1637 .print_power_state = &sumo_dpm_print_power_state,
1397 }, 1638 },
1398 .pflip = { 1639 .pflip = {
1399 .pre_page_flip = &evergreen_pre_page_flip, 1640 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1427,6 +1668,9 @@ static struct radeon_asic btc_asic = {
1427 .ring_test = &r600_ring_test, 1668 .ring_test = &r600_ring_test,
1428 .ib_test = &r600_ib_test, 1669 .ib_test = &r600_ib_test,
1429 .is_lockup = &evergreen_gfx_is_lockup, 1670 .is_lockup = &evergreen_gfx_is_lockup,
1671 .get_rptr = &radeon_ring_generic_get_rptr,
1672 .get_wptr = &radeon_ring_generic_get_wptr,
1673 .set_wptr = &radeon_ring_generic_set_wptr,
1430 }, 1674 },
1431 [R600_RING_TYPE_DMA_INDEX] = { 1675 [R600_RING_TYPE_DMA_INDEX] = {
1432 .ib_execute = &evergreen_dma_ring_ib_execute, 1676 .ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1436,6 +1680,9 @@ static struct radeon_asic btc_asic = {
1436 .ring_test = &r600_dma_ring_test, 1680 .ring_test = &r600_dma_ring_test,
1437 .ib_test = &r600_dma_ib_test, 1681 .ib_test = &r600_dma_ib_test,
1438 .is_lockup = &evergreen_dma_is_lockup, 1682 .is_lockup = &evergreen_dma_is_lockup,
1683 .get_rptr = &radeon_ring_generic_get_rptr,
1684 .get_wptr = &radeon_ring_generic_get_wptr,
1685 .set_wptr = &radeon_ring_generic_set_wptr,
1439 }, 1686 },
1440 [R600_RING_TYPE_UVD_INDEX] = { 1687 [R600_RING_TYPE_UVD_INDEX] = {
1441 .ib_execute = &r600_uvd_ib_execute, 1688 .ib_execute = &r600_uvd_ib_execute,
@@ -1445,6 +1692,9 @@ static struct radeon_asic btc_asic = {
1445 .ring_test = &r600_uvd_ring_test, 1692 .ring_test = &r600_uvd_ring_test,
1446 .ib_test = &r600_uvd_ib_test, 1693 .ib_test = &r600_uvd_ib_test,
1447 .is_lockup = &radeon_ring_test_lockup, 1694 .is_lockup = &radeon_ring_test_lockup,
1695 .get_rptr = &radeon_ring_generic_get_rptr,
1696 .get_wptr = &radeon_ring_generic_get_wptr,
1697 .set_wptr = &radeon_ring_generic_set_wptr,
1448 } 1698 }
1449 }, 1699 },
1450 .irq = { 1700 .irq = {
@@ -1492,6 +1742,21 @@ static struct radeon_asic btc_asic = {
1492 .set_pcie_lanes = &r600_set_pcie_lanes, 1742 .set_pcie_lanes = &r600_set_pcie_lanes,
1493 .set_clock_gating = NULL, 1743 .set_clock_gating = NULL,
1494 .set_uvd_clocks = &evergreen_set_uvd_clocks, 1744 .set_uvd_clocks = &evergreen_set_uvd_clocks,
1745 .get_temperature = &evergreen_get_temp,
1746 },
1747 .dpm = {
1748 .init = &btc_dpm_init,
1749 .setup_asic = &btc_dpm_setup_asic,
1750 .enable = &btc_dpm_enable,
1751 .disable = &btc_dpm_disable,
1752 .pre_set_power_state = &btc_dpm_pre_set_power_state,
1753 .set_power_state = &btc_dpm_set_power_state,
1754 .post_set_power_state = &btc_dpm_post_set_power_state,
1755 .display_configuration_changed = &cypress_dpm_display_configuration_changed,
1756 .fini = &btc_dpm_fini,
1757 .get_sclk = &btc_dpm_get_sclk,
1758 .get_mclk = &btc_dpm_get_mclk,
1759 .print_power_state = &rv770_dpm_print_power_state,
1495 }, 1760 },
1496 .pflip = { 1761 .pflip = {
1497 .pre_page_flip = &evergreen_pre_page_flip, 1762 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1533,6 +1798,9 @@ static struct radeon_asic cayman_asic = {
1533 .ib_test = &r600_ib_test, 1798 .ib_test = &r600_ib_test,
1534 .is_lockup = &cayman_gfx_is_lockup, 1799 .is_lockup = &cayman_gfx_is_lockup,
1535 .vm_flush = &cayman_vm_flush, 1800 .vm_flush = &cayman_vm_flush,
1801 .get_rptr = &radeon_ring_generic_get_rptr,
1802 .get_wptr = &radeon_ring_generic_get_wptr,
1803 .set_wptr = &radeon_ring_generic_set_wptr,
1536 }, 1804 },
1537 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1805 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1538 .ib_execute = &cayman_ring_ib_execute, 1806 .ib_execute = &cayman_ring_ib_execute,
@@ -1544,6 +1812,9 @@ static struct radeon_asic cayman_asic = {
1544 .ib_test = &r600_ib_test, 1812 .ib_test = &r600_ib_test,
1545 .is_lockup = &cayman_gfx_is_lockup, 1813 .is_lockup = &cayman_gfx_is_lockup,
1546 .vm_flush = &cayman_vm_flush, 1814 .vm_flush = &cayman_vm_flush,
1815 .get_rptr = &radeon_ring_generic_get_rptr,
1816 .get_wptr = &radeon_ring_generic_get_wptr,
1817 .set_wptr = &radeon_ring_generic_set_wptr,
1547 }, 1818 },
1548 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1819 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1549 .ib_execute = &cayman_ring_ib_execute, 1820 .ib_execute = &cayman_ring_ib_execute,
@@ -1555,6 +1826,9 @@ static struct radeon_asic cayman_asic = {
1555 .ib_test = &r600_ib_test, 1826 .ib_test = &r600_ib_test,
1556 .is_lockup = &cayman_gfx_is_lockup, 1827 .is_lockup = &cayman_gfx_is_lockup,
1557 .vm_flush = &cayman_vm_flush, 1828 .vm_flush = &cayman_vm_flush,
1829 .get_rptr = &radeon_ring_generic_get_rptr,
1830 .get_wptr = &radeon_ring_generic_get_wptr,
1831 .set_wptr = &radeon_ring_generic_set_wptr,
1558 }, 1832 },
1559 [R600_RING_TYPE_DMA_INDEX] = { 1833 [R600_RING_TYPE_DMA_INDEX] = {
1560 .ib_execute = &cayman_dma_ring_ib_execute, 1834 .ib_execute = &cayman_dma_ring_ib_execute,
@@ -1566,6 +1840,9 @@ static struct radeon_asic cayman_asic = {
1566 .ib_test = &r600_dma_ib_test, 1840 .ib_test = &r600_dma_ib_test,
1567 .is_lockup = &cayman_dma_is_lockup, 1841 .is_lockup = &cayman_dma_is_lockup,
1568 .vm_flush = &cayman_dma_vm_flush, 1842 .vm_flush = &cayman_dma_vm_flush,
1843 .get_rptr = &radeon_ring_generic_get_rptr,
1844 .get_wptr = &radeon_ring_generic_get_wptr,
1845 .set_wptr = &radeon_ring_generic_set_wptr,
1569 }, 1846 },
1570 [CAYMAN_RING_TYPE_DMA1_INDEX] = { 1847 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
1571 .ib_execute = &cayman_dma_ring_ib_execute, 1848 .ib_execute = &cayman_dma_ring_ib_execute,
@@ -1577,6 +1854,9 @@ static struct radeon_asic cayman_asic = {
1577 .ib_test = &r600_dma_ib_test, 1854 .ib_test = &r600_dma_ib_test,
1578 .is_lockup = &cayman_dma_is_lockup, 1855 .is_lockup = &cayman_dma_is_lockup,
1579 .vm_flush = &cayman_dma_vm_flush, 1856 .vm_flush = &cayman_dma_vm_flush,
1857 .get_rptr = &radeon_ring_generic_get_rptr,
1858 .get_wptr = &radeon_ring_generic_get_wptr,
1859 .set_wptr = &radeon_ring_generic_set_wptr,
1580 }, 1860 },
1581 [R600_RING_TYPE_UVD_INDEX] = { 1861 [R600_RING_TYPE_UVD_INDEX] = {
1582 .ib_execute = &r600_uvd_ib_execute, 1862 .ib_execute = &r600_uvd_ib_execute,
@@ -1586,6 +1866,9 @@ static struct radeon_asic cayman_asic = {
1586 .ring_test = &r600_uvd_ring_test, 1866 .ring_test = &r600_uvd_ring_test,
1587 .ib_test = &r600_uvd_ib_test, 1867 .ib_test = &r600_uvd_ib_test,
1588 .is_lockup = &radeon_ring_test_lockup, 1868 .is_lockup = &radeon_ring_test_lockup,
1869 .get_rptr = &radeon_ring_generic_get_rptr,
1870 .get_wptr = &radeon_ring_generic_get_wptr,
1871 .set_wptr = &radeon_ring_generic_set_wptr,
1589 } 1872 }
1590 }, 1873 },
1591 .irq = { 1874 .irq = {
@@ -1633,6 +1916,21 @@ static struct radeon_asic cayman_asic = {
1633 .set_pcie_lanes = &r600_set_pcie_lanes, 1916 .set_pcie_lanes = &r600_set_pcie_lanes,
1634 .set_clock_gating = NULL, 1917 .set_clock_gating = NULL,
1635 .set_uvd_clocks = &evergreen_set_uvd_clocks, 1918 .set_uvd_clocks = &evergreen_set_uvd_clocks,
1919 .get_temperature = &evergreen_get_temp,
1920 },
1921 .dpm = {
1922 .init = &ni_dpm_init,
1923 .setup_asic = &ni_dpm_setup_asic,
1924 .enable = &ni_dpm_enable,
1925 .disable = &ni_dpm_disable,
1926 .pre_set_power_state = &ni_dpm_pre_set_power_state,
1927 .set_power_state = &ni_dpm_set_power_state,
1928 .post_set_power_state = &ni_dpm_post_set_power_state,
1929 .display_configuration_changed = &cypress_dpm_display_configuration_changed,
1930 .fini = &ni_dpm_fini,
1931 .get_sclk = &ni_dpm_get_sclk,
1932 .get_mclk = &ni_dpm_get_mclk,
1933 .print_power_state = &ni_dpm_print_power_state,
1636 }, 1934 },
1637 .pflip = { 1935 .pflip = {
1638 .pre_page_flip = &evergreen_pre_page_flip, 1936 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1674,6 +1972,9 @@ static struct radeon_asic trinity_asic = {
1674 .ib_test = &r600_ib_test, 1972 .ib_test = &r600_ib_test,
1675 .is_lockup = &cayman_gfx_is_lockup, 1973 .is_lockup = &cayman_gfx_is_lockup,
1676 .vm_flush = &cayman_vm_flush, 1974 .vm_flush = &cayman_vm_flush,
1975 .get_rptr = &radeon_ring_generic_get_rptr,
1976 .get_wptr = &radeon_ring_generic_get_wptr,
1977 .set_wptr = &radeon_ring_generic_set_wptr,
1677 }, 1978 },
1678 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1979 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1679 .ib_execute = &cayman_ring_ib_execute, 1980 .ib_execute = &cayman_ring_ib_execute,
@@ -1685,6 +1986,9 @@ static struct radeon_asic trinity_asic = {
1685 .ib_test = &r600_ib_test, 1986 .ib_test = &r600_ib_test,
1686 .is_lockup = &cayman_gfx_is_lockup, 1987 .is_lockup = &cayman_gfx_is_lockup,
1687 .vm_flush = &cayman_vm_flush, 1988 .vm_flush = &cayman_vm_flush,
1989 .get_rptr = &radeon_ring_generic_get_rptr,
1990 .get_wptr = &radeon_ring_generic_get_wptr,
1991 .set_wptr = &radeon_ring_generic_set_wptr,
1688 }, 1992 },
1689 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1993 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1690 .ib_execute = &cayman_ring_ib_execute, 1994 .ib_execute = &cayman_ring_ib_execute,
@@ -1696,6 +2000,9 @@ static struct radeon_asic trinity_asic = {
1696 .ib_test = &r600_ib_test, 2000 .ib_test = &r600_ib_test,
1697 .is_lockup = &cayman_gfx_is_lockup, 2001 .is_lockup = &cayman_gfx_is_lockup,
1698 .vm_flush = &cayman_vm_flush, 2002 .vm_flush = &cayman_vm_flush,
2003 .get_rptr = &radeon_ring_generic_get_rptr,
2004 .get_wptr = &radeon_ring_generic_get_wptr,
2005 .set_wptr = &radeon_ring_generic_set_wptr,
1699 }, 2006 },
1700 [R600_RING_TYPE_DMA_INDEX] = { 2007 [R600_RING_TYPE_DMA_INDEX] = {
1701 .ib_execute = &cayman_dma_ring_ib_execute, 2008 .ib_execute = &cayman_dma_ring_ib_execute,
@@ -1707,6 +2014,9 @@ static struct radeon_asic trinity_asic = {
1707 .ib_test = &r600_dma_ib_test, 2014 .ib_test = &r600_dma_ib_test,
1708 .is_lockup = &cayman_dma_is_lockup, 2015 .is_lockup = &cayman_dma_is_lockup,
1709 .vm_flush = &cayman_dma_vm_flush, 2016 .vm_flush = &cayman_dma_vm_flush,
2017 .get_rptr = &radeon_ring_generic_get_rptr,
2018 .get_wptr = &radeon_ring_generic_get_wptr,
2019 .set_wptr = &radeon_ring_generic_set_wptr,
1710 }, 2020 },
1711 [CAYMAN_RING_TYPE_DMA1_INDEX] = { 2021 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
1712 .ib_execute = &cayman_dma_ring_ib_execute, 2022 .ib_execute = &cayman_dma_ring_ib_execute,
@@ -1718,6 +2028,9 @@ static struct radeon_asic trinity_asic = {
1718 .ib_test = &r600_dma_ib_test, 2028 .ib_test = &r600_dma_ib_test,
1719 .is_lockup = &cayman_dma_is_lockup, 2029 .is_lockup = &cayman_dma_is_lockup,
1720 .vm_flush = &cayman_dma_vm_flush, 2030 .vm_flush = &cayman_dma_vm_flush,
2031 .get_rptr = &radeon_ring_generic_get_rptr,
2032 .get_wptr = &radeon_ring_generic_get_wptr,
2033 .set_wptr = &radeon_ring_generic_set_wptr,
1721 }, 2034 },
1722 [R600_RING_TYPE_UVD_INDEX] = { 2035 [R600_RING_TYPE_UVD_INDEX] = {
1723 .ib_execute = &r600_uvd_ib_execute, 2036 .ib_execute = &r600_uvd_ib_execute,
@@ -1727,6 +2040,9 @@ static struct radeon_asic trinity_asic = {
1727 .ring_test = &r600_uvd_ring_test, 2040 .ring_test = &r600_uvd_ring_test,
1728 .ib_test = &r600_uvd_ib_test, 2041 .ib_test = &r600_uvd_ib_test,
1729 .is_lockup = &radeon_ring_test_lockup, 2042 .is_lockup = &radeon_ring_test_lockup,
2043 .get_rptr = &radeon_ring_generic_get_rptr,
2044 .get_wptr = &radeon_ring_generic_get_wptr,
2045 .set_wptr = &radeon_ring_generic_set_wptr,
1730 } 2046 }
1731 }, 2047 },
1732 .irq = { 2048 .irq = {
@@ -1772,6 +2088,21 @@ static struct radeon_asic trinity_asic = {
1772 .set_pcie_lanes = NULL, 2088 .set_pcie_lanes = NULL,
1773 .set_clock_gating = NULL, 2089 .set_clock_gating = NULL,
1774 .set_uvd_clocks = &sumo_set_uvd_clocks, 2090 .set_uvd_clocks = &sumo_set_uvd_clocks,
2091 .get_temperature = &tn_get_temp,
2092 },
2093 .dpm = {
2094 .init = &trinity_dpm_init,
2095 .setup_asic = &trinity_dpm_setup_asic,
2096 .enable = &trinity_dpm_enable,
2097 .disable = &trinity_dpm_disable,
2098 .pre_set_power_state = &trinity_dpm_pre_set_power_state,
2099 .set_power_state = &trinity_dpm_set_power_state,
2100 .post_set_power_state = &trinity_dpm_post_set_power_state,
2101 .display_configuration_changed = &trinity_dpm_display_configuration_changed,
2102 .fini = &trinity_dpm_fini,
2103 .get_sclk = &trinity_dpm_get_sclk,
2104 .get_mclk = &trinity_dpm_get_mclk,
2105 .print_power_state = &trinity_dpm_print_power_state,
1775 }, 2106 },
1776 .pflip = { 2107 .pflip = {
1777 .pre_page_flip = &evergreen_pre_page_flip, 2108 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1813,6 +2144,9 @@ static struct radeon_asic si_asic = {
1813 .ib_test = &r600_ib_test, 2144 .ib_test = &r600_ib_test,
1814 .is_lockup = &si_gfx_is_lockup, 2145 .is_lockup = &si_gfx_is_lockup,
1815 .vm_flush = &si_vm_flush, 2146 .vm_flush = &si_vm_flush,
2147 .get_rptr = &radeon_ring_generic_get_rptr,
2148 .get_wptr = &radeon_ring_generic_get_wptr,
2149 .set_wptr = &radeon_ring_generic_set_wptr,
1816 }, 2150 },
1817 [CAYMAN_RING_TYPE_CP1_INDEX] = { 2151 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1818 .ib_execute = &si_ring_ib_execute, 2152 .ib_execute = &si_ring_ib_execute,
@@ -1824,6 +2158,9 @@ static struct radeon_asic si_asic = {
1824 .ib_test = &r600_ib_test, 2158 .ib_test = &r600_ib_test,
1825 .is_lockup = &si_gfx_is_lockup, 2159 .is_lockup = &si_gfx_is_lockup,
1826 .vm_flush = &si_vm_flush, 2160 .vm_flush = &si_vm_flush,
2161 .get_rptr = &radeon_ring_generic_get_rptr,
2162 .get_wptr = &radeon_ring_generic_get_wptr,
2163 .set_wptr = &radeon_ring_generic_set_wptr,
1827 }, 2164 },
1828 [CAYMAN_RING_TYPE_CP2_INDEX] = { 2165 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1829 .ib_execute = &si_ring_ib_execute, 2166 .ib_execute = &si_ring_ib_execute,
@@ -1835,6 +2172,9 @@ static struct radeon_asic si_asic = {
1835 .ib_test = &r600_ib_test, 2172 .ib_test = &r600_ib_test,
1836 .is_lockup = &si_gfx_is_lockup, 2173 .is_lockup = &si_gfx_is_lockup,
1837 .vm_flush = &si_vm_flush, 2174 .vm_flush = &si_vm_flush,
2175 .get_rptr = &radeon_ring_generic_get_rptr,
2176 .get_wptr = &radeon_ring_generic_get_wptr,
2177 .set_wptr = &radeon_ring_generic_set_wptr,
1838 }, 2178 },
1839 [R600_RING_TYPE_DMA_INDEX] = { 2179 [R600_RING_TYPE_DMA_INDEX] = {
1840 .ib_execute = &cayman_dma_ring_ib_execute, 2180 .ib_execute = &cayman_dma_ring_ib_execute,
@@ -1846,6 +2186,9 @@ static struct radeon_asic si_asic = {
1846 .ib_test = &r600_dma_ib_test, 2186 .ib_test = &r600_dma_ib_test,
1847 .is_lockup = &si_dma_is_lockup, 2187 .is_lockup = &si_dma_is_lockup,
1848 .vm_flush = &si_dma_vm_flush, 2188 .vm_flush = &si_dma_vm_flush,
2189 .get_rptr = &radeon_ring_generic_get_rptr,
2190 .get_wptr = &radeon_ring_generic_get_wptr,
2191 .set_wptr = &radeon_ring_generic_set_wptr,
1849 }, 2192 },
1850 [CAYMAN_RING_TYPE_DMA1_INDEX] = { 2193 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
1851 .ib_execute = &cayman_dma_ring_ib_execute, 2194 .ib_execute = &cayman_dma_ring_ib_execute,
@@ -1857,6 +2200,9 @@ static struct radeon_asic si_asic = {
1857 .ib_test = &r600_dma_ib_test, 2200 .ib_test = &r600_dma_ib_test,
1858 .is_lockup = &si_dma_is_lockup, 2201 .is_lockup = &si_dma_is_lockup,
1859 .vm_flush = &si_dma_vm_flush, 2202 .vm_flush = &si_dma_vm_flush,
2203 .get_rptr = &radeon_ring_generic_get_rptr,
2204 .get_wptr = &radeon_ring_generic_get_wptr,
2205 .set_wptr = &radeon_ring_generic_set_wptr,
1860 }, 2206 },
1861 [R600_RING_TYPE_UVD_INDEX] = { 2207 [R600_RING_TYPE_UVD_INDEX] = {
1862 .ib_execute = &r600_uvd_ib_execute, 2208 .ib_execute = &r600_uvd_ib_execute,
@@ -1866,6 +2212,9 @@ static struct radeon_asic si_asic = {
1866 .ring_test = &r600_uvd_ring_test, 2212 .ring_test = &r600_uvd_ring_test,
1867 .ib_test = &r600_uvd_ib_test, 2213 .ib_test = &r600_uvd_ib_test,
1868 .is_lockup = &radeon_ring_test_lockup, 2214 .is_lockup = &radeon_ring_test_lockup,
2215 .get_rptr = &radeon_ring_generic_get_rptr,
2216 .get_wptr = &radeon_ring_generic_get_wptr,
2217 .set_wptr = &radeon_ring_generic_set_wptr,
1869 } 2218 }
1870 }, 2219 },
1871 .irq = { 2220 .irq = {
@@ -1911,6 +2260,331 @@ static struct radeon_asic si_asic = {
1911 .set_pcie_lanes = &r600_set_pcie_lanes, 2260 .set_pcie_lanes = &r600_set_pcie_lanes,
1912 .set_clock_gating = NULL, 2261 .set_clock_gating = NULL,
1913 .set_uvd_clocks = &si_set_uvd_clocks, 2262 .set_uvd_clocks = &si_set_uvd_clocks,
2263 .get_temperature = &si_get_temp,
2264 },
2265 .dpm = {
2266 .init = &si_dpm_init,
2267 .setup_asic = &si_dpm_setup_asic,
2268 .enable = &si_dpm_enable,
2269 .disable = &si_dpm_disable,
2270 .pre_set_power_state = &si_dpm_pre_set_power_state,
2271 .set_power_state = &si_dpm_set_power_state,
2272 .post_set_power_state = &si_dpm_post_set_power_state,
2273 .display_configuration_changed = &si_dpm_display_configuration_changed,
2274 .fini = &si_dpm_fini,
2275 .get_sclk = &ni_dpm_get_sclk,
2276 .get_mclk = &ni_dpm_get_mclk,
2277 .print_power_state = &ni_dpm_print_power_state,
2278 },
2279 .pflip = {
2280 .pre_page_flip = &evergreen_pre_page_flip,
2281 .page_flip = &evergreen_page_flip,
2282 .post_page_flip = &evergreen_post_page_flip,
2283 },
2284};
2285
2286static struct radeon_asic ci_asic = {
2287 .init = &cik_init,
2288 .fini = &cik_fini,
2289 .suspend = &cik_suspend,
2290 .resume = &cik_resume,
2291 .asic_reset = &cik_asic_reset,
2292 .vga_set_state = &r600_vga_set_state,
2293 .ioctl_wait_idle = NULL,
2294 .gui_idle = &r600_gui_idle,
2295 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
2296 .get_xclk = &cik_get_xclk,
2297 .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
2298 .gart = {
2299 .tlb_flush = &cik_pcie_gart_tlb_flush,
2300 .set_page = &rs600_gart_set_page,
2301 },
2302 .vm = {
2303 .init = &cik_vm_init,
2304 .fini = &cik_vm_fini,
2305 .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
2306 .set_page = &cik_vm_set_page,
2307 },
2308 .ring = {
2309 [RADEON_RING_TYPE_GFX_INDEX] = {
2310 .ib_execute = &cik_ring_ib_execute,
2311 .ib_parse = &cik_ib_parse,
2312 .emit_fence = &cik_fence_gfx_ring_emit,
2313 .emit_semaphore = &cik_semaphore_ring_emit,
2314 .cs_parse = NULL,
2315 .ring_test = &cik_ring_test,
2316 .ib_test = &cik_ib_test,
2317 .is_lockup = &cik_gfx_is_lockup,
2318 .vm_flush = &cik_vm_flush,
2319 .get_rptr = &radeon_ring_generic_get_rptr,
2320 .get_wptr = &radeon_ring_generic_get_wptr,
2321 .set_wptr = &radeon_ring_generic_set_wptr,
2322 },
2323 [CAYMAN_RING_TYPE_CP1_INDEX] = {
2324 .ib_execute = &cik_ring_ib_execute,
2325 .ib_parse = &cik_ib_parse,
2326 .emit_fence = &cik_fence_compute_ring_emit,
2327 .emit_semaphore = &cik_semaphore_ring_emit,
2328 .cs_parse = NULL,
2329 .ring_test = &cik_ring_test,
2330 .ib_test = &cik_ib_test,
2331 .is_lockup = &cik_gfx_is_lockup,
2332 .vm_flush = &cik_vm_flush,
2333 .get_rptr = &cik_compute_ring_get_rptr,
2334 .get_wptr = &cik_compute_ring_get_wptr,
2335 .set_wptr = &cik_compute_ring_set_wptr,
2336 },
2337 [CAYMAN_RING_TYPE_CP2_INDEX] = {
2338 .ib_execute = &cik_ring_ib_execute,
2339 .ib_parse = &cik_ib_parse,
2340 .emit_fence = &cik_fence_compute_ring_emit,
2341 .emit_semaphore = &cik_semaphore_ring_emit,
2342 .cs_parse = NULL,
2343 .ring_test = &cik_ring_test,
2344 .ib_test = &cik_ib_test,
2345 .is_lockup = &cik_gfx_is_lockup,
2346 .vm_flush = &cik_vm_flush,
2347 .get_rptr = &cik_compute_ring_get_rptr,
2348 .get_wptr = &cik_compute_ring_get_wptr,
2349 .set_wptr = &cik_compute_ring_set_wptr,
2350 },
2351 [R600_RING_TYPE_DMA_INDEX] = {
2352 .ib_execute = &cik_sdma_ring_ib_execute,
2353 .ib_parse = &cik_ib_parse,
2354 .emit_fence = &cik_sdma_fence_ring_emit,
2355 .emit_semaphore = &cik_sdma_semaphore_ring_emit,
2356 .cs_parse = NULL,
2357 .ring_test = &cik_sdma_ring_test,
2358 .ib_test = &cik_sdma_ib_test,
2359 .is_lockup = &cik_sdma_is_lockup,
2360 .vm_flush = &cik_dma_vm_flush,
2361 .get_rptr = &radeon_ring_generic_get_rptr,
2362 .get_wptr = &radeon_ring_generic_get_wptr,
2363 .set_wptr = &radeon_ring_generic_set_wptr,
2364 },
2365 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
2366 .ib_execute = &cik_sdma_ring_ib_execute,
2367 .ib_parse = &cik_ib_parse,
2368 .emit_fence = &cik_sdma_fence_ring_emit,
2369 .emit_semaphore = &cik_sdma_semaphore_ring_emit,
2370 .cs_parse = NULL,
2371 .ring_test = &cik_sdma_ring_test,
2372 .ib_test = &cik_sdma_ib_test,
2373 .is_lockup = &cik_sdma_is_lockup,
2374 .vm_flush = &cik_dma_vm_flush,
2375 .get_rptr = &radeon_ring_generic_get_rptr,
2376 .get_wptr = &radeon_ring_generic_get_wptr,
2377 .set_wptr = &radeon_ring_generic_set_wptr,
2378 },
2379 [R600_RING_TYPE_UVD_INDEX] = {
2380 .ib_execute = &r600_uvd_ib_execute,
2381 .emit_fence = &r600_uvd_fence_emit,
2382 .emit_semaphore = &cayman_uvd_semaphore_emit,
2383 .cs_parse = &radeon_uvd_cs_parse,
2384 .ring_test = &r600_uvd_ring_test,
2385 .ib_test = &r600_uvd_ib_test,
2386 .is_lockup = &radeon_ring_test_lockup,
2387 .get_rptr = &radeon_ring_generic_get_rptr,
2388 .get_wptr = &radeon_ring_generic_get_wptr,
2389 .set_wptr = &radeon_ring_generic_set_wptr,
2390 }
2391 },
2392 .irq = {
2393 .set = &cik_irq_set,
2394 .process = &cik_irq_process,
2395 },
2396 .display = {
2397 .bandwidth_update = &dce8_bandwidth_update,
2398 .get_vblank_counter = &evergreen_get_vblank_counter,
2399 .wait_for_vblank = &dce4_wait_for_vblank,
2400 },
2401 .copy = {
2402 .blit = NULL,
2403 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2404 .dma = &cik_copy_dma,
2405 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
2406 .copy = &cik_copy_dma,
2407 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
2408 },
2409 .surface = {
2410 .set_reg = r600_set_surface_reg,
2411 .clear_reg = r600_clear_surface_reg,
2412 },
2413 .hpd = {
2414 .init = &evergreen_hpd_init,
2415 .fini = &evergreen_hpd_fini,
2416 .sense = &evergreen_hpd_sense,
2417 .set_polarity = &evergreen_hpd_set_polarity,
2418 },
2419 .pm = {
2420 .misc = &evergreen_pm_misc,
2421 .prepare = &evergreen_pm_prepare,
2422 .finish = &evergreen_pm_finish,
2423 .init_profile = &sumo_pm_init_profile,
2424 .get_dynpm_state = &r600_pm_get_dynpm_state,
2425 .get_engine_clock = &radeon_atom_get_engine_clock,
2426 .set_engine_clock = &radeon_atom_set_engine_clock,
2427 .get_memory_clock = &radeon_atom_get_memory_clock,
2428 .set_memory_clock = &radeon_atom_set_memory_clock,
2429 .get_pcie_lanes = NULL,
2430 .set_pcie_lanes = NULL,
2431 .set_clock_gating = NULL,
2432 .set_uvd_clocks = &cik_set_uvd_clocks,
2433 },
2434 .pflip = {
2435 .pre_page_flip = &evergreen_pre_page_flip,
2436 .page_flip = &evergreen_page_flip,
2437 .post_page_flip = &evergreen_post_page_flip,
2438 },
2439};
2440
2441static struct radeon_asic kv_asic = {
2442 .init = &cik_init,
2443 .fini = &cik_fini,
2444 .suspend = &cik_suspend,
2445 .resume = &cik_resume,
2446 .asic_reset = &cik_asic_reset,
2447 .vga_set_state = &r600_vga_set_state,
2448 .ioctl_wait_idle = NULL,
2449 .gui_idle = &r600_gui_idle,
2450 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
2451 .get_xclk = &cik_get_xclk,
2452 .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
2453 .gart = {
2454 .tlb_flush = &cik_pcie_gart_tlb_flush,
2455 .set_page = &rs600_gart_set_page,
2456 },
2457 .vm = {
2458 .init = &cik_vm_init,
2459 .fini = &cik_vm_fini,
2460 .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
2461 .set_page = &cik_vm_set_page,
2462 },
2463 .ring = {
2464 [RADEON_RING_TYPE_GFX_INDEX] = {
2465 .ib_execute = &cik_ring_ib_execute,
2466 .ib_parse = &cik_ib_parse,
2467 .emit_fence = &cik_fence_gfx_ring_emit,
2468 .emit_semaphore = &cik_semaphore_ring_emit,
2469 .cs_parse = NULL,
2470 .ring_test = &cik_ring_test,
2471 .ib_test = &cik_ib_test,
2472 .is_lockup = &cik_gfx_is_lockup,
2473 .vm_flush = &cik_vm_flush,
2474 .get_rptr = &radeon_ring_generic_get_rptr,
2475 .get_wptr = &radeon_ring_generic_get_wptr,
2476 .set_wptr = &radeon_ring_generic_set_wptr,
2477 },
2478 [CAYMAN_RING_TYPE_CP1_INDEX] = {
2479 .ib_execute = &cik_ring_ib_execute,
2480 .ib_parse = &cik_ib_parse,
2481 .emit_fence = &cik_fence_compute_ring_emit,
2482 .emit_semaphore = &cik_semaphore_ring_emit,
2483 .cs_parse = NULL,
2484 .ring_test = &cik_ring_test,
2485 .ib_test = &cik_ib_test,
2486 .is_lockup = &cik_gfx_is_lockup,
2487 .vm_flush = &cik_vm_flush,
2488 .get_rptr = &cik_compute_ring_get_rptr,
2489 .get_wptr = &cik_compute_ring_get_wptr,
2490 .set_wptr = &cik_compute_ring_set_wptr,
2491 },
2492 [CAYMAN_RING_TYPE_CP2_INDEX] = {
2493 .ib_execute = &cik_ring_ib_execute,
2494 .ib_parse = &cik_ib_parse,
2495 .emit_fence = &cik_fence_compute_ring_emit,
2496 .emit_semaphore = &cik_semaphore_ring_emit,
2497 .cs_parse = NULL,
2498 .ring_test = &cik_ring_test,
2499 .ib_test = &cik_ib_test,
2500 .is_lockup = &cik_gfx_is_lockup,
2501 .vm_flush = &cik_vm_flush,
2502 .get_rptr = &cik_compute_ring_get_rptr,
2503 .get_wptr = &cik_compute_ring_get_wptr,
2504 .set_wptr = &cik_compute_ring_set_wptr,
2505 },
2506 [R600_RING_TYPE_DMA_INDEX] = {
2507 .ib_execute = &cik_sdma_ring_ib_execute,
2508 .ib_parse = &cik_ib_parse,
2509 .emit_fence = &cik_sdma_fence_ring_emit,
2510 .emit_semaphore = &cik_sdma_semaphore_ring_emit,
2511 .cs_parse = NULL,
2512 .ring_test = &cik_sdma_ring_test,
2513 .ib_test = &cik_sdma_ib_test,
2514 .is_lockup = &cik_sdma_is_lockup,
2515 .vm_flush = &cik_dma_vm_flush,
2516 .get_rptr = &radeon_ring_generic_get_rptr,
2517 .get_wptr = &radeon_ring_generic_get_wptr,
2518 .set_wptr = &radeon_ring_generic_set_wptr,
2519 },
2520 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
2521 .ib_execute = &cik_sdma_ring_ib_execute,
2522 .ib_parse = &cik_ib_parse,
2523 .emit_fence = &cik_sdma_fence_ring_emit,
2524 .emit_semaphore = &cik_sdma_semaphore_ring_emit,
2525 .cs_parse = NULL,
2526 .ring_test = &cik_sdma_ring_test,
2527 .ib_test = &cik_sdma_ib_test,
2528 .is_lockup = &cik_sdma_is_lockup,
2529 .vm_flush = &cik_dma_vm_flush,
2530 .get_rptr = &radeon_ring_generic_get_rptr,
2531 .get_wptr = &radeon_ring_generic_get_wptr,
2532 .set_wptr = &radeon_ring_generic_set_wptr,
2533 },
2534 [R600_RING_TYPE_UVD_INDEX] = {
2535 .ib_execute = &r600_uvd_ib_execute,
2536 .emit_fence = &r600_uvd_fence_emit,
2537 .emit_semaphore = &cayman_uvd_semaphore_emit,
2538 .cs_parse = &radeon_uvd_cs_parse,
2539 .ring_test = &r600_uvd_ring_test,
2540 .ib_test = &r600_uvd_ib_test,
2541 .is_lockup = &radeon_ring_test_lockup,
2542 .get_rptr = &radeon_ring_generic_get_rptr,
2543 .get_wptr = &radeon_ring_generic_get_wptr,
2544 .set_wptr = &radeon_ring_generic_set_wptr,
2545 }
2546 },
2547 .irq = {
2548 .set = &cik_irq_set,
2549 .process = &cik_irq_process,
2550 },
2551 .display = {
2552 .bandwidth_update = &dce8_bandwidth_update,
2553 .get_vblank_counter = &evergreen_get_vblank_counter,
2554 .wait_for_vblank = &dce4_wait_for_vblank,
2555 },
2556 .copy = {
2557 .blit = NULL,
2558 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2559 .dma = &cik_copy_dma,
2560 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
2561 .copy = &cik_copy_dma,
2562 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
2563 },
2564 .surface = {
2565 .set_reg = r600_set_surface_reg,
2566 .clear_reg = r600_clear_surface_reg,
2567 },
2568 .hpd = {
2569 .init = &evergreen_hpd_init,
2570 .fini = &evergreen_hpd_fini,
2571 .sense = &evergreen_hpd_sense,
2572 .set_polarity = &evergreen_hpd_set_polarity,
2573 },
2574 .pm = {
2575 .misc = &evergreen_pm_misc,
2576 .prepare = &evergreen_pm_prepare,
2577 .finish = &evergreen_pm_finish,
2578 .init_profile = &sumo_pm_init_profile,
2579 .get_dynpm_state = &r600_pm_get_dynpm_state,
2580 .get_engine_clock = &radeon_atom_get_engine_clock,
2581 .set_engine_clock = &radeon_atom_set_engine_clock,
2582 .get_memory_clock = &radeon_atom_get_memory_clock,
2583 .set_memory_clock = &radeon_atom_set_memory_clock,
2584 .get_pcie_lanes = NULL,
2585 .set_pcie_lanes = NULL,
2586 .set_clock_gating = NULL,
2587 .set_uvd_clocks = &cik_set_uvd_clocks,
1914 }, 2588 },
1915 .pflip = { 2589 .pflip = {
1916 .pre_page_flip = &evergreen_pre_page_flip, 2590 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1999,16 +2673,15 @@ int radeon_asic_init(struct radeon_device *rdev)
1999 rdev->asic = &r520_asic; 2673 rdev->asic = &r520_asic;
2000 break; 2674 break;
2001 case CHIP_R600: 2675 case CHIP_R600:
2676 rdev->asic = &r600_asic;
2677 break;
2002 case CHIP_RV610: 2678 case CHIP_RV610:
2003 case CHIP_RV630: 2679 case CHIP_RV630:
2004 case CHIP_RV620: 2680 case CHIP_RV620:
2005 case CHIP_RV635: 2681 case CHIP_RV635:
2006 case CHIP_RV670: 2682 case CHIP_RV670:
2007 rdev->asic = &r600_asic; 2683 rdev->asic = &rv6xx_asic;
2008 if (rdev->family == CHIP_R600) 2684 rdev->has_uvd = true;
2009 rdev->has_uvd = false;
2010 else
2011 rdev->has_uvd = true;
2012 break; 2685 break;
2013 case CHIP_RS780: 2686 case CHIP_RS780:
2014 case CHIP_RS880: 2687 case CHIP_RS880:
@@ -2082,6 +2755,19 @@ int radeon_asic_init(struct radeon_device *rdev)
2082 else 2755 else
2083 rdev->has_uvd = true; 2756 rdev->has_uvd = true;
2084 break; 2757 break;
2758 case CHIP_BONAIRE:
2759 rdev->asic = &ci_asic;
2760 rdev->num_crtc = 6;
2761 break;
2762 case CHIP_KAVERI:
2763 case CHIP_KABINI:
2764 rdev->asic = &kv_asic;
2765 /* set num crtcs */
2766 if (rdev->family == CHIP_KAVERI)
2767 rdev->num_crtc = 4;
2768 else
2769 rdev->num_crtc = 2;
2770 break;
2085 default: 2771 default:
2086 /* FIXME: not supported yet */ 2772 /* FIXME: not supported yet */
2087 return -EINVAL; 2773 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a72759ede753..2497d0a02de5 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -47,6 +47,12 @@ u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
47void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level); 47void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
48u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder); 48u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
49 49
50u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
51 struct radeon_ring *ring);
52u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
53 struct radeon_ring *ring);
54void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
55 struct radeon_ring *ring);
50 56
51/* 57/*
52 * r100,rv100,rs100,rv200,rs200 58 * r100,rv100,rs100,rv200,rs200
@@ -395,6 +401,33 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
395int r600_mc_wait_for_idle(struct radeon_device *rdev); 401int r600_mc_wait_for_idle(struct radeon_device *rdev);
396u32 r600_get_xclk(struct radeon_device *rdev); 402u32 r600_get_xclk(struct radeon_device *rdev);
397uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); 403uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
404int rv6xx_get_temp(struct radeon_device *rdev);
405int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
406void r600_dpm_post_set_power_state(struct radeon_device *rdev);
407/* rv6xx dpm */
408int rv6xx_dpm_init(struct radeon_device *rdev);
409int rv6xx_dpm_enable(struct radeon_device *rdev);
410void rv6xx_dpm_disable(struct radeon_device *rdev);
411int rv6xx_dpm_set_power_state(struct radeon_device *rdev);
412void rv6xx_setup_asic(struct radeon_device *rdev);
413void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev);
414void rv6xx_dpm_fini(struct radeon_device *rdev);
415u32 rv6xx_dpm_get_sclk(struct radeon_device *rdev, bool low);
416u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low);
417void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
418 struct radeon_ps *ps);
419/* rs780 dpm */
420int rs780_dpm_init(struct radeon_device *rdev);
421int rs780_dpm_enable(struct radeon_device *rdev);
422void rs780_dpm_disable(struct radeon_device *rdev);
423int rs780_dpm_set_power_state(struct radeon_device *rdev);
424void rs780_dpm_setup_asic(struct radeon_device *rdev);
425void rs780_dpm_display_configuration_changed(struct radeon_device *rdev);
426void rs780_dpm_fini(struct radeon_device *rdev);
427u32 rs780_dpm_get_sclk(struct radeon_device *rdev, bool low);
428u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low);
429void rs780_dpm_print_power_state(struct radeon_device *rdev,
430 struct radeon_ps *ps);
398 431
399/* uvd */ 432/* uvd */
400int r600_uvd_init(struct radeon_device *rdev); 433int r600_uvd_init(struct radeon_device *rdev);
@@ -428,6 +461,19 @@ int rv770_copy_dma(struct radeon_device *rdev,
428u32 rv770_get_xclk(struct radeon_device *rdev); 461u32 rv770_get_xclk(struct radeon_device *rdev);
429int rv770_uvd_resume(struct radeon_device *rdev); 462int rv770_uvd_resume(struct radeon_device *rdev);
430int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 463int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
464int rv770_get_temp(struct radeon_device *rdev);
465/* rv7xx pm */
466int rv770_dpm_init(struct radeon_device *rdev);
467int rv770_dpm_enable(struct radeon_device *rdev);
468void rv770_dpm_disable(struct radeon_device *rdev);
469int rv770_dpm_set_power_state(struct radeon_device *rdev);
470void rv770_dpm_setup_asic(struct radeon_device *rdev);
471void rv770_dpm_display_configuration_changed(struct radeon_device *rdev);
472void rv770_dpm_fini(struct radeon_device *rdev);
473u32 rv770_dpm_get_sclk(struct radeon_device *rdev, bool low);
474u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low);
475void rv770_dpm_print_power_state(struct radeon_device *rdev,
476 struct radeon_ps *ps);
431 477
432/* 478/*
433 * evergreen 479 * evergreen
@@ -482,6 +528,39 @@ int evergreen_copy_dma(struct radeon_device *rdev,
482 struct radeon_fence **fence); 528 struct radeon_fence **fence);
483void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); 529void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
484void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); 530void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
531int evergreen_get_temp(struct radeon_device *rdev);
532int sumo_get_temp(struct radeon_device *rdev);
533int tn_get_temp(struct radeon_device *rdev);
534int cypress_dpm_init(struct radeon_device *rdev);
535void cypress_dpm_setup_asic(struct radeon_device *rdev);
536int cypress_dpm_enable(struct radeon_device *rdev);
537void cypress_dpm_disable(struct radeon_device *rdev);
538int cypress_dpm_set_power_state(struct radeon_device *rdev);
539void cypress_dpm_display_configuration_changed(struct radeon_device *rdev);
540void cypress_dpm_fini(struct radeon_device *rdev);
541int btc_dpm_init(struct radeon_device *rdev);
542void btc_dpm_setup_asic(struct radeon_device *rdev);
543int btc_dpm_enable(struct radeon_device *rdev);
544void btc_dpm_disable(struct radeon_device *rdev);
545int btc_dpm_pre_set_power_state(struct radeon_device *rdev);
546int btc_dpm_set_power_state(struct radeon_device *rdev);
547void btc_dpm_post_set_power_state(struct radeon_device *rdev);
548void btc_dpm_fini(struct radeon_device *rdev);
549u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
550u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
551int sumo_dpm_init(struct radeon_device *rdev);
552int sumo_dpm_enable(struct radeon_device *rdev);
553void sumo_dpm_disable(struct radeon_device *rdev);
554int sumo_dpm_pre_set_power_state(struct radeon_device *rdev);
555int sumo_dpm_set_power_state(struct radeon_device *rdev);
556void sumo_dpm_post_set_power_state(struct radeon_device *rdev);
557void sumo_dpm_setup_asic(struct radeon_device *rdev);
558void sumo_dpm_display_configuration_changed(struct radeon_device *rdev);
559void sumo_dpm_fini(struct radeon_device *rdev);
560u32 sumo_dpm_get_sclk(struct radeon_device *rdev, bool low);
561u32 sumo_dpm_get_mclk(struct radeon_device *rdev, bool low);
562void sumo_dpm_print_power_state(struct radeon_device *rdev,
563 struct radeon_ps *ps);
485 564
486/* 565/*
487 * cayman 566 * cayman
@@ -516,6 +595,32 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
516bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); 595bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
517void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 596void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
518 597
598int ni_dpm_init(struct radeon_device *rdev);
599void ni_dpm_setup_asic(struct radeon_device *rdev);
600int ni_dpm_enable(struct radeon_device *rdev);
601void ni_dpm_disable(struct radeon_device *rdev);
602int ni_dpm_pre_set_power_state(struct radeon_device *rdev);
603int ni_dpm_set_power_state(struct radeon_device *rdev);
604void ni_dpm_post_set_power_state(struct radeon_device *rdev);
605void ni_dpm_fini(struct radeon_device *rdev);
606u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low);
607u32 ni_dpm_get_mclk(struct radeon_device *rdev, bool low);
608void ni_dpm_print_power_state(struct radeon_device *rdev,
609 struct radeon_ps *ps);
610int trinity_dpm_init(struct radeon_device *rdev);
611int trinity_dpm_enable(struct radeon_device *rdev);
612void trinity_dpm_disable(struct radeon_device *rdev);
613int trinity_dpm_pre_set_power_state(struct radeon_device *rdev);
614int trinity_dpm_set_power_state(struct radeon_device *rdev);
615void trinity_dpm_post_set_power_state(struct radeon_device *rdev);
616void trinity_dpm_setup_asic(struct radeon_device *rdev);
617void trinity_dpm_display_configuration_changed(struct radeon_device *rdev);
618void trinity_dpm_fini(struct radeon_device *rdev);
619u32 trinity_dpm_get_sclk(struct radeon_device *rdev, bool low);
620u32 trinity_dpm_get_mclk(struct radeon_device *rdev, bool low);
621void trinity_dpm_print_power_state(struct radeon_device *rdev,
622 struct radeon_ps *ps);
623
519/* DCE6 - SI */ 624/* DCE6 - SI */
520void dce6_bandwidth_update(struct radeon_device *rdev); 625void dce6_bandwidth_update(struct radeon_device *rdev);
521 626
@@ -552,5 +657,78 @@ void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
552u32 si_get_xclk(struct radeon_device *rdev); 657u32 si_get_xclk(struct radeon_device *rdev);
553uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev); 658uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
554int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 659int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
660int si_get_temp(struct radeon_device *rdev);
661int si_dpm_init(struct radeon_device *rdev);
662void si_dpm_setup_asic(struct radeon_device *rdev);
663int si_dpm_enable(struct radeon_device *rdev);
664void si_dpm_disable(struct radeon_device *rdev);
665int si_dpm_pre_set_power_state(struct radeon_device *rdev);
666int si_dpm_set_power_state(struct radeon_device *rdev);
667void si_dpm_post_set_power_state(struct radeon_device *rdev);
668void si_dpm_fini(struct radeon_device *rdev);
669void si_dpm_display_configuration_changed(struct radeon_device *rdev);
670
671/* DCE8 - CIK */
672void dce8_bandwidth_update(struct radeon_device *rdev);
673
674/*
675 * cik
676 */
677uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev);
678u32 cik_get_xclk(struct radeon_device *rdev);
679uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
680void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
681int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
682int cik_uvd_resume(struct radeon_device *rdev);
683void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
684 struct radeon_fence *fence);
685void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
686 struct radeon_ring *ring,
687 struct radeon_semaphore *semaphore,
688 bool emit_wait);
689void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
690int cik_copy_dma(struct radeon_device *rdev,
691 uint64_t src_offset, uint64_t dst_offset,
692 unsigned num_gpu_pages,
693 struct radeon_fence **fence);
694int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
695int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
696bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
697void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
698 struct radeon_fence *fence);
699void cik_fence_compute_ring_emit(struct radeon_device *rdev,
700 struct radeon_fence *fence);
701void cik_semaphore_ring_emit(struct radeon_device *rdev,
702 struct radeon_ring *cp,
703 struct radeon_semaphore *semaphore,
704 bool emit_wait);
705void cik_pcie_gart_tlb_flush(struct radeon_device *rdev);
706int cik_init(struct radeon_device *rdev);
707void cik_fini(struct radeon_device *rdev);
708int cik_suspend(struct radeon_device *rdev);
709int cik_resume(struct radeon_device *rdev);
710bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
711int cik_asic_reset(struct radeon_device *rdev);
712void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
713int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
714int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
715int cik_irq_set(struct radeon_device *rdev);
716int cik_irq_process(struct radeon_device *rdev);
717int cik_vm_init(struct radeon_device *rdev);
718void cik_vm_fini(struct radeon_device *rdev);
719void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
720void cik_vm_set_page(struct radeon_device *rdev,
721 struct radeon_ib *ib,
722 uint64_t pe,
723 uint64_t addr, unsigned count,
724 uint32_t incr, uint32_t flags);
725void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
726int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
727u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
728 struct radeon_ring *ring);
729u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
730 struct radeon_ring *ring);
731void cik_compute_ring_set_wptr(struct radeon_device *rdev,
732 struct radeon_ring *ring);
555 733
556#endif 734#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index dea6f63c9724..a8296e0f8543 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -56,10 +56,6 @@ extern void
56radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, 56radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
57 uint32_t supported_device); 57 uint32_t supported_device);
58 58
59/* local */
60static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
61 u16 voltage_id, u16 *voltage);
62
63union atom_supported_devices { 59union atom_supported_devices {
64 struct _ATOM_SUPPORTED_DEVICES_INFO info; 60 struct _ATOM_SUPPORTED_DEVICES_INFO info;
65 struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2; 61 struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
@@ -1247,6 +1243,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1247 } 1243 }
1248 rdev->clock.dp_extclk = 1244 rdev->clock.dp_extclk =
1249 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); 1245 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
1246 rdev->clock.current_dispclk = rdev->clock.default_dispclk;
1250 } 1247 }
1251 *dcpll = *p1pll; 1248 *dcpll = *p1pll;
1252 1249
@@ -1269,6 +1266,7 @@ union igp_info {
1269 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; 1266 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
1270 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; 1267 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
1271 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; 1268 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
1269 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
1272}; 1270};
1273 1271
1274bool radeon_atombios_sideport_present(struct radeon_device *rdev) 1272bool radeon_atombios_sideport_present(struct radeon_device *rdev)
@@ -1438,6 +1436,22 @@ static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
1438 break; 1436 break;
1439 } 1437 }
1440 break; 1438 break;
1439 case 8:
1440 switch (id) {
1441 case ASIC_INTERNAL_SS_ON_TMDS:
1442 percentage = le16_to_cpu(igp_info->info_8.usDVISSPercentage);
1443 rate = le16_to_cpu(igp_info->info_8.usDVISSpreadRateIn10Hz);
1444 break;
1445 case ASIC_INTERNAL_SS_ON_HDMI:
1446 percentage = le16_to_cpu(igp_info->info_8.usHDMISSPercentage);
1447 rate = le16_to_cpu(igp_info->info_8.usHDMISSpreadRateIn10Hz);
1448 break;
1449 case ASIC_INTERNAL_SS_ON_LVDS:
1450 percentage = le16_to_cpu(igp_info->info_8.usLvdsSSPercentage);
1451 rate = le16_to_cpu(igp_info->info_8.usLvdsSSpreadRateIn10Hz);
1452 break;
1453 }
1454 break;
1441 default: 1455 default:
1442 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); 1456 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
1443 break; 1457 break;
@@ -1499,6 +1513,10 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1499 le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1513 le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
1500 ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1514 ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
1501 ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); 1515 ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
1516 if ((crev == 2) &&
1517 ((id == ASIC_INTERNAL_ENGINE_SS) ||
1518 (id == ASIC_INTERNAL_MEMORY_SS)))
1519 ss->rate /= 100;
1502 return true; 1520 return true;
1503 } 1521 }
1504 } 1522 }
@@ -1513,6 +1531,9 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1513 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1531 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
1514 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1532 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
1515 ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); 1533 ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
1534 if ((id == ASIC_INTERNAL_ENGINE_SS) ||
1535 (id == ASIC_INTERNAL_MEMORY_SS))
1536 ss->rate /= 100;
1516 if (rdev->flags & RADEON_IS_IGP) 1537 if (rdev->flags & RADEON_IS_IGP)
1517 radeon_atombios_get_igp_ss_overrides(rdev, ss, id); 1538 radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
1518 return true; 1539 return true;
@@ -1927,6 +1948,7 @@ static const char *pp_lib_thermal_controller_names[] = {
1927 "Northern Islands", 1948 "Northern Islands",
1928 "Southern Islands", 1949 "Southern Islands",
1929 "lm96163", 1950 "lm96163",
1951 "Sea Islands",
1930}; 1952};
1931 1953
1932union power_info { 1954union power_info {
@@ -1944,6 +1966,7 @@ union pplib_clock_info {
1944 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 1966 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
1945 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 1967 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
1946 struct _ATOM_PPLIB_SI_CLOCK_INFO si; 1968 struct _ATOM_PPLIB_SI_CLOCK_INFO si;
1969 struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
1947}; 1970};
1948 1971
1949union pplib_power_state { 1972union pplib_power_state {
@@ -2209,6 +2232,11 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
2209 (controller->ucFanParameters & 2232 (controller->ucFanParameters &
2210 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 2233 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2211 rdev->pm.int_thermal_type = THERMAL_TYPE_SI; 2234 rdev->pm.int_thermal_type = THERMAL_TYPE_SI;
2235 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
2236 DRM_INFO("Internal thermal controller %s fan control\n",
2237 (controller->ucFanParameters &
2238 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2239 rdev->pm.int_thermal_type = THERMAL_TYPE_CI;
2212 } else if ((controller->ucType == 2240 } else if ((controller->ucType ==
2213 ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || 2241 ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
2214 (controller->ucType == 2242 (controller->ucType ==
@@ -2241,8 +2269,8 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
2241 } 2269 }
2242} 2270}
2243 2271
2244static void radeon_atombios_get_default_voltages(struct radeon_device *rdev, 2272void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
2245 u16 *vddc, u16 *vddci) 2273 u16 *vddc, u16 *vddci, u16 *mvdd)
2246{ 2274{
2247 struct radeon_mode_info *mode_info = &rdev->mode_info; 2275 struct radeon_mode_info *mode_info = &rdev->mode_info;
2248 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); 2276 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
@@ -2252,6 +2280,7 @@ static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
2252 2280
2253 *vddc = 0; 2281 *vddc = 0;
2254 *vddci = 0; 2282 *vddci = 0;
2283 *mvdd = 0;
2255 2284
2256 if (atom_parse_data_header(mode_info->atom_context, index, NULL, 2285 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
2257 &frev, &crev, &data_offset)) { 2286 &frev, &crev, &data_offset)) {
@@ -2259,8 +2288,10 @@ static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
2259 (union firmware_info *)(mode_info->atom_context->bios + 2288 (union firmware_info *)(mode_info->atom_context->bios +
2260 data_offset); 2289 data_offset);
2261 *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); 2290 *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
2262 if ((frev == 2) && (crev >= 2)) 2291 if ((frev == 2) && (crev >= 2)) {
2263 *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage); 2292 *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
2293 *mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage);
2294 }
2264 } 2295 }
2265} 2296}
2266 2297
@@ -2271,9 +2302,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
2271 int j; 2302 int j;
2272 u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); 2303 u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2273 u32 misc2 = le16_to_cpu(non_clock_info->usClassification); 2304 u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
2274 u16 vddc, vddci; 2305 u16 vddc, vddci, mvdd;
2275 2306
2276 radeon_atombios_get_default_voltages(rdev, &vddc, &vddci); 2307 radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
2277 2308
2278 rdev->pm.power_state[state_index].misc = misc; 2309 rdev->pm.power_state[state_index].misc = misc;
2279 rdev->pm.power_state[state_index].misc2 = misc2; 2310 rdev->pm.power_state[state_index].misc2 = misc2;
@@ -2316,7 +2347,13 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
2316 rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage; 2347 rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
2317 rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci; 2348 rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
2318 } else { 2349 } else {
2319 /* patch the table values with the default slck/mclk from firmware info */ 2350 u16 max_vddci = 0;
2351
2352 if (ASIC_IS_DCE4(rdev))
2353 radeon_atom_get_max_voltage(rdev,
2354 SET_VOLTAGE_TYPE_ASIC_VDDCI,
2355 &max_vddci);
2356 /* patch the table values with the default sclk/mclk from firmware info */
2320 for (j = 0; j < mode_index; j++) { 2357 for (j = 0; j < mode_index; j++) {
2321 rdev->pm.power_state[state_index].clock_info[j].mclk = 2358 rdev->pm.power_state[state_index].clock_info[j].mclk =
2322 rdev->clock.default_mclk; 2359 rdev->clock.default_mclk;
@@ -2325,6 +2362,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
2325 if (vddc) 2362 if (vddc)
2326 rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = 2363 rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
2327 vddc; 2364 vddc;
2365 if (max_vddci)
2366 rdev->pm.power_state[state_index].clock_info[j].voltage.vddci =
2367 max_vddci;
2328 } 2368 }
2329 } 2369 }
2330 } 2370 }
@@ -2347,6 +2387,15 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2347 sclk |= clock_info->rs780.ucLowEngineClockHigh << 16; 2387 sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
2348 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; 2388 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
2349 } 2389 }
2390 } else if (rdev->family >= CHIP_BONAIRE) {
2391 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
2392 sclk |= clock_info->ci.ucEngineClockHigh << 16;
2393 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
2394 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
2395 rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
2396 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
2397 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
2398 VOLTAGE_NONE;
2350 } else if (rdev->family >= CHIP_TAHITI) { 2399 } else if (rdev->family >= CHIP_TAHITI) {
2351 sclk = le16_to_cpu(clock_info->si.usEngineClockLow); 2400 sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
2352 sclk |= clock_info->si.ucEngineClockHigh << 16; 2401 sclk |= clock_info->si.ucEngineClockHigh << 16;
@@ -2667,6 +2716,8 @@ union get_clock_dividers {
2667 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3; 2716 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
2668 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4; 2717 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
2669 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5; 2718 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
2719 struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6 v6_in;
2720 struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 v6_out;
2670}; 2721};
2671 2722
2672int radeon_atom_get_clock_dividers(struct radeon_device *rdev, 2723int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
@@ -2699,7 +2750,8 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
2699 break; 2750 break;
2700 case 2: 2751 case 2:
2701 case 3: 2752 case 3:
2702 /* r6xx, r7xx, evergreen, ni */ 2753 case 5:
2754 /* r6xx, r7xx, evergreen, ni, si */
2703 if (rdev->family <= CHIP_RV770) { 2755 if (rdev->family <= CHIP_RV770) {
2704 args.v2.ucAction = clock_type; 2756 args.v2.ucAction = clock_type;
2705 args.v2.ulClock = cpu_to_le32(clock); /* 10 khz */ 2757 args.v2.ulClock = cpu_to_le32(clock); /* 10 khz */
@@ -2732,6 +2784,9 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
2732 dividers->vco_mode = (args.v3.ucCntlFlag & 2784 dividers->vco_mode = (args.v3.ucCntlFlag &
2733 ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0; 2785 ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
2734 } else { 2786 } else {
2787 /* for SI we use ComputeMemoryClockParam for memory plls */
2788 if (rdev->family >= CHIP_TAHITI)
2789 return -EINVAL;
2735 args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock); 2790 args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
2736 if (strobe_mode) 2791 if (strobe_mode)
2737 args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN; 2792 args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
@@ -2757,9 +2812,76 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
2757 2812
2758 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2813 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2759 2814
2760 dividers->post_div = args.v4.ucPostDiv; 2815 dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
2761 dividers->real_clock = le32_to_cpu(args.v4.ulClock); 2816 dividers->real_clock = le32_to_cpu(args.v4.ulClock);
2762 break; 2817 break;
2818 case 6:
2819 /* CI */
2820 /* COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, COMPUTE_GPUCLK_INPUT_FLAG_SCLK */
2821 args.v6_in.ulClock.ulComputeClockFlag = clock_type;
2822 args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */
2823
2824 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2825
2826 dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
2827 dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
2828 dividers->ref_div = args.v6_out.ucPllRefDiv;
2829 dividers->post_div = args.v6_out.ucPllPostDiv;
2830 dividers->flags = args.v6_out.ucPllCntlFlag;
2831 dividers->real_clock = le32_to_cpu(args.v6_out.ulClock.ulClock);
2832 dividers->post_divider = args.v6_out.ulClock.ucPostDiv;
2833 break;
2834 default:
2835 return -EINVAL;
2836 }
2837 return 0;
2838}
2839
2840int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
2841 u32 clock,
2842 bool strobe_mode,
2843 struct atom_mpll_param *mpll_param)
2844{
2845 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 args;
2846 int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam);
2847 u8 frev, crev;
2848
2849 memset(&args, 0, sizeof(args));
2850 memset(mpll_param, 0, sizeof(struct atom_mpll_param));
2851
2852 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
2853 return -EINVAL;
2854
2855 switch (frev) {
2856 case 2:
2857 switch (crev) {
2858 case 1:
2859 /* SI */
2860 args.ulClock = cpu_to_le32(clock); /* 10 khz */
2861 args.ucInputFlag = 0;
2862 if (strobe_mode)
2863 args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
2864
2865 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2866
2867 mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
2868 mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
2869 mpll_param->post_div = args.ucPostDiv;
2870 mpll_param->dll_speed = args.ucDllSpeed;
2871 mpll_param->bwcntl = args.ucBWCntl;
2872 mpll_param->vco_mode =
2873 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK) ? 1 : 0;
2874 mpll_param->yclk_sel =
2875 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
2876 mpll_param->qdr =
2877 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0;
2878 mpll_param->half_rate =
2879 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0;
2880 break;
2881 default:
2882 return -EINVAL;
2883 }
2884 break;
2763 default: 2885 default:
2764 return -EINVAL; 2886 return -EINVAL;
2765 } 2887 }
@@ -2819,6 +2941,48 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
2819 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2941 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2820} 2942}
2821 2943
2944void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
2945 u32 eng_clock, u32 mem_clock)
2946{
2947 SET_ENGINE_CLOCK_PS_ALLOCATION args;
2948 int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
2949 u32 tmp;
2950
2951 memset(&args, 0, sizeof(args));
2952
2953 tmp = eng_clock & SET_CLOCK_FREQ_MASK;
2954 tmp |= (COMPUTE_ENGINE_PLL_PARAM << 24);
2955
2956 args.ulTargetEngineClock = cpu_to_le32(tmp);
2957 if (mem_clock)
2958 args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
2959
2960 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2961}
2962
2963void radeon_atom_update_memory_dll(struct radeon_device *rdev,
2964 u32 mem_clock)
2965{
2966 u32 args;
2967 int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
2968
2969 args = cpu_to_le32(mem_clock); /* 10 khz */
2970
2971 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2972}
2973
2974void radeon_atom_set_ac_timing(struct radeon_device *rdev,
2975 u32 mem_clock)
2976{
2977 SET_MEMORY_CLOCK_PS_ALLOCATION args;
2978 int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
2979 u32 tmp = mem_clock | (COMPUTE_MEMORY_PLL_PARAM << 24);
2980
2981 args.ulTargetMemoryClock = cpu_to_le32(tmp); /* 10 khz */
2982
2983 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2984}
2985
2822union set_voltage { 2986union set_voltage {
2823 struct _SET_VOLTAGE_PS_ALLOCATION alloc; 2987 struct _SET_VOLTAGE_PS_ALLOCATION alloc;
2824 struct _SET_VOLTAGE_PARAMETERS v1; 2988 struct _SET_VOLTAGE_PARAMETERS v1;
@@ -2863,8 +3027,8 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
2863 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 3027 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2864} 3028}
2865 3029
2866static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type, 3030int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
2867 u16 voltage_id, u16 *voltage) 3031 u16 voltage_id, u16 *voltage)
2868{ 3032{
2869 union set_voltage args; 3033 union set_voltage args;
2870 int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); 3034 int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
@@ -2902,6 +3066,694 @@ static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
2902 return 0; 3066 return 0;
2903} 3067}
2904 3068
3069int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
3070 u16 *voltage,
3071 u16 leakage_idx)
3072{
3073 return radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
3074}
3075
3076int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
3077 u16 voltage_level, u8 voltage_type,
3078 u32 *gpio_value, u32 *gpio_mask)
3079{
3080 union set_voltage args;
3081 int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
3082 u8 frev, crev;
3083
3084 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
3085 return -EINVAL;
3086
3087 switch (crev) {
3088 case 1:
3089 return -EINVAL;
3090 case 2:
3091 args.v2.ucVoltageType = voltage_type;
3092 args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK;
3093 args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
3094
3095 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
3096
3097 *gpio_mask = le32_to_cpu(*(u32 *)&args.v2);
3098
3099 args.v2.ucVoltageType = voltage_type;
3100 args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL;
3101 args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
3102
3103 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
3104
3105 *gpio_value = le32_to_cpu(*(u32 *)&args.v2);
3106 break;
3107 default:
3108 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3109 return -EINVAL;
3110 }
3111
3112 return 0;
3113}
3114
3115union voltage_object_info {
3116 struct _ATOM_VOLTAGE_OBJECT_INFO v1;
3117 struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
3118 struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
3119};
3120
3121union voltage_object {
3122 struct _ATOM_VOLTAGE_OBJECT v1;
3123 struct _ATOM_VOLTAGE_OBJECT_V2 v2;
3124 union _ATOM_VOLTAGE_OBJECT_V3 v3;
3125};
3126
3127static ATOM_VOLTAGE_OBJECT *atom_lookup_voltage_object_v1(ATOM_VOLTAGE_OBJECT_INFO *v1,
3128 u8 voltage_type)
3129{
3130 u32 size = le16_to_cpu(v1->sHeader.usStructureSize);
3131 u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO, asVoltageObj[0]);
3132 u8 *start = (u8 *)v1;
3133
3134 while (offset < size) {
3135 ATOM_VOLTAGE_OBJECT *vo = (ATOM_VOLTAGE_OBJECT *)(start + offset);
3136 if (vo->ucVoltageType == voltage_type)
3137 return vo;
3138 offset += offsetof(ATOM_VOLTAGE_OBJECT, asFormula.ucVIDAdjustEntries) +
3139 vo->asFormula.ucNumOfVoltageEntries;
3140 }
3141 return NULL;
3142}
3143
3144static ATOM_VOLTAGE_OBJECT_V2 *atom_lookup_voltage_object_v2(ATOM_VOLTAGE_OBJECT_INFO_V2 *v2,
3145 u8 voltage_type)
3146{
3147 u32 size = le16_to_cpu(v2->sHeader.usStructureSize);
3148 u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V2, asVoltageObj[0]);
3149 u8 *start = (u8*)v2;
3150
3151 while (offset < size) {
3152 ATOM_VOLTAGE_OBJECT_V2 *vo = (ATOM_VOLTAGE_OBJECT_V2 *)(start + offset);
3153 if (vo->ucVoltageType == voltage_type)
3154 return vo;
3155 offset += offsetof(ATOM_VOLTAGE_OBJECT_V2, asFormula.asVIDAdjustEntries) +
3156 (vo->asFormula.ucNumOfVoltageEntries * sizeof(VOLTAGE_LUT_ENTRY));
3157 }
3158 return NULL;
3159}
3160
3161static ATOM_VOLTAGE_OBJECT_V3 *atom_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *v3,
3162 u8 voltage_type, u8 voltage_mode)
3163{
3164 u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
3165 u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
3166 u8 *start = (u8*)v3;
3167
3168 while (offset < size) {
3169 ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
3170 if ((vo->asGpioVoltageObj.sHeader.ucVoltageType == voltage_type) &&
3171 (vo->asGpioVoltageObj.sHeader.ucVoltageMode == voltage_mode))
3172 return vo;
3173 offset += le16_to_cpu(vo->asGpioVoltageObj.sHeader.usSize);
3174 }
3175 return NULL;
3176}
3177
3178bool
3179radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
3180 u8 voltage_type, u8 voltage_mode)
3181{
3182 int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
3183 u8 frev, crev;
3184 u16 data_offset, size;
3185 union voltage_object_info *voltage_info;
3186 union voltage_object *voltage_object = NULL;
3187
3188 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3189 &frev, &crev, &data_offset)) {
3190 voltage_info = (union voltage_object_info *)
3191 (rdev->mode_info.atom_context->bios + data_offset);
3192
3193 switch (frev) {
3194 case 1:
3195 case 2:
3196 switch (crev) {
3197 case 1:
3198 voltage_object = (union voltage_object *)
3199 atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
3200 if (voltage_object &&
3201 (voltage_object->v1.asControl.ucVoltageControlId == VOLTAGE_CONTROLLED_BY_GPIO))
3202 return true;
3203 break;
3204 case 2:
3205 voltage_object = (union voltage_object *)
3206 atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
3207 if (voltage_object &&
3208 (voltage_object->v2.asControl.ucVoltageControlId == VOLTAGE_CONTROLLED_BY_GPIO))
3209 return true;
3210 break;
3211 default:
3212 DRM_ERROR("unknown voltage object table\n");
3213 return false;
3214 }
3215 break;
3216 case 3:
3217 switch (crev) {
3218 case 1:
3219 if (atom_lookup_voltage_object_v3(&voltage_info->v3,
3220 voltage_type, voltage_mode))
3221 return true;
3222 break;
3223 default:
3224 DRM_ERROR("unknown voltage object table\n");
3225 return false;
3226 }
3227 break;
3228 default:
3229 DRM_ERROR("unknown voltage object table\n");
3230 return false;
3231 }
3232
3233 }
3234 return false;
3235}
3236
3237int radeon_atom_get_max_voltage(struct radeon_device *rdev,
3238 u8 voltage_type, u16 *max_voltage)
3239{
3240 int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
3241 u8 frev, crev;
3242 u16 data_offset, size;
3243 union voltage_object_info *voltage_info;
3244 union voltage_object *voltage_object = NULL;
3245
3246 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3247 &frev, &crev, &data_offset)) {
3248 voltage_info = (union voltage_object_info *)
3249 (rdev->mode_info.atom_context->bios + data_offset);
3250
3251 switch (crev) {
3252 case 1:
3253 voltage_object = (union voltage_object *)
3254 atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
3255 if (voltage_object) {
3256 ATOM_VOLTAGE_FORMULA *formula =
3257 &voltage_object->v1.asFormula;
3258 if (formula->ucFlag & 1)
3259 *max_voltage =
3260 le16_to_cpu(formula->usVoltageBaseLevel) +
3261 formula->ucNumOfVoltageEntries / 2 *
3262 le16_to_cpu(formula->usVoltageStep);
3263 else
3264 *max_voltage =
3265 le16_to_cpu(formula->usVoltageBaseLevel) +
3266 (formula->ucNumOfVoltageEntries - 1) *
3267 le16_to_cpu(formula->usVoltageStep);
3268 return 0;
3269 }
3270 break;
3271 case 2:
3272 voltage_object = (union voltage_object *)
3273 atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
3274 if (voltage_object) {
3275 ATOM_VOLTAGE_FORMULA_V2 *formula =
3276 &voltage_object->v2.asFormula;
3277 if (formula->ucNumOfVoltageEntries) {
3278 *max_voltage =
3279 le16_to_cpu(formula->asVIDAdjustEntries[
3280 formula->ucNumOfVoltageEntries - 1
3281 ].usVoltageValue);
3282 return 0;
3283 }
3284 }
3285 break;
3286 default:
3287 DRM_ERROR("unknown voltage object table\n");
3288 return -EINVAL;
3289 }
3290
3291 }
3292 return -EINVAL;
3293}
3294
3295int radeon_atom_get_min_voltage(struct radeon_device *rdev,
3296 u8 voltage_type, u16 *min_voltage)
3297{
3298 int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
3299 u8 frev, crev;
3300 u16 data_offset, size;
3301 union voltage_object_info *voltage_info;
3302 union voltage_object *voltage_object = NULL;
3303
3304 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3305 &frev, &crev, &data_offset)) {
3306 voltage_info = (union voltage_object_info *)
3307 (rdev->mode_info.atom_context->bios + data_offset);
3308
3309 switch (crev) {
3310 case 1:
3311 voltage_object = (union voltage_object *)
3312 atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
3313 if (voltage_object) {
3314 ATOM_VOLTAGE_FORMULA *formula =
3315 &voltage_object->v1.asFormula;
3316 *min_voltage =
3317 le16_to_cpu(formula->usVoltageBaseLevel);
3318 return 0;
3319 }
3320 break;
3321 case 2:
3322 voltage_object = (union voltage_object *)
3323 atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
3324 if (voltage_object) {
3325 ATOM_VOLTAGE_FORMULA_V2 *formula =
3326 &voltage_object->v2.asFormula;
3327 if (formula->ucNumOfVoltageEntries) {
3328 *min_voltage =
3329 le16_to_cpu(formula->asVIDAdjustEntries[
3330 0
3331 ].usVoltageValue);
3332 return 0;
3333 }
3334 }
3335 break;
3336 default:
3337 DRM_ERROR("unknown voltage object table\n");
3338 return -EINVAL;
3339 }
3340
3341 }
3342 return -EINVAL;
3343}
3344
3345int radeon_atom_get_voltage_step(struct radeon_device *rdev,
3346 u8 voltage_type, u16 *voltage_step)
3347{
3348 int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
3349 u8 frev, crev;
3350 u16 data_offset, size;
3351 union voltage_object_info *voltage_info;
3352 union voltage_object *voltage_object = NULL;
3353
3354 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3355 &frev, &crev, &data_offset)) {
3356 voltage_info = (union voltage_object_info *)
3357 (rdev->mode_info.atom_context->bios + data_offset);
3358
3359 switch (crev) {
3360 case 1:
3361 voltage_object = (union voltage_object *)
3362 atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
3363 if (voltage_object) {
3364 ATOM_VOLTAGE_FORMULA *formula =
3365 &voltage_object->v1.asFormula;
3366 if (formula->ucFlag & 1)
3367 *voltage_step =
3368 (le16_to_cpu(formula->usVoltageStep) + 1) / 2;
3369 else
3370 *voltage_step =
3371 le16_to_cpu(formula->usVoltageStep);
3372 return 0;
3373 }
3374 break;
3375 case 2:
3376 return -EINVAL;
3377 default:
3378 DRM_ERROR("unknown voltage object table\n");
3379 return -EINVAL;
3380 }
3381
3382 }
3383 return -EINVAL;
3384}
3385
3386int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
3387 u8 voltage_type,
3388 u16 nominal_voltage,
3389 u16 *true_voltage)
3390{
3391 u16 min_voltage, max_voltage, voltage_step;
3392
3393 if (radeon_atom_get_max_voltage(rdev, voltage_type, &max_voltage))
3394 return -EINVAL;
3395 if (radeon_atom_get_min_voltage(rdev, voltage_type, &min_voltage))
3396 return -EINVAL;
3397 if (radeon_atom_get_voltage_step(rdev, voltage_type, &voltage_step))
3398 return -EINVAL;
3399
3400 if (nominal_voltage <= min_voltage)
3401 *true_voltage = min_voltage;
3402 else if (nominal_voltage >= max_voltage)
3403 *true_voltage = max_voltage;
3404 else
3405 *true_voltage = min_voltage +
3406 ((nominal_voltage - min_voltage) / voltage_step) *
3407 voltage_step;
3408
3409 return 0;
3410}
3411
3412int radeon_atom_get_voltage_table(struct radeon_device *rdev,
3413 u8 voltage_type, u8 voltage_mode,
3414 struct atom_voltage_table *voltage_table)
3415{
3416 int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
3417 u8 frev, crev;
3418 u16 data_offset, size;
3419 int i, ret;
3420 union voltage_object_info *voltage_info;
3421 union voltage_object *voltage_object = NULL;
3422
3423 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3424 &frev, &crev, &data_offset)) {
3425 voltage_info = (union voltage_object_info *)
3426 (rdev->mode_info.atom_context->bios + data_offset);
3427
3428 switch (frev) {
3429 case 1:
3430 case 2:
3431 switch (crev) {
3432 case 1:
3433 DRM_ERROR("old table version %d, %d\n", frev, crev);
3434 return -EINVAL;
3435 case 2:
3436 voltage_object = (union voltage_object *)
3437 atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
3438 if (voltage_object) {
3439 ATOM_VOLTAGE_FORMULA_V2 *formula =
3440 &voltage_object->v2.asFormula;
3441 if (formula->ucNumOfVoltageEntries > MAX_VOLTAGE_ENTRIES)
3442 return -EINVAL;
3443 for (i = 0; i < formula->ucNumOfVoltageEntries; i++) {
3444 voltage_table->entries[i].value =
3445 le16_to_cpu(formula->asVIDAdjustEntries[i].usVoltageValue);
3446 ret = radeon_atom_get_voltage_gpio_settings(rdev,
3447 voltage_table->entries[i].value,
3448 voltage_type,
3449 &voltage_table->entries[i].smio_low,
3450 &voltage_table->mask_low);
3451 if (ret)
3452 return ret;
3453 }
3454 voltage_table->count = formula->ucNumOfVoltageEntries;
3455 return 0;
3456 }
3457 break;
3458 default:
3459 DRM_ERROR("unknown voltage object table\n");
3460 return -EINVAL;
3461 }
3462 break;
3463 case 3:
3464 switch (crev) {
3465 case 1:
3466 voltage_object = (union voltage_object *)
3467 atom_lookup_voltage_object_v3(&voltage_info->v3,
3468 voltage_type, voltage_mode);
3469 if (voltage_object) {
3470 ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
3471 &voltage_object->v3.asGpioVoltageObj;
3472 if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
3473 return -EINVAL;
3474 for (i = 0; i < gpio->ucGpioEntryNum; i++) {
3475 voltage_table->entries[i].value =
3476 le16_to_cpu(gpio->asVolGpioLut[i].usVoltageValue);
3477 voltage_table->entries[i].smio_low =
3478 le32_to_cpu(gpio->asVolGpioLut[i].ulVoltageId);
3479 }
3480 voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
3481 voltage_table->count = gpio->ucGpioEntryNum;
3482 voltage_table->phase_delay = gpio->ucPhaseDelay;
3483 return 0;
3484 }
3485 break;
3486 default:
3487 DRM_ERROR("unknown voltage object table\n");
3488 return -EINVAL;
3489 }
3490 break;
3491 default:
3492 DRM_ERROR("unknown voltage object table\n");
3493 return -EINVAL;
3494 }
3495 }
3496 return -EINVAL;
3497}
3498
3499union vram_info {
3500 struct _ATOM_VRAM_INFO_V3 v1_3;
3501 struct _ATOM_VRAM_INFO_V4 v1_4;
3502 struct _ATOM_VRAM_INFO_HEADER_V2_1 v2_1;
3503};
3504
3505int radeon_atom_get_memory_info(struct radeon_device *rdev,
3506 u8 module_index, struct atom_memory_info *mem_info)
3507{
3508 int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
3509 u8 frev, crev, i;
3510 u16 data_offset, size;
3511 union vram_info *vram_info;
3512 u8 *p;
3513
3514 memset(mem_info, 0, sizeof(struct atom_memory_info));
3515
3516 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3517 &frev, &crev, &data_offset)) {
3518 vram_info = (union vram_info *)
3519 (rdev->mode_info.atom_context->bios + data_offset);
3520 switch (frev) {
3521 case 1:
3522 switch (crev) {
3523 case 3:
3524 /* r6xx */
3525 if (module_index < vram_info->v1_3.ucNumOfVRAMModule) {
3526 ATOM_VRAM_MODULE_V3 *vram_module =
3527 (ATOM_VRAM_MODULE_V3 *)vram_info->v1_3.aVramInfo;
3528 p = (u8 *)vram_info->v1_3.aVramInfo;
3529
3530 for (i = 0; i < module_index; i++) {
3531 vram_module = (ATOM_VRAM_MODULE_V3 *)p;
3532 if (le16_to_cpu(vram_module->usSize) == 0)
3533 return -EINVAL;
3534 p += le16_to_cpu(vram_module->usSize);
3535 }
3536 mem_info->mem_vendor = vram_module->asMemory.ucMemoryVenderID & 0xf;
3537 mem_info->mem_type = vram_module->asMemory.ucMemoryType & 0xf0;
3538 } else
3539 return -EINVAL;
3540 break;
3541 case 4:
3542 /* r7xx, evergreen */
3543 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
3544 ATOM_VRAM_MODULE_V4 *vram_module =
3545 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
3546 p = (u8 *)vram_info->v1_4.aVramInfo;
3547
3548 for (i = 0; i < module_index; i++) {
3549 vram_module = (ATOM_VRAM_MODULE_V4 *)p;
3550 if (le16_to_cpu(vram_module->usModuleSize) == 0)
3551 return -EINVAL;
3552 p += le16_to_cpu(vram_module->usModuleSize);
3553 }
3554 mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
3555 mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
3556 } else
3557 return -EINVAL;
3558 break;
3559 default:
3560 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3561 return -EINVAL;
3562 }
3563 break;
3564 case 2:
3565 switch (crev) {
3566 case 1:
3567 /* ni */
3568 if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
3569 ATOM_VRAM_MODULE_V7 *vram_module =
3570 (ATOM_VRAM_MODULE_V7 *)vram_info->v2_1.aVramInfo;
3571 p = (u8 *)vram_info->v2_1.aVramInfo;
3572
3573 for (i = 0; i < module_index; i++) {
3574 vram_module = (ATOM_VRAM_MODULE_V7 *)p;
3575 if (le16_to_cpu(vram_module->usModuleSize) == 0)
3576 return -EINVAL;
3577 p += le16_to_cpu(vram_module->usModuleSize);
3578 }
3579 mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
3580 mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
3581 } else
3582 return -EINVAL;
3583 break;
3584 default:
3585 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3586 return -EINVAL;
3587 }
3588 break;
3589 default:
3590 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3591 return -EINVAL;
3592 }
3593 return 0;
3594 }
3595 return -EINVAL;
3596}
3597
3598int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
3599 bool gddr5, u8 module_index,
3600 struct atom_memory_clock_range_table *mclk_range_table)
3601{
3602 int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
3603 u8 frev, crev, i;
3604 u16 data_offset, size;
3605 union vram_info *vram_info;
3606 u32 mem_timing_size = gddr5 ?
3607 sizeof(ATOM_MEMORY_TIMING_FORMAT_V2) : sizeof(ATOM_MEMORY_TIMING_FORMAT);
3608 u8 *p;
3609
3610 memset(mclk_range_table, 0, sizeof(struct atom_memory_clock_range_table));
3611
3612 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3613 &frev, &crev, &data_offset)) {
3614 vram_info = (union vram_info *)
3615 (rdev->mode_info.atom_context->bios + data_offset);
3616 switch (frev) {
3617 case 1:
3618 switch (crev) {
3619 case 3:
3620 DRM_ERROR("old table version %d, %d\n", frev, crev);
3621 return -EINVAL;
3622 case 4:
3623 /* r7xx, evergreen */
3624 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
3625 ATOM_VRAM_MODULE_V4 *vram_module =
3626 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
3627 ATOM_MEMORY_TIMING_FORMAT *format;
3628 p = (u8 *)vram_info->v1_4.aVramInfo;
3629
3630 for (i = 0; i < module_index; i++) {
3631 vram_module = (ATOM_VRAM_MODULE_V4 *)p;
3632 if (le16_to_cpu(vram_module->usModuleSize) == 0)
3633 return -EINVAL;
3634 p += le16_to_cpu(vram_module->usModuleSize);
3635 }
3636 mclk_range_table->num_entries = (u8)
3637 ((vram_module->usModuleSize - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
3638 mem_timing_size);
3639 p = (u8 *)vram_module->asMemTiming;
3640 for (i = 0; i < mclk_range_table->num_entries; i++) {
3641 format = (ATOM_MEMORY_TIMING_FORMAT *)p;
3642 mclk_range_table->mclk[i] = format->ulClkRange;
3643 p += mem_timing_size;
3644 }
3645 } else
3646 return -EINVAL;
3647 break;
3648 default:
3649 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3650 return -EINVAL;
3651 }
3652 break;
3653 case 2:
3654 DRM_ERROR("new table version %d, %d\n", frev, crev);
3655 return -EINVAL;
3656 default:
3657 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3658 return -EINVAL;
3659 }
3660 return 0;
3661 }
3662 return -EINVAL;
3663}
3664
3665#define MEM_ID_MASK 0xff000000
3666#define MEM_ID_SHIFT 24
3667#define CLOCK_RANGE_MASK 0x00ffffff
3668#define CLOCK_RANGE_SHIFT 0
3669#define LOW_NIBBLE_MASK 0xf
3670#define DATA_EQU_PREV 0
3671#define DATA_FROM_TABLE 4
3672
3673int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
3674 u8 module_index,
3675 struct atom_mc_reg_table *reg_table)
3676{
3677 int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
3678 u8 frev, crev, num_entries, t_mem_id, num_ranges = 0;
3679 u32 i = 0, j;
3680 u16 data_offset, size;
3681 union vram_info *vram_info;
3682
3683 memset(reg_table, 0, sizeof(struct atom_mc_reg_table));
3684
3685 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3686 &frev, &crev, &data_offset)) {
3687 vram_info = (union vram_info *)
3688 (rdev->mode_info.atom_context->bios + data_offset);
3689 switch (frev) {
3690 case 1:
3691 DRM_ERROR("old table version %d, %d\n", frev, crev);
3692 return -EINVAL;
3693 case 2:
3694 switch (crev) {
3695 case 1:
3696 if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
3697 ATOM_INIT_REG_BLOCK *reg_block =
3698 (ATOM_INIT_REG_BLOCK *)
3699 ((u8 *)vram_info + le16_to_cpu(vram_info->v2_1.usMemClkPatchTblOffset));
3700 ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data =
3701 (ATOM_MEMORY_SETTING_DATA_BLOCK *)
3702 ((u8 *)reg_block + (2 * sizeof(u16)) +
3703 le16_to_cpu(reg_block->usRegIndexTblSize));
3704 num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
3705 sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
3706 if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
3707 return -EINVAL;
3708 while (!(reg_block->asRegIndexBuf[i].ucPreRegDataLength & ACCESS_PLACEHOLDER) &&
3709 (i < num_entries)) {
3710 reg_table->mc_reg_address[i].s1 =
3711 (u16)(le16_to_cpu(reg_block->asRegIndexBuf[i].usRegIndex));
3712 reg_table->mc_reg_address[i].pre_reg_data =
3713 (u8)(reg_block->asRegIndexBuf[i].ucPreRegDataLength);
3714 i++;
3715 }
3716 reg_table->last = i;
3717 while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) &&
3718 (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
3719 t_mem_id = (u8)((*(u32 *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);
3720 if (module_index == t_mem_id) {
3721 reg_table->mc_reg_table_entry[num_ranges].mclk_max =
3722 (u32)((*(u32 *)reg_data & CLOCK_RANGE_MASK) >> CLOCK_RANGE_SHIFT);
3723 for (i = 0, j = 1; i < reg_table->last; i++) {
3724 if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
3725 reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
3726 (u32)*((u32 *)reg_data + j);
3727 j++;
3728 } else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
3729 reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
3730 reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
3731 }
3732 }
3733 num_ranges++;
3734 }
3735 reg_data += reg_block->usRegDataBlkSize;
3736 }
3737 if (*(u32 *)reg_data != END_OF_REG_DATA_BLOCK)
3738 return -EINVAL;
3739 reg_table->num_entries = num_ranges;
3740 } else
3741 return -EINVAL;
3742 break;
3743 default:
3744 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3745 return -EINVAL;
3746 }
3747 break;
3748 default:
3749 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3750 return -EINVAL;
3751 }
3752 return 0;
3753 }
3754 return -EINVAL;
3755}
3756
2905void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) 3757void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
2906{ 3758{
2907 struct radeon_device *rdev = dev->dev_private; 3759 struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 7e265a58141f..4f6b22b799ba 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -550,6 +550,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
550 return r; 550 return r;
551 } 551 }
552 552
553 /* XXX pick SD/HD/MVC */
553 if (parser.ring == R600_RING_TYPE_UVD_INDEX) 554 if (parser.ring == R600_RING_TYPE_UVD_INDEX)
554 radeon_uvd_note_usage(rdev); 555 radeon_uvd_note_usage(rdev);
555 556
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index b097d5b4ff39..9630e8d95fb4 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -27,9 +27,6 @@
27#include <drm/radeon_drm.h> 27#include <drm/radeon_drm.h>
28#include "radeon.h" 28#include "radeon.h"
29 29
30#define CURSOR_WIDTH 64
31#define CURSOR_HEIGHT 64
32
33static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock) 30static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
34{ 31{
35 struct radeon_device *rdev = crtc->dev->dev_private; 32 struct radeon_device *rdev = crtc->dev->dev_private;
@@ -167,7 +164,8 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
167 goto unpin; 164 goto unpin;
168 } 165 }
169 166
170 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { 167 if ((width > radeon_crtc->max_cursor_width) ||
168 (height > radeon_crtc->max_cursor_height)) {
171 DRM_ERROR("bad cursor width or height %d x %d\n", width, height); 169 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
172 return -EINVAL; 170 return -EINVAL;
173 } 171 }
@@ -233,11 +231,11 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
233 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 231 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
234 232
235 if (x < 0) { 233 if (x < 0) {
236 xorigin = min(-x, CURSOR_WIDTH - 1); 234 xorigin = min(-x, radeon_crtc->max_cursor_width - 1);
237 x = 0; 235 x = 0;
238 } 236 }
239 if (y < 0) { 237 if (y < 0) {
240 yorigin = min(-y, CURSOR_HEIGHT - 1); 238 yorigin = min(-y, radeon_crtc->max_cursor_height - 1);
241 y = 0; 239 y = 0;
242 } 240 }
243 241
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b0dc0b6cb4e0..82335e38ec4f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -95,6 +95,9 @@ static const char radeon_family_name[][16] = {
95 "VERDE", 95 "VERDE",
96 "OLAND", 96 "OLAND",
97 "HAINAN", 97 "HAINAN",
98 "BONAIRE",
99 "KAVERI",
100 "KABINI",
98 "LAST", 101 "LAST",
99}; 102};
100 103
@@ -229,6 +232,94 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
229} 232}
230 233
231/* 234/*
235 * GPU doorbell aperture helpers function.
236 */
237/**
238 * radeon_doorbell_init - Init doorbell driver information.
239 *
240 * @rdev: radeon_device pointer
241 *
242 * Init doorbell driver information (CIK)
243 * Returns 0 on success, error on failure.
244 */
245int radeon_doorbell_init(struct radeon_device *rdev)
246{
247 int i;
248
249 /* doorbell bar mapping */
250 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
251 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
252
253 /* limit to 4 MB for now */
254 if (rdev->doorbell.size > (4 * 1024 * 1024))
255 rdev->doorbell.size = 4 * 1024 * 1024;
256
257 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size);
258 if (rdev->doorbell.ptr == NULL) {
259 return -ENOMEM;
260 }
261 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
262 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
263
264 rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE;
265
266 for (i = 0; i < rdev->doorbell.num_pages; i++) {
267 rdev->doorbell.free[i] = true;
268 }
269 return 0;
270}
271
272/**
273 * radeon_doorbell_fini - Tear down doorbell driver information.
274 *
275 * @rdev: radeon_device pointer
276 *
277 * Tear down doorbell driver information (CIK)
278 */
279void radeon_doorbell_fini(struct radeon_device *rdev)
280{
281 iounmap(rdev->doorbell.ptr);
282 rdev->doorbell.ptr = NULL;
283}
284
285/**
286 * radeon_doorbell_get - Allocate a doorbell page
287 *
288 * @rdev: radeon_device pointer
289 * @doorbell: doorbell page number
290 *
291 * Allocate a doorbell page for use by the driver (all asics).
292 * Returns 0 on success or -EINVAL on failure.
293 */
294int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
295{
296 int i;
297
298 for (i = 0; i < rdev->doorbell.num_pages; i++) {
299 if (rdev->doorbell.free[i]) {
300 rdev->doorbell.free[i] = false;
301 *doorbell = i;
302 return 0;
303 }
304 }
305 return -EINVAL;
306}
307
308/**
309 * radeon_doorbell_free - Free a doorbell page
310 *
311 * @rdev: radeon_device pointer
312 * @doorbell: doorbell page number
313 *
314 * Free a doorbell page allocated for use by the driver (all asics)
315 */
316void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
317{
318 if (doorbell < rdev->doorbell.num_pages)
319 rdev->doorbell.free[doorbell] = true;
320}
321
322/*
232 * radeon_wb_*() 323 * radeon_wb_*()
233 * Writeback is the the method by which the the GPU updates special pages 324 * Writeback is the the method by which the the GPU updates special pages
234 * in memory with the status of certain GPU events (fences, ring pointers, 325 * in memory with the status of certain GPU events (fences, ring pointers,
@@ -1145,8 +1236,13 @@ int radeon_device_init(struct radeon_device *rdev,
1145 /* Registers mapping */ 1236 /* Registers mapping */
1146 /* TODO: block userspace mapping of io register */ 1237 /* TODO: block userspace mapping of io register */
1147 spin_lock_init(&rdev->mmio_idx_lock); 1238 spin_lock_init(&rdev->mmio_idx_lock);
1148 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2); 1239 if (rdev->family >= CHIP_BONAIRE) {
1149 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2); 1240 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1241 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1242 } else {
1243 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1244 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1245 }
1150 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size); 1246 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1151 if (rdev->rmmio == NULL) { 1247 if (rdev->rmmio == NULL) {
1152 return -ENOMEM; 1248 return -ENOMEM;
@@ -1154,6 +1250,10 @@ int radeon_device_init(struct radeon_device *rdev,
1154 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); 1250 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1155 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); 1251 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1156 1252
1253 /* doorbell bar mapping */
1254 if (rdev->family >= CHIP_BONAIRE)
1255 radeon_doorbell_init(rdev);
1256
1157 /* io port mapping */ 1257 /* io port mapping */
1158 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 1258 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1159 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) { 1259 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
@@ -1231,6 +1331,8 @@ void radeon_device_fini(struct radeon_device *rdev)
1231 rdev->rio_mem = NULL; 1331 rdev->rio_mem = NULL;
1232 iounmap(rdev->rmmio); 1332 iounmap(rdev->rmmio);
1233 rdev->rmmio = NULL; 1333 rdev->rmmio = NULL;
1334 if (rdev->family >= CHIP_BONAIRE)
1335 radeon_doorbell_fini(rdev);
1234 radeon_debugfs_remove_files(rdev); 1336 radeon_debugfs_remove_files(rdev);
1235} 1337}
1236 1338
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index eb18bb7af1cc..c2b67b4e1ac2 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -153,7 +153,13 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
153 NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS))); 153 NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
154 /* XXX match this to the depth of the crtc fmt block, move to modeset? */ 154 /* XXX match this to the depth of the crtc fmt block, move to modeset? */
155 WREG32(0x6940 + radeon_crtc->crtc_offset, 0); 155 WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
156 156 if (ASIC_IS_DCE8(rdev)) {
157 /* XXX this only needs to be programmed once per crtc at startup,
158 * not sure where the best place for it is
159 */
160 WREG32(CIK_ALPHA_CONTROL + radeon_crtc->crtc_offset,
161 CIK_CURSOR_ALPHA_BLND_ENA);
162 }
157} 163}
158 164
159static void legacy_crtc_load_lut(struct drm_crtc *crtc) 165static void legacy_crtc_load_lut(struct drm_crtc *crtc)
@@ -512,6 +518,14 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
512 radeon_crtc->crtc_id = index; 518 radeon_crtc->crtc_id = index;
513 rdev->mode_info.crtcs[index] = radeon_crtc; 519 rdev->mode_info.crtcs[index] = radeon_crtc;
514 520
521 if (rdev->family >= CHIP_BONAIRE) {
522 radeon_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
523 radeon_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
524 } else {
525 radeon_crtc->max_cursor_width = CURSOR_WIDTH;
526 radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
527 }
528
515#if 0 529#if 0
516 radeon_crtc->mode_set.crtc = &radeon_crtc->base; 530 radeon_crtc->mode_set.crtc = &radeon_crtc->base;
517 radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); 531 radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
@@ -530,7 +544,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
530 radeon_legacy_init_crtc(dev, radeon_crtc); 544 radeon_legacy_init_crtc(dev, radeon_crtc);
531} 545}
532 546
533static const char *encoder_names[37] = { 547static const char *encoder_names[38] = {
534 "NONE", 548 "NONE",
535 "INTERNAL_LVDS", 549 "INTERNAL_LVDS",
536 "INTERNAL_TMDS1", 550 "INTERNAL_TMDS1",
@@ -567,7 +581,8 @@ static const char *encoder_names[37] = {
567 "INTERNAL_UNIPHY2", 581 "INTERNAL_UNIPHY2",
568 "NUTMEG", 582 "NUTMEG",
569 "TRAVIS", 583 "TRAVIS",
570 "INTERNAL_VCE" 584 "INTERNAL_VCE",
585 "INTERNAL_UNIPHY3",
571}; 586};
572 587
573static const char *hpd_names[6] = { 588static const char *hpd_names[6] = {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 094e7e5ea39e..00cc52e601fe 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -74,9 +74,10 @@
74 * 2.31.0 - Add fastfb support for rs690 74 * 2.31.0 - Add fastfb support for rs690
75 * 2.32.0 - new info request for rings working 75 * 2.32.0 - new info request for rings working
76 * 2.33.0 - Add SI tiling mode array query 76 * 2.33.0 - Add SI tiling mode array query
77 * 2.34.0 - Add CIK tiling mode array query
77 */ 78 */
78#define KMS_DRIVER_MAJOR 2 79#define KMS_DRIVER_MAJOR 2
79#define KMS_DRIVER_MINOR 33 80#define KMS_DRIVER_MINOR 34
80#define KMS_DRIVER_PATCHLEVEL 0 81#define KMS_DRIVER_PATCHLEVEL 0
81int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 82int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
82int radeon_driver_unload_kms(struct drm_device *dev); 83int radeon_driver_unload_kms(struct drm_device *dev);
@@ -164,6 +165,7 @@ int radeon_pcie_gen2 = -1;
164int radeon_msi = -1; 165int radeon_msi = -1;
165int radeon_lockup_timeout = 10000; 166int radeon_lockup_timeout = 10000;
166int radeon_fastfb = 0; 167int radeon_fastfb = 0;
168int radeon_dpm = -1;
167 169
168MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 170MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
169module_param_named(no_wb, radeon_no_wb, int, 0444); 171module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -219,6 +221,9 @@ module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
219MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)"); 221MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)");
220module_param_named(fastfb, radeon_fastfb, int, 0444); 222module_param_named(fastfb, radeon_fastfb, int, 0444);
221 223
224MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
225module_param_named(dpm, radeon_dpm, int, 0444);
226
222static struct pci_device_id pciidlist[] = { 227static struct pci_device_id pciidlist[] = {
223 radeon_PCI_IDS 228 radeon_PCI_IDS
224}; 229};
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 36e9803b077d..3c8289083f9d 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -93,6 +93,9 @@ enum radeon_family {
93 CHIP_VERDE, 93 CHIP_VERDE,
94 CHIP_OLAND, 94 CHIP_OLAND,
95 CHIP_HAINAN, 95 CHIP_HAINAN,
96 CHIP_BONAIRE,
97 CHIP_KAVERI,
98 CHIP_KABINI,
96 CHIP_LAST, 99 CHIP_LAST,
97}; 100};
98 101
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 5a99d433fc35..bcdefd1dcd43 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -82,6 +82,23 @@ static void radeon_hotplug_work_func(struct work_struct *work)
82} 82}
83 83
84/** 84/**
85 * radeon_irq_reset_work_func - execute gpu reset
86 *
87 * @work: work struct
88 *
89 * Execute scheduled gpu reset (cayman+).
90 * This function is called when the irq handler
91 * thinks we need a gpu reset.
92 */
93static void radeon_irq_reset_work_func(struct work_struct *work)
94{
95 struct radeon_device *rdev = container_of(work, struct radeon_device,
96 reset_work);
97
98 radeon_gpu_reset(rdev);
99}
100
101/**
85 * radeon_driver_irq_preinstall_kms - drm irq preinstall callback 102 * radeon_driver_irq_preinstall_kms - drm irq preinstall callback
86 * 103 *
87 * @dev: drm dev pointer 104 * @dev: drm dev pointer
@@ -99,6 +116,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
99 /* Disable *all* interrupts */ 116 /* Disable *all* interrupts */
100 for (i = 0; i < RADEON_NUM_RINGS; i++) 117 for (i = 0; i < RADEON_NUM_RINGS; i++)
101 atomic_set(&rdev->irq.ring_int[i], 0); 118 atomic_set(&rdev->irq.ring_int[i], 0);
119 rdev->irq.dpm_thermal = false;
102 for (i = 0; i < RADEON_MAX_HPD_PINS; i++) 120 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
103 rdev->irq.hpd[i] = false; 121 rdev->irq.hpd[i] = false;
104 for (i = 0; i < RADEON_MAX_CRTCS; i++) { 122 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
@@ -146,6 +164,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
146 /* Disable *all* interrupts */ 164 /* Disable *all* interrupts */
147 for (i = 0; i < RADEON_NUM_RINGS; i++) 165 for (i = 0; i < RADEON_NUM_RINGS; i++)
148 atomic_set(&rdev->irq.ring_int[i], 0); 166 atomic_set(&rdev->irq.ring_int[i], 0);
167 rdev->irq.dpm_thermal = false;
149 for (i = 0; i < RADEON_MAX_HPD_PINS; i++) 168 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
150 rdev->irq.hpd[i] = false; 169 rdev->irq.hpd[i] = false;
151 for (i = 0; i < RADEON_MAX_CRTCS; i++) { 170 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
@@ -243,6 +262,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
243 262
244 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); 263 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
245 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); 264 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
265 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
246 266
247 spin_lock_init(&rdev->irq.lock); 267 spin_lock_init(&rdev->irq.lock);
248 r = drm_vblank_init(rdev->ddev, rdev->num_crtc); 268 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 4f2d4f4c1dab..49ff3d1a6102 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -229,7 +229,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
229 *value = rdev->accel_working; 229 *value = rdev->accel_working;
230 break; 230 break;
231 case RADEON_INFO_TILING_CONFIG: 231 case RADEON_INFO_TILING_CONFIG:
232 if (rdev->family >= CHIP_TAHITI) 232 if (rdev->family >= CHIP_BONAIRE)
233 *value = rdev->config.cik.tile_config;
234 else if (rdev->family >= CHIP_TAHITI)
233 *value = rdev->config.si.tile_config; 235 *value = rdev->config.si.tile_config;
234 else if (rdev->family >= CHIP_CAYMAN) 236 else if (rdev->family >= CHIP_CAYMAN)
235 *value = rdev->config.cayman.tile_config; 237 *value = rdev->config.cayman.tile_config;
@@ -281,7 +283,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
281 *value = rdev->clock.spll.reference_freq * 10; 283 *value = rdev->clock.spll.reference_freq * 10;
282 break; 284 break;
283 case RADEON_INFO_NUM_BACKENDS: 285 case RADEON_INFO_NUM_BACKENDS:
284 if (rdev->family >= CHIP_TAHITI) 286 if (rdev->family >= CHIP_BONAIRE)
287 *value = rdev->config.cik.max_backends_per_se *
288 rdev->config.cik.max_shader_engines;
289 else if (rdev->family >= CHIP_TAHITI)
285 *value = rdev->config.si.max_backends_per_se * 290 *value = rdev->config.si.max_backends_per_se *
286 rdev->config.si.max_shader_engines; 291 rdev->config.si.max_shader_engines;
287 else if (rdev->family >= CHIP_CAYMAN) 292 else if (rdev->family >= CHIP_CAYMAN)
@@ -298,7 +303,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
298 } 303 }
299 break; 304 break;
300 case RADEON_INFO_NUM_TILE_PIPES: 305 case RADEON_INFO_NUM_TILE_PIPES:
301 if (rdev->family >= CHIP_TAHITI) 306 if (rdev->family >= CHIP_BONAIRE)
307 *value = rdev->config.cik.max_tile_pipes;
308 else if (rdev->family >= CHIP_TAHITI)
302 *value = rdev->config.si.max_tile_pipes; 309 *value = rdev->config.si.max_tile_pipes;
303 else if (rdev->family >= CHIP_CAYMAN) 310 else if (rdev->family >= CHIP_CAYMAN)
304 *value = rdev->config.cayman.max_tile_pipes; 311 *value = rdev->config.cayman.max_tile_pipes;
@@ -316,7 +323,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
316 *value = 1; 323 *value = 1;
317 break; 324 break;
318 case RADEON_INFO_BACKEND_MAP: 325 case RADEON_INFO_BACKEND_MAP:
319 if (rdev->family >= CHIP_TAHITI) 326 if (rdev->family >= CHIP_BONAIRE)
327 return -EINVAL;
328 else if (rdev->family >= CHIP_TAHITI)
320 *value = rdev->config.si.backend_map; 329 *value = rdev->config.si.backend_map;
321 else if (rdev->family >= CHIP_CAYMAN) 330 else if (rdev->family >= CHIP_CAYMAN)
322 *value = rdev->config.cayman.backend_map; 331 *value = rdev->config.cayman.backend_map;
@@ -343,7 +352,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
343 *value = RADEON_IB_VM_MAX_SIZE; 352 *value = RADEON_IB_VM_MAX_SIZE;
344 break; 353 break;
345 case RADEON_INFO_MAX_PIPES: 354 case RADEON_INFO_MAX_PIPES:
346 if (rdev->family >= CHIP_TAHITI) 355 if (rdev->family >= CHIP_BONAIRE)
356 *value = rdev->config.cik.max_cu_per_sh;
357 else if (rdev->family >= CHIP_TAHITI)
347 *value = rdev->config.si.max_cu_per_sh; 358 *value = rdev->config.si.max_cu_per_sh;
348 else if (rdev->family >= CHIP_CAYMAN) 359 else if (rdev->family >= CHIP_CAYMAN)
349 *value = rdev->config.cayman.max_pipes_per_simd; 360 *value = rdev->config.cayman.max_pipes_per_simd;
@@ -367,7 +378,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
367 value64 = radeon_get_gpu_clock_counter(rdev); 378 value64 = radeon_get_gpu_clock_counter(rdev);
368 break; 379 break;
369 case RADEON_INFO_MAX_SE: 380 case RADEON_INFO_MAX_SE:
370 if (rdev->family >= CHIP_TAHITI) 381 if (rdev->family >= CHIP_BONAIRE)
382 *value = rdev->config.cik.max_shader_engines;
383 else if (rdev->family >= CHIP_TAHITI)
371 *value = rdev->config.si.max_shader_engines; 384 *value = rdev->config.si.max_shader_engines;
372 else if (rdev->family >= CHIP_CAYMAN) 385 else if (rdev->family >= CHIP_CAYMAN)
373 *value = rdev->config.cayman.max_shader_engines; 386 *value = rdev->config.cayman.max_shader_engines;
@@ -377,7 +390,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
377 *value = 1; 390 *value = 1;
378 break; 391 break;
379 case RADEON_INFO_MAX_SH_PER_SE: 392 case RADEON_INFO_MAX_SH_PER_SE:
380 if (rdev->family >= CHIP_TAHITI) 393 if (rdev->family >= CHIP_BONAIRE)
394 *value = rdev->config.cik.max_sh_per_se;
395 else if (rdev->family >= CHIP_TAHITI)
381 *value = rdev->config.si.max_sh_per_se; 396 *value = rdev->config.si.max_sh_per_se;
382 else 397 else
383 return -EINVAL; 398 return -EINVAL;
@@ -407,12 +422,16 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
407 } 422 }
408 break; 423 break;
409 case RADEON_INFO_SI_TILE_MODE_ARRAY: 424 case RADEON_INFO_SI_TILE_MODE_ARRAY:
410 if (rdev->family < CHIP_TAHITI) { 425 if (rdev->family >= CHIP_BONAIRE) {
411 DRM_DEBUG_KMS("tile mode array is si only!\n"); 426 value = rdev->config.cik.tile_mode_array;
427 value_size = sizeof(uint32_t)*32;
428 } else if (rdev->family >= CHIP_TAHITI) {
429 value = rdev->config.si.tile_mode_array;
430 value_size = sizeof(uint32_t)*32;
431 } else {
432 DRM_DEBUG_KMS("tile mode array is si+ only!\n");
412 return -EINVAL; 433 return -EINVAL;
413 } 434 }
414 value = rdev->config.si.tile_mode_array;
415 value_size = sizeof(uint32_t)*32;
416 break; 435 break;
417 default: 436 default:
418 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 437 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 69ad4fe224c1..b568cb19a7fa 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -307,6 +307,8 @@ struct radeon_crtc {
307 uint64_t cursor_addr; 307 uint64_t cursor_addr;
308 int cursor_width; 308 int cursor_width;
309 int cursor_height; 309 int cursor_height;
310 int max_cursor_width;
311 int max_cursor_height;
310 uint32_t legacy_display_base_addr; 312 uint32_t legacy_display_base_addr;
311 uint32_t legacy_cursor_offset; 313 uint32_t legacy_cursor_offset;
312 enum radeon_rmx_type rmx_type; 314 enum radeon_rmx_type rmx_type;
@@ -329,6 +331,10 @@ struct radeon_crtc {
329 u32 pll_flags; 331 u32 pll_flags;
330 struct drm_encoder *encoder; 332 struct drm_encoder *encoder;
331 struct drm_connector *connector; 333 struct drm_connector *connector;
334 /* for dpm */
335 u32 line_time;
336 u32 wm_low;
337 u32 wm_high;
332}; 338};
333 339
334struct radeon_encoder_primary_dac { 340struct radeon_encoder_primary_dac {
@@ -512,12 +518,99 @@ struct atom_clock_dividers {
512 bool enable_dithen; 518 bool enable_dithen;
513 u32 vco_mode; 519 u32 vco_mode;
514 u32 real_clock; 520 u32 real_clock;
521 /* added for CI */
522 u32 post_divider;
523 u32 flags;
524};
525
526struct atom_mpll_param {
527 union {
528 struct {
529#ifdef __BIG_ENDIAN
530 u32 reserved : 8;
531 u32 clkfrac : 12;
532 u32 clkf : 12;
533#else
534 u32 clkf : 12;
535 u32 clkfrac : 12;
536 u32 reserved : 8;
537#endif
538 };
539 u32 fb_div;
540 };
541 u32 post_div;
542 u32 bwcntl;
543 u32 dll_speed;
544 u32 vco_mode;
545 u32 yclk_sel;
546 u32 qdr;
547 u32 half_rate;
548};
549
550#define MEM_TYPE_GDDR5 0x50
551#define MEM_TYPE_GDDR4 0x40
552#define MEM_TYPE_GDDR3 0x30
553#define MEM_TYPE_DDR2 0x20
554#define MEM_TYPE_GDDR1 0x10
555#define MEM_TYPE_DDR3 0xb0
556#define MEM_TYPE_MASK 0xf0
557
558struct atom_memory_info {
559 u8 mem_vendor;
560 u8 mem_type;
561};
562
563#define MAX_AC_TIMING_ENTRIES 16
564
565struct atom_memory_clock_range_table
566{
567 u8 num_entries;
568 u8 rsv[3];
569 u32 mclk[MAX_AC_TIMING_ENTRIES];
570};
571
572#define VBIOS_MC_REGISTER_ARRAY_SIZE 32
573#define VBIOS_MAX_AC_TIMING_ENTRIES 20
574
575struct atom_mc_reg_entry {
576 u32 mclk_max;
577 u32 mc_data[VBIOS_MC_REGISTER_ARRAY_SIZE];
578};
579
580struct atom_mc_register_address {
581 u16 s1;
582 u8 pre_reg_data;
583};
584
585struct atom_mc_reg_table {
586 u8 last;
587 u8 num_entries;
588 struct atom_mc_reg_entry mc_reg_table_entry[VBIOS_MAX_AC_TIMING_ENTRIES];
589 struct atom_mc_register_address mc_reg_address[VBIOS_MC_REGISTER_ARRAY_SIZE];
590};
591
592#define MAX_VOLTAGE_ENTRIES 32
593
594struct atom_voltage_table_entry
595{
596 u16 value;
597 u32 smio_low;
598};
599
600struct atom_voltage_table
601{
602 u32 count;
603 u32 mask_low;
604 u32 phase_delay;
605 struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
515}; 606};
516 607
517extern enum radeon_tv_std 608extern enum radeon_tv_std
518radeon_combios_get_tv_info(struct radeon_device *rdev); 609radeon_combios_get_tv_info(struct radeon_device *rdev);
519extern enum radeon_tv_std 610extern enum radeon_tv_std
520radeon_atombios_get_tv_info(struct radeon_device *rdev); 611radeon_atombios_get_tv_info(struct radeon_device *rdev);
612extern void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
613 u16 *vddc, u16 *vddci, u16 *mvdd);
521 614
522extern struct drm_connector * 615extern struct drm_connector *
523radeon_get_connector_for_encoder(struct drm_encoder *encoder); 616radeon_get_connector_for_encoder(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 788c64cb4b47..9737baeb711d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -388,7 +388,8 @@ static ssize_t radeon_get_pm_method(struct device *dev,
388 int pm = rdev->pm.pm_method; 388 int pm = rdev->pm.pm_method;
389 389
390 return snprintf(buf, PAGE_SIZE, "%s\n", 390 return snprintf(buf, PAGE_SIZE, "%s\n",
391 (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile"); 391 (pm == PM_METHOD_DYNPM) ? "dynpm" :
392 (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
392} 393}
393 394
394static ssize_t radeon_set_pm_method(struct device *dev, 395static ssize_t radeon_set_pm_method(struct device *dev,
@@ -399,6 +400,11 @@ static ssize_t radeon_set_pm_method(struct device *dev,
399 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 400 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
400 struct radeon_device *rdev = ddev->dev_private; 401 struct radeon_device *rdev = ddev->dev_private;
401 402
403 /* we don't support the legacy modes with dpm */
404 if (rdev->pm.pm_method == PM_METHOD_DPM) {
405 count = -EINVAL;
406 goto fail;
407 }
402 408
403 if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { 409 if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
404 mutex_lock(&rdev->pm.mutex); 410 mutex_lock(&rdev->pm.mutex);
@@ -423,8 +429,48 @@ fail:
423 return count; 429 return count;
424} 430}
425 431
432static ssize_t radeon_get_dpm_state(struct device *dev,
433 struct device_attribute *attr,
434 char *buf)
435{
436 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
437 struct radeon_device *rdev = ddev->dev_private;
438 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
439
440 return snprintf(buf, PAGE_SIZE, "%s\n",
441 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
442 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
443}
444
445static ssize_t radeon_set_dpm_state(struct device *dev,
446 struct device_attribute *attr,
447 const char *buf,
448 size_t count)
449{
450 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
451 struct radeon_device *rdev = ddev->dev_private;
452
453 mutex_lock(&rdev->pm.mutex);
454 if (strncmp("battery", buf, strlen("battery")) == 0)
455 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
456 else if (strncmp("balanced", buf, strlen("balanced")) == 0)
457 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
458 else if (strncmp("performance", buf, strlen("performance")) == 0)
459 rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
460 else {
461 mutex_unlock(&rdev->pm.mutex);
462 count = -EINVAL;
463 goto fail;
464 }
465 mutex_unlock(&rdev->pm.mutex);
466 radeon_pm_compute_clocks(rdev);
467fail:
468 return count;
469}
470
426static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); 471static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
427static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); 472static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
473static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
428 474
429static ssize_t radeon_hwmon_show_temp(struct device *dev, 475static ssize_t radeon_hwmon_show_temp(struct device *dev,
430 struct device_attribute *attr, 476 struct device_attribute *attr,
@@ -434,27 +480,10 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
434 struct radeon_device *rdev = ddev->dev_private; 480 struct radeon_device *rdev = ddev->dev_private;
435 int temp; 481 int temp;
436 482
437 switch (rdev->pm.int_thermal_type) { 483 if (rdev->asic->pm.get_temperature)
438 case THERMAL_TYPE_RV6XX: 484 temp = radeon_get_temperature(rdev);
439 temp = rv6xx_get_temp(rdev); 485 else
440 break;
441 case THERMAL_TYPE_RV770:
442 temp = rv770_get_temp(rdev);
443 break;
444 case THERMAL_TYPE_EVERGREEN:
445 case THERMAL_TYPE_NI:
446 temp = evergreen_get_temp(rdev);
447 break;
448 case THERMAL_TYPE_SUMO:
449 temp = sumo_get_temp(rdev);
450 break;
451 case THERMAL_TYPE_SI:
452 temp = si_get_temp(rdev);
453 break;
454 default:
455 temp = 0; 486 temp = 0;
456 break;
457 }
458 487
459 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 488 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
460} 489}
@@ -492,8 +521,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
492 case THERMAL_TYPE_NI: 521 case THERMAL_TYPE_NI:
493 case THERMAL_TYPE_SUMO: 522 case THERMAL_TYPE_SUMO:
494 case THERMAL_TYPE_SI: 523 case THERMAL_TYPE_SI:
495 /* No support for TN yet */ 524 if (rdev->asic->pm.get_temperature == NULL)
496 if (rdev->family == CHIP_ARUBA)
497 return err; 525 return err;
498 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 526 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
499 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 527 if (IS_ERR(rdev->pm.int_hwmon_dev)) {
@@ -526,7 +554,270 @@ static void radeon_hwmon_fini(struct radeon_device *rdev)
526 } 554 }
527} 555}
528 556
529void radeon_pm_suspend(struct radeon_device *rdev) 557static void radeon_dpm_thermal_work_handler(struct work_struct *work)
558{
559 struct radeon_device *rdev =
560 container_of(work, struct radeon_device,
561 pm.dpm.thermal.work);
562 /* switch to the thermal state */
563 enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
564
565 if (!rdev->pm.dpm_enabled)
566 return;
567
568 if (rdev->asic->pm.get_temperature) {
569 int temp = radeon_get_temperature(rdev);
570
571 if (temp < rdev->pm.dpm.thermal.min_temp)
572 /* switch back the user state */
573 dpm_state = rdev->pm.dpm.user_state;
574 } else {
575 if (rdev->pm.dpm.thermal.high_to_low)
576 /* switch back the user state */
577 dpm_state = rdev->pm.dpm.user_state;
578 }
579 radeon_dpm_enable_power_state(rdev, dpm_state);
580}
581
582static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
583 enum radeon_pm_state_type dpm_state)
584{
585 int i;
586 struct radeon_ps *ps;
587 u32 ui_class;
588
589restart_search:
590 /* balanced states don't exist at the moment */
591 if (dpm_state == POWER_STATE_TYPE_BALANCED)
592 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
593
594 /* Pick the best power state based on current conditions */
595 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
596 ps = &rdev->pm.dpm.ps[i];
597 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
598 switch (dpm_state) {
599 /* user states */
600 case POWER_STATE_TYPE_BATTERY:
601 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
602 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
603 if (rdev->pm.dpm.new_active_crtc_count < 2)
604 return ps;
605 } else
606 return ps;
607 }
608 break;
609 case POWER_STATE_TYPE_BALANCED:
610 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
611 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
612 if (rdev->pm.dpm.new_active_crtc_count < 2)
613 return ps;
614 } else
615 return ps;
616 }
617 break;
618 case POWER_STATE_TYPE_PERFORMANCE:
619 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
620 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
621 if (rdev->pm.dpm.new_active_crtc_count < 2)
622 return ps;
623 } else
624 return ps;
625 }
626 break;
627 /* internal states */
628 case POWER_STATE_TYPE_INTERNAL_UVD:
629 return rdev->pm.dpm.uvd_ps;
630 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
631 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
632 return ps;
633 break;
634 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
635 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
636 return ps;
637 break;
638 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
639 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
640 return ps;
641 break;
642 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
643 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
644 return ps;
645 break;
646 case POWER_STATE_TYPE_INTERNAL_BOOT:
647 return rdev->pm.dpm.boot_ps;
648 case POWER_STATE_TYPE_INTERNAL_THERMAL:
649 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
650 return ps;
651 break;
652 case POWER_STATE_TYPE_INTERNAL_ACPI:
653 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
654 return ps;
655 break;
656 case POWER_STATE_TYPE_INTERNAL_ULV:
657 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
658 return ps;
659 break;
660 default:
661 break;
662 }
663 }
664 /* use a fallback state if we didn't match */
665 switch (dpm_state) {
666 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
667 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
668 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
669 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
670 return rdev->pm.dpm.uvd_ps;
671 case POWER_STATE_TYPE_INTERNAL_THERMAL:
672 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
673 goto restart_search;
674 case POWER_STATE_TYPE_INTERNAL_ACPI:
675 dpm_state = POWER_STATE_TYPE_BATTERY;
676 goto restart_search;
677 case POWER_STATE_TYPE_BATTERY:
678 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
679 goto restart_search;
680 default:
681 break;
682 }
683
684 return NULL;
685}
686
687static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
688{
689 int i;
690 struct radeon_ps *ps;
691 enum radeon_pm_state_type dpm_state;
692 int ret;
693
694 /* if dpm init failed */
695 if (!rdev->pm.dpm_enabled)
696 return;
697
698 if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
699 /* add other state override checks here */
700 if ((!rdev->pm.dpm.thermal_active) &&
701 (!rdev->pm.dpm.uvd_active))
702 rdev->pm.dpm.state = rdev->pm.dpm.user_state;
703 }
704 dpm_state = rdev->pm.dpm.state;
705
706 ps = radeon_dpm_pick_power_state(rdev, dpm_state);
707 if (ps)
708 rdev->pm.dpm.requested_ps = ps;
709 else
710 return;
711
712 /* no need to reprogram if nothing changed unless we are on BTC+ */
713 if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
714 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
715 /* for pre-BTC and APUs if the num crtcs changed but state is the same,
716 * all we need to do is update the display configuration.
717 */
718 if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
719 /* update display watermarks based on new power state */
720 radeon_bandwidth_update(rdev);
721 /* update displays */
722 radeon_dpm_display_configuration_changed(rdev);
723 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
724 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
725 }
726 return;
727 } else {
728 /* for BTC+ if the num crtcs hasn't changed and state is the same,
729 * nothing to do, if the num crtcs is > 1 and state is the same,
730 * update display configuration.
731 */
732 if (rdev->pm.dpm.new_active_crtcs ==
733 rdev->pm.dpm.current_active_crtcs) {
734 return;
735 } else {
736 if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
737 (rdev->pm.dpm.new_active_crtc_count > 1)) {
738 /* update display watermarks based on new power state */
739 radeon_bandwidth_update(rdev);
740 /* update displays */
741 radeon_dpm_display_configuration_changed(rdev);
742 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
743 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
744 return;
745 }
746 }
747 }
748 }
749
750 printk("switching from power state:\n");
751 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
752 printk("switching to power state:\n");
753 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
754
755 mutex_lock(&rdev->ddev->struct_mutex);
756 down_write(&rdev->pm.mclk_lock);
757 mutex_lock(&rdev->ring_lock);
758
759 ret = radeon_dpm_pre_set_power_state(rdev);
760 if (ret)
761 goto done;
762
763 /* update display watermarks based on new power state */
764 radeon_bandwidth_update(rdev);
765 /* update displays */
766 radeon_dpm_display_configuration_changed(rdev);
767
768 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
769 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
770
771 /* wait for the rings to drain */
772 for (i = 0; i < RADEON_NUM_RINGS; i++) {
773 struct radeon_ring *ring = &rdev->ring[i];
774 if (ring->ready)
775 radeon_fence_wait_empty_locked(rdev, i);
776 }
777
778 /* program the new power state */
779 radeon_dpm_set_power_state(rdev);
780
781 /* update current power state */
782 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;
783
784 radeon_dpm_post_set_power_state(rdev);
785
786done:
787 mutex_unlock(&rdev->ring_lock);
788 up_write(&rdev->pm.mclk_lock);
789 mutex_unlock(&rdev->ddev->struct_mutex);
790}
791
792void radeon_dpm_enable_power_state(struct radeon_device *rdev,
793 enum radeon_pm_state_type dpm_state)
794{
795 if (!rdev->pm.dpm_enabled)
796 return;
797
798 mutex_lock(&rdev->pm.mutex);
799 switch (dpm_state) {
800 case POWER_STATE_TYPE_INTERNAL_THERMAL:
801 rdev->pm.dpm.thermal_active = true;
802 break;
803 case POWER_STATE_TYPE_INTERNAL_UVD:
804 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
805 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
806 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
807 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
808 rdev->pm.dpm.uvd_active = true;
809 break;
810 default:
811 rdev->pm.dpm.thermal_active = false;
812 rdev->pm.dpm.uvd_active = false;
813 break;
814 }
815 rdev->pm.dpm.state = dpm_state;
816 mutex_unlock(&rdev->pm.mutex);
817 radeon_pm_compute_clocks(rdev);
818}
819
820static void radeon_pm_suspend_old(struct radeon_device *rdev)
530{ 821{
531 mutex_lock(&rdev->pm.mutex); 822 mutex_lock(&rdev->pm.mutex);
532 if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 823 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
@@ -538,7 +829,26 @@ void radeon_pm_suspend(struct radeon_device *rdev)
538 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 829 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
539} 830}
540 831
541void radeon_pm_resume(struct radeon_device *rdev) 832static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
833{
834 mutex_lock(&rdev->pm.mutex);
835 /* disable dpm */
836 radeon_dpm_disable(rdev);
837 /* reset the power state */
838 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
839 rdev->pm.dpm_enabled = false;
840 mutex_unlock(&rdev->pm.mutex);
841}
842
843void radeon_pm_suspend(struct radeon_device *rdev)
844{
845 if (rdev->pm.pm_method == PM_METHOD_DPM)
846 radeon_pm_suspend_dpm(rdev);
847 else
848 radeon_pm_suspend_old(rdev);
849}
850
851static void radeon_pm_resume_old(struct radeon_device *rdev)
542{ 852{
543 /* set up the default clocks if the MC ucode is loaded */ 853 /* set up the default clocks if the MC ucode is loaded */
544 if ((rdev->family >= CHIP_BARTS) && 854 if ((rdev->family >= CHIP_BARTS) &&
@@ -573,12 +883,50 @@ void radeon_pm_resume(struct radeon_device *rdev)
573 radeon_pm_compute_clocks(rdev); 883 radeon_pm_compute_clocks(rdev);
574} 884}
575 885
576int radeon_pm_init(struct radeon_device *rdev) 886static void radeon_pm_resume_dpm(struct radeon_device *rdev)
887{
888 int ret;
889
890 /* asic init will reset to the boot state */
891 mutex_lock(&rdev->pm.mutex);
892 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
893 radeon_dpm_setup_asic(rdev);
894 ret = radeon_dpm_enable(rdev);
895 mutex_unlock(&rdev->pm.mutex);
896 if (ret) {
897 DRM_ERROR("radeon: dpm resume failed\n");
898 if ((rdev->family >= CHIP_BARTS) &&
899 (rdev->family <= CHIP_CAYMAN) &&
900 rdev->mc_fw) {
901 if (rdev->pm.default_vddc)
902 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
903 SET_VOLTAGE_TYPE_ASIC_VDDC);
904 if (rdev->pm.default_vddci)
905 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
906 SET_VOLTAGE_TYPE_ASIC_VDDCI);
907 if (rdev->pm.default_sclk)
908 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
909 if (rdev->pm.default_mclk)
910 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
911 }
912 } else {
913 rdev->pm.dpm_enabled = true;
914 radeon_pm_compute_clocks(rdev);
915 }
916}
917
918void radeon_pm_resume(struct radeon_device *rdev)
919{
920 if (rdev->pm.pm_method == PM_METHOD_DPM)
921 radeon_pm_resume_dpm(rdev);
922 else
923 radeon_pm_resume_old(rdev);
924}
925
926static int radeon_pm_init_old(struct radeon_device *rdev)
577{ 927{
578 int ret; 928 int ret;
579 929
580 /* default to profile method */
581 rdev->pm.pm_method = PM_METHOD_PROFILE;
582 rdev->pm.profile = PM_PROFILE_DEFAULT; 930 rdev->pm.profile = PM_PROFILE_DEFAULT;
583 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 931 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
584 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 932 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
@@ -640,7 +988,137 @@ int radeon_pm_init(struct radeon_device *rdev)
640 return 0; 988 return 0;
641} 989}
642 990
643void radeon_pm_fini(struct radeon_device *rdev) 991static void radeon_dpm_print_power_states(struct radeon_device *rdev)
992{
993 int i;
994
995 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
996 printk("== power state %d ==\n", i);
997 radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
998 }
999}
1000
1001static int radeon_pm_init_dpm(struct radeon_device *rdev)
1002{
1003 int ret;
1004
1005 /* default to performance state */
1006 rdev->pm.dpm.state = POWER_STATE_TYPE_PERFORMANCE;
1007 rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
1008 rdev->pm.default_sclk = rdev->clock.default_sclk;
1009 rdev->pm.default_mclk = rdev->clock.default_mclk;
1010 rdev->pm.current_sclk = rdev->clock.default_sclk;
1011 rdev->pm.current_mclk = rdev->clock.default_mclk;
1012 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1013
1014 if (rdev->bios && rdev->is_atom_bios)
1015 radeon_atombios_get_power_modes(rdev);
1016 else
1017 return -EINVAL;
1018
1019 /* set up the internal thermal sensor if applicable */
1020 ret = radeon_hwmon_init(rdev);
1021 if (ret)
1022 return ret;
1023
1024 INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
1025 mutex_lock(&rdev->pm.mutex);
1026 radeon_dpm_init(rdev);
1027 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1028 radeon_dpm_print_power_states(rdev);
1029 radeon_dpm_setup_asic(rdev);
1030 ret = radeon_dpm_enable(rdev);
1031 mutex_unlock(&rdev->pm.mutex);
1032 if (ret) {
1033 rdev->pm.dpm_enabled = false;
1034 if ((rdev->family >= CHIP_BARTS) &&
1035 (rdev->family <= CHIP_CAYMAN) &&
1036 rdev->mc_fw) {
1037 if (rdev->pm.default_vddc)
1038 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1039 SET_VOLTAGE_TYPE_ASIC_VDDC);
1040 if (rdev->pm.default_vddci)
1041 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1042 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1043 if (rdev->pm.default_sclk)
1044 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1045 if (rdev->pm.default_mclk)
1046 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1047 }
1048 DRM_ERROR("radeon: dpm initialization failed\n");
1049 return ret;
1050 }
1051 rdev->pm.dpm_enabled = true;
1052 radeon_pm_compute_clocks(rdev);
1053
1054 if (rdev->pm.num_power_states > 1) {
1055 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
1056 if (ret)
1057 DRM_ERROR("failed to create device file for dpm state\n");
1058 /* XXX: these are noops for dpm but are here for backwards compat */
1059 ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1060 if (ret)
1061 DRM_ERROR("failed to create device file for power profile\n");
1062 ret = device_create_file(rdev->dev, &dev_attr_power_method);
1063 if (ret)
1064 DRM_ERROR("failed to create device file for power method\n");
1065 DRM_INFO("radeon: dpm initialized\n");
1066 }
1067
1068 return 0;
1069}
1070
1071int radeon_pm_init(struct radeon_device *rdev)
1072{
1073 /* enable dpm on rv6xx+ */
1074 switch (rdev->family) {
1075 case CHIP_RV610:
1076 case CHIP_RV630:
1077 case CHIP_RV620:
1078 case CHIP_RV635:
1079 case CHIP_RV670:
1080 case CHIP_RS780:
1081 case CHIP_RS880:
1082 case CHIP_RV770:
1083 case CHIP_RV730:
1084 case CHIP_RV710:
1085 case CHIP_RV740:
1086 case CHIP_CEDAR:
1087 case CHIP_REDWOOD:
1088 case CHIP_JUNIPER:
1089 case CHIP_CYPRESS:
1090 case CHIP_HEMLOCK:
1091 case CHIP_PALM:
1092 case CHIP_SUMO:
1093 case CHIP_SUMO2:
1094 case CHIP_BARTS:
1095 case CHIP_TURKS:
1096 case CHIP_CAICOS:
1097 case CHIP_CAYMAN:
1098 case CHIP_ARUBA:
1099 case CHIP_TAHITI:
1100 case CHIP_PITCAIRN:
1101 case CHIP_VERDE:
1102 case CHIP_OLAND:
1103 case CHIP_HAINAN:
1104 if (radeon_dpm == 1)
1105 rdev->pm.pm_method = PM_METHOD_DPM;
1106 else
1107 rdev->pm.pm_method = PM_METHOD_PROFILE;
1108 break;
1109 default:
1110 /* default to profile method */
1111 rdev->pm.pm_method = PM_METHOD_PROFILE;
1112 break;
1113 }
1114
1115 if (rdev->pm.pm_method == PM_METHOD_DPM)
1116 return radeon_pm_init_dpm(rdev);
1117 else
1118 return radeon_pm_init_old(rdev);
1119}
1120
1121static void radeon_pm_fini_old(struct radeon_device *rdev)
644{ 1122{
645 if (rdev->pm.num_power_states > 1) { 1123 if (rdev->pm.num_power_states > 1) {
646 mutex_lock(&rdev->pm.mutex); 1124 mutex_lock(&rdev->pm.mutex);
@@ -668,7 +1146,35 @@ void radeon_pm_fini(struct radeon_device *rdev)
668 radeon_hwmon_fini(rdev); 1146 radeon_hwmon_fini(rdev);
669} 1147}
670 1148
671void radeon_pm_compute_clocks(struct radeon_device *rdev) 1149static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1150{
1151 if (rdev->pm.num_power_states > 1) {
1152 mutex_lock(&rdev->pm.mutex);
1153 radeon_dpm_disable(rdev);
1154 mutex_unlock(&rdev->pm.mutex);
1155
1156 device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
1157 /* XXX backwards compat */
1158 device_remove_file(rdev->dev, &dev_attr_power_profile);
1159 device_remove_file(rdev->dev, &dev_attr_power_method);
1160 }
1161 radeon_dpm_fini(rdev);
1162
1163 if (rdev->pm.power_state)
1164 kfree(rdev->pm.power_state);
1165
1166 radeon_hwmon_fini(rdev);
1167}
1168
1169void radeon_pm_fini(struct radeon_device *rdev)
1170{
1171 if (rdev->pm.pm_method == PM_METHOD_DPM)
1172 radeon_pm_fini_dpm(rdev);
1173 else
1174 radeon_pm_fini_old(rdev);
1175}
1176
1177static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
672{ 1178{
673 struct drm_device *ddev = rdev->ddev; 1179 struct drm_device *ddev = rdev->ddev;
674 struct drm_crtc *crtc; 1180 struct drm_crtc *crtc;
@@ -677,6 +1183,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
677 if (rdev->pm.num_power_states < 2) 1183 if (rdev->pm.num_power_states < 2)
678 return; 1184 return;
679 1185
1186 INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
680 mutex_lock(&rdev->pm.mutex); 1187 mutex_lock(&rdev->pm.mutex);
681 1188
682 rdev->pm.active_crtcs = 0; 1189 rdev->pm.active_crtcs = 0;
@@ -739,6 +1246,46 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
739 mutex_unlock(&rdev->pm.mutex); 1246 mutex_unlock(&rdev->pm.mutex);
740} 1247}
741 1248
1249static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1250{
1251 struct drm_device *ddev = rdev->ddev;
1252 struct drm_crtc *crtc;
1253 struct radeon_crtc *radeon_crtc;
1254
1255 mutex_lock(&rdev->pm.mutex);
1256
1257 /* update active crtc counts */
1258 rdev->pm.dpm.new_active_crtcs = 0;
1259 rdev->pm.dpm.new_active_crtc_count = 0;
1260 list_for_each_entry(crtc,
1261 &ddev->mode_config.crtc_list, head) {
1262 radeon_crtc = to_radeon_crtc(crtc);
1263 if (crtc->enabled) {
1264 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
1265 rdev->pm.dpm.new_active_crtc_count++;
1266 }
1267 }
1268
1269 /* update battery/ac status */
1270 if (power_supply_is_system_supplied() > 0)
1271 rdev->pm.dpm.ac_power = true;
1272 else
1273 rdev->pm.dpm.ac_power = false;
1274
1275 radeon_dpm_change_power_state_locked(rdev);
1276
1277 mutex_unlock(&rdev->pm.mutex);
1278
1279}
1280
1281void radeon_pm_compute_clocks(struct radeon_device *rdev)
1282{
1283 if (rdev->pm.pm_method == PM_METHOD_DPM)
1284 radeon_pm_compute_clocks_dpm(rdev);
1285 else
1286 radeon_pm_compute_clocks_old(rdev);
1287}
1288
742static bool radeon_pm_in_vbl(struct radeon_device *rdev) 1289static bool radeon_pm_in_vbl(struct radeon_device *rdev)
743{ 1290{
744 int crtc, vpos, hpos, vbl_status; 1291 int crtc, vpos, hpos, vbl_status;
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 7e2c2b7cf188..62d54976d24e 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -57,6 +57,7 @@
57#include "evergreen_reg.h" 57#include "evergreen_reg.h"
58#include "ni_reg.h" 58#include "ni_reg.h"
59#include "si_reg.h" 59#include "si_reg.h"
60#include "cik_reg.h"
60 61
61#define RADEON_MC_AGP_LOCATION 0x014c 62#define RADEON_MC_AGP_LOCATION 0x014c
62#define RADEON_MC_AGP_START_MASK 0x0000FFFF 63#define RADEON_MC_AGP_START_MASK 0x0000FFFF
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 82434018cbe8..5f1c51a776ed 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -357,6 +357,38 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
357 } 357 }
358} 358}
359 359
360u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
361 struct radeon_ring *ring)
362{
363 u32 rptr;
364
365 if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
366 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
367 else
368 rptr = RREG32(ring->rptr_reg);
369 rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
370
371 return rptr;
372}
373
374u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
375 struct radeon_ring *ring)
376{
377 u32 wptr;
378
379 wptr = RREG32(ring->wptr_reg);
380 wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
381
382 return wptr;
383}
384
385void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
386 struct radeon_ring *ring)
387{
388 WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
389 (void)RREG32(ring->wptr_reg);
390}
391
360/** 392/**
361 * radeon_ring_free_size - update the free size 393 * radeon_ring_free_size - update the free size
362 * 394 *
@@ -367,13 +399,7 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
367 */ 399 */
368void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring) 400void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
369{ 401{
370 u32 rptr; 402 ring->rptr = radeon_ring_get_rptr(rdev, ring);
371
372 if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
373 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
374 else
375 rptr = RREG32(ring->rptr_reg);
376 ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
377 /* This works because ring_size is a power of 2 */ 403 /* This works because ring_size is a power of 2 */
378 ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4)); 404 ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
379 ring->ring_free_dw -= ring->wptr; 405 ring->ring_free_dw -= ring->wptr;
@@ -465,8 +491,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
465 radeon_ring_write(ring, ring->nop); 491 radeon_ring_write(ring, ring->nop);
466 } 492 }
467 DRM_MEMORYBARRIER(); 493 DRM_MEMORYBARRIER();
468 WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask); 494 radeon_ring_set_wptr(rdev, ring);
469 (void)RREG32(ring->wptr_reg);
470} 495}
471 496
472/** 497/**
@@ -568,7 +593,6 @@ void radeon_ring_lockup_update(struct radeon_ring *ring)
568bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 593bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
569{ 594{
570 unsigned long cjiffies, elapsed; 595 unsigned long cjiffies, elapsed;
571 uint32_t rptr;
572 596
573 cjiffies = jiffies; 597 cjiffies = jiffies;
574 if (!time_after(cjiffies, ring->last_activity)) { 598 if (!time_after(cjiffies, ring->last_activity)) {
@@ -576,8 +600,7 @@ bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *rin
576 radeon_ring_lockup_update(ring); 600 radeon_ring_lockup_update(ring);
577 return false; 601 return false;
578 } 602 }
579 rptr = RREG32(ring->rptr_reg); 603 ring->rptr = radeon_ring_get_rptr(rdev, ring);
580 ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
581 if (ring->rptr != ring->last_rptr) { 604 if (ring->rptr != ring->last_rptr) {
582 /* CP is still working no lockup */ 605 /* CP is still working no lockup */
583 radeon_ring_lockup_update(ring); 606 radeon_ring_lockup_update(ring);
@@ -804,9 +827,9 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
804 827
805 radeon_ring_free_size(rdev, ring); 828 radeon_ring_free_size(rdev, ring);
806 count = (ring->ring_size / 4) - ring->ring_free_dw; 829 count = (ring->ring_size / 4) - ring->ring_free_dw;
807 tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift; 830 tmp = radeon_ring_get_wptr(rdev, ring);
808 seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp); 831 seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
809 tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift; 832 tmp = radeon_ring_get_rptr(rdev, ring);
810 seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp); 833 seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
811 if (ring->rptr_save_reg) { 834 if (ring->rptr_save_reg) {
812 seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg, 835 seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
new file mode 100644
index 000000000000..d8b05f7bcf1a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -0,0 +1,129 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __RADEON_UCODE_H__
24#define __RADEON_UCODE_H__
25
26/* CP */
27#define R600_PFP_UCODE_SIZE 576
28#define R600_PM4_UCODE_SIZE 1792
29#define R700_PFP_UCODE_SIZE 848
30#define R700_PM4_UCODE_SIZE 1360
31#define EVERGREEN_PFP_UCODE_SIZE 1120
32#define EVERGREEN_PM4_UCODE_SIZE 1376
33#define CAYMAN_PFP_UCODE_SIZE 2176
34#define CAYMAN_PM4_UCODE_SIZE 2176
35#define SI_PFP_UCODE_SIZE 2144
36#define SI_PM4_UCODE_SIZE 2144
37#define SI_CE_UCODE_SIZE 2144
38
39/* RLC */
40#define R600_RLC_UCODE_SIZE 768
41#define R700_RLC_UCODE_SIZE 1024
42#define EVERGREEN_RLC_UCODE_SIZE 768
43#define CAYMAN_RLC_UCODE_SIZE 1024
44#define ARUBA_RLC_UCODE_SIZE 1536
45#define SI_RLC_UCODE_SIZE 2048
46
47/* MC */
48#define BTC_MC_UCODE_SIZE 6024
49#define CAYMAN_MC_UCODE_SIZE 6037
50#define SI_MC_UCODE_SIZE 7769
51#define OLAND_MC_UCODE_SIZE 7863
52
53/* SMC */
54#define RV770_SMC_UCODE_START 0x0100
55#define RV770_SMC_UCODE_SIZE 0x410d
56#define RV770_SMC_INT_VECTOR_START 0xffc0
57#define RV770_SMC_INT_VECTOR_SIZE 0x0040
58
59#define RV730_SMC_UCODE_START 0x0100
60#define RV730_SMC_UCODE_SIZE 0x412c
61#define RV730_SMC_INT_VECTOR_START 0xffc0
62#define RV730_SMC_INT_VECTOR_SIZE 0x0040
63
64#define RV710_SMC_UCODE_START 0x0100
65#define RV710_SMC_UCODE_SIZE 0x3f1f
66#define RV710_SMC_INT_VECTOR_START 0xffc0
67#define RV710_SMC_INT_VECTOR_SIZE 0x0040
68
69#define RV740_SMC_UCODE_START 0x0100
70#define RV740_SMC_UCODE_SIZE 0x41c5
71#define RV740_SMC_INT_VECTOR_START 0xffc0
72#define RV740_SMC_INT_VECTOR_SIZE 0x0040
73
74#define CEDAR_SMC_UCODE_START 0x0100
75#define CEDAR_SMC_UCODE_SIZE 0x5d50
76#define CEDAR_SMC_INT_VECTOR_START 0xffc0
77#define CEDAR_SMC_INT_VECTOR_SIZE 0x0040
78
79#define REDWOOD_SMC_UCODE_START 0x0100
80#define REDWOOD_SMC_UCODE_SIZE 0x5f0a
81#define REDWOOD_SMC_INT_VECTOR_START 0xffc0
82#define REDWOOD_SMC_INT_VECTOR_SIZE 0x0040
83
84#define JUNIPER_SMC_UCODE_START 0x0100
85#define JUNIPER_SMC_UCODE_SIZE 0x5f1f
86#define JUNIPER_SMC_INT_VECTOR_START 0xffc0
87#define JUNIPER_SMC_INT_VECTOR_SIZE 0x0040
88
89#define CYPRESS_SMC_UCODE_START 0x0100
90#define CYPRESS_SMC_UCODE_SIZE 0x61f7
91#define CYPRESS_SMC_INT_VECTOR_START 0xffc0
92#define CYPRESS_SMC_INT_VECTOR_SIZE 0x0040
93
94#define BARTS_SMC_UCODE_START 0x0100
95#define BARTS_SMC_UCODE_SIZE 0x6107
96#define BARTS_SMC_INT_VECTOR_START 0xffc0
97#define BARTS_SMC_INT_VECTOR_SIZE 0x0040
98
99#define TURKS_SMC_UCODE_START 0x0100
100#define TURKS_SMC_UCODE_SIZE 0x605b
101#define TURKS_SMC_INT_VECTOR_START 0xffc0
102#define TURKS_SMC_INT_VECTOR_SIZE 0x0040
103
104#define CAICOS_SMC_UCODE_START 0x0100
105#define CAICOS_SMC_UCODE_SIZE 0x5fbd
106#define CAICOS_SMC_INT_VECTOR_START 0xffc0
107#define CAICOS_SMC_INT_VECTOR_SIZE 0x0040
108
109#define CAYMAN_SMC_UCODE_START 0x0100
110#define CAYMAN_SMC_UCODE_SIZE 0x79ec
111#define CAYMAN_SMC_INT_VECTOR_START 0xffc0
112#define CAYMAN_SMC_INT_VECTOR_SIZE 0x0040
113
114#define TAHITI_SMC_UCODE_START 0x10000
115#define TAHITI_SMC_UCODE_SIZE 0xf458
116
117#define PITCAIRN_SMC_UCODE_START 0x10000
118#define PITCAIRN_SMC_UCODE_SIZE 0xe9f4
119
120#define VERDE_SMC_UCODE_START 0x10000
121#define VERDE_SMC_UCODE_SIZE 0xebe4
122
123#define OLAND_SMC_UCODE_START 0x10000
124#define OLAND_SMC_UCODE_SIZE 0xe7b4
125
126#define HAINAN_SMC_UCODE_START 0x10000
127#define HAINAN_SMC_UCODE_SIZE 0xe67C
128
129#endif
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index cad735dd02c6..ce5a10c8d338 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -44,11 +44,13 @@
44#define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin" 44#define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin"
45#define FIRMWARE_SUMO "radeon/SUMO_uvd.bin" 45#define FIRMWARE_SUMO "radeon/SUMO_uvd.bin"
46#define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin" 46#define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin"
47#define FIRMWARE_BONAIRE "radeon/BONAIRE_uvd.bin"
47 48
48MODULE_FIRMWARE(FIRMWARE_RV710); 49MODULE_FIRMWARE(FIRMWARE_RV710);
49MODULE_FIRMWARE(FIRMWARE_CYPRESS); 50MODULE_FIRMWARE(FIRMWARE_CYPRESS);
50MODULE_FIRMWARE(FIRMWARE_SUMO); 51MODULE_FIRMWARE(FIRMWARE_SUMO);
51MODULE_FIRMWARE(FIRMWARE_TAHITI); 52MODULE_FIRMWARE(FIRMWARE_TAHITI);
53MODULE_FIRMWARE(FIRMWARE_BONAIRE);
52 54
53static void radeon_uvd_idle_work_handler(struct work_struct *work); 55static void radeon_uvd_idle_work_handler(struct work_struct *work);
54 56
@@ -100,6 +102,12 @@ int radeon_uvd_init(struct radeon_device *rdev)
100 fw_name = FIRMWARE_TAHITI; 102 fw_name = FIRMWARE_TAHITI;
101 break; 103 break;
102 104
105 case CHIP_BONAIRE:
106 case CHIP_KABINI:
107 case CHIP_KAVERI:
108 fw_name = FIRMWARE_BONAIRE;
109 break;
110
103 default: 111 default:
104 return -EINVAL; 112 return -EINVAL;
105 } 113 }
@@ -691,11 +699,19 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
691 struct radeon_device *rdev = 699 struct radeon_device *rdev =
692 container_of(work, struct radeon_device, uvd.idle_work.work); 700 container_of(work, struct radeon_device, uvd.idle_work.work);
693 701
694 if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) 702 if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
695 radeon_set_uvd_clocks(rdev, 0, 0); 703 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
696 else 704 mutex_lock(&rdev->pm.mutex);
705 rdev->pm.dpm.uvd_active = false;
706 mutex_unlock(&rdev->pm.mutex);
707 radeon_pm_compute_clocks(rdev);
708 } else {
709 radeon_set_uvd_clocks(rdev, 0, 0);
710 }
711 } else {
697 schedule_delayed_work(&rdev->uvd.idle_work, 712 schedule_delayed_work(&rdev->uvd.idle_work,
698 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); 713 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
714 }
699} 715}
700 716
701void radeon_uvd_note_usage(struct radeon_device *rdev) 717void radeon_uvd_note_usage(struct radeon_device *rdev)
@@ -703,8 +719,14 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
703 bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work); 719 bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
704 set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work, 720 set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
705 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); 721 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
706 if (set_clocks) 722 if (set_clocks) {
707 radeon_set_uvd_clocks(rdev, 53300, 40000); 723 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
724 /* XXX pick SD/HD/MVC */
725 radeon_dpm_enable_power_state(rdev, POWER_STATE_TYPE_INTERNAL_UVD);
726 } else {
727 radeon_set_uvd_clocks(rdev, 53300, 40000);
728 }
729 }
708} 730}
709 731
710static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq, 732static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 55880d5962c3..d8ddfb34545d 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -248,13 +248,16 @@ struct rs690_watermark {
248}; 248};
249 249
250static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, 250static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
251 struct radeon_crtc *crtc, 251 struct radeon_crtc *crtc,
252 struct rs690_watermark *wm) 252 struct rs690_watermark *wm,
253 bool low)
253{ 254{
254 struct drm_display_mode *mode = &crtc->base.mode; 255 struct drm_display_mode *mode = &crtc->base.mode;
255 fixed20_12 a, b, c; 256 fixed20_12 a, b, c;
256 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; 257 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
257 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; 258 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
259 fixed20_12 sclk, core_bandwidth, max_bandwidth;
260 u32 selected_sclk;
258 261
259 if (!crtc->base.enabled) { 262 if (!crtc->base.enabled) {
260 /* FIXME: wouldn't it better to set priority mark to maximum */ 263 /* FIXME: wouldn't it better to set priority mark to maximum */
@@ -262,6 +265,21 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
262 return; 265 return;
263 } 266 }
264 267
268 if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) &&
269 (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
270 selected_sclk = radeon_dpm_get_sclk(rdev, low);
271 else
272 selected_sclk = rdev->pm.current_sclk;
273
274 /* sclk in Mhz */
275 a.full = dfixed_const(100);
276 sclk.full = dfixed_const(selected_sclk);
277 sclk.full = dfixed_div(sclk, a);
278
279 /* core_bandwidth = sclk(Mhz) * 16 */
280 a.full = dfixed_const(16);
281 core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
282
265 if (crtc->vsc.full > dfixed_const(2)) 283 if (crtc->vsc.full > dfixed_const(2))
266 wm->num_line_pair.full = dfixed_const(2); 284 wm->num_line_pair.full = dfixed_const(2);
267 else 285 else
@@ -322,36 +340,36 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
322 wm->active_time.full = dfixed_div(wm->active_time, a); 340 wm->active_time.full = dfixed_div(wm->active_time, a);
323 341
324 /* Maximun bandwidth is the minimun bandwidth of all component */ 342 /* Maximun bandwidth is the minimun bandwidth of all component */
325 rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; 343 max_bandwidth = core_bandwidth;
326 if (rdev->mc.igp_sideport_enabled) { 344 if (rdev->mc.igp_sideport_enabled) {
327 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && 345 if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
328 rdev->pm.sideport_bandwidth.full) 346 rdev->pm.sideport_bandwidth.full)
329 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; 347 max_bandwidth = rdev->pm.sideport_bandwidth;
330 read_delay_latency.full = dfixed_const(370 * 800 * 1000); 348 read_delay_latency.full = dfixed_const(370 * 800 * 1000);
331 read_delay_latency.full = dfixed_div(read_delay_latency, 349 read_delay_latency.full = dfixed_div(read_delay_latency,
332 rdev->pm.igp_sideport_mclk); 350 rdev->pm.igp_sideport_mclk);
333 } else { 351 } else {
334 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && 352 if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
335 rdev->pm.k8_bandwidth.full) 353 rdev->pm.k8_bandwidth.full)
336 rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth; 354 max_bandwidth = rdev->pm.k8_bandwidth;
337 if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && 355 if (max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
338 rdev->pm.ht_bandwidth.full) 356 rdev->pm.ht_bandwidth.full)
339 rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; 357 max_bandwidth = rdev->pm.ht_bandwidth;
340 read_delay_latency.full = dfixed_const(5000); 358 read_delay_latency.full = dfixed_const(5000);
341 } 359 }
342 360
343 /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ 361 /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
344 a.full = dfixed_const(16); 362 a.full = dfixed_const(16);
345 rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a); 363 sclk.full = dfixed_mul(max_bandwidth, a);
346 a.full = dfixed_const(1000); 364 a.full = dfixed_const(1000);
347 rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk); 365 sclk.full = dfixed_div(a, sclk);
348 /* Determine chunk time 366 /* Determine chunk time
349 * ChunkTime = the time it takes the DCP to send one chunk of data 367 * ChunkTime = the time it takes the DCP to send one chunk of data
350 * to the LB which consists of pipeline delay and inter chunk gap 368 * to the LB which consists of pipeline delay and inter chunk gap
351 * sclk = system clock(ns) 369 * sclk = system clock(ns)
352 */ 370 */
353 a.full = dfixed_const(256 * 13); 371 a.full = dfixed_const(256 * 13);
354 chunk_time.full = dfixed_mul(rdev->pm.sclk, a); 372 chunk_time.full = dfixed_mul(sclk, a);
355 a.full = dfixed_const(10); 373 a.full = dfixed_const(10);
356 chunk_time.full = dfixed_div(chunk_time, a); 374 chunk_time.full = dfixed_div(chunk_time, a);
357 375
@@ -415,175 +433,200 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
415 } 433 }
416} 434}
417 435
418void rs690_bandwidth_update(struct radeon_device *rdev) 436static void rs690_compute_mode_priority(struct radeon_device *rdev,
437 struct rs690_watermark *wm0,
438 struct rs690_watermark *wm1,
439 struct drm_display_mode *mode0,
440 struct drm_display_mode *mode1,
441 u32 *d1mode_priority_a_cnt,
442 u32 *d2mode_priority_a_cnt)
419{ 443{
420 struct drm_display_mode *mode0 = NULL;
421 struct drm_display_mode *mode1 = NULL;
422 struct rs690_watermark wm0;
423 struct rs690_watermark wm1;
424 u32 tmp;
425 u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
426 u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
427 fixed20_12 priority_mark02, priority_mark12, fill_rate; 444 fixed20_12 priority_mark02, priority_mark12, fill_rate;
428 fixed20_12 a, b; 445 fixed20_12 a, b;
429 446
430 radeon_update_display_priority(rdev); 447 *d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
431 448 *d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
432 if (rdev->mode_info.crtcs[0]->base.enabled)
433 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
434 if (rdev->mode_info.crtcs[1]->base.enabled)
435 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
436 /*
437 * Set display0/1 priority up in the memory controller for
438 * modes if the user specifies HIGH for displaypriority
439 * option.
440 */
441 if ((rdev->disp_priority == 2) &&
442 ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
443 tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
444 tmp &= C_000104_MC_DISP0R_INIT_LAT;
445 tmp &= C_000104_MC_DISP1R_INIT_LAT;
446 if (mode0)
447 tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
448 if (mode1)
449 tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
450 WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
451 }
452 rs690_line_buffer_adjust(rdev, mode0, mode1);
453
454 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
455 WREG32(R_006C9C_DCP_CONTROL, 0);
456 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
457 WREG32(R_006C9C_DCP_CONTROL, 2);
458
459 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
460 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
461
462 tmp = (wm0.lb_request_fifo_depth - 1);
463 tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
464 WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
465 449
466 if (mode0 && mode1) { 450 if (mode0 && mode1) {
467 if (dfixed_trunc(wm0.dbpp) > 64) 451 if (dfixed_trunc(wm0->dbpp) > 64)
468 a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); 452 a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
469 else 453 else
470 a.full = wm0.num_line_pair.full; 454 a.full = wm0->num_line_pair.full;
471 if (dfixed_trunc(wm1.dbpp) > 64) 455 if (dfixed_trunc(wm1->dbpp) > 64)
472 b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); 456 b.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
473 else 457 else
474 b.full = wm1.num_line_pair.full; 458 b.full = wm1->num_line_pair.full;
475 a.full += b.full; 459 a.full += b.full;
476 fill_rate.full = dfixed_div(wm0.sclk, a); 460 fill_rate.full = dfixed_div(wm0->sclk, a);
477 if (wm0.consumption_rate.full > fill_rate.full) { 461 if (wm0->consumption_rate.full > fill_rate.full) {
478 b.full = wm0.consumption_rate.full - fill_rate.full; 462 b.full = wm0->consumption_rate.full - fill_rate.full;
479 b.full = dfixed_mul(b, wm0.active_time); 463 b.full = dfixed_mul(b, wm0->active_time);
480 a.full = dfixed_mul(wm0.worst_case_latency, 464 a.full = dfixed_mul(wm0->worst_case_latency,
481 wm0.consumption_rate); 465 wm0->consumption_rate);
482 a.full = a.full + b.full; 466 a.full = a.full + b.full;
483 b.full = dfixed_const(16 * 1000); 467 b.full = dfixed_const(16 * 1000);
484 priority_mark02.full = dfixed_div(a, b); 468 priority_mark02.full = dfixed_div(a, b);
485 } else { 469 } else {
486 a.full = dfixed_mul(wm0.worst_case_latency, 470 a.full = dfixed_mul(wm0->worst_case_latency,
487 wm0.consumption_rate); 471 wm0->consumption_rate);
488 b.full = dfixed_const(16 * 1000); 472 b.full = dfixed_const(16 * 1000);
489 priority_mark02.full = dfixed_div(a, b); 473 priority_mark02.full = dfixed_div(a, b);
490 } 474 }
491 if (wm1.consumption_rate.full > fill_rate.full) { 475 if (wm1->consumption_rate.full > fill_rate.full) {
492 b.full = wm1.consumption_rate.full - fill_rate.full; 476 b.full = wm1->consumption_rate.full - fill_rate.full;
493 b.full = dfixed_mul(b, wm1.active_time); 477 b.full = dfixed_mul(b, wm1->active_time);
494 a.full = dfixed_mul(wm1.worst_case_latency, 478 a.full = dfixed_mul(wm1->worst_case_latency,
495 wm1.consumption_rate); 479 wm1->consumption_rate);
496 a.full = a.full + b.full; 480 a.full = a.full + b.full;
497 b.full = dfixed_const(16 * 1000); 481 b.full = dfixed_const(16 * 1000);
498 priority_mark12.full = dfixed_div(a, b); 482 priority_mark12.full = dfixed_div(a, b);
499 } else { 483 } else {
500 a.full = dfixed_mul(wm1.worst_case_latency, 484 a.full = dfixed_mul(wm1->worst_case_latency,
501 wm1.consumption_rate); 485 wm1->consumption_rate);
502 b.full = dfixed_const(16 * 1000); 486 b.full = dfixed_const(16 * 1000);
503 priority_mark12.full = dfixed_div(a, b); 487 priority_mark12.full = dfixed_div(a, b);
504 } 488 }
505 if (wm0.priority_mark.full > priority_mark02.full) 489 if (wm0->priority_mark.full > priority_mark02.full)
506 priority_mark02.full = wm0.priority_mark.full; 490 priority_mark02.full = wm0->priority_mark.full;
507 if (dfixed_trunc(priority_mark02) < 0) 491 if (dfixed_trunc(priority_mark02) < 0)
508 priority_mark02.full = 0; 492 priority_mark02.full = 0;
509 if (wm0.priority_mark_max.full > priority_mark02.full) 493 if (wm0->priority_mark_max.full > priority_mark02.full)
510 priority_mark02.full = wm0.priority_mark_max.full; 494 priority_mark02.full = wm0->priority_mark_max.full;
511 if (wm1.priority_mark.full > priority_mark12.full) 495 if (wm1->priority_mark.full > priority_mark12.full)
512 priority_mark12.full = wm1.priority_mark.full; 496 priority_mark12.full = wm1->priority_mark.full;
513 if (dfixed_trunc(priority_mark12) < 0) 497 if (dfixed_trunc(priority_mark12) < 0)
514 priority_mark12.full = 0; 498 priority_mark12.full = 0;
515 if (wm1.priority_mark_max.full > priority_mark12.full) 499 if (wm1->priority_mark_max.full > priority_mark12.full)
516 priority_mark12.full = wm1.priority_mark_max.full; 500 priority_mark12.full = wm1->priority_mark_max.full;
517 d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 501 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
518 d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); 502 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
519 if (rdev->disp_priority == 2) { 503 if (rdev->disp_priority == 2) {
520 d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); 504 *d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
521 d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); 505 *d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
522 } 506 }
523 } else if (mode0) { 507 } else if (mode0) {
524 if (dfixed_trunc(wm0.dbpp) > 64) 508 if (dfixed_trunc(wm0->dbpp) > 64)
525 a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); 509 a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
526 else 510 else
527 a.full = wm0.num_line_pair.full; 511 a.full = wm0->num_line_pair.full;
528 fill_rate.full = dfixed_div(wm0.sclk, a); 512 fill_rate.full = dfixed_div(wm0->sclk, a);
529 if (wm0.consumption_rate.full > fill_rate.full) { 513 if (wm0->consumption_rate.full > fill_rate.full) {
530 b.full = wm0.consumption_rate.full - fill_rate.full; 514 b.full = wm0->consumption_rate.full - fill_rate.full;
531 b.full = dfixed_mul(b, wm0.active_time); 515 b.full = dfixed_mul(b, wm0->active_time);
532 a.full = dfixed_mul(wm0.worst_case_latency, 516 a.full = dfixed_mul(wm0->worst_case_latency,
533 wm0.consumption_rate); 517 wm0->consumption_rate);
534 a.full = a.full + b.full; 518 a.full = a.full + b.full;
535 b.full = dfixed_const(16 * 1000); 519 b.full = dfixed_const(16 * 1000);
536 priority_mark02.full = dfixed_div(a, b); 520 priority_mark02.full = dfixed_div(a, b);
537 } else { 521 } else {
538 a.full = dfixed_mul(wm0.worst_case_latency, 522 a.full = dfixed_mul(wm0->worst_case_latency,
539 wm0.consumption_rate); 523 wm0->consumption_rate);
540 b.full = dfixed_const(16 * 1000); 524 b.full = dfixed_const(16 * 1000);
541 priority_mark02.full = dfixed_div(a, b); 525 priority_mark02.full = dfixed_div(a, b);
542 } 526 }
543 if (wm0.priority_mark.full > priority_mark02.full) 527 if (wm0->priority_mark.full > priority_mark02.full)
544 priority_mark02.full = wm0.priority_mark.full; 528 priority_mark02.full = wm0->priority_mark.full;
545 if (dfixed_trunc(priority_mark02) < 0) 529 if (dfixed_trunc(priority_mark02) < 0)
546 priority_mark02.full = 0; 530 priority_mark02.full = 0;
547 if (wm0.priority_mark_max.full > priority_mark02.full) 531 if (wm0->priority_mark_max.full > priority_mark02.full)
548 priority_mark02.full = wm0.priority_mark_max.full; 532 priority_mark02.full = wm0->priority_mark_max.full;
549 d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 533 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
550 if (rdev->disp_priority == 2) 534 if (rdev->disp_priority == 2)
551 d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); 535 *d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
552 } else if (mode1) { 536 } else if (mode1) {
553 if (dfixed_trunc(wm1.dbpp) > 64) 537 if (dfixed_trunc(wm1->dbpp) > 64)
554 a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); 538 a.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
555 else 539 else
556 a.full = wm1.num_line_pair.full; 540 a.full = wm1->num_line_pair.full;
557 fill_rate.full = dfixed_div(wm1.sclk, a); 541 fill_rate.full = dfixed_div(wm1->sclk, a);
558 if (wm1.consumption_rate.full > fill_rate.full) { 542 if (wm1->consumption_rate.full > fill_rate.full) {
559 b.full = wm1.consumption_rate.full - fill_rate.full; 543 b.full = wm1->consumption_rate.full - fill_rate.full;
560 b.full = dfixed_mul(b, wm1.active_time); 544 b.full = dfixed_mul(b, wm1->active_time);
561 a.full = dfixed_mul(wm1.worst_case_latency, 545 a.full = dfixed_mul(wm1->worst_case_latency,
562 wm1.consumption_rate); 546 wm1->consumption_rate);
563 a.full = a.full + b.full; 547 a.full = a.full + b.full;
564 b.full = dfixed_const(16 * 1000); 548 b.full = dfixed_const(16 * 1000);
565 priority_mark12.full = dfixed_div(a, b); 549 priority_mark12.full = dfixed_div(a, b);
566 } else { 550 } else {
567 a.full = dfixed_mul(wm1.worst_case_latency, 551 a.full = dfixed_mul(wm1->worst_case_latency,
568 wm1.consumption_rate); 552 wm1->consumption_rate);
569 b.full = dfixed_const(16 * 1000); 553 b.full = dfixed_const(16 * 1000);
570 priority_mark12.full = dfixed_div(a, b); 554 priority_mark12.full = dfixed_div(a, b);
571 } 555 }
572 if (wm1.priority_mark.full > priority_mark12.full) 556 if (wm1->priority_mark.full > priority_mark12.full)
573 priority_mark12.full = wm1.priority_mark.full; 557 priority_mark12.full = wm1->priority_mark.full;
574 if (dfixed_trunc(priority_mark12) < 0) 558 if (dfixed_trunc(priority_mark12) < 0)
575 priority_mark12.full = 0; 559 priority_mark12.full = 0;
576 if (wm1.priority_mark_max.full > priority_mark12.full) 560 if (wm1->priority_mark_max.full > priority_mark12.full)
577 priority_mark12.full = wm1.priority_mark_max.full; 561 priority_mark12.full = wm1->priority_mark_max.full;
578 d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); 562 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
579 if (rdev->disp_priority == 2) 563 if (rdev->disp_priority == 2)
580 d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); 564 *d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
581 } 565 }
566}
567
568void rs690_bandwidth_update(struct radeon_device *rdev)
569{
570 struct drm_display_mode *mode0 = NULL;
571 struct drm_display_mode *mode1 = NULL;
572 struct rs690_watermark wm0_high, wm0_low;
573 struct rs690_watermark wm1_high, wm1_low;
574 u32 tmp;
575 u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
576 u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
577
578 radeon_update_display_priority(rdev);
579
580 if (rdev->mode_info.crtcs[0]->base.enabled)
581 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
582 if (rdev->mode_info.crtcs[1]->base.enabled)
583 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
584 /*
585 * Set display0/1 priority up in the memory controller for
586 * modes if the user specifies HIGH for displaypriority
587 * option.
588 */
589 if ((rdev->disp_priority == 2) &&
590 ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
591 tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
592 tmp &= C_000104_MC_DISP0R_INIT_LAT;
593 tmp &= C_000104_MC_DISP1R_INIT_LAT;
594 if (mode0)
595 tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
596 if (mode1)
597 tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
598 WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
599 }
600 rs690_line_buffer_adjust(rdev, mode0, mode1);
601
602 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
603 WREG32(R_006C9C_DCP_CONTROL, 0);
604 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
605 WREG32(R_006C9C_DCP_CONTROL, 2);
606
607 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
608 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);
609
610 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, true);
611 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, true);
612
613 tmp = (wm0_high.lb_request_fifo_depth - 1);
614 tmp |= (wm1_high.lb_request_fifo_depth - 1) << 16;
615 WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
616
617 rs690_compute_mode_priority(rdev,
618 &wm0_high, &wm1_high,
619 mode0, mode1,
620 &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
621 rs690_compute_mode_priority(rdev,
622 &wm0_low, &wm1_low,
623 mode0, mode1,
624 &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);
582 625
583 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); 626 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
584 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); 627 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
585 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); 628 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
586 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); 629 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
587} 630}
588 631
589uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) 632uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
new file mode 100644
index 000000000000..bef832a62fee
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -0,0 +1,963 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "rs780d.h"
28#include "r600_dpm.h"
29#include "rs780_dpm.h"
30#include "atom.h"
31
32static struct igp_ps *rs780_get_ps(struct radeon_ps *rps)
33{
34 struct igp_ps *ps = rps->ps_priv;
35
36 return ps;
37}
38
39static struct igp_power_info *rs780_get_pi(struct radeon_device *rdev)
40{
41 struct igp_power_info *pi = rdev->pm.dpm.priv;
42
43 return pi;
44}
45
46static void rs780_get_pm_mode_parameters(struct radeon_device *rdev)
47{
48 struct igp_power_info *pi = rs780_get_pi(rdev);
49 struct radeon_mode_info *minfo = &rdev->mode_info;
50 struct drm_crtc *crtc;
51 struct radeon_crtc *radeon_crtc;
52 int i;
53
54 /* defaults */
55 pi->crtc_id = 0;
56 pi->refresh_rate = 60;
57
58 for (i = 0; i < rdev->num_crtc; i++) {
59 crtc = (struct drm_crtc *)minfo->crtcs[i];
60 if (crtc && crtc->enabled) {
61 radeon_crtc = to_radeon_crtc(crtc);
62 pi->crtc_id = radeon_crtc->crtc_id;
63 if (crtc->mode.htotal && crtc->mode.vtotal)
64 pi->refresh_rate =
65 (crtc->mode.clock * 1000) /
66 (crtc->mode.htotal * crtc->mode.vtotal);
67 break;
68 }
69 }
70}
71
72static void rs780_voltage_scaling_enable(struct radeon_device *rdev, bool enable);
73
74static int rs780_initialize_dpm_power_state(struct radeon_device *rdev,
75 struct radeon_ps *boot_ps)
76{
77 struct atom_clock_dividers dividers;
78 struct igp_ps *default_state = rs780_get_ps(boot_ps);
79 int i, ret;
80
81 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
82 default_state->sclk_low, false, &dividers);
83 if (ret)
84 return ret;
85
86 r600_engine_clock_entry_set_reference_divider(rdev, 0, dividers.ref_div);
87 r600_engine_clock_entry_set_feedback_divider(rdev, 0, dividers.fb_div);
88 r600_engine_clock_entry_set_post_divider(rdev, 0, dividers.post_div);
89
90 if (dividers.enable_post_div)
91 r600_engine_clock_entry_enable_post_divider(rdev, 0, true);
92 else
93 r600_engine_clock_entry_enable_post_divider(rdev, 0, false);
94
95 r600_engine_clock_entry_set_step_time(rdev, 0, R600_SST_DFLT);
96 r600_engine_clock_entry_enable_pulse_skipping(rdev, 0, false);
97
98 r600_engine_clock_entry_enable(rdev, 0, true);
99 for (i = 1; i < R600_PM_NUMBER_OF_SCLKS; i++)
100 r600_engine_clock_entry_enable(rdev, i, false);
101
102 r600_enable_mclk_control(rdev, false);
103 r600_voltage_control_enable_pins(rdev, 0);
104
105 return 0;
106}
107
108static int rs780_initialize_dpm_parameters(struct radeon_device *rdev,
109 struct radeon_ps *boot_ps)
110{
111 int ret = 0;
112 int i;
113
114 r600_set_bsp(rdev, R600_BSU_DFLT, R600_BSP_DFLT);
115
116 r600_set_at(rdev, 0, 0, 0, 0);
117
118 r600_set_git(rdev, R600_GICST_DFLT);
119
120 for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
121 r600_set_tc(rdev, i, 0, 0);
122
123 r600_select_td(rdev, R600_TD_DFLT);
124 r600_set_vrc(rdev, 0);
125
126 r600_set_tpu(rdev, R600_TPU_DFLT);
127 r600_set_tpc(rdev, R600_TPC_DFLT);
128
129 r600_set_sstu(rdev, R600_SSTU_DFLT);
130 r600_set_sst(rdev, R600_SST_DFLT);
131
132 r600_set_fctu(rdev, R600_FCTU_DFLT);
133 r600_set_fct(rdev, R600_FCT_DFLT);
134
135 r600_set_vddc3d_oorsu(rdev, R600_VDDC3DOORSU_DFLT);
136 r600_set_vddc3d_oorphc(rdev, R600_VDDC3DOORPHC_DFLT);
137 r600_set_vddc3d_oorsdc(rdev, R600_VDDC3DOORSDC_DFLT);
138 r600_set_ctxcgtt3d_rphc(rdev, R600_CTXCGTT3DRPHC_DFLT);
139 r600_set_ctxcgtt3d_rsdc(rdev, R600_CTXCGTT3DRSDC_DFLT);
140
141 r600_vid_rt_set_vru(rdev, R600_VRU_DFLT);
142 r600_vid_rt_set_vrt(rdev, R600_VOLTAGERESPONSETIME_DFLT);
143 r600_vid_rt_set_ssu(rdev, R600_SPLLSTEPUNIT_DFLT);
144
145 ret = rs780_initialize_dpm_power_state(rdev, boot_ps);
146
147 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_LOW, 0);
148 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_MEDIUM, 0);
149 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_HIGH, 0);
150
151 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_LOW, 0);
152 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_MEDIUM, 0);
153 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_HIGH, 0);
154
155 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_LOW, 0);
156 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_MEDIUM, 0);
157 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_HIGH, 0);
158
159 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_LOW, R600_DISPLAY_WATERMARK_HIGH);
160 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_MEDIUM, R600_DISPLAY_WATERMARK_HIGH);
161 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_HIGH, R600_DISPLAY_WATERMARK_HIGH);
162
163 r600_power_level_enable(rdev, R600_POWER_LEVEL_CTXSW, false);
164 r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
165 r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
166 r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
167
168 r600_power_level_set_enter_index(rdev, R600_POWER_LEVEL_LOW);
169
170 r600_set_vrc(rdev, RS780_CGFTV_DFLT);
171
172 return ret;
173}
174
175static void rs780_start_dpm(struct radeon_device *rdev)
176{
177 r600_enable_sclk_control(rdev, false);
178 r600_enable_mclk_control(rdev, false);
179
180 r600_dynamicpm_enable(rdev, true);
181
182 radeon_wait_for_vblank(rdev, 0);
183 radeon_wait_for_vblank(rdev, 1);
184
185 r600_enable_spll_bypass(rdev, true);
186 r600_wait_for_spll_change(rdev);
187 r600_enable_spll_bypass(rdev, false);
188 r600_wait_for_spll_change(rdev);
189
190 r600_enable_spll_bypass(rdev, true);
191 r600_wait_for_spll_change(rdev);
192 r600_enable_spll_bypass(rdev, false);
193 r600_wait_for_spll_change(rdev);
194
195 r600_enable_sclk_control(rdev, true);
196}
197
198
199static void rs780_preset_ranges_slow_clk_fbdiv_en(struct radeon_device *rdev)
200{
201 WREG32_P(FVTHROT_SLOW_CLK_FEEDBACK_DIV_REG1, RANGE_SLOW_CLK_FEEDBACK_DIV_EN,
202 ~RANGE_SLOW_CLK_FEEDBACK_DIV_EN);
203
204 WREG32_P(FVTHROT_SLOW_CLK_FEEDBACK_DIV_REG1,
205 RANGE0_SLOW_CLK_FEEDBACK_DIV(RS780_SLOWCLKFEEDBACKDIV_DFLT),
206 ~RANGE0_SLOW_CLK_FEEDBACK_DIV_MASK);
207}
208
209static void rs780_preset_starting_fbdiv(struct radeon_device *rdev)
210{
211 u32 fbdiv = (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
212
213 WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fbdiv),
214 ~STARTING_FEEDBACK_DIV_MASK);
215
216 WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fbdiv),
217 ~FORCED_FEEDBACK_DIV_MASK);
218
219 WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
220}
221
/*
 * rs780_voltage_scaling_init - program the PWM voltage-scaling hardware.
 *
 * Chooses feedback-divider range defaults by PCI device id (0x9614 = RS780D,
 * 0x9714/0x9715 = RS880, otherwise plain RS780), fills the four PWM range
 * endpoints either from the BIOS min/max voltages (PWM voltage control) or
 * from GPIO defaults (order reversed when the waveform must be inverted),
 * then programs the FVTHROT PWM registers and enables voltage scaling.
 */
static void rs780_voltage_scaling_init(struct radeon_device *rdev)
{
	struct igp_power_info *pi = rs780_get_pi(rdev);
	struct drm_device *dev = rdev->ddev;
	u32 fv_throt_pwm_fb_div_range[3];
	u32 fv_throt_pwm_range[4];

	if (dev->pdev->device == 0x9614) {
		/* RS780D */
		fv_throt_pwm_fb_div_range[0] = RS780D_FVTHROTPWMFBDIVRANGEREG0_DFLT;
		fv_throt_pwm_fb_div_range[1] = RS780D_FVTHROTPWMFBDIVRANGEREG1_DFLT;
		fv_throt_pwm_fb_div_range[2] = RS780D_FVTHROTPWMFBDIVRANGEREG2_DFLT;
	} else if ((dev->pdev->device == 0x9714) ||
		   (dev->pdev->device == 0x9715)) {
		/* RS880 */
		fv_throt_pwm_fb_div_range[0] = RS880D_FVTHROTPWMFBDIVRANGEREG0_DFLT;
		fv_throt_pwm_fb_div_range[1] = RS880D_FVTHROTPWMFBDIVRANGEREG1_DFLT;
		fv_throt_pwm_fb_div_range[2] = RS880D_FVTHROTPWMFBDIVRANGEREG2_DFLT;
	} else {
		/* plain RS780 */
		fv_throt_pwm_fb_div_range[0] = RS780_FVTHROTPWMFBDIVRANGEREG0_DFLT;
		fv_throt_pwm_fb_div_range[1] = RS780_FVTHROTPWMFBDIVRANGEREG1_DFLT;
		fv_throt_pwm_fb_div_range[2] = RS780_FVTHROTPWMFBDIVRANGEREG2_DFLT;
	}

	if (pi->pwm_voltage_control) {
		/* voltage endpoints come straight from the BIOS limits */
		fv_throt_pwm_range[0] = pi->min_voltage;
		fv_throt_pwm_range[1] = pi->min_voltage;
		fv_throt_pwm_range[2] = pi->max_voltage;
		fv_throt_pwm_range[3] = pi->max_voltage;
	} else {
		/* GPIO control: use fixed defaults, mirrored when the PWM
		 * waveform has to be inverted */
		fv_throt_pwm_range[0] = pi->invert_pwm_required ?
			RS780_FVTHROTPWMRANGE3_GPIO_DFLT : RS780_FVTHROTPWMRANGE0_GPIO_DFLT;
		fv_throt_pwm_range[1] = pi->invert_pwm_required ?
			RS780_FVTHROTPWMRANGE2_GPIO_DFLT : RS780_FVTHROTPWMRANGE1_GPIO_DFLT;
		fv_throt_pwm_range[2] = pi->invert_pwm_required ?
			RS780_FVTHROTPWMRANGE1_GPIO_DFLT : RS780_FVTHROTPWMRANGE2_GPIO_DFLT;
		fv_throt_pwm_range[3] = pi->invert_pwm_required ?
			RS780_FVTHROTPWMRANGE0_GPIO_DFLT : RS780_FVTHROTPWMRANGE3_GPIO_DFLT;
	}

	/* start at the maximum voltage and force it until scaling takes over */
	WREG32_P(FVTHROT_PWM_CTRL_REG0,
		 STARTING_PWM_HIGHTIME(pi->max_voltage),
		 ~STARTING_PWM_HIGHTIME_MASK);

	WREG32_P(FVTHROT_PWM_CTRL_REG0,
		 NUMBER_OF_CYCLES_IN_PERIOD(pi->num_of_cycles_in_period),
		 ~NUMBER_OF_CYCLES_IN_PERIOD_MASK);

	WREG32_P(FVTHROT_PWM_CTRL_REG0, FORCE_STARTING_PWM_HIGHTIME,
		 ~FORCE_STARTING_PWM_HIGHTIME);

	if (pi->invert_pwm_required)
		WREG32_P(FVTHROT_PWM_CTRL_REG0, INVERT_PWM_WAVEFORM, ~INVERT_PWM_WAVEFORM);
	else
		WREG32_P(FVTHROT_PWM_CTRL_REG0, 0, ~INVERT_PWM_WAVEFORM);

	rs780_voltage_scaling_enable(rdev, true);

	WREG32(FVTHROT_PWM_CTRL_REG1,
	       (MIN_PWM_HIGHTIME(pi->min_voltage) |
		MAX_PWM_HIGHTIME(pi->max_voltage)));

	WREG32(FVTHROT_PWM_US_REG0, RS780_FVTHROTPWMUSREG0_DFLT);
	WREG32(FVTHROT_PWM_US_REG1, RS780_FVTHROTPWMUSREG1_DFLT);
	WREG32(FVTHROT_PWM_DS_REG0, RS780_FVTHROTPWMDSREG0_DFLT);
	WREG32(FVTHROT_PWM_DS_REG1, RS780_FVTHROTPWMDSREG1_DFLT);

	WREG32_P(FVTHROT_PWM_FEEDBACK_DIV_REG1,
		 RANGE0_PWM_FEEDBACK_DIV(fv_throt_pwm_fb_div_range[0]),
		 ~RANGE0_PWM_FEEDBACK_DIV_MASK);

	WREG32(FVTHROT_PWM_FEEDBACK_DIV_REG2,
	       (RANGE1_PWM_FEEDBACK_DIV(fv_throt_pwm_fb_div_range[1]) |
		RANGE2_PWM_FEEDBACK_DIV(fv_throt_pwm_fb_div_range[2])));

	/* NOTE(review): REG3 and REG4 both use fv_throt_pwm_range[1] and [2];
	 * endpoints [0] and [3] computed above are never programmed.  This
	 * looks suspicious but may be intentional — confirm against the
	 * register spec before changing. */
	WREG32(FVTHROT_PWM_FEEDBACK_DIV_REG3,
	       (RANGE0_PWM(fv_throt_pwm_range[1]) |
		RANGE1_PWM(fv_throt_pwm_range[2])));
	WREG32(FVTHROT_PWM_FEEDBACK_DIV_REG4,
	       (RANGE2_PWM(fv_throt_pwm_range[1]) |
		RANGE3_PWM(fv_throt_pwm_range[2])));
}
302
303static void rs780_clk_scaling_enable(struct radeon_device *rdev, bool enable)
304{
305 if (enable)
306 WREG32_P(FVTHROT_CNTRL_REG, ENABLE_FV_THROT | ENABLE_FV_UPDATE,
307 ~(ENABLE_FV_THROT | ENABLE_FV_UPDATE));
308 else
309 WREG32_P(FVTHROT_CNTRL_REG, 0,
310 ~(ENABLE_FV_THROT | ENABLE_FV_UPDATE));
311}
312
313static void rs780_voltage_scaling_enable(struct radeon_device *rdev, bool enable)
314{
315 if (enable)
316 WREG32_P(FVTHROT_CNTRL_REG, ENABLE_FV_THROT_IO, ~ENABLE_FV_THROT_IO);
317 else
318 WREG32_P(FVTHROT_CNTRL_REG, 0, ~ENABLE_FV_THROT_IO);
319}
320
/*
 * rs780_set_engine_clock_wfc - load the default up (UTC) and down (DTC)
 * threshold counters used by the engine-clock throttle logic.
 */
static void rs780_set_engine_clock_wfc(struct radeon_device *rdev)
{
	WREG32(FVTHROT_UTC0, RS780_FVTHROTUTC0_DFLT);
	WREG32(FVTHROT_UTC1, RS780_FVTHROTUTC1_DFLT);
	WREG32(FVTHROT_UTC2, RS780_FVTHROTUTC2_DFLT);
	WREG32(FVTHROT_UTC3, RS780_FVTHROTUTC3_DFLT);
	WREG32(FVTHROT_UTC4, RS780_FVTHROTUTC4_DFLT);

	WREG32(FVTHROT_DTC0, RS780_FVTHROTDTC0_DFLT);
	WREG32(FVTHROT_DTC1, RS780_FVTHROTDTC1_DFLT);
	WREG32(FVTHROT_DTC2, RS780_FVTHROTDTC2_DFLT);
	WREG32(FVTHROT_DTC3, RS780_FVTHROTDTC3_DFLT);
	WREG32(FVTHROT_DTC4, RS780_FVTHROTDTC4_DFLT);
}
335
/*
 * rs780_set_engine_clock_sc - set the feedback-divider update timer and
 * the minimum clocks-in-period, clearing the refresh-rate divisor.
 */
static void rs780_set_engine_clock_sc(struct radeon_device *rdev)
{
	WREG32_P(FVTHROT_FBDIV_REG2,
		 FB_DIV_TIMER_VAL(RS780_FBDIVTIMERVAL_DFLT),
		 ~FB_DIV_TIMER_VAL_MASK);

	/* divisor 0, minimum clocks-in-period 0xf */
	WREG32_P(FVTHROT_CNTRL_REG,
		 REFRESH_RATE_DIVISOR(0) | MINIMUM_CIP(0xf),
		 ~(REFRESH_RATE_DIVISOR_MASK | MINIMUM_CIP_MASK));
}
346
/*
 * rs780_set_engine_clock_tdc - clear forced trend selection so the
 * hardware chooses the clock trend itself.
 */
static void rs780_set_engine_clock_tdc(struct radeon_device *rdev)
{
	WREG32_P(FVTHROT_CNTRL_REG, 0, ~(FORCE_TREND_SEL | TREND_SEL_MODE));
}
351
/*
 * rs780_set_engine_clock_ssc - load default feedback up/down step
 * registers and limit the per-step feedback-divider change to 1.
 */
static void rs780_set_engine_clock_ssc(struct radeon_device *rdev)
{
	WREG32(FVTHROT_FB_US_REG0, RS780_FVTHROTFBUSREG0_DFLT);
	WREG32(FVTHROT_FB_US_REG1, RS780_FVTHROTFBUSREG1_DFLT);
	WREG32(FVTHROT_FB_DS_REG0, RS780_FVTHROTFBDSREG0_DFLT);
	WREG32(FVTHROT_FB_DS_REG1, RS780_FVTHROTFBDSREG1_DFLT);

	/* smallest possible step: change fbdiv by at most 1 at a time */
	WREG32_P(FVTHROT_FBDIV_REG1, MAX_FEEDBACK_STEP(1), ~MAX_FEEDBACK_STEP_MASK);
}
361
362static void rs780_program_at(struct radeon_device *rdev)
363{
364 struct igp_power_info *pi = rs780_get_pi(rdev);
365
366 WREG32(FVTHROT_TARGET_REG, 30000000 / pi->refresh_rate);
367 WREG32(FVTHROT_CB1, 1000000 * 5 / pi->refresh_rate);
368 WREG32(FVTHROT_CB2, 1000000 * 10 / pi->refresh_rate);
369 WREG32(FVTHROT_CB3, 1000000 * 30 / pi->refresh_rate);
370 WREG32(FVTHROT_CB4, 1000000 * 50 / pi->refresh_rate);
371}
372
/*
 * rs780_disable_vbios_powersaving - clear the top CG_INTGFX_MISC bits so
 * any power saving the video BIOS enabled no longer interferes with the
 * driver's DPM.
 */
static void rs780_disable_vbios_powersaving(struct radeon_device *rdev)
{
	WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000);
}
377
/*
 * rs780_force_voltage_to_high - force the PWM voltage to the maximum.
 *
 * No-op when the current state is already pinned at the high VDDC level.
 * The SPLL is put into bypass around the reprogramming so the engine
 * clock stays stable while the voltage is forced; the udelay(1) calls
 * let the bypass/PWM changes settle.
 */
static void rs780_force_voltage_to_high(struct radeon_device *rdev)
{
	struct igp_power_info *pi = rs780_get_pi(rdev);
	struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);

	if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) &&
	    (current_state->min_voltage == RS780_VDDC_LEVEL_HIGH))
		return;

	/* bypass the SPLL while changing the voltage */
	WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);

	udelay(1);

	WREG32_P(FVTHROT_PWM_CTRL_REG0,
		 STARTING_PWM_HIGHTIME(pi->max_voltage),
		 ~STARTING_PWM_HIGHTIME_MASK);

	WREG32_P(FVTHROT_PWM_CTRL_REG0,
		 FORCE_STARTING_PWM_HIGHTIME, ~FORCE_STARTING_PWM_HIGHTIME);

	/* disable range-based feedback division while forced */
	WREG32_P(FVTHROT_PWM_FEEDBACK_DIV_REG1, 0,
		 ~RANGE_PWM_FEEDBACK_DIV_EN);

	udelay(1);

	/* release the SPLL bypass */
	WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
}
405
/*
 * rs780_set_engine_clock_scaling - program the feedback-divider range for
 * the new power state's engine clock window.
 *
 * Computes dividers for the new low/high sclk and the old high sclk via
 * atombios, forces the new maximum divider while the SPLL is bypassed,
 * then (if the state has a real low..high range) opens the min/max window
 * and releases the forced divider so hardware scaling can move within it.
 *
 * Returns 0 on success or the atombios error code.
 */
static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
					  struct radeon_ps *new_ps,
					  struct radeon_ps *old_ps)
{
	struct atom_clock_dividers min_dividers, max_dividers, current_max_dividers;
	struct igp_ps *new_state = rs780_get_ps(new_ps);
	struct igp_ps *old_state = rs780_get_ps(old_ps);
	int ret;

	/* nothing to do when the clock window is unchanged */
	if ((new_state->sclk_high == old_state->sclk_high) &&
	    (new_state->sclk_low == old_state->sclk_low))
		return 0;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     new_state->sclk_low, false, &min_dividers);
	if (ret)
		return ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     new_state->sclk_high, false, &max_dividers);
	if (ret)
		return ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     old_state->sclk_high, false, &current_max_dividers);
	if (ret)
		return ret;

	/* switch dividers with the SPLL bypassed so the GPU keeps running */
	WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);

	WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(max_dividers.fb_div),
		 ~FORCED_FEEDBACK_DIV_MASK);
	WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(max_dividers.fb_div),
		 ~STARTING_FEEDBACK_DIV_MASK);
	WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);

	udelay(100);

	WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);

	if (max_dividers.fb_div > min_dividers.fb_div) {
		/* real range: open the window and let hardware scale */
		WREG32_P(FVTHROT_FBDIV_REG0,
			 MIN_FEEDBACK_DIV(min_dividers.fb_div) |
			 MAX_FEEDBACK_DIV(max_dividers.fb_div),
			 ~(MIN_FEEDBACK_DIV_MASK | MAX_FEEDBACK_DIV_MASK));

		WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV);
	}

	return 0;
}
457
458static void rs780_set_engine_clock_spc(struct radeon_device *rdev,
459 struct radeon_ps *new_ps,
460 struct radeon_ps *old_ps)
461{
462 struct igp_ps *new_state = rs780_get_ps(new_ps);
463 struct igp_ps *old_state = rs780_get_ps(old_ps);
464 struct igp_power_info *pi = rs780_get_pi(rdev);
465
466 if ((new_state->sclk_high == old_state->sclk_high) &&
467 (new_state->sclk_low == old_state->sclk_low))
468 return;
469
470 if (pi->crtc_id == 0)
471 WREG32_P(CG_INTGFX_MISC, 0, ~FVTHROT_VBLANK_SEL);
472 else
473 WREG32_P(CG_INTGFX_MISC, FVTHROT_VBLANK_SEL, ~FVTHROT_VBLANK_SEL);
474
475}
476
477static void rs780_activate_engine_clk_scaling(struct radeon_device *rdev,
478 struct radeon_ps *new_ps,
479 struct radeon_ps *old_ps)
480{
481 struct igp_ps *new_state = rs780_get_ps(new_ps);
482 struct igp_ps *old_state = rs780_get_ps(old_ps);
483
484 if ((new_state->sclk_high == old_state->sclk_high) &&
485 (new_state->sclk_low == old_state->sclk_low))
486 return;
487
488 rs780_clk_scaling_enable(rdev, true);
489}
490
491static u32 rs780_get_voltage_for_vddc_level(struct radeon_device *rdev,
492 enum rs780_vddc_level vddc)
493{
494 struct igp_power_info *pi = rs780_get_pi(rdev);
495
496 if (vddc == RS780_VDDC_LEVEL_HIGH)
497 return pi->max_voltage;
498 else if (vddc == RS780_VDDC_LEVEL_LOW)
499 return pi->min_voltage;
500 else
501 return pi->max_voltage;
502}
503
/*
 * rs780_enable_voltage_scaling - configure voltage scaling for a new state.
 *
 * Skipped entirely when the new state is pinned at the high VDDC level
 * (the forced-high setting from rs780_force_voltage_to_high() stays in
 * effect).  Otherwise, with the SPLL bypassed: if the state spans a
 * voltage range, enable range-based PWM feedback division and release the
 * forced PWM hightime; if it sits at a single voltage below the maximum,
 * force that voltage directly.
 */
static void rs780_enable_voltage_scaling(struct radeon_device *rdev,
					 struct radeon_ps *new_ps)
{
	struct igp_ps *new_state = rs780_get_ps(new_ps);
	struct igp_power_info *pi = rs780_get_pi(rdev);
	enum rs780_vddc_level vddc_high, vddc_low;

	udelay(100);

	if ((new_state->max_voltage == RS780_VDDC_LEVEL_HIGH) &&
	    (new_state->min_voltage == RS780_VDDC_LEVEL_HIGH))
		return;

	vddc_high = rs780_get_voltage_for_vddc_level(rdev,
						     new_state->max_voltage);
	vddc_low = rs780_get_voltage_for_vddc_level(rdev,
						    new_state->min_voltage);

	/* bypass the SPLL while reprogramming the PWM */
	WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);

	udelay(1);
	if (vddc_high > vddc_low) {
		/* real voltage range: let the hardware scale */
		WREG32_P(FVTHROT_PWM_FEEDBACK_DIV_REG1,
			 RANGE_PWM_FEEDBACK_DIV_EN, ~RANGE_PWM_FEEDBACK_DIV_EN);

		WREG32_P(FVTHROT_PWM_CTRL_REG0, 0, ~FORCE_STARTING_PWM_HIGHTIME);
	} else if (vddc_high == vddc_low) {
		/* single voltage below max: force it */
		if (pi->max_voltage != vddc_high) {
			WREG32_P(FVTHROT_PWM_CTRL_REG0,
				 STARTING_PWM_HIGHTIME(vddc_high),
				 ~STARTING_PWM_HIGHTIME_MASK);

			WREG32_P(FVTHROT_PWM_CTRL_REG0,
				 FORCE_STARTING_PWM_HIGHTIME,
				 ~FORCE_STARTING_PWM_HIGHTIME);
		}
	}

	WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
}
544
545static void rs780_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
546 struct radeon_ps *new_ps,
547 struct radeon_ps *old_ps)
548{
549 struct igp_ps *new_state = rs780_get_ps(new_ps);
550 struct igp_ps *current_state = rs780_get_ps(old_ps);
551
552 if ((new_ps->vclk == old_ps->vclk) &&
553 (new_ps->dclk == old_ps->dclk))
554 return;
555
556 if (new_state->sclk_high >= current_state->sclk_high)
557 return;
558
559 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
560}
561
562static void rs780_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
563 struct radeon_ps *new_ps,
564 struct radeon_ps *old_ps)
565{
566 struct igp_ps *new_state = rs780_get_ps(new_ps);
567 struct igp_ps *current_state = rs780_get_ps(old_ps);
568
569 if ((new_ps->vclk == old_ps->vclk) &&
570 (new_ps->dclk == old_ps->dclk))
571 return;
572
573 if (new_state->sclk_high < current_state->sclk_high)
574 return;
575
576 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
577}
578
/*
 * rs780_dpm_enable - bring up dynamic power management on RS780/RS880.
 *
 * Disables VBIOS power saving, initializes the DPM parameters from the
 * boot state, starts the state machine, programs the clock/voltage
 * scaling hardware and, when the internal thermal sensor is present,
 * hooks up the thermal interrupt.
 *
 * Returns 0 on success, -EINVAL if dynamic PM is already enabled, or an
 * error from parameter init / thermal range setup.
 */
int rs780_dpm_enable(struct radeon_device *rdev)
{
	struct igp_power_info *pi = rs780_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	rs780_get_pm_mode_parameters(rdev);
	rs780_disable_vbios_powersaving(rdev);

	/* refuse to double-enable */
	if (r600_dynamicpm_enabled(rdev))
		return -EINVAL;
	ret = rs780_initialize_dpm_parameters(rdev, boot_ps);
	if (ret)
		return ret;
	rs780_start_dpm(rdev);

	rs780_preset_ranges_slow_clk_fbdiv_en(rdev);
	rs780_preset_starting_fbdiv(rdev);
	if (pi->voltage_control)
		rs780_voltage_scaling_init(rdev);
	rs780_clk_scaling_enable(rdev, true);
	rs780_set_engine_clock_sc(rdev);
	rs780_set_engine_clock_wfc(rdev);
	rs780_program_at(rdev);
	rs780_set_engine_clock_tdc(rdev);
	rs780_set_engine_clock_ssc(rdev);

	if (pi->gfx_clock_gating)
		r600_gfx_clockgating_enable(rdev, true);

	/* rv6xx-class internal thermal sensor: enable the thermal irq */
	if (rdev->irq.installed && (rdev->pm.int_thermal_type == THERMAL_TYPE_RV6XX)) {
		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	return 0;
}
619
/*
 * rs780_dpm_disable - tear down dynamic power management: stop dynamic
 * PM, disable clock/voltage scaling and gfx clock gating, and mask the
 * thermal interrupt if it was enabled.
 */
void rs780_dpm_disable(struct radeon_device *rdev)
{
	struct igp_power_info *pi = rs780_get_pi(rdev);

	r600_dynamicpm_enable(rdev, false);

	rs780_clk_scaling_enable(rdev, false);
	rs780_voltage_scaling_enable(rdev, false);

	if (pi->gfx_clock_gating)
		r600_gfx_clockgating_enable(rdev, false);

	if (rdev->irq.installed &&
	    (rdev->pm.int_thermal_type == THERMAL_TYPE_RV6XX)) {
		rdev->irq.dpm_thermal = false;
		radeon_irq_set(rdev);
	}
}
638
/*
 * rs780_dpm_set_power_state - transition from the current to the
 * requested power state.
 *
 * Order matters: UVD clocks are changed before the engine clock when
 * sclk drops (and after when it rises); the voltage is forced high
 * across the engine clock reprogramming, then scaling is re-enabled.
 *
 * Returns 0 on success or the error from engine clock scaling setup.
 */
int rs780_dpm_set_power_state(struct radeon_device *rdev)
{
	struct igp_power_info *pi = rs780_get_pi(rdev);
	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
	struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
	int ret;

	rs780_get_pm_mode_parameters(rdev);

	rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);

	if (pi->voltage_control) {
		/* keep voltage safe while sclk changes; give it 5 ms */
		rs780_force_voltage_to_high(rdev);
		mdelay(5);
	}

	ret = rs780_set_engine_clock_scaling(rdev, new_ps, old_ps);
	if (ret)
		return ret;
	rs780_set_engine_clock_spc(rdev, new_ps, old_ps);

	rs780_activate_engine_clk_scaling(rdev, new_ps, old_ps);

	if (pi->voltage_control)
		rs780_enable_voltage_scaling(rdev, new_ps);

	rs780_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);

	return 0;
}
669
/*
 * rs780_dpm_setup_asic - nothing to pre-program on RS780/RS880; present
 * only to satisfy the dpm asic callback table.
 */
void rs780_dpm_setup_asic(struct radeon_device *rdev)
{
}
674
/*
 * rs780_dpm_display_configuration_changed - refresh the PM parameters
 * (crtc id / refresh rate) and reprogram the refresh-rate-scaled
 * activity thresholds after a display change.
 */
void rs780_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	rs780_get_pm_mode_parameters(rdev);
	rs780_program_at(rdev);
}
680
/* Overlay of the two ATOM integrated-system-info table revisions. */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
};
685
/* Overlay of the supported ATOM PowerPlay table revisions. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};
694
/* Overlay of the per-asic PPLIB clock-info layouts; only .rs780 is used here. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};
701
/* Overlay of the PPLIB power-state entry layouts (v1 is used here). */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
706
/*
 * rs780_parse_pplib_non_clock_info - fill a radeon_ps from the table's
 * non-clock entry: caps/classification, UVD clocks (explicit from newer
 * tables, defaults for UVD states in older ones, else 0), and record the
 * boot / UVD state pointers.
 */
static void rs780_parse_pplib_non_clock_info(struct radeon_device *rdev,
					     struct radeon_ps *rps,
					     struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					     u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		/* newer tables carry explicit UVD clocks */
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
		/* old table + UVD state: fall back to chip defaults */
		rps->vclk = RS780_DEFAULT_VCLK_FREQ;
		rps->dclk = RS780_DEFAULT_DCLK_FREQ;
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		rdev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}
732
/*
 * rs780_parse_pplib_clock_info - fill the igp_ps from the RS780 clock
 * entry: low/high engine clocks (split 16+8 bit fields in the table),
 * the VDDC level pair, and flags.  The boot state is overridden with the
 * default sclk and pinned at the high voltage level.
 */
static void rs780_parse_pplib_clock_info(struct radeon_device *rdev,
					 struct radeon_ps *rps,
					 union pplib_clock_info *clock_info)
{
	struct igp_ps *ps = rs780_get_ps(rps);
	u32 sclk;

	/* clocks are stored as 16-bit low word + 8-bit high byte */
	sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
	sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
	ps->sclk_low = sclk;
	sclk = le16_to_cpu(clock_info->rs780.usHighEngineClockLow);
	sclk |= clock_info->rs780.ucHighEngineClockHigh << 16;
	ps->sclk_high = sclk;
	switch (le16_to_cpu(clock_info->rs780.usVDDC)) {
	case ATOM_PPLIB_RS780_VOLTAGE_NONE:
	default:
		ps->min_voltage = RS780_VDDC_LEVEL_UNKNOWN;
		ps->max_voltage = RS780_VDDC_LEVEL_UNKNOWN;
		break;
	case ATOM_PPLIB_RS780_VOLTAGE_LOW:
		ps->min_voltage = RS780_VDDC_LEVEL_LOW;
		ps->max_voltage = RS780_VDDC_LEVEL_LOW;
		break;
	case ATOM_PPLIB_RS780_VOLTAGE_HIGH:
		ps->min_voltage = RS780_VDDC_LEVEL_HIGH;
		ps->max_voltage = RS780_VDDC_LEVEL_HIGH;
		break;
	case ATOM_PPLIB_RS780_VOLTAGE_VARIABLE:
		/* state may scale between the low and high levels */
		ps->min_voltage = RS780_VDDC_LEVEL_LOW;
		ps->max_voltage = RS780_VDDC_LEVEL_HIGH;
		break;
	}
	ps->flags = le32_to_cpu(clock_info->rs780.ulFlags);

	/* boot state: trust the default clock, run at full voltage */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		ps->sclk_low = rdev->clock.default_sclk;
		ps->sclk_high = rdev->clock.default_sclk;
		ps->min_voltage = RS780_VDDC_LEVEL_HIGH;
		ps->max_voltage = RS780_VDDC_LEVEL_HIGH;
	}
}
774
775static int rs780_parse_power_table(struct radeon_device *rdev)
776{
777 struct radeon_mode_info *mode_info = &rdev->mode_info;
778 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
779 union pplib_power_state *power_state;
780 int i;
781 union pplib_clock_info *clock_info;
782 union power_info *power_info;
783 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
784 u16 data_offset;
785 u8 frev, crev;
786 struct igp_ps *ps;
787
788 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
789 &frev, &crev, &data_offset))
790 return -EINVAL;
791 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
792
793 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
794 power_info->pplib.ucNumStates, GFP_KERNEL);
795 if (!rdev->pm.dpm.ps)
796 return -ENOMEM;
797 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
798 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
799 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
800
801 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
802 power_state = (union pplib_power_state *)
803 (mode_info->atom_context->bios + data_offset +
804 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
805 i * power_info->pplib.ucStateEntrySize);
806 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
807 (mode_info->atom_context->bios + data_offset +
808 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
809 (power_state->v1.ucNonClockStateIndex *
810 power_info->pplib.ucNonClockSize));
811 if (power_info->pplib.ucStateEntrySize - 1) {
812 clock_info = (union pplib_clock_info *)
813 (mode_info->atom_context->bios + data_offset +
814 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
815 (power_state->v1.ucClockStateIndices[0] *
816 power_info->pplib.ucClockInfoSize));
817 ps = kzalloc(sizeof(struct igp_ps), GFP_KERNEL);
818 if (ps == NULL) {
819 kfree(rdev->pm.dpm.ps);
820 return -ENOMEM;
821 }
822 rdev->pm.dpm.ps[i].ps_priv = ps;
823 rs780_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
824 non_clock_info,
825 power_info->pplib.ucNonClockSize);
826 rs780_parse_pplib_clock_info(rdev,
827 &rdev->pm.dpm.ps[i],
828 clock_info);
829 }
830 }
831 rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
832 return 0;
833}
834
835int rs780_dpm_init(struct radeon_device *rdev)
836{
837 struct igp_power_info *pi;
838 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
839 union igp_info *info;
840 u16 data_offset;
841 u8 frev, crev;
842 int ret;
843
844 pi = kzalloc(sizeof(struct igp_power_info), GFP_KERNEL);
845 if (pi == NULL)
846 return -ENOMEM;
847 rdev->pm.dpm.priv = pi;
848
849 ret = rs780_parse_power_table(rdev);
850 if (ret)
851 return ret;
852
853 pi->voltage_control = false;
854 pi->gfx_clock_gating = true;
855
856 if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
857 &frev, &crev, &data_offset)) {
858 info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);
859
860 /* Get various system informations from bios */
861 switch (crev) {
862 case 1:
863 pi->num_of_cycles_in_period =
864 info->info.ucNumberOfCyclesInPeriod;
865 pi->num_of_cycles_in_period |=
866 info->info.ucNumberOfCyclesInPeriodHi << 8;
867 pi->invert_pwm_required =
868 (pi->num_of_cycles_in_period & 0x8000) ? true : false;
869 pi->boot_voltage = info->info.ucStartingPWM_HighTime;
870 pi->max_voltage = info->info.ucMaxNBVoltage;
871 pi->max_voltage |= info->info.ucMaxNBVoltageHigh << 8;
872 pi->min_voltage = info->info.ucMinNBVoltage;
873 pi->min_voltage |= info->info.ucMinNBVoltageHigh << 8;
874 pi->inter_voltage_low =
875 le16_to_cpu(info->info.usInterNBVoltageLow);
876 pi->inter_voltage_high =
877 le16_to_cpu(info->info.usInterNBVoltageHigh);
878 pi->voltage_control = true;
879 pi->bootup_uma_clk = info->info.usK8MemoryClock * 100;
880 break;
881 case 2:
882 pi->num_of_cycles_in_period =
883 le16_to_cpu(info->info_2.usNumberOfCyclesInPeriod);
884 pi->invert_pwm_required =
885 (pi->num_of_cycles_in_period & 0x8000) ? true : false;
886 pi->boot_voltage =
887 le16_to_cpu(info->info_2.usBootUpNBVoltage);
888 pi->max_voltage =
889 le16_to_cpu(info->info_2.usMaxNBVoltage);
890 pi->min_voltage =
891 le16_to_cpu(info->info_2.usMinNBVoltage);
892 pi->system_config =
893 le32_to_cpu(info->info_2.ulSystemConfig);
894 pi->pwm_voltage_control =
895 (pi->system_config & 0x4) ? true : false;
896 pi->voltage_control = true;
897 pi->bootup_uma_clk = le32_to_cpu(info->info_2.ulBootUpUMAClock);
898 break;
899 default:
900 DRM_ERROR("No integrated system info for your GPU\n");
901 return -EINVAL;
902 }
903 if (pi->min_voltage > pi->max_voltage)
904 pi->voltage_control = false;
905 if (pi->pwm_voltage_control) {
906 if ((pi->num_of_cycles_in_period == 0) ||
907 (pi->max_voltage == 0) ||
908 (pi->min_voltage == 0))
909 pi->voltage_control = false;
910 } else {
911 if ((pi->num_of_cycles_in_period == 0) ||
912 (pi->max_voltage == 0))
913 pi->voltage_control = false;
914 }
915
916 return 0;
917 }
918 radeon_dpm_fini(rdev);
919 return -EINVAL;
920}
921
/*
 * rs780_dpm_print_power_state - dump a power state's class, caps, UVD
 * clocks and both sclk/voltage levels to the kernel log.
 */
void rs780_dpm_print_power_state(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct igp_ps *ps = rs780_get_ps(rps);

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	printk("\t\tpower level 0    sclk: %u vddc_index: %d\n",
	       ps->sclk_low, ps->min_voltage);
	printk("\t\tpower level 1    sclk: %u vddc_index: %d\n",
	       ps->sclk_high, ps->max_voltage);
	r600_dpm_print_ps_status(rdev, rps);
}
936
937void rs780_dpm_fini(struct radeon_device *rdev)
938{
939 int i;
940
941 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
942 kfree(rdev->pm.dpm.ps[i].ps_priv);
943 }
944 kfree(rdev->pm.dpm.ps);
945 kfree(rdev->pm.dpm.priv);
946}
947
948u32 rs780_dpm_get_sclk(struct radeon_device *rdev, bool low)
949{
950 struct igp_ps *requested_state = rs780_get_ps(rdev->pm.dpm.requested_ps);
951
952 if (low)
953 return requested_state->sclk_low;
954 else
955 return requested_state->sclk_high;
956}
957
/*
 * rs780_dpm_get_mclk - memory clock is fixed on these IGPs; always
 * report the boot-up UMA clock regardless of the @low selector.
 */
u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct igp_power_info *pi = rs780_get_pi(rdev);

	return pi->bootup_uma_clk;
}
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.h b/drivers/gpu/drm/radeon/rs780_dpm.h
new file mode 100644
index 000000000000..47a40b14fa43
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs780_dpm.h
@@ -0,0 +1,109 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __RS780_DPM_H__
24#define __RS780_DPM_H__
25
/* Coarse VDDC operating levels advertised by the RS780 PowerPlay tables. */
enum rs780_vddc_level {
	RS780_VDDC_LEVEL_UNKNOWN = 0,
	RS780_VDDC_LEVEL_LOW = 1,
	RS780_VDDC_LEVEL_HIGH = 2,
};
31
/* Driver-private DPM state for RS780/RS880 IGPs (rdev->pm.dpm.priv). */
struct igp_power_info {
	/* flags */
	bool invert_pwm_required;	/* PWM waveform must be inverted */
	bool pwm_voltage_control;	/* voltage via PWM (else GPIO defaults) */
	bool voltage_control;		/* voltage scaling usable at all */
	bool gfx_clock_gating;
	/* stored values */
	u32 system_config;
	u32 bootup_uma_clk;		/* presumably 10 kHz units — confirm */
	u16 max_voltage;		/* PWM hightime for the high level */
	u16 min_voltage;		/* PWM hightime for the low level */
	u16 boot_voltage;
	u16 inter_voltage_low;
	u16 inter_voltage_high;
	u16 num_of_cycles_in_period;	/* bit 15 doubles as invert flag */
	/* variable */
	int crtc_id;			/* crtc whose vblank gates switches */
	int refresh_rate;
};
51
/* Per-power-state data parsed from the PowerPlay table (radeon_ps.ps_priv). */
struct igp_ps {
	enum rs780_vddc_level min_voltage;
	enum rs780_vddc_level max_voltage;
	u32 sclk_low;	/* engine clock, presumably 10 kHz units — confirm */
	u32 sclk_high;
	u32 flags;
};
59
60#define RS780_CGFTV_DFLT 0x0303000f
61#define RS780_FBDIVTIMERVAL_DFLT 0x2710
62
63#define RS780_FVTHROTUTC0_DFLT 0x04010040
64#define RS780_FVTHROTUTC1_DFLT 0x04010040
65#define RS780_FVTHROTUTC2_DFLT 0x04010040
66#define RS780_FVTHROTUTC3_DFLT 0x04010040
67#define RS780_FVTHROTUTC4_DFLT 0x04010040
68
69#define RS780_FVTHROTDTC0_DFLT 0x04010040
70#define RS780_FVTHROTDTC1_DFLT 0x04010040
71#define RS780_FVTHROTDTC2_DFLT 0x04010040
72#define RS780_FVTHROTDTC3_DFLT 0x04010040
73#define RS780_FVTHROTDTC4_DFLT 0x04010040
74
75#define RS780_FVTHROTFBUSREG0_DFLT 0x00001001
76#define RS780_FVTHROTFBUSREG1_DFLT 0x00002002
77#define RS780_FVTHROTFBDSREG0_DFLT 0x00004001
78#define RS780_FVTHROTFBDSREG1_DFLT 0x00020010
79
80#define RS780_FVTHROTPWMUSREG0_DFLT 0x00002001
81#define RS780_FVTHROTPWMUSREG1_DFLT 0x00004003
82#define RS780_FVTHROTPWMDSREG0_DFLT 0x00002001
83#define RS780_FVTHROTPWMDSREG1_DFLT 0x00004003
84
85#define RS780_FVTHROTPWMFBDIVRANGEREG0_DFLT 0x37
86#define RS780_FVTHROTPWMFBDIVRANGEREG1_DFLT 0x4b
87#define RS780_FVTHROTPWMFBDIVRANGEREG2_DFLT 0x8b
88
89#define RS780D_FVTHROTPWMFBDIVRANGEREG0_DFLT 0x8b
90#define RS780D_FVTHROTPWMFBDIVRANGEREG1_DFLT 0x8c
91#define RS780D_FVTHROTPWMFBDIVRANGEREG2_DFLT 0xb5
92
93#define RS880D_FVTHROTPWMFBDIVRANGEREG0_DFLT 0x8d
94#define RS880D_FVTHROTPWMFBDIVRANGEREG1_DFLT 0x8e
95#define RS880D_FVTHROTPWMFBDIVRANGEREG2_DFLT 0xBa
96
97#define RS780_FVTHROTPWMRANGE0_GPIO_DFLT 0x1a
98#define RS780_FVTHROTPWMRANGE1_GPIO_DFLT 0x1a
99#define RS780_FVTHROTPWMRANGE2_GPIO_DFLT 0x0
100#define RS780_FVTHROTPWMRANGE3_GPIO_DFLT 0x0
101
102#define RS780_SLOWCLKFEEDBACKDIV_DFLT 110
103
104#define RS780_CGCLKGATING_DFLT 0x0000E204
105
106#define RS780_DEFAULT_VCLK_FREQ 53300 /* 10 khz */
107#define RS780_DEFAULT_DCLK_FREQ 40000 /* 10 khz */
108
109#endif
diff --git a/drivers/gpu/drm/radeon/rs780d.h b/drivers/gpu/drm/radeon/rs780d.h
new file mode 100644
index 000000000000..b1142ed1c628
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs780d.h
@@ -0,0 +1,168 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __RS780D_H__
24#define __RS780D_H__
25
26#define CG_SPLL_FUNC_CNTL 0x600
27# define SPLL_RESET (1 << 0)
28# define SPLL_SLEEP (1 << 1)
29# define SPLL_REF_DIV(x) ((x) << 2)
30# define SPLL_REF_DIV_MASK (7 << 2)
31# define SPLL_FB_DIV(x) ((x) << 5)
32# define SPLL_FB_DIV_MASK (0xff << 2)
33# define SPLL_FB_DIV_SHIFT 2
34# define SPLL_PULSEEN (1 << 13)
35# define SPLL_PULSENUM(x) ((x) << 14)
36# define SPLL_PULSENUM_MASK (3 << 14)
37# define SPLL_SW_HILEN(x) ((x) << 16)
38# define SPLL_SW_HILEN_MASK (0xf << 16)
39# define SPLL_SW_LOLEN(x) ((x) << 20)
40# define SPLL_SW_LOLEN_MASK (0xf << 20)
41# define SPLL_DIVEN (1 << 24)
42# define SPLL_BYPASS_EN (1 << 25)
43# define SPLL_CHG_STATUS (1 << 29)
44# define SPLL_CTLREQ (1 << 30)
45# define SPLL_CTLACK (1 << 31)
46
47/* RS780/RS880 PM */
48#define FVTHROT_CNTRL_REG 0x3000
49#define DONT_WAIT_FOR_FBDIV_WRAP (1 << 0)
50#define MINIMUM_CIP(x) ((x) << 1)
51#define MINIMUM_CIP_SHIFT 1
52#define MINIMUM_CIP_MASK 0x1fffffe
53#define REFRESH_RATE_DIVISOR(x) ((x) << 25)
54#define REFRESH_RATE_DIVISOR_SHIFT 25
55#define REFRESH_RATE_DIVISOR_MASK (0x3 << 25)
56#define ENABLE_FV_THROT (1 << 27)
57#define ENABLE_FV_UPDATE (1 << 28)
58#define TREND_SEL_MODE (1 << 29)
59#define FORCE_TREND_SEL (1 << 30)
60#define ENABLE_FV_THROT_IO (1 << 31)
61#define FVTHROT_TARGET_REG 0x3004
62#define TARGET_IDLE_COUNT(x) ((x) << 0)
63#define TARGET_IDLE_COUNT_MASK 0xffffff
64#define TARGET_IDLE_COUNT_SHIFT 0
65#define FVTHROT_CB1 0x3008
66#define FVTHROT_CB2 0x300c
67#define FVTHROT_CB3 0x3010
68#define FVTHROT_CB4 0x3014
69#define FVTHROT_UTC0 0x3018
70#define FVTHROT_UTC1 0x301c
71#define FVTHROT_UTC2 0x3020
72#define FVTHROT_UTC3 0x3024
73#define FVTHROT_UTC4 0x3028
74#define FVTHROT_DTC0 0x302c
75#define FVTHROT_DTC1 0x3030
76#define FVTHROT_DTC2 0x3034
77#define FVTHROT_DTC3 0x3038
78#define FVTHROT_DTC4 0x303c
79#define FVTHROT_FBDIV_REG0 0x3040
80#define MIN_FEEDBACK_DIV(x) ((x) << 0)
81#define MIN_FEEDBACK_DIV_MASK 0xfff
82#define MIN_FEEDBACK_DIV_SHIFT 0
83#define MAX_FEEDBACK_DIV(x) ((x) << 12)
84#define MAX_FEEDBACK_DIV_MASK (0xfff << 12)
85#define MAX_FEEDBACK_DIV_SHIFT 12
86#define FVTHROT_FBDIV_REG1 0x3044
87#define MAX_FEEDBACK_STEP(x) ((x) << 0)
88#define MAX_FEEDBACK_STEP_MASK 0xfff
89#define MAX_FEEDBACK_STEP_SHIFT 0
90#define STARTING_FEEDBACK_DIV(x) ((x) << 12)
91#define STARTING_FEEDBACK_DIV_MASK (0xfff << 12)
92#define STARTING_FEEDBACK_DIV_SHIFT 12
93#define FORCE_FEEDBACK_DIV (1 << 24)
94#define FVTHROT_FBDIV_REG2 0x3048
95#define FORCED_FEEDBACK_DIV(x) ((x) << 0)
96#define FORCED_FEEDBACK_DIV_MASK 0xfff
97#define FORCED_FEEDBACK_DIV_SHIFT 0
98#define FB_DIV_TIMER_VAL(x) ((x) << 12)
99#define FB_DIV_TIMER_VAL_MASK (0xffff << 12)
100#define FB_DIV_TIMER_VAL_SHIFT 12
101#define FVTHROT_FB_US_REG0 0x304c
102#define FVTHROT_FB_US_REG1 0x3050
103#define FVTHROT_FB_DS_REG0 0x3054
104#define FVTHROT_FB_DS_REG1 0x3058
105#define FVTHROT_PWM_CTRL_REG0 0x305c
106#define STARTING_PWM_HIGHTIME(x) ((x) << 0)
107#define STARTING_PWM_HIGHTIME_MASK 0xfff
108#define STARTING_PWM_HIGHTIME_SHIFT 0
109#define NUMBER_OF_CYCLES_IN_PERIOD(x) ((x) << 12)
110#define NUMBER_OF_CYCLES_IN_PERIOD_MASK (0xfff << 12)
111#define NUMBER_OF_CYCLES_IN_PERIOD_SHIFT 12
112#define FORCE_STARTING_PWM_HIGHTIME (1 << 24)
113#define INVERT_PWM_WAVEFORM (1 << 25)
114#define FVTHROT_PWM_CTRL_REG1 0x3060
115#define MIN_PWM_HIGHTIME(x) ((x) << 0)
116#define MIN_PWM_HIGHTIME_MASK 0xfff
117#define MIN_PWM_HIGHTIME_SHIFT 0
118#define MAX_PWM_HIGHTIME(x) ((x) << 12)
119#define MAX_PWM_HIGHTIME_MASK (0xfff << 12)
120#define MAX_PWM_HIGHTIME_SHIFT 12
121#define FVTHROT_PWM_US_REG0 0x3064
122#define FVTHROT_PWM_US_REG1 0x3068
123#define FVTHROT_PWM_DS_REG0 0x306c
124#define FVTHROT_PWM_DS_REG1 0x3070
125#define FVTHROT_STATUS_REG0 0x3074
126#define CURRENT_FEEDBACK_DIV_MASK 0xfff
127#define CURRENT_FEEDBACK_DIV_SHIFT 0
128#define FVTHROT_STATUS_REG1 0x3078
129#define FVTHROT_STATUS_REG2 0x307c
130#define CG_INTGFX_MISC 0x3080
131#define FVTHROT_VBLANK_SEL (1 << 9)
132#define FVTHROT_PWM_FEEDBACK_DIV_REG1 0x308c
133#define RANGE0_PWM_FEEDBACK_DIV(x) ((x) << 0)
134#define RANGE0_PWM_FEEDBACK_DIV_MASK 0xfff
135#define RANGE0_PWM_FEEDBACK_DIV_SHIFT 0
136#define RANGE_PWM_FEEDBACK_DIV_EN (1 << 12)
137#define FVTHROT_PWM_FEEDBACK_DIV_REG2 0x3090
138#define RANGE1_PWM_FEEDBACK_DIV(x) ((x) << 0)
139#define RANGE1_PWM_FEEDBACK_DIV_MASK 0xfff
140#define RANGE1_PWM_FEEDBACK_DIV_SHIFT 0
141#define RANGE2_PWM_FEEDBACK_DIV(x) ((x) << 12)
142#define RANGE2_PWM_FEEDBACK_DIV_MASK (0xfff << 12)
143#define RANGE2_PWM_FEEDBACK_DIV_SHIFT 12
144#define FVTHROT_PWM_FEEDBACK_DIV_REG3 0x3094
145#define RANGE0_PWM(x) ((x) << 0)
146#define RANGE0_PWM_MASK 0xfff
147#define RANGE0_PWM_SHIFT 0
148#define RANGE1_PWM(x) ((x) << 12)
149#define RANGE1_PWM_MASK (0xfff << 12)
150#define RANGE1_PWM_SHIFT 12
151#define FVTHROT_PWM_FEEDBACK_DIV_REG4 0x3098
152#define RANGE2_PWM(x) ((x) << 0)
153#define RANGE2_PWM_MASK 0xfff
154#define RANGE2_PWM_SHIFT 0
155#define RANGE3_PWM(x) ((x) << 12)
156#define RANGE3_PWM_MASK (0xfff << 12)
157#define RANGE3_PWM_SHIFT 12
158#define FVTHROT_SLOW_CLK_FEEDBACK_DIV_REG1 0x30ac
159#define RANGE0_SLOW_CLK_FEEDBACK_DIV(x) ((x) << 0)
160#define RANGE0_SLOW_CLK_FEEDBACK_DIV_MASK 0xfff
161#define RANGE0_SLOW_CLK_FEEDBACK_DIV_SHIFT 0
162#define RANGE_SLOW_CLK_FEEDBACK_DIV_EN (1 << 12)
163
164#define GFX_MACRO_BYPASS_CNTL 0x30c0
165#define SPLL_BYPASS_CNTL (1 << 0)
166#define UPLL_BYPASS_CNTL (1 << 1)
167
168#endif
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 21c7d7b26e55..8ea1573ae820 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -937,13 +937,16 @@ struct rv515_watermark {
937}; 937};
938 938
939static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, 939static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
940 struct radeon_crtc *crtc, 940 struct radeon_crtc *crtc,
941 struct rv515_watermark *wm) 941 struct rv515_watermark *wm,
942 bool low)
942{ 943{
943 struct drm_display_mode *mode = &crtc->base.mode; 944 struct drm_display_mode *mode = &crtc->base.mode;
944 fixed20_12 a, b, c; 945 fixed20_12 a, b, c;
945 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; 946 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
946 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; 947 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
948 fixed20_12 sclk;
949 u32 selected_sclk;
947 950
948 if (!crtc->base.enabled) { 951 if (!crtc->base.enabled) {
949 /* FIXME: wouldn't it better to set priority mark to maximum */ 952 /* FIXME: wouldn't it better to set priority mark to maximum */
@@ -951,6 +954,18 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
951 return; 954 return;
952 } 955 }
953 956
957 /* rv6xx, rv7xx */
958 if ((rdev->family >= CHIP_RV610) &&
959 (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
960 selected_sclk = radeon_dpm_get_sclk(rdev, low);
961 else
962 selected_sclk = rdev->pm.current_sclk;
963
964 /* sclk in Mhz */
965 a.full = dfixed_const(100);
966 sclk.full = dfixed_const(selected_sclk);
967 sclk.full = dfixed_div(sclk, a);
968
954 if (crtc->vsc.full > dfixed_const(2)) 969 if (crtc->vsc.full > dfixed_const(2))
955 wm->num_line_pair.full = dfixed_const(2); 970 wm->num_line_pair.full = dfixed_const(2);
956 else 971 else
@@ -1016,7 +1031,7 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
1016 * sclk = system clock(Mhz) 1031 * sclk = system clock(Mhz)
1017 */ 1032 */
1018 a.full = dfixed_const(600 * 1000); 1033 a.full = dfixed_const(600 * 1000);
1019 chunk_time.full = dfixed_div(a, rdev->pm.sclk); 1034 chunk_time.full = dfixed_div(a, sclk);
1020 read_delay_latency.full = dfixed_const(1000); 1035 read_delay_latency.full = dfixed_const(1000);
1021 1036
1022 /* Determine the worst case latency 1037 /* Determine the worst case latency
@@ -1077,152 +1092,177 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
1077 } 1092 }
1078} 1093}
1079 1094
1080void rv515_bandwidth_avivo_update(struct radeon_device *rdev) 1095static void rv515_compute_mode_priority(struct radeon_device *rdev,
1096 struct rv515_watermark *wm0,
1097 struct rv515_watermark *wm1,
1098 struct drm_display_mode *mode0,
1099 struct drm_display_mode *mode1,
1100 u32 *d1mode_priority_a_cnt,
1101 u32 *d2mode_priority_a_cnt)
1081{ 1102{
1082 struct drm_display_mode *mode0 = NULL;
1083 struct drm_display_mode *mode1 = NULL;
1084 struct rv515_watermark wm0;
1085 struct rv515_watermark wm1;
1086 u32 tmp;
1087 u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
1088 u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
1089 fixed20_12 priority_mark02, priority_mark12, fill_rate; 1103 fixed20_12 priority_mark02, priority_mark12, fill_rate;
1090 fixed20_12 a, b; 1104 fixed20_12 a, b;
1091 1105
1092 if (rdev->mode_info.crtcs[0]->base.enabled) 1106 *d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
1093 mode0 = &rdev->mode_info.crtcs[0]->base.mode; 1107 *d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
1094 if (rdev->mode_info.crtcs[1]->base.enabled)
1095 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
1096 rs690_line_buffer_adjust(rdev, mode0, mode1);
1097
1098 rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
1099 rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
1100
1101 tmp = wm0.lb_request_fifo_depth;
1102 tmp |= wm1.lb_request_fifo_depth << 16;
1103 WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
1104 1108
1105 if (mode0 && mode1) { 1109 if (mode0 && mode1) {
1106 if (dfixed_trunc(wm0.dbpp) > 64) 1110 if (dfixed_trunc(wm0->dbpp) > 64)
1107 a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); 1111 a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
1108 else 1112 else
1109 a.full = wm0.num_line_pair.full; 1113 a.full = wm0->num_line_pair.full;
1110 if (dfixed_trunc(wm1.dbpp) > 64) 1114 if (dfixed_trunc(wm1->dbpp) > 64)
1111 b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); 1115 b.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
1112 else 1116 else
1113 b.full = wm1.num_line_pair.full; 1117 b.full = wm1->num_line_pair.full;
1114 a.full += b.full; 1118 a.full += b.full;
1115 fill_rate.full = dfixed_div(wm0.sclk, a); 1119 fill_rate.full = dfixed_div(wm0->sclk, a);
1116 if (wm0.consumption_rate.full > fill_rate.full) { 1120 if (wm0->consumption_rate.full > fill_rate.full) {
1117 b.full = wm0.consumption_rate.full - fill_rate.full; 1121 b.full = wm0->consumption_rate.full - fill_rate.full;
1118 b.full = dfixed_mul(b, wm0.active_time); 1122 b.full = dfixed_mul(b, wm0->active_time);
1119 a.full = dfixed_const(16); 1123 a.full = dfixed_const(16);
1120 b.full = dfixed_div(b, a); 1124 b.full = dfixed_div(b, a);
1121 a.full = dfixed_mul(wm0.worst_case_latency, 1125 a.full = dfixed_mul(wm0->worst_case_latency,
1122 wm0.consumption_rate); 1126 wm0->consumption_rate);
1123 priority_mark02.full = a.full + b.full; 1127 priority_mark02.full = a.full + b.full;
1124 } else { 1128 } else {
1125 a.full = dfixed_mul(wm0.worst_case_latency, 1129 a.full = dfixed_mul(wm0->worst_case_latency,
1126 wm0.consumption_rate); 1130 wm0->consumption_rate);
1127 b.full = dfixed_const(16 * 1000); 1131 b.full = dfixed_const(16 * 1000);
1128 priority_mark02.full = dfixed_div(a, b); 1132 priority_mark02.full = dfixed_div(a, b);
1129 } 1133 }
1130 if (wm1.consumption_rate.full > fill_rate.full) { 1134 if (wm1->consumption_rate.full > fill_rate.full) {
1131 b.full = wm1.consumption_rate.full - fill_rate.full; 1135 b.full = wm1->consumption_rate.full - fill_rate.full;
1132 b.full = dfixed_mul(b, wm1.active_time); 1136 b.full = dfixed_mul(b, wm1->active_time);
1133 a.full = dfixed_const(16); 1137 a.full = dfixed_const(16);
1134 b.full = dfixed_div(b, a); 1138 b.full = dfixed_div(b, a);
1135 a.full = dfixed_mul(wm1.worst_case_latency, 1139 a.full = dfixed_mul(wm1->worst_case_latency,
1136 wm1.consumption_rate); 1140 wm1->consumption_rate);
1137 priority_mark12.full = a.full + b.full; 1141 priority_mark12.full = a.full + b.full;
1138 } else { 1142 } else {
1139 a.full = dfixed_mul(wm1.worst_case_latency, 1143 a.full = dfixed_mul(wm1->worst_case_latency,
1140 wm1.consumption_rate); 1144 wm1->consumption_rate);
1141 b.full = dfixed_const(16 * 1000); 1145 b.full = dfixed_const(16 * 1000);
1142 priority_mark12.full = dfixed_div(a, b); 1146 priority_mark12.full = dfixed_div(a, b);
1143 } 1147 }
1144 if (wm0.priority_mark.full > priority_mark02.full) 1148 if (wm0->priority_mark.full > priority_mark02.full)
1145 priority_mark02.full = wm0.priority_mark.full; 1149 priority_mark02.full = wm0->priority_mark.full;
1146 if (dfixed_trunc(priority_mark02) < 0) 1150 if (dfixed_trunc(priority_mark02) < 0)
1147 priority_mark02.full = 0; 1151 priority_mark02.full = 0;
1148 if (wm0.priority_mark_max.full > priority_mark02.full) 1152 if (wm0->priority_mark_max.full > priority_mark02.full)
1149 priority_mark02.full = wm0.priority_mark_max.full; 1153 priority_mark02.full = wm0->priority_mark_max.full;
1150 if (wm1.priority_mark.full > priority_mark12.full) 1154 if (wm1->priority_mark.full > priority_mark12.full)
1151 priority_mark12.full = wm1.priority_mark.full; 1155 priority_mark12.full = wm1->priority_mark.full;
1152 if (dfixed_trunc(priority_mark12) < 0) 1156 if (dfixed_trunc(priority_mark12) < 0)
1153 priority_mark12.full = 0; 1157 priority_mark12.full = 0;
1154 if (wm1.priority_mark_max.full > priority_mark12.full) 1158 if (wm1->priority_mark_max.full > priority_mark12.full)
1155 priority_mark12.full = wm1.priority_mark_max.full; 1159 priority_mark12.full = wm1->priority_mark_max.full;
1156 d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 1160 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
1157 d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); 1161 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
1158 if (rdev->disp_priority == 2) { 1162 if (rdev->disp_priority == 2) {
1159 d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; 1163 *d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
1160 d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; 1164 *d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
1161 } 1165 }
1162 } else if (mode0) { 1166 } else if (mode0) {
1163 if (dfixed_trunc(wm0.dbpp) > 64) 1167 if (dfixed_trunc(wm0->dbpp) > 64)
1164 a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); 1168 a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
1165 else 1169 else
1166 a.full = wm0.num_line_pair.full; 1170 a.full = wm0->num_line_pair.full;
1167 fill_rate.full = dfixed_div(wm0.sclk, a); 1171 fill_rate.full = dfixed_div(wm0->sclk, a);
1168 if (wm0.consumption_rate.full > fill_rate.full) { 1172 if (wm0->consumption_rate.full > fill_rate.full) {
1169 b.full = wm0.consumption_rate.full - fill_rate.full; 1173 b.full = wm0->consumption_rate.full - fill_rate.full;
1170 b.full = dfixed_mul(b, wm0.active_time); 1174 b.full = dfixed_mul(b, wm0->active_time);
1171 a.full = dfixed_const(16); 1175 a.full = dfixed_const(16);
1172 b.full = dfixed_div(b, a); 1176 b.full = dfixed_div(b, a);
1173 a.full = dfixed_mul(wm0.worst_case_latency, 1177 a.full = dfixed_mul(wm0->worst_case_latency,
1174 wm0.consumption_rate); 1178 wm0->consumption_rate);
1175 priority_mark02.full = a.full + b.full; 1179 priority_mark02.full = a.full + b.full;
1176 } else { 1180 } else {
1177 a.full = dfixed_mul(wm0.worst_case_latency, 1181 a.full = dfixed_mul(wm0->worst_case_latency,
1178 wm0.consumption_rate); 1182 wm0->consumption_rate);
1179 b.full = dfixed_const(16); 1183 b.full = dfixed_const(16);
1180 priority_mark02.full = dfixed_div(a, b); 1184 priority_mark02.full = dfixed_div(a, b);
1181 } 1185 }
1182 if (wm0.priority_mark.full > priority_mark02.full) 1186 if (wm0->priority_mark.full > priority_mark02.full)
1183 priority_mark02.full = wm0.priority_mark.full; 1187 priority_mark02.full = wm0->priority_mark.full;
1184 if (dfixed_trunc(priority_mark02) < 0) 1188 if (dfixed_trunc(priority_mark02) < 0)
1185 priority_mark02.full = 0; 1189 priority_mark02.full = 0;
1186 if (wm0.priority_mark_max.full > priority_mark02.full) 1190 if (wm0->priority_mark_max.full > priority_mark02.full)
1187 priority_mark02.full = wm0.priority_mark_max.full; 1191 priority_mark02.full = wm0->priority_mark_max.full;
1188 d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 1192 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
1189 if (rdev->disp_priority == 2) 1193 if (rdev->disp_priority == 2)
1190 d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; 1194 *d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
1191 } else if (mode1) { 1195 } else if (mode1) {
1192 if (dfixed_trunc(wm1.dbpp) > 64) 1196 if (dfixed_trunc(wm1->dbpp) > 64)
1193 a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); 1197 a.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
1194 else 1198 else
1195 a.full = wm1.num_line_pair.full; 1199 a.full = wm1->num_line_pair.full;
1196 fill_rate.full = dfixed_div(wm1.sclk, a); 1200 fill_rate.full = dfixed_div(wm1->sclk, a);
1197 if (wm1.consumption_rate.full > fill_rate.full) { 1201 if (wm1->consumption_rate.full > fill_rate.full) {
1198 b.full = wm1.consumption_rate.full - fill_rate.full; 1202 b.full = wm1->consumption_rate.full - fill_rate.full;
1199 b.full = dfixed_mul(b, wm1.active_time); 1203 b.full = dfixed_mul(b, wm1->active_time);
1200 a.full = dfixed_const(16); 1204 a.full = dfixed_const(16);
1201 b.full = dfixed_div(b, a); 1205 b.full = dfixed_div(b, a);
1202 a.full = dfixed_mul(wm1.worst_case_latency, 1206 a.full = dfixed_mul(wm1->worst_case_latency,
1203 wm1.consumption_rate); 1207 wm1->consumption_rate);
1204 priority_mark12.full = a.full + b.full; 1208 priority_mark12.full = a.full + b.full;
1205 } else { 1209 } else {
1206 a.full = dfixed_mul(wm1.worst_case_latency, 1210 a.full = dfixed_mul(wm1->worst_case_latency,
1207 wm1.consumption_rate); 1211 wm1->consumption_rate);
1208 b.full = dfixed_const(16 * 1000); 1212 b.full = dfixed_const(16 * 1000);
1209 priority_mark12.full = dfixed_div(a, b); 1213 priority_mark12.full = dfixed_div(a, b);
1210 } 1214 }
1211 if (wm1.priority_mark.full > priority_mark12.full) 1215 if (wm1->priority_mark.full > priority_mark12.full)
1212 priority_mark12.full = wm1.priority_mark.full; 1216 priority_mark12.full = wm1->priority_mark.full;
1213 if (dfixed_trunc(priority_mark12) < 0) 1217 if (dfixed_trunc(priority_mark12) < 0)
1214 priority_mark12.full = 0; 1218 priority_mark12.full = 0;
1215 if (wm1.priority_mark_max.full > priority_mark12.full) 1219 if (wm1->priority_mark_max.full > priority_mark12.full)
1216 priority_mark12.full = wm1.priority_mark_max.full; 1220 priority_mark12.full = wm1->priority_mark_max.full;
1217 d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); 1221 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
1218 if (rdev->disp_priority == 2) 1222 if (rdev->disp_priority == 2)
1219 d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; 1223 *d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
1220 } 1224 }
1225}
1226
1227void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
1228{
1229 struct drm_display_mode *mode0 = NULL;
1230 struct drm_display_mode *mode1 = NULL;
1231 struct rv515_watermark wm0_high, wm0_low;
1232 struct rv515_watermark wm1_high, wm1_low;
1233 u32 tmp;
1234 u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
1235 u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
1236
1237 if (rdev->mode_info.crtcs[0]->base.enabled)
1238 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
1239 if (rdev->mode_info.crtcs[1]->base.enabled)
1240 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
1241 rs690_line_buffer_adjust(rdev, mode0, mode1);
1242
1243 rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
1244 rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);
1245
1246 rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, false);
1247 rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, false);
1248
1249 tmp = wm0_high.lb_request_fifo_depth;
1250 tmp |= wm1_high.lb_request_fifo_depth << 16;
1251 WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
1252
1253 rv515_compute_mode_priority(rdev,
1254 &wm0_high, &wm1_high,
1255 mode0, mode1,
1256 &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
1257 rv515_compute_mode_priority(rdev,
1258 &wm0_low, &wm1_low,
1259 mode0, mode1,
1260 &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);
1221 1261
1222 WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); 1262 WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
1223 WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); 1263 WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
1224 WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); 1264 WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
1225 WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); 1265 WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
1226} 1266}
1227 1267
1228void rv515_bandwidth_update(struct radeon_device *rdev) 1268void rv515_bandwidth_update(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
new file mode 100644
index 000000000000..0e8b7d9b954b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -0,0 +1,2059 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "rv6xxd.h"
28#include "r600_dpm.h"
29#include "rv6xx_dpm.h"
30#include "atom.h"
31
32static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
33 u32 unscaled_count, u32 unit);
34
35static struct rv6xx_ps *rv6xx_get_ps(struct radeon_ps *rps)
36{
37 struct rv6xx_ps *ps = rps->ps_priv;
38
39 return ps;
40}
41
42static struct rv6xx_power_info *rv6xx_get_pi(struct radeon_device *rdev)
43{
44 struct rv6xx_power_info *pi = rdev->pm.dpm.priv;
45
46 return pi;
47}
48
49static void rv6xx_force_pcie_gen1(struct radeon_device *rdev)
50{
51 u32 tmp;
52 int i;
53
54 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
55 tmp &= LC_GEN2_EN;
56 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
57
58 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
59 tmp |= LC_INITIATE_LINK_SPEED_CHANGE;
60 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
61
62 for (i = 0; i < rdev->usec_timeout; i++) {
63 if (!(RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE))
64 break;
65 udelay(1);
66 }
67
68 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
69 tmp &= ~LC_INITIATE_LINK_SPEED_CHANGE;
70 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
71}
72
73static void rv6xx_enable_pcie_gen2_support(struct radeon_device *rdev)
74{
75 u32 tmp;
76
77 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
78
79 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
80 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
81 tmp |= LC_GEN2_EN;
82 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
83 }
84}
85
86static void rv6xx_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
87 bool enable)
88{
89 u32 tmp;
90
91 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
92 if (enable)
93 tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
94 else
95 tmp |= LC_HW_VOLTAGE_IF_CONTROL(0);
96 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
97}
98
99static void rv6xx_enable_l0s(struct radeon_device *rdev)
100{
101 u32 tmp;
102
103 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L0S_INACTIVITY_MASK;
104 tmp |= LC_L0S_INACTIVITY(3);
105 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
106}
107
108static void rv6xx_enable_l1(struct radeon_device *rdev)
109{
110 u32 tmp;
111
112 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL);
113 tmp &= ~LC_L1_INACTIVITY_MASK;
114 tmp |= LC_L1_INACTIVITY(4);
115 tmp &= ~LC_PMI_TO_L1_DIS;
116 tmp &= ~LC_ASPM_TO_L1_DIS;
117 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
118}
119
120static void rv6xx_enable_pll_sleep_in_l1(struct radeon_device *rdev)
121{
122 u32 tmp;
123
124 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L1_INACTIVITY_MASK;
125 tmp |= LC_L1_INACTIVITY(8);
126 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
127
128 /* NOTE, this is a PCIE indirect reg, not PCIE PORT */
129 tmp = RREG32_PCIE(PCIE_P_CNTL);
130 tmp |= P_PLL_PWRDN_IN_L1L23;
131 tmp &= ~P_PLL_BUF_PDNB;
132 tmp &= ~P_PLL_PDNB;
133 tmp |= P_ALLOW_PRX_FRONTEND_SHUTOFF;
134 WREG32_PCIE(PCIE_P_CNTL, tmp);
135}
136
137static int rv6xx_convert_clock_to_stepping(struct radeon_device *rdev,
138 u32 clock, struct rv6xx_sclk_stepping *step)
139{
140 int ret;
141 struct atom_clock_dividers dividers;
142
143 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
144 clock, false, &dividers);
145 if (ret)
146 return ret;
147
148 if (dividers.enable_post_div)
149 step->post_divider = 2 + (dividers.post_div & 0xF) + (dividers.post_div >> 4);
150 else
151 step->post_divider = 1;
152
153 step->vco_frequency = clock * step->post_divider;
154
155 return 0;
156}
157
158static void rv6xx_output_stepping(struct radeon_device *rdev,
159 u32 step_index, struct rv6xx_sclk_stepping *step)
160{
161 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
162 u32 ref_clk = rdev->clock.spll.reference_freq;
163 u32 fb_divider;
164 u32 spll_step_count = rv6xx_scale_count_given_unit(rdev,
165 R600_SPLLSTEPTIME_DFLT *
166 pi->spll_ref_div,
167 R600_SPLLSTEPUNIT_DFLT);
168
169 r600_engine_clock_entry_enable(rdev, step_index, true);
170 r600_engine_clock_entry_enable_pulse_skipping(rdev, step_index, false);
171
172 if (step->post_divider == 1)
173 r600_engine_clock_entry_enable_post_divider(rdev, step_index, false);
174 else {
175 u32 lo_len = (step->post_divider - 2) / 2;
176 u32 hi_len = step->post_divider - 2 - lo_len;
177
178 r600_engine_clock_entry_enable_post_divider(rdev, step_index, true);
179 r600_engine_clock_entry_set_post_divider(rdev, step_index, (hi_len << 4) | lo_len);
180 }
181
182 fb_divider = ((step->vco_frequency * pi->spll_ref_div) / ref_clk) >>
183 pi->fb_div_scale;
184
185 r600_engine_clock_entry_set_reference_divider(rdev, step_index,
186 pi->spll_ref_div - 1);
187 r600_engine_clock_entry_set_feedback_divider(rdev, step_index, fb_divider);
188 r600_engine_clock_entry_set_step_time(rdev, step_index, spll_step_count);
189
190}
191
192static struct rv6xx_sclk_stepping rv6xx_next_vco_step(struct radeon_device *rdev,
193 struct rv6xx_sclk_stepping *cur,
194 bool increasing_vco, u32 step_size)
195{
196 struct rv6xx_sclk_stepping next;
197
198 next.post_divider = cur->post_divider;
199
200 if (increasing_vco)
201 next.vco_frequency = (cur->vco_frequency * (100 + step_size)) / 100;
202 else
203 next.vco_frequency = (cur->vco_frequency * 100 + 99 + step_size) / (100 + step_size);
204
205 return next;
206}
207
208static bool rv6xx_can_step_post_div(struct radeon_device *rdev,
209 struct rv6xx_sclk_stepping *cur,
210 struct rv6xx_sclk_stepping *target)
211{
212 return (cur->post_divider > target->post_divider) &&
213 ((cur->vco_frequency * target->post_divider) <=
214 (target->vco_frequency * (cur->post_divider - 1)));
215}
216
217static struct rv6xx_sclk_stepping rv6xx_next_post_div_step(struct radeon_device *rdev,
218 struct rv6xx_sclk_stepping *cur,
219 struct rv6xx_sclk_stepping *target)
220{
221 struct rv6xx_sclk_stepping next = *cur;
222
223 while (rv6xx_can_step_post_div(rdev, &next, target))
224 next.post_divider--;
225
226 return next;
227}
228
229static bool rv6xx_reached_stepping_target(struct radeon_device *rdev,
230 struct rv6xx_sclk_stepping *cur,
231 struct rv6xx_sclk_stepping *target,
232 bool increasing_vco)
233{
234 return (increasing_vco && (cur->vco_frequency >= target->vco_frequency)) ||
235 (!increasing_vco && (cur->vco_frequency <= target->vco_frequency));
236}
237
238static void rv6xx_generate_steps(struct radeon_device *rdev,
239 u32 low, u32 high,
240 u32 start_index, u8 *end_index)
241{
242 struct rv6xx_sclk_stepping cur;
243 struct rv6xx_sclk_stepping target;
244 bool increasing_vco;
245 u32 step_index = start_index;
246
247 rv6xx_convert_clock_to_stepping(rdev, low, &cur);
248 rv6xx_convert_clock_to_stepping(rdev, high, &target);
249
250 rv6xx_output_stepping(rdev, step_index++, &cur);
251
252 increasing_vco = (target.vco_frequency >= cur.vco_frequency);
253
254 if (target.post_divider > cur.post_divider)
255 cur.post_divider = target.post_divider;
256
257 while (1) {
258 struct rv6xx_sclk_stepping next;
259
260 if (rv6xx_can_step_post_div(rdev, &cur, &target))
261 next = rv6xx_next_post_div_step(rdev, &cur, &target);
262 else
263 next = rv6xx_next_vco_step(rdev, &cur, increasing_vco, R600_VCOSTEPPCT_DFLT);
264
265 if (rv6xx_reached_stepping_target(rdev, &next, &target, increasing_vco)) {
266 struct rv6xx_sclk_stepping tiny =
267 rv6xx_next_vco_step(rdev, &target, !increasing_vco, R600_ENDINGVCOSTEPPCT_DFLT);
268 tiny.post_divider = next.post_divider;
269
270 if (!rv6xx_reached_stepping_target(rdev, &tiny, &cur, !increasing_vco))
271 rv6xx_output_stepping(rdev, step_index++, &tiny);
272
273 if ((next.post_divider != target.post_divider) &&
274 (next.vco_frequency != target.vco_frequency)) {
275 struct rv6xx_sclk_stepping final_vco;
276
277 final_vco.vco_frequency = target.vco_frequency;
278 final_vco.post_divider = next.post_divider;
279
280 rv6xx_output_stepping(rdev, step_index++, &final_vco);
281 }
282
283 rv6xx_output_stepping(rdev, step_index++, &target);
284 break;
285 } else
286 rv6xx_output_stepping(rdev, step_index++, &next);
287
288 cur = next;
289 }
290
291 *end_index = (u8)step_index - 1;
292
293}
294
295static void rv6xx_generate_single_step(struct radeon_device *rdev,
296 u32 clock, u32 index)
297{
298 struct rv6xx_sclk_stepping step;
299
300 rv6xx_convert_clock_to_stepping(rdev, clock, &step);
301 rv6xx_output_stepping(rdev, index, &step);
302}
303
304static void rv6xx_invalidate_intermediate_steps_range(struct radeon_device *rdev,
305 u32 start_index, u32 end_index)
306{
307 u32 step_index;
308
309 for (step_index = start_index + 1; step_index < end_index; step_index++)
310 r600_engine_clock_entry_enable(rdev, step_index, false);
311}
312
313static void rv6xx_set_engine_spread_spectrum_clk_s(struct radeon_device *rdev,
314 u32 index, u32 clk_s)
315{
316 WREG32_P(CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4),
317 CLKS(clk_s), ~CLKS_MASK);
318}
319
320static void rv6xx_set_engine_spread_spectrum_clk_v(struct radeon_device *rdev,
321 u32 index, u32 clk_v)
322{
323 WREG32_P(CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4),
324 CLKV(clk_v), ~CLKV_MASK);
325}
326
327static void rv6xx_enable_engine_spread_spectrum(struct radeon_device *rdev,
328 u32 index, bool enable)
329{
330 if (enable)
331 WREG32_P(CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4),
332 SSEN, ~SSEN);
333 else
334 WREG32_P(CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4),
335 0, ~SSEN);
336}
337
338static void rv6xx_set_memory_spread_spectrum_clk_s(struct radeon_device *rdev,
339 u32 clk_s)
340{
341 WREG32_P(CG_MPLL_SPREAD_SPECTRUM, CLKS(clk_s), ~CLKS_MASK);
342}
343
344static void rv6xx_set_memory_spread_spectrum_clk_v(struct radeon_device *rdev,
345 u32 clk_v)
346{
347 WREG32_P(CG_MPLL_SPREAD_SPECTRUM, CLKV(clk_v), ~CLKV_MASK);
348}
349
350static void rv6xx_enable_memory_spread_spectrum(struct radeon_device *rdev,
351 bool enable)
352{
353 if (enable)
354 WREG32_P(CG_MPLL_SPREAD_SPECTRUM, SSEN, ~SSEN);
355 else
356 WREG32_P(CG_MPLL_SPREAD_SPECTRUM, 0, ~SSEN);
357}
358
359static void rv6xx_enable_dynamic_spread_spectrum(struct radeon_device *rdev,
360 bool enable)
361{
362 if (enable)
363 WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
364 else
365 WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
366}
367
368static void rv6xx_memory_clock_entry_enable_post_divider(struct radeon_device *rdev,
369 u32 index, bool enable)
370{
371 if (enable)
372 WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4),
373 LEVEL0_MPLL_DIV_EN, ~LEVEL0_MPLL_DIV_EN);
374 else
375 WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4), 0, ~LEVEL0_MPLL_DIV_EN);
376}
377
378static void rv6xx_memory_clock_entry_set_post_divider(struct radeon_device *rdev,
379 u32 index, u32 divider)
380{
381 WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4),
382 LEVEL0_MPLL_POST_DIV(divider), ~LEVEL0_MPLL_POST_DIV_MASK);
383}
384
385static void rv6xx_memory_clock_entry_set_feedback_divider(struct radeon_device *rdev,
386 u32 index, u32 divider)
387{
388 WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4), LEVEL0_MPLL_FB_DIV(divider),
389 ~LEVEL0_MPLL_FB_DIV_MASK);
390}
391
392static void rv6xx_memory_clock_entry_set_reference_divider(struct radeon_device *rdev,
393 u32 index, u32 divider)
394{
395 WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4),
396 LEVEL0_MPLL_REF_DIV(divider), ~LEVEL0_MPLL_REF_DIV_MASK);
397}
398
399static void rv6xx_vid_response_set_brt(struct radeon_device *rdev, u32 rt)
400{
401 WREG32_P(VID_RT, BRT(rt), ~BRT_MASK);
402}
403
404static void rv6xx_enable_engine_feedback_and_reference_sync(struct radeon_device *rdev)
405{
406 WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
407}
408
409static u64 rv6xx_clocks_per_unit(u32 unit)
410{
411 u64 tmp = 1 << (2 * unit);
412
413 return tmp;
414}
415
416static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
417 u32 unscaled_count, u32 unit)
418{
419 u32 count_per_unit = (u32)rv6xx_clocks_per_unit(unit);
420
421 return (unscaled_count + count_per_unit - 1) / count_per_unit;
422}
423
424static u32 rv6xx_compute_count_for_delay(struct radeon_device *rdev,
425 u32 delay_us, u32 unit)
426{
427 u32 ref_clk = rdev->clock.spll.reference_freq;
428
429 return rv6xx_scale_count_given_unit(rdev, delay_us * (ref_clk / 100), unit);
430}
431
432static void rv6xx_calculate_engine_speed_stepping_parameters(struct radeon_device *rdev,
433 struct rv6xx_ps *state)
434{
435 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
436
437 pi->hw.sclks[R600_POWER_LEVEL_LOW] =
438 state->low.sclk;
439 pi->hw.sclks[R600_POWER_LEVEL_MEDIUM] =
440 state->medium.sclk;
441 pi->hw.sclks[R600_POWER_LEVEL_HIGH] =
442 state->high.sclk;
443
444 pi->hw.low_sclk_index = R600_POWER_LEVEL_LOW;
445 pi->hw.medium_sclk_index = R600_POWER_LEVEL_MEDIUM;
446 pi->hw.high_sclk_index = R600_POWER_LEVEL_HIGH;
447}
448
449static void rv6xx_calculate_memory_clock_stepping_parameters(struct radeon_device *rdev,
450 struct rv6xx_ps *state)
451{
452 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
453
454 pi->hw.mclks[R600_POWER_LEVEL_CTXSW] =
455 state->high.mclk;
456 pi->hw.mclks[R600_POWER_LEVEL_HIGH] =
457 state->high.mclk;
458 pi->hw.mclks[R600_POWER_LEVEL_MEDIUM] =
459 state->medium.mclk;
460 pi->hw.mclks[R600_POWER_LEVEL_LOW] =
461 state->low.mclk;
462
463 pi->hw.high_mclk_index = R600_POWER_LEVEL_HIGH;
464
465 if (state->high.mclk == state->medium.mclk)
466 pi->hw.medium_mclk_index =
467 pi->hw.high_mclk_index;
468 else
469 pi->hw.medium_mclk_index = R600_POWER_LEVEL_MEDIUM;
470
471
472 if (state->medium.mclk == state->low.mclk)
473 pi->hw.low_mclk_index =
474 pi->hw.medium_mclk_index;
475 else
476 pi->hw.low_mclk_index = R600_POWER_LEVEL_LOW;
477}
478
479static void rv6xx_calculate_voltage_stepping_parameters(struct radeon_device *rdev,
480 struct rv6xx_ps *state)
481{
482 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
483
484 pi->hw.vddc[R600_POWER_LEVEL_CTXSW] = state->high.vddc;
485 pi->hw.vddc[R600_POWER_LEVEL_HIGH] = state->high.vddc;
486 pi->hw.vddc[R600_POWER_LEVEL_MEDIUM] = state->medium.vddc;
487 pi->hw.vddc[R600_POWER_LEVEL_LOW] = state->low.vddc;
488
489 pi->hw.backbias[R600_POWER_LEVEL_CTXSW] =
490 (state->high.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
491 pi->hw.backbias[R600_POWER_LEVEL_HIGH] =
492 (state->high.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
493 pi->hw.backbias[R600_POWER_LEVEL_MEDIUM] =
494 (state->medium.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
495 pi->hw.backbias[R600_POWER_LEVEL_LOW] =
496 (state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
497
498 pi->hw.pcie_gen2[R600_POWER_LEVEL_HIGH] =
499 (state->high.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? true : false;
500 pi->hw.pcie_gen2[R600_POWER_LEVEL_MEDIUM] =
501 (state->medium.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? true : false;
502 pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW] =
503 (state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? true : false;
504
505 pi->hw.high_vddc_index = R600_POWER_LEVEL_HIGH;
506
507 if ((state->high.vddc == state->medium.vddc) &&
508 ((state->high.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ==
509 (state->medium.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE)))
510 pi->hw.medium_vddc_index =
511 pi->hw.high_vddc_index;
512 else
513 pi->hw.medium_vddc_index = R600_POWER_LEVEL_MEDIUM;
514
515 if ((state->medium.vddc == state->low.vddc) &&
516 ((state->medium.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ==
517 (state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE)))
518 pi->hw.low_vddc_index =
519 pi->hw.medium_vddc_index;
520 else
521 pi->hw.medium_vddc_index = R600_POWER_LEVEL_LOW;
522}
523
524static inline u32 rv6xx_calculate_vco_frequency(u32 ref_clock,
525 struct atom_clock_dividers *dividers,
526 u32 fb_divider_scale)
527{
528 return ref_clock * ((dividers->fb_div & ~1) << fb_divider_scale) /
529 (dividers->ref_div + 1);
530}
531
532static inline u32 rv6xx_calculate_spread_spectrum_clk_v(u32 vco_freq, u32 ref_freq,
533 u32 ss_rate, u32 ss_percent,
534 u32 fb_divider_scale)
535{
536 u32 fb_divider = vco_freq / ref_freq;
537
538 return (ss_percent * ss_rate * 4 * (fb_divider * fb_divider) /
539 (5375 * ((vco_freq * 10) / (4096 >> fb_divider_scale))));
540}
541
542static inline u32 rv6xx_calculate_spread_spectrum_clk_s(u32 ss_rate, u32 ref_freq)
543{
544 return (((ref_freq * 10) / (ss_rate * 2)) - 1) / 4;
545}
546
/* Program engine (sclk) spread spectrum for one power level.
 * SS is first disabled for the level; it is re-enabled only if the PLL
 * dividers can be computed and the ATOM BIOS reports SS info for the
 * resulting VCO frequency. A zero clock or disabled sclk_ss leaves SS off. */
static void rv6xx_program_engine_spread_spectrum(struct radeon_device *rdev,
						 u32 clock, enum r600_power_level level)
{
	u32 ref_clk = rdev->clock.spll.reference_freq;
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	struct atom_clock_dividers dividers;
	struct radeon_atom_ss ss;
	u32 vco_freq, clk_v, clk_s;

	/* Always start from the SS-disabled state for this level. */
	rv6xx_enable_engine_spread_spectrum(rdev, level, false);

	if (clock && pi->sclk_ss) {
		if (radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, clock, false, &dividers) == 0) {
			vco_freq = rv6xx_calculate_vco_frequency(ref_clk, &dividers,
								 pi->fb_div_scale);

			/* Only enable SS if the BIOS has parameters for this VCO. */
			if (radeon_atombios_get_asic_ss_info(rdev, &ss,
							     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
				clk_v = rv6xx_calculate_spread_spectrum_clk_v(vco_freq,
									      (ref_clk / (dividers.ref_div + 1)),
									      ss.rate,
									      ss.percentage,
									      pi->fb_div_scale);

				clk_s = rv6xx_calculate_spread_spectrum_clk_s(ss.rate,
									      (ref_clk / (dividers.ref_div + 1)));

				/* Program delta and step before enabling SS. */
				rv6xx_set_engine_spread_spectrum_clk_v(rdev, level, clk_v);
				rv6xx_set_engine_spread_spectrum_clk_s(rdev, level, clk_s);
				rv6xx_enable_engine_spread_spectrum(rdev, level, true);
			}
		}
	}
}
581
582static void rv6xx_program_sclk_spread_spectrum_parameters_except_lowest_entry(struct radeon_device *rdev)
583{
584 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
585
586 rv6xx_program_engine_spread_spectrum(rdev,
587 pi->hw.sclks[R600_POWER_LEVEL_HIGH],
588 R600_POWER_LEVEL_HIGH);
589
590 rv6xx_program_engine_spread_spectrum(rdev,
591 pi->hw.sclks[R600_POWER_LEVEL_MEDIUM],
592 R600_POWER_LEVEL_MEDIUM);
593
594}
595
596static int rv6xx_program_mclk_stepping_entry(struct radeon_device *rdev,
597 u32 entry, u32 clock)
598{
599 struct atom_clock_dividers dividers;
600
601 if (radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM, clock, false, &dividers))
602 return -EINVAL;
603
604
605 rv6xx_memory_clock_entry_set_reference_divider(rdev, entry, dividers.ref_div);
606 rv6xx_memory_clock_entry_set_feedback_divider(rdev, entry, dividers.fb_div);
607 rv6xx_memory_clock_entry_set_post_divider(rdev, entry, dividers.post_div);
608
609 if (dividers.enable_post_div)
610 rv6xx_memory_clock_entry_enable_post_divider(rdev, entry, true);
611 else
612 rv6xx_memory_clock_entry_enable_post_divider(rdev, entry, false);
613
614 return 0;
615}
616
617static void rv6xx_program_mclk_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
618{
619 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
620 int i;
621
622 for (i = 1; i < R600_PM_NUMBER_OF_MCLKS; i++) {
623 if (pi->hw.mclks[i])
624 rv6xx_program_mclk_stepping_entry(rdev, i,
625 pi->hw.mclks[i]);
626 }
627}
628
629static void rv6xx_find_memory_clock_with_highest_vco(struct radeon_device *rdev,
630 u32 requested_memory_clock,
631 u32 ref_clk,
632 struct atom_clock_dividers *dividers,
633 u32 *vco_freq)
634{
635 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
636 struct atom_clock_dividers req_dividers;
637 u32 vco_freq_temp;
638
639 if (radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
640 requested_memory_clock, false, &req_dividers) == 0) {
641 vco_freq_temp = rv6xx_calculate_vco_frequency(ref_clk, &req_dividers,
642 pi->fb_div_scale);
643
644 if (vco_freq_temp > *vco_freq) {
645 *dividers = req_dividers;
646 *vco_freq = vco_freq_temp;
647 }
648 }
649}
650
/* Program memory-clock spread spectrum using the dividers of whichever
 * configured mclk (high/medium/low) produces the highest VCO frequency.
 * SS is disabled first and re-enabled only if a non-zero VCO was found
 * AND the ATOM BIOS reports SS info for it. Note: 'dividers' is only
 * valid when vco_freq != 0 — each find call updates both together. */
static void rv6xx_program_mclk_spread_spectrum_parameters(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	u32 ref_clk = rdev->clock.mpll.reference_freq;
	struct atom_clock_dividers dividers;
	struct radeon_atom_ss ss;
	u32 vco_freq = 0, clk_v, clk_s;

	rv6xx_enable_memory_spread_spectrum(rdev, false);

	if (pi->mclk_ss) {
		/* Running-max search over the three configured mclks. */
		rv6xx_find_memory_clock_with_highest_vco(rdev,
							 pi->hw.mclks[pi->hw.high_mclk_index],
							 ref_clk,
							 &dividers,
							 &vco_freq);

		rv6xx_find_memory_clock_with_highest_vco(rdev,
							 pi->hw.mclks[pi->hw.medium_mclk_index],
							 ref_clk,
							 &dividers,
							 &vco_freq);

		rv6xx_find_memory_clock_with_highest_vco(rdev,
							 pi->hw.mclks[pi->hw.low_mclk_index],
							 ref_clk,
							 &dividers,
							 &vco_freq);

		if (vco_freq) {
			if (radeon_atombios_get_asic_ss_info(rdev, &ss,
							     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
				clk_v = rv6xx_calculate_spread_spectrum_clk_v(vco_freq,
									      (ref_clk / (dividers.ref_div + 1)),
									      ss.rate,
									      ss.percentage,
									      pi->fb_div_scale);

				clk_s = rv6xx_calculate_spread_spectrum_clk_s(ss.rate,
									      (ref_clk / (dividers.ref_div + 1)));

				/* Program delta/step before enabling SS. */
				rv6xx_set_memory_spread_spectrum_clk_v(rdev, clk_v);
				rv6xx_set_memory_spread_spectrum_clk_s(rdev, clk_s);
				rv6xx_enable_memory_spread_spectrum(rdev, true);
			}
		}
	}
}
699
700static int rv6xx_program_voltage_stepping_entry(struct radeon_device *rdev,
701 u32 entry, u16 voltage)
702{
703 u32 mask, set_pins;
704 int ret;
705
706 ret = radeon_atom_get_voltage_gpio_settings(rdev, voltage,
707 SET_VOLTAGE_TYPE_ASIC_VDDC,
708 &set_pins, &mask);
709 if (ret)
710 return ret;
711
712 r600_voltage_control_program_voltages(rdev, entry, set_pins);
713
714 return 0;
715}
716
717static void rv6xx_program_voltage_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
718{
719 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
720 int i;
721
722 for (i = 1; i < R600_PM_NUMBER_OF_VOLTAGE_LEVELS; i++)
723 rv6xx_program_voltage_stepping_entry(rdev, i,
724 pi->hw.vddc[i]);
725
726}
727
728static void rv6xx_program_backbias_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
729{
730 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
731
732 if (pi->hw.backbias[1])
733 WREG32_P(VID_UPPER_GPIO_CNTL, MEDIUM_BACKBIAS_VALUE, ~MEDIUM_BACKBIAS_VALUE);
734 else
735 WREG32_P(VID_UPPER_GPIO_CNTL, 0, ~MEDIUM_BACKBIAS_VALUE);
736
737 if (pi->hw.backbias[2])
738 WREG32_P(VID_UPPER_GPIO_CNTL, HIGH_BACKBIAS_VALUE, ~HIGH_BACKBIAS_VALUE);
739 else
740 WREG32_P(VID_UPPER_GPIO_CNTL, 0, ~HIGH_BACKBIAS_VALUE);
741}
742
743static void rv6xx_program_sclk_spread_spectrum_parameters_lowest_entry(struct radeon_device *rdev)
744{
745 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
746
747 rv6xx_program_engine_spread_spectrum(rdev,
748 pi->hw.sclks[R600_POWER_LEVEL_LOW],
749 R600_POWER_LEVEL_LOW);
750}
751
752static void rv6xx_program_mclk_stepping_parameters_lowest_entry(struct radeon_device *rdev)
753{
754 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
755
756 if (pi->hw.mclks[0])
757 rv6xx_program_mclk_stepping_entry(rdev, 0,
758 pi->hw.mclks[0]);
759}
760
761static void rv6xx_program_voltage_stepping_parameters_lowest_entry(struct radeon_device *rdev)
762{
763 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
764
765 rv6xx_program_voltage_stepping_entry(rdev, 0,
766 pi->hw.vddc[0]);
767
768}
769
770static void rv6xx_program_backbias_stepping_parameters_lowest_entry(struct radeon_device *rdev)
771{
772 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
773
774 if (pi->hw.backbias[0])
775 WREG32_P(VID_UPPER_GPIO_CNTL, LOW_BACKBIAS_VALUE, ~LOW_BACKBIAS_VALUE);
776 else
777 WREG32_P(VID_UPPER_GPIO_CNTL, 0, ~LOW_BACKBIAS_VALUE);
778}
779
780static u32 calculate_memory_refresh_rate(struct radeon_device *rdev,
781 u32 engine_clock)
782{
783 u32 dram_rows, dram_refresh_rate;
784 u32 tmp;
785
786 tmp = (RREG32(RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
787 dram_rows = 1 << (tmp + 10);
788 dram_refresh_rate = 1 << ((RREG32(MC_SEQ_RESERVE_M) & 0x3) + 3);
789
790 return ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
791}
792
/* Program memory arbitration timing: clamp the effective high clock to
 * 0xFF/0x40 (255/64, ~4x) of the low clock, set DRAM timings for it,
 * then program per-state SQM ratios (64 * high/level clock) and per-mode
 * refresh rates. */
static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	u32 sqm_ratio;
	u32 arb_refresh_rate;
	u32 high_clock;

	/* high_clock = min(high sclk, low sclk * 255/64) */
	if (pi->hw.sclks[R600_POWER_LEVEL_HIGH] <
	    (pi->hw.sclks[R600_POWER_LEVEL_LOW] * 0xFF / 0x40))
		high_clock = pi->hw.sclks[R600_POWER_LEVEL_HIGH];
	else
		high_clock =
			pi->hw.sclks[R600_POWER_LEVEL_LOW] * 0xFF / 0x40;

	radeon_atom_set_engine_dram_timings(rdev, high_clock, 0);

	/* Ratio of the clamped high clock to each level's clock, in 1/64ths;
	 * states 2 and 3 both use the HIGH level. */
	sqm_ratio = (STATE0(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_LOW]) |
		     STATE1(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_MEDIUM]) |
		     STATE2(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_HIGH]) |
		     STATE3(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_HIGH]));
	WREG32(SQM_RATIO, sqm_ratio);

	/* Refresh rate per power mode; modes 1 and 2 both use MEDIUM. */
	arb_refresh_rate =
		(POWERMODE0(calculate_memory_refresh_rate(rdev,
							  pi->hw.sclks[R600_POWER_LEVEL_LOW])) |
		 POWERMODE1(calculate_memory_refresh_rate(rdev,
							  pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
		 POWERMODE2(calculate_memory_refresh_rate(rdev,
							  pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
		 POWERMODE3(calculate_memory_refresh_rate(rdev,
							  pi->hw.sclks[R600_POWER_LEVEL_HIGH])));
	WREG32(ARB_RFSH_RATE, arb_refresh_rate);
}
826
827static void rv6xx_program_mpll_timing_parameters(struct radeon_device *rdev)
828{
829 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
830
831 r600_set_mpll_lock_time(rdev, R600_MPLLLOCKTIME_DFLT *
832 pi->mpll_ref_div);
833 r600_set_mpll_reset_time(rdev, R600_MPLLRESETTIME_DFLT);
834}
835
836static void rv6xx_program_bsp(struct radeon_device *rdev)
837{
838 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
839 u32 ref_clk = rdev->clock.spll.reference_freq;
840
841 r600_calculate_u_and_p(R600_ASI_DFLT,
842 ref_clk, 16,
843 &pi->bsp,
844 &pi->bsu);
845
846 r600_set_bsp(rdev, pi->bsu, pi->bsp);
847}
848
849static void rv6xx_program_at(struct radeon_device *rdev)
850{
851 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
852
853 r600_set_at(rdev,
854 (pi->hw.rp[0] * pi->bsp) / 200,
855 (pi->hw.rp[1] * pi->bsp) / 200,
856 (pi->hw.lp[2] * pi->bsp) / 200,
857 (pi->hw.lp[1] * pi->bsp) / 200);
858}
859
860static void rv6xx_program_git(struct radeon_device *rdev)
861{
862 r600_set_git(rdev, R600_GICST_DFLT);
863}
864
865static void rv6xx_program_tp(struct radeon_device *rdev)
866{
867 int i;
868
869 for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
870 r600_set_tc(rdev, i, r600_utc[i], r600_dtc[i]);
871
872 r600_select_td(rdev, R600_TD_DFLT);
873}
874
875static void rv6xx_program_vc(struct radeon_device *rdev)
876{
877 r600_set_vrc(rdev, R600_VRC_DFLT);
878}
879
/* Clear the VRC register, disabling voltage control. */
static void rv6xx_clear_vc(struct radeon_device *rdev)
{
	r600_set_vrc(rdev, 0);
}
884
885static void rv6xx_program_tpp(struct radeon_device *rdev)
886{
887 r600_set_tpu(rdev, R600_TPU_DFLT);
888 r600_set_tpc(rdev, R600_TPC_DFLT);
889}
890
891static void rv6xx_program_sstp(struct radeon_device *rdev)
892{
893 r600_set_sstu(rdev, R600_SSTU_DFLT);
894 r600_set_sst(rdev, R600_SST_DFLT);
895}
896
897static void rv6xx_program_fcp(struct radeon_device *rdev)
898{
899 r600_set_fctu(rdev, R600_FCTU_DFLT);
900 r600_set_fct(rdev, R600_FCT_DFLT);
901}
902
903static void rv6xx_program_vddc3d_parameters(struct radeon_device *rdev)
904{
905 r600_set_vddc3d_oorsu(rdev, R600_VDDC3DOORSU_DFLT);
906 r600_set_vddc3d_oorphc(rdev, R600_VDDC3DOORPHC_DFLT);
907 r600_set_vddc3d_oorsdc(rdev, R600_VDDC3DOORSDC_DFLT);
908 r600_set_ctxcgtt3d_rphc(rdev, R600_CTXCGTT3DRPHC_DFLT);
909 r600_set_ctxcgtt3d_rsdc(rdev, R600_CTXCGTT3DRSDC_DFLT);
910}
911
912static void rv6xx_program_voltage_timing_parameters(struct radeon_device *rdev)
913{
914 u32 rt;
915
916 r600_vid_rt_set_vru(rdev, R600_VRU_DFLT);
917
918 r600_vid_rt_set_vrt(rdev,
919 rv6xx_compute_count_for_delay(rdev,
920 rdev->pm.dpm.voltage_response_time,
921 R600_VRU_DFLT));
922
923 rt = rv6xx_compute_count_for_delay(rdev,
924 rdev->pm.dpm.backbias_response_time,
925 R600_VRU_DFLT);
926
927 rv6xx_vid_response_set_brt(rdev, (rt + 0x1F) >> 5);
928}
929
930static void rv6xx_program_engine_speed_parameters(struct radeon_device *rdev)
931{
932 r600_vid_rt_set_ssu(rdev, R600_SPLLSTEPUNIT_DFLT);
933 rv6xx_enable_engine_feedback_and_reference_sync(rdev);
934}
935
936static u64 rv6xx_get_master_voltage_mask(struct radeon_device *rdev)
937{
938 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
939 u64 master_mask = 0;
940 int i;
941
942 for (i = 0; i < R600_PM_NUMBER_OF_VOLTAGE_LEVELS; i++) {
943 u32 tmp_mask, tmp_set_pins;
944 int ret;
945
946 ret = radeon_atom_get_voltage_gpio_settings(rdev,
947 pi->hw.vddc[i],
948 SET_VOLTAGE_TYPE_ASIC_VDDC,
949 &tmp_set_pins, &tmp_mask);
950
951 if (ret == 0)
952 master_mask |= tmp_mask;
953 }
954
955 return master_mask;
956}
957
958static void rv6xx_program_voltage_gpio_pins(struct radeon_device *rdev)
959{
960 r600_voltage_control_enable_pins(rdev,
961 rv6xx_get_master_voltage_mask(rdev));
962}
963
964static void rv6xx_enable_static_voltage_control(struct radeon_device *rdev,
965 struct radeon_ps *new_ps,
966 bool enable)
967{
968 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
969
970 if (enable)
971 radeon_atom_set_voltage(rdev,
972 new_state->low.vddc,
973 SET_VOLTAGE_TYPE_ASIC_VDDC);
974 else
975 r600_voltage_control_deactivate_static_control(rdev,
976 rv6xx_get_master_voltage_mask(rdev));
977}
978
979static void rv6xx_enable_display_gap(struct radeon_device *rdev, bool enable)
980{
981 if (enable) {
982 u32 tmp = (DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM) |
983 DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM) |
984 DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
985 DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
986 VBI_TIMER_COUNT(0x3FFF) |
987 VBI_TIMER_UNIT(7));
988 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
989
990 WREG32_P(MCLK_PWRMGT_CNTL, USE_DISPLAY_GAP, ~USE_DISPLAY_GAP);
991 } else
992 WREG32_P(MCLK_PWRMGT_CNTL, 0, ~USE_DISPLAY_GAP);
993}
994
995static void rv6xx_program_power_level_enter_state(struct radeon_device *rdev)
996{
997 r600_power_level_set_enter_index(rdev, R600_POWER_LEVEL_MEDIUM);
998}
999
/* Adjust the left/right activity thresholds (*l, *r) between two clock
 * levels (l_f low, h_f high) by an interpolation correction; h is the
 * hysteresis default and d_l/d_r the default left/right thresholds.
 * Thresholds are left untouched when the denominator would be zero. */
static void rv6xx_calculate_t(u32 l_f, u32 h_f, int h,
			      int d_l, int d_r, u8 *l, u8 *r)
{
	int a_n, a_d, h_r, l_r;

	h_r = d_l;
	l_r = 100 - d_r;

	/* correction = a_n / a_d, applied in opposite directions to l and r */
	a_n = (int)h_f * d_l + (int)l_f * (h - d_r);
	a_d = (int)l_f * l_r + (int)h_f * h_r;

	if (a_d != 0) {
		*l = d_l - h_r * a_n / a_d;
		*r = d_r + l_r * a_n / a_d;
	}
}
1016
1017static void rv6xx_calculate_ap(struct radeon_device *rdev,
1018 struct rv6xx_ps *state)
1019{
1020 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1021
1022 pi->hw.lp[0] = 0;
1023 pi->hw.rp[R600_PM_NUMBER_OF_ACTIVITY_LEVELS - 1]
1024 = 100;
1025
1026 rv6xx_calculate_t(state->low.sclk,
1027 state->medium.sclk,
1028 R600_AH_DFLT,
1029 R600_LMP_DFLT,
1030 R600_RLP_DFLT,
1031 &pi->hw.lp[1],
1032 &pi->hw.rp[0]);
1033
1034 rv6xx_calculate_t(state->medium.sclk,
1035 state->high.sclk,
1036 R600_AH_DFLT,
1037 R600_LHP_DFLT,
1038 R600_RMP_DFLT,
1039 &pi->hw.lp[2],
1040 &pi->hw.rp[1]);
1041
1042}
1043
/* Derive all stepping parameters (sclk, mclk, voltage, activity) for a
 * new power state. */
static void rv6xx_calculate_stepping_parameters(struct radeon_device *rdev,
						struct radeon_ps *new_ps)
{
	struct rv6xx_ps *state = rv6xx_get_ps(new_ps);

	rv6xx_calculate_engine_speed_stepping_parameters(rdev, state);
	rv6xx_calculate_memory_clock_stepping_parameters(rdev, state);
	rv6xx_calculate_voltage_stepping_parameters(rdev, state);
	rv6xx_calculate_ap(rdev, state);
}
1054
1055static void rv6xx_program_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
1056{
1057 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1058
1059 rv6xx_program_mclk_stepping_parameters_except_lowest_entry(rdev);
1060 if (pi->voltage_control)
1061 rv6xx_program_voltage_stepping_parameters_except_lowest_entry(rdev);
1062 rv6xx_program_backbias_stepping_parameters_except_lowest_entry(rdev);
1063 rv6xx_program_sclk_spread_spectrum_parameters_except_lowest_entry(rdev);
1064 rv6xx_program_mclk_spread_spectrum_parameters(rdev);
1065 rv6xx_program_memory_timing_parameters(rdev);
1066}
1067
1068static void rv6xx_program_stepping_parameters_lowest_entry(struct radeon_device *rdev)
1069{
1070 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1071
1072 rv6xx_program_mclk_stepping_parameters_lowest_entry(rdev);
1073 if (pi->voltage_control)
1074 rv6xx_program_voltage_stepping_parameters_lowest_entry(rdev);
1075 rv6xx_program_backbias_stepping_parameters_lowest_entry(rdev);
1076 rv6xx_program_sclk_spread_spectrum_parameters_lowest_entry(rdev);
1077}
1078
1079static void rv6xx_program_power_level_low(struct radeon_device *rdev)
1080{
1081 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1082
1083 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_LOW,
1084 pi->hw.low_vddc_index);
1085 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_LOW,
1086 pi->hw.low_mclk_index);
1087 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_LOW,
1088 pi->hw.low_sclk_index);
1089 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_LOW,
1090 R600_DISPLAY_WATERMARK_LOW);
1091 r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_LOW,
1092 pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW]);
1093}
1094
1095static void rv6xx_program_power_level_low_to_lowest_state(struct radeon_device *rdev)
1096{
1097 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1098
1099 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_LOW, 0);
1100 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_LOW, 0);
1101 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_LOW, 0);
1102
1103 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_LOW,
1104 R600_DISPLAY_WATERMARK_LOW);
1105
1106 r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_LOW,
1107 pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW]);
1108
1109}
1110
1111static void rv6xx_program_power_level_medium(struct radeon_device *rdev)
1112{
1113 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1114
1115 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_MEDIUM,
1116 pi->hw.medium_vddc_index);
1117 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
1118 pi->hw.medium_mclk_index);
1119 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
1120 pi->hw.medium_sclk_index);
1121 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_MEDIUM,
1122 R600_DISPLAY_WATERMARK_LOW);
1123 r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_MEDIUM,
1124 pi->hw.pcie_gen2[R600_POWER_LEVEL_MEDIUM]);
1125}
1126
/* Configure the MEDIUM level as a transition step: its memory clock is
 * staged into the CTXSW entry (set to the low-index mclk), voltage index
 * is pinned to 1, engine SS is disabled for the level, and the LOW
 * level's PCIe-gen2 capability is used (safe lowest common setting
 * during the transition). */
static void rv6xx_program_power_level_medium_for_transition(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);

	/* Stage the low-index memory clock in the CTXSW stepping entry. */
	rv6xx_program_mclk_stepping_entry(rdev,
					  R600_POWER_LEVEL_CTXSW,
					  pi->hw.mclks[pi->hw.low_mclk_index]);

	r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_MEDIUM, 1);

	r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
					     R600_POWER_LEVEL_CTXSW);
	r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
					     pi->hw.medium_sclk_index);

	r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_MEDIUM,
					  R600_DISPLAY_WATERMARK_LOW);

	rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_MEDIUM, false);

	/* Intentionally LOW's pcie_gen2 setting, not MEDIUM's. */
	r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_MEDIUM,
				       pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW]);
}
1150
1151static void rv6xx_program_power_level_high(struct radeon_device *rdev)
1152{
1153 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1154
1155 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_HIGH,
1156 pi->hw.high_vddc_index);
1157 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_HIGH,
1158 pi->hw.high_mclk_index);
1159 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_HIGH,
1160 pi->hw.high_sclk_index);
1161
1162 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_HIGH,
1163 R600_DISPLAY_WATERMARK_HIGH);
1164
1165 r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_HIGH,
1166 pi->hw.pcie_gen2[R600_POWER_LEVEL_HIGH]);
1167}
1168
1169static void rv6xx_enable_backbias(struct radeon_device *rdev, bool enable)
1170{
1171 if (enable)
1172 WREG32_P(GENERAL_PWRMGT, BACKBIAS_PAD_EN | BACKBIAS_DPM_CNTL,
1173 ~(BACKBIAS_PAD_EN | BACKBIAS_DPM_CNTL));
1174 else
1175 WREG32_P(GENERAL_PWRMGT, 0,
1176 ~(BACKBIAS_VALUE | BACKBIAS_PAD_EN | BACKBIAS_DPM_CNTL));
1177}
1178
1179static void rv6xx_program_display_gap(struct radeon_device *rdev)
1180{
1181 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
1182
1183 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
1184 if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) {
1185 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
1186 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1187 } else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) {
1188 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1189 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
1190 } else {
1191 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1192 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1193 }
1194 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
1195}
1196
1197static void rv6xx_set_sw_voltage_to_safe(struct radeon_device *rdev,
1198 struct radeon_ps *new_ps,
1199 struct radeon_ps *old_ps)
1200{
1201 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1202 struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1203 u16 safe_voltage;
1204
1205 safe_voltage = (new_state->low.vddc >= old_state->low.vddc) ?
1206 new_state->low.vddc : old_state->low.vddc;
1207
1208 rv6xx_program_voltage_stepping_entry(rdev, R600_POWER_LEVEL_CTXSW,
1209 safe_voltage);
1210
1211 WREG32_P(GENERAL_PWRMGT, SW_GPIO_INDEX(R600_POWER_LEVEL_CTXSW),
1212 ~SW_GPIO_INDEX_MASK);
1213}
1214
1215static void rv6xx_set_sw_voltage_to_low(struct radeon_device *rdev,
1216 struct radeon_ps *old_ps)
1217{
1218 struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1219
1220 rv6xx_program_voltage_stepping_entry(rdev, R600_POWER_LEVEL_CTXSW,
1221 old_state->low.vddc);
1222
1223 WREG32_P(GENERAL_PWRMGT, SW_GPIO_INDEX(R600_POWER_LEVEL_CTXSW),
1224 ~SW_GPIO_INDEX_MASK);
1225}
1226
1227static void rv6xx_set_safe_backbias(struct radeon_device *rdev,
1228 struct radeon_ps *new_ps,
1229 struct radeon_ps *old_ps)
1230{
1231 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1232 struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1233
1234 if ((new_state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) &&
1235 (old_state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE))
1236 WREG32_P(GENERAL_PWRMGT, BACKBIAS_VALUE, ~BACKBIAS_VALUE);
1237 else
1238 WREG32_P(GENERAL_PWRMGT, 0, ~BACKBIAS_VALUE);
1239}
1240
1241static void rv6xx_set_safe_pcie_gen2(struct radeon_device *rdev,
1242 struct radeon_ps *new_ps,
1243 struct radeon_ps *old_ps)
1244{
1245 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1246 struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1247
1248 if ((new_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) !=
1249 (old_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2))
1250 rv6xx_force_pcie_gen1(rdev);
1251}
1252
1253static void rv6xx_enable_dynamic_voltage_control(struct radeon_device *rdev,
1254 bool enable)
1255{
1256 if (enable)
1257 WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
1258 else
1259 WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
1260}
1261
1262static void rv6xx_enable_dynamic_backbias_control(struct radeon_device *rdev,
1263 bool enable)
1264{
1265 if (enable)
1266 WREG32_P(GENERAL_PWRMGT, BACKBIAS_DPM_CNTL, ~BACKBIAS_DPM_CNTL);
1267 else
1268 WREG32_P(GENERAL_PWRMGT, 0, ~BACKBIAS_DPM_CNTL);
1269}
1270
/* Step the SW-controlled (CTXSW) voltage from initial_voltage to
 * target_voltage one hardware step at a time, sleeping the platform
 * voltage response time between steps. Both endpoints are first rounded
 * to true hardware voltages. Returns -EINVAL if ATOM lookups fail.
 * NOTE(review): the loop assumes the step size evenly divides the
 * distance between the two rounded voltages — if it did not, the loop
 * would overshoot and not terminate; presumably rounding guarantees
 * this. TODO confirm against radeon_atom_round_to_true_voltage. */
static int rv6xx_step_sw_voltage(struct radeon_device *rdev,
				 u16 initial_voltage,
				 u16 target_voltage)
{
	u16 current_voltage;
	u16 true_target_voltage;
	u16 voltage_step;
	int signed_voltage_step;

	if ((radeon_atom_get_voltage_step(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
					  &voltage_step)) ||
	    (radeon_atom_round_to_true_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
					       initial_voltage, &current_voltage)) ||
	    (radeon_atom_round_to_true_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
					       target_voltage, &true_target_voltage)))
		return -EINVAL;

	/* Step direction depends on whether we are raising or lowering. */
	if (true_target_voltage < current_voltage)
		signed_voltage_step = -(int)voltage_step;
	else
		signed_voltage_step = voltage_step;

	while (current_voltage != true_target_voltage) {
		current_voltage += signed_voltage_step;
		rv6xx_program_voltage_stepping_entry(rdev, R600_POWER_LEVEL_CTXSW,
						     current_voltage);
		/* voltage_response_time is in us; msleep wants ms, round up */
		msleep((rdev->pm.dpm.voltage_response_time + 999) / 1000);
	}

	return 0;
}
1302
1303static int rv6xx_step_voltage_if_increasing(struct radeon_device *rdev,
1304 struct radeon_ps *new_ps,
1305 struct radeon_ps *old_ps)
1306{
1307 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1308 struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1309
1310 if (new_state->low.vddc > old_state->low.vddc)
1311 return rv6xx_step_sw_voltage(rdev,
1312 old_state->low.vddc,
1313 new_state->low.vddc);
1314
1315 return 0;
1316}
1317
1318static int rv6xx_step_voltage_if_decreasing(struct radeon_device *rdev,
1319 struct radeon_ps *new_ps,
1320 struct radeon_ps *old_ps)
1321{
1322 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1323 struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1324
1325 if (new_state->low.vddc < old_state->low.vddc)
1326 return rv6xx_step_sw_voltage(rdev,
1327 old_state->low.vddc,
1328 new_state->low.vddc);
1329 else
1330 return 0;
1331}
1332
1333static void rv6xx_enable_high(struct radeon_device *rdev)
1334{
1335 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1336
1337 if ((pi->restricted_levels < 1) ||
1338 (pi->restricted_levels == 3))
1339 r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true);
1340}
1341
1342static void rv6xx_enable_medium(struct radeon_device *rdev)
1343{
1344 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1345
1346 if (pi->restricted_levels < 2)
1347 r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
1348}
1349
/* Map the active auto-throttle source bitmask onto the DPM event source
 * register and thermal protection enable. With no (or unknown) sources,
 * thermal protection is disabled outright. Note: dpm_event_src is only
 * assigned on the cases that also set want_thermal_protection = true,
 * which are the only paths that read it. */
static void rv6xx_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;

	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;

	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		/* DIGIAL is the enum's actual (misspelled) identifier */
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
		WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
		/* Clear the disable bit only if the platform supports it. */
		if (pi->thermal_protection)
			WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	} else {
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
	}
}
1386
1387static void rv6xx_enable_auto_throttle_source(struct radeon_device *rdev,
1388 enum radeon_dpm_auto_throttle_src source,
1389 bool enable)
1390{
1391 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1392
1393 if (enable) {
1394 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1395 pi->active_auto_throttle_sources |= 1 << source;
1396 rv6xx_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1397 }
1398 } else {
1399 if (pi->active_auto_throttle_sources & (1 << source)) {
1400 pi->active_auto_throttle_sources &= ~(1 << source);
1401 rv6xx_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1402 }
1403 }
1404}
1405
1406
1407static void rv6xx_enable_thermal_protection(struct radeon_device *rdev,
1408 bool enable)
1409{
1410 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1411
1412 if (pi->active_auto_throttle_sources)
1413 r600_enable_thermal_protection(rdev, enable);
1414}
1415
1416static void rv6xx_generate_transition_stepping(struct radeon_device *rdev,
1417 struct radeon_ps *new_ps,
1418 struct radeon_ps *old_ps)
1419{
1420 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1421 struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1422 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1423
1424 rv6xx_generate_steps(rdev,
1425 old_state->low.sclk,
1426 new_state->low.sclk,
1427 0, &pi->hw.medium_sclk_index);
1428}
1429
1430static void rv6xx_generate_low_step(struct radeon_device *rdev,
1431 struct radeon_ps *new_ps)
1432{
1433 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1434 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1435
1436 pi->hw.low_sclk_index = 0;
1437 rv6xx_generate_single_step(rdev,
1438 new_state->low.sclk,
1439 0);
1440}
1441
1442static void rv6xx_invalidate_intermediate_steps(struct radeon_device *rdev)
1443{
1444 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1445
1446 rv6xx_invalidate_intermediate_steps_range(rdev, 0,
1447 pi->hw.medium_sclk_index);
1448}
1449
1450static void rv6xx_generate_stepping_table(struct radeon_device *rdev,
1451 struct radeon_ps *new_ps)
1452{
1453 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1454 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1455
1456 pi->hw.low_sclk_index = 0;
1457
1458 rv6xx_generate_steps(rdev,
1459 new_state->low.sclk,
1460 new_state->medium.sclk,
1461 0,
1462 &pi->hw.medium_sclk_index);
1463 rv6xx_generate_steps(rdev,
1464 new_state->medium.sclk,
1465 new_state->high.sclk,
1466 pi->hw.medium_sclk_index,
1467 &pi->hw.high_sclk_index);
1468}
1469
1470static void rv6xx_enable_spread_spectrum(struct radeon_device *rdev,
1471 bool enable)
1472{
1473 if (enable)
1474 rv6xx_enable_dynamic_spread_spectrum(rdev, true);
1475 else {
1476 rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_LOW, false);
1477 rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_MEDIUM, false);
1478 rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_HIGH, false);
1479 rv6xx_enable_dynamic_spread_spectrum(rdev, false);
1480 rv6xx_enable_memory_spread_spectrum(rdev, false);
1481 }
1482}
1483
1484static void rv6xx_reset_lvtm_data_sync(struct radeon_device *rdev)
1485{
1486 if (ASIC_IS_DCE3(rdev))
1487 WREG32_P(DCE3_LVTMA_DATA_SYNCHRONIZATION, LVTMA_PFREQCHG, ~LVTMA_PFREQCHG);
1488 else
1489 WREG32_P(LVTMA_DATA_SYNCHRONIZATION, LVTMA_PFREQCHG, ~LVTMA_PFREQCHG);
1490}
1491
1492static void rv6xx_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
1493 struct radeon_ps *new_ps,
1494 bool enable)
1495{
1496 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1497
1498 if (enable) {
1499 rv6xx_enable_bif_dynamic_pcie_gen2(rdev, true);
1500 rv6xx_enable_pcie_gen2_support(rdev);
1501 r600_enable_dynamic_pcie_gen2(rdev, true);
1502 } else {
1503 if (!(new_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2))
1504 rv6xx_force_pcie_gen1(rdev);
1505 rv6xx_enable_bif_dynamic_pcie_gen2(rdev, false);
1506 r600_enable_dynamic_pcie_gen2(rdev, false);
1507 }
1508}
1509
1510static void rv6xx_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
1511 struct radeon_ps *new_ps,
1512 struct radeon_ps *old_ps)
1513{
1514 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1515 struct rv6xx_ps *current_state = rv6xx_get_ps(old_ps);
1516
1517 if ((new_ps->vclk == old_ps->vclk) &&
1518 (new_ps->dclk == old_ps->dclk))
1519 return;
1520
1521 if (new_state->high.sclk >= current_state->high.sclk)
1522 return;
1523
1524 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
1525}
1526
1527static void rv6xx_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
1528 struct radeon_ps *new_ps,
1529 struct radeon_ps *old_ps)
1530{
1531 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1532 struct rv6xx_ps *current_state = rv6xx_get_ps(old_ps);
1533
1534 if ((new_ps->vclk == old_ps->vclk) &&
1535 (new_ps->dclk == old_ps->dclk))
1536 return;
1537
1538 if (new_state->high.sclk < current_state->high.sclk)
1539 return;
1540
1541 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
1542}
1543
/*
 * Bring up dynamic power management using the boot state.
 * Returns -EINVAL if dpm is already running, a negative errno if the
 * thermal range setup fails, 0 on success.
 * NOTE: the call order below is an order-critical hardware programming
 * sequence; do not reorder.
 */
int rv6xx_dpm_enable(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	/* refuse a double enable */
	if (r600_dynamicpm_enabled(rdev))
		return -EINVAL;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
		rv6xx_enable_backbias(rdev, true);

	if (pi->dynamic_ss)
		rv6xx_enable_spread_spectrum(rdev, true);

	/* program the static timing/voltage parameters */
	rv6xx_program_mpll_timing_parameters(rdev);
	rv6xx_program_bsp(rdev);
	rv6xx_program_git(rdev);
	rv6xx_program_tp(rdev);
	rv6xx_program_tpp(rdev);
	rv6xx_program_sstp(rdev);
	rv6xx_program_fcp(rdev);
	rv6xx_program_vddc3d_parameters(rdev);
	rv6xx_program_voltage_timing_parameters(rdev);
	rv6xx_program_engine_speed_parameters(rdev);

	/* the gap is always programmed, then switched back off when the
	 * platform doesn't want it */
	rv6xx_enable_display_gap(rdev, true);
	if (pi->display_gap == false)
		rv6xx_enable_display_gap(rdev, false);

	rv6xx_program_power_level_enter_state(rdev);

	/* build the clock/voltage stepping tables from the boot state */
	rv6xx_calculate_stepping_parameters(rdev, boot_ps);

	if (pi->voltage_control)
		rv6xx_program_voltage_gpio_pins(rdev);

	rv6xx_generate_stepping_table(rdev, boot_ps);

	rv6xx_program_stepping_parameters_except_lowest_entry(rdev);
	rv6xx_program_stepping_parameters_lowest_entry(rdev);

	/* program and arm all three performance levels */
	rv6xx_program_power_level_low(rdev);
	rv6xx_program_power_level_medium(rdev);
	rv6xx_program_power_level_high(rdev);
	rv6xx_program_vc(rdev);
	rv6xx_program_at(rdev);

	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true);

	/* hook up the thermal interrupt when an internal sensor exists */
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	rv6xx_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	r600_start_dpm(rdev);

	/* hand voltage control over to the hardware */
	if (pi->voltage_control)
		rv6xx_enable_static_voltage_control(rdev, boot_ps, false);

	if (pi->dynamic_pcie_gen2)
		rv6xx_enable_dynamic_pcie_gen2(rdev, boot_ps, true);

	if (pi->gfx_clock_gating)
		r600_gfx_clockgating_enable(rdev, true);

	return 0;
}
1620
/*
 * Tear down dynamic power management, parking the hardware on the low
 * level before handing control back to the driver. No-op when dpm is
 * not currently enabled. Order-critical; do not reorder.
 */
void rv6xx_dpm_disable(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	if (!r600_dynamicpm_enabled(rdev))
		return;

	/* make sure the low/medium levels are selectable so we can
	 * drop down to the low level */
	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
	rv6xx_enable_display_gap(rdev, false);
	rv6xx_clear_vc(rdev);
	r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);

	if (pi->thermal_protection)
		r600_enable_thermal_protection(rdev, false);

	/* park on the low level, then disable the other two */
	r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
		rv6xx_enable_backbias(rdev, false);

	rv6xx_enable_spread_spectrum(rdev, false);

	/* hand voltage control back to software at the boot values */
	if (pi->voltage_control)
		rv6xx_enable_static_voltage_control(rdev, boot_ps, true);

	if (pi->dynamic_pcie_gen2)
		rv6xx_enable_dynamic_pcie_gen2(rdev, boot_ps, false);

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		rdev->irq.dpm_thermal = false;
		radeon_irq_set(rdev);
	}

	if (pi->gfx_clock_gating)
		r600_gfx_clockgating_enable(rdev, false);

	r600_stop_dpm(rdev);
}
1664
/*
 * Transition the hardware from the current power state to the requested
 * one. The sequence parks on the low level, routes the transition through
 * a specially-programmed medium level while voltages/backbias/pcie are
 * held at safe values, then rebuilds and re-arms all three levels for the
 * new state. Order-critical; do not reorder.
 * Returns 0 on success, or the error from the voltage step-down.
 */
int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
{
	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
	struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
	int ret;

	/* UVD clocks are lowered before the engine clock drop */
	rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);

	/* park the hardware on the low level */
	rv6xx_clear_vc(rdev);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
	r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);

	if (pi->thermal_protection)
		r600_enable_thermal_protection(rdev, false);

	r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);

	/* repurpose the medium level as the transition step */
	rv6xx_generate_transition_stepping(rdev, new_ps, old_ps);
	rv6xx_program_power_level_medium_for_transition(rdev);

	/* move voltage/backbias/pcie to values safe for both states */
	if (pi->voltage_control) {
		rv6xx_set_sw_voltage_to_safe(rdev, new_ps, old_ps);
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
			rv6xx_set_sw_voltage_to_low(rdev, old_ps);
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
		rv6xx_set_safe_backbias(rdev, new_ps, old_ps);

	if (pi->dynamic_pcie_gen2)
		rv6xx_set_safe_pcie_gen2(rdev, new_ps, old_ps);

	/* take the dynamic controllers out of the loop for the switch */
	if (pi->voltage_control)
		rv6xx_enable_dynamic_voltage_control(rdev, false);

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
		rv6xx_enable_dynamic_backbias_control(rdev, false);

	/* raise voltage first when going up, and give it time to settle
	 * (voltage_response_time is in us, msleep takes ms) */
	if (pi->voltage_control) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
			rv6xx_step_voltage_if_increasing(rdev, new_ps, old_ps);
		msleep((rdev->pm.dpm.voltage_response_time + 999) / 1000);
	}

	/* hop onto the transition (medium) level */
	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false);
	r600_wait_for_power_level_unequal(rdev, R600_POWER_LEVEL_LOW);

	/* rebuild the low level for the new state and move onto it */
	rv6xx_generate_low_step(rdev, new_ps);
	rv6xx_invalidate_intermediate_steps(rdev);
	rv6xx_calculate_stepping_parameters(rdev, new_ps);
	rv6xx_program_stepping_parameters_lowest_entry(rdev);
	rv6xx_program_power_level_low_to_lowest_state(rdev);

	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
	r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);

	/* lower voltage when going down, then re-enable dynamic control */
	if (pi->voltage_control) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) {
			ret = rv6xx_step_voltage_if_decreasing(rdev, new_ps, old_ps);
			if (ret)
				return ret;
		}
		rv6xx_enable_dynamic_voltage_control(rdev, true);
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
		rv6xx_enable_dynamic_backbias_control(rdev, true);

	if (pi->dynamic_pcie_gen2)
		rv6xx_enable_dynamic_pcie_gen2(rdev, new_ps, true);

	rv6xx_reset_lvtm_data_sync(rdev);

	/* program the final stepping table and all three levels */
	rv6xx_generate_stepping_table(rdev, new_ps);
	rv6xx_program_stepping_parameters_except_lowest_entry(rdev);
	rv6xx_program_power_level_low(rdev);
	rv6xx_program_power_level_medium(rdev);
	rv6xx_program_power_level_high(rdev);
	rv6xx_enable_medium(rdev);
	rv6xx_enable_high(rdev);

	if (pi->thermal_protection)
		rv6xx_enable_thermal_protection(rdev, true);
	rv6xx_program_vc(rdev);
	rv6xx_program_at(rdev);

	/* UVD clocks are raised after the engine clock increase */
	rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);

	return 0;
}
1760
1761void rv6xx_setup_asic(struct radeon_device *rdev)
1762{
1763 r600_enable_acpi_pm(rdev);
1764
1765 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
1766 rv6xx_enable_l0s(rdev);
1767 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
1768 rv6xx_enable_l1(rdev);
1769 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
1770 rv6xx_enable_pll_sleep_in_l1(rdev);
1771}
1772
/* Called when the display configuration changes; reprograms the display
 * gap so mclk changes stay synchronized with the active displays. */
void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	rv6xx_program_display_gap(rdev);
}
1777
/* Overlay of every powerplay info table layout found in the vbios;
 * which member is valid depends on the table's frev/crev. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Per-level clock info; only the r600 member is used by this driver file. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

/* Power state entry; v1 is the layout indexed by this file's parser. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
1798
/* Copy the non-clock data (caps and classification) for one power state
 * out of the pplib table into the driver's radeon_ps, and remember
 * pointers to the boot and UVD states for later lookup. */
static void rv6xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
					     struct radeon_ps *rps,
					     struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	/* UVD states get fixed default UVD clocks; others leave them at 0 */
	if (r600_is_uvd_state(rps->class, rps->class2)) {
		rps->vclk = RV6XX_DEFAULT_VCLK_FREQ;
		rps->dclk = RV6XX_DEFAULT_DCLK_FREQ;
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		rdev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}
1820
1821static void rv6xx_parse_pplib_clock_info(struct radeon_device *rdev,
1822 struct radeon_ps *rps, int index,
1823 union pplib_clock_info *clock_info)
1824{
1825 struct rv6xx_ps *ps = rv6xx_get_ps(rps);
1826 u32 sclk, mclk;
1827 u16 vddc;
1828 struct rv6xx_pl *pl;
1829
1830 switch (index) {
1831 case 0:
1832 pl = &ps->low;
1833 break;
1834 case 1:
1835 pl = &ps->medium;
1836 break;
1837 case 2:
1838 default:
1839 pl = &ps->high;
1840 break;
1841 }
1842
1843 sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
1844 sclk |= clock_info->r600.ucEngineClockHigh << 16;
1845 mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
1846 mclk |= clock_info->r600.ucMemoryClockHigh << 16;
1847
1848 pl->mclk = mclk;
1849 pl->sclk = sclk;
1850 pl->vddc = le16_to_cpu(clock_info->r600.usVDDC);
1851 pl->flags = le32_to_cpu(clock_info->r600.ulFlags);
1852
1853 /* patch up vddc if necessary */
1854 if (pl->vddc == 0xff01) {
1855 if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
1856 pl->vddc = vddc;
1857 }
1858
1859 /* fix up pcie gen2 */
1860 if (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) {
1861 if ((rdev->family == CHIP_RV610) || (rdev->family == CHIP_RV630)) {
1862 if (pl->vddc < 1100)
1863 pl->flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
1864 }
1865 }
1866
1867 /* patch up boot state */
1868 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
1869 u16 vddc, vddci, mvdd;
1870 radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
1871 pl->mclk = rdev->clock.default_mclk;
1872 pl->sclk = rdev->clock.default_sclk;
1873 pl->vddc = vddc;
1874 }
1875}
1876
1877static int rv6xx_parse_power_table(struct radeon_device *rdev)
1878{
1879 struct radeon_mode_info *mode_info = &rdev->mode_info;
1880 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
1881 union pplib_power_state *power_state;
1882 int i, j;
1883 union pplib_clock_info *clock_info;
1884 union power_info *power_info;
1885 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
1886 u16 data_offset;
1887 u8 frev, crev;
1888 struct rv6xx_ps *ps;
1889
1890 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
1891 &frev, &crev, &data_offset))
1892 return -EINVAL;
1893 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1894
1895 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
1896 power_info->pplib.ucNumStates, GFP_KERNEL);
1897 if (!rdev->pm.dpm.ps)
1898 return -ENOMEM;
1899 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
1900 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
1901 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
1902
1903 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
1904 power_state = (union pplib_power_state *)
1905 (mode_info->atom_context->bios + data_offset +
1906 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
1907 i * power_info->pplib.ucStateEntrySize);
1908 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
1909 (mode_info->atom_context->bios + data_offset +
1910 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
1911 (power_state->v1.ucNonClockStateIndex *
1912 power_info->pplib.ucNonClockSize));
1913 if (power_info->pplib.ucStateEntrySize - 1) {
1914 ps = kzalloc(sizeof(struct rv6xx_ps), GFP_KERNEL);
1915 if (ps == NULL) {
1916 kfree(rdev->pm.dpm.ps);
1917 return -ENOMEM;
1918 }
1919 rdev->pm.dpm.ps[i].ps_priv = ps;
1920 rv6xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
1921 non_clock_info);
1922 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
1923 clock_info = (union pplib_clock_info *)
1924 (mode_info->atom_context->bios + data_offset +
1925 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
1926 (power_state->v1.ucClockStateIndices[j] *
1927 power_info->pplib.ucClockInfoSize));
1928 rv6xx_parse_pplib_clock_info(rdev,
1929 &rdev->pm.dpm.ps[i], j,
1930 clock_info);
1931 }
1932 }
1933 }
1934 rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
1935 return 0;
1936}
1937
/*
 * Allocate and initialize the rv6xx power-info block: parse the vbios
 * power table, look up PLL reference dividers, and derive the feature
 * flags (voltage control, spread spectrum, thermal protection, ...).
 * Returns 0 on success or a negative errno.
 */
int rv6xx_dpm_init(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	uint16_t data_offset, size;
	uint8_t frev, crev;
	struct atom_clock_dividers dividers;
	struct rv6xx_power_info *pi;
	int ret;

	pi = kzalloc(sizeof(struct rv6xx_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	/* NOTE(review): on failure here, pi stays attached to
	 * rdev->pm.dpm.priv — presumably the caller runs rv6xx_dpm_fini()
	 * on error; confirm to rule out a leak. */
	ret = rv6xx_parse_power_table(rdev);
	if (ret)
		return ret;

	/* fall back to defaults when the vbios gives no response times */
	if (rdev->pm.dpm.voltage_response_time == 0)
		rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
	if (rdev->pm.dpm.backbias_response_time == 0)
		rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;

	/* NOTE(review): radeon_atom_get_clock_dividers() is checked with
	 * `if (ret)`, so `dividers` is consumed on the *failure* path and
	 * the default on success — this looks inverted; confirm against
	 * the atombios clock-divider interface before changing. */
	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     0, false, &dividers);
	if (ret)
		pi->spll_ref_div = dividers.ref_div + 1;
	else
		pi->spll_ref_div = R600_REFERENCEDIVIDER_DFLT;

	/* same pattern (and same concern) for the memory PLL */
	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
					     0, false, &dividers);
	if (ret)
		pi->mpll_ref_div = dividers.ref_div + 1;
	else
		pi->mpll_ref_div = R600_REFERENCEDIVIDER_DFLT;

	if (rdev->family >= CHIP_RV670)
		pi->fb_div_scale = 1;
	else
		pi->fb_div_scale = 0;

	/* voltage control is only possible when vddc is GPIO driven */
	pi->voltage_control =
		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);

	pi->gfx_clock_gating = true;

	/* spread spectrum is enabled when the vbios carries an SS table */
	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		pi->sclk_ss = true;
		pi->mclk_ss = true;
		pi->dynamic_ss = true;
	} else {
		pi->sclk_ss = false;
		pi->mclk_ss = false;
		pi->dynamic_ss = false;
	}

	pi->dynamic_pcie_gen2 = true;

	if (pi->gfx_clock_gating &&
	    (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->display_gap = true;

	return 0;
}
2008
2009void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
2010 struct radeon_ps *rps)
2011{
2012 struct rv6xx_ps *ps = rv6xx_get_ps(rps);
2013 struct rv6xx_pl *pl;
2014
2015 r600_dpm_print_class_info(rps->class, rps->class2);
2016 r600_dpm_print_cap_info(rps->caps);
2017 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2018 pl = &ps->low;
2019 printk("\t\tpower level 0 sclk: %u mclk: %u vddc: %u\n",
2020 pl->sclk, pl->mclk, pl->vddc);
2021 pl = &ps->medium;
2022 printk("\t\tpower level 1 sclk: %u mclk: %u vddc: %u\n",
2023 pl->sclk, pl->mclk, pl->vddc);
2024 pl = &ps->high;
2025 printk("\t\tpower level 2 sclk: %u mclk: %u vddc: %u\n",
2026 pl->sclk, pl->mclk, pl->vddc);
2027 r600_dpm_print_ps_status(rdev, rps);
2028}
2029
2030void rv6xx_dpm_fini(struct radeon_device *rdev)
2031{
2032 int i;
2033
2034 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2035 kfree(rdev->pm.dpm.ps[i].ps_priv);
2036 }
2037 kfree(rdev->pm.dpm.ps);
2038 kfree(rdev->pm.dpm.priv);
2039}
2040
2041u32 rv6xx_dpm_get_sclk(struct radeon_device *rdev, bool low)
2042{
2043 struct rv6xx_ps *requested_state = rv6xx_get_ps(rdev->pm.dpm.requested_ps);
2044
2045 if (low)
2046 return requested_state->low.sclk;
2047 else
2048 return requested_state->high.sclk;
2049}
2050
2051u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low)
2052{
2053 struct rv6xx_ps *requested_state = rv6xx_get_ps(rdev->pm.dpm.requested_ps);
2054
2055 if (low)
2056 return requested_state->low.mclk;
2057 else
2058 return requested_state->high.mclk;
2059}
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.h b/drivers/gpu/drm/radeon/rv6xx_dpm.h
new file mode 100644
index 000000000000..8035d53ebea6
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.h
@@ -0,0 +1,95 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#ifndef __RV6XX_DPM_H__
26#define __RV6XX_DPM_H__
27
28#include "r600_dpm.h"
29
/* Represents a single SCLK step. */
struct rv6xx_sclk_stepping
{
	u32 vco_frequency;	/* PLL VCO frequency for this step — units presumably 10 kHz like the clock defines below; confirm */
	u32 post_divider;	/* post divider applied to the VCO output */
};
36
/* Cached view of the clock/voltage ladders programmed into the hardware,
 * plus the indices of the low/medium/high entries within each ladder. */
struct rv6xx_pm_hw_state {
	u32 sclks[R600_PM_NUMBER_OF_ACTIVITY_LEVELS];
	u32 mclks[R600_PM_NUMBER_OF_MCLKS];
	u16 vddc[R600_PM_NUMBER_OF_VOLTAGE_LEVELS];
	bool backbias[R600_PM_NUMBER_OF_VOLTAGE_LEVELS];
	bool pcie_gen2[R600_PM_NUMBER_OF_ACTIVITY_LEVELS];
	u8 high_sclk_index;
	u8 medium_sclk_index;
	u8 low_sclk_index;
	u8 high_mclk_index;
	u8 medium_mclk_index;
	u8 low_mclk_index;
	u8 high_vddc_index;
	u8 medium_vddc_index;
	u8 low_vddc_index;
	/* rp/lp: per-level activity parameters — semantics not evident
	 * from this header; presumably the r600 "right/left" activity
	 * thresholds. TODO confirm against r600_set_at() usage. */
	u8 rp[R600_PM_NUMBER_OF_ACTIVITY_LEVELS];
	u8 lp[R600_PM_NUMBER_OF_ACTIVITY_LEVELS];
};
55
/* Driver-private dpm state for rv6xx, hung off rdev->pm.dpm.priv. */
struct rv6xx_power_info {
	/* flags */
	bool voltage_control;		/* vddc is GPIO controllable */
	bool sclk_ss;			/* engine clock spread spectrum */
	bool mclk_ss;			/* memory clock spread spectrum */
	bool dynamic_ss;		/* dynamic spread spectrum */
	bool dynamic_pcie_gen2;		/* dynamic pcie gen1/gen2 switching */
	bool thermal_protection;
	bool display_gap;
	bool gfx_clock_gating;
	/* clk values */
	u32 fb_div_scale;		/* feedback divider scaling (RV670+) */
	u32 spll_ref_div;		/* engine PLL reference divider */
	u32 mpll_ref_div;		/* memory PLL reference divider */
	u32 bsu;
	u32 bsp;
	/* */
	u32 active_auto_throttle_sources;
	/* current power state */
	u32 restricted_levels;
	struct rv6xx_pm_hw_state hw;	/* cached hardware stepping state */
};
78
/* One performance level: engine/memory clocks, voltage, and
 * ATOM_PPLIB_R600_FLAGS_* flags from the vbios table. */
struct rv6xx_pl {
	u32 sclk;
	u32 mclk;
	u16 vddc;
	u32 flags;
};

/* Per-state private data: the three performance levels of a power state. */
struct rv6xx_ps {
	struct rv6xx_pl high;
	struct rv6xx_pl medium;
	struct rv6xx_pl low;
};

/* Default UVD clocks assigned to UVD states by the power-table parser. */
#define RV6XX_DEFAULT_VCLK_FREQ  40000 /* 10 khz */
#define RV6XX_DEFAULT_DCLK_FREQ  30000 /* 10 khz */
94
95#endif
diff --git a/drivers/gpu/drm/radeon/rv6xxd.h b/drivers/gpu/drm/radeon/rv6xxd.h
new file mode 100644
index 000000000000..34e86f90b431
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv6xxd.h
@@ -0,0 +1,246 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef RV6XXD_H
24#define RV6XXD_H
25
/* RV6xx power management */

/* engine PLL control */
#define SPLL_CNTL_MODE                                    0x60c
#       define SPLL_DIV_SYNC                              (1 << 5)

/* master power-management control */
#define GENERAL_PWRMGT                                    0x618
#       define GLOBAL_PWRMGT_EN                           (1 << 0)
#       define STATIC_PM_EN                               (1 << 1)
#       define MOBILE_SU                                  (1 << 2)
#       define THERMAL_PROTECTION_DIS                     (1 << 3)
#       define THERMAL_PROTECTION_TYPE                    (1 << 4)
#       define ENABLE_GEN2PCIE                            (1 << 5)
#       define SW_GPIO_INDEX(x)                           ((x) << 6)
#       define SW_GPIO_INDEX_MASK                         (3 << 6)
#       define LOW_VOLT_D2_ACPI                           (1 << 8)
#       define LOW_VOLT_D3_ACPI                           (1 << 9)
#       define VOLT_PWRMGT_EN                             (1 << 10)
#       define BACKBIAS_PAD_EN                            (1 << 16)
#       define BACKBIAS_VALUE                             (1 << 17)
#       define BACKBIAS_DPM_CNTL                          (1 << 18)
#       define DYN_SPREAD_SPECTRUM_EN                     (1 << 21)

/* memory clock power management */
#define MCLK_PWRMGT_CNTL                                  0x624
#       define MPLL_PWRMGT_OFF                            (1 << 0)
#       define YCLK_TURNOFF                               (1 << 1)
#       define MPLL_TURNOFF                               (1 << 2)
#       define SU_MCLK_USE_BCLK                           (1 << 3)
#       define DLL_READY                                  (1 << 4)
#       define MC_BUSY                                    (1 << 5)
#       define MC_INT_CNTL                                (1 << 7)
#       define MRDCKA_SLEEP                               (1 << 8)
#       define MRDCKB_SLEEP                               (1 << 9)
#       define MRDCKC_SLEEP                               (1 << 10)
#       define MRDCKD_SLEEP                               (1 << 11)
#       define MRDCKE_SLEEP                               (1 << 12)
#       define MRDCKF_SLEEP                               (1 << 13)
#       define MRDCKG_SLEEP                               (1 << 14)
#       define MRDCKH_SLEEP                               (1 << 15)
#       define MRDCKA_RESET                               (1 << 16)
#       define MRDCKB_RESET                               (1 << 17)
#       define MRDCKC_RESET                               (1 << 18)
#       define MRDCKD_RESET                               (1 << 19)
#       define MRDCKE_RESET                               (1 << 20)
#       define MRDCKF_RESET                               (1 << 21)
#       define MRDCKG_RESET                               (1 << 22)
#       define MRDCKH_RESET                               (1 << 23)
#       define DLL_READY_READ                             (1 << 24)
#       define USE_DISPLAY_GAP                            (1 << 25)
#       define USE_DISPLAY_URGENT_NORMAL                  (1 << 26)
#       define USE_DISPLAY_GAP_CTXSW                      (1 << 27)
#       define MPLL_TURNOFF_D2                            (1 << 28)
#       define USE_DISPLAY_URGENT_CTXSW                   (1 << 29)

/* memory PLL settings for mclk level 0 */
#define MPLL_FREQ_LEVEL_0                                 0x6e8
#       define LEVEL0_MPLL_POST_DIV(x)                    ((x) << 0)
#       define LEVEL0_MPLL_POST_DIV_MASK                  (0xff << 0)
#       define LEVEL0_MPLL_FB_DIV(x)                      ((x) << 8)
#       define LEVEL0_MPLL_FB_DIV_MASK                    (0xfff << 8)
#       define LEVEL0_MPLL_REF_DIV(x)                     ((x) << 20)
#       define LEVEL0_MPLL_REF_DIV_MASK                   (0x3f << 20)
#       define LEVEL0_MPLL_DIV_EN                         (1 << 28)
#       define LEVEL0_DLL_BYPASS                          (1 << 29)
#       define LEVEL0_DLL_RESET                           (1 << 30)

/* voltage response timing */
#define VID_RT                                            0x6f8
#       define VID_CRT(x)                                 ((x) << 0)
#       define VID_CRT_MASK                               (0x1fff << 0)
#       define VID_CRTU(x)                                ((x) << 13)
#       define VID_CRTU_MASK                              (7 << 13)
#       define SSTU(x)                                    ((x) << 16)
#       define SSTU_MASK                                  (7 << 16)
#       define VID_SWT(x)                                 ((x) << 19)
#       define VID_SWT_MASK                               (0x1f << 19)
#       define BRT(x)                                     ((x) << 24)
#       define BRT_MASK                                   (0xff << 24)

/* current/target performance-level status */
#define TARGET_AND_CURRENT_PROFILE_INDEX                  0x70c
#       define TARGET_PROFILE_INDEX_MASK                  (3 << 0)
#       define TARGET_PROFILE_INDEX_SHIFT                 0
#       define CURRENT_PROFILE_INDEX_MASK                 (3 << 2)
#       define CURRENT_PROFILE_INDEX_SHIFT                2
#       define DYN_PWR_ENTER_INDEX(x)                     ((x) << 4)
#       define DYN_PWR_ENTER_INDEX_MASK                   (3 << 4)
#       define DYN_PWR_ENTER_INDEX_SHIFT                  4
#       define CURR_MCLK_INDEX_MASK                       (3 << 6)
#       define CURR_MCLK_INDEX_SHIFT                      6
#       define CURR_SCLK_INDEX_MASK                       (0x1f << 8)
#       define CURR_SCLK_INDEX_SHIFT                      8
#       define CURR_VID_INDEX_MASK                        (3 << 13)
#       define CURR_VID_INDEX_SHIFT                       13

/* per-level voltage GPIO values and backbias settings */
#define VID_UPPER_GPIO_CNTL                               0x740
#       define CTXSW_UPPER_GPIO_VALUES(x)                 ((x) << 0)
#       define CTXSW_UPPER_GPIO_VALUES_MASK               (7 << 0)
#       define HIGH_UPPER_GPIO_VALUES(x)                  ((x) << 3)
#       define HIGH_UPPER_GPIO_VALUES_MASK                (7 << 3)
#       define MEDIUM_UPPER_GPIO_VALUES(x)                ((x) << 6)
#       define MEDIUM_UPPER_GPIO_VALUES_MASK              (7 << 6)
#       define LOW_UPPER_GPIO_VALUES(x)                   ((x) << 9)
#       define LOW_UPPER_GPIO_VALUES_MASK                 (7 << 9)
#       define CTXSW_BACKBIAS_VALUE                       (1 << 12)
#       define HIGH_BACKBIAS_VALUE                        (1 << 13)
#       define MEDIUM_BACKBIAS_VALUE                      (1 << 14)
#       define LOW_BACKBIAS_VALUE                         (1 << 15)

/* display gap (vblank) synchronization for clock changes */
#define CG_DISPLAY_GAP_CNTL                               0x7dc
#       define DISP1_GAP(x)                               ((x) << 0)
#       define DISP1_GAP_MASK                             (3 << 0)
#       define DISP2_GAP(x)                               ((x) << 2)
#       define DISP2_GAP_MASK                             (3 << 2)
#       define VBI_TIMER_COUNT(x)                         ((x) << 4)
#       define VBI_TIMER_COUNT_MASK                       (0x3fff << 4)
#       define VBI_TIMER_UNIT(x)                          ((x) << 20)
#       define VBI_TIMER_UNIT_MASK                        (7 << 20)
#       define DISP1_GAP_MCHG(x)                          ((x) << 24)
#       define DISP1_GAP_MCHG_MASK                        (3 << 24)
#       define DISP2_GAP_MCHG(x)                          ((x) << 26)
#       define DISP2_GAP_MCHG_MASK                        (3 << 26)

/* thermal throttling / critical-temperature control */
#define CG_THERMAL_CTRL                                   0x7f0
#       define DPM_EVENT_SRC(x)                           ((x) << 0)
#       define DPM_EVENT_SRC_MASK                         (7 << 0)
#       define THERM_INC_CLK                              (1 << 3)
#       define TOFFSET(x)                                 ((x) << 4)
#       define TOFFSET_MASK                               (0xff << 4)
#       define DIG_THERM_DPM(x)                           ((x) << 12)
#       define DIG_THERM_DPM_MASK                         (0xff << 12)
#       define CTF_SEL(x)                                 ((x) << 20)
#       define CTF_SEL_MASK                               (7 << 20)
#       define CTF_PAD_POLARITY                           (1 << 23)
#       define CTF_PAD_EN                                 (1 << 24)

/* spread spectrum */
#define CG_SPLL_SPREAD_SPECTRUM_LOW                       0x820
#       define SSEN                                       (1 << 0)
#       define CLKS(x)                                    ((x) << 3)
#       define CLKS_MASK                                  (0xff << 3)
#       define CLKS_SHIFT                                 3
#       define CLKV(x)                                    ((x) << 11)
#       define CLKV_MASK                                  (0x7ff << 11)
#       define CLKV_SHIFT                                 11
#define CG_MPLL_SPREAD_SPECTRUM                           0x830

/* memory-controller interface blackout control */
#define CITF_CNTL                                         0x200c
#       define BLACKOUT_RD                                (1 << 0)
#       define BLACKOUT_WR                                (1 << 1)

/* memory configuration readout */
#define RAMCFG                                            0x2408
#define NOOFBANK_SHIFT                                    0
#define NOOFBANK_MASK                                     0x00000001
#define NOOFRANK_SHIFT                                    1
#define NOOFRANK_MASK                                     0x00000002
#define NOOFROWS_SHIFT                                    2
#define NOOFROWS_MASK                                     0x0000001C
#define NOOFCOLS_SHIFT                                    5
#define NOOFCOLS_MASK                                     0x00000060
#define CHANSIZE_SHIFT                                    7
#define CHANSIZE_MASK                                     0x00000080
#define BURSTLENGTH_SHIFT                                 8
#define BURSTLENGTH_MASK                                  0x00000100
#define CHANSIZE_OVERRIDE                                 (1 << 10)

#define SQM_RATIO                                         0x2424
#       define STATE0(x)                                  ((x) << 0)
#       define STATE0_MASK                                (0xff << 0)
#       define STATE1(x)                                  ((x) << 8)
#       define STATE1_MASK                                (0xff << 8)
#       define STATE2(x)                                  ((x) << 16)
#       define STATE2_MASK                                (0xff << 16)
#       define STATE3(x)                                  ((x) << 24)
#       define STATE3_MASK                                (0xff << 24)

/* memory refresh rate per power mode */
#define ARB_RFSH_CNTL                                     0x2460
#       define ENABLE                                     (1 << 0)
#define ARB_RFSH_RATE                                     0x2464
#       define POWERMODE0(x)                              ((x) << 0)
#       define POWERMODE0_MASK                            (0xff << 0)
#       define POWERMODE1(x)                              ((x) << 8)
#       define POWERMODE1_MASK                            (0xff << 8)
#       define POWERMODE2(x)                              ((x) << 16)
#       define POWERMODE2_MASK                            (0xff << 16)
#       define POWERMODE3(x)                              ((x) << 24)
#       define POWERMODE3_MASK                            (0xff << 24)

/* memory sequencer */
#define MC_SEQ_DRAM                                       0x2608
#       define CKE_DYN                                    (1 << 12)

#define MC_SEQ_CMD                                        0x26c4

#define MC_SEQ_RESERVE_S                                  0x2890
#define MC_SEQ_RESERVE_M                                  0x2894

/* LVDS transmitter frequency-change synchronization */
#define LVTMA_DATA_SYNCHRONIZATION                        0x7adc
#       define LVTMA_PFREQCHG                             (1 << 8)
#define DCE3_LVTMA_DATA_SYNCHRONIZATION                   0x7f98

/* PCIE indirect regs */
#define PCIE_P_CNTL                                       0x40
#       define P_PLL_PWRDN_IN_L1L23                       (1 << 3)
#       define P_PLL_BUF_PDNB                             (1 << 4)
#       define P_PLL_PDNB                                 (1 << 9)
#       define P_ALLOW_PRX_FRONTEND_SHUTOFF               (1 << 12)
/* PCIE PORT indirect regs */
#define PCIE_LC_CNTL                                      0xa0
#       define LC_L0S_INACTIVITY(x)                       ((x) << 8)
#       define LC_L0S_INACTIVITY_MASK                     (0xf << 8)
#       define LC_L0S_INACTIVITY_SHIFT                    8
#       define LC_L1_INACTIVITY(x)                        ((x) << 12)
#       define LC_L1_INACTIVITY_MASK                      (0xf << 12)
#       define LC_L1_INACTIVITY_SHIFT                     12
#       define LC_PMI_TO_L1_DIS                           (1 << 16)
#       define LC_ASPM_TO_L1_DIS                          (1 << 24)
#define PCIE_LC_SPEED_CNTL                                0xa4
#       define LC_GEN2_EN                                 (1 << 0)
#       define LC_INITIATE_LINK_SPEED_CHANGE              (1 << 7)
#       define LC_CURRENT_DATA_RATE                       (1 << 11)
#       define LC_HW_VOLTAGE_IF_CONTROL(x)                ((x) << 12)
#       define LC_HW_VOLTAGE_IF_CONTROL_MASK              (3 << 12)
#       define LC_HW_VOLTAGE_IF_CONTROL_SHIFT             12
#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
245
246#endif
diff --git a/drivers/gpu/drm/radeon/rv730_dpm.c b/drivers/gpu/drm/radeon/rv730_dpm.c
new file mode 100644
index 000000000000..3f5e1cf138ba
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv730_dpm.c
@@ -0,0 +1,508 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "rv730d.h"
28#include "r600_dpm.h"
29#include "rv770_dpm.h"
30#include "atom.h"
31
32#define MC_CG_ARB_FREQ_F0 0x0a
33#define MC_CG_ARB_FREQ_F1 0x0b
34#define MC_CG_ARB_FREQ_F2 0x0c
35#define MC_CG_ARB_FREQ_F3 0x0d
36
37struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps);
38struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
39
/**
 * rv730_populate_sclk_value - encode an engine clock into SMC SPLL register values
 * @rdev: radeon device
 * @engine_clock: target engine clock (same units as the AtomBIOS divider call)
 * @sclk: SMC sclk value structure to fill; fields are stored big-endian
 *
 * Queries the AtomBIOS for SPLL dividers for @engine_clock, computes the
 * fractional feedback divider, optionally enables engine clock spread
 * spectrum, and writes the resulting register images into @sclk for the SMC.
 *
 * Returns 0 on success, or the error from the AtomBIOS divider lookup.
 */
int rv730_populate_sclk_value(struct radeon_device *rdev,
			      u32 engine_clock,
			      RV770_SMC_SCLK_VALUE *sclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct atom_clock_dividers dividers;
	/* start from the boot-time register values cached in pi->clk_regs */
	u32 spll_func_cntl = pi->clk_regs.rv730.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clk_regs.rv730.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = pi->clk_regs.rv730.cg_spll_func_cntl_3;
	u32 cg_spll_spread_spectrum = pi->clk_regs.rv730.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clk_regs.rv730.cg_spll_spread_spectrum_2;
	u64 tmp;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider, post_divider;
	u32 fbdiv;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;

	/* post_div packs hi/lo counts in its two nibbles; total = hi + lo + 2 */
	if (dividers.enable_post_div)
		post_divider = ((dividers.post_div >> 4) & 0xf) +
			(dividers.post_div & 0xf) + 2;
	else
		post_divider = 1;

	/* feedback divider in 1/16384 units: sclk * refdiv * postdiv / refclk */
	tmp = (u64) engine_clock * reference_divider * post_divider * 16384;
	do_div(tmp, reference_clock);
	fbdiv = (u32) tmp;

	/* set up registers */
	if (dividers.enable_post_div)
		spll_func_cntl |= SPLL_DIVEN;
	else
		spll_func_cntl &= ~SPLL_DIVEN;
	spll_func_cntl &= ~(SPLL_HILEN_MASK | SPLL_LOLEN_MASK | SPLL_REF_DIV_MASK);
	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
	spll_func_cntl |= SPLL_HILEN((dividers.post_div >> 4) & 0xf);
	spll_func_cntl |= SPLL_LOLEN(dividers.post_div & 0xf);

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(2);

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	/* optionally program engine clock spread spectrum */
	if (pi->sclk_ss) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * post_divider;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	/* the SMC expects big-endian register images */
	sclk->sclk_value = cpu_to_be32(engine_clock);
	sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
	sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
	sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
	sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(cg_spll_spread_spectrum);
	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(cg_spll_spread_spectrum_2);

	return 0;
}
118
119int rv730_populate_mclk_value(struct radeon_device *rdev,
120 u32 engine_clock, u32 memory_clock,
121 LPRV7XX_SMC_MCLK_VALUE mclk)
122{
123 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
124 u32 mclk_pwrmgt_cntl = pi->clk_regs.rv730.mclk_pwrmgt_cntl;
125 u32 dll_cntl = pi->clk_regs.rv730.dll_cntl;
126 u32 mpll_func_cntl = pi->clk_regs.rv730.mpll_func_cntl;
127 u32 mpll_func_cntl_2 = pi->clk_regs.rv730.mpll_func_cntl2;
128 u32 mpll_func_cntl_3 = pi->clk_regs.rv730.mpll_func_cntl3;
129 u32 mpll_ss = pi->clk_regs.rv730.mpll_ss;
130 u32 mpll_ss2 = pi->clk_regs.rv730.mpll_ss2;
131 struct atom_clock_dividers dividers;
132 u32 post_divider, reference_divider;
133 int ret;
134
135 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
136 memory_clock, false, &dividers);
137 if (ret)
138 return ret;
139
140 reference_divider = dividers.ref_div + 1;
141
142 if (dividers.enable_post_div)
143 post_divider = ((dividers.post_div >> 4) & 0xf) +
144 (dividers.post_div & 0xf) + 2;
145 else
146 post_divider = 1;
147
148 /* setup the registers */
149 if (dividers.enable_post_div)
150 mpll_func_cntl |= MPLL_DIVEN;
151 else
152 mpll_func_cntl &= ~MPLL_DIVEN;
153
154 mpll_func_cntl &= ~(MPLL_REF_DIV_MASK | MPLL_HILEN_MASK | MPLL_LOLEN_MASK);
155 mpll_func_cntl |= MPLL_REF_DIV(dividers.ref_div);
156 mpll_func_cntl |= MPLL_HILEN((dividers.post_div >> 4) & 0xf);
157 mpll_func_cntl |= MPLL_LOLEN(dividers.post_div & 0xf);
158
159 mpll_func_cntl_3 &= ~MPLL_FB_DIV_MASK;
160 mpll_func_cntl_3 |= MPLL_FB_DIV(dividers.fb_div);
161 if (dividers.enable_dithen)
162 mpll_func_cntl_3 |= MPLL_DITHEN;
163 else
164 mpll_func_cntl_3 &= ~MPLL_DITHEN;
165
166 if (pi->mclk_ss) {
167 struct radeon_atom_ss ss;
168 u32 vco_freq = memory_clock * post_divider;
169
170 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
171 ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
172 u32 reference_clock = rdev->clock.mpll.reference_freq;
173 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
174 u32 clk_v = ss.percentage * dividers.fb_div / (clk_s * 10000);
175
176 mpll_ss &= ~CLK_S_MASK;
177 mpll_ss |= CLK_S(clk_s);
178 mpll_ss |= SSEN;
179
180 mpll_ss2 &= ~CLK_V_MASK;
181 mpll_ss |= CLK_V(clk_v);
182 }
183 }
184
185
186 mclk->mclk730.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
187 mclk->mclk730.vDLL_CNTL = cpu_to_be32(dll_cntl);
188 mclk->mclk730.mclk_value = cpu_to_be32(memory_clock);
189 mclk->mclk730.vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
190 mclk->mclk730.vMPLL_FUNC_CNTL2 = cpu_to_be32(mpll_func_cntl_2);
191 mclk->mclk730.vMPLL_FUNC_CNTL3 = cpu_to_be32(mpll_func_cntl_3);
192 mclk->mclk730.vMPLL_SS = cpu_to_be32(mpll_ss);
193 mclk->mclk730.vMPLL_SS2 = cpu_to_be32(mpll_ss2);
194
195 return 0;
196}
197
/**
 * rv730_read_clock_registers - cache the current SPLL/MPLL register state
 * @rdev: radeon device
 *
 * Snapshots the engine PLL (SPLL) and memory PLL (MPLL) control and
 * spread spectrum registers into pi->clk_regs.rv730 so the dpm code can
 * later derive new values from the boot-time settings.
 */
void rv730_read_clock_registers(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	/* engine PLL state */
	pi->clk_regs.rv730.cg_spll_func_cntl =
		RREG32(CG_SPLL_FUNC_CNTL);
	pi->clk_regs.rv730.cg_spll_func_cntl_2 =
		RREG32(CG_SPLL_FUNC_CNTL_2);
	pi->clk_regs.rv730.cg_spll_func_cntl_3 =
		RREG32(CG_SPLL_FUNC_CNTL_3);
	pi->clk_regs.rv730.cg_spll_spread_spectrum =
		RREG32(CG_SPLL_SPREAD_SPECTRUM);
	pi->clk_regs.rv730.cg_spll_spread_spectrum_2 =
		RREG32(CG_SPLL_SPREAD_SPECTRUM_2);

	/* memory PLL state */
	pi->clk_regs.rv730.mclk_pwrmgt_cntl =
		RREG32(TCI_MCLK_PWRMGT_CNTL);
	pi->clk_regs.rv730.dll_cntl =
		RREG32(TCI_DLL_CNTL);
	pi->clk_regs.rv730.mpll_func_cntl =
		RREG32(CG_MPLL_FUNC_CNTL);
	pi->clk_regs.rv730.mpll_func_cntl2 =
		RREG32(CG_MPLL_FUNC_CNTL_2);
	pi->clk_regs.rv730.mpll_func_cntl3 =
		RREG32(CG_MPLL_FUNC_CNTL_3);
	pi->clk_regs.rv730.mpll_ss =
		RREG32(CG_TCI_MPLL_SPREAD_SPECTRUM);
	pi->clk_regs.rv730.mpll_ss2 =
		RREG32(CG_TCI_MPLL_SPREAD_SPECTRUM_2);
}
228
/**
 * rv730_populate_smc_acpi_state - build the ACPI (lowest power) SMC state
 * @rdev: radeon device
 * @table: SMC state table whose ACPIState is filled in
 *
 * Starts from a copy of the initial state, then overrides it with
 * bypassed/reset PLLs, all memory read clocks held in reset and sleep,
 * and the ACPI voltage level.  All three performance levels of the ACPI
 * state are made identical.
 *
 * Always returns 0.
 */
int rv730_populate_smc_acpi_state(struct radeon_device *rdev,
				  RV770_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 mpll_func_cntl = 0;
	u32 mpll_func_cntl_2 = 0 ;
	u32 mpll_func_cntl_3 = 0;
	u32 mclk_pwrmgt_cntl;
	u32 dll_cntl;
	u32 spll_func_cntl;
	u32 spll_func_cntl_2;
	u32 spll_func_cntl_3;

	table->ACPIState = table->initialState;
	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;

	/* use the ACPI voltage if the BIOS provided one, else the table minimum */
	if (pi->acpi_vddc) {
		rv770_populate_vddc_value(rdev, pi->acpi_vddc,
					  &table->ACPIState.levels[0].vddc);
		table->ACPIState.levels[0].gen2PCIE = pi->pcie_gen2 ?
			pi->acpi_pcie_gen2 : 0;
		table->ACPIState.levels[0].gen2XSP =
			pi->acpi_pcie_gen2;
	} else {
		rv770_populate_vddc_value(rdev, pi->min_vddc_in_table,
					  &table->ACPIState.levels[0].vddc);
		table->ACPIState.levels[0].gen2PCIE = 0;
	}

	/* put the MPLL into reset/bypass, clocked from mux input 1 */
	mpll_func_cntl = pi->clk_regs.rv730.mpll_func_cntl;
	mpll_func_cntl_2 = pi->clk_regs.rv730.mpll_func_cntl2;
	mpll_func_cntl_3 = pi->clk_regs.rv730.mpll_func_cntl3;

	mpll_func_cntl |= MPLL_RESET | MPLL_BYPASS_EN;
	mpll_func_cntl &= ~MPLL_SLEEP;

	mpll_func_cntl_2 &= ~MCLK_MUX_SEL_MASK;
	mpll_func_cntl_2 |= MCLK_MUX_SEL(1);

	/* hold every memory read clock (A-H) in reset and sleep */
	mclk_pwrmgt_cntl = (MRDCKA_RESET |
			    MRDCKB_RESET |
			    MRDCKC_RESET |
			    MRDCKD_RESET |
			    MRDCKE_RESET |
			    MRDCKF_RESET |
			    MRDCKG_RESET |
			    MRDCKH_RESET |
			    MRDCKA_SLEEP |
			    MRDCKB_SLEEP |
			    MRDCKC_SLEEP |
			    MRDCKD_SLEEP |
			    MRDCKE_SLEEP |
			    MRDCKF_SLEEP |
			    MRDCKG_SLEEP |
			    MRDCKH_SLEEP);

	dll_cntl = 0xff000000;

	/* put the SPLL into reset/bypass, clocked from mux input 4 */
	spll_func_cntl = pi->clk_regs.rv730.cg_spll_func_cntl;
	spll_func_cntl_2 = pi->clk_regs.rv730.cg_spll_func_cntl_2;
	spll_func_cntl_3 = pi->clk_regs.rv730.cg_spll_func_cntl_3;

	spll_func_cntl |= SPLL_RESET | SPLL_BYPASS_EN;
	spll_func_cntl &= ~SPLL_SLEEP;

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	/* store the register images big-endian for the SMC */
	table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
	table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL2 = cpu_to_be32(mpll_func_cntl_2);
	table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL3 = cpu_to_be32(mpll_func_cntl_3);
	table->ACPIState.levels[0].mclk.mclk730.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	table->ACPIState.levels[0].mclk.mclk730.vDLL_CNTL = cpu_to_be32(dll_cntl);

	table->ACPIState.levels[0].mclk.mclk730.mclk_value = 0;

	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);

	table->ACPIState.levels[0].sclk.sclk_value = 0;

	rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);

	/* all three levels of the ACPI state are the same */
	table->ACPIState.levels[1] = table->ACPIState.levels[0];
	table->ACPIState.levels[2] = table->ACPIState.levels[0];

	return 0;
}
318
/**
 * rv730_populate_smc_initial_state - seed the SMC table from the boot state
 * @rdev: radeon device
 * @radeon_state: power state whose low level supplies the boot clocks
 * @table: SMC state table whose initialState is filled in
 *
 * Encodes the cached boot-time clock registers plus the low level of
 * @radeon_state (mclk, sclk, vddc) into level 0 of the initial state,
 * then duplicates that level to levels 1 and 2.
 *
 * Always returns 0.
 */
int rv730_populate_smc_initial_state(struct radeon_device *rdev,
				     struct radeon_ps *radeon_state,
				     RV770_SMC_STATETABLE *table)
{
	struct rv7xx_ps *initial_state = rv770_get_ps(radeon_state);
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 a_t;

	/* memory PLL registers, big-endian for the SMC */
	table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL =
		cpu_to_be32(pi->clk_regs.rv730.mpll_func_cntl);
	table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL2 =
		cpu_to_be32(pi->clk_regs.rv730.mpll_func_cntl2);
	table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL3 =
		cpu_to_be32(pi->clk_regs.rv730.mpll_func_cntl3);
	table->initialState.levels[0].mclk.mclk730.vMCLK_PWRMGT_CNTL =
		cpu_to_be32(pi->clk_regs.rv730.mclk_pwrmgt_cntl);
	table->initialState.levels[0].mclk.mclk730.vDLL_CNTL =
		cpu_to_be32(pi->clk_regs.rv730.dll_cntl);
	table->initialState.levels[0].mclk.mclk730.vMPLL_SS =
		cpu_to_be32(pi->clk_regs.rv730.mpll_ss);
	table->initialState.levels[0].mclk.mclk730.vMPLL_SS2 =
		cpu_to_be32(pi->clk_regs.rv730.mpll_ss2);

	table->initialState.levels[0].mclk.mclk730.mclk_value =
		cpu_to_be32(initial_state->low.mclk);

	/* engine PLL registers, big-endian for the SMC */
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
		cpu_to_be32(pi->clk_regs.rv730.cg_spll_func_cntl);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
		cpu_to_be32(pi->clk_regs.rv730.cg_spll_func_cntl_2);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
		cpu_to_be32(pi->clk_regs.rv730.cg_spll_func_cntl_3);
	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
		cpu_to_be32(pi->clk_regs.rv730.cg_spll_spread_spectrum);
	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cpu_to_be32(pi->clk_regs.rv730.cg_spll_spread_spectrum_2);

	table->initialState.levels[0].sclk.sclk_value =
		cpu_to_be32(initial_state->low.sclk);

	table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;

	table->initialState.levels[0].seqValue =
		rv770_get_seq_value(rdev, &initial_state->low);

	rv770_populate_vddc_value(rdev,
				  initial_state->low.vddc,
				  &table->initialState.levels[0].vddc);
	rv770_populate_initial_mvdd_value(rdev,
					  &table->initialState.levels[0].mvdd);

	a_t = CG_R(0xffff) | CG_L(0);

	table->initialState.levels[0].aT = cpu_to_be32(a_t);

	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);

	if (pi->boot_in_gen2)
		table->initialState.levels[0].gen2PCIE = 1;
	else
		table->initialState.levels[0].gen2PCIE = 0;
	if (initial_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
		table->initialState.levels[0].gen2XSP = 1;
	else
		table->initialState.levels[0].gen2XSP = 0;

	/* all three levels of the initial state are the same */
	table->initialState.levels[1] = table->initialState.levels[0];
	table->initialState.levels[2] = table->initialState.levels[0];

	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;

	return 0;
}
392
/**
 * rv730_program_memory_timing_parameters - set per-level DRAM timings
 * @rdev: radeon device
 * @radeon_state: power state supplying the low/medium/high clocks
 *
 * Programs the arbiter refresh rates for power modes 1-3, then uses the
 * AtomBIOS to compute DRAM timings for each performance level and copies
 * them into the per-level timing registers (_3 = high, _2 = medium,
 * _1 = low), restoring the boot-time timings afterwards.
 */
void rv730_program_memory_timing_parameters(struct radeon_device *rdev,
					    struct radeon_ps *radeon_state)
{
	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
	u32 arb_refresh_rate = 0;
	u32 dram_timing = 0;
	u32 dram_timing2 = 0;
	u32 old_dram_timing = 0;
	u32 old_dram_timing2 = 0;

	/* refresh rates for power modes 1-3, derived from each level's sclk */
	arb_refresh_rate = RREG32(MC_ARB_RFSH_RATE) &
		~(POWERMODE1_MASK | POWERMODE2_MASK | POWERMODE3_MASK);
	arb_refresh_rate |=
		(POWERMODE1(rv770_calculate_memory_refresh_rate(rdev, state->low.sclk)) |
		 POWERMODE2(rv770_calculate_memory_refresh_rate(rdev, state->medium.sclk)) |
		 POWERMODE3(rv770_calculate_memory_refresh_rate(rdev, state->high.sclk)));
	WREG32(MC_ARB_RFSH_RATE, arb_refresh_rate);

	/* save the boot dram timings */
	old_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
	old_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);

	/* high level -> timing set 3 */
	radeon_atom_set_engine_dram_timings(rdev,
					    state->high.sclk,
					    state->high.mclk);

	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);

	WREG32(MC_ARB_DRAM_TIMING_3, dram_timing);
	WREG32(MC_ARB_DRAM_TIMING2_3, dram_timing2);

	/* medium level -> timing set 2 */
	radeon_atom_set_engine_dram_timings(rdev,
					    state->medium.sclk,
					    state->medium.mclk);

	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);

	WREG32(MC_ARB_DRAM_TIMING_2, dram_timing);
	WREG32(MC_ARB_DRAM_TIMING2_2, dram_timing2);

	/* low level -> timing set 1 */
	radeon_atom_set_engine_dram_timings(rdev,
					    state->low.sclk,
					    state->low.mclk);

	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);

	WREG32(MC_ARB_DRAM_TIMING_1, dram_timing);
	WREG32(MC_ARB_DRAM_TIMING2_1, dram_timing2);

	/* restore the boot dram timings */
	WREG32(MC_ARB_DRAM_TIMING, old_dram_timing);
	WREG32(MC_ARB_DRAM_TIMING2, old_dram_timing2);

}
450
/**
 * rv730_start_dpm - enable dynamic power management
 * @rdev: radeon device
 *
 * Clears the sclk and mclk power-management-off bits, then sets the
 * global power management enable.  Counterpart of rv730_stop_dpm().
 */
void rv730_start_dpm(struct radeon_device *rdev)
{
	/* enable sclk dpm */
	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);

	/* enable mclk dpm */
	WREG32_P(TCI_MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);

	/* enable dpm last */
	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
}
459
/**
 * rv730_stop_dpm - disable dynamic power management
 * @rdev: radeon device
 *
 * Asks the SMC to drop to the lowest state, then clears the global power
 * management enable and sets the sclk/mclk power-management-off bits.
 * Counterpart of rv730_start_dpm().
 */
void rv730_stop_dpm(struct radeon_device *rdev)
{
	PPSMC_Result result;

	/* force the SMC to the lowest state before shutting dpm off */
	result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);

	if (result != PPSMC_Result_OK)
		DRM_ERROR("Could not force DPM to low\n");

	/* disable dpm first */
	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);

	/* disable sclk dpm */
	WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);

	/* disable mclk dpm */
	WREG32_P(TCI_MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}
475
476void rv730_program_dcodt(struct radeon_device *rdev, bool use_dcodt)
477{
478 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
479 u32 i = use_dcodt ? 0 : 1;
480 u32 mc4_io_pad_cntl;
481
482 mc4_io_pad_cntl = RREG32(MC4_IO_DQ_PAD_CNTL_D0_I0);
483 mc4_io_pad_cntl &= 0xFFFFFF00;
484 mc4_io_pad_cntl |= pi->odt_value_0[i];
485 WREG32(MC4_IO_DQ_PAD_CNTL_D0_I0, mc4_io_pad_cntl);
486 WREG32(MC4_IO_DQ_PAD_CNTL_D0_I1, mc4_io_pad_cntl);
487
488 mc4_io_pad_cntl = RREG32(MC4_IO_QS_PAD_CNTL_D0_I0);
489 mc4_io_pad_cntl &= 0xFFFFFF00;
490 mc4_io_pad_cntl |= pi->odt_value_1[i];
491 WREG32(MC4_IO_QS_PAD_CNTL_D0_I0, mc4_io_pad_cntl);
492 WREG32(MC4_IO_QS_PAD_CNTL_D0_I1, mc4_io_pad_cntl);
493}
494
495void rv730_get_odt_values(struct radeon_device *rdev)
496{
497 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
498 u32 mc4_io_pad_cntl;
499
500 pi->odt_value_0[0] = (u8)0;
501 pi->odt_value_1[0] = (u8)0x80;
502
503 mc4_io_pad_cntl = RREG32(MC4_IO_DQ_PAD_CNTL_D0_I0);
504 pi->odt_value_0[1] = (u8)(mc4_io_pad_cntl & 0xff);
505
506 mc4_io_pad_cntl = RREG32(MC4_IO_QS_PAD_CNTL_D0_I0);
507 pi->odt_value_1[1] = (u8)(mc4_io_pad_cntl & 0xff);
508}
diff --git a/drivers/gpu/drm/radeon/rv730d.h b/drivers/gpu/drm/radeon/rv730d.h
new file mode 100644
index 000000000000..f0a7954fb1cb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv730d.h
@@ -0,0 +1,165 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef RV730_H
24#define RV730_H
25
26#define CG_SPLL_FUNC_CNTL 0x600
27#define SPLL_RESET (1 << 0)
28#define SPLL_SLEEP (1 << 1)
29#define SPLL_DIVEN (1 << 2)
30#define SPLL_BYPASS_EN (1 << 3)
31#define SPLL_REF_DIV(x) ((x) << 4)
32#define SPLL_REF_DIV_MASK (0x3f << 4)
33#define SPLL_HILEN(x) ((x) << 12)
34#define SPLL_HILEN_MASK (0xf << 12)
35#define SPLL_LOLEN(x) ((x) << 16)
36#define SPLL_LOLEN_MASK (0xf << 16)
37#define CG_SPLL_FUNC_CNTL_2 0x604
38#define SCLK_MUX_SEL(x) ((x) << 0)
39#define SCLK_MUX_SEL_MASK (0x1ff << 0)
40#define CG_SPLL_FUNC_CNTL_3 0x608
41#define SPLL_FB_DIV(x) ((x) << 0)
42#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
43#define SPLL_DITHEN (1 << 28)
44
45#define CG_MPLL_FUNC_CNTL 0x624
46#define MPLL_RESET (1 << 0)
47#define MPLL_SLEEP (1 << 1)
48#define MPLL_DIVEN (1 << 2)
49#define MPLL_BYPASS_EN (1 << 3)
50#define MPLL_REF_DIV(x) ((x) << 4)
51#define MPLL_REF_DIV_MASK (0x3f << 4)
52#define MPLL_HILEN(x) ((x) << 12)
53#define MPLL_HILEN_MASK (0xf << 12)
54#define MPLL_LOLEN(x) ((x) << 16)
55#define MPLL_LOLEN_MASK (0xf << 16)
56#define CG_MPLL_FUNC_CNTL_2 0x628
57#define MCLK_MUX_SEL(x) ((x) << 0)
58#define MCLK_MUX_SEL_MASK (0x1ff << 0)
59#define CG_MPLL_FUNC_CNTL_3 0x62c
60#define MPLL_FB_DIV(x) ((x) << 0)
61#define MPLL_FB_DIV_MASK (0x3ffffff << 0)
62#define MPLL_DITHEN (1 << 28)
63
64#define CG_TCI_MPLL_SPREAD_SPECTRUM 0x634
65#define CG_TCI_MPLL_SPREAD_SPECTRUM_2 0x638
66#define GENERAL_PWRMGT 0x63c
67# define GLOBAL_PWRMGT_EN (1 << 0)
68# define STATIC_PM_EN (1 << 1)
69# define THERMAL_PROTECTION_DIS (1 << 2)
70# define THERMAL_PROTECTION_TYPE (1 << 3)
71# define ENABLE_GEN2PCIE (1 << 4)
72# define ENABLE_GEN2XSP (1 << 5)
73# define SW_SMIO_INDEX(x) ((x) << 6)
74# define SW_SMIO_INDEX_MASK (3 << 6)
75# define LOW_VOLT_D2_ACPI (1 << 8)
76# define LOW_VOLT_D3_ACPI (1 << 9)
77# define VOLT_PWRMGT_EN (1 << 10)
78# define BACKBIAS_PAD_EN (1 << 18)
79# define BACKBIAS_VALUE (1 << 19)
80# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
81# define AC_DC_SW (1 << 24)
82
83#define SCLK_PWRMGT_CNTL 0x644
84# define SCLK_PWRMGT_OFF (1 << 0)
85# define SCLK_LOW_D1 (1 << 1)
86# define FIR_RESET (1 << 4)
87# define FIR_FORCE_TREND_SEL (1 << 5)
88# define FIR_TREND_MODE (1 << 6)
89# define DYN_GFX_CLK_OFF_EN (1 << 7)
90# define GFX_CLK_FORCE_ON (1 << 8)
91# define GFX_CLK_REQUEST_OFF (1 << 9)
92# define GFX_CLK_FORCE_OFF (1 << 10)
93# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
94# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
95# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
96
97#define TCI_MCLK_PWRMGT_CNTL 0x648
98# define MPLL_PWRMGT_OFF (1 << 5)
99# define DLL_READY (1 << 6)
100# define MC_INT_CNTL (1 << 7)
101# define MRDCKA_SLEEP (1 << 8)
102# define MRDCKB_SLEEP (1 << 9)
103# define MRDCKC_SLEEP (1 << 10)
104# define MRDCKD_SLEEP (1 << 11)
105# define MRDCKE_SLEEP (1 << 12)
106# define MRDCKF_SLEEP (1 << 13)
107# define MRDCKG_SLEEP (1 << 14)
108# define MRDCKH_SLEEP (1 << 15)
109# define MRDCKA_RESET (1 << 16)
110# define MRDCKB_RESET (1 << 17)
111# define MRDCKC_RESET (1 << 18)
112# define MRDCKD_RESET (1 << 19)
113# define MRDCKE_RESET (1 << 20)
114# define MRDCKF_RESET (1 << 21)
115# define MRDCKG_RESET (1 << 22)
116# define MRDCKH_RESET (1 << 23)
117# define DLL_READY_READ (1 << 24)
118# define USE_DISPLAY_GAP (1 << 25)
119# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
120# define MPLL_TURNOFF_D2 (1 << 28)
121#define TCI_DLL_CNTL 0x64c
122
123#define CG_PG_CNTL 0x858
124# define PWRGATE_ENABLE (1 << 0)
125
126#define CG_AT 0x6d4
127#define CG_R(x) ((x) << 0)
128#define CG_R_MASK (0xffff << 0)
129#define CG_L(x) ((x) << 16)
130#define CG_L_MASK (0xffff << 16)
131
132#define CG_SPLL_SPREAD_SPECTRUM 0x790
133#define SSEN (1 << 0)
134#define CLK_S(x) ((x) << 4)
135#define CLK_S_MASK (0xfff << 4)
136#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
137#define CLK_V(x) ((x) << 0)
138#define CLK_V_MASK (0x3ffffff << 0)
139
140#define MC_ARB_DRAM_TIMING 0x2774
141#define MC_ARB_DRAM_TIMING2 0x2778
142
143#define MC_ARB_RFSH_RATE 0x27b0
144#define POWERMODE0(x) ((x) << 0)
145#define POWERMODE0_MASK (0xff << 0)
146#define POWERMODE1(x) ((x) << 8)
147#define POWERMODE1_MASK (0xff << 8)
148#define POWERMODE2(x) ((x) << 16)
149#define POWERMODE2_MASK (0xff << 16)
150#define POWERMODE3(x) ((x) << 24)
151#define POWERMODE3_MASK (0xff << 24)
152
153#define MC_ARB_DRAM_TIMING_1 0x27f0
154#define MC_ARB_DRAM_TIMING_2 0x27f4
155#define MC_ARB_DRAM_TIMING_3 0x27f8
156#define MC_ARB_DRAM_TIMING2_1 0x27fc
157#define MC_ARB_DRAM_TIMING2_2 0x2800
158#define MC_ARB_DRAM_TIMING2_3 0x2804
159
160#define MC4_IO_DQ_PAD_CNTL_D0_I0 0x2978
161#define MC4_IO_DQ_PAD_CNTL_D0_I1 0x297c
162#define MC4_IO_QS_PAD_CNTL_D0_I0 0x2980
163#define MC4_IO_QS_PAD_CNTL_D0_I1 0x2984
164
165#endif
diff --git a/drivers/gpu/drm/radeon/rv740_dpm.c b/drivers/gpu/drm/radeon/rv740_dpm.c
new file mode 100644
index 000000000000..c4c8da501da8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv740_dpm.c
@@ -0,0 +1,416 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "rv740d.h"
28#include "r600_dpm.h"
29#include "rv770_dpm.h"
30#include "atom.h"
31
32struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
33
34u32 rv740_get_decoded_reference_divider(u32 encoded_ref)
35{
36 u32 ref = 0;
37
38 switch (encoded_ref) {
39 case 0:
40 ref = 1;
41 break;
42 case 16:
43 ref = 2;
44 break;
45 case 17:
46 ref = 3;
47 break;
48 case 18:
49 ref = 2;
50 break;
51 case 19:
52 ref = 3;
53 break;
54 case 20:
55 ref = 4;
56 break;
57 case 21:
58 ref = 5;
59 break;
60 default:
61 DRM_ERROR("Invalid encoded Reference Divider\n");
62 ref = 0;
63 break;
64 }
65
66 return ref;
67}
68
/* Maps a data rate range to the DLL speed value to program; the rate is
 * the quantity computed in rv740_get_dll_speed() (memory_clock * factor
 * / 1000).  Ranges are treated as (min, max] by the lookup.
 */
struct dll_speed_setting {
	u16 min;	/* exclusive lower bound of the data rate range */
	u16 max;	/* inclusive upper bound of the data rate range */
	u32 dll_speed;	/* encoded DLL speed for this range */
};

/* Ordered fastest to slowest; adjacent entries share a boundary so the
 * ranges tile 0..320 with no gaps.
 */
static struct dll_speed_setting dll_speed_table[16] =
{
	{ 270, 320, 0x0f },
	{ 240, 270, 0x0e },
	{ 200, 240, 0x0d },
	{ 180, 200, 0x0c },
	{ 160, 180, 0x0b },
	{ 140, 160, 0x0a },
	{ 120, 140, 0x09 },
	{ 110, 120, 0x08 },
	{ 95, 110, 0x07 },
	{ 85, 95, 0x06 },
	{ 78, 85, 0x05 },
	{ 70, 78, 0x04 },
	{ 65, 70, 0x03 },
	{ 60, 65, 0x02 },
	{ 42, 60, 0x01 },
	{ 00, 42, 0x00 }
};
94
95u32 rv740_get_dll_speed(bool is_gddr5, u32 memory_clock)
96{
97 int i;
98 u32 factor;
99 u16 data_rate;
100
101 if (is_gddr5)
102 factor = 4;
103 else
104 factor = 2;
105
106 data_rate = (u16)(memory_clock * factor / 1000);
107
108 if (data_rate < dll_speed_table[0].max) {
109 for (i = 0; i < 16; i++) {
110 if (data_rate > dll_speed_table[i].min &&
111 data_rate <= dll_speed_table[i].max)
112 return dll_speed_table[i].dll_speed;
113 }
114 }
115
116 DRM_DEBUG_KMS("Target MCLK greater than largest MCLK in DLL speed table\n");
117
118 return 0x0f;
119}
120
/**
 * rv740_populate_sclk_value - encode an engine clock into SMC SPLL register values
 * @rdev: radeon device
 * @engine_clock: target engine clock (same units as the AtomBIOS divider call)
 * @sclk: SMC sclk value structure to fill; fields are stored big-endian
 *
 * Queries the AtomBIOS for SPLL dividers for @engine_clock, computes the
 * fractional feedback divider, optionally enables engine clock spread
 * spectrum, and writes the resulting register images into @sclk.
 *
 * Returns 0 on success, or the error from the AtomBIOS divider lookup.
 */
int rv740_populate_sclk_value(struct radeon_device *rdev, u32 engine_clock,
			      RV770_SMC_SCLK_VALUE *sclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct atom_clock_dividers dividers;
	/* start from the boot-time register values cached in pi->clk_regs */
	u32 spll_func_cntl = pi->clk_regs.rv770.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clk_regs.rv770.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = pi->clk_regs.rv770.cg_spll_func_cntl_3;
	u32 cg_spll_spread_spectrum = pi->clk_regs.rv770.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clk_regs.rv770.cg_spll_spread_spectrum_2;
	u64 tmp;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;

	/* feedback divider in 1/16384 units: sclk * refdiv * postdiv / refclk */
	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
	do_div(tmp, reference_clock);
	fbdiv = (u32) tmp;

	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(2);

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	/* optionally program engine clock spread spectrum */
	if (pi->sclk_ss) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	/* the SMC expects big-endian register images */
	sclk->sclk_value = cpu_to_be32(engine_clock);
	sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
	sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
	sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
	sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(cg_spll_spread_spectrum);
	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(cg_spll_spread_spectrum_2);

	return 0;
}
186
/**
 * rv740_populate_mclk_value - encode a memory clock into SMC MPLL register values
 * @rdev: radeon device
 * @engine_clock: engine clock (unused by this function)
 * @memory_clock: target memory clock
 * @mclk: SMC mclk value structure to fill; fields are stored big-endian
 *
 * Queries the AtomBIOS for MPLL dividers for @memory_clock and programs
 * the AD (and, for GDDR5, DQ) PLL control values, optional memory spread
 * spectrum, and the DLL speed, storing the register images in @mclk.
 *
 * Returns 0 on success, or the error from the AtomBIOS divider lookup.
 */
int rv740_populate_mclk_value(struct radeon_device *rdev,
			      u32 engine_clock, u32 memory_clock,
			      RV7XX_SMC_MCLK_VALUE *mclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	/* start from the boot-time register values cached in pi->clk_regs */
	u32 mpll_ad_func_cntl = pi->clk_regs.rv770.mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2 = pi->clk_regs.rv770.mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl = pi->clk_regs.rv770.mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2 = pi->clk_regs.rv770.mpll_dq_func_cntl_2;
	u32 mclk_pwrmgt_cntl = pi->clk_regs.rv770.mclk_pwrmgt_cntl;
	u32 dll_cntl = pi->clk_regs.rv770.dll_cntl;
	u32 mpll_ss1 = pi->clk_regs.rv770.mpll_ss1;
	u32 mpll_ss2 = pi->clk_regs.rv770.mpll_ss2;
	struct atom_clock_dividers dividers;
	u32 ibias;
	u32 dll_speed;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
					     memory_clock, false, &dividers);
	if (ret)
		return ret;

	ibias = rv770_map_clkf_to_ibias(rdev, dividers.whole_fb_div);

	/* program the AD PLL dividers and bias */
	mpll_ad_func_cntl &= ~(CLKR_MASK |
			       YCLK_POST_DIV_MASK |
			       CLKF_MASK |
			       CLKFRAC_MASK |
			       IBIAS_MASK);
	mpll_ad_func_cntl |= CLKR(dividers.ref_div);
	mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
	mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
	mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
	mpll_ad_func_cntl |= IBIAS(ibias);

	if (dividers.vco_mode)
		mpll_ad_func_cntl_2 |= VCO_MODE;
	else
		mpll_ad_func_cntl_2 &= ~VCO_MODE;

	/* GDDR5 also needs the DQ PLL programmed with the same dividers */
	if (pi->mem_gddr5) {
		mpll_dq_func_cntl &= ~(CLKR_MASK |
				       YCLK_POST_DIV_MASK |
				       CLKF_MASK |
				       CLKFRAC_MASK |
				       IBIAS_MASK);
		mpll_dq_func_cntl |= CLKR(dividers.ref_div);
		mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
		mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
		mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
		mpll_dq_func_cntl |= IBIAS(ibias);

		if (dividers.vco_mode)
			mpll_dq_func_cntl_2 |= VCO_MODE;
		else
			mpll_dq_func_cntl_2 &= ~VCO_MODE;
	}

	/* optionally program memory clock spread spectrum */
	if (pi->mclk_ss) {
		struct radeon_atom_ss ss;
		u32 vco_freq = memory_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
			u32 reference_clock = rdev->clock.mpll.reference_freq;
			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
			u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
			u32 clk_v = 0x40000 * ss.percentage *
				(dividers.whole_fb_div + (dividers.frac_fb_div / 8)) / (clk_s * 10000);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clk_v);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clk_s);
		}
	}

	dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
					memory_clock);

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);

	/* the SMC expects big-endian register images */
	mclk->mclk770.mclk_value = cpu_to_be32(memory_clock);
	mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
	mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
	mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
	mclk->mclk770.vMPLL_SS = cpu_to_be32(mpll_ss1);
	mclk->mclk770.vMPLL_SS2 = cpu_to_be32(mpll_ss2);

	return 0;
}
284
/**
 * rv740_read_clock_registers - cache the current PLL/MCLK register state
 * @rdev: radeon device
 *
 * Snapshots the SPLL (engine clock), MPLL (memory clock), spread spectrum
 * and DLL control registers into pi->clk_regs.rv770.  The SMC state tables
 * built later (sclk/mclk/ACPI values) start from these cached values and
 * only modify the fields they own.
 */
void rv740_read_clock_registers(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	/* engine PLL */
	pi->clk_regs.rv770.cg_spll_func_cntl =
		RREG32(CG_SPLL_FUNC_CNTL);
	pi->clk_regs.rv770.cg_spll_func_cntl_2 =
		RREG32(CG_SPLL_FUNC_CNTL_2);
	pi->clk_regs.rv770.cg_spll_func_cntl_3 =
		RREG32(CG_SPLL_FUNC_CNTL_3);
	pi->clk_regs.rv770.cg_spll_spread_spectrum =
		RREG32(CG_SPLL_SPREAD_SPECTRUM);
	pi->clk_regs.rv770.cg_spll_spread_spectrum_2 =
		RREG32(CG_SPLL_SPREAD_SPECTRUM_2);

	/* memory PLL (address and data paths) */
	pi->clk_regs.rv770.mpll_ad_func_cntl =
		RREG32(MPLL_AD_FUNC_CNTL);
	pi->clk_regs.rv770.mpll_ad_func_cntl_2 =
		RREG32(MPLL_AD_FUNC_CNTL_2);
	pi->clk_regs.rv770.mpll_dq_func_cntl =
		RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clk_regs.rv770.mpll_dq_func_cntl_2 =
		RREG32(MPLL_DQ_FUNC_CNTL_2);
	pi->clk_regs.rv770.mclk_pwrmgt_cntl =
		RREG32(MCLK_PWRMGT_CNTL);
	pi->clk_regs.rv770.dll_cntl = RREG32(DLL_CNTL);
	pi->clk_regs.rv770.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clk_regs.rv770.mpll_ss2 = RREG32(MPLL_SS2);
}
314
/**
 * rv740_populate_smc_acpi_state - build the SMC ACPI (lowest power) state
 * @rdev: radeon device
 * @table: SMC state table to fill
 *
 * Derives the ACPI state from the boot-up initial state, then forces the
 * memory and engine PLLs into their bypass/reset configuration so the SMC
 * can drop to the lowest power level.  All three performance levels of the
 * ACPI state are made identical.  Returns 0.
 */
int rv740_populate_smc_acpi_state(struct radeon_device *rdev,
				  RV770_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 mpll_ad_func_cntl = pi->clk_regs.rv770.mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2 = pi->clk_regs.rv770.mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl = pi->clk_regs.rv770.mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2 = pi->clk_regs.rv770.mpll_dq_func_cntl_2;
	u32 spll_func_cntl = pi->clk_regs.rv770.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clk_regs.rv770.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = pi->clk_regs.rv770.cg_spll_func_cntl_3;
	u32 mclk_pwrmgt_cntl = pi->clk_regs.rv770.mclk_pwrmgt_cntl;
	u32 dll_cntl = pi->clk_regs.rv770.dll_cntl;

	/* start from the boot-up state and strip the DC (battery) flag */
	table->ACPIState = table->initialState;

	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc) {
		/* a dedicated ACPI voltage exists; PCIE gen2 only if supported */
		rv770_populate_vddc_value(rdev, pi->acpi_vddc,
					  &table->ACPIState.levels[0].vddc);
		table->ACPIState.levels[0].gen2PCIE =
			pi->pcie_gen2 ?
			pi->acpi_pcie_gen2 : 0;
		table->ACPIState.levels[0].gen2XSP =
			pi->acpi_pcie_gen2;
	} else {
		/* fall back to the lowest voltage in the table, force gen1 */
		rv770_populate_vddc_value(rdev, pi->min_vddc_in_table,
					  &table->ACPIState.levels[0].vddc);
		table->ACPIState.levels[0].gen2PCIE = 0;
	}

	/* put the memory PLL AD/DQ paths into reset; DQ additionally bypassed */
	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;

	mpll_dq_func_cntl_2 |= BYPASS | BIAS_GEN_PDNB | RESET_EN;

	/* reset all memory read clock pads ... */
	mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
			     MRDCKA1_RESET |
			     MRDCKB0_RESET |
			     MRDCKB1_RESET |
			     MRDCKC0_RESET |
			     MRDCKC1_RESET |
			     MRDCKD0_RESET |
			     MRDCKD1_RESET);

	/* ... and bypass their DLLs */
	dll_cntl |= (MRDCKA0_BYPASS |
		     MRDCKA1_BYPASS |
		     MRDCKB0_BYPASS |
		     MRDCKB1_BYPASS |
		     MRDCKC0_BYPASS |
		     MRDCKC1_BYPASS |
		     MRDCKD0_BYPASS |
		     MRDCKD1_BYPASS);

	/* engine PLL off: reset, sleep, bypass; mux select 4 (NOTE(review):
	 * presumably the bypass clock source — confirm against rv770d.h) */
	spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN;

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
	table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);

	table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;

	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);

	table->ACPIState.levels[0].sclk.sclk_value = 0;

	/* all three levels of the ACPI state are the same lowest level */
	table->ACPIState.levels[1] = table->ACPIState.levels[0];
	table->ACPIState.levels[2] = table->ACPIState.levels[0];

	rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);

	return 0;
}
396
397void rv740_enable_mclk_spread_spectrum(struct radeon_device *rdev,
398 bool enable)
399{
400 if (enable)
401 WREG32_P(MPLL_CNTL_MODE, SS_SSEN, ~SS_SSEN);
402 else
403 WREG32_P(MPLL_CNTL_MODE, 0, ~SS_SSEN);
404}
405
406u8 rv740_get_mclk_frequency_ratio(u32 memory_clock)
407{
408 u8 mc_para_index;
409
410 if ((memory_clock < 10000) || (memory_clock > 47500))
411 mc_para_index = 0x00;
412 else
413 mc_para_index = (u8)((memory_clock - 10000) / 2500);
414
415 return mc_para_index;
416}
diff --git a/drivers/gpu/drm/radeon/rv740d.h b/drivers/gpu/drm/radeon/rv740d.h
new file mode 100644
index 000000000000..fe5ab075dc17
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv740d.h
@@ -0,0 +1,117 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
#ifndef RV740_H
#define RV740_H

/* SPLL: engine (sclk) PLL control */
#define	CG_SPLL_FUNC_CNTL				0x600
#define		SPLL_RESET				(1 << 0)
#define		SPLL_SLEEP				(1 << 1)
#define		SPLL_BYPASS_EN				(1 << 3)
#define		SPLL_REF_DIV(x)				((x) << 4)
#define		SPLL_REF_DIV_MASK			(0x3f << 4)
#define		SPLL_PDIV_A(x)				((x) << 20)
#define		SPLL_PDIV_A_MASK			(0x7f << 20)
#define	CG_SPLL_FUNC_CNTL_2				0x604
#define		SCLK_MUX_SEL(x)				((x) << 0)
#define		SCLK_MUX_SEL_MASK			(0x1ff << 0)
#define	CG_SPLL_FUNC_CNTL_3				0x608
#define		SPLL_FB_DIV(x)				((x) << 0)
#define		SPLL_FB_DIV_MASK			(0x3ffffff << 0)
#define		SPLL_DITHEN				(1 << 28)

#define	MPLL_CNTL_MODE					0x61c
#define		SS_SSEN					(1 << 24)

/* MPLL address path control; field layout shared with the DQ path below */
#define	MPLL_AD_FUNC_CNTL				0x624
#define		CLKF(x)					((x) << 0)
#define		CLKF_MASK				(0x7f << 0)
#define		CLKR(x)					((x) << 7)
#define		CLKR_MASK				(0x1f << 7)
#define		CLKFRAC(x)				((x) << 12)
#define		CLKFRAC_MASK				(0x1f << 12)
#define		YCLK_POST_DIV(x)			((x) << 17)
#define		YCLK_POST_DIV_MASK			(3 << 17)
#define		IBIAS(x)				((x) << 20)
#define		IBIAS_MASK				(0x3ff << 20)
#define		RESET					(1 << 30)
#define		PDNB					(1 << 31)
#define	MPLL_AD_FUNC_CNTL_2				0x628
#define		BYPASS					(1 << 19)
#define		BIAS_GEN_PDNB				(1 << 24)
#define		RESET_EN				(1 << 25)
#define		VCO_MODE				(1 << 29)
#define	MPLL_DQ_FUNC_CNTL				0x62c
#define	MPLL_DQ_FUNC_CNTL_2				0x630

/* memory clock power management and per-pad DLL control */
#define	MCLK_PWRMGT_CNTL				0x648
#define		DLL_SPEED(x)				((x) << 0)
#define		DLL_SPEED_MASK				(0x1f << 0)
#	define	MPLL_PWRMGT_OFF				(1 << 5)
#	define	DLL_READY				(1 << 6)
#	define	MC_INT_CNTL				(1 << 7)
#	define	MRDCKA0_SLEEP				(1 << 8)
#	define	MRDCKA1_SLEEP				(1 << 9)
#	define	MRDCKB0_SLEEP				(1 << 10)
#	define	MRDCKB1_SLEEP				(1 << 11)
#	define	MRDCKC0_SLEEP				(1 << 12)
#	define	MRDCKC1_SLEEP				(1 << 13)
#	define	MRDCKD0_SLEEP				(1 << 14)
#	define	MRDCKD1_SLEEP				(1 << 15)
#	define	MRDCKA0_RESET				(1 << 16)
#	define	MRDCKA1_RESET				(1 << 17)
#	define	MRDCKB0_RESET				(1 << 18)
#	define	MRDCKB1_RESET				(1 << 19)
#	define	MRDCKC0_RESET				(1 << 20)
#	define	MRDCKC1_RESET				(1 << 21)
#	define	MRDCKD0_RESET				(1 << 22)
#	define	MRDCKD1_RESET				(1 << 23)
#	define	DLL_READY_READ				(1 << 24)
#	define	USE_DISPLAY_GAP				(1 << 25)
#	define	USE_DISPLAY_URGENT_NORMAL		(1 << 26)
#	define	MPLL_TURNOFF_D2				(1 << 28)
#define	DLL_CNTL					0x64c
#	define	MRDCKA0_BYPASS				(1 << 24)
#	define	MRDCKA1_BYPASS				(1 << 25)
#	define	MRDCKB0_BYPASS				(1 << 26)
#	define	MRDCKB1_BYPASS				(1 << 27)
#	define	MRDCKC0_BYPASS				(1 << 28)
#	define	MRDCKC1_BYPASS				(1 << 29)
#	define	MRDCKD0_BYPASS				(1 << 30)
#	define	MRDCKD1_BYPASS				(1 << 31)

/* engine PLL spread spectrum */
#define	CG_SPLL_SPREAD_SPECTRUM				0x790
#define		SSEN					(1 << 0)
#define		CLK_S(x)				((x) << 4)
#define		CLK_S_MASK				(0xfff << 4)
#define	CG_SPLL_SPREAD_SPECTRUM_2			0x794
#define		CLK_V(x)				((x) << 0)
#define		CLK_V_MASK				(0x3ffffff << 0)

/* memory PLL spread spectrum */
#define	MPLL_SS1					0x85c
#define		CLKV(x)					((x) << 0)
#define		CLKV_MASK				(0x3ffffff << 0)
#define	MPLL_SS2					0x860
#define		CLKS(x)					((x) << 0)
#define		CLKS_MASK				(0xfff << 0)

#endif
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
new file mode 100644
index 000000000000..7f6fa6221234
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -0,0 +1,2462 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "rv770d.h"
28#include "r600_dpm.h"
29#include "rv770_dpm.h"
30#include "cypress_dpm.h"
31#include "atom.h"
32
/* MC arbiter frequency slots; one per performance level (F0 = boot) */
#define MC_CG_ARB_FREQ_F0		0x0a
#define MC_CG_ARB_FREQ_F1		0x0b
#define MC_CG_ARB_FREQ_F2		0x0c
#define MC_CG_ARB_FREQ_F3		0x0d

/* MC sequencer DRAM configuration selects (low power vs normal) */
#define MC_CG_SEQ_DRAMCONF_S0		0x05
#define MC_CG_SEQ_DRAMCONF_S1		0x06

/* PCIE reference bus clock in 10 kHz units */
#define PCIE_BUS_CLK			10000
#define TCLK				(PCIE_BUS_CLK / 10)

/* one past the last valid SMC SRAM address */
#define SMC_RAM_END			0xC000
45
46struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps)
47{
48 struct rv7xx_ps *ps = rps->ps_priv;
49
50 return ps;
51}
52
53struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev)
54{
55 struct rv7xx_power_info *pi = rdev->pm.dpm.priv;
56
57 return pi;
58}
59
60struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev)
61{
62 struct evergreen_power_info *pi = rdev->pm.dpm.priv;
63
64 return pi;
65}
66
/**
 * rv770_enable_bif_dynamic_pcie_gen2 - toggle hardware PCIE gen2 switching
 * @rdev: radeon device
 * @enable: enable hardware-controlled link speed switching
 *
 * When disabling, the gen2 strap is only cleared if the board did not boot
 * in gen2 mode.  The register is written back only when the link partner
 * has advertised or used gen2.
 */
static void rv770_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
					       bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 tmp;

	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (enable) {
		/* hand link-speed voltage control to hardware */
		tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
		tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
		tmp |= LC_GEN2_EN_STRAP;
	} else {
		/* keep gen2 if that is how we booted */
		if (!pi->boot_in_gen2) {
			tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
			tmp &= ~LC_GEN2_EN_STRAP;
		}
	}
	/* only touch the link when the other side can do gen2 */
	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);

}
89
90static void rv770_enable_l0s(struct radeon_device *rdev)
91{
92 u32 tmp;
93
94 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L0S_INACTIVITY_MASK;
95 tmp |= LC_L0S_INACTIVITY(3);
96 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
97}
98
99static void rv770_enable_l1(struct radeon_device *rdev)
100{
101 u32 tmp;
102
103 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL);
104 tmp &= ~LC_L1_INACTIVITY_MASK;
105 tmp |= LC_L1_INACTIVITY(4);
106 tmp &= ~LC_PMI_TO_L1_DIS;
107 tmp &= ~LC_ASPM_TO_L1_DIS;
108 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
109}
110
111static void rv770_enable_pll_sleep_in_l1(struct radeon_device *rdev)
112{
113 u32 tmp;
114
115 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L1_INACTIVITY_MASK;
116 tmp |= LC_L1_INACTIVITY(8);
117 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
118
119 /* NOTE, this is a PCIE indirect reg, not PCIE PORT */
120 tmp = RREG32_PCIE(PCIE_P_CNTL);
121 tmp |= P_PLL_PWRDN_IN_L1L23;
122 tmp &= ~P_PLL_BUF_PDNB;
123 tmp &= ~P_PLL_PDNB;
124 tmp |= P_ALLOW_PRX_FRONTEND_SHUTOFF;
125 WREG32_PCIE(PCIE_P_CNTL, tmp);
126}
127
128static void rv770_gfx_clock_gating_enable(struct radeon_device *rdev,
129 bool enable)
130{
131 if (enable)
132 WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
133 else {
134 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
135 WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
136 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
137 RREG32(GB_TILING_CONFIG);
138 }
139}
140
141static void rv770_mg_clock_gating_enable(struct radeon_device *rdev,
142 bool enable)
143{
144 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
145
146 if (enable) {
147 u32 mgcg_cgtt_local0;
148
149 if (rdev->family == CHIP_RV770)
150 mgcg_cgtt_local0 = RV770_MGCGTTLOCAL0_DFLT;
151 else
152 mgcg_cgtt_local0 = RV7XX_MGCGTTLOCAL0_DFLT;
153
154 WREG32(CG_CGTT_LOCAL_0, mgcg_cgtt_local0);
155 WREG32(CG_CGTT_LOCAL_1, (RV770_MGCGTTLOCAL1_DFLT & 0xFFFFCFFF));
156
157 if (pi->mgcgtssm)
158 WREG32(CGTS_SM_CTRL_REG, RV770_MGCGCGTSSMCTRL_DFLT);
159 } else {
160 WREG32(CG_CGTT_LOCAL_0, 0xFFFFFFFF);
161 WREG32(CG_CGTT_LOCAL_1, 0xFFFFCFFF);
162 }
163}
164
165void rv770_restore_cgcg(struct radeon_device *rdev)
166{
167 bool dpm_en = false, cg_en = false;
168
169 if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
170 dpm_en = true;
171 if (RREG32(SCLK_PWRMGT_CNTL) & DYN_GFX_CLK_OFF_EN)
172 cg_en = true;
173
174 if (dpm_en && !cg_en)
175 WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
176}
177
/* Start dynamic power management: un-gate sclk and mclk power management,
 * then set the global enable bit. */
static void rv770_start_dpm(struct radeon_device *rdev)
{
	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);

	WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);

	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
}
186
/* Stop dynamic power management.  The SMC is first asked to drop to the
 * lowest level; failure is logged but shutdown proceeds regardless. */
void rv770_stop_dpm(struct radeon_device *rdev)
{
	PPSMC_Result result;

	result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);

	if (result != PPSMC_Result_OK)
		DRM_ERROR("Could not force DPM to low.\n");

	/* disable global power management, then gate sclk/mclk management */
	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);

	WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);

	WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}
202
203bool rv770_dpm_enabled(struct radeon_device *rdev)
204{
205 if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
206 return true;
207 else
208 return false;
209}
210
211void rv770_enable_thermal_protection(struct radeon_device *rdev,
212 bool enable)
213{
214 if (enable)
215 WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
216 else
217 WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
218}
219
/* Enable static power management (used for the ACPI state). */
void rv770_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}
224
225u8 rv770_get_seq_value(struct radeon_device *rdev,
226 struct rv7xx_pl *pl)
227{
228 return (pl->flags & ATOM_PPLIB_R600_FLAGS_LOWPOWER) ?
229 MC_CG_SEQ_DRAMCONF_S0 : MC_CG_SEQ_DRAMCONF_S1;
230}
231
/* Read one SMC soft register (a dword at soft_regs_start + reg_offset in
 * SMC SRAM).  Returns 0 on success or a negative error code. */
int rv770_read_smc_soft_register(struct radeon_device *rdev,
				 u16 reg_offset, u32 *value)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	return rv770_read_smc_sram_dword(rdev,
					 pi->soft_regs_start + reg_offset,
					 value, pi->sram_end);
}
241
/* Write one SMC soft register (a dword at soft_regs_start + reg_offset in
 * SMC SRAM).  Returns 0 on success or a negative error code. */
int rv770_write_smc_soft_register(struct radeon_device *rdev,
				  u16 reg_offset, u32 value)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	return rv770_write_smc_sram_dword(rdev,
					  pi->soft_regs_start + reg_offset,
					  value, pi->sram_end);
}
251
252int rv770_populate_smc_t(struct radeon_device *rdev,
253 struct radeon_ps *radeon_state,
254 RV770_SMC_SWSTATE *smc_state)
255{
256 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
257 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
258 int i;
259 int a_n;
260 int a_d;
261 u8 l[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
262 u8 r[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
263 u32 a_t;
264
265 l[0] = 0;
266 r[2] = 100;
267
268 a_n = (int)state->medium.sclk * pi->lmp +
269 (int)state->low.sclk * (R600_AH_DFLT - pi->rlp);
270 a_d = (int)state->low.sclk * (100 - (int)pi->rlp) +
271 (int)state->medium.sclk * pi->lmp;
272
273 l[1] = (u8)(pi->lmp - (int)pi->lmp * a_n / a_d);
274 r[0] = (u8)(pi->rlp + (100 - (int)pi->rlp) * a_n / a_d);
275
276 a_n = (int)state->high.sclk * pi->lhp + (int)state->medium.sclk *
277 (R600_AH_DFLT - pi->rmp);
278 a_d = (int)state->medium.sclk * (100 - (int)pi->rmp) +
279 (int)state->high.sclk * pi->lhp;
280
281 l[2] = (u8)(pi->lhp - (int)pi->lhp * a_n / a_d);
282 r[1] = (u8)(pi->rmp + (100 - (int)pi->rmp) * a_n / a_d);
283
284 for (i = 0; i < (RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1); i++) {
285 a_t = CG_R(r[i] * pi->bsp / 200) | CG_L(l[i] * pi->bsp / 200);
286 smc_state->levels[i].aT = cpu_to_be32(a_t);
287 }
288
289 a_t = CG_R(r[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1] * pi->pbsp / 200) |
290 CG_L(l[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1] * pi->pbsp / 200);
291
292 smc_state->levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1].aT =
293 cpu_to_be32(a_t);
294
295 return 0;
296}
297
298int rv770_populate_smc_sp(struct radeon_device *rdev,
299 struct radeon_ps *radeon_state,
300 RV770_SMC_SWSTATE *smc_state)
301{
302 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
303 int i;
304
305 for (i = 0; i < (RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1); i++)
306 smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
307
308 smc_state->levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1].bSP =
309 cpu_to_be32(pi->psp);
310
311 return 0;
312}
313
314static void rv770_calculate_fractional_mpll_feedback_divider(u32 memory_clock,
315 u32 reference_clock,
316 bool gddr5,
317 struct atom_clock_dividers *dividers,
318 u32 *clkf,
319 u32 *clkfrac)
320{
321 u32 post_divider, reference_divider, feedback_divider8;
322 u32 fyclk;
323
324 if (gddr5)
325 fyclk = (memory_clock * 8) / 2;
326 else
327 fyclk = (memory_clock * 4) / 2;
328
329 post_divider = dividers->post_div;
330 reference_divider = dividers->ref_div;
331
332 feedback_divider8 =
333 (8 * fyclk * reference_divider * post_divider) / reference_clock;
334
335 *clkf = feedback_divider8 / 8;
336 *clkfrac = feedback_divider8 % 8;
337}
338
339static int rv770_encode_yclk_post_div(u32 postdiv, u32 *encoded_postdiv)
340{
341 int ret = 0;
342
343 switch (postdiv) {
344 case 1:
345 *encoded_postdiv = 0;
346 break;
347 case 2:
348 *encoded_postdiv = 1;
349 break;
350 case 4:
351 *encoded_postdiv = 2;
352 break;
353 case 8:
354 *encoded_postdiv = 3;
355 break;
356 case 16:
357 *encoded_postdiv = 4;
358 break;
359 default:
360 ret = -EINVAL;
361 break;
362 }
363
364 return ret;
365}
366
367u32 rv770_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf)
368{
369 if (clkf <= 0x10)
370 return 0x4B;
371 if (clkf <= 0x19)
372 return 0x5B;
373 if (clkf <= 0x21)
374 return 0x2B;
375 if (clkf <= 0x27)
376 return 0x6C;
377 if (clkf <= 0x31)
378 return 0x9D;
379 return 0xC6;
380}
381
/**
 * rv770_populate_mclk_value - build the SMC MCLK register values (rv770 path)
 * @rdev: radeon device
 * @engine_clock: engine clock (unused on this path)
 * @memory_clock: target memory clock
 * @mclk: out, SMC mclk register block (values stored big-endian)
 *
 * Programs the MPLL address path from AtomBIOS dividers; for GDDR5 the DQ
 * path is programmed the same way.  Returns 0 on success or a negative
 * error code from divider lookup/encoding.
 */
static int rv770_populate_mclk_value(struct radeon_device *rdev,
				     u32 engine_clock, u32 memory_clock,
				     RV7XX_SMC_MCLK_VALUE *mclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	/* hardware CLKR encodings for ref_div 1..5 (index ref_div - 1) */
	u8 encoded_reference_dividers[] = { 0, 16, 17, 20, 21 };
	u32 mpll_ad_func_cntl =
		pi->clk_regs.rv770.mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2 =
		pi->clk_regs.rv770.mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl =
		pi->clk_regs.rv770.mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2 =
		pi->clk_regs.rv770.mpll_dq_func_cntl_2;
	u32 mclk_pwrmgt_cntl =
		pi->clk_regs.rv770.mclk_pwrmgt_cntl;
	u32 dll_cntl = pi->clk_regs.rv770.dll_cntl;
	struct atom_clock_dividers dividers;
	u32 reference_clock = rdev->clock.mpll.reference_freq;
	u32 clkf, clkfrac;
	u32 postdiv_yclk;
	u32 ibias;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
					     memory_clock, false, &dividers);
	if (ret)
		return ret;

	/* only ref_div 1..5 has a CLKR encoding in the table above */
	if ((dividers.ref_div < 1) || (dividers.ref_div > 5))
		return -EINVAL;

	rv770_calculate_fractional_mpll_feedback_divider(memory_clock, reference_clock,
							 pi->mem_gddr5,
							 &dividers, &clkf, &clkfrac);

	ret = rv770_encode_yclk_post_div(dividers.post_div, &postdiv_yclk);
	if (ret)
		return ret;

	ibias = rv770_map_clkf_to_ibias(rdev, clkf);

	/* program the MPLL address path */
	mpll_ad_func_cntl &= ~(CLKR_MASK |
			       YCLK_POST_DIV_MASK |
			       CLKF_MASK |
			       CLKFRAC_MASK |
			       IBIAS_MASK);
	mpll_ad_func_cntl |= CLKR(encoded_reference_dividers[dividers.ref_div - 1]);
	mpll_ad_func_cntl |= YCLK_POST_DIV(postdiv_yclk);
	mpll_ad_func_cntl |= CLKF(clkf);
	mpll_ad_func_cntl |= CLKFRAC(clkfrac);
	mpll_ad_func_cntl |= IBIAS(ibias);

	if (dividers.vco_mode)
		mpll_ad_func_cntl_2 |= VCO_MODE;
	else
		mpll_ad_func_cntl_2 &= ~VCO_MODE;

	if (pi->mem_gddr5) {
		/* GDDR5: program the DQ path identically to the AD path */
		rv770_calculate_fractional_mpll_feedback_divider(memory_clock,
								 reference_clock,
								 pi->mem_gddr5,
								 &dividers, &clkf, &clkfrac);

		ibias = rv770_map_clkf_to_ibias(rdev, clkf);

		ret = rv770_encode_yclk_post_div(dividers.post_div, &postdiv_yclk);
		if (ret)
			return ret;

		mpll_dq_func_cntl &= ~(CLKR_MASK |
				       YCLK_POST_DIV_MASK |
				       CLKF_MASK |
				       CLKFRAC_MASK |
				       IBIAS_MASK);
		mpll_dq_func_cntl |= CLKR(encoded_reference_dividers[dividers.ref_div - 1]);
		mpll_dq_func_cntl |= YCLK_POST_DIV(postdiv_yclk);
		mpll_dq_func_cntl |= CLKF(clkf);
		mpll_dq_func_cntl |= CLKFRAC(clkfrac);
		mpll_dq_func_cntl |= IBIAS(ibias);

		if (dividers.vco_mode)
			mpll_dq_func_cntl_2 |= VCO_MODE;
		else
			mpll_dq_func_cntl_2 &= ~VCO_MODE;
	}

	/* store big-endian for the SMC */
	mclk->mclk770.mclk_value = cpu_to_be32(memory_clock);
	mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
	mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
	mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);

	return 0;
}
479
/**
 * rv770_populate_sclk_value - build the SMC SCLK register values
 * @rdev: radeon device
 * @engine_clock: target engine clock
 * @sclk: out, SMC sclk register block (values stored big-endian)
 *
 * Computes the SPLL dividers from AtomBIOS, the fixed-point feedback
 * divider, and optionally the engine-clock spread spectrum parameters.
 * Returns 0 on success or a negative error from the divider lookup.
 */
static int rv770_populate_sclk_value(struct radeon_device *rdev,
				     u32 engine_clock,
				     RV770_SMC_SCLK_VALUE *sclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl =
		pi->clk_regs.rv770.cg_spll_func_cntl;
	u32 spll_func_cntl_2 =
		pi->clk_regs.rv770.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 =
		pi->clk_regs.rv770.cg_spll_func_cntl_3;
	u32 cg_spll_spread_spectrum =
		pi->clk_regs.rv770.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 =
		pi->clk_regs.rv770.cg_spll_spread_spectrum_2;
	u64 tmp;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider, post_divider;
	u32 fbdiv;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;

	/* post_div packs hi/lo nibbles; effective divider is hi + lo + 2 */
	if (dividers.enable_post_div)
		post_divider = (0x0f & (dividers.post_div >> 4)) + (0x0f & dividers.post_div) + 2;
	else
		post_divider = 1;

	/* feedback divider scaled by 16384 (NOTE(review): presumably the
	 * SPLL_FB_DIV fixed-point format — confirm against rv770d.h) */
	tmp = (u64) engine_clock * reference_divider * post_divider * 16384;
	do_div(tmp, reference_clock);
	fbdiv = (u32) tmp;

	if (dividers.enable_post_div)
		spll_func_cntl |= SPLL_DIVEN;
	else
		spll_func_cntl &= ~SPLL_DIVEN;
	spll_func_cntl &= ~(SPLL_HILEN_MASK | SPLL_LOLEN_MASK | SPLL_REF_DIV_MASK);
	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
	spll_func_cntl |= SPLL_HILEN((dividers.post_div >> 4) & 0xf);
	spll_func_cntl |= SPLL_LOLEN(dividers.post_div & 0xf);

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(2);

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	if (pi->sclk_ss) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * post_divider;

		/* only program SS if the BIOS has parameters for this VCO freq */
		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLKS_MASK;
			cg_spll_spread_spectrum |= CLKS(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLKV_MASK;
			cg_spll_spread_spectrum_2 |= CLKV(clk_v);
		}
	}

	/* store big-endian for the SMC */
	sclk->sclk_value = cpu_to_be32(engine_clock);
	sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
	sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
	sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
	sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(cg_spll_spread_spectrum);
	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(cg_spll_spread_spectrum_2);

	return 0;
}
561
562int rv770_populate_vddc_value(struct radeon_device *rdev, u16 vddc,
563 RV770_SMC_VOLTAGE_VALUE *voltage)
564{
565 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
566 int i;
567
568 if (!pi->voltage_control) {
569 voltage->index = 0;
570 voltage->value = 0;
571 return 0;
572 }
573
574 for (i = 0; i < pi->valid_vddc_entries; i++) {
575 if (vddc <= pi->vddc_table[i].vddc) {
576 voltage->index = pi->vddc_table[i].vddc_index;
577 voltage->value = cpu_to_be16(vddc);
578 break;
579 }
580 }
581
582 if (i == pi->valid_vddc_entries)
583 return -EINVAL;
584
585 return 0;
586}
587
588int rv770_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
589 RV770_SMC_VOLTAGE_VALUE *voltage)
590{
591 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
592
593 if (!pi->mvdd_control) {
594 voltage->index = MVDD_HIGH_INDEX;
595 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
596 return 0;
597 }
598
599 if (mclk <= pi->mvdd_split_frequency) {
600 voltage->index = MVDD_LOW_INDEX;
601 voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
602 } else {
603 voltage->index = MVDD_HIGH_INDEX;
604 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
605 }
606
607 return 0;
608}
609
/**
 * rv770_convert_power_level_to_smc - translate one performance level to SMC form
 * @rdev: radeon device
 * @pl: driver performance level (sclk/mclk/vddc/flags)
 * @level: out, SMC hardware performance level
 * @watermark_level: display watermark to tag the level with
 *
 * Fills PCIE gen2/backbias flags, then dispatches to the family-specific
 * sclk/mclk populate helpers (rv740, rv730/rv710, or rv770), and finally
 * the vddc/mvdd voltage values.  Returns 0 or the first failing helper's
 * error code.
 */
static int rv770_convert_power_level_to_smc(struct radeon_device *rdev,
					    struct rv7xx_pl *pl,
					    RV770_SMC_HW_PERFORMANCE_LEVEL *level,
					    u8 watermark_level)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	int ret;

	/* gen2 only if the asic supports it and the level requests it */
	level->gen2PCIE = pi->pcie_gen2 ?
		((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
	level->gen2XSP = (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0;
	level->backbias = (pl->flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? 1 : 0;
	level->displayWatermark = watermark_level;

	/* family-specific engine clock programming */
	if (rdev->family == CHIP_RV740)
		ret = rv740_populate_sclk_value(rdev, pl->sclk,
						&level->sclk);
	else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
		ret = rv730_populate_sclk_value(rdev, pl->sclk,
						&level->sclk);
	else
		ret = rv770_populate_sclk_value(rdev, pl->sclk,
						&level->sclk);
	if (ret)
		return ret;

	if (rdev->family == CHIP_RV740) {
		if (pi->mem_gddr5) {
			/* strobe mode below the threshold; EDC above its threshold */
			if (pl->mclk <= pi->mclk_strobe_mode_threshold)
				level->strobeMode =
					rv740_get_mclk_frequency_ratio(pl->mclk) | 0x10;
			else
				level->strobeMode = 0;

			if (pl->mclk > pi->mclk_edc_enable_threshold)
				level->mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
			else
				level->mcFlags = 0;
		}
		ret = rv740_populate_mclk_value(rdev, pl->sclk,
						pl->mclk, &level->mclk);
	} else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
		ret = rv730_populate_mclk_value(rdev, pl->sclk,
						pl->mclk, &level->mclk);
	else
		ret = rv770_populate_mclk_value(rdev, pl->sclk,
						pl->mclk, &level->mclk);
	if (ret)
		return ret;

	ret = rv770_populate_vddc_value(rdev, pl->vddc,
					&level->vddc);
	if (ret)
		return ret;

	ret = rv770_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);

	return ret;
}
669
/**
 * rv770_convert_power_state_to_smc - translate a full power state to SMC form
 * @rdev: radeon device
 * @radeon_state: driver power state (low/medium/high levels)
 * @smc_state: out, SMC software state
 *
 * Converts the three performance levels, assigns each its MC arbiter slot
 * and sequencer value, then fills the sample periods (sp) and switch
 * thresholds (t).  Returns 0 or the first failing step's error code.
 */
static int rv770_convert_power_state_to_smc(struct radeon_device *rdev,
					    struct radeon_ps *radeon_state,
					    RV770_SMC_SWSTATE *smc_state)
{
	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
	int ret;

	/* mark the state as valid on battery unless explicitly disallowed */
	if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;

	ret = rv770_convert_power_level_to_smc(rdev,
					       &state->low,
					       &smc_state->levels[0],
					       PPSMC_DISPLAY_WATERMARK_LOW);
	if (ret)
		return ret;

	ret = rv770_convert_power_level_to_smc(rdev,
					       &state->medium,
					       &smc_state->levels[1],
					       PPSMC_DISPLAY_WATERMARK_LOW);
	if (ret)
		return ret;

	ret = rv770_convert_power_level_to_smc(rdev,
					       &state->high,
					       &smc_state->levels[2],
					       PPSMC_DISPLAY_WATERMARK_HIGH);
	if (ret)
		return ret;

	/* F1..F3 arbiter slots; F0 is reserved for the boot state */
	smc_state->levels[0].arbValue = MC_CG_ARB_FREQ_F1;
	smc_state->levels[1].arbValue = MC_CG_ARB_FREQ_F2;
	smc_state->levels[2].arbValue = MC_CG_ARB_FREQ_F3;

	smc_state->levels[0].seqValue = rv770_get_seq_value(rdev,
							    &state->low);
	smc_state->levels[1].seqValue = rv770_get_seq_value(rdev,
							    &state->medium);
	smc_state->levels[2].seqValue = rv770_get_seq_value(rdev,
							    &state->high);

	rv770_populate_smc_sp(rdev, radeon_state, smc_state);

	return rv770_populate_smc_t(rdev, radeon_state, smc_state);

}
717
718u32 rv770_calculate_memory_refresh_rate(struct radeon_device *rdev,
719 u32 engine_clock)
720{
721 u32 dram_rows;
722 u32 dram_refresh_rate;
723 u32 mc_arb_rfsh_rate;
724 u32 tmp;
725
726 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
727 dram_rows = 1 << (tmp + 10);
728 tmp = RREG32(MC_SEQ_MISC0) & 3;
729 dram_refresh_rate = 1 << (tmp + 3);
730 mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
731
732 return mc_arb_rfsh_rate;
733}
734
/**
 * rv770_program_memory_timing_parameters - program MC arbiter timings
 * @rdev: radeon device
 * @radeon_state: power state whose levels drive the timings
 *
 * Clamps the high clock, sets DRAM timings via AtomBIOS, then programs
 * per-state sqm ratios and refresh rates.  NOTE(review): divides by
 * boot_sclk and the state's sclks — assumes all are non-zero; confirm
 * callers guarantee this.
 */
static void rv770_program_memory_timing_parameters(struct radeon_device *rdev,
						   struct radeon_ps *radeon_state)
{
	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 sqm_ratio;
	u32 arb_refresh_rate;
	u32 high_clock;

	/* clamp high to low * 0xFF/0x40 (NOTE(review): presumably keeps the
	 * 64 * high / sclk ratios below within field width — confirm) */
	if (state->high.sclk < (state->low.sclk * 0xFF / 0x40))
		high_clock = state->high.sclk;
	else
		high_clock = (state->low.sclk * 0xFF / 0x40);

	radeon_atom_set_engine_dram_timings(rdev, high_clock,
					    state->high.mclk);

	/* STATE0 = boot, STATE1..3 = low/medium/high */
	sqm_ratio =
		STATE0(64 * high_clock / pi->boot_sclk) |
		STATE1(64 * high_clock / state->low.sclk) |
		STATE2(64 * high_clock / state->medium.sclk) |
		STATE3(64 * high_clock / state->high.sclk);
	WREG32(MC_ARB_SQM_RATIO, sqm_ratio);

	arb_refresh_rate =
		POWERMODE0(rv770_calculate_memory_refresh_rate(rdev, pi->boot_sclk)) |
		POWERMODE1(rv770_calculate_memory_refresh_rate(rdev, state->low.sclk)) |
		POWERMODE2(rv770_calculate_memory_refresh_rate(rdev, state->medium.sclk)) |
		POWERMODE3(rv770_calculate_memory_refresh_rate(rdev, state->high.sclk));
	WREG32(MC_ARB_RFSH_RATE, arb_refresh_rate);
}
766
767void rv770_enable_backbias(struct radeon_device *rdev,
768 bool enable)
769{
770 if (enable)
771 WREG32_P(GENERAL_PWRMGT, BACKBIAS_PAD_EN, ~BACKBIAS_PAD_EN);
772 else
773 WREG32_P(GENERAL_PWRMGT, 0, ~(BACKBIAS_VALUE | BACKBIAS_PAD_EN));
774}
775
/*
 * rv770_enable_spread_spectrum - toggle engine/memory clock spread spectrum
 * @rdev: radeon device
 * @enable: enable or disable spread spectrum
 *
 * On enable, dynamic SS is turned on only if the platform reports sclk SS
 * support; mclk SS is handled only on RV740.  On disable, both the SPLL
 * and MPLL spread-spectrum enables are cleared unconditionally.
 */
static void rv770_enable_spread_spectrum(struct radeon_device *rdev,
                                         bool enable)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);

        if (enable) {
                if (pi->sclk_ss)
                        WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);

                if (pi->mclk_ss) {
                        /* only RV740 has the mclk SS programming path here */
                        if (rdev->family == CHIP_RV740)
                                rv740_enable_mclk_spread_spectrum(rdev, true);
                }
        } else {
                WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);

                WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);

                WREG32_P(CG_MPLL_SPREAD_SPECTRUM, 0, ~SSEN);

                if (rdev->family == CHIP_RV740)
                        rv740_enable_mclk_spread_spectrum(rdev, false);
        }
}
800
/*
 * rv770_program_mpll_timing_parameters - program MPLL lock/reset times
 * @rdev: radeon device
 *
 * Only applies to RV770 boards with non-GDDR5 memory; the lock time is
 * scaled by the reference divider.
 */
static void rv770_program_mpll_timing_parameters(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);

        if ((rdev->family == CHIP_RV770) && !pi->mem_gddr5) {
                WREG32(MPLL_TIME,
                       (MPLL_LOCK_TIME(R600_MPLLLOCKTIME_DFLT * pi->ref_div) |
                        MPLL_RESET_TIME(R600_MPLLRESETTIME_DFLT)));
        }
}
811
/*
 * rv770_setup_bsp - compute and program the BSP/BSU sampling periods
 * @rdev: radeon device
 *
 * Derives the (bsp, bsu) and (pbsp, pbsu) pairs from the ASI/PASI
 * intervals and the reference clock, caches the packed dsp/psp values
 * in the power info, and writes the display value to CG_BSP.
 * NOTE(review): pi->psp is computed here but not written to a register
 * in this function — presumably consumed elsewhere; confirm against
 * the rest of the driver.
 */
void rv770_setup_bsp(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        u32 xclk = radeon_get_xclk(rdev);

        r600_calculate_u_and_p(pi->asi,
                               xclk,
                               16,
                               &pi->bsp,
                               &pi->bsu);

        r600_calculate_u_and_p(pi->pasi,
                               xclk,
                               16,
                               &pi->pbsp,
                               &pi->pbsu);

        /* pack period/unit into the register layouts */
        pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
        pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);

        WREG32(CG_BSP, pi->dsp);

}
835
/* Program the default GICST interval into CG_GIT, leaving other fields. */
void rv770_program_git(struct radeon_device *rdev)
{
        WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
}
840
/*
 * rv770_program_tp - program thermal controller UTC/DTC tables and trend mode
 * @rdev: radeon device
 *
 * Writes the up/down temperature coefficient pairs into the CG_FFCT_n
 * registers, then selects the FIR trend behavior.  td is fixed to
 * R600_TD_DFLT here, so which of the branches below executes is
 * determined entirely by that default's value.
 */
void rv770_program_tp(struct radeon_device *rdev)
{
        int i;
        enum r600_td td = R600_TD_DFLT;

        for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
                WREG32(CG_FFCT_0 + (i * 4), (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));

        if (td == R600_TD_AUTO)
                WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
        else
                WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
        if (td == R600_TD_UP)
                WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
        if (td == R600_TD_DOWN)
                WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}
858
/* Program the default thermal protection counter into CG_TPC. */
void rv770_program_tpp(struct radeon_device *rdev)
{
        WREG32(CG_TPC, R600_TPC_DFLT);
}
863
/* Program the default SST/SSTU sampling values into CG_SSP. */
void rv770_program_sstp(struct radeon_device *rdev)
{
        WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}
868
/* Enable SPLL divider synchronization in SPLL_CNTL_MODE. */
void rv770_program_engine_speed_parameters(struct radeon_device *rdev)
{
        WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
}
873
874static void rv770_enable_display_gap(struct radeon_device *rdev)
875{
876 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
877
878 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
879 tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
880 DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
881 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
882}
883
/* Program the cached voltage-response control value into CG_FTV. */
void rv770_program_vc(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);

        WREG32(CG_FTV, pi->vrc);
}
890
/* Clear CG_FTV; counterpart of rv770_program_vc(). */
void rv770_clear_vc(struct radeon_device *rdev)
{
        WREG32(CG_FTV, 0);
}
895
896int rv770_upload_firmware(struct radeon_device *rdev)
897{
898 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
899 int ret;
900
901 rv770_reset_smc(rdev);
902 rv770_stop_smc_clock(rdev);
903
904 ret = rv770_load_smc_ucode(rdev, pi->sram_end);
905 if (ret)
906 return ret;
907
908 return 0;
909}
910
/*
 * rv770_populate_smc_acpi_state - build the ACPI (lowest-power) SMC state
 * @rdev: radeon device
 * @table: SMC state table being filled in
 *
 * Starts from a copy of the initial state, then overrides it with the
 * ACPI configuration: minimum VDDC, PCIe gen settings, and PLL/MCLK
 * registers placed into reset/bypass so both clocks are effectively off
 * (mclk_value and sclk_value are forced to 0).  Levels 1 and 2 are
 * duplicated from level 0.  Always returns 0.
 */
static int rv770_populate_smc_acpi_state(struct radeon_device *rdev,
                                         RV770_SMC_STATETABLE *table)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);

        /* start from the cached boot-time clock register values */
        u32 mpll_ad_func_cntl =
                pi->clk_regs.rv770.mpll_ad_func_cntl;
        u32 mpll_ad_func_cntl_2 =
                pi->clk_regs.rv770.mpll_ad_func_cntl_2;
        u32 mpll_dq_func_cntl =
                pi->clk_regs.rv770.mpll_dq_func_cntl;
        u32 mpll_dq_func_cntl_2 =
                pi->clk_regs.rv770.mpll_dq_func_cntl_2;
        u32 spll_func_cntl =
                pi->clk_regs.rv770.cg_spll_func_cntl;
        u32 spll_func_cntl_2 =
                pi->clk_regs.rv770.cg_spll_func_cntl_2;
        u32 spll_func_cntl_3 =
                pi->clk_regs.rv770.cg_spll_func_cntl_3;
        u32 mclk_pwrmgt_cntl;
        u32 dll_cntl;

        table->ACPIState = table->initialState;

        /* ACPI state is an AC state */
        table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;

        if (pi->acpi_vddc) {
                rv770_populate_vddc_value(rdev, pi->acpi_vddc,
                                          &table->ACPIState.levels[0].vddc);
                /* gen2 PCIe only if both the board and ACPI allow it */
                if (pi->pcie_gen2) {
                        if (pi->acpi_pcie_gen2)
                                table->ACPIState.levels[0].gen2PCIE = 1;
                        else
                                table->ACPIState.levels[0].gen2PCIE = 0;
                } else
                        table->ACPIState.levels[0].gen2PCIE = 0;
                if (pi->acpi_pcie_gen2)
                        table->ACPIState.levels[0].gen2XSP = 1;
                else
                        table->ACPIState.levels[0].gen2XSP = 0;
        } else {
                /* no ACPI vddc: fall back to the table minimum */
                rv770_populate_vddc_value(rdev, pi->min_vddc_in_table,
                                          &table->ACPIState.levels[0].vddc);
                table->ACPIState.levels[0].gen2PCIE = 0;
        }


        /* park the memory PLLs in reset with bias generators powered down */
        mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;

        mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;

        /* hold every memory read clock in reset */
        mclk_pwrmgt_cntl = (MRDCKA0_RESET |
                            MRDCKA1_RESET |
                            MRDCKB0_RESET |
                            MRDCKB1_RESET |
                            MRDCKC0_RESET |
                            MRDCKC1_RESET |
                            MRDCKD0_RESET |
                            MRDCKD1_RESET);

        dll_cntl = 0xff000000;

        /* SPLL off: reset + sleep + bypass, mux select 4 */
        spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN;

        spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
        spll_func_cntl_2 |= SCLK_MUX_SEL(4);

        /* SMC expects big-endian register images */
        table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
        table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
        table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
        table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);

        table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
        table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);

        table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;

        table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
        table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
        table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);

        table->ACPIState.levels[0].sclk.sclk_value = 0;

        rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);

        /* all three levels identical in the ACPI state */
        table->ACPIState.levels[1] = table->ACPIState.levels[0];
        table->ACPIState.levels[2] = table->ACPIState.levels[0];

        return 0;
}
1001
1002int rv770_populate_initial_mvdd_value(struct radeon_device *rdev,
1003 RV770_SMC_VOLTAGE_VALUE *voltage)
1004{
1005 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1006
1007 if ((pi->s0_vid_lower_smio_cntl & pi->mvdd_mask_low) ==
1008 (pi->mvdd_low_smio[MVDD_LOW_INDEX] & pi->mvdd_mask_low) ) {
1009 voltage->index = MVDD_LOW_INDEX;
1010 voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
1011 } else {
1012 voltage->index = MVDD_HIGH_INDEX;
1013 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
1014 }
1015
1016 return 0;
1017}
1018
/*
 * rv770_populate_smc_initial_state - build the SMC initial (boot) state
 * @rdev: radeon device
 * @radeon_state: boot power state
 * @table: SMC state table being filled in
 *
 * Copies the cached boot-time PLL/MCLK register images and the boot
 * state's low-level clocks/voltages into initialState level 0, then
 * duplicates that level into levels 1 and 2.  The initial state is
 * marked as a DC state.  Always returns 0.
 */
static int rv770_populate_smc_initial_state(struct radeon_device *rdev,
                                            struct radeon_ps *radeon_state,
                                            RV770_SMC_STATETABLE *table)
{
        struct rv7xx_ps *initial_state = rv770_get_ps(radeon_state);
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        u32 a_t;

        /* memory PLL register images, big-endian for the SMC */
        table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
                cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl);
        table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
                cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl_2);
        table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
                cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl);
        table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
                cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl_2);
        table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
                cpu_to_be32(pi->clk_regs.rv770.mclk_pwrmgt_cntl);
        table->initialState.levels[0].mclk.mclk770.vDLL_CNTL =
                cpu_to_be32(pi->clk_regs.rv770.dll_cntl);

        table->initialState.levels[0].mclk.mclk770.vMPLL_SS =
                cpu_to_be32(pi->clk_regs.rv770.mpll_ss1);
        table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 =
                cpu_to_be32(pi->clk_regs.rv770.mpll_ss2);

        table->initialState.levels[0].mclk.mclk770.mclk_value =
                cpu_to_be32(initial_state->low.mclk);

        /* engine PLL register images */
        table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
                cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl);
        table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
                cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_2);
        table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
                cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_3);
        table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
                cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum);
        table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
                cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum_2);

        table->initialState.levels[0].sclk.sclk_value =
                cpu_to_be32(initial_state->low.sclk);

        /* boot state uses the F0 arbiter set */
        table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;

        table->initialState.levels[0].seqValue =
                rv770_get_seq_value(rdev, &initial_state->low);

        rv770_populate_vddc_value(rdev,
                                  initial_state->low.vddc,
                                  &table->initialState.levels[0].vddc);
        rv770_populate_initial_mvdd_value(rdev,
                                          &table->initialState.levels[0].mvdd);

        a_t = CG_R(0xffff) | CG_L(0);
        table->initialState.levels[0].aT = cpu_to_be32(a_t);

        table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);

        if (pi->boot_in_gen2)
                table->initialState.levels[0].gen2PCIE = 1;
        else
                table->initialState.levels[0].gen2PCIE = 0;
        if (initial_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
                table->initialState.levels[0].gen2XSP = 1;
        else
                table->initialState.levels[0].gen2XSP = 0;

        /* RV740 GDDR5: strobe mode and EDC depend on mclk thresholds */
        if (rdev->family == CHIP_RV740) {
                if (pi->mem_gddr5) {
                        if (initial_state->low.mclk <= pi->mclk_strobe_mode_threshold)
                                table->initialState.levels[0].strobeMode =
                                        rv740_get_mclk_frequency_ratio(initial_state->low.mclk) | 0x10;
                        else
                                table->initialState.levels[0].strobeMode = 0;

                        if (initial_state->low.mclk >= pi->mclk_edc_enable_threshold)
                                table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
                        else
                                table->initialState.levels[0].mcFlags = 0;
                }
        }

        /* all three levels identical in the initial state */
        table->initialState.levels[1] = table->initialState.levels[0];
        table->initialState.levels[2] = table->initialState.levels[0];

        table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;

        return 0;
}
1109
/*
 * rv770_populate_smc_vddc_table - copy the VDDC SMIO table into the SMC table
 * @rdev: radeon device
 * @table: SMC state table being filled in
 *
 * Fills the high/low SMIO entries per VDDC index, sets the VDDC voltage
 * mask, and records the index of the first table entry at or above
 * max_vddc_in_table.  Always returns 0.
 */
static int rv770_populate_smc_vddc_table(struct radeon_device *rdev,
                                         RV770_SMC_STATETABLE *table)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        int i;

        for (i = 0; i < pi->valid_vddc_entries; i++) {
                table->highSMIO[pi->vddc_table[i].vddc_index] =
                        pi->vddc_table[i].high_smio;
                table->lowSMIO[pi->vddc_table[i].vddc_index] =
                        cpu_to_be32(pi->vddc_table[i].low_smio);
        }

        table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0;
        table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] =
                cpu_to_be32(pi->vddc_mask_low);

        /* find the first entry whose vddc >= max_vddc_in_table */
        for (i = 0;
             ((i < pi->valid_vddc_entries) &&
              (pi->max_vddc_in_table >
               pi->vddc_table[i].vddc));
             i++);

        /*
         * NOTE(review): if max_vddc_in_table exceeds every entry, the loop
         * above leaves i == valid_vddc_entries and the read below indexes
         * one slot past the last populated entry — verify vddc_table is
         * sized/initialized to tolerate this.
         */
        table->maxVDDCIndexInPPTable =
                pi->vddc_table[i].vddc_index;

        return 0;
}
1138
/*
 * rv770_populate_smc_mvdd_table - copy MVDD SMIO settings into the SMC table
 * @rdev: radeon device
 * @table: SMC state table being filled in
 *
 * Only populated when MVDD control is available; OR-merges the low SMIO
 * patterns for the high/low MVDD indices and sets the MVDD voltage mask.
 * Always returns 0.
 */
static int rv770_populate_smc_mvdd_table(struct radeon_device *rdev,
                                         RV770_SMC_STATETABLE *table)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);

        if (pi->mvdd_control) {
                table->lowSMIO[MVDD_HIGH_INDEX] |=
                        cpu_to_be32(pi->mvdd_low_smio[MVDD_HIGH_INDEX]);
                table->lowSMIO[MVDD_LOW_INDEX] |=
                        cpu_to_be32(pi->mvdd_low_smio[MVDD_LOW_INDEX]);

                table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_MVDD] = 0;
                table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_MVDD] =
                        cpu_to_be32(pi->mvdd_mask_low);
        }

        return 0;
}
1157
/*
 * rv770_init_smc_table - build and upload the full SMC state table
 * @rdev: radeon device
 * @radeon_boot_state: boot power state used for the initial state
 *
 * Zeroes the cached state table, fills the voltage tables, system and
 * thermal flags, the family-specific initial and ACPI states, copies
 * the initial state into the driver state, then uploads the table to
 * SMC SRAM.  Returns 0 on success or a negative error code.
 */
static int rv770_init_smc_table(struct radeon_device *rdev,
                                struct radeon_ps *radeon_boot_state)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
        RV770_SMC_STATETABLE *table = &pi->smc_statetable;
        int ret;

        memset(table, 0, sizeof(RV770_SMC_STATETABLE));

        pi->boot_sclk = boot_state->low.sclk;

        rv770_populate_smc_vddc_table(rdev, table);
        rv770_populate_smc_mvdd_table(rdev, table);

        /* map the platform's thermal sensor type onto the SMC's notion */
        switch (rdev->pm.int_thermal_type) {
        case THERMAL_TYPE_RV770:
        case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
                table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
                break;
        case THERMAL_TYPE_NONE:
                table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
                break;
        case THERMAL_TYPE_EXTERNAL_GPIO:
        default:
                table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
                break;
        }

        /* AC/DC transition behavior from the PowerPlay platform caps */
        if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) {
                table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

                if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT)
                        table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK;

                if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT)
                        table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE;
        }

        if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
                table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

        if (pi->mem_gddr5)
                table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

        /* RV730/RV710 use their own initial/ACPI population paths */
        if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
                ret = rv730_populate_smc_initial_state(rdev, radeon_boot_state, table);
        else
                ret = rv770_populate_smc_initial_state(rdev, radeon_boot_state, table);
        if (ret)
                return ret;

        if (rdev->family == CHIP_RV740)
                ret = rv740_populate_smc_acpi_state(rdev, table);
        else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
                ret = rv730_populate_smc_acpi_state(rdev, table);
        else
                ret = rv770_populate_smc_acpi_state(rdev, table);
        if (ret)
                return ret;

        table->driverState = table->initialState;

        return rv770_copy_bytes_to_smc(rdev,
                                       pi->state_table_start,
                                       (const u8 *)table,
                                       sizeof(RV770_SMC_STATETABLE),
                                       pi->sram_end);
}
1227
/*
 * rv770_construct_vddc_table - enumerate the board's VDDC voltage steps
 * @rdev: radeon device
 *
 * Queries min/max/step VDDC from the ATOM tables and builds one table
 * entry per step with its SMIO GPIO pattern.  Entries sharing the same
 * SMIO pattern as their predecessor share a vddc_index.  Returns
 * -EINVAL if the step count exceeds MAX_NO_VREG_STEPS, else 0.
 * NOTE(review): a step of 0 from the ATOM table would divide by zero
 * below — presumably the table guarantees step > 0; confirm.
 */
static int rv770_construct_vddc_table(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        u16 min, max, step;
        u32 steps = 0;
        u8 vddc_index = 0;
        u32 i;

        radeon_atom_get_min_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &min);
        radeon_atom_get_max_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &max);
        radeon_atom_get_voltage_step(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &step);

        steps = (max - min) / step + 1;

        if (steps > MAX_NO_VREG_STEPS)
                return -EINVAL;

        for (i = 0; i < steps; i++) {
                u32 gpio_pins, gpio_mask;

                pi->vddc_table[i].vddc = (u16)(min + i * step);
                radeon_atom_get_voltage_gpio_settings(rdev,
                                                      pi->vddc_table[i].vddc,
                                                      SET_VOLTAGE_TYPE_ASIC_VDDC,
                                                      &gpio_pins, &gpio_mask);
                pi->vddc_table[i].low_smio = gpio_pins & gpio_mask;
                pi->vddc_table[i].high_smio = 0;
                /* last iteration's mask wins; masks are expected uniform */
                pi->vddc_mask_low = gpio_mask;
                if (i > 0) {
                        /* new index only when the SMIO pattern changes */
                        if ((pi->vddc_table[i].low_smio !=
                             pi->vddc_table[i - 1].low_smio ) ||
                            (pi->vddc_table[i].high_smio !=
                             pi->vddc_table[i - 1].high_smio))
                                vddc_index++;
                }
                pi->vddc_table[i].vddc_index = vddc_index;
        }

        pi->valid_vddc_entries = (u8)steps;

        return 0;
}
1270
1271static u32 rv770_get_mclk_split_point(struct atom_memory_info *memory_info)
1272{
1273 if (memory_info->mem_type == MEM_TYPE_GDDR3)
1274 return 30000;
1275
1276 return 0;
1277}
1278
/*
 * rv770_get_mvdd_pin_configuration - read the GPIO patterns for MVDD levels
 * @rdev: radeon device
 *
 * Queries the SMIO GPIO settings for the high and low MVDD values and
 * caches the masked pin patterns plus the MVDD mask.  Always returns 0.
 */
static int rv770_get_mvdd_pin_configuration(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        u32 gpio_pins, gpio_mask;

        radeon_atom_get_voltage_gpio_settings(rdev,
                                              MVDD_HIGH_VALUE, SET_VOLTAGE_TYPE_ASIC_MVDDC,
                                              &gpio_pins, &gpio_mask);
        pi->mvdd_mask_low = gpio_mask;
        pi->mvdd_low_smio[MVDD_HIGH_INDEX] =
                gpio_pins & gpio_mask;

        radeon_atom_get_voltage_gpio_settings(rdev,
                                              MVDD_LOW_VALUE, SET_VOLTAGE_TYPE_ASIC_MVDDC,
                                              &gpio_pins, &gpio_mask);
        pi->mvdd_low_smio[MVDD_LOW_INDEX] =
                gpio_pins & gpio_mask;

        return 0;
}
1299
/* Memory module index published by the VBIOS in BIOS_SCRATCH_4 bits 16-23. */
u8 rv770_get_memory_module_index(struct radeon_device *rdev)
{
        return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
}
1304
/*
 * rv770_get_mvdd_configuration - determine whether MVDD control is usable
 * @rdev: radeon device
 *
 * Disables MVDD control if the memory info can't be read or the memory
 * type has no mclk split point; otherwise reads the MVDD pin setup.
 * Returns 0 (possibly with mvdd_control disabled) or the pin-config result.
 */
static int rv770_get_mvdd_configuration(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        u8 memory_module_index;
        struct atom_memory_info memory_info;

        memory_module_index = rv770_get_memory_module_index(rdev);

        if (radeon_atom_get_memory_info(rdev, memory_module_index, &memory_info)) {
                pi->mvdd_control = false;
                return 0;
        }

        pi->mvdd_split_frequency =
                rv770_get_mclk_split_point(&memory_info);

        if (pi->mvdd_split_frequency == 0) {
                pi->mvdd_control = false;
                return 0;
        }

        return rv770_get_mvdd_pin_configuration(rdev);
}
1328
1329void rv770_enable_voltage_control(struct radeon_device *rdev,
1330 bool enable)
1331{
1332 if (enable)
1333 WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
1334 else
1335 WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
1336}
1337
/*
 * rv770_program_display_gap - set mclk-change gap behavior per active CRTC
 * @rdev: radeon device
 *
 * The CRTC that is active waits for VBLANK before memory clock changes;
 * inactive CRTCs (or both, when neither is enabled) are set to IGNORE.
 * Note: if both CRTCs are enabled, only CRTC1 is treated as active here.
 */
static void rv770_program_display_gap(struct radeon_device *rdev)
{
        u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);

        tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
        if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) {
                tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
                tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
        } else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) {
                tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
                tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
        } else {
                tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
                tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
        }
        WREG32(CG_DISPLAY_GAP_CNTL, tmp);
}
1355
1356static void rv770_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
1357 bool enable)
1358{
1359 rv770_enable_bif_dynamic_pcie_gen2(rdev, enable);
1360
1361 if (enable)
1362 WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
1363 else
1364 WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
1365}
1366
1367static void r7xx_program_memory_timing_parameters(struct radeon_device *rdev,
1368 struct radeon_ps *radeon_new_state)
1369{
1370 if ((rdev->family == CHIP_RV730) ||
1371 (rdev->family == CHIP_RV710) ||
1372 (rdev->family == CHIP_RV740))
1373 rv730_program_memory_timing_parameters(rdev, radeon_new_state);
1374 else
1375 rv770_program_memory_timing_parameters(rdev, radeon_new_state);
1376}
1377
/*
 * rv770_upload_sw_state - convert and upload the requested state to the SMC
 * @rdev: radeon device
 * @radeon_new_state: power state to upload
 *
 * Converts the state to the SMC software-state layout and writes it at
 * the driverState offset inside the SMC state table.  Returns 0 on
 * success or a negative error code.
 */
static int rv770_upload_sw_state(struct radeon_device *rdev,
                                 struct radeon_ps *radeon_new_state)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        /* target address: driverState field within the SMC state table */
        u16 address = pi->state_table_start +
                offsetof(RV770_SMC_STATETABLE, driverState);
        RV770_SMC_SWSTATE state = { 0 };
        int ret;

        ret = rv770_convert_power_state_to_smc(rdev, radeon_new_state, &state);
        if (ret)
                return ret;

        return rv770_copy_bytes_to_smc(rdev, address, (const u8 *)&state,
                                       sizeof(RV770_SMC_SWSTATE),
                                       pi->sram_end);
}
1395
1396int rv770_halt_smc(struct radeon_device *rdev)
1397{
1398 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
1399 return -EINVAL;
1400
1401 if (rv770_wait_for_smc_inactive(rdev) != PPSMC_Result_OK)
1402 return -EINVAL;
1403
1404 return 0;
1405}
1406
1407int rv770_resume_smc(struct radeon_device *rdev)
1408{
1409 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_Resume) != PPSMC_Result_OK)
1410 return -EINVAL;
1411 return 0;
1412}
1413
1414int rv770_set_sw_state(struct radeon_device *rdev)
1415{
1416 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK)
1417 return -EINVAL;
1418 return 0;
1419}
1420
1421int rv770_set_boot_state(struct radeon_device *rdev)
1422{
1423 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToInitialState) != PPSMC_Result_OK)
1424 return -EINVAL;
1425 return 0;
1426}
1427
/*
 * rv770_set_uvd_clock_before_set_eng_clock - set UVD clocks pre-switch
 * @rdev: radeon device
 * @new_ps: state being switched to
 * @old_ps: state being switched away from
 *
 * Updates the UVD vclk/dclk before the engine clock change, but only
 * when the UVD clocks actually change and the engine clock is going
 * down (the going-up case is handled after the switch instead).
 */
void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
                                              struct radeon_ps *new_ps,
                                              struct radeon_ps *old_ps)
{
        struct rv7xx_ps *new_state = rv770_get_ps(new_ps);
        struct rv7xx_ps *current_state = rv770_get_ps(old_ps);

        /* nothing to do if UVD clocks are unchanged */
        if ((new_ps->vclk == old_ps->vclk) &&
            (new_ps->dclk == old_ps->dclk))
                return;

        /* sclk not decreasing: defer to the after-switch hook */
        if (new_state->high.sclk >= current_state->high.sclk)
                return;

        radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
}
1444
/*
 * rv770_set_uvd_clock_after_set_eng_clock - set UVD clocks post-switch
 * @rdev: radeon device
 * @new_ps: state switched to
 * @old_ps: state switched away from
 *
 * Counterpart of the before-switch hook: applies the UVD clock change
 * only when the clocks differ and the engine clock did not decrease.
 */
void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
                                             struct radeon_ps *new_ps,
                                             struct radeon_ps *old_ps)
{
        struct rv7xx_ps *new_state = rv770_get_ps(new_ps);
        struct rv7xx_ps *current_state = rv770_get_ps(old_ps);

        /* nothing to do if UVD clocks are unchanged */
        if ((new_ps->vclk == old_ps->vclk) &&
            (new_ps->dclk == old_ps->dclk))
                return;

        /* sclk decreased: the before-switch hook already handled it */
        if (new_state->high.sclk < current_state->high.sclk)
                return;

        radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
}
1461
1462int rv770_restrict_performance_levels_before_switch(struct radeon_device *rdev)
1463{
1464 if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_NoForcedLevel)) != PPSMC_Result_OK)
1465 return -EINVAL;
1466
1467 if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_TwoLevelsDisabled)) != PPSMC_Result_OK)
1468 return -EINVAL;
1469
1470 return 0;
1471}
1472
1473int rv770_unrestrict_performance_levels_after_switch(struct radeon_device *rdev)
1474{
1475 if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_NoForcedLevel)) != PPSMC_Result_OK)
1476 return -EINVAL;
1477
1478 if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_ZeroLevelsDisabled)) != PPSMC_Result_OK)
1479 return -EINVAL;
1480
1481 return 0;
1482}
1483
/* Release the SMC from reset and start its clock. */
void r7xx_start_smc(struct radeon_device *rdev)
{
        rv770_start_smc(rdev);
        rv770_start_smc_clock(rdev);
}
1489
1490
/* Put the SMC into reset and stop its clock; inverse of r7xx_start_smc(). */
void r7xx_stop_smc(struct radeon_device *rdev)
{
        rv770_reset_smc(rdev);
        rv770_stop_smc_clock(rdev);
}
1496
/*
 * rv770_read_clock_registers - snapshot the boot-time clock registers
 * @rdev: radeon device
 *
 * Caches the SPLL, MPLL AD/DQ, MCLK power-management and DLL register
 * values into pi->clk_regs.rv770 so later state construction can start
 * from the values programmed by the VBIOS.
 */
static void rv770_read_clock_registers(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);

        pi->clk_regs.rv770.cg_spll_func_cntl =
                RREG32(CG_SPLL_FUNC_CNTL);
        pi->clk_regs.rv770.cg_spll_func_cntl_2 =
                RREG32(CG_SPLL_FUNC_CNTL_2);
        pi->clk_regs.rv770.cg_spll_func_cntl_3 =
                RREG32(CG_SPLL_FUNC_CNTL_3);
        pi->clk_regs.rv770.cg_spll_spread_spectrum =
                RREG32(CG_SPLL_SPREAD_SPECTRUM);
        pi->clk_regs.rv770.cg_spll_spread_spectrum_2 =
                RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
        pi->clk_regs.rv770.mpll_ad_func_cntl =
                RREG32(MPLL_AD_FUNC_CNTL);
        pi->clk_regs.rv770.mpll_ad_func_cntl_2 =
                RREG32(MPLL_AD_FUNC_CNTL_2);
        pi->clk_regs.rv770.mpll_dq_func_cntl =
                RREG32(MPLL_DQ_FUNC_CNTL);
        pi->clk_regs.rv770.mpll_dq_func_cntl_2 =
                RREG32(MPLL_DQ_FUNC_CNTL_2);
        pi->clk_regs.rv770.mclk_pwrmgt_cntl =
                RREG32(MCLK_PWRMGT_CNTL);
        pi->clk_regs.rv770.dll_cntl = RREG32(DLL_CNTL);
}
1523
1524static void r7xx_read_clock_registers(struct radeon_device *rdev)
1525{
1526 if (rdev->family == CHIP_RV740)
1527 rv740_read_clock_registers(rdev);
1528 else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1529 rv730_read_clock_registers(rdev);
1530 else
1531 rv770_read_clock_registers(rdev);
1532}
1533
/* Cache the boot-time S0 voltage SMIO setting for later comparison/restore. */
void rv770_read_voltage_smio_registers(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);

        pi->s0_vid_lower_smio_cntl =
                RREG32(S0_VID_LOWER_SMIO_CNTL);
}
1541
/*
 * rv770_reset_smio_status - fold the active SMIO slot back into slot 0
 * @rdev: radeon device
 *
 * Reads which SMIO index is currently selected, copies that slot's VID
 * setting into S0 (slot 0 itself needs no copy), and resets the index
 * selector to 0.  Unknown indices fall back to the cached boot value.
 */
void rv770_reset_smio_status(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        u32 sw_smio_index, vid_smio_cntl;

        sw_smio_index =
                (RREG32(GENERAL_PWRMGT) & SW_SMIO_INDEX_MASK) >> SW_SMIO_INDEX_SHIFT;
        switch (sw_smio_index) {
        case 3:
                vid_smio_cntl = RREG32(S3_VID_LOWER_SMIO_CNTL);
                break;
        case 2:
                vid_smio_cntl = RREG32(S2_VID_LOWER_SMIO_CNTL);
                break;
        case 1:
                vid_smio_cntl = RREG32(S1_VID_LOWER_SMIO_CNTL);
                break;
        case 0:
                /* already on slot 0: nothing to restore */
                return;
        default:
                vid_smio_cntl = pi->s0_vid_lower_smio_cntl;
                break;
        }

        WREG32(S0_VID_LOWER_SMIO_CNTL, vid_smio_cntl);
        WREG32_P(GENERAL_PWRMGT, SW_SMIO_INDEX(0), ~SW_SMIO_INDEX_MASK);
}
1569
1570void rv770_get_memory_type(struct radeon_device *rdev)
1571{
1572 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1573 u32 tmp;
1574
1575 tmp = RREG32(MC_SEQ_MISC0);
1576
1577 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
1578 MC_SEQ_MISC0_GDDR5_VALUE)
1579 pi->mem_gddr5 = true;
1580 else
1581 pi->mem_gddr5 = false;
1582
1583}
1584
1585void rv770_get_pcie_gen2_status(struct radeon_device *rdev)
1586{
1587 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1588 u32 tmp;
1589
1590 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1591
1592 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
1593 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
1594 pi->pcie_gen2 = true;
1595 else
1596 pi->pcie_gen2 = false;
1597
1598 if (pi->pcie_gen2) {
1599 if (tmp & LC_CURRENT_DATA_RATE)
1600 pi->boot_in_gen2 = true;
1601 else
1602 pi->boot_in_gen2 = false;
1603 } else
1604 pi->boot_in_gen2 = false;
1605}
1606
#if 0
/*
 * rv770_enter_ulp_state - request minimum-power (ULP) mode from the SMC
 * @rdev: radeon device
 *
 * Compiled out: kept for reference.  Forces the graphics clock on
 * momentarily if clock gating is active, then posts the
 * SwitchToMinimumPower message and waits 7 ms.  Always returns 0.
 */
static int rv770_enter_ulp_state(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);

        if (pi->gfx_clock_gating) {
                WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
                WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
                WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
                /* posting read to flush the writes above */
                RREG32(GB_TILING_CONFIG);
        }

        WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
                 ~HOST_SMC_MSG_MASK);

        udelay(7000);

        return 0;
}

/*
 * rv770_exit_ulp_state - bring the SMC back from minimum-power mode
 * @rdev: radeon device
 *
 * Compiled out: kept for reference.  Posts ResumeFromMinimumPower,
 * polls for the SMC response (up to usec_timeout iterations of 1 ms),
 * then re-enables dynamic gfx clock gating if it was in use.
 * Always returns 0, even on response timeout.
 */
static int rv770_exit_ulp_state(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        int i;

        WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_ResumeFromMinimumPower),
                 ~HOST_SMC_MSG_MASK);

        udelay(7000);

        for (i = 0; i < rdev->usec_timeout; i++) {
                if (((RREG32(SMC_MSG) & HOST_SMC_RESP_MASK) >> HOST_SMC_RESP_SHIFT) == 1)
                        break;
                udelay(1000);
        }

        if (pi->gfx_clock_gating)
                WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);

        return 0;
}
#endif
1649
/*
 * rv770_get_mclk_odt_threshold - determine the mclk ODT switch threshold
 * @rdev: radeon device
 *
 * Only RV730/RV710 with DDR2/DDR3 memory get a non-zero threshold
 * (30000); everything else leaves it at 0, which disables the dcodt
 * programming paths.
 */
static void rv770_get_mclk_odt_threshold(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        u8 memory_module_index;
        struct atom_memory_info memory_info;

        pi->mclk_odt_threshold = 0;

        if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) {
                memory_module_index = rv770_get_memory_module_index(rdev);

                /* leave threshold 0 if the memory info can't be read */
                if (radeon_atom_get_memory_info(rdev, memory_module_index, &memory_info))
                        return;

                if (memory_info.mem_type == MEM_TYPE_DDR2 ||
                    memory_info.mem_type == MEM_TYPE_DDR3)
                        pi->mclk_odt_threshold = 30000;
        }
}
1669
1670void rv770_get_max_vddc(struct radeon_device *rdev)
1671{
1672 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1673 u16 vddc;
1674
1675 if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc))
1676 pi->max_vddc = 0;
1677 else
1678 pi->max_vddc = vddc;
1679}
1680
/*
 * rv770_program_response_times - program SMC delay soft registers
 * @rdev: radeon device
 *
 * Converts voltage/backbias response times, the ACPI delay and the
 * VBI timeout (all scaled by the reference clock / 1600) into the
 * corresponding SMC soft registers.  Zero response times fall back
 * to 1000.
 */
void rv770_program_response_times(struct radeon_device *rdev)
{
        u32 voltage_response_time, backbias_response_time;
        u32 acpi_delay_time, vbi_time_out;
        u32 vddc_dly, bb_dly, acpi_dly, vbi_dly;
        u32 reference_clock;

        voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
        backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;

        /* default when the platform doesn't provide a value */
        if (voltage_response_time == 0)
                voltage_response_time = 1000;

        if (backbias_response_time == 0)
                backbias_response_time = 1000;

        acpi_delay_time = 15000;
        vbi_time_out = 100000;

        reference_clock = radeon_get_xclk(rdev);

        /* scale each delay from time units into reference-clock ticks */
        vddc_dly = (voltage_response_time * reference_clock) / 1600;
        bb_dly = (backbias_response_time * reference_clock) / 1600;
        acpi_dly = (acpi_delay_time * reference_clock) / 1600;
        vbi_dly = (vbi_time_out * reference_clock) / 1600;

        rv770_write_smc_soft_register(rdev,
                                      RV770_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
        rv770_write_smc_soft_register(rdev,
                                      RV770_SMC_SOFT_REGISTER_delay_bbias, bb_dly);
        rv770_write_smc_soft_register(rdev,
                                      RV770_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
        rv770_write_smc_soft_register(rdev,
                                      RV770_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
#if 0
        /* XXX look up hw revision */
        if (WEKIVA_A21)
                rv770_write_smc_soft_register(rdev,
                                              RV770_SMC_SOFT_REGISTER_baby_step_timer,
                                              0x10);
#endif
}
1723
/*
 * rv770_program_dcodt_before_state_switch - dcodt handling pre-switch
 * @rdev: radeon device
 * @radeon_new_state: state being switched to
 * @radeon_current_state: state being switched away from
 *
 * A state "uses dc" when its high mclk is at or below the ODT
 * threshold.  This pre-switch hook acts only on the dc -> non-dc
 * transition (the non-dc -> dc direction is handled after the switch),
 * and only RV730/RV710 actually program dcodt.
 */
static void rv770_program_dcodt_before_state_switch(struct radeon_device *rdev,
                                                    struct radeon_ps *radeon_new_state,
                                                    struct radeon_ps *radeon_current_state)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state);
        struct rv7xx_ps *current_state = rv770_get_ps(radeon_current_state);
        bool current_use_dc = false;
        bool new_use_dc = false;

        /* threshold 0 means dcodt is not applicable on this board */
        if (pi->mclk_odt_threshold == 0)
                return;

        if (current_state->high.mclk <= pi->mclk_odt_threshold)
                current_use_dc = true;

        if (new_state->high.mclk <= pi->mclk_odt_threshold)
                new_use_dc = true;

        if (current_use_dc == new_use_dc)
                return;

        /* non-dc -> dc is deferred to the after-switch hook */
        if (!current_use_dc && new_use_dc)
                return;

        if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
                rv730_program_dcodt(rdev, new_use_dc);
}
1752
/*
 * rv770_program_dcodt_after_state_switch - dcodt handling post-switch
 * @rdev: radeon device
 * @radeon_new_state: state switched to
 * @radeon_current_state: state switched away from
 *
 * Mirror of the before-switch hook: acts only on the non-dc -> dc
 * transition (dc -> non-dc was already handled before the switch);
 * only RV730/RV710 actually program dcodt.
 */
static void rv770_program_dcodt_after_state_switch(struct radeon_device *rdev,
                                                   struct radeon_ps *radeon_new_state,
                                                   struct radeon_ps *radeon_current_state)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state);
        struct rv7xx_ps *current_state = rv770_get_ps(radeon_current_state);
        bool current_use_dc = false;
        bool new_use_dc = false;

        /* threshold 0 means dcodt is not applicable on this board */
        if (pi->mclk_odt_threshold == 0)
                return;

        if (current_state->high.mclk <= pi->mclk_odt_threshold)
                current_use_dc = true;

        if (new_state->high.mclk <= pi->mclk_odt_threshold)
                new_use_dc = true;

        if (current_use_dc == new_use_dc)
                return;

        /* dc -> non-dc was handled by the before-switch hook */
        if (current_use_dc && !new_use_dc)
                return;

        if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
                rv730_program_dcodt(rdev, new_use_dc);
}
1781
1782static void rv770_retrieve_odt_values(struct radeon_device *rdev)
1783{
1784 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1785
1786 if (pi->mclk_odt_threshold == 0)
1787 return;
1788
1789 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1790 rv730_get_odt_values(rdev);
1791}
1792
1793static void rv770_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
1794{
1795 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1796 bool want_thermal_protection;
1797 enum radeon_dpm_event_src dpm_event_src;
1798
1799 switch (sources) {
1800 case 0:
1801 default:
1802 want_thermal_protection = false;
1803 break;
1804 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
1805 want_thermal_protection = true;
1806 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
1807 break;
1808
1809 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1810 want_thermal_protection = true;
1811 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
1812 break;
1813
1814 case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1815 (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1816 want_thermal_protection = true;
1817 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1818 break;
1819 }
1820
1821 if (want_thermal_protection) {
1822 WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
1823 if (pi->thermal_protection)
1824 WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
1825 } else {
1826 WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
1827 }
1828}
1829
1830void rv770_enable_auto_throttle_source(struct radeon_device *rdev,
1831 enum radeon_dpm_auto_throttle_src source,
1832 bool enable)
1833{
1834 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1835
1836 if (enable) {
1837 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1838 pi->active_auto_throttle_sources |= 1 << source;
1839 rv770_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1840 }
1841 } else {
1842 if (pi->active_auto_throttle_sources & (1 << source)) {
1843 pi->active_auto_throttle_sources &= ~(1 << source);
1844 rv770_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1845 }
1846 }
1847}
1848
1849int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
1850 int min_temp, int max_temp)
1851{
1852 int low_temp = 0 * 1000;
1853 int high_temp = 255 * 1000;
1854
1855 if (low_temp < min_temp)
1856 low_temp = min_temp;
1857 if (high_temp > max_temp)
1858 high_temp = max_temp;
1859 if (high_temp < low_temp) {
1860 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
1861 return -EINVAL;
1862 }
1863
1864 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
1865 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
1866 WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
1867
1868 rdev->pm.dpm.thermal.min_temp = low_temp;
1869 rdev->pm.dpm.thermal.max_temp = high_temp;
1870
1871 return 0;
1872}
1873
/*
 * rv770_dpm_enable - bring up dynamic power management on rv7xx parts.
 * @rdev: radeon device
 *
 * Ordered hardware bring-up: voltage control and tables, spread spectrum,
 * thermal protection, clock/timing parameter programming, SMC firmware
 * upload and start, clock gating, then thermal interrupt hookup.  The
 * ordering is significant; do not reorder steps casually.
 *
 * Returns 0 on success, -EINVAL if DPM is already enabled, or the error
 * code of the first failing setup step.
 */
int rv770_dpm_enable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	if (pi->gfx_clock_gating)
		rv770_restore_cgcg(rdev);

	/* refuse to enable twice */
	if (rv770_dpm_enabled(rdev))
		return -EINVAL;

	if (pi->voltage_control) {
		rv770_enable_voltage_control(rdev, true);
		ret = rv770_construct_vddc_table(rdev);
		if (ret) {
			DRM_ERROR("rv770_construct_vddc_table failed\n");
			return ret;
		}
	}

	/* snapshot current ODT values before any switching occurs */
	if (pi->dcodt)
		rv770_retrieve_odt_values(rdev);

	if (pi->mvdd_control) {
		ret = rv770_get_mvdd_configuration(rdev);
		if (ret) {
			DRM_ERROR("rv770_get_mvdd_configuration failed\n");
			return ret;
		}
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
		rv770_enable_backbias(rdev, true);

	rv770_enable_spread_spectrum(rdev, true);

	if (pi->thermal_protection)
		rv770_enable_thermal_protection(rdev, true);

	/* program clock/timing parameters before the SMC is started */
	rv770_program_mpll_timing_parameters(rdev);
	rv770_setup_bsp(rdev);
	rv770_program_git(rdev);
	rv770_program_tp(rdev);
	rv770_program_tpp(rdev);
	rv770_program_sstp(rdev);
	rv770_program_engine_speed_parameters(rdev);
	rv770_enable_display_gap(rdev);
	rv770_program_vc(rdev);

	if (pi->dynamic_pcie_gen2)
		rv770_enable_dynamic_pcie_gen2(rdev, true);

	ret = rv770_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("rv770_upload_firmware failed\n");
		return ret;
	}
	/* seed the SMC state table with the boot power state */
	ret = rv770_init_smc_table(rdev, boot_ps);
	if (ret) {
		DRM_ERROR("rv770_init_smc_table failed\n");
		return ret;
	}

	rv770_program_response_times(rdev);
	r7xx_start_smc(rdev);

	/* rv730/rv710 use a different DPM start sequence */
	if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
		rv730_start_dpm(rdev);
	else
		rv770_start_dpm(rdev);

	if (pi->gfx_clock_gating)
		rv770_gfx_clock_gating_enable(rdev, true);

	if (pi->mg_clock_gating)
		rv770_mg_clock_gating_enable(rdev, true);

	/* hook up the thermal interrupt when an internal sensor exists */
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		PPSMC_Result result;

		ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
		result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);

		/* non-fatal: DPM still works without the SMC interrupt */
		if (result != PPSMC_Result_OK)
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
	}

	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	return 0;
}
1971
/*
 * rv770_dpm_disable - tear down dynamic power management.
 * @rdev: radeon device
 *
 * Roughly mirrors rv770_dpm_enable() in reverse: disables voltage
 * controller commands, thermal protection, spread spectrum and clock
 * gating, detaches the thermal interrupt, stops DPM and the SMC, then
 * resets the SMIO status.  No-op if DPM is not currently enabled.
 */
void rv770_dpm_disable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	if (!rv770_dpm_enabled(rdev))
		return;

	rv770_clear_vc(rdev);

	if (pi->thermal_protection)
		rv770_enable_thermal_protection(rdev, false);

	rv770_enable_spread_spectrum(rdev, false);

	if (pi->dynamic_pcie_gen2)
		rv770_enable_dynamic_pcie_gen2(rdev, false);

	/* detach the thermal interrupt before stopping the SMC */
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		rdev->irq.dpm_thermal = false;
		radeon_irq_set(rdev);
	}

	if (pi->gfx_clock_gating)
		rv770_gfx_clock_gating_enable(rdev, false);

	if (pi->mg_clock_gating)
		rv770_mg_clock_gating_enable(rdev, false);

	/* rv730/rv710 use a different DPM stop sequence */
	if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
		rv730_stop_dpm(rdev);
	else
		rv770_stop_dpm(rdev);

	r7xx_stop_smc(rdev);
	rv770_reset_smio_status(rdev);
}
2009
/*
 * rv770_dpm_set_power_state - switch from the current to the requested
 * power state.
 * @rdev: radeon device
 *
 * Sequence: cap performance levels, adjust UVD clocks ahead of the engine
 * clock change, halt the SMC while the new software state and memory
 * timings are uploaded, resume the SMC, commit the switch, then adjust UVD
 * clocks again and lift the performance-level restriction.  DC ODT is
 * reprogrammed around the switch when enabled (rv730/rv710 only).
 *
 * Returns 0 on success or the error of the first failing step.
 */
int rv770_dpm_set_power_state(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
	struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
	int ret;

	ret = rv770_restrict_performance_levels_before_switch(rdev);
	if (ret) {
		DRM_ERROR("rv770_restrict_performance_levels_before_switch failed\n");
		return ret;
	}
	rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
	/* SMC must be halted while the new state is uploaded */
	ret = rv770_halt_smc(rdev);
	if (ret) {
		DRM_ERROR("rv770_halt_smc failed\n");
		return ret;
	}
	ret = rv770_upload_sw_state(rdev, new_ps);
	if (ret) {
		DRM_ERROR("rv770_upload_sw_state failed\n");
		return ret;
	}
	r7xx_program_memory_timing_parameters(rdev, new_ps);
	if (pi->dcodt)
		rv770_program_dcodt_before_state_switch(rdev, new_ps, old_ps);
	ret = rv770_resume_smc(rdev);
	if (ret) {
		DRM_ERROR("rv770_resume_smc failed\n");
		return ret;
	}
	ret = rv770_set_sw_state(rdev);
	if (ret) {
		DRM_ERROR("rv770_set_sw_state failed\n");
		return ret;
	}
	if (pi->dcodt)
		rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps);
	rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
	ret = rv770_unrestrict_performance_levels_after_switch(rdev);
	if (ret) {
		DRM_ERROR("rv770_unrestrict_performance_levels_after_switch failed\n");
		return ret;
	}

	return 0;
}
2057
2058void rv770_dpm_reset_asic(struct radeon_device *rdev)
2059{
2060 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2061 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
2062
2063 rv770_restrict_performance_levels_before_switch(rdev);
2064 if (pi->dcodt)
2065 rv770_program_dcodt_before_state_switch(rdev, boot_ps, boot_ps);
2066 rv770_set_boot_state(rdev);
2067 if (pi->dcodt)
2068 rv770_program_dcodt_after_state_switch(rdev, boot_ps, boot_ps);
2069}
2070
/*
 * rv770_dpm_setup_asic - one-time ASIC setup before DPM is enabled.
 * @rdev: radeon device
 *
 * Caches clock and voltage SMIO register state, determines the memory
 * type and (when DC ODT is used) the mclk ODT threshold, records PCIe
 * gen2 status, then enables ACPI power management and the ASPM features
 * advertised by the platform capability flags.
 */
void rv770_dpm_setup_asic(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	r7xx_read_clock_registers(rdev);
	rv770_read_voltage_smio_registers(rdev);
	rv770_get_memory_type(rdev);
	if (pi->dcodt)
		rv770_get_mclk_odt_threshold(rdev);
	rv770_get_pcie_gen2_status(rdev);

	rv770_enable_acpi_pm(rdev);

	/* enable only the ASPM states the platform claims to support */
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
		rv770_enable_l0s(rdev);
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
		rv770_enable_l1(rdev);
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
		rv770_enable_pll_sleep_in_l1(rdev);
}
2091
/* Re-program the display gap whenever the display configuration changes
 * (hook called by the shared radeon dpm code). */
void rv770_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	rv770_program_display_gap(rdev);
}
2096
/* Overlays for the variable-layout ATOM PowerPlay tables.  The VBIOS image
 * is parsed in place, so each union maps the same bytes onto whichever
 * table revision the header declares. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Per-level clock info; the layout depends on the ASIC generation. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

/* Power-state entry; v1 vs v2 layout depending on table revision. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
2117
/*
 * rv7xx_parse_pplib_non_clock_info - fill the generic (non-clock) fields
 * of a radeon_ps from an ATOM non-clock info entry.
 * @rdev:            radeon device
 * @rps:             power state being populated
 * @non_clock_info:  ATOM non-clock info entry (little-endian fields)
 * @table_rev:       non-clock table revision
 *
 * Table revisions newer than VER1 carry explicit UVD clocks; older tables
 * fall back to fixed defaults for UVD states and zero otherwise.  Also
 * records the boot and UVD states on rdev->pm.dpm as they are seen.
 */
static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
					     struct radeon_ps *rps,
					     struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					     u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
		rps->vclk = RV770_DEFAULT_VCLK_FREQ;
		rps->dclk = RV770_DEFAULT_DCLK_FREQ;
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		rdev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}
2143
/*
 * rv7xx_parse_pplib_clock_info - fill one performance level of a power
 * state from an ATOM clock info entry.
 * @rdev:       radeon device
 * @rps:        power state being populated
 * @index:      performance level (0 = low, 1 = medium, 2+ = high)
 * @clock_info: ATOM clock info entry (little-endian; layout per family)
 *
 * Decodes clocks/voltages from the family-specific table layout, patches
 * up sentinel and boot-state values, and caches ACPI/ULV/min/max voltage
 * information on the driver-private power info structures.
 */
static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
					 struct radeon_ps *rps, int index,
					 union pplib_clock_info *clock_info)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct rv7xx_ps *ps = rv770_get_ps(rps);
	u32 sclk, mclk;
	u16 vddc;
	struct rv7xx_pl *pl;

	switch (index) {
	case 0:
		pl = &ps->low;
		break;
	case 1:
		pl = &ps->medium;
		break;
	case 2:
	default:
		pl = &ps->high;
		break;
	}

	/* clocks are split into a 16-bit low and an 8-bit high part */
	if (rdev->family >= CHIP_CEDAR) {
		sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
		sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
		mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;

		pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC);
		pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI);
		pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags);
	} else {
		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
		sclk |= clock_info->r600.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
		mclk |= clock_info->r600.ucMemoryClockHigh << 16;

		pl->vddc = le16_to_cpu(clock_info->r600.usVDDC);
		pl->flags = le32_to_cpu(clock_info->r600.ulFlags);
	}

	pl->mclk = mclk;
	pl->sclk = sclk;

	/* patch up vddc if necessary
	 * (0xff01 appears to be a "use max vddc" sentinel — confirm against
	 * the ATOM table documentation) */
	if (pl->vddc == 0xff01) {
		if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
			pl->vddc = vddc;
	}

	/* remember the ACPI state's voltages and PCIe gen for later use */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_vddc = pl->vddc;
		if (rdev->family >= CHIP_CEDAR)
			eg_pi->acpi_vddci = pl->vddci;
		if (ps->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
			pi->acpi_pcie_gen2 = true;
		else
			pi->acpi_pcie_gen2 = false;
	}

	/* ULV is only supported on BARTS and newer */
	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		if (rdev->family >= CHIP_BARTS) {
			eg_pi->ulv.supported = true;
			eg_pi->ulv.pl = pl;
		}
	}

	/* track the overall vddc range seen across all levels */
	if (pi->min_vddc_in_table > pl->vddc)
		pi->min_vddc_in_table = pl->vddc;

	if (pi->max_vddc_in_table < pl->vddc)
		pi->max_vddc_in_table = pl->vddc;

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		u16 vddc, vddci, mvdd;
		radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
		pl->mclk = rdev->clock.default_mclk;
		pl->sclk = rdev->clock.default_sclk;
		pl->vddc = vddc;
		pl->vddci = vddci;
	}

	/* record the AC performance ceiling for BARTS and newer */
	if (rdev->family >= CHIP_BARTS) {
		if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
		    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
		}
	}
}
2239
2240int rv7xx_parse_power_table(struct radeon_device *rdev)
2241{
2242 struct radeon_mode_info *mode_info = &rdev->mode_info;
2243 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2244 union pplib_power_state *power_state;
2245 int i, j;
2246 union pplib_clock_info *clock_info;
2247 union power_info *power_info;
2248 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2249 u16 data_offset;
2250 u8 frev, crev;
2251 struct rv7xx_ps *ps;
2252
2253 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2254 &frev, &crev, &data_offset))
2255 return -EINVAL;
2256 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2257
2258 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
2259 power_info->pplib.ucNumStates, GFP_KERNEL);
2260 if (!rdev->pm.dpm.ps)
2261 return -ENOMEM;
2262 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
2263 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
2264 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
2265
2266 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
2267 power_state = (union pplib_power_state *)
2268 (mode_info->atom_context->bios + data_offset +
2269 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
2270 i * power_info->pplib.ucStateEntrySize);
2271 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2272 (mode_info->atom_context->bios + data_offset +
2273 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
2274 (power_state->v1.ucNonClockStateIndex *
2275 power_info->pplib.ucNonClockSize));
2276 if (power_info->pplib.ucStateEntrySize - 1) {
2277 ps = kzalloc(sizeof(struct rv7xx_ps), GFP_KERNEL);
2278 if (ps == NULL) {
2279 kfree(rdev->pm.dpm.ps);
2280 return -ENOMEM;
2281 }
2282 rdev->pm.dpm.ps[i].ps_priv = ps;
2283 rv7xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
2284 non_clock_info,
2285 power_info->pplib.ucNonClockSize);
2286 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
2287 clock_info = (union pplib_clock_info *)
2288 (mode_info->atom_context->bios + data_offset +
2289 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
2290 (power_state->v1.ucClockStateIndices[j] *
2291 power_info->pplib.ucClockInfoSize));
2292 rv7xx_parse_pplib_clock_info(rdev,
2293 &rdev->pm.dpm.ps[i], j,
2294 clock_info);
2295 }
2296 }
2297 }
2298 rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
2299 return 0;
2300}
2301
2302int rv770_dpm_init(struct radeon_device *rdev)
2303{
2304 struct rv7xx_power_info *pi;
2305 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
2306 uint16_t data_offset, size;
2307 uint8_t frev, crev;
2308 struct atom_clock_dividers dividers;
2309 int ret;
2310
2311 pi = kzalloc(sizeof(struct rv7xx_power_info), GFP_KERNEL);
2312 if (pi == NULL)
2313 return -ENOMEM;
2314 rdev->pm.dpm.priv = pi;
2315
2316 rv770_get_max_vddc(rdev);
2317
2318 pi->acpi_vddc = 0;
2319 pi->min_vddc_in_table = 0;
2320 pi->max_vddc_in_table = 0;
2321
2322 ret = rv7xx_parse_power_table(rdev);
2323 if (ret)
2324 return ret;
2325
2326 if (rdev->pm.dpm.voltage_response_time == 0)
2327 rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
2328 if (rdev->pm.dpm.backbias_response_time == 0)
2329 rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
2330
2331 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
2332 0, false, &dividers);
2333 if (ret)
2334 pi->ref_div = dividers.ref_div + 1;
2335 else
2336 pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
2337
2338 pi->mclk_strobe_mode_threshold = 30000;
2339 pi->mclk_edc_enable_threshold = 30000;
2340
2341 pi->rlp = RV770_RLP_DFLT;
2342 pi->rmp = RV770_RMP_DFLT;
2343 pi->lhp = RV770_LHP_DFLT;
2344 pi->lmp = RV770_LMP_DFLT;
2345
2346 pi->voltage_control =
2347 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
2348
2349 pi->mvdd_control =
2350 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
2351
2352 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
2353 &frev, &crev, &data_offset)) {
2354 pi->sclk_ss = true;
2355 pi->mclk_ss = true;
2356 pi->dynamic_ss = true;
2357 } else {
2358 pi->sclk_ss = false;
2359 pi->mclk_ss = false;
2360 pi->dynamic_ss = false;
2361 }
2362
2363 pi->asi = RV770_ASI_DFLT;
2364 pi->pasi = RV770_HASI_DFLT;
2365 pi->vrc = RV770_VRC_DFLT;
2366
2367 pi->power_gating = false;
2368
2369 pi->gfx_clock_gating = true;
2370
2371 pi->mg_clock_gating = true;
2372 pi->mgcgtssm = true;
2373
2374 pi->dynamic_pcie_gen2 = true;
2375
2376 if (pi->gfx_clock_gating &&
2377 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
2378 pi->thermal_protection = true;
2379 else
2380 pi->thermal_protection = false;
2381
2382 pi->display_gap = true;
2383
2384 if (rdev->flags & RADEON_IS_MOBILITY)
2385 pi->dcodt = true;
2386 else
2387 pi->dcodt = false;
2388
2389 pi->ulps = true;
2390
2391 pi->mclk_stutter_mode_threshold = 0;
2392
2393 pi->sram_end = SMC_RAM_END;
2394 pi->state_table_start = RV770_SMC_TABLE_ADDRESS;
2395 pi->soft_regs_start = RV770_SMC_SOFT_REGISTERS_START;
2396
2397 return 0;
2398}
2399
2400void rv770_dpm_print_power_state(struct radeon_device *rdev,
2401 struct radeon_ps *rps)
2402{
2403 struct rv7xx_ps *ps = rv770_get_ps(rps);
2404 struct rv7xx_pl *pl;
2405
2406 r600_dpm_print_class_info(rps->class, rps->class2);
2407 r600_dpm_print_cap_info(rps->caps);
2408 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2409 if (rdev->family >= CHIP_CEDAR) {
2410 pl = &ps->low;
2411 printk("\t\tpower level 0 sclk: %u mclk: %u vddc: %u vddci: %u\n",
2412 pl->sclk, pl->mclk, pl->vddc, pl->vddci);
2413 pl = &ps->medium;
2414 printk("\t\tpower level 1 sclk: %u mclk: %u vddc: %u vddci: %u\n",
2415 pl->sclk, pl->mclk, pl->vddc, pl->vddci);
2416 pl = &ps->high;
2417 printk("\t\tpower level 2 sclk: %u mclk: %u vddc: %u vddci: %u\n",
2418 pl->sclk, pl->mclk, pl->vddc, pl->vddci);
2419 } else {
2420 pl = &ps->low;
2421 printk("\t\tpower level 0 sclk: %u mclk: %u vddc: %u\n",
2422 pl->sclk, pl->mclk, pl->vddc);
2423 pl = &ps->medium;
2424 printk("\t\tpower level 1 sclk: %u mclk: %u vddc: %u\n",
2425 pl->sclk, pl->mclk, pl->vddc);
2426 pl = &ps->high;
2427 printk("\t\tpower level 2 sclk: %u mclk: %u vddc: %u\n",
2428 pl->sclk, pl->mclk, pl->vddc);
2429 }
2430 r600_dpm_print_ps_status(rdev, rps);
2431}
2432
2433void rv770_dpm_fini(struct radeon_device *rdev)
2434{
2435 int i;
2436
2437 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2438 kfree(rdev->pm.dpm.ps[i].ps_priv);
2439 }
2440 kfree(rdev->pm.dpm.ps);
2441 kfree(rdev->pm.dpm.priv);
2442}
2443
2444u32 rv770_dpm_get_sclk(struct radeon_device *rdev, bool low)
2445{
2446 struct rv7xx_ps *requested_state = rv770_get_ps(rdev->pm.dpm.requested_ps);
2447
2448 if (low)
2449 return requested_state->low.sclk;
2450 else
2451 return requested_state->high.sclk;
2452}
2453
2454u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
2455{
2456 struct rv7xx_ps *requested_state = rv770_get_ps(rdev->pm.dpm.requested_ps);
2457
2458 if (low)
2459 return requested_state->low.mclk;
2460 else
2461 return requested_state->high.mclk;
2462}
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
new file mode 100644
index 000000000000..f1e1fcf7f622
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -0,0 +1,288 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __RV770_DPM_H__
24#define __RV770_DPM_H__
25
26#include "rv770_smc.h"
27
/* Cached copies of the rv770-style SPLL/MPLL and memory-clock
 * power-management registers (filled by the read_clock_registers helpers
 * and used when building SMC state tables). */
struct rv770_clock_registers {
	u32 cg_spll_func_cntl;
	u32 cg_spll_func_cntl_2;
	u32 cg_spll_func_cntl_3;
	u32 cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2;
	u32 mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2;
	u32 mclk_pwrmgt_cntl;
	u32 dll_cntl;
	u32 mpll_ss1;
	u32 mpll_ss2;
};
43
/* Cached copies of the rv730/rv710-style clock registers; this family has
 * a single MPLL function control block instead of the AD/DQ split. */
struct rv730_clock_registers {
	u32 cg_spll_func_cntl;
	u32 cg_spll_func_cntl_2;
	u32 cg_spll_func_cntl_3;
	u32 cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2;
	u32 mclk_pwrmgt_cntl;
	u32 dll_cntl;
	u32 mpll_func_cntl;
	u32 mpll_func_cntl2;
	u32 mpll_func_cntl3;
	u32 mpll_ss;
	u32 mpll_ss2;
};
58
/* Either layout of cached clock registers; which member is valid depends
 * on the chip family (rv730/rv710 vs the rest of the rv7xx line). */
union r7xx_clock_registers {
	struct rv770_clock_registers rv770;
	struct rv730_clock_registers rv730;
};
63
/* One VDDC voltage step together with its SMIO programming values. */
struct vddc_table_entry {
	u16 vddc;        /* voltage level */
	u8 vddc_index;   /* index into the regulator's step table */
	u8 high_smio;    /* SMIO high word for this step */
	u32 low_smio;    /* SMIO low word for this step */
};
70
71#define MAX_NO_OF_MVDD_VALUES 2
72#define MAX_NO_VREG_STEPS 32
73
/* Driver-private DPM state for rv7xx parts; allocated in rv770_dpm_init()
 * and stored in rdev->pm.dpm.priv. */
struct rv7xx_power_info {
	/* flags */
	bool mem_gddr5;
	bool pcie_gen2;
	bool dynamic_pcie_gen2;
	bool acpi_pcie_gen2;
	bool boot_in_gen2;
	bool voltage_control; /* vddc */
	bool mvdd_control;
	bool sclk_ss;
	bool mclk_ss;
	bool dynamic_ss;
	bool gfx_clock_gating;
	bool mg_clock_gating;
	bool mgcgtssm;
	bool power_gating;
	bool thermal_protection;
	bool display_gap;
	bool dcodt;
	bool ulps;
	/* registers */
	union r7xx_clock_registers clk_regs;
	u32 s0_vid_lower_smio_cntl;
	/* voltage */
	u32 vddc_mask_low;
	u32 mvdd_mask_low;
	u32 mvdd_split_frequency;
	u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES];
	u16 max_vddc;
	u16 max_vddc_in_table;
	u16 min_vddc_in_table;
	struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS];
	u8 valid_vddc_entries;
	/* dc odt */
	u32 mclk_odt_threshold; /* 0 disables all DC ODT handling */
	u8 odt_value_0[2];
	u8 odt_value_1[2];
	/* stored values */
	u32 boot_sclk;
	u16 acpi_vddc;
	u32 ref_div;
	u32 active_auto_throttle_sources; /* RADEON_DPM_AUTO_THROTTLE_SRC_* bitmask */
	u32 mclk_stutter_mode_threshold;
	u32 mclk_strobe_mode_threshold;
	u32 mclk_edc_enable_threshold;
	u32 bsp;
	u32 bsu;
	u32 pbsp;
	u32 pbsu;
	u32 dsp;
	u32 psp;
	u32 asi;
	u32 pasi;
	u32 vrc;
	u32 restricted_levels;
	u32 rlp;
	u32 rmp;
	u32 lhp;
	u32 lmp;
	/* smc offsets */
	u16 state_table_start;
	u16 soft_regs_start;
	u16 sram_end;
	/* scratch structs */
	RV770_SMC_STATETABLE smc_statetable;
};
140
/* One performance level: clocks, voltages and pplib flags. */
struct rv7xx_pl {
	u32 sclk; /* engine clock */
	u32 mclk; /* memory clock */
	u16 vddc;
	u16 vddci; /* eg+ only */
	u32 flags; /* ATOM_PPLIB_R600_FLAGS_* */
	enum radeon_pcie_gen pcie_gen; /* si+ only */
};
149
/* Private side of a radeon_ps: three performance levels plus a flag for
 * DC (battery) compatibility. */
struct rv7xx_ps {
	struct rv7xx_pl high;
	struct rv7xx_pl medium;
	struct rv7xx_pl low;
	bool dc_compatible;
};
156
157#define RV770_RLP_DFLT 10
158#define RV770_RMP_DFLT 25
159#define RV770_LHP_DFLT 25
160#define RV770_LMP_DFLT 10
161#define RV770_VRC_DFLT 0x003f
162#define RV770_ASI_DFLT 1000
163#define RV770_HASI_DFLT 200000
164#define RV770_MGCGTTLOCAL0_DFLT 0x00100000
165#define RV7XX_MGCGTTLOCAL0_DFLT 0
166#define RV770_MGCGTTLOCAL1_DFLT 0xFFFF0000
167#define RV770_MGCGCGTSSMCTRL_DFLT 0x55940000
168
169#define MVDD_LOW_INDEX 0
170#define MVDD_HIGH_INDEX 1
171
172#define MVDD_LOW_VALUE 0
173#define MVDD_HIGH_VALUE 0xffff
174
175#define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 khz */
176#define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 khz */
177
178/* rv730/rv710 */
179int rv730_populate_sclk_value(struct radeon_device *rdev,
180 u32 engine_clock,
181 RV770_SMC_SCLK_VALUE *sclk);
182int rv730_populate_mclk_value(struct radeon_device *rdev,
183 u32 engine_clock, u32 memory_clock,
184 LPRV7XX_SMC_MCLK_VALUE mclk);
185void rv730_read_clock_registers(struct radeon_device *rdev);
186int rv730_populate_smc_acpi_state(struct radeon_device *rdev,
187 RV770_SMC_STATETABLE *table);
188int rv730_populate_smc_initial_state(struct radeon_device *rdev,
189 struct radeon_ps *radeon_initial_state,
190 RV770_SMC_STATETABLE *table);
191void rv730_program_memory_timing_parameters(struct radeon_device *rdev,
192 struct radeon_ps *radeon_state);
193void rv730_power_gating_enable(struct radeon_device *rdev,
194 bool enable);
195void rv730_start_dpm(struct radeon_device *rdev);
196void rv730_stop_dpm(struct radeon_device *rdev);
197void rv730_program_dcodt(struct radeon_device *rdev, bool use_dcodt);
198void rv730_get_odt_values(struct radeon_device *rdev);
199
200/* rv740 */
201int rv740_populate_sclk_value(struct radeon_device *rdev, u32 engine_clock,
202 RV770_SMC_SCLK_VALUE *sclk);
203int rv740_populate_mclk_value(struct radeon_device *rdev,
204 u32 engine_clock, u32 memory_clock,
205 RV7XX_SMC_MCLK_VALUE *mclk);
206void rv740_read_clock_registers(struct radeon_device *rdev);
207int rv740_populate_smc_acpi_state(struct radeon_device *rdev,
208 RV770_SMC_STATETABLE *table);
209void rv740_enable_mclk_spread_spectrum(struct radeon_device *rdev,
210 bool enable);
211u8 rv740_get_mclk_frequency_ratio(u32 memory_clock);
212u32 rv740_get_dll_speed(bool is_gddr5, u32 memory_clock);
213u32 rv740_get_decoded_reference_divider(u32 encoded_ref);
214
215/* rv770 */
216u32 rv770_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf);
217int rv770_populate_vddc_value(struct radeon_device *rdev, u16 vddc,
218 RV770_SMC_VOLTAGE_VALUE *voltage);
219int rv770_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
220 RV770_SMC_VOLTAGE_VALUE *voltage);
221u8 rv770_get_seq_value(struct radeon_device *rdev,
222 struct rv7xx_pl *pl);
223int rv770_populate_initial_mvdd_value(struct radeon_device *rdev,
224 RV770_SMC_VOLTAGE_VALUE *voltage);
225u32 rv770_calculate_memory_refresh_rate(struct radeon_device *rdev,
226 u32 engine_clock);
227void rv770_program_response_times(struct radeon_device *rdev);
228int rv770_populate_smc_sp(struct radeon_device *rdev,
229 struct radeon_ps *radeon_state,
230 RV770_SMC_SWSTATE *smc_state);
231int rv770_populate_smc_t(struct radeon_device *rdev,
232 struct radeon_ps *radeon_state,
233 RV770_SMC_SWSTATE *smc_state);
234void rv770_read_voltage_smio_registers(struct radeon_device *rdev);
235void rv770_get_memory_type(struct radeon_device *rdev);
236void r7xx_start_smc(struct radeon_device *rdev);
237u8 rv770_get_memory_module_index(struct radeon_device *rdev);
238void rv770_get_max_vddc(struct radeon_device *rdev);
239void rv770_get_pcie_gen2_status(struct radeon_device *rdev);
240void rv770_enable_acpi_pm(struct radeon_device *rdev);
241void rv770_restore_cgcg(struct radeon_device *rdev);
242bool rv770_dpm_enabled(struct radeon_device *rdev);
243void rv770_enable_voltage_control(struct radeon_device *rdev,
244 bool enable);
245void rv770_enable_backbias(struct radeon_device *rdev,
246 bool enable);
247void rv770_enable_thermal_protection(struct radeon_device *rdev,
248 bool enable);
249void rv770_enable_auto_throttle_source(struct radeon_device *rdev,
250 enum radeon_dpm_auto_throttle_src source,
251 bool enable);
252void rv770_setup_bsp(struct radeon_device *rdev);
253void rv770_program_git(struct radeon_device *rdev);
254void rv770_program_tp(struct radeon_device *rdev);
255void rv770_program_tpp(struct radeon_device *rdev);
256void rv770_program_sstp(struct radeon_device *rdev);
257void rv770_program_engine_speed_parameters(struct radeon_device *rdev);
258void rv770_program_vc(struct radeon_device *rdev);
259void rv770_clear_vc(struct radeon_device *rdev);
260int rv770_upload_firmware(struct radeon_device *rdev);
261void rv770_stop_dpm(struct radeon_device *rdev);
262void r7xx_stop_smc(struct radeon_device *rdev);
263void rv770_reset_smio_status(struct radeon_device *rdev);
264int rv770_restrict_performance_levels_before_switch(struct radeon_device *rdev);
265int rv770_unrestrict_performance_levels_after_switch(struct radeon_device *rdev);
266int rv770_halt_smc(struct radeon_device *rdev);
267int rv770_resume_smc(struct radeon_device *rdev);
268int rv770_set_sw_state(struct radeon_device *rdev);
269int rv770_set_boot_state(struct radeon_device *rdev);
270int rv7xx_parse_power_table(struct radeon_device *rdev);
271void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
272 struct radeon_ps *new_ps,
273 struct radeon_ps *old_ps);
274void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
275 struct radeon_ps *new_ps,
276 struct radeon_ps *old_ps);
277
278/* smc */
279int rv770_read_smc_soft_register(struct radeon_device *rdev,
280 u16 reg_offset, u32 *value);
281int rv770_write_smc_soft_register(struct radeon_device *rdev,
282 u16 reg_offset, u32 value);
283
284/* thermal */
285int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
286 int min_temp, int max_temp);
287
288#endif
diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c
new file mode 100644
index 000000000000..ab95da570215
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_smc.c
@@ -0,0 +1,621 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include <linux/firmware.h>
26#include "drmP.h"
27#include "radeon.h"
28#include "rv770d.h"
29#include "rv770_dpm.h"
30#include "rv770_smc.h"
31#include "atom.h"
32#include "radeon_ucode.h"
33
34#define FIRST_SMC_INT_VECT_REG 0xFFD8
35#define FIRST_INT_VECT_S19 0xFFC0
36
37static const u8 rv770_smc_int_vectors[] =
38{
39 0x08, 0x10, 0x08, 0x10,
40 0x08, 0x10, 0x08, 0x10,
41 0x08, 0x10, 0x08, 0x10,
42 0x08, 0x10, 0x08, 0x10,
43 0x08, 0x10, 0x08, 0x10,
44 0x08, 0x10, 0x08, 0x10,
45 0x08, 0x10, 0x08, 0x10,
46 0x08, 0x10, 0x08, 0x10,
47 0x08, 0x10, 0x08, 0x10,
48 0x08, 0x10, 0x08, 0x10,
49 0x08, 0x10, 0x08, 0x10,
50 0x08, 0x10, 0x08, 0x10,
51 0x08, 0x10, 0x0C, 0xD7,
52 0x08, 0x2B, 0x08, 0x10,
53 0x03, 0x51, 0x03, 0x51,
54 0x03, 0x51, 0x03, 0x51
55};
56
57static const u8 rv730_smc_int_vectors[] =
58{
59 0x08, 0x15, 0x08, 0x15,
60 0x08, 0x15, 0x08, 0x15,
61 0x08, 0x15, 0x08, 0x15,
62 0x08, 0x15, 0x08, 0x15,
63 0x08, 0x15, 0x08, 0x15,
64 0x08, 0x15, 0x08, 0x15,
65 0x08, 0x15, 0x08, 0x15,
66 0x08, 0x15, 0x08, 0x15,
67 0x08, 0x15, 0x08, 0x15,
68 0x08, 0x15, 0x08, 0x15,
69 0x08, 0x15, 0x08, 0x15,
70 0x08, 0x15, 0x08, 0x15,
71 0x08, 0x15, 0x0C, 0xBB,
72 0x08, 0x30, 0x08, 0x15,
73 0x03, 0x56, 0x03, 0x56,
74 0x03, 0x56, 0x03, 0x56
75};
76
77static const u8 rv710_smc_int_vectors[] =
78{
79 0x08, 0x04, 0x08, 0x04,
80 0x08, 0x04, 0x08, 0x04,
81 0x08, 0x04, 0x08, 0x04,
82 0x08, 0x04, 0x08, 0x04,
83 0x08, 0x04, 0x08, 0x04,
84 0x08, 0x04, 0x08, 0x04,
85 0x08, 0x04, 0x08, 0x04,
86 0x08, 0x04, 0x08, 0x04,
87 0x08, 0x04, 0x08, 0x04,
88 0x08, 0x04, 0x08, 0x04,
89 0x08, 0x04, 0x08, 0x04,
90 0x08, 0x04, 0x08, 0x04,
91 0x08, 0x04, 0x0C, 0xCB,
92 0x08, 0x1F, 0x08, 0x04,
93 0x03, 0x51, 0x03, 0x51,
94 0x03, 0x51, 0x03, 0x51
95};
96
97static const u8 rv740_smc_int_vectors[] =
98{
99 0x08, 0x10, 0x08, 0x10,
100 0x08, 0x10, 0x08, 0x10,
101 0x08, 0x10, 0x08, 0x10,
102 0x08, 0x10, 0x08, 0x10,
103 0x08, 0x10, 0x08, 0x10,
104 0x08, 0x10, 0x08, 0x10,
105 0x08, 0x10, 0x08, 0x10,
106 0x08, 0x10, 0x08, 0x10,
107 0x08, 0x10, 0x08, 0x10,
108 0x08, 0x10, 0x08, 0x10,
109 0x08, 0x10, 0x08, 0x10,
110 0x08, 0x10, 0x08, 0x10,
111 0x08, 0x10, 0x0C, 0xD7,
112 0x08, 0x2B, 0x08, 0x10,
113 0x03, 0x51, 0x03, 0x51,
114 0x03, 0x51, 0x03, 0x51
115};
116
117static const u8 cedar_smc_int_vectors[] =
118{
119 0x0B, 0x05, 0x0B, 0x05,
120 0x0B, 0x05, 0x0B, 0x05,
121 0x0B, 0x05, 0x0B, 0x05,
122 0x0B, 0x05, 0x0B, 0x05,
123 0x0B, 0x05, 0x0B, 0x05,
124 0x0B, 0x05, 0x0B, 0x05,
125 0x0B, 0x05, 0x0B, 0x05,
126 0x0B, 0x05, 0x0B, 0x05,
127 0x0B, 0x05, 0x0B, 0x05,
128 0x0B, 0x05, 0x0B, 0x05,
129 0x0B, 0x05, 0x0B, 0x05,
130 0x0B, 0x05, 0x0B, 0x05,
131 0x0B, 0x05, 0x11, 0x8B,
132 0x0B, 0x20, 0x0B, 0x05,
133 0x04, 0xF6, 0x04, 0xF6,
134 0x04, 0xF6, 0x04, 0xF6
135};
136
137static const u8 redwood_smc_int_vectors[] =
138{
139 0x0B, 0x05, 0x0B, 0x05,
140 0x0B, 0x05, 0x0B, 0x05,
141 0x0B, 0x05, 0x0B, 0x05,
142 0x0B, 0x05, 0x0B, 0x05,
143 0x0B, 0x05, 0x0B, 0x05,
144 0x0B, 0x05, 0x0B, 0x05,
145 0x0B, 0x05, 0x0B, 0x05,
146 0x0B, 0x05, 0x0B, 0x05,
147 0x0B, 0x05, 0x0B, 0x05,
148 0x0B, 0x05, 0x0B, 0x05,
149 0x0B, 0x05, 0x0B, 0x05,
150 0x0B, 0x05, 0x0B, 0x05,
151 0x0B, 0x05, 0x11, 0x8B,
152 0x0B, 0x20, 0x0B, 0x05,
153 0x04, 0xF6, 0x04, 0xF6,
154 0x04, 0xF6, 0x04, 0xF6
155};
156
157static const u8 juniper_smc_int_vectors[] =
158{
159 0x0B, 0x05, 0x0B, 0x05,
160 0x0B, 0x05, 0x0B, 0x05,
161 0x0B, 0x05, 0x0B, 0x05,
162 0x0B, 0x05, 0x0B, 0x05,
163 0x0B, 0x05, 0x0B, 0x05,
164 0x0B, 0x05, 0x0B, 0x05,
165 0x0B, 0x05, 0x0B, 0x05,
166 0x0B, 0x05, 0x0B, 0x05,
167 0x0B, 0x05, 0x0B, 0x05,
168 0x0B, 0x05, 0x0B, 0x05,
169 0x0B, 0x05, 0x0B, 0x05,
170 0x0B, 0x05, 0x0B, 0x05,
171 0x0B, 0x05, 0x11, 0x8B,
172 0x0B, 0x20, 0x0B, 0x05,
173 0x04, 0xF6, 0x04, 0xF6,
174 0x04, 0xF6, 0x04, 0xF6
175};
176
177static const u8 cypress_smc_int_vectors[] =
178{
179 0x0B, 0x05, 0x0B, 0x05,
180 0x0B, 0x05, 0x0B, 0x05,
181 0x0B, 0x05, 0x0B, 0x05,
182 0x0B, 0x05, 0x0B, 0x05,
183 0x0B, 0x05, 0x0B, 0x05,
184 0x0B, 0x05, 0x0B, 0x05,
185 0x0B, 0x05, 0x0B, 0x05,
186 0x0B, 0x05, 0x0B, 0x05,
187 0x0B, 0x05, 0x0B, 0x05,
188 0x0B, 0x05, 0x0B, 0x05,
189 0x0B, 0x05, 0x0B, 0x05,
190 0x0B, 0x05, 0x0B, 0x05,
191 0x0B, 0x05, 0x11, 0x8B,
192 0x0B, 0x20, 0x0B, 0x05,
193 0x04, 0xF6, 0x04, 0xF6,
194 0x04, 0xF6, 0x04, 0xF6
195};
196
197static const u8 barts_smc_int_vectors[] =
198{
199 0x0C, 0x14, 0x0C, 0x14,
200 0x0C, 0x14, 0x0C, 0x14,
201 0x0C, 0x14, 0x0C, 0x14,
202 0x0C, 0x14, 0x0C, 0x14,
203 0x0C, 0x14, 0x0C, 0x14,
204 0x0C, 0x14, 0x0C, 0x14,
205 0x0C, 0x14, 0x0C, 0x14,
206 0x0C, 0x14, 0x0C, 0x14,
207 0x0C, 0x14, 0x0C, 0x14,
208 0x0C, 0x14, 0x0C, 0x14,
209 0x0C, 0x14, 0x0C, 0x14,
210 0x0C, 0x14, 0x0C, 0x14,
211 0x0C, 0x14, 0x12, 0xAA,
212 0x0C, 0x2F, 0x15, 0xF6,
213 0x15, 0xF6, 0x05, 0x0A,
214 0x05, 0x0A, 0x05, 0x0A
215};
216
217static const u8 turks_smc_int_vectors[] =
218{
219 0x0C, 0x14, 0x0C, 0x14,
220 0x0C, 0x14, 0x0C, 0x14,
221 0x0C, 0x14, 0x0C, 0x14,
222 0x0C, 0x14, 0x0C, 0x14,
223 0x0C, 0x14, 0x0C, 0x14,
224 0x0C, 0x14, 0x0C, 0x14,
225 0x0C, 0x14, 0x0C, 0x14,
226 0x0C, 0x14, 0x0C, 0x14,
227 0x0C, 0x14, 0x0C, 0x14,
228 0x0C, 0x14, 0x0C, 0x14,
229 0x0C, 0x14, 0x0C, 0x14,
230 0x0C, 0x14, 0x0C, 0x14,
231 0x0C, 0x14, 0x12, 0xAA,
232 0x0C, 0x2F, 0x15, 0xF6,
233 0x15, 0xF6, 0x05, 0x0A,
234 0x05, 0x0A, 0x05, 0x0A
235};
236
237static const u8 caicos_smc_int_vectors[] =
238{
239 0x0C, 0x14, 0x0C, 0x14,
240 0x0C, 0x14, 0x0C, 0x14,
241 0x0C, 0x14, 0x0C, 0x14,
242 0x0C, 0x14, 0x0C, 0x14,
243 0x0C, 0x14, 0x0C, 0x14,
244 0x0C, 0x14, 0x0C, 0x14,
245 0x0C, 0x14, 0x0C, 0x14,
246 0x0C, 0x14, 0x0C, 0x14,
247 0x0C, 0x14, 0x0C, 0x14,
248 0x0C, 0x14, 0x0C, 0x14,
249 0x0C, 0x14, 0x0C, 0x14,
250 0x0C, 0x14, 0x0C, 0x14,
251 0x0C, 0x14, 0x12, 0xAA,
252 0x0C, 0x2F, 0x15, 0xF6,
253 0x15, 0xF6, 0x05, 0x0A,
254 0x05, 0x0A, 0x05, 0x0A
255};
256
257static const u8 cayman_smc_int_vectors[] =
258{
259 0x12, 0x05, 0x12, 0x05,
260 0x12, 0x05, 0x12, 0x05,
261 0x12, 0x05, 0x12, 0x05,
262 0x12, 0x05, 0x12, 0x05,
263 0x12, 0x05, 0x12, 0x05,
264 0x12, 0x05, 0x12, 0x05,
265 0x12, 0x05, 0x12, 0x05,
266 0x12, 0x05, 0x12, 0x05,
267 0x12, 0x05, 0x12, 0x05,
268 0x12, 0x05, 0x12, 0x05,
269 0x12, 0x05, 0x12, 0x05,
270 0x12, 0x05, 0x12, 0x05,
271 0x12, 0x05, 0x18, 0xEA,
272 0x12, 0x20, 0x1C, 0x34,
273 0x1C, 0x34, 0x08, 0x72,
274 0x08, 0x72, 0x08, 0x72
275};
276
277int rv770_set_smc_sram_address(struct radeon_device *rdev,
278 u16 smc_address, u16 limit)
279{
280 u32 addr;
281
282 if (smc_address & 3)
283 return -EINVAL;
284 if ((smc_address + 3) > limit)
285 return -EINVAL;
286
287 addr = smc_address;
288 addr |= SMC_SRAM_AUTO_INC_DIS;
289
290 WREG32(SMC_SRAM_ADDR, addr);
291
292 return 0;
293}
294
295int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
296 u16 smc_start_address, const u8 *src,
297 u16 byte_count, u16 limit)
298{
299 u32 data, original_data, extra_shift;
300 u16 addr;
301 int ret;
302
303 if (smc_start_address & 3)
304 return -EINVAL;
305 if ((smc_start_address + byte_count) > limit)
306 return -EINVAL;
307
308 addr = smc_start_address;
309
310 while (byte_count >= 4) {
311 /* SMC address space is BE */
312 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
313
314 ret = rv770_set_smc_sram_address(rdev, addr, limit);
315 if (ret)
316 return ret;
317
318 WREG32(SMC_SRAM_DATA, data);
319
320 src += 4;
321 byte_count -= 4;
322 addr += 4;
323 }
324
325 /* RMW for final bytes */
326 if (byte_count > 0) {
327 data = 0;
328
329 ret = rv770_set_smc_sram_address(rdev, addr, limit);
330 if (ret)
331 return ret;
332
333 original_data = RREG32(SMC_SRAM_DATA);
334
335 extra_shift = 8 * (4 - byte_count);
336
337 while (byte_count > 0) {
338 /* SMC address space is BE */
339 data = (data << 8) + *src++;
340 byte_count--;
341 }
342
343 data <<= extra_shift;
344
345 data |= (original_data & ~((~0UL) << extra_shift));
346
347 ret = rv770_set_smc_sram_address(rdev, addr, limit);
348 if (ret)
349 return ret;
350
351 WREG32(SMC_SRAM_DATA, data);
352 }
353
354 return 0;
355}
356
357static int rv770_program_interrupt_vectors(struct radeon_device *rdev,
358 u32 smc_first_vector, const u8 *src,
359 u32 byte_count)
360{
361 u32 tmp, i;
362
363 if (byte_count % 4)
364 return -EINVAL;
365
366 if (smc_first_vector < FIRST_SMC_INT_VECT_REG) {
367 tmp = FIRST_SMC_INT_VECT_REG - smc_first_vector;
368
369 if (tmp > byte_count)
370 return 0;
371
372 byte_count -= tmp;
373 src += tmp;
374 smc_first_vector = FIRST_SMC_INT_VECT_REG;
375 }
376
377 for (i = 0; i < byte_count; i += 4) {
378 /* SMC address space is BE */
379 tmp = (src[i] << 24) | (src[i + 1] << 16) | (src[i + 2] << 8) | src[i + 3];
380
381 WREG32(SMC_ISR_FFD8_FFDB + i, tmp);
382 }
383
384 return 0;
385}
386
387void rv770_start_smc(struct radeon_device *rdev)
388{
389 WREG32_P(SMC_IO, SMC_RST_N, ~SMC_RST_N);
390}
391
392void rv770_reset_smc(struct radeon_device *rdev)
393{
394 WREG32_P(SMC_IO, 0, ~SMC_RST_N);
395}
396
397void rv770_stop_smc_clock(struct radeon_device *rdev)
398{
399 WREG32_P(SMC_IO, 0, ~SMC_CLK_EN);
400}
401
402void rv770_start_smc_clock(struct radeon_device *rdev)
403{
404 WREG32_P(SMC_IO, SMC_CLK_EN, ~SMC_CLK_EN);
405}
406
407bool rv770_is_smc_running(struct radeon_device *rdev)
408{
409 u32 tmp;
410
411 tmp = RREG32(SMC_IO);
412
413 if ((tmp & SMC_RST_N) && (tmp & SMC_CLK_EN))
414 return true;
415 else
416 return false;
417}
418
419PPSMC_Result rv770_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
420{
421 u32 tmp;
422 int i;
423 PPSMC_Result result;
424
425 if (!rv770_is_smc_running(rdev))
426 return PPSMC_Result_Failed;
427
428 WREG32_P(SMC_MSG, HOST_SMC_MSG(msg), ~HOST_SMC_MSG_MASK);
429
430 for (i = 0; i < rdev->usec_timeout; i++) {
431 tmp = RREG32(SMC_MSG) & HOST_SMC_RESP_MASK;
432 tmp >>= HOST_SMC_RESP_SHIFT;
433 if (tmp != 0)
434 break;
435 udelay(1);
436 }
437
438 tmp = RREG32(SMC_MSG) & HOST_SMC_RESP_MASK;
439 tmp >>= HOST_SMC_RESP_SHIFT;
440
441 result = (PPSMC_Result)tmp;
442 return result;
443}
444
445PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev)
446{
447 int i;
448 PPSMC_Result result = PPSMC_Result_OK;
449
450 if (!rv770_is_smc_running(rdev))
451 return result;
452
453 for (i = 0; i < rdev->usec_timeout; i++) {
454 if (RREG32(SMC_IO) & SMC_STOP_MODE)
455 break;
456 udelay(1);
457 }
458
459 return result;
460}
461
462static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit)
463{
464 u16 i;
465
466 for (i = 0; i < limit; i += 4) {
467 rv770_set_smc_sram_address(rdev, i, limit);
468 WREG32(SMC_SRAM_DATA, 0);
469 }
470}
471
472int rv770_load_smc_ucode(struct radeon_device *rdev,
473 u16 limit)
474{
475 int ret;
476 const u8 *int_vect;
477 u16 int_vect_start_address;
478 u16 int_vect_size;
479 const u8 *ucode_data;
480 u16 ucode_start_address;
481 u16 ucode_size;
482
483 if (!rdev->smc_fw)
484 return -EINVAL;
485
486 rv770_clear_smc_sram(rdev, limit);
487
488 switch (rdev->family) {
489 case CHIP_RV770:
490 ucode_start_address = RV770_SMC_UCODE_START;
491 ucode_size = RV770_SMC_UCODE_SIZE;
492 int_vect = (const u8 *)&rv770_smc_int_vectors;
493 int_vect_start_address = RV770_SMC_INT_VECTOR_START;
494 int_vect_size = RV770_SMC_INT_VECTOR_SIZE;
495 break;
496 case CHIP_RV730:
497 ucode_start_address = RV730_SMC_UCODE_START;
498 ucode_size = RV730_SMC_UCODE_SIZE;
499 int_vect = (const u8 *)&rv730_smc_int_vectors;
500 int_vect_start_address = RV730_SMC_INT_VECTOR_START;
501 int_vect_size = RV730_SMC_INT_VECTOR_SIZE;
502 break;
503 case CHIP_RV710:
504 ucode_start_address = RV710_SMC_UCODE_START;
505 ucode_size = RV710_SMC_UCODE_SIZE;
506 int_vect = (const u8 *)&rv710_smc_int_vectors;
507 int_vect_start_address = RV710_SMC_INT_VECTOR_START;
508 int_vect_size = RV710_SMC_INT_VECTOR_SIZE;
509 break;
510 case CHIP_RV740:
511 ucode_start_address = RV740_SMC_UCODE_START;
512 ucode_size = RV740_SMC_UCODE_SIZE;
513 int_vect = (const u8 *)&rv740_smc_int_vectors;
514 int_vect_start_address = RV740_SMC_INT_VECTOR_START;
515 int_vect_size = RV740_SMC_INT_VECTOR_SIZE;
516 break;
517 case CHIP_CEDAR:
518 ucode_start_address = CEDAR_SMC_UCODE_START;
519 ucode_size = CEDAR_SMC_UCODE_SIZE;
520 int_vect = (const u8 *)&cedar_smc_int_vectors;
521 int_vect_start_address = CEDAR_SMC_INT_VECTOR_START;
522 int_vect_size = CEDAR_SMC_INT_VECTOR_SIZE;
523 break;
524 case CHIP_REDWOOD:
525 ucode_start_address = REDWOOD_SMC_UCODE_START;
526 ucode_size = REDWOOD_SMC_UCODE_SIZE;
527 int_vect = (const u8 *)&redwood_smc_int_vectors;
528 int_vect_start_address = REDWOOD_SMC_INT_VECTOR_START;
529 int_vect_size = REDWOOD_SMC_INT_VECTOR_SIZE;
530 break;
531 case CHIP_JUNIPER:
532 ucode_start_address = JUNIPER_SMC_UCODE_START;
533 ucode_size = JUNIPER_SMC_UCODE_SIZE;
534 int_vect = (const u8 *)&juniper_smc_int_vectors;
535 int_vect_start_address = JUNIPER_SMC_INT_VECTOR_START;
536 int_vect_size = JUNIPER_SMC_INT_VECTOR_SIZE;
537 break;
538 case CHIP_CYPRESS:
539 case CHIP_HEMLOCK:
540 ucode_start_address = CYPRESS_SMC_UCODE_START;
541 ucode_size = CYPRESS_SMC_UCODE_SIZE;
542 int_vect = (const u8 *)&cypress_smc_int_vectors;
543 int_vect_start_address = CYPRESS_SMC_INT_VECTOR_START;
544 int_vect_size = CYPRESS_SMC_INT_VECTOR_SIZE;
545 break;
546 case CHIP_BARTS:
547 ucode_start_address = BARTS_SMC_UCODE_START;
548 ucode_size = BARTS_SMC_UCODE_SIZE;
549 int_vect = (const u8 *)&barts_smc_int_vectors;
550 int_vect_start_address = BARTS_SMC_INT_VECTOR_START;
551 int_vect_size = BARTS_SMC_INT_VECTOR_SIZE;
552 break;
553 case CHIP_TURKS:
554 ucode_start_address = TURKS_SMC_UCODE_START;
555 ucode_size = TURKS_SMC_UCODE_SIZE;
556 int_vect = (const u8 *)&turks_smc_int_vectors;
557 int_vect_start_address = TURKS_SMC_INT_VECTOR_START;
558 int_vect_size = TURKS_SMC_INT_VECTOR_SIZE;
559 break;
560 case CHIP_CAICOS:
561 ucode_start_address = CAICOS_SMC_UCODE_START;
562 ucode_size = CAICOS_SMC_UCODE_SIZE;
563 int_vect = (const u8 *)&caicos_smc_int_vectors;
564 int_vect_start_address = CAICOS_SMC_INT_VECTOR_START;
565 int_vect_size = CAICOS_SMC_INT_VECTOR_SIZE;
566 break;
567 case CHIP_CAYMAN:
568 ucode_start_address = CAYMAN_SMC_UCODE_START;
569 ucode_size = CAYMAN_SMC_UCODE_SIZE;
570 int_vect = (const u8 *)&cayman_smc_int_vectors;
571 int_vect_start_address = CAYMAN_SMC_INT_VECTOR_START;
572 int_vect_size = CAYMAN_SMC_INT_VECTOR_SIZE;
573 break;
574 default:
575 DRM_ERROR("unknown asic in smc ucode loader\n");
576 BUG();
577 }
578
579 /* load the ucode */
580 ucode_data = (const u8 *)rdev->smc_fw->data;
581 ret = rv770_copy_bytes_to_smc(rdev, ucode_start_address,
582 ucode_data, ucode_size, limit);
583 if (ret)
584 return ret;
585
586 /* set up the int vectors */
587 ret = rv770_program_interrupt_vectors(rdev, int_vect_start_address,
588 int_vect, int_vect_size);
589 if (ret)
590 return ret;
591
592 return 0;
593}
594
595int rv770_read_smc_sram_dword(struct radeon_device *rdev,
596 u16 smc_address, u32 *value, u16 limit)
597{
598 int ret;
599
600 ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
601 if (ret)
602 return ret;
603
604 *value = RREG32(SMC_SRAM_DATA);
605
606 return 0;
607}
608
609int rv770_write_smc_sram_dword(struct radeon_device *rdev,
610 u16 smc_address, u32 value, u16 limit)
611{
612 int ret;
613
614 ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
615 if (ret)
616 return ret;
617
618 WREG32(SMC_SRAM_DATA, value);
619
620 return 0;
621}
diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h
new file mode 100644
index 000000000000..f78d92a4b325
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_smc.h
@@ -0,0 +1,209 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __RV770_SMC_H__
24#define __RV770_SMC_H__
25
26#include "ppsmc.h"
27
28#pragma pack(push, 1)
29
30#define RV770_SMC_TABLE_ADDRESS 0xB000
31
32#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3
33
34struct RV770_SMC_SCLK_VALUE
35{
36 uint32_t vCG_SPLL_FUNC_CNTL;
37 uint32_t vCG_SPLL_FUNC_CNTL_2;
38 uint32_t vCG_SPLL_FUNC_CNTL_3;
39 uint32_t vCG_SPLL_SPREAD_SPECTRUM;
40 uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
41 uint32_t sclk_value;
42};
43
44typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
45
46struct RV770_SMC_MCLK_VALUE
47{
48 uint32_t vMPLL_AD_FUNC_CNTL;
49 uint32_t vMPLL_AD_FUNC_CNTL_2;
50 uint32_t vMPLL_DQ_FUNC_CNTL;
51 uint32_t vMPLL_DQ_FUNC_CNTL_2;
52 uint32_t vMCLK_PWRMGT_CNTL;
53 uint32_t vDLL_CNTL;
54 uint32_t vMPLL_SS;
55 uint32_t vMPLL_SS2;
56 uint32_t mclk_value;
57};
58
59typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
60
61
62struct RV730_SMC_MCLK_VALUE
63{
64 uint32_t vMCLK_PWRMGT_CNTL;
65 uint32_t vDLL_CNTL;
66 uint32_t vMPLL_FUNC_CNTL;
67 uint32_t vMPLL_FUNC_CNTL2;
68 uint32_t vMPLL_FUNC_CNTL3;
69 uint32_t vMPLL_SS;
70 uint32_t vMPLL_SS2;
71 uint32_t mclk_value;
72};
73
74typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
75
76struct RV770_SMC_VOLTAGE_VALUE
77{
78 uint16_t value;
79 uint8_t index;
80 uint8_t padding;
81};
82
83typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
84
85union RV7XX_SMC_MCLK_VALUE
86{
87 RV770_SMC_MCLK_VALUE mclk770;
88 RV730_SMC_MCLK_VALUE mclk730;
89};
90
91typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
92
93struct RV770_SMC_HW_PERFORMANCE_LEVEL
94{
95 uint8_t arbValue;
96 union{
97 uint8_t seqValue;
98 uint8_t ACIndex;
99 };
100 uint8_t displayWatermark;
101 uint8_t gen2PCIE;
102 uint8_t gen2XSP;
103 uint8_t backbias;
104 uint8_t strobeMode;
105 uint8_t mcFlags;
106 uint32_t aT;
107 uint32_t bSP;
108 RV770_SMC_SCLK_VALUE sclk;
109 RV7XX_SMC_MCLK_VALUE mclk;
110 RV770_SMC_VOLTAGE_VALUE vddc;
111 RV770_SMC_VOLTAGE_VALUE mvdd;
112 RV770_SMC_VOLTAGE_VALUE vddci;
113 uint8_t reserved1;
114 uint8_t reserved2;
115 uint8_t stateFlags;
116 uint8_t padding;
117};
118
119#define SMC_STROBE_RATIO 0x0F
120#define SMC_STROBE_ENABLE 0x10
121
122#define SMC_MC_EDC_RD_FLAG 0x01
123#define SMC_MC_EDC_WR_FLAG 0x02
124#define SMC_MC_RTT_ENABLE 0x04
125#define SMC_MC_STUTTER_EN 0x08
126
127typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
128
129struct RV770_SMC_SWSTATE
130{
131 uint8_t flags;
132 uint8_t padding1;
133 uint8_t padding2;
134 uint8_t padding3;
135 RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
136};
137
138typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
139
140#define RV770_SMC_VOLTAGEMASK_VDDC 0
141#define RV770_SMC_VOLTAGEMASK_MVDD 1
142#define RV770_SMC_VOLTAGEMASK_VDDCI 2
143#define RV770_SMC_VOLTAGEMASK_MAX 4
144
145struct RV770_SMC_VOLTAGEMASKTABLE
146{
147 uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX];
148 uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
149};
150
151typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
152
153#define MAX_NO_VREG_STEPS 32
154
155struct RV770_SMC_STATETABLE
156{
157 uint8_t thermalProtectType;
158 uint8_t systemFlags;
159 uint8_t maxVDDCIndexInPPTable;
160 uint8_t extraFlags;
161 uint8_t highSMIO[MAX_NO_VREG_STEPS];
162 uint32_t lowSMIO[MAX_NO_VREG_STEPS];
163 RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable;
164 RV770_SMC_SWSTATE initialState;
165 RV770_SMC_SWSTATE ACPIState;
166 RV770_SMC_SWSTATE driverState;
167 RV770_SMC_SWSTATE ULVState;
168};
169
170typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
171
172#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
173
174#pragma pack(pop)
175
176#define RV770_SMC_SOFT_REGISTERS_START 0x104
177
178#define RV770_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0
179#define RV770_SMC_SOFT_REGISTER_baby_step_timer 0x8
180#define RV770_SMC_SOFT_REGISTER_delay_bbias 0xC
181#define RV770_SMC_SOFT_REGISTER_delay_vreg 0x10
182#define RV770_SMC_SOFT_REGISTER_delay_acpi 0x2C
183#define RV770_SMC_SOFT_REGISTER_seq_index 0x64
184#define RV770_SMC_SOFT_REGISTER_mvdd_chg_time 0x68
185#define RV770_SMC_SOFT_REGISTER_mclk_switch_lim 0x78
186#define RV770_SMC_SOFT_REGISTER_mc_block_delay 0x90
187#define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C
188#define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0
189
190int rv770_set_smc_sram_address(struct radeon_device *rdev,
191 u16 smc_address, u16 limit);
192int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
193 u16 smc_start_address, const u8 *src,
194 u16 byte_count, u16 limit);
195void rv770_start_smc(struct radeon_device *rdev);
196void rv770_reset_smc(struct radeon_device *rdev);
197void rv770_stop_smc_clock(struct radeon_device *rdev);
198void rv770_start_smc_clock(struct radeon_device *rdev);
199bool rv770_is_smc_running(struct radeon_device *rdev);
200PPSMC_Result rv770_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
201PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev);
202int rv770_read_smc_sram_dword(struct radeon_device *rdev,
203 u16 smc_address, u32 *value, u16 limit);
204int rv770_write_smc_sram_dword(struct radeon_device *rdev,
205 u16 smc_address, u32 value, u16 limit);
206int rv770_load_smc_ucode(struct radeon_device *rdev,
207 u16 limit);
208
209#endif
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 85b16266f748..784eeaf315c3 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -62,6 +62,242 @@
62# define UPLL_FB_DIV(x) ((x) << 0) 62# define UPLL_FB_DIV(x) ((x) << 0)
63# define UPLL_FB_DIV_MASK 0x01FFFFFF 63# define UPLL_FB_DIV_MASK 0x01FFFFFF
64 64
65/* pm registers */
66#define SMC_SRAM_ADDR 0x200
67#define SMC_SRAM_AUTO_INC_DIS (1 << 16)
68#define SMC_SRAM_DATA 0x204
69#define SMC_IO 0x208
70#define SMC_RST_N (1 << 0)
71#define SMC_STOP_MODE (1 << 2)
72#define SMC_CLK_EN (1 << 11)
73#define SMC_MSG 0x20c
74#define HOST_SMC_MSG(x) ((x) << 0)
75#define HOST_SMC_MSG_MASK (0xff << 0)
76#define HOST_SMC_MSG_SHIFT 0
77#define HOST_SMC_RESP(x) ((x) << 8)
78#define HOST_SMC_RESP_MASK (0xff << 8)
79#define HOST_SMC_RESP_SHIFT 8
80#define SMC_HOST_MSG(x) ((x) << 16)
81#define SMC_HOST_MSG_MASK (0xff << 16)
82#define SMC_HOST_MSG_SHIFT 16
83#define SMC_HOST_RESP(x) ((x) << 24)
84#define SMC_HOST_RESP_MASK (0xff << 24)
85#define SMC_HOST_RESP_SHIFT 24
86
87#define SMC_ISR_FFD8_FFDB 0x218
88
89#define CG_SPLL_FUNC_CNTL 0x600
90#define SPLL_RESET (1 << 0)
91#define SPLL_SLEEP (1 << 1)
92#define SPLL_DIVEN (1 << 2)
93#define SPLL_BYPASS_EN (1 << 3)
94#define SPLL_REF_DIV(x) ((x) << 4)
95#define SPLL_REF_DIV_MASK (0x3f << 4)
96#define SPLL_HILEN(x) ((x) << 12)
97#define SPLL_HILEN_MASK (0xf << 12)
98#define SPLL_LOLEN(x) ((x) << 16)
99#define SPLL_LOLEN_MASK (0xf << 16)
100#define CG_SPLL_FUNC_CNTL_2 0x604
101#define SCLK_MUX_SEL(x) ((x) << 0)
102#define SCLK_MUX_SEL_MASK (0x1ff << 0)
103#define CG_SPLL_FUNC_CNTL_3 0x608
104#define SPLL_FB_DIV(x) ((x) << 0)
105#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
106#define SPLL_DITHEN (1 << 28)
107
108#define SPLL_CNTL_MODE 0x610
109#define SPLL_DIV_SYNC (1 << 5)
110
111#define MPLL_AD_FUNC_CNTL 0x624
112#define CLKF(x) ((x) << 0)
113#define CLKF_MASK (0x7f << 0)
114#define CLKR(x) ((x) << 7)
115#define CLKR_MASK (0x1f << 7)
116#define CLKFRAC(x) ((x) << 12)
117#define CLKFRAC_MASK (0x1f << 12)
118#define YCLK_POST_DIV(x) ((x) << 17)
119#define YCLK_POST_DIV_MASK (3 << 17)
120#define IBIAS(x) ((x) << 20)
121#define IBIAS_MASK (0x3ff << 20)
122#define RESET (1 << 30)
123#define PDNB (1 << 31)
124#define MPLL_AD_FUNC_CNTL_2 0x628
125#define BYPASS (1 << 19)
126#define BIAS_GEN_PDNB (1 << 24)
127#define RESET_EN (1 << 25)
128#define VCO_MODE (1 << 29)
129#define MPLL_DQ_FUNC_CNTL 0x62c
130#define MPLL_DQ_FUNC_CNTL_2 0x630
131
132#define GENERAL_PWRMGT 0x63c
133# define GLOBAL_PWRMGT_EN (1 << 0)
134# define STATIC_PM_EN (1 << 1)
135# define THERMAL_PROTECTION_DIS (1 << 2)
136# define THERMAL_PROTECTION_TYPE (1 << 3)
137# define ENABLE_GEN2PCIE (1 << 4)
138# define ENABLE_GEN2XSP (1 << 5)
139# define SW_SMIO_INDEX(x) ((x) << 6)
140# define SW_SMIO_INDEX_MASK (3 << 6)
141# define SW_SMIO_INDEX_SHIFT 6
142# define LOW_VOLT_D2_ACPI (1 << 8)
143# define LOW_VOLT_D3_ACPI (1 << 9)
144# define VOLT_PWRMGT_EN (1 << 10)
145# define BACKBIAS_PAD_EN (1 << 18)
146# define BACKBIAS_VALUE (1 << 19)
147# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
148# define AC_DC_SW (1 << 24)
149
150#define CG_TPC 0x640
151#define SCLK_PWRMGT_CNTL 0x644
152# define SCLK_PWRMGT_OFF (1 << 0)
153# define SCLK_LOW_D1 (1 << 1)
154# define FIR_RESET (1 << 4)
155# define FIR_FORCE_TREND_SEL (1 << 5)
156# define FIR_TREND_MODE (1 << 6)
157# define DYN_GFX_CLK_OFF_EN (1 << 7)
158# define GFX_CLK_FORCE_ON (1 << 8)
159# define GFX_CLK_REQUEST_OFF (1 << 9)
160# define GFX_CLK_FORCE_OFF (1 << 10)
161# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
162# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
163# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
164#define MCLK_PWRMGT_CNTL 0x648
165# define DLL_SPEED(x) ((x) << 0)
166# define DLL_SPEED_MASK (0x1f << 0)
167# define MPLL_PWRMGT_OFF (1 << 5)
168# define DLL_READY (1 << 6)
169# define MC_INT_CNTL (1 << 7)
170# define MRDCKA0_SLEEP (1 << 8)
171# define MRDCKA1_SLEEP (1 << 9)
172# define MRDCKB0_SLEEP (1 << 10)
173# define MRDCKB1_SLEEP (1 << 11)
174# define MRDCKC0_SLEEP (1 << 12)
175# define MRDCKC1_SLEEP (1 << 13)
176# define MRDCKD0_SLEEP (1 << 14)
177# define MRDCKD1_SLEEP (1 << 15)
178# define MRDCKA0_RESET (1 << 16)
179# define MRDCKA1_RESET (1 << 17)
180# define MRDCKB0_RESET (1 << 18)
181# define MRDCKB1_RESET (1 << 19)
182# define MRDCKC0_RESET (1 << 20)
183# define MRDCKC1_RESET (1 << 21)
184# define MRDCKD0_RESET (1 << 22)
185# define MRDCKD1_RESET (1 << 23)
186# define DLL_READY_READ (1 << 24)
187# define USE_DISPLAY_GAP (1 << 25)
188# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
189# define MPLL_TURNOFF_D2 (1 << 28)
190#define DLL_CNTL 0x64c
191# define MRDCKA0_BYPASS (1 << 24)
192# define MRDCKA1_BYPASS (1 << 25)
193# define MRDCKB0_BYPASS (1 << 26)
194# define MRDCKB1_BYPASS (1 << 27)
195# define MRDCKC0_BYPASS (1 << 28)
196# define MRDCKC1_BYPASS (1 << 29)
197# define MRDCKD0_BYPASS (1 << 30)
198# define MRDCKD1_BYPASS (1 << 31)
199
200#define MPLL_TIME 0x654
201# define MPLL_LOCK_TIME(x) ((x) << 0)
202# define MPLL_LOCK_TIME_MASK (0xffff << 0)
203# define MPLL_RESET_TIME(x) ((x) << 16)
204# define MPLL_RESET_TIME_MASK (0xffff << 16)
205
206#define CG_CLKPIN_CNTL 0x660
207# define MUX_TCLK_TO_XCLK (1 << 8)
208# define XTALIN_DIVIDE (1 << 9)
209
210#define S0_VID_LOWER_SMIO_CNTL 0x678
211#define S1_VID_LOWER_SMIO_CNTL 0x67c
212#define S2_VID_LOWER_SMIO_CNTL 0x680
213#define S3_VID_LOWER_SMIO_CNTL 0x684
214
215#define CG_FTV 0x690
216#define CG_FFCT_0 0x694
217# define UTC_0(x) ((x) << 0)
218# define UTC_0_MASK (0x3ff << 0)
219# define DTC_0(x) ((x) << 10)
220# define DTC_0_MASK (0x3ff << 10)
221
222#define CG_BSP 0x6d0
223# define BSP(x) ((x) << 0)
224# define BSP_MASK (0xffff << 0)
225# define BSU(x) ((x) << 16)
226# define BSU_MASK (0xf << 16)
227#define CG_AT 0x6d4
228# define CG_R(x) ((x) << 0)
229# define CG_R_MASK (0xffff << 0)
230# define CG_L(x) ((x) << 16)
231# define CG_L_MASK (0xffff << 16)
232#define CG_GIT 0x6d8
233# define CG_GICST(x) ((x) << 0)
234# define CG_GICST_MASK (0xffff << 0)
235# define CG_GIPOT(x) ((x) << 16)
236# define CG_GIPOT_MASK (0xffff << 16)
237
238#define CG_SSP 0x6e8
239# define SST(x) ((x) << 0)
240# define SST_MASK (0xffff << 0)
241# define SSTU(x) ((x) << 16)
242# define SSTU_MASK (0xf << 16)
243
244#define CG_DISPLAY_GAP_CNTL 0x714
245# define DISP1_GAP(x) ((x) << 0)
246# define DISP1_GAP_MASK (3 << 0)
247# define DISP2_GAP(x) ((x) << 2)
248# define DISP2_GAP_MASK (3 << 2)
249# define VBI_TIMER_COUNT(x) ((x) << 4)
250# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
251# define VBI_TIMER_UNIT(x) ((x) << 20)
252# define VBI_TIMER_UNIT_MASK (7 << 20)
253# define DISP1_GAP_MCHG(x) ((x) << 24)
254# define DISP1_GAP_MCHG_MASK (3 << 24)
255# define DISP2_GAP_MCHG(x) ((x) << 26)
256# define DISP2_GAP_MCHG_MASK (3 << 26)
257
258#define CG_SPLL_SPREAD_SPECTRUM 0x790
259#define SSEN (1 << 0)
260#define CLKS(x) ((x) << 4)
261#define CLKS_MASK (0xfff << 4)
262#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
263#define CLKV(x) ((x) << 0)
264#define CLKV_MASK (0x3ffffff << 0)
265#define CG_MPLL_SPREAD_SPECTRUM 0x798
266#define CG_UPLL_SPREAD_SPECTRUM 0x79c
267# define SSEN_MASK 0x00000001
268
269#define CG_CGTT_LOCAL_0 0x7d0
270#define CG_CGTT_LOCAL_1 0x7d4
271
272#define BIOS_SCRATCH_4 0x1734
273
274#define MC_SEQ_MISC0 0x2a00
275#define MC_SEQ_MISC0_GDDR5_SHIFT 28
276#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
277#define MC_SEQ_MISC0_GDDR5_VALUE 5
278
279#define MC_ARB_SQM_RATIO 0x2770
280#define STATE0(x) ((x) << 0)
281#define STATE0_MASK (0xff << 0)
282#define STATE1(x) ((x) << 8)
283#define STATE1_MASK (0xff << 8)
284#define STATE2(x) ((x) << 16)
285#define STATE2_MASK (0xff << 16)
286#define STATE3(x) ((x) << 24)
287#define STATE3_MASK (0xff << 24)
288
289#define MC_ARB_RFSH_RATE 0x27b0
290#define POWERMODE0(x) ((x) << 0)
291#define POWERMODE0_MASK (0xff << 0)
292#define POWERMODE1(x) ((x) << 8)
293#define POWERMODE1_MASK (0xff << 8)
294#define POWERMODE2(x) ((x) << 16)
295#define POWERMODE2_MASK (0xff << 16)
296#define POWERMODE3(x) ((x) << 24)
297#define POWERMODE3_MASK (0xff << 24)
298
299#define CGTS_SM_CTRL_REG 0x9150
300
65/* Registers */ 301/* Registers */
66#define CB_COLOR0_BASE 0x28040 302#define CB_COLOR0_BASE 0x28040
67#define CB_COLOR1_BASE 0x28044 303#define CB_COLOR1_BASE 0x28044
@@ -86,8 +322,8 @@
86#define CONFIG_MEMSIZE 0x5428 322#define CONFIG_MEMSIZE 0x5428
87 323
88#define CP_ME_CNTL 0x86D8 324#define CP_ME_CNTL 0x86D8
89#define CP_ME_HALT (1<<28) 325#define CP_ME_HALT (1 << 28)
90#define CP_PFP_HALT (1<<26) 326#define CP_PFP_HALT (1 << 26)
91#define CP_ME_RAM_DATA 0xC160 327#define CP_ME_RAM_DATA 0xC160
92#define CP_ME_RAM_RADDR 0xC158 328#define CP_ME_RAM_RADDR 0xC158
93#define CP_ME_RAM_WADDR 0xC15C 329#define CP_ME_RAM_WADDR 0xC15C
@@ -157,9 +393,22 @@
157#define GUI_ACTIVE (1<<31) 393#define GUI_ACTIVE (1<<31)
158#define GRBM_STATUS2 0x8014 394#define GRBM_STATUS2 0x8014
159 395
160#define CG_CLKPIN_CNTL 0x660 396#define CG_THERMAL_CTRL 0x72C
161# define MUX_TCLK_TO_XCLK (1 << 8) 397#define DPM_EVENT_SRC(x) ((x) << 0)
162# define XTALIN_DIVIDE (1 << 9) 398#define DPM_EVENT_SRC_MASK (7 << 0)
399#define DIG_THERM_DPM(x) ((x) << 14)
400#define DIG_THERM_DPM_MASK 0x003FC000
401#define DIG_THERM_DPM_SHIFT 14
402
403#define CG_THERMAL_INT 0x734
404#define DIG_THERM_INTH(x) ((x) << 8)
405#define DIG_THERM_INTH_MASK 0x0000FF00
406#define DIG_THERM_INTH_SHIFT 8
407#define DIG_THERM_INTL(x) ((x) << 16)
408#define DIG_THERM_INTL_MASK 0x00FF0000
409#define DIG_THERM_INTL_SHIFT 16
410#define THERM_INT_MASK_HIGH (1 << 24)
411#define THERM_INT_MASK_LOW (1 << 25)
163 412
164#define CG_MULT_THERMAL_STATUS 0x740 413#define CG_MULT_THERMAL_STATUS 0x740
165#define ASIC_T(x) ((x) << 16) 414#define ASIC_T(x) ((x) << 16)
@@ -662,7 +911,22 @@
662#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c 911#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
663#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c 912#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
664 913
665/* PCIE link stuff */ 914/* PCIE indirect regs */
915#define PCIE_P_CNTL 0x40
916# define P_PLL_PWRDN_IN_L1L23 (1 << 3)
917# define P_PLL_BUF_PDNB (1 << 4)
918# define P_PLL_PDNB (1 << 9)
919# define P_ALLOW_PRX_FRONTEND_SHUTOFF (1 << 12)
920/* PCIE PORT regs */
921#define PCIE_LC_CNTL 0xa0
922# define LC_L0S_INACTIVITY(x) ((x) << 8)
923# define LC_L0S_INACTIVITY_MASK (0xf << 8)
924# define LC_L0S_INACTIVITY_SHIFT 8
925# define LC_L1_INACTIVITY(x) ((x) << 12)
926# define LC_L1_INACTIVITY_MASK (0xf << 12)
927# define LC_L1_INACTIVITY_SHIFT 12
928# define LC_PMI_TO_L1_DIS (1 << 16)
929# define LC_ASPM_TO_L1_DIS (1 << 24)
666#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ 930#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
667#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ 931#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
668# define LC_LINK_WIDTH_SHIFT 0 932# define LC_LINK_WIDTH_SHIFT 0
@@ -690,6 +954,9 @@
690# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) 954# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
691# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 955# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
692# define LC_CURRENT_DATA_RATE (1 << 11) 956# define LC_CURRENT_DATA_RATE (1 << 11)
957# define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12)
958# define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12)
959# define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12
693# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) 960# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
694# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) 961# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
695# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) 962# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index a1b0da6b5808..234906709067 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -32,40 +32,43 @@
32#include "sid.h" 32#include "sid.h"
33#include "atom.h" 33#include "atom.h"
34#include "si_blit_shaders.h" 34#include "si_blit_shaders.h"
35#include "clearstate_si.h"
36#include "radeon_ucode.h"
35 37
36#define SI_PFP_UCODE_SIZE 2144
37#define SI_PM4_UCODE_SIZE 2144
38#define SI_CE_UCODE_SIZE 2144
39#define SI_RLC_UCODE_SIZE 2048
40#define SI_MC_UCODE_SIZE 7769
41#define OLAND_MC_UCODE_SIZE 7863
42 38
43MODULE_FIRMWARE("radeon/TAHITI_pfp.bin"); 39MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
44MODULE_FIRMWARE("radeon/TAHITI_me.bin"); 40MODULE_FIRMWARE("radeon/TAHITI_me.bin");
45MODULE_FIRMWARE("radeon/TAHITI_ce.bin"); 41MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
46MODULE_FIRMWARE("radeon/TAHITI_mc.bin"); 42MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
47MODULE_FIRMWARE("radeon/TAHITI_rlc.bin"); 43MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
44MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
48MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); 45MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
49MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); 46MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
50MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin"); 47MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
51MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin"); 48MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
52MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin"); 49MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
50MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
53MODULE_FIRMWARE("radeon/VERDE_pfp.bin"); 51MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
54MODULE_FIRMWARE("radeon/VERDE_me.bin"); 52MODULE_FIRMWARE("radeon/VERDE_me.bin");
55MODULE_FIRMWARE("radeon/VERDE_ce.bin"); 53MODULE_FIRMWARE("radeon/VERDE_ce.bin");
56MODULE_FIRMWARE("radeon/VERDE_mc.bin"); 54MODULE_FIRMWARE("radeon/VERDE_mc.bin");
57MODULE_FIRMWARE("radeon/VERDE_rlc.bin"); 55MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
56MODULE_FIRMWARE("radeon/VERDE_smc.bin");
58MODULE_FIRMWARE("radeon/OLAND_pfp.bin"); 57MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
59MODULE_FIRMWARE("radeon/OLAND_me.bin"); 58MODULE_FIRMWARE("radeon/OLAND_me.bin");
60MODULE_FIRMWARE("radeon/OLAND_ce.bin"); 59MODULE_FIRMWARE("radeon/OLAND_ce.bin");
61MODULE_FIRMWARE("radeon/OLAND_mc.bin"); 60MODULE_FIRMWARE("radeon/OLAND_mc.bin");
62MODULE_FIRMWARE("radeon/OLAND_rlc.bin"); 61MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
62MODULE_FIRMWARE("radeon/OLAND_smc.bin");
63MODULE_FIRMWARE("radeon/HAINAN_pfp.bin"); 63MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
64MODULE_FIRMWARE("radeon/HAINAN_me.bin"); 64MODULE_FIRMWARE("radeon/HAINAN_me.bin");
65MODULE_FIRMWARE("radeon/HAINAN_ce.bin"); 65MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
66MODULE_FIRMWARE("radeon/HAINAN_mc.bin"); 66MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
67MODULE_FIRMWARE("radeon/HAINAN_rlc.bin"); 67MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
68MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
68 69
70static void si_pcie_gen3_enable(struct radeon_device *rdev);
71static void si_program_aspm(struct radeon_device *rdev);
69extern int r600_ih_ring_alloc(struct radeon_device *rdev); 72extern int r600_ih_ring_alloc(struct radeon_device *rdev);
70extern void r600_ih_ring_fini(struct radeon_device *rdev); 73extern void r600_ih_ring_fini(struct radeon_device *rdev);
71extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); 74extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
@@ -75,6 +78,228 @@ extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
75extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); 78extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
76extern bool evergreen_is_display_hung(struct radeon_device *rdev); 79extern bool evergreen_is_display_hung(struct radeon_device *rdev);
77 80
81static const u32 verde_rlc_save_restore_register_list[] =
82{
83 (0x8000 << 16) | (0x98f4 >> 2),
84 0x00000000,
85 (0x8040 << 16) | (0x98f4 >> 2),
86 0x00000000,
87 (0x8000 << 16) | (0xe80 >> 2),
88 0x00000000,
89 (0x8040 << 16) | (0xe80 >> 2),
90 0x00000000,
91 (0x8000 << 16) | (0x89bc >> 2),
92 0x00000000,
93 (0x8040 << 16) | (0x89bc >> 2),
94 0x00000000,
95 (0x8000 << 16) | (0x8c1c >> 2),
96 0x00000000,
97 (0x8040 << 16) | (0x8c1c >> 2),
98 0x00000000,
99 (0x9c00 << 16) | (0x98f0 >> 2),
100 0x00000000,
101 (0x9c00 << 16) | (0xe7c >> 2),
102 0x00000000,
103 (0x8000 << 16) | (0x9148 >> 2),
104 0x00000000,
105 (0x8040 << 16) | (0x9148 >> 2),
106 0x00000000,
107 (0x9c00 << 16) | (0x9150 >> 2),
108 0x00000000,
109 (0x9c00 << 16) | (0x897c >> 2),
110 0x00000000,
111 (0x9c00 << 16) | (0x8d8c >> 2),
112 0x00000000,
113 (0x9c00 << 16) | (0xac54 >> 2),
114 0X00000000,
115 0x3,
116 (0x9c00 << 16) | (0x98f8 >> 2),
117 0x00000000,
118 (0x9c00 << 16) | (0x9910 >> 2),
119 0x00000000,
120 (0x9c00 << 16) | (0x9914 >> 2),
121 0x00000000,
122 (0x9c00 << 16) | (0x9918 >> 2),
123 0x00000000,
124 (0x9c00 << 16) | (0x991c >> 2),
125 0x00000000,
126 (0x9c00 << 16) | (0x9920 >> 2),
127 0x00000000,
128 (0x9c00 << 16) | (0x9924 >> 2),
129 0x00000000,
130 (0x9c00 << 16) | (0x9928 >> 2),
131 0x00000000,
132 (0x9c00 << 16) | (0x992c >> 2),
133 0x00000000,
134 (0x9c00 << 16) | (0x9930 >> 2),
135 0x00000000,
136 (0x9c00 << 16) | (0x9934 >> 2),
137 0x00000000,
138 (0x9c00 << 16) | (0x9938 >> 2),
139 0x00000000,
140 (0x9c00 << 16) | (0x993c >> 2),
141 0x00000000,
142 (0x9c00 << 16) | (0x9940 >> 2),
143 0x00000000,
144 (0x9c00 << 16) | (0x9944 >> 2),
145 0x00000000,
146 (0x9c00 << 16) | (0x9948 >> 2),
147 0x00000000,
148 (0x9c00 << 16) | (0x994c >> 2),
149 0x00000000,
150 (0x9c00 << 16) | (0x9950 >> 2),
151 0x00000000,
152 (0x9c00 << 16) | (0x9954 >> 2),
153 0x00000000,
154 (0x9c00 << 16) | (0x9958 >> 2),
155 0x00000000,
156 (0x9c00 << 16) | (0x995c >> 2),
157 0x00000000,
158 (0x9c00 << 16) | (0x9960 >> 2),
159 0x00000000,
160 (0x9c00 << 16) | (0x9964 >> 2),
161 0x00000000,
162 (0x9c00 << 16) | (0x9968 >> 2),
163 0x00000000,
164 (0x9c00 << 16) | (0x996c >> 2),
165 0x00000000,
166 (0x9c00 << 16) | (0x9970 >> 2),
167 0x00000000,
168 (0x9c00 << 16) | (0x9974 >> 2),
169 0x00000000,
170 (0x9c00 << 16) | (0x9978 >> 2),
171 0x00000000,
172 (0x9c00 << 16) | (0x997c >> 2),
173 0x00000000,
174 (0x9c00 << 16) | (0x9980 >> 2),
175 0x00000000,
176 (0x9c00 << 16) | (0x9984 >> 2),
177 0x00000000,
178 (0x9c00 << 16) | (0x9988 >> 2),
179 0x00000000,
180 (0x9c00 << 16) | (0x998c >> 2),
181 0x00000000,
182 (0x9c00 << 16) | (0x8c00 >> 2),
183 0x00000000,
184 (0x9c00 << 16) | (0x8c14 >> 2),
185 0x00000000,
186 (0x9c00 << 16) | (0x8c04 >> 2),
187 0x00000000,
188 (0x9c00 << 16) | (0x8c08 >> 2),
189 0x00000000,
190 (0x8000 << 16) | (0x9b7c >> 2),
191 0x00000000,
192 (0x8040 << 16) | (0x9b7c >> 2),
193 0x00000000,
194 (0x8000 << 16) | (0xe84 >> 2),
195 0x00000000,
196 (0x8040 << 16) | (0xe84 >> 2),
197 0x00000000,
198 (0x8000 << 16) | (0x89c0 >> 2),
199 0x00000000,
200 (0x8040 << 16) | (0x89c0 >> 2),
201 0x00000000,
202 (0x8000 << 16) | (0x914c >> 2),
203 0x00000000,
204 (0x8040 << 16) | (0x914c >> 2),
205 0x00000000,
206 (0x8000 << 16) | (0x8c20 >> 2),
207 0x00000000,
208 (0x8040 << 16) | (0x8c20 >> 2),
209 0x00000000,
210 (0x8000 << 16) | (0x9354 >> 2),
211 0x00000000,
212 (0x8040 << 16) | (0x9354 >> 2),
213 0x00000000,
214 (0x9c00 << 16) | (0x9060 >> 2),
215 0x00000000,
216 (0x9c00 << 16) | (0x9364 >> 2),
217 0x00000000,
218 (0x9c00 << 16) | (0x9100 >> 2),
219 0x00000000,
220 (0x9c00 << 16) | (0x913c >> 2),
221 0x00000000,
222 (0x8000 << 16) | (0x90e0 >> 2),
223 0x00000000,
224 (0x8000 << 16) | (0x90e4 >> 2),
225 0x00000000,
226 (0x8000 << 16) | (0x90e8 >> 2),
227 0x00000000,
228 (0x8040 << 16) | (0x90e0 >> 2),
229 0x00000000,
230 (0x8040 << 16) | (0x90e4 >> 2),
231 0x00000000,
232 (0x8040 << 16) | (0x90e8 >> 2),
233 0x00000000,
234 (0x9c00 << 16) | (0x8bcc >> 2),
235 0x00000000,
236 (0x9c00 << 16) | (0x8b24 >> 2),
237 0x00000000,
238 (0x9c00 << 16) | (0x88c4 >> 2),
239 0x00000000,
240 (0x9c00 << 16) | (0x8e50 >> 2),
241 0x00000000,
242 (0x9c00 << 16) | (0x8c0c >> 2),
243 0x00000000,
244 (0x9c00 << 16) | (0x8e58 >> 2),
245 0x00000000,
246 (0x9c00 << 16) | (0x8e5c >> 2),
247 0x00000000,
248 (0x9c00 << 16) | (0x9508 >> 2),
249 0x00000000,
250 (0x9c00 << 16) | (0x950c >> 2),
251 0x00000000,
252 (0x9c00 << 16) | (0x9494 >> 2),
253 0x00000000,
254 (0x9c00 << 16) | (0xac0c >> 2),
255 0x00000000,
256 (0x9c00 << 16) | (0xac10 >> 2),
257 0x00000000,
258 (0x9c00 << 16) | (0xac14 >> 2),
259 0x00000000,
260 (0x9c00 << 16) | (0xae00 >> 2),
261 0x00000000,
262 (0x9c00 << 16) | (0xac08 >> 2),
263 0x00000000,
264 (0x9c00 << 16) | (0x88d4 >> 2),
265 0x00000000,
266 (0x9c00 << 16) | (0x88c8 >> 2),
267 0x00000000,
268 (0x9c00 << 16) | (0x88cc >> 2),
269 0x00000000,
270 (0x9c00 << 16) | (0x89b0 >> 2),
271 0x00000000,
272 (0x9c00 << 16) | (0x8b10 >> 2),
273 0x00000000,
274 (0x9c00 << 16) | (0x8a14 >> 2),
275 0x00000000,
276 (0x9c00 << 16) | (0x9830 >> 2),
277 0x00000000,
278 (0x9c00 << 16) | (0x9834 >> 2),
279 0x00000000,
280 (0x9c00 << 16) | (0x9838 >> 2),
281 0x00000000,
282 (0x9c00 << 16) | (0x9a10 >> 2),
283 0x00000000,
284 (0x8000 << 16) | (0x9870 >> 2),
285 0x00000000,
286 (0x8000 << 16) | (0x9874 >> 2),
287 0x00000000,
288 (0x8001 << 16) | (0x9870 >> 2),
289 0x00000000,
290 (0x8001 << 16) | (0x9874 >> 2),
291 0x00000000,
292 (0x8040 << 16) | (0x9870 >> 2),
293 0x00000000,
294 (0x8040 << 16) | (0x9874 >> 2),
295 0x00000000,
296 (0x8041 << 16) | (0x9870 >> 2),
297 0x00000000,
298 (0x8041 << 16) | (0x9874 >> 2),
299 0x00000000,
300 0x00000000
301};
302
78static const u32 tahiti_golden_rlc_registers[] = 303static const u32 tahiti_golden_rlc_registers[] =
79{ 304{
80 0xc424, 0xffffffff, 0x00601005, 305 0xc424, 0xffffffff, 0x00601005,
@@ -1320,6 +1545,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1320 const char *chip_name; 1545 const char *chip_name;
1321 const char *rlc_chip_name; 1546 const char *rlc_chip_name;
1322 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; 1547 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
1548 size_t smc_req_size;
1323 char fw_name[30]; 1549 char fw_name[30];
1324 int err; 1550 int err;
1325 1551
@@ -1341,6 +1567,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1341 ce_req_size = SI_CE_UCODE_SIZE * 4; 1567 ce_req_size = SI_CE_UCODE_SIZE * 4;
1342 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1568 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1343 mc_req_size = SI_MC_UCODE_SIZE * 4; 1569 mc_req_size = SI_MC_UCODE_SIZE * 4;
1570 smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
1344 break; 1571 break;
1345 case CHIP_PITCAIRN: 1572 case CHIP_PITCAIRN:
1346 chip_name = "PITCAIRN"; 1573 chip_name = "PITCAIRN";
@@ -1350,6 +1577,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1350 ce_req_size = SI_CE_UCODE_SIZE * 4; 1577 ce_req_size = SI_CE_UCODE_SIZE * 4;
1351 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1578 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1352 mc_req_size = SI_MC_UCODE_SIZE * 4; 1579 mc_req_size = SI_MC_UCODE_SIZE * 4;
1580 smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
1353 break; 1581 break;
1354 case CHIP_VERDE: 1582 case CHIP_VERDE:
1355 chip_name = "VERDE"; 1583 chip_name = "VERDE";
@@ -1359,6 +1587,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1359 ce_req_size = SI_CE_UCODE_SIZE * 4; 1587 ce_req_size = SI_CE_UCODE_SIZE * 4;
1360 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1588 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1361 mc_req_size = SI_MC_UCODE_SIZE * 4; 1589 mc_req_size = SI_MC_UCODE_SIZE * 4;
1590 smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
1362 break; 1591 break;
1363 case CHIP_OLAND: 1592 case CHIP_OLAND:
1364 chip_name = "OLAND"; 1593 chip_name = "OLAND";
@@ -1368,6 +1597,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1368 ce_req_size = SI_CE_UCODE_SIZE * 4; 1597 ce_req_size = SI_CE_UCODE_SIZE * 4;
1369 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1598 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1370 mc_req_size = OLAND_MC_UCODE_SIZE * 4; 1599 mc_req_size = OLAND_MC_UCODE_SIZE * 4;
1600 smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
1371 break; 1601 break;
1372 case CHIP_HAINAN: 1602 case CHIP_HAINAN:
1373 chip_name = "HAINAN"; 1603 chip_name = "HAINAN";
@@ -1377,6 +1607,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1377 ce_req_size = SI_CE_UCODE_SIZE * 4; 1607 ce_req_size = SI_CE_UCODE_SIZE * 4;
1378 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1608 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1379 mc_req_size = OLAND_MC_UCODE_SIZE * 4; 1609 mc_req_size = OLAND_MC_UCODE_SIZE * 4;
1610 smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
1380 break; 1611 break;
1381 default: BUG(); 1612 default: BUG();
1382 } 1613 }
@@ -1439,6 +1670,17 @@ static int si_init_microcode(struct radeon_device *rdev)
1439 err = -EINVAL; 1670 err = -EINVAL;
1440 } 1671 }
1441 1672
1673 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1674 err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev);
1675 if (err)
1676 goto out;
1677 if (rdev->smc_fw->size != smc_req_size) {
1678 printk(KERN_ERR
1679 "si_smc: Bogus length %zu in firmware \"%s\"\n",
1680 rdev->smc_fw->size, fw_name);
1681 err = -EINVAL;
1682 }
1683
1442out: 1684out:
1443 platform_device_unregister(pdev); 1685 platform_device_unregister(pdev);
1444 1686
@@ -1457,6 +1699,8 @@ out:
1457 rdev->rlc_fw = NULL; 1699 rdev->rlc_fw = NULL;
1458 release_firmware(rdev->mc_fw); 1700 release_firmware(rdev->mc_fw);
1459 rdev->mc_fw = NULL; 1701 rdev->mc_fw = NULL;
1702 release_firmware(rdev->smc_fw);
1703 rdev->smc_fw = NULL;
1460 } 1704 }
1461 return err; 1705 return err;
1462} 1706}
@@ -1792,7 +2036,8 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
1792 u32 lb_size, u32 num_heads) 2036 u32 lb_size, u32 num_heads)
1793{ 2037{
1794 struct drm_display_mode *mode = &radeon_crtc->base.mode; 2038 struct drm_display_mode *mode = &radeon_crtc->base.mode;
1795 struct dce6_wm_params wm; 2039 struct dce6_wm_params wm_low, wm_high;
2040 u32 dram_channels;
1796 u32 pixel_period; 2041 u32 pixel_period;
1797 u32 line_time = 0; 2042 u32 line_time = 0;
1798 u32 latency_watermark_a = 0, latency_watermark_b = 0; 2043 u32 latency_watermark_a = 0, latency_watermark_b = 0;
@@ -1808,38 +2053,83 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
1808 priority_a_cnt = 0; 2053 priority_a_cnt = 0;
1809 priority_b_cnt = 0; 2054 priority_b_cnt = 0;
1810 2055
1811 wm.yclk = rdev->pm.current_mclk * 10;
1812 wm.sclk = rdev->pm.current_sclk * 10;
1813 wm.disp_clk = mode->clock;
1814 wm.src_width = mode->crtc_hdisplay;
1815 wm.active_time = mode->crtc_hdisplay * pixel_period;
1816 wm.blank_time = line_time - wm.active_time;
1817 wm.interlaced = false;
1818 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1819 wm.interlaced = true;
1820 wm.vsc = radeon_crtc->vsc;
1821 wm.vtaps = 1;
1822 if (radeon_crtc->rmx_type != RMX_OFF)
1823 wm.vtaps = 2;
1824 wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
1825 wm.lb_size = lb_size;
1826 if (rdev->family == CHIP_ARUBA) 2056 if (rdev->family == CHIP_ARUBA)
1827 wm.dram_channels = evergreen_get_number_of_dram_channels(rdev); 2057 dram_channels = evergreen_get_number_of_dram_channels(rdev);
1828 else 2058 else
1829 wm.dram_channels = si_get_number_of_dram_channels(rdev); 2059 dram_channels = si_get_number_of_dram_channels(rdev);
1830 wm.num_heads = num_heads; 2060
2061 /* watermark for high clocks */
2062 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2063 wm_high.yclk =
2064 radeon_dpm_get_mclk(rdev, false) * 10;
2065 wm_high.sclk =
2066 radeon_dpm_get_sclk(rdev, false) * 10;
2067 } else {
2068 wm_high.yclk = rdev->pm.current_mclk * 10;
2069 wm_high.sclk = rdev->pm.current_sclk * 10;
2070 }
2071
2072 wm_high.disp_clk = mode->clock;
2073 wm_high.src_width = mode->crtc_hdisplay;
2074 wm_high.active_time = mode->crtc_hdisplay * pixel_period;
2075 wm_high.blank_time = line_time - wm_high.active_time;
2076 wm_high.interlaced = false;
2077 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2078 wm_high.interlaced = true;
2079 wm_high.vsc = radeon_crtc->vsc;
2080 wm_high.vtaps = 1;
2081 if (radeon_crtc->rmx_type != RMX_OFF)
2082 wm_high.vtaps = 2;
2083 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
2084 wm_high.lb_size = lb_size;
2085 wm_high.dram_channels = dram_channels;
2086 wm_high.num_heads = num_heads;
2087
2088 /* watermark for low clocks */
2089 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2090 wm_low.yclk =
2091 radeon_dpm_get_mclk(rdev, true) * 10;
2092 wm_low.sclk =
2093 radeon_dpm_get_sclk(rdev, true) * 10;
2094 } else {
2095 wm_low.yclk = rdev->pm.current_mclk * 10;
2096 wm_low.sclk = rdev->pm.current_sclk * 10;
2097 }
2098
2099 wm_low.disp_clk = mode->clock;
2100 wm_low.src_width = mode->crtc_hdisplay;
2101 wm_low.active_time = mode->crtc_hdisplay * pixel_period;
2102 wm_low.blank_time = line_time - wm_low.active_time;
2103 wm_low.interlaced = false;
2104 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2105 wm_low.interlaced = true;
2106 wm_low.vsc = radeon_crtc->vsc;
2107 wm_low.vtaps = 1;
2108 if (radeon_crtc->rmx_type != RMX_OFF)
2109 wm_low.vtaps = 2;
2110 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
2111 wm_low.lb_size = lb_size;
2112 wm_low.dram_channels = dram_channels;
2113 wm_low.num_heads = num_heads;
1831 2114
1832 /* set for high clocks */ 2115 /* set for high clocks */
1833 latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535); 2116 latency_watermark_a = min(dce6_latency_watermark(&wm_high), (u32)65535);
1834 /* set for low clocks */ 2117 /* set for low clocks */
1835 /* wm.yclk = low clk; wm.sclk = low clk */ 2118 latency_watermark_b = min(dce6_latency_watermark(&wm_low), (u32)65535);
1836 latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);
1837 2119
1838 /* possibly force display priority to high */ 2120 /* possibly force display priority to high */
1839 /* should really do this at mode validation time... */ 2121 /* should really do this at mode validation time... */
1840 if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) || 2122 if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1841 !dce6_average_bandwidth_vs_available_bandwidth(&wm) || 2123 !dce6_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1842 !dce6_check_latency_hiding(&wm) || 2124 !dce6_check_latency_hiding(&wm_high) ||
2125 (rdev->disp_priority == 2)) {
2126 DRM_DEBUG_KMS("force priority to high\n");
2127 priority_a_cnt |= PRIORITY_ALWAYS_ON;
2128 priority_b_cnt |= PRIORITY_ALWAYS_ON;
2129 }
2130 if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
2131 !dce6_average_bandwidth_vs_available_bandwidth(&wm_low) ||
2132 !dce6_check_latency_hiding(&wm_low) ||
1843 (rdev->disp_priority == 2)) { 2133 (rdev->disp_priority == 2)) {
1844 DRM_DEBUG_KMS("force priority to high\n"); 2134 DRM_DEBUG_KMS("force priority to high\n");
1845 priority_a_cnt |= PRIORITY_ALWAYS_ON; 2135 priority_a_cnt |= PRIORITY_ALWAYS_ON;
@@ -1895,6 +2185,10 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
1895 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt); 2185 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
1896 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt); 2186 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
1897 2187
2188 /* save values for DPM */
2189 radeon_crtc->line_time = line_time;
2190 radeon_crtc->wm_high = latency_watermark_a;
2191 radeon_crtc->wm_low = latency_watermark_b;
1898} 2192}
1899 2193
1900void dce6_bandwidth_update(struct radeon_device *rdev) 2194void dce6_bandwidth_update(struct radeon_device *rdev)
@@ -3535,8 +3829,8 @@ static void si_mc_program(struct radeon_device *rdev)
3535 } 3829 }
3536} 3830}
3537 3831
3538static void si_vram_gtt_location(struct radeon_device *rdev, 3832void si_vram_gtt_location(struct radeon_device *rdev,
3539 struct radeon_mc *mc) 3833 struct radeon_mc *mc)
3540{ 3834{
3541 if (mc->mc_vram_size > 0xFFC0000000ULL) { 3835 if (mc->mc_vram_size > 0xFFC0000000ULL) {
3542 /* leave room for at least 1024M GTT */ 3836 /* leave room for at least 1024M GTT */
@@ -4282,6 +4576,450 @@ void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4282} 4576}
4283 4577
4284/* 4578/*
4579 * Power and clock gating
4580 */
4581static void si_wait_for_rlc_serdes(struct radeon_device *rdev)
4582{
4583 int i;
4584
4585 for (i = 0; i < rdev->usec_timeout; i++) {
4586 if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
4587 break;
4588 udelay(1);
4589 }
4590
4591 for (i = 0; i < rdev->usec_timeout; i++) {
4592 if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
4593 break;
4594 udelay(1);
4595 }
4596}
4597
4598static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
4599 bool enable)
4600{
4601 u32 tmp = RREG32(CP_INT_CNTL_RING0);
4602 u32 mask;
4603 int i;
4604
4605 if (enable)
4606 tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4607 else
4608 tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4609 WREG32(CP_INT_CNTL_RING0, tmp);
4610
4611 if (!enable) {
4612 /* read a gfx register */
4613 tmp = RREG32(DB_DEPTH_INFO);
4614
4615 mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
4616 for (i = 0; i < rdev->usec_timeout; i++) {
4617 if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
4618 break;
4619 udelay(1);
4620 }
4621 }
4622}
4623
4624static void si_set_uvd_dcm(struct radeon_device *rdev,
4625 bool sw_mode)
4626{
4627 u32 tmp, tmp2;
4628
4629 tmp = RREG32(UVD_CGC_CTRL);
4630 tmp &= ~(CLK_OD_MASK | CG_DT_MASK);
4631 tmp |= DCM | CG_DT(1) | CLK_OD(4);
4632
4633 if (sw_mode) {
4634 tmp &= ~0x7ffff800;
4635 tmp2 = DYN_OR_EN | DYN_RR_EN | G_DIV_ID(7);
4636 } else {
4637 tmp |= 0x7ffff800;
4638 tmp2 = 0;
4639 }
4640
4641 WREG32(UVD_CGC_CTRL, tmp);
4642 WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
4643}
4644
4645static void si_init_uvd_internal_cg(struct radeon_device *rdev)
4646{
4647 bool hw_mode = true;
4648
4649 if (hw_mode) {
4650 si_set_uvd_dcm(rdev, false);
4651 } else {
4652 u32 tmp = RREG32(UVD_CGC_CTRL);
4653 tmp &= ~DCM;
4654 WREG32(UVD_CGC_CTRL, tmp);
4655 }
4656}
4657
4658static u32 si_halt_rlc(struct radeon_device *rdev)
4659{
4660 u32 data, orig;
4661
4662 orig = data = RREG32(RLC_CNTL);
4663
4664 if (data & RLC_ENABLE) {
4665 data &= ~RLC_ENABLE;
4666 WREG32(RLC_CNTL, data);
4667
4668 si_wait_for_rlc_serdes(rdev);
4669 }
4670
4671 return orig;
4672}
4673
4674static void si_update_rlc(struct radeon_device *rdev, u32 rlc)
4675{
4676 u32 tmp;
4677
4678 tmp = RREG32(RLC_CNTL);
4679 if (tmp != rlc)
4680 WREG32(RLC_CNTL, rlc);
4681}
4682
4683static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
4684{
4685 u32 data, orig;
4686
4687 orig = data = RREG32(DMA_PG);
4688 if (enable)
4689 data |= PG_CNTL_ENABLE;
4690 else
4691 data &= ~PG_CNTL_ENABLE;
4692 if (orig != data)
4693 WREG32(DMA_PG, data);
4694}
4695
4696static void si_init_dma_pg(struct radeon_device *rdev)
4697{
4698 u32 tmp;
4699
4700 WREG32(DMA_PGFSM_WRITE, 0x00002000);
4701 WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
4702
4703 for (tmp = 0; tmp < 5; tmp++)
4704 WREG32(DMA_PGFSM_WRITE, 0);
4705}
4706
4707static void si_enable_gfx_cgpg(struct radeon_device *rdev,
4708 bool enable)
4709{
4710 u32 tmp;
4711
4712 if (enable) {
4713 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
4714 WREG32(RLC_TTOP_D, tmp);
4715
4716 tmp = RREG32(RLC_PG_CNTL);
4717 tmp |= GFX_PG_ENABLE;
4718 WREG32(RLC_PG_CNTL, tmp);
4719
4720 tmp = RREG32(RLC_AUTO_PG_CTRL);
4721 tmp |= AUTO_PG_EN;
4722 WREG32(RLC_AUTO_PG_CTRL, tmp);
4723 } else {
4724 tmp = RREG32(RLC_AUTO_PG_CTRL);
4725 tmp &= ~AUTO_PG_EN;
4726 WREG32(RLC_AUTO_PG_CTRL, tmp);
4727
4728 tmp = RREG32(DB_RENDER_CONTROL);
4729 }
4730}
4731
4732static void si_init_gfx_cgpg(struct radeon_device *rdev)
4733{
4734 u32 tmp;
4735
4736 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
4737
4738 tmp = RREG32(RLC_PG_CNTL);
4739 tmp |= GFX_PG_SRC;
4740 WREG32(RLC_PG_CNTL, tmp);
4741
4742 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
4743
4744 tmp = RREG32(RLC_AUTO_PG_CTRL);
4745
4746 tmp &= ~GRBM_REG_SGIT_MASK;
4747 tmp |= GRBM_REG_SGIT(0x700);
4748 tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
4749 WREG32(RLC_AUTO_PG_CTRL, tmp);
4750}
4751
4752static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
4753{
4754 u32 mask = 0, tmp, tmp1;
4755 int i;
4756
4757 si_select_se_sh(rdev, se, sh);
4758 tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
4759 tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
4760 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
4761
4762 tmp &= 0xffff0000;
4763
4764 tmp |= tmp1;
4765 tmp >>= 16;
4766
4767 for (i = 0; i < rdev->config.si.max_cu_per_sh; i ++) {
4768 mask <<= 1;
4769 mask |= 1;
4770 }
4771
4772 return (~tmp) & mask;
4773}
4774
4775static void si_init_ao_cu_mask(struct radeon_device *rdev)
4776{
4777 u32 i, j, k, active_cu_number = 0;
4778 u32 mask, counter, cu_bitmap;
4779 u32 tmp = 0;
4780
4781 for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
4782 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
4783 mask = 1;
4784 cu_bitmap = 0;
4785 counter = 0;
4786 for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
4787 if (si_get_cu_active_bitmap(rdev, i, j) & mask) {
4788 if (counter < 2)
4789 cu_bitmap |= mask;
4790 counter++;
4791 }
4792 mask <<= 1;
4793 }
4794
4795 active_cu_number += counter;
4796 tmp |= (cu_bitmap << (i * 16 + j * 8));
4797 }
4798 }
4799
4800 WREG32(RLC_PG_AO_CU_MASK, tmp);
4801
4802 tmp = RREG32(RLC_MAX_PG_CU);
4803 tmp &= ~MAX_PU_CU_MASK;
4804 tmp |= MAX_PU_CU(active_cu_number);
4805 WREG32(RLC_MAX_PG_CU, tmp);
4806}
4807
4808static void si_enable_cgcg(struct radeon_device *rdev,
4809 bool enable)
4810{
4811 u32 data, orig, tmp;
4812
4813 orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
4814
4815 si_enable_gui_idle_interrupt(rdev, enable);
4816
4817 if (enable) {
4818 WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
4819
4820 tmp = si_halt_rlc(rdev);
4821
4822 WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
4823 WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
4824 WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
4825
4826 si_wait_for_rlc_serdes(rdev);
4827
4828 si_update_rlc(rdev, tmp);
4829
4830 WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
4831
4832 data |= CGCG_EN | CGLS_EN;
4833 } else {
4834 RREG32(CB_CGTT_SCLK_CTRL);
4835 RREG32(CB_CGTT_SCLK_CTRL);
4836 RREG32(CB_CGTT_SCLK_CTRL);
4837 RREG32(CB_CGTT_SCLK_CTRL);
4838
4839 data &= ~(CGCG_EN | CGLS_EN);
4840 }
4841
4842 if (orig != data)
4843 WREG32(RLC_CGCG_CGLS_CTRL, data);
4844}
4845
4846static void si_enable_mgcg(struct radeon_device *rdev,
4847 bool enable)
4848{
4849 u32 data, orig, tmp = 0;
4850
4851 if (enable) {
4852 orig = data = RREG32(CGTS_SM_CTRL_REG);
4853 data = 0x96940200;
4854 if (orig != data)
4855 WREG32(CGTS_SM_CTRL_REG, data);
4856
4857 orig = data = RREG32(CP_MEM_SLP_CNTL);
4858 data |= CP_MEM_LS_EN;
4859 if (orig != data)
4860 WREG32(CP_MEM_SLP_CNTL, data);
4861
4862 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
4863 data &= 0xffffffc0;
4864 if (orig != data)
4865 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
4866
4867 tmp = si_halt_rlc(rdev);
4868
4869 WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
4870 WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
4871 WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
4872
4873 si_update_rlc(rdev, tmp);
4874 } else {
4875 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
4876 data |= 0x00000003;
4877 if (orig != data)
4878 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
4879
4880 data = RREG32(CP_MEM_SLP_CNTL);
4881 if (data & CP_MEM_LS_EN) {
4882 data &= ~CP_MEM_LS_EN;
4883 WREG32(CP_MEM_SLP_CNTL, data);
4884 }
4885 orig = data = RREG32(CGTS_SM_CTRL_REG);
4886 data |= LS_OVERRIDE | OVERRIDE;
4887 if (orig != data)
4888 WREG32(CGTS_SM_CTRL_REG, data);
4889
4890 tmp = si_halt_rlc(rdev);
4891
4892 WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
4893 WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
4894 WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
4895
4896 si_update_rlc(rdev, tmp);
4897 }
4898}
4899
4900static void si_enable_uvd_mgcg(struct radeon_device *rdev,
4901 bool enable)
4902{
4903 u32 orig, data, tmp;
4904
4905 if (enable) {
4906 tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
4907 tmp |= 0x3fff;
4908 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
4909
4910 orig = data = RREG32(UVD_CGC_CTRL);
4911 data |= DCM;
4912 if (orig != data)
4913 WREG32(UVD_CGC_CTRL, data);
4914
4915 WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0);
4916 WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0);
4917 } else {
4918 tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
4919 tmp &= ~0x3fff;
4920 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
4921
4922 orig = data = RREG32(UVD_CGC_CTRL);
4923 data &= ~DCM;
4924 if (orig != data)
4925 WREG32(UVD_CGC_CTRL, data);
4926
4927 WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0xffffffff);
4928 WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0xffffffff);
4929 }
4930}
4931
4932static const u32 mc_cg_registers[] =
4933{
4934 MC_HUB_MISC_HUB_CG,
4935 MC_HUB_MISC_SIP_CG,
4936 MC_HUB_MISC_VM_CG,
4937 MC_XPB_CLK_GAT,
4938 ATC_MISC_CG,
4939 MC_CITF_MISC_WR_CG,
4940 MC_CITF_MISC_RD_CG,
4941 MC_CITF_MISC_VM_CG,
4942 VM_L2_CG,
4943};
4944
4945static void si_enable_mc_ls(struct radeon_device *rdev,
4946 bool enable)
4947{
4948 int i;
4949 u32 orig, data;
4950
4951 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
4952 orig = data = RREG32(mc_cg_registers[i]);
4953 if (enable)
4954 data |= MC_LS_ENABLE;
4955 else
4956 data &= ~MC_LS_ENABLE;
4957 if (data != orig)
4958 WREG32(mc_cg_registers[i], data);
4959 }
4960}
4961
4962
4963static void si_init_cg(struct radeon_device *rdev)
4964{
4965 bool has_uvd = true;
4966
4967 si_enable_mgcg(rdev, true);
4968 si_enable_cgcg(rdev, true);
4969 /* disable MC LS on Tahiti */
4970 if (rdev->family == CHIP_TAHITI)
4971 si_enable_mc_ls(rdev, false);
4972 if (has_uvd) {
4973 si_enable_uvd_mgcg(rdev, true);
4974 si_init_uvd_internal_cg(rdev);
4975 }
4976}
4977
4978static void si_fini_cg(struct radeon_device *rdev)
4979{
4980 bool has_uvd = true;
4981
4982 if (has_uvd)
4983 si_enable_uvd_mgcg(rdev, false);
4984 si_enable_cgcg(rdev, false);
4985 si_enable_mgcg(rdev, false);
4986}
4987
4988static void si_init_pg(struct radeon_device *rdev)
4989{
4990 bool has_pg = false;
4991
4992 /* only cape verde supports PG */
4993 if (rdev->family == CHIP_VERDE)
4994 has_pg = true;
4995
4996 if (has_pg) {
4997 si_init_ao_cu_mask(rdev);
4998 si_init_dma_pg(rdev);
4999 si_enable_dma_pg(rdev, true);
5000 si_init_gfx_cgpg(rdev);
5001 si_enable_gfx_cgpg(rdev, true);
5002 } else {
5003 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5004 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5005 }
5006}
5007
5008static void si_fini_pg(struct radeon_device *rdev)
5009{
5010 bool has_pg = false;
5011
5012 /* only cape verde supports PG */
5013 if (rdev->family == CHIP_VERDE)
5014 has_pg = true;
5015
5016 if (has_pg) {
5017 si_enable_dma_pg(rdev, false);
5018 si_enable_gfx_cgpg(rdev, false);
5019 }
5020}
5021
5022/*
4285 * RLC 5023 * RLC
4286 */ 5024 */
4287void si_rlc_fini(struct radeon_device *rdev) 5025void si_rlc_fini(struct radeon_device *rdev)
@@ -4313,8 +5051,15 @@ void si_rlc_fini(struct radeon_device *rdev)
4313 } 5051 }
4314} 5052}
4315 5053
5054#define RLC_CLEAR_STATE_END_MARKER 0x00000001
5055
4316int si_rlc_init(struct radeon_device *rdev) 5056int si_rlc_init(struct radeon_device *rdev)
4317{ 5057{
5058 volatile u32 *dst_ptr;
5059 u32 dws, data, i, j, k, reg_num;
5060 u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index;
5061 u64 reg_list_mc_addr;
5062 const struct cs_section_def *cs_data = si_cs_data;
4318 int r; 5063 int r;
4319 5064
4320 /* save restore block */ 5065 /* save restore block */
@@ -4335,18 +5080,44 @@ int si_rlc_init(struct radeon_device *rdev)
4335 } 5080 }
4336 r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, 5081 r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
4337 &rdev->rlc.save_restore_gpu_addr); 5082 &rdev->rlc.save_restore_gpu_addr);
4338 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
4339 if (r) { 5083 if (r) {
5084 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
4340 dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); 5085 dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
4341 si_rlc_fini(rdev); 5086 si_rlc_fini(rdev);
4342 return r; 5087 return r;
4343 } 5088 }
4344 5089
5090 if (rdev->family == CHIP_VERDE) {
5091 r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
5092 if (r) {
5093 dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
5094 si_rlc_fini(rdev);
5095 return r;
5096 }
5097 /* write the sr buffer */
5098 dst_ptr = rdev->rlc.sr_ptr;
5099 for (i = 0; i < ARRAY_SIZE(verde_rlc_save_restore_register_list); i++) {
5100 dst_ptr[i] = verde_rlc_save_restore_register_list[i];
5101 }
5102 radeon_bo_kunmap(rdev->rlc.save_restore_obj);
5103 }
5104 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
5105
4345 /* clear state block */ 5106 /* clear state block */
5107 reg_list_num = 0;
5108 dws = 0;
5109 for (i = 0; cs_data[i].section != NULL; i++) {
5110 for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
5111 reg_list_num++;
5112 dws += cs_data[i].section[j].reg_count;
5113 }
5114 }
5115 reg_list_blk_index = (3 * reg_list_num + 2);
5116 dws += reg_list_blk_index;
5117
4346 if (rdev->rlc.clear_state_obj == NULL) { 5118 if (rdev->rlc.clear_state_obj == NULL) {
4347 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, 5119 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
4348 RADEON_GEM_DOMAIN_VRAM, NULL, 5120 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
4349 &rdev->rlc.clear_state_obj);
4350 if (r) { 5121 if (r) {
4351 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); 5122 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
4352 si_rlc_fini(rdev); 5123 si_rlc_fini(rdev);
@@ -4360,24 +5131,113 @@ int si_rlc_init(struct radeon_device *rdev)
4360 } 5131 }
4361 r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, 5132 r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
4362 &rdev->rlc.clear_state_gpu_addr); 5133 &rdev->rlc.clear_state_gpu_addr);
4363 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
4364 if (r) { 5134 if (r) {
5135
5136 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
4365 dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); 5137 dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
4366 si_rlc_fini(rdev); 5138 si_rlc_fini(rdev);
4367 return r; 5139 return r;
4368 } 5140 }
5141 r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
5142 if (r) {
5143 dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
5144 si_rlc_fini(rdev);
5145 return r;
5146 }
5147 /* set up the cs buffer */
5148 dst_ptr = rdev->rlc.cs_ptr;
5149 reg_list_hdr_blk_index = 0;
5150 reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
5151 data = upper_32_bits(reg_list_mc_addr);
5152 dst_ptr[reg_list_hdr_blk_index] = data;
5153 reg_list_hdr_blk_index++;
5154 for (i = 0; cs_data[i].section != NULL; i++) {
5155 for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
5156 reg_num = cs_data[i].section[j].reg_count;
5157 data = reg_list_mc_addr & 0xffffffff;
5158 dst_ptr[reg_list_hdr_blk_index] = data;
5159 reg_list_hdr_blk_index++;
5160
5161 data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
5162 dst_ptr[reg_list_hdr_blk_index] = data;
5163 reg_list_hdr_blk_index++;
5164
5165 data = 0x08000000 | (reg_num * 4);
5166 dst_ptr[reg_list_hdr_blk_index] = data;
5167 reg_list_hdr_blk_index++;
5168
5169 for (k = 0; k < reg_num; k++) {
5170 data = cs_data[i].section[j].extent[k];
5171 dst_ptr[reg_list_blk_index + k] = data;
5172 }
5173 reg_list_mc_addr += reg_num * 4;
5174 reg_list_blk_index += reg_num;
5175 }
5176 }
5177 dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
5178
5179 radeon_bo_kunmap(rdev->rlc.clear_state_obj);
5180 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
4369 5181
4370 return 0; 5182 return 0;
4371} 5183}
4372 5184
5185static void si_rlc_reset(struct radeon_device *rdev)
5186{
5187 u32 tmp = RREG32(GRBM_SOFT_RESET);
5188
5189 tmp |= SOFT_RESET_RLC;
5190 WREG32(GRBM_SOFT_RESET, tmp);
5191 udelay(50);
5192 tmp &= ~SOFT_RESET_RLC;
5193 WREG32(GRBM_SOFT_RESET, tmp);
5194 udelay(50);
5195}
5196
4373static void si_rlc_stop(struct radeon_device *rdev) 5197static void si_rlc_stop(struct radeon_device *rdev)
4374{ 5198{
4375 WREG32(RLC_CNTL, 0); 5199 WREG32(RLC_CNTL, 0);
5200
5201 si_enable_gui_idle_interrupt(rdev, false);
5202
5203 si_wait_for_rlc_serdes(rdev);
4376} 5204}
4377 5205
4378static void si_rlc_start(struct radeon_device *rdev) 5206static void si_rlc_start(struct radeon_device *rdev)
4379{ 5207{
4380 WREG32(RLC_CNTL, RLC_ENABLE); 5208 WREG32(RLC_CNTL, RLC_ENABLE);
5209
5210 si_enable_gui_idle_interrupt(rdev, true);
5211
5212 udelay(50);
5213}
5214
5215static bool si_lbpw_supported(struct radeon_device *rdev)
5216{
5217 u32 tmp;
5218
5219 /* Enable LBPW only for DDR3 */
5220 tmp = RREG32(MC_SEQ_MISC0);
5221 if ((tmp & 0xF0000000) == 0xB0000000)
5222 return true;
5223 return false;
5224}
5225
5226static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
5227{
5228 u32 tmp;
5229
5230 tmp = RREG32(RLC_LB_CNTL);
5231 if (enable)
5232 tmp |= LOAD_BALANCE_ENABLE;
5233 else
5234 tmp &= ~LOAD_BALANCE_ENABLE;
5235 WREG32(RLC_LB_CNTL, tmp);
5236
5237 if (!enable) {
5238 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5239 WREG32(SPI_LB_CU_MASK, 0x00ff);
5240 }
4381} 5241}
4382 5242
4383static int si_rlc_resume(struct radeon_device *rdev) 5243static int si_rlc_resume(struct radeon_device *rdev)
@@ -4390,14 +5250,18 @@ static int si_rlc_resume(struct radeon_device *rdev)
4390 5250
4391 si_rlc_stop(rdev); 5251 si_rlc_stop(rdev);
4392 5252
5253 si_rlc_reset(rdev);
5254
5255 si_init_pg(rdev);
5256
5257 si_init_cg(rdev);
5258
4393 WREG32(RLC_RL_BASE, 0); 5259 WREG32(RLC_RL_BASE, 0);
4394 WREG32(RLC_RL_SIZE, 0); 5260 WREG32(RLC_RL_SIZE, 0);
4395 WREG32(RLC_LB_CNTL, 0); 5261 WREG32(RLC_LB_CNTL, 0);
4396 WREG32(RLC_LB_CNTR_MAX, 0xffffffff); 5262 WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
4397 WREG32(RLC_LB_CNTR_INIT, 0); 5263 WREG32(RLC_LB_CNTR_INIT, 0);
4398 5264 WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
4399 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
4400 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
4401 5265
4402 WREG32(RLC_MC_CNTL, 0); 5266 WREG32(RLC_MC_CNTL, 0);
4403 WREG32(RLC_UCODE_CNTL, 0); 5267 WREG32(RLC_UCODE_CNTL, 0);
@@ -4409,6 +5273,8 @@ static int si_rlc_resume(struct radeon_device *rdev)
4409 } 5273 }
4410 WREG32(RLC_UCODE_ADDR, 0); 5274 WREG32(RLC_UCODE_ADDR, 0);
4411 5275
5276 si_enable_lbpw(rdev, si_lbpw_supported(rdev));
5277
4412 si_rlc_start(rdev); 5278 si_rlc_start(rdev);
4413 5279
4414 return 0; 5280 return 0;
@@ -4578,6 +5444,7 @@ int si_irq_set(struct radeon_device *rdev)
4578 u32 grbm_int_cntl = 0; 5444 u32 grbm_int_cntl = 0;
4579 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; 5445 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
4580 u32 dma_cntl, dma_cntl1; 5446 u32 dma_cntl, dma_cntl1;
5447 u32 thermal_int = 0;
4581 5448
4582 if (!rdev->irq.installed) { 5449 if (!rdev->irq.installed) {
4583 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 5450 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -4603,6 +5470,9 @@ int si_irq_set(struct radeon_device *rdev)
4603 dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; 5470 dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
4604 dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; 5471 dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
4605 5472
5473 thermal_int = RREG32(CG_THERMAL_INT) &
5474 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
5475
4606 /* enable CP interrupts on all rings */ 5476 /* enable CP interrupts on all rings */
4607 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 5477 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
4608 DRM_DEBUG("si_irq_set: sw int gfx\n"); 5478 DRM_DEBUG("si_irq_set: sw int gfx\n");
@@ -4689,6 +5559,11 @@ int si_irq_set(struct radeon_device *rdev)
4689 5559
4690 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 5560 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
4691 5561
5562 if (rdev->irq.dpm_thermal) {
5563 DRM_DEBUG("dpm thermal\n");
5564 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
5565 }
5566
4692 if (rdev->num_crtc >= 2) { 5567 if (rdev->num_crtc >= 2) {
4693 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); 5568 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
4694 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); 5569 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
@@ -4724,6 +5599,8 @@ int si_irq_set(struct radeon_device *rdev)
4724 WREG32(DC_HPD6_INT_CONTROL, hpd6); 5599 WREG32(DC_HPD6_INT_CONTROL, hpd6);
4725 } 5600 }
4726 5601
5602 WREG32(CG_THERMAL_INT, thermal_int);
5603
4727 return 0; 5604 return 0;
4728} 5605}
4729 5606
@@ -4888,6 +5765,7 @@ int si_irq_process(struct radeon_device *rdev)
4888 u32 src_id, src_data, ring_id; 5765 u32 src_id, src_data, ring_id;
4889 u32 ring_index; 5766 u32 ring_index;
4890 bool queue_hotplug = false; 5767 bool queue_hotplug = false;
5768 bool queue_thermal = false;
4891 5769
4892 if (!rdev->ih.enabled || rdev->shutdown) 5770 if (!rdev->ih.enabled || rdev->shutdown)
4893 return IRQ_NONE; 5771 return IRQ_NONE;
@@ -5158,6 +6036,16 @@ restart_ih:
5158 DRM_DEBUG("IH: DMA trap\n"); 6036 DRM_DEBUG("IH: DMA trap\n");
5159 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); 6037 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
5160 break; 6038 break;
6039 case 230: /* thermal low to high */
6040 DRM_DEBUG("IH: thermal low to high\n");
6041 rdev->pm.dpm.thermal.high_to_low = false;
6042 queue_thermal = true;
6043 break;
6044 case 231: /* thermal high to low */
6045 DRM_DEBUG("IH: thermal high to low\n");
6046 rdev->pm.dpm.thermal.high_to_low = true;
6047 queue_thermal = true;
6048 break;
5161 case 233: /* GUI IDLE */ 6049 case 233: /* GUI IDLE */
5162 DRM_DEBUG("IH: GUI idle\n"); 6050 DRM_DEBUG("IH: GUI idle\n");
5163 break; 6051 break;
@@ -5176,6 +6064,8 @@ restart_ih:
5176 } 6064 }
5177 if (queue_hotplug) 6065 if (queue_hotplug)
5178 schedule_work(&rdev->hotplug_work); 6066 schedule_work(&rdev->hotplug_work);
6067 if (queue_thermal && rdev->pm.dpm_enabled)
6068 schedule_work(&rdev->pm.dpm.thermal.work);
5179 rdev->ih.rptr = rptr; 6069 rdev->ih.rptr = rptr;
5180 WREG32(IH_RB_RPTR, rdev->ih.rptr); 6070 WREG32(IH_RB_RPTR, rdev->ih.rptr);
5181 atomic_set(&rdev->ih.lock, 0); 6071 atomic_set(&rdev->ih.lock, 0);
@@ -5270,6 +6160,11 @@ static int si_startup(struct radeon_device *rdev)
5270 struct radeon_ring *ring; 6160 struct radeon_ring *ring;
5271 int r; 6161 int r;
5272 6162
6163 /* enable pcie gen2/3 link */
6164 si_pcie_gen3_enable(rdev);
6165 /* enable aspm */
6166 si_program_aspm(rdev);
6167
5273 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || 6168 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
5274 !rdev->rlc_fw || !rdev->mc_fw) { 6169 !rdev->rlc_fw || !rdev->mc_fw) {
5275 r = si_init_microcode(rdev); 6170 r = si_init_microcode(rdev);
@@ -5609,6 +6504,8 @@ void si_fini(struct radeon_device *rdev)
5609 cayman_dma_fini(rdev); 6504 cayman_dma_fini(rdev);
5610 si_irq_fini(rdev); 6505 si_irq_fini(rdev);
5611 si_rlc_fini(rdev); 6506 si_rlc_fini(rdev);
6507 si_fini_cg(rdev);
6508 si_fini_pg(rdev);
5612 radeon_wb_fini(rdev); 6509 radeon_wb_fini(rdev);
5613 radeon_vm_manager_fini(rdev); 6510 radeon_vm_manager_fini(rdev);
5614 radeon_ib_pool_fini(rdev); 6511 radeon_ib_pool_fini(rdev);
@@ -5735,3 +6632,361 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
5735 6632
5736 return 0; 6633 return 0;
5737} 6634}
6635
6636static void si_pcie_gen3_enable(struct radeon_device *rdev)
6637{
6638 struct pci_dev *root = rdev->pdev->bus->self;
6639 int bridge_pos, gpu_pos;
6640 u32 speed_cntl, mask, current_data_rate;
6641 int ret, i;
6642 u16 tmp16;
6643
6644 if (radeon_pcie_gen2 == 0)
6645 return;
6646
6647 if (rdev->flags & RADEON_IS_IGP)
6648 return;
6649
6650 if (!(rdev->flags & RADEON_IS_PCIE))
6651 return;
6652
6653 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
6654 if (ret != 0)
6655 return;
6656
6657 if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
6658 return;
6659
6660 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
6661 current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
6662 LC_CURRENT_DATA_RATE_SHIFT;
6663 if (mask & DRM_PCIE_SPEED_80) {
6664 if (current_data_rate == 2) {
6665 DRM_INFO("PCIE gen 3 link speeds already enabled\n");
6666 return;
6667 }
6668 DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
6669 } else if (mask & DRM_PCIE_SPEED_50) {
6670 if (current_data_rate == 1) {
6671 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
6672 return;
6673 }
6674 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
6675 }
6676
6677 bridge_pos = pci_pcie_cap(root);
6678 if (!bridge_pos)
6679 return;
6680
6681 gpu_pos = pci_pcie_cap(rdev->pdev);
6682 if (!gpu_pos)
6683 return;
6684
6685 if (mask & DRM_PCIE_SPEED_80) {
6686 /* re-try equalization if gen3 is not already enabled */
6687 if (current_data_rate != 2) {
6688 u16 bridge_cfg, gpu_cfg;
6689 u16 bridge_cfg2, gpu_cfg2;
6690 u32 max_lw, current_lw, tmp;
6691
6692 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
6693 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
6694
6695 tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
6696 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
6697
6698 tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
6699 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
6700
6701 tmp = RREG32_PCIE(PCIE_LC_STATUS1);
6702 max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
6703 current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
6704
6705 if (current_lw < max_lw) {
6706 tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
6707 if (tmp & LC_RENEGOTIATION_SUPPORT) {
6708 tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
6709 tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
6710 tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
6711 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
6712 }
6713 }
6714
6715 for (i = 0; i < 10; i++) {
6716 /* check status */
6717 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
6718 if (tmp16 & PCI_EXP_DEVSTA_TRPND)
6719 break;
6720
6721 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
6722 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
6723
6724 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
6725 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
6726
6727 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
6728 tmp |= LC_SET_QUIESCE;
6729 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
6730
6731 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
6732 tmp |= LC_REDO_EQ;
6733 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
6734
6735 mdelay(100);
6736
6737 /* linkctl */
6738 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
6739 tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
6740 tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
6741 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
6742
6743 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
6744 tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
6745 tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
6746 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
6747
6748 /* linkctl2 */
6749 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
6750 tmp16 &= ~((1 << 4) | (7 << 9));
6751 tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
6752 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
6753
6754 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
6755 tmp16 &= ~((1 << 4) | (7 << 9));
6756 tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
6757 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
6758
6759 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
6760 tmp &= ~LC_SET_QUIESCE;
6761 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
6762 }
6763 }
6764 }
6765
6766 /* set the link speed */
6767 speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
6768 speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
6769 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
6770
6771 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
6772 tmp16 &= ~0xf;
6773 if (mask & DRM_PCIE_SPEED_80)
6774 tmp16 |= 3; /* gen3 */
6775 else if (mask & DRM_PCIE_SPEED_50)
6776 tmp16 |= 2; /* gen2 */
6777 else
6778 tmp16 |= 1; /* gen1 */
6779 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
6780
6781 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
6782 speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
6783 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
6784
6785 for (i = 0; i < rdev->usec_timeout; i++) {
6786 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
6787 if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
6788 break;
6789 udelay(1);
6790 }
6791}
6792
6793static void si_program_aspm(struct radeon_device *rdev)
6794{
6795 u32 data, orig;
6796 bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
6797 bool disable_clkreq = false;
6798
6799 if (!(rdev->flags & RADEON_IS_PCIE))
6800 return;
6801
6802 orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
6803 data &= ~LC_XMIT_N_FTS_MASK;
6804 data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
6805 if (orig != data)
6806 WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
6807
6808 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
6809 data |= LC_GO_TO_RECOVERY;
6810 if (orig != data)
6811 WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
6812
6813 orig = data = RREG32_PCIE(PCIE_P_CNTL);
6814 data |= P_IGNORE_EDB_ERR;
6815 if (orig != data)
6816 WREG32_PCIE(PCIE_P_CNTL, data);
6817
6818 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
6819 data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
6820 data |= LC_PMI_TO_L1_DIS;
6821 if (!disable_l0s)
6822 data |= LC_L0S_INACTIVITY(7);
6823
6824 if (!disable_l1) {
6825 data |= LC_L1_INACTIVITY(7);
6826 data &= ~LC_PMI_TO_L1_DIS;
6827 if (orig != data)
6828 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
6829
6830 if (!disable_plloff_in_l1) {
6831 bool clk_req_support;
6832
6833 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
6834 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
6835 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
6836 if (orig != data)
6837 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
6838
6839 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
6840 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
6841 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
6842 if (orig != data)
6843 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
6844
6845 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
6846 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
6847 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
6848 if (orig != data)
6849 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
6850
6851 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
6852 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
6853 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
6854 if (orig != data)
6855 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
6856
6857 if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
6858 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
6859 data &= ~PLL_RAMP_UP_TIME_0_MASK;
6860 if (orig != data)
6861 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
6862
6863 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
6864 data &= ~PLL_RAMP_UP_TIME_1_MASK;
6865 if (orig != data)
6866 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
6867
6868 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2);
6869 data &= ~PLL_RAMP_UP_TIME_2_MASK;
6870 if (orig != data)
6871 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2, data);
6872
6873 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3);
6874 data &= ~PLL_RAMP_UP_TIME_3_MASK;
6875 if (orig != data)
6876 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3, data);
6877
6878 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
6879 data &= ~PLL_RAMP_UP_TIME_0_MASK;
6880 if (orig != data)
6881 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
6882
6883 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
6884 data &= ~PLL_RAMP_UP_TIME_1_MASK;
6885 if (orig != data)
6886 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
6887
6888 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2);
6889 data &= ~PLL_RAMP_UP_TIME_2_MASK;
6890 if (orig != data)
6891 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2, data);
6892
6893 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3);
6894 data &= ~PLL_RAMP_UP_TIME_3_MASK;
6895 if (orig != data)
6896 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3, data);
6897 }
6898 orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
6899 data &= ~LC_DYN_LANES_PWR_STATE_MASK;
6900 data |= LC_DYN_LANES_PWR_STATE(3);
6901 if (orig != data)
6902 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
6903
6904 orig = data = RREG32_PIF_PHY0(PB0_PIF_CNTL);
6905 data &= ~LS2_EXIT_TIME_MASK;
6906 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
6907 data |= LS2_EXIT_TIME(5);
6908 if (orig != data)
6909 WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
6910
6911 orig = data = RREG32_PIF_PHY1(PB1_PIF_CNTL);
6912 data &= ~LS2_EXIT_TIME_MASK;
6913 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
6914 data |= LS2_EXIT_TIME(5);
6915 if (orig != data)
6916 WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
6917
6918 if (!disable_clkreq) {
6919 struct pci_dev *root = rdev->pdev->bus->self;
6920 u32 lnkcap;
6921
6922 clk_req_support = false;
6923 pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
6924 if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
6925 clk_req_support = true;
6926 } else {
6927 clk_req_support = false;
6928 }
6929
6930 if (clk_req_support) {
6931 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
6932 data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
6933 if (orig != data)
6934 WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
6935
6936 orig = data = RREG32(THM_CLK_CNTL);
6937 data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
6938 data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
6939 if (orig != data)
6940 WREG32(THM_CLK_CNTL, data);
6941
6942 orig = data = RREG32(MISC_CLK_CNTL);
6943 data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
6944 data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
6945 if (orig != data)
6946 WREG32(MISC_CLK_CNTL, data);
6947
6948 orig = data = RREG32(CG_CLKPIN_CNTL);
6949 data &= ~BCLK_AS_XCLK;
6950 if (orig != data)
6951 WREG32(CG_CLKPIN_CNTL, data);
6952
6953 orig = data = RREG32(CG_CLKPIN_CNTL_2);
6954 data &= ~FORCE_BIF_REFCLK_EN;
6955 if (orig != data)
6956 WREG32(CG_CLKPIN_CNTL_2, data);
6957
6958 orig = data = RREG32(MPLL_BYPASSCLK_SEL);
6959 data &= ~MPLL_CLKOUT_SEL_MASK;
6960 data |= MPLL_CLKOUT_SEL(4);
6961 if (orig != data)
6962 WREG32(MPLL_BYPASSCLK_SEL, data);
6963
6964 orig = data = RREG32(SPLL_CNTL_MODE);
6965 data &= ~SPLL_REFCLK_SEL_MASK;
6966 if (orig != data)
6967 WREG32(SPLL_CNTL_MODE, data);
6968 }
6969 }
6970 } else {
6971 if (orig != data)
6972 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
6973 }
6974
6975 orig = data = RREG32_PCIE(PCIE_CNTL2);
6976 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
6977 if (orig != data)
6978 WREG32_PCIE(PCIE_CNTL2, data);
6979
6980 if (!disable_l0s) {
6981 data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
6982 if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
6983 data = RREG32_PCIE(PCIE_LC_STATUS1);
6984 if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
6985 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
6986 data &= ~LC_L0S_INACTIVITY_MASK;
6987 if (orig != data)
6988 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
6989 }
6990 }
6991 }
6992}
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
new file mode 100644
index 000000000000..6918f070eb52
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -0,0 +1,6387 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "sid.h"
27#include "r600_dpm.h"
28#include "si_dpm.h"
29#include "atom.h"
30#include <linux/math64.h>
31
/*
 * Memory-controller clock/arbitration constants used by the DPM code below.
 * MC_CG_ARB_FREQ_F0..F3 look like indices for the four MC arbiter frequency
 * states; SMC_RAM_END bounds SMC address-space accesses. Exact register-level
 * meanings are defined by the SMC firmware interface — confirm against sid.h
 * and the SMC headers (NOTE(review): not visible in this file).
 */
32#define MC_CG_ARB_FREQ_F0 0x0a
33#define MC_CG_ARB_FREQ_F1 0x0b
34#define MC_CG_ARB_FREQ_F2 0x0c
35#define MC_CG_ARB_FREQ_F3 0x0d
36
37#define SMC_RAM_END 0x20000
38
39#define DDR3_DRAM_ROWS 0x2000
40
41#define SCLK_MIN_DEEPSLEEP_FREQ 1350
42
/*
 * CAC (capacitance * activity) weight table for Tahiti.
 * Each row appears to be { register index, field mask, shift, weight value,
 * config type } — inferred from the paired 0x0000ffff/0xffff0000 masks with
 * shifts 0/16; confirm field order against struct si_cac_config_reg in
 * si_dpm.h. The list is terminated by a 0xFFFFFFFF sentinel row.
 */
43static const struct si_cac_config_reg cac_weights_tahiti[] =
44{
45 { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
46 { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
47 { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
48 { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
49 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
50 { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
51 { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
52 { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
53 { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
54 { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
55 { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
56 { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
57 { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
58 { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
59 { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
60 { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
61 { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
62 { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
63 { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
64 { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
65 { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
66 { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
67 { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
68 { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
69 { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
70 { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
71 { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
72 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
73 { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
74 { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
75 { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
76 { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
77 { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
78 { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
79 { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
80 { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
81 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
82 { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
83 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
84 { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
85 { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
86 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
87 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
88 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
89 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
90 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
91 { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
92 { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
93 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
94 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
95 { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
96 { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
97 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
98 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
99 { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
100 { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
101 { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
102 { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
103 { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
104 { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
105 { 0xFFFFFFFF }
106};
107
/*
 * Local CAC (per-block leakage) configuration for Tahiti. Rows come in pairs
 * per register: a multi-bit field at mask 0x0001fffe/shift 1 (value), and an
 * enable bit at mask 0x00000001/shift 0 (always 0x1 here). Field order is
 * inferred from the masks — confirm against struct si_cac_config_reg.
 * Terminated by a 0xFFFFFFFF sentinel row.
 */
108static const struct si_cac_config_reg lcac_tahiti[] =
109{
110 { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
111 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
112 { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
113 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
114 { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
115 { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
116 { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
117 { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
118 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
119 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
120 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
121 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
122 { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
123 { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
124 { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
125 { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
126 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
127 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
128 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
129 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
130 { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
131 { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
132 { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
133 { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
134 { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
135 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
136 { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
137 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
138 { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
139 { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
140 { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
141 { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
142 { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
143 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
144 { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
145 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
146 { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
147 { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
148 { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
149 { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
150 { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
151 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
152 { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
153 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
154 { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
155 { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
156 { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
157 { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
158 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
159 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
160 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
161 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
162 { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
163 { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
164 { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
165 { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
166 { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
167 { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
168 { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
169 { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
170 { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
171 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
172 { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
173 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
174 { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
175 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
176 { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
177 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
178 { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
179 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
180 { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
181 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
182 { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
183 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
184 { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
185 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
186 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
187 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
188 { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
189 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
190 { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
191 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
192 { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
193 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
194 { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
195 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
196 { 0xFFFFFFFF }
197
198};
199
/* CAC override table for Tahiti: intentionally empty — only the 0xFFFFFFFF
 * sentinel row, i.e. no per-board overrides to apply. */
200static const struct si_cac_config_reg cac_override_tahiti[] =
201{
202 { 0xFFFFFFFF }
203};
204
/*
 * PowerTune coefficients for Tahiti. Field order follows
 * struct si_powertune_data (declared in si_dpm.h, not visible here) —
 * positional values below include a fixed-point scale ((1 << 16) | 27027),
 * a signed/unsigned coefficient block (note the negative term), and a
 * trailing enable flag. TODO(review): confirm each field name against the
 * struct definition before editing values.
 */
205static const struct si_powertune_data powertune_data_tahiti =
206{
207 ((1 << 16) | 27027),
208 6,
209 0,
210 4,
211 95,
212 {
213 0UL,
214 0UL,
215 4521550UL,
216 309631529UL,
217 -1270850L,
218 4513710L,
219 40
220 },
221 595000000UL,
222 12,
223 {
224 0,
225 0,
226 0,
227 0,
228 0,
229 0,
230 0,
231 0
232 },
233 true
234};
235
/*
 * DTE (Digital Temperature Estimation) parameters for Tahiti. Positional
 * initializer for struct si_dte_data (not visible here): scalar tuning
 * values followed by three 16-entry coefficient tables and an enable flag
 * (false — DTE disabled for this SKU). Confirm field names against si_dpm.h.
 */
236static const struct si_dte_data dte_data_tahiti =
237{
238 { 1159409, 0, 0, 0, 0 },
239 { 777, 0, 0, 0, 0 },
240 2,
241 54000,
242 127000,
243 25,
244 2,
245 10,
246 13,
247 { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
248 { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
249 { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
250 85,
251 false
252};
253
/*
 * DTE parameters for Tahiti LE. Same positional layout as dte_data_tahiti;
 * trailing true enables DTE for this SKU. Field meanings per struct
 * si_dte_data in si_dpm.h (not visible here) — confirm before editing.
 */
254static const struct si_dte_data dte_data_tahiti_le =
255{
256 { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
257 { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
258 0x5,
259 0xAFC8,
260 0x64,
261 0x32,
262 1,
263 0,
264 0x10,
265 { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
266 { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
267 { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
268 85,
269 true
270};
271
/*
 * DTE parameters for Tahiti PRO (DTE enabled). Positional initializer for
 * struct si_dte_data; this layout is shared verbatim (except one table
 * entry) with dte_data_aruba_pro / dte_data_malta below.
 */
272static const struct si_dte_data dte_data_tahiti_pro =
273{
274 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
275 { 0x0, 0x0, 0x0, 0x0, 0x0 },
276 5,
277 45000,
278 100,
279 0xA,
280 1,
281 0,
282 0x10,
283 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
284 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
285 { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
286 90,
287 true
288};
289
/*
 * DTE parameters for New Zealand (DTE enabled). Positional initializer for
 * struct si_dte_data — confirm field names against si_dpm.h before editing.
 */
290static const struct si_dte_data dte_data_new_zealand =
291{
292 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
293 { 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
294 0x5,
295 0xAFC8,
296 0x69,
297 0x32,
298 1,
299 0,
300 0x10,
301 { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
302 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
303 { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
304 85,
305 true
306};
307
/*
 * DTE parameters for Aruba PRO (DTE enabled). Identical to
 * dte_data_tahiti_pro except the first entry of the last coefficient table
 * (0x1000 vs 0x7D0).
 */
308static const struct si_dte_data dte_data_aruba_pro =
309{
310 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
311 { 0x0, 0x0, 0x0, 0x0, 0x0 },
312 5,
313 45000,
314 100,
315 0xA,
316 1,
317 0,
318 0x10,
319 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
320 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
321 { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
322 90,
323 true
324};
325
/*
 * DTE parameters for Malta (DTE enabled). Identical to dte_data_tahiti_pro
 * except the last coefficient table is all zeroes.
 */
326static const struct si_dte_data dte_data_malta =
327{
328 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
329 { 0x0, 0x0, 0x0, 0x0, 0x0 },
330 5,
331 45000,
332 100,
333 0xA,
334 1,
335 0,
336 0x10,
337 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
338 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
339 { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
340 90,
341 true
342};
343
344struct si_cac_config_reg cac_weights_pitcairn[] =
345{
346 { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
347 { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
348 { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
349 { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
350 { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
351 { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
352 { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
353 { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
354 { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
355 { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
356 { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
357 { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
358 { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
359 { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
360 { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
361 { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
362 { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
363 { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
364 { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
365 { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
366 { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
367 { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
368 { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
369 { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
370 { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
371 { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
372 { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
373 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
374 { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
375 { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
376 { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
377 { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
378 { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
379 { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
380 { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
381 { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
382 { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
383 { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
384 { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
385 { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
386 { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
387 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
388 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
389 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
390 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
391 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
392 { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
393 { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
394 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
395 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
396 { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
397 { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
398 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
399 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
400 { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
401 { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
402 { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
403 { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
404 { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
405 { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
406 { 0xFFFFFFFF }
407};
408
/*
 * Local CAC configuration for Pitcairn. Same paired-row pattern as
 * lcac_tahiti: value field at mask 0x0001fffe/shift 1 plus enable bit at
 * mask 0x00000001/shift 0 per register. 0xFFFFFFFF-terminated.
 */
409static const struct si_cac_config_reg lcac_pitcairn[] =
410{
411 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
412 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
413 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
414 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
415 { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
416 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
417 { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
418 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
419 { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
420 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
421 { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
422 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
423 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
424 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
425 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
426 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
427 { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
428 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
429 { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
430 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
431 { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
432 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
433 { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
434 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
435 { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
436 { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
437 { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
438 { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
439 { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
440 { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
441 { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
442 { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
443 { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
444 { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
445 { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
446 { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
447 { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
448 { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
449 { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
450 { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
451 { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
452 { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
453 { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
454 { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
455 { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
456 { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
457 { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
458 { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
459 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
460 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
461 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
462 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
463 { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
464 { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
465 { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
466 { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
467 { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
468 { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
469 { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
470 { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
471 { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
472 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
473 { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
474 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
475 { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
476 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
477 { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
478 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
479 { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
480 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
481 { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
482 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
483 { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
484 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
485 { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
486 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
487 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
488 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
489 { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
490 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
491 { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
492 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
493 { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
494 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
495 { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
496 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
497 { 0xFFFFFFFF }
498};
499
/* CAC override table for Pitcairn: empty (sentinel only) — no overrides. */
500static const struct si_cac_config_reg cac_override_pitcairn[] =
501{
502 { 0xFFFFFFFF }
503};
504
/*
 * PowerTune coefficients for Pitcairn. Same positional layout as
 * powertune_data_tahiti (struct si_powertune_data, not visible here);
 * differs in the scalar tuning values and coefficient block. Confirm field
 * names against si_dpm.h before editing.
 */
505static const struct si_powertune_data powertune_data_pitcairn =
506{
507 ((1 << 16) | 27027),
508 5,
509 0,
510 6,
511 100,
512 {
513 51600000UL,
514 1800000UL,
515 7194395UL,
516 309631529UL,
517 -1270850L,
518 4513710L,
519 100
520 },
521 117830498UL,
522 12,
523 {
524 0,
525 0,
526 0,
527 0,
528 0,
529 0,
530 0,
531 0
532 },
533 true
534};
535
/*
 * DTE parameters for Pitcairn: all-zero with the trailing enable flag
 * false, i.e. DTE is not used on this SKU.
 */
536static const struct si_dte_data dte_data_pitcairn =
537{
538 { 0, 0, 0, 0, 0 },
539 { 0, 0, 0, 0, 0 },
540 0,
541 0,
542 0,
543 0,
544 0,
545 0,
546 0,
547 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
548 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
549 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
550 0,
551 false
552};
553
/*
 * DTE parameters for Curacao XT (DTE enabled). Positional initializer for
 * struct si_dte_data; identical to dte_data_curacao_pro below.
 */
554static const struct si_dte_data dte_data_curacao_xt =
555{
556 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
557 { 0x0, 0x0, 0x0, 0x0, 0x0 },
558 5,
559 45000,
560 100,
561 0xA,
562 1,
563 0,
564 0x10,
565 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
566 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
567 { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
568 90,
569 true
570};
571
/*
 * DTE parameters for Curacao PRO (DTE enabled). Currently byte-identical to
 * dte_data_curacao_xt; kept separate so the SKUs can diverge independently.
 */
572static const struct si_dte_data dte_data_curacao_pro =
573{
574 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
575 { 0x0, 0x0, 0x0, 0x0, 0x0 },
576 5,
577 45000,
578 100,
579 0xA,
580 1,
581 0,
582 0x10,
583 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
584 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
585 { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
586 90,
587 true
588};
589
/*
 * DTE parameters for Neptune XT (DTE enabled). Same layout as the Curacao
 * tables; differs only in the first entry of the last coefficient table
 * (0x3A2F vs 0x1D17).
 */
590static const struct si_dte_data dte_data_neptune_xt =
591{
592 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
593 { 0x0, 0x0, 0x0, 0x0, 0x0 },
594 5,
595 45000,
596 100,
597 0xA,
598 1,
599 0,
600 0x10,
601 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
602 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
603 { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
604 90,
605 true
606};
607
/*
 * CAC weight table for Chelsea PRO. Same row layout as cac_weights_tahiti.
 * Differs from cac_weights_chelsea_xt only in the 0x14 entry
 * (0x2BD vs 0x30A). 0xFFFFFFFF-terminated.
 */
608static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
609{
610 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
611 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
612 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
613 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
614 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
615 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
616 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
617 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
618 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
619 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
620 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
621 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
622 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
623 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
624 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
625 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
626 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
627 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
628 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
629 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
630 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
631 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
632 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
633 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
634 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
635 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
636 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
637 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
638 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
639 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
640 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
641 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
642 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
643 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
644 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
645 { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
646 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
647 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
648 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
649 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
650 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
651 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
652 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
653 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
654 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
655 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
656 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
657 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
658 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
659 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
660 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
661 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
662 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
663 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
664 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
665 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
666 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
667 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
668 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
669 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
670 { 0xFFFFFFFF }
671};
672
/*
 * CAC weight table for Chelsea XT. Same row layout as cac_weights_tahiti.
 * Differs from cac_weights_chelsea_pro only in the 0x14 entry
 * (0x30A vs 0x2BD). 0xFFFFFFFF-terminated.
 */
673static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
674{
675 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
676 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
677 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
678 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
679 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
680 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
681 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
682 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
683 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
684 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
685 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
686 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
687 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
688 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
689 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
690 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
691 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
692 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
693 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
694 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
695 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
696 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
697 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
698 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
699 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
700 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
701 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
702 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
703 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
704 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
705 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
706 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
707 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
708 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
709 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
710 { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
711 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
712 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
713 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
714 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
715 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
716 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
717 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
718 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
719 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
720 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
721 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
722 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
723 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
724 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
725 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
726 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
727 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
728 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
729 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
730 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
731 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
732 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
733 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
734 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
735 { 0xFFFFFFFF }
736};
737
738static const struct si_cac_config_reg cac_weights_heathrow[] =
739{
740 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
741 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
742 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
743 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
744 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
745 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
746 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
747 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
748 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
749 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
750 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
751 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
752 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
753 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
754 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
755 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
756 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
757 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
758 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
759 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
760 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
761 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
762 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
763 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
764 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
765 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
766 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
767 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
768 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
769 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
770 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
771 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
772 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
773 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
774 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
775 { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
776 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
777 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
778 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
779 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
780 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
781 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
782 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
783 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
784 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
785 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
786 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
787 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
788 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
789 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
790 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
791 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
792 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
793 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
794 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
795 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
796 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
797 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
798 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
799 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
800 { 0xFFFFFFFF }
801};
802
803static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
804{
805 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
806 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
807 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
808 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
809 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
810 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
811 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
812 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
813 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
814 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
815 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
816 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
817 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
818 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
819 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
820 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
821 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
822 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
823 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
824 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
825 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
826 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
827 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
828 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
829 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
830 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
831 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
832 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
833 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
834 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
835 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
836 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
837 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
838 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
839 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
840 { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
841 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
842 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
843 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
844 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
845 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
846 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
847 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
848 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
849 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
850 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
851 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
852 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
853 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
854 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
855 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
856 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
857 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
858 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
859 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
860 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
861 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
862 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
863 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
864 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
865 { 0xFFFFFFFF }
866};
867
868static const struct si_cac_config_reg cac_weights_cape_verde[] =
869{
870 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
871 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
872 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
873 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
874 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
875 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
876 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
877 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
878 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
879 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
880 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
881 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
882 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
883 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
884 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
885 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
886 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
887 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
888 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
889 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
890 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
891 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
892 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
893 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
894 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
895 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
896 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
897 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
898 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
899 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
900 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
901 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
902 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
903 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
904 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
905 { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
906 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
907 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
908 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
909 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
910 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
911 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
912 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
913 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
914 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
915 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
916 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
917 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
918 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
919 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
920 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
921 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
922 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
923 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
924 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
925 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
926 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
927 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
928 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
929 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
930 { 0xFFFFFFFF }
931};
932
933static const struct si_cac_config_reg lcac_cape_verde[] =
934{
935 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
936 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
937 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
938 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
939 { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
940 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
941 { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
942 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
943 { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
944 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
945 { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
946 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
947 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
948 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
949 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
950 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
951 { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
952 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
953 { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
954 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
955 { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
956 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
957 { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
958 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
959 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
960 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
961 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
962 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
963 { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
964 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
965 { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
966 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
967 { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
968 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
969 { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
970 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
971 { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
972 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
973 { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
974 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
975 { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
976 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
977 { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
978 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
979 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
980 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
981 { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
982 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
983 { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
984 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
985 { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
986 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
987 { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
988 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
989 { 0xFFFFFFFF }
990};
991
992static const struct si_cac_config_reg cac_override_cape_verde[] =
993{
994 { 0xFFFFFFFF }
995};
996
997static const struct si_powertune_data powertune_data_cape_verde =
998{
999 ((1 << 16) | 0x6993),
1000 5,
1001 0,
1002 7,
1003 105,
1004 {
1005 0UL,
1006 0UL,
1007 7194395UL,
1008 309631529UL,
1009 -1270850L,
1010 4513710L,
1011 100
1012 },
1013 117830498UL,
1014 12,
1015 {
1016 0,
1017 0,
1018 0,
1019 0,
1020 0,
1021 0,
1022 0,
1023 0
1024 },
1025 true
1026};
1027
1028static const struct si_dte_data dte_data_cape_verde =
1029{
1030 { 0, 0, 0, 0, 0 },
1031 { 0, 0, 0, 0, 0 },
1032 0,
1033 0,
1034 0,
1035 0,
1036 0,
1037 0,
1038 0,
1039 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1040 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1041 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1042 0,
1043 false
1044};
1045
1046static const struct si_dte_data dte_data_venus_xtx =
1047{
1048 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1049 { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
1050 5,
1051 55000,
1052 0x69,
1053 0xA,
1054 1,
1055 0,
1056 0x3,
1057 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1058 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1059 { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1060 90,
1061 true
1062};
1063
1064static const struct si_dte_data dte_data_venus_xt =
1065{
1066 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1067 { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
1068 5,
1069 55000,
1070 0x69,
1071 0xA,
1072 1,
1073 0,
1074 0x3,
1075 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1076 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1077 { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1078 90,
1079 true
1080};
1081
1082static const struct si_dte_data dte_data_venus_pro =
1083{
1084 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1085 { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
1086 5,
1087 55000,
1088 0x69,
1089 0xA,
1090 1,
1091 0,
1092 0x3,
1093 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1094 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1095 { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1096 90,
1097 true
1098};
1099
1100struct si_cac_config_reg cac_weights_oland[] =
1101{
1102 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
1103 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
1104 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
1105 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
1106 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1107 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
1108 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
1109 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
1110 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
1111 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
1112 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
1113 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
1114 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
1115 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
1116 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
1117 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
1118 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
1119 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
1120 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
1121 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
1122 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
1123 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
1124 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
1125 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
1126 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
1127 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1128 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1129 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1130 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1131 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
1132 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1133 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
1134 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
1135 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
1136 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1137 { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
1138 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1139 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1140 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1141 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
1142 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
1143 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1144 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1145 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1146 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1147 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1148 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1149 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1150 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1151 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1152 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1153 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1154 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1155 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1156 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1157 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1158 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1159 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1160 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1161 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
1162 { 0xFFFFFFFF }
1163};
1164
1165static const struct si_cac_config_reg cac_weights_mars_pro[] =
1166{
1167 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1168 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1169 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1170 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1171 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1172 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1173 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1174 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1175 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1176 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1177 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1178 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1179 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1180 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1181 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1182 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1183 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1184 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1185 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1186 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1187 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1188 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1189 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1190 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1191 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1192 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1193 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1194 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1195 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1196 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1197 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1198 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1199 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1200 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1201 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1202 { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
1203 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1204 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1205 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1206 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1207 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1208 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1209 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1210 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1211 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1212 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1213 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1214 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1215 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1216 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1217 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1218 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1219 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1220 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1221 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1222 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1223 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1224 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1225 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1226 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1227 { 0xFFFFFFFF }
1228};
1229
1230static const struct si_cac_config_reg cac_weights_mars_xt[] =
1231{
1232 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1233 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1234 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1235 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1236 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1237 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1238 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1239 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1240 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1241 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1242 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1243 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1244 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1245 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1246 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1247 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1248 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1249 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1250 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1251 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1252 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1253 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1254 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1255 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1256 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1257 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1258 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1259 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1260 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1261 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1262 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1263 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1264 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1265 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1266 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1267 { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
1268 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1269 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1270 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1271 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1272 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1273 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1274 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1275 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1276 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1277 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1278 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1279 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1280 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1281 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1282 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1283 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1284 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1285 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1286 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1287 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1288 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1289 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1290 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1291 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1292 { 0xFFFFFFFF }
1293};
1294
/* CAC weight table for Oland Pro SKUs (e.g. 0x6611), terminated by the
 * 0xFFFFFFFF sentinel. Fields appear to be { reg index, mask, shift, value,
 * type } — struct si_cac_config_reg is declared elsewhere; TODO confirm. */
static const struct si_cac_config_reg cac_weights_oland_pro[] =
{
	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }	/* end-of-table sentinel */
};
1359
/* CAC weight table for Oland XT SKUs (e.g. 0x6610). Identical to
 * cac_weights_oland_pro except for the 0x14 entry (0x120 vs 0x90).
 * Terminated by the 0xFFFFFFFF sentinel. */
static const struct si_cac_config_reg cac_weights_oland_xt[] =
{
	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }	/* end-of-table sentinel */
};
1424
/* Local CAC (LCAC) config for Oland (also reused for Hainan below). Each
 * register gets a value/enable pair: mask 0x0001fffe at shift 1, then bit 0
 * set to 1. Terminated by the 0xFFFFFFFF sentinel. */
static const struct si_cac_config_reg lcac_oland[] =
{
	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }	/* end-of-table sentinel */
};
1471
/* Local CAC (LCAC) config for Mars Pro/XT and Oland Pro/XT SKUs. Same shape
 * as lcac_oland but with a different 0x143 value (0x2 vs 0x4). Terminated by
 * the 0xFFFFFFFF sentinel. */
static const struct si_cac_config_reg lcac_mars_pro[] =
{
	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }	/* end-of-table sentinel */
};
1518
/* No CAC overrides for Oland-family (and Hainan) — sentinel-only table. */
static const struct si_cac_config_reg cac_override_oland[] =
{
	{ 0xFFFFFFFF }
};
1523
/* PowerTune parameters for generic Oland boards. Field order follows
 * struct si_powertune_data (declared elsewhere); the nested block looks like
 * the leakage coefficients consumed by the si_calculate_leakage_* helpers
 * (t_slope, t_intercept, av, bv, t_ref) — TODO confirm against the struct. */
static const struct si_powertune_data powertune_data_oland =
{
	((1 << 16) | 0x6993),
	5,
	0,
	7,
	105,
	{
		0UL,
		0UL,
		7194395UL,
		309631529UL,
		-1270850L,
		4513710L,
		100
	},
	117830498UL,
	12,
	{
		0,
		0,
		0,
		0,
		0,
		0,
		0,
		0
	},
	true	/* powertune enabled by default (see si_initialize_powertune_defaults) */
};
1554
/* PowerTune parameters for Mars Pro/XT and Oland Pro/XT SKUs. Currently
 * identical to powertune_data_oland; kept separate so the SKUs can diverge. */
static const struct si_powertune_data powertune_data_mars_pro =
{
	((1 << 16) | 0x6993),
	5,
	0,
	7,
	105,
	{
		0UL,
		0UL,
		7194395UL,
		309631529UL,
		-1270850L,
		4513710L,
		100
	},
	117830498UL,
	12,
	{
		0,
		0,
		0,
		0,
		0,
		0,
		0,
		0
	},
	true	/* powertune enabled by default */
};
1585
/* DTE (Digital Temperature Estimation) parameters for generic Oland:
 * all-zero tables and enable_dte_by_default = false, i.e. DTE disabled. */
static const struct si_dte_data dte_data_oland =
{
	{ 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0 },
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	0,
	false	/* DTE disabled on plain Oland */
};
1603
/* DTE parameters for Mars Pro/XT and Oland Pro/XT SKUs; DTE enabled by
 * default. Differs from dte_data_sun_xt only in the last table's first
 * entry (0xF627 vs 0xD555). Field semantics are defined by struct
 * si_dte_data (declared elsewhere) — TODO confirm. */
static const struct si_dte_data dte_data_mars_pro =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	55000,
	105,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true	/* DTE enabled by default */
};
1621
/* DTE parameters for Sun XT (used for Hainan in
 * si_initialize_powertune_defaults); DTE enabled by default. */
static const struct si_dte_data dte_data_sun_xt =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	55000,
	105,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true	/* DTE enabled by default */
};
1639
1640
/* CAC weight table for Hainan, terminated by the 0xFFFFFFFF sentinel. */
static const struct si_cac_config_reg cac_weights_hainan[] =
{
	{ 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }	/* end-of-table sentinel */
};
1705
/* PowerTune parameters for Hainan. Same as powertune_data_oland except the
 * fourth scalar (7 -> 9); leakage coefficients are shared with Oland. */
static const struct si_powertune_data powertune_data_hainan =
{
	((1 << 16) | 0x6993),
	5,
	0,
	9,
	105,
	{
		0UL,
		0UL,
		7194395UL,
		309631529UL,
		-1270850L,
		4513710L,
		100
	},
	117830498UL,
	12,
	{
		0,
		0,
		0,
		0,
		0,
		0,
		0,
		0
	},
	true	/* powertune enabled by default */
};
1736
/* Accessors for per-generation power-info blobs, defined in the rv770/
 * evergreen/ni dpm files. */
struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
struct ni_power_info *ni_get_pi(struct radeon_device *rdev);
struct ni_ps *ni_get_ps(struct radeon_ps *rps);

/* Forward declarations for helpers defined later in this file. */
static int si_populate_voltage_value(struct radeon_device *rdev,
				     const struct atom_voltage_table *table,
				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
static int si_get_std_voltage_value(struct radeon_device *rdev,
				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
				    u16 *std_voltage);
static int si_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value);
static int si_convert_power_level_to_smc(struct radeon_device *rdev,
					 struct rv7xx_pl *pl,
					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
static int si_calculate_sclk_params(struct radeon_device *rdev,
				    u32 engine_clock,
				    SISLANDS_SMC_SCLK_VALUE *sclk);
1756
1757static struct si_power_info *si_get_pi(struct radeon_device *rdev)
1758{
1759 struct si_power_info *pi = rdev->pm.dpm.priv;
1760
1761 return pi;
1762}
1763
1764static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
1765 u16 v, s32 t, u32 ileakage, u32 *leakage)
1766{
1767 s64 kt, kv, leakage_w, i_leakage, vddc;
1768 s64 temperature, t_slope, t_intercept, av, bv, t_ref;
1769
1770 i_leakage = drm_int2fixp(ileakage / 100);
1771 vddc = div64_s64(drm_int2fixp(v), 1000);
1772 temperature = div64_s64(drm_int2fixp(t), 1000);
1773
1774 t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
1775 t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
1776 av = div64_s64(drm_int2fixp(coeff->av), 100000000);
1777 bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
1778 t_ref = drm_int2fixp(coeff->t_ref);
1779
1780 kt = drm_fixp_div(drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, temperature)),
1781 drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, t_ref)));
1782 kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
1783
1784 leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
1785
1786 *leakage = drm_fixp2int(leakage_w * 1000);
1787}
1788
/* Thin wrapper around si_calculate_leakage_for_v_and_t_formula(); rdev is
 * currently unused but kept for a uniform call signature. */
static void si_calculate_leakage_for_v_and_t(struct radeon_device *rdev,
					     const struct ni_leakage_coeffients *coeff,
					     u16 v,
					     s32 t,
					     u32 i_leakage,
					     u32 *leakage)
{
	si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
}
1798
1799static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
1800 const u32 fixed_kt, u16 v,
1801 u32 ileakage, u32 *leakage)
1802{
1803 s64 kt, kv, leakage_w, i_leakage, vddc;
1804
1805 i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
1806 vddc = div64_s64(drm_int2fixp(v), 1000);
1807
1808 kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
1809 kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
1810 drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
1811
1812 leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
1813
1814 *leakage = drm_fixp2int(leakage_w * 1000);
1815}
1816
/* Thin wrapper around si_calculate_leakage_for_v_formula(); rdev is
 * currently unused but kept for a uniform call signature. */
static void si_calculate_leakage_for_v(struct radeon_device *rdev,
				       const struct ni_leakage_coeffients *coeff,
				       const u32 fixed_kt,
				       u16 v,
				       u32 i_leakage,
				       u32 *leakage)
{
	si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
}
1826
1827
/* Rescale the DTE "r" table from the board's power limits (PL1/PL2) and
 * fill the temperature-dependent table from r[4]. Only runs when PL2 is
 * valid (non-zero and <= PL1); otherwise the DTE data is left untouched.
 * NOTE(review): the loop writes r[0..k-1] but tdep_r uses r[4]; this looks
 * like it assumes k >= 5 (k comes from the per-SKU dte_data) — confirm. */
static void si_update_dte_from_pl2(struct radeon_device *rdev,
				   struct si_dte_data *dte_data)
{
	u32 p_limit1 = rdev->pm.dpm.tdp_limit;
	u32 p_limit2 = rdev->pm.dpm.near_tdp_limit;
	u32 k = dte_data->k;
	u32 t_max = dte_data->max_t;
	u32 t_split[5] = { 10, 15, 20, 25, 30 };	/* temperature split points */
	u32 t_0 = dte_data->t0;
	u32 i;

	if (p_limit2 != 0 && p_limit2 <= p_limit1) {
		dte_data->tdep_count = 3;

		/* r[i] scales with the temperature split and inversely with PL2;
		 * t_0 is in millidegrees, hence the /1000; 1<<14 is the fixed-point
		 * scale used by the SMC. */
		for (i = 0; i < k; i++) {
			dte_data->r[i] =
				(t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) /
				(p_limit2 * (u32)100);
		}

		dte_data->tdep_r[1] = dte_data->r[4] * 2;

		/* Remaining temperature-dependent entries all use r[4]. */
		for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
			dte_data->tdep_r[i] = dte_data->r[4];
		}
	} else {
		DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
	}
}
1857
/* Select per-ASIC / per-PCI-device-ID powertune tables (CAC weights, LCAC
 * config, CAC overrides, powertune data, DTE data) and derive the initial
 * power-containment / CAC / DTE / SQ-ramping enables from them. Called once
 * during dpm init; on an unknown SI family it logs an error and leaves the
 * enables untouched. */
static void si_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct si_power_info *si_pi = si_get_pi(rdev);
	bool update_dte_from_pl2 = false;

	if (rdev->family == CHIP_TAHITI) {
		/* Family-wide defaults first; SKU-specific DTE data below. */
		si_pi->cac_weights = cac_weights_tahiti;
		si_pi->lcac_config = lcac_tahiti;
		si_pi->cac_override = cac_override_tahiti;
		si_pi->powertune_data = &powertune_data_tahiti;
		si_pi->dte_data = dte_data_tahiti;

		switch (rdev->pdev->device) {
		case 0x6798:
			si_pi->dte_data.enable_dte_by_default = true;
			break;
		case 0x6799:
			si_pi->dte_data = dte_data_new_zealand;
			break;
		case 0x6790:
		case 0x6791:
		case 0x6792:
		case 0x679E:
			si_pi->dte_data = dte_data_aruba_pro;
			update_dte_from_pl2 = true;
			break;
		case 0x679B:
			si_pi->dte_data = dte_data_malta;
			update_dte_from_pl2 = true;
			break;
		case 0x679A:
			si_pi->dte_data = dte_data_tahiti_pro;
			update_dte_from_pl2 = true;
			break;
		default:
			if (si_pi->dte_data.enable_dte_by_default == true)
				DRM_ERROR("DTE is not enabled!\n");
			break;
		}
	} else if (rdev->family == CHIP_PITCAIRN) {
		switch (rdev->pdev->device) {
		case 0x6810:
		case 0x6818:
			si_pi->cac_weights = cac_weights_pitcairn;
			si_pi->lcac_config = lcac_pitcairn;
			si_pi->cac_override = cac_override_pitcairn;
			si_pi->powertune_data = &powertune_data_pitcairn;
			si_pi->dte_data = dte_data_curacao_xt;
			update_dte_from_pl2 = true;
			break;
		case 0x6819:
		case 0x6811:
			si_pi->cac_weights = cac_weights_pitcairn;
			si_pi->lcac_config = lcac_pitcairn;
			si_pi->cac_override = cac_override_pitcairn;
			si_pi->powertune_data = &powertune_data_pitcairn;
			si_pi->dte_data = dte_data_curacao_pro;
			update_dte_from_pl2 = true;
			break;
		case 0x6800:
		case 0x6806:
			si_pi->cac_weights = cac_weights_pitcairn;
			si_pi->lcac_config = lcac_pitcairn;
			si_pi->cac_override = cac_override_pitcairn;
			si_pi->powertune_data = &powertune_data_pitcairn;
			si_pi->dte_data = dte_data_neptune_xt;
			update_dte_from_pl2 = true;
			break;
		default:
			si_pi->cac_weights = cac_weights_pitcairn;
			si_pi->lcac_config = lcac_pitcairn;
			si_pi->cac_override = cac_override_pitcairn;
			si_pi->powertune_data = &powertune_data_pitcairn;
			si_pi->dte_data = dte_data_pitcairn;
		}
	} else if (rdev->family == CHIP_VERDE) {
		/* Cape Verde shares lcac/override/powertune across all SKUs;
		 * only CAC weights and DTE data vary. */
		si_pi->lcac_config = lcac_cape_verde;
		si_pi->cac_override = cac_override_cape_verde;
		si_pi->powertune_data = &powertune_data_cape_verde;

		switch (rdev->pdev->device) {
		case 0x683B:
		case 0x683F:
		case 0x6829:
			si_pi->cac_weights = cac_weights_cape_verde_pro;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		case 0x6825:
		case 0x6827:
			si_pi->cac_weights = cac_weights_heathrow;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		case 0x6824:
		case 0x682D:
			si_pi->cac_weights = cac_weights_chelsea_xt;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		case 0x682F:
			si_pi->cac_weights = cac_weights_chelsea_pro;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		case 0x6820:
			si_pi->cac_weights = cac_weights_heathrow;
			si_pi->dte_data = dte_data_venus_xtx;
			break;
		case 0x6821:
			si_pi->cac_weights = cac_weights_heathrow;
			si_pi->dte_data = dte_data_venus_xt;
			break;
		case 0x6823:
			si_pi->cac_weights = cac_weights_chelsea_pro;
			si_pi->dte_data = dte_data_venus_pro;
			break;
		case 0x682B:
			/* same tables as 0x6823 */
			si_pi->cac_weights = cac_weights_chelsea_pro;
			si_pi->dte_data = dte_data_venus_pro;
			break;
		default:
			si_pi->cac_weights = cac_weights_cape_verde;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		}
	} else if (rdev->family == CHIP_OLAND) {
		switch (rdev->pdev->device) {
		case 0x6601:
		case 0x6621:
		case 0x6603:
			si_pi->cac_weights = cac_weights_mars_pro;
			si_pi->lcac_config = lcac_mars_pro;
			si_pi->cac_override = cac_override_oland;
			si_pi->powertune_data = &powertune_data_mars_pro;
			si_pi->dte_data = dte_data_mars_pro;
			update_dte_from_pl2 = true;
			break;
		case 0x6600:
		case 0x6606:
		case 0x6620:
			si_pi->cac_weights = cac_weights_mars_xt;
			si_pi->lcac_config = lcac_mars_pro;
			si_pi->cac_override = cac_override_oland;
			si_pi->powertune_data = &powertune_data_mars_pro;
			si_pi->dte_data = dte_data_mars_pro;
			update_dte_from_pl2 = true;
			break;
		case 0x6611:
			si_pi->cac_weights = cac_weights_oland_pro;
			si_pi->lcac_config = lcac_mars_pro;
			si_pi->cac_override = cac_override_oland;
			si_pi->powertune_data = &powertune_data_mars_pro;
			si_pi->dte_data = dte_data_mars_pro;
			update_dte_from_pl2 = true;
			break;
		case 0x6610:
			si_pi->cac_weights = cac_weights_oland_xt;
			si_pi->lcac_config = lcac_mars_pro;
			si_pi->cac_override = cac_override_oland;
			si_pi->powertune_data = &powertune_data_mars_pro;
			si_pi->dte_data = dte_data_mars_pro;
			update_dte_from_pl2 = true;
			break;
		default:
			si_pi->cac_weights = cac_weights_oland;
			si_pi->lcac_config = lcac_oland;
			si_pi->cac_override = cac_override_oland;
			si_pi->powertune_data = &powertune_data_oland;
			si_pi->dte_data = dte_data_oland;
			break;
		}
	} else if (rdev->family == CHIP_HAINAN) {
		si_pi->cac_weights = cac_weights_hainan;
		si_pi->lcac_config = lcac_oland;
		si_pi->cac_override = cac_override_oland;
		si_pi->powertune_data = &powertune_data_hainan;
		si_pi->dte_data = dte_data_sun_xt;
		update_dte_from_pl2 = true;
	} else {
		DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
		return;
	}

	/* Derive the enables from the selected tables. */
	ni_pi->enable_power_containment = false;
	ni_pi->enable_cac = false;
	ni_pi->enable_sq_ramping = false;
	si_pi->enable_dte = false;

	if (si_pi->powertune_data->enable_powertune_by_default) {
		ni_pi->enable_power_containment= true;
		ni_pi->enable_cac = true;
		if (si_pi->dte_data.enable_dte_by_default) {
			si_pi->enable_dte = true;
			if (update_dte_from_pl2)
				si_update_dte_from_pl2(rdev, &si_pi->dte_data);

		}
		ni_pi->enable_sq_ramping = true;
	}

	ni_pi->driver_calculate_cac_leakage = true;
	ni_pi->cac_configuration_required = true;

	/* cac_configuration_required is hard-coded true just above, so the
	 * else branch is currently dead; kept for symmetry. */
	if (ni_pi->cac_configuration_required) {
		ni_pi->support_cac_long_term_average = true;
		si_pi->dyn_powertune_data.l2_lta_window_size =
			si_pi->powertune_data->l2_lta_window_size_default;
		si_pi->dyn_powertune_data.lts_truncate =
			si_pi->powertune_data->lts_truncate_default;
	} else {
		ni_pi->support_cac_long_term_average = false;
		si_pi->dyn_powertune_data.l2_lta_window_size = 0;
		si_pi->dyn_powertune_data.lts_truncate = 0;
	}

	si_pi->dyn_powertune_data.disable_uvd_powertune = false;
}
2073
/* Power scaling factor for the SMC; 1:1 on SI (no scaling applied). */
static u32 si_get_smc_power_scaling_factor(struct radeon_device *rdev)
{
	return 1;
}
2078
2079static u32 si_calculate_cac_wintime(struct radeon_device *rdev)
2080{
2081 u32 xclk;
2082 u32 wintime;
2083 u32 cac_window;
2084 u32 cac_window_size;
2085
2086 xclk = radeon_get_xclk(rdev);
2087
2088 if (xclk == 0)
2089 return 0;
2090
2091 cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
2092 cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
2093
2094 wintime = (cac_window_size * 100) / xclk;
2095
2096 return wintime;
2097}
2098
/* Identity on SI: the SMC consumes power values unscaled (scaling_factor
 * is unused here; see si_get_smc_power_scaling_factor which returns 1). */
static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
{
	return power_in_watts;
}
2103
2104static int si_calculate_adjusted_tdp_limits(struct radeon_device *rdev,
2105 bool adjust_polarity,
2106 u32 tdp_adjustment,
2107 u32 *tdp_limit,
2108 u32 *near_tdp_limit)
2109{
2110 u32 adjustment_delta, max_tdp_limit;
2111
2112 if (tdp_adjustment > (u32)rdev->pm.dpm.tdp_od_limit)
2113 return -EINVAL;
2114
2115 max_tdp_limit = ((100 + 100) * rdev->pm.dpm.tdp_limit) / 100;
2116
2117 if (adjust_polarity) {
2118 *tdp_limit = ((100 + tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
2119 *near_tdp_limit = rdev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - rdev->pm.dpm.tdp_limit);
2120 } else {
2121 *tdp_limit = ((100 - tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
2122 adjustment_delta = rdev->pm.dpm.tdp_limit - *tdp_limit;
2123 if (adjustment_delta < rdev->pm.dpm.near_tdp_limit_adjusted)
2124 *near_tdp_limit = rdev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
2125 else
2126 *near_tdp_limit = 0;
2127 }
2128
2129 if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit))
2130 return -EINVAL;
2131 if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit))
2132 return -EINVAL;
2133
2134 return 0;
2135}
2136
/* Compute the adjusted TDP / near-TDP / safe power limits, convert them to
 * big-endian mW, and upload them into the SMC state table; optionally also
 * program the PAPM (platform power management) parameters. No-op (returns 0)
 * when power containment is disabled. Returns 0 or a negative errno. */
static int si_populate_smc_tdp_limits(struct radeon_device *rdev,
				      struct radeon_ps *radeon_state)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct si_power_info *si_pi = si_get_pi(rdev);

	if (ni_pi->enable_power_containment) {
		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
		PP_SIslands_PAPMParameters *papm_parm;
		struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
		u32 scaling_factor = si_get_smc_power_scaling_factor(rdev);
		u32 tdp_limit;
		u32 near_tdp_limit;
		int ret;

		if (scaling_factor == 0)
			return -EINVAL;

		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));

		ret = si_calculate_adjusted_tdp_limits(rdev,
						       false, /* adjust_polarity: always lower — rationale unclear, TODO confirm */
						       rdev->pm.dpm.tdp_adjustment,
						       &tdp_limit,
						       &near_tdp_limit);
		if (ret)
			return ret;

		/* SMC wants big-endian milliwatts. */
		smc_table->dpm2Params.TDPLimit =
			cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
		smc_table->dpm2Params.NearTDPLimit =
			cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
		smc_table->dpm2Params.SafePowerLimit =
			cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);

		/* Upload the three consecutive u32 fields starting at TDPLimit. */
		ret = si_copy_bytes_to_smc(rdev,
					   (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
					    offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
					   (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
					   sizeof(u32) * 3,
					   si_pi->sram_end);
		if (ret)
			return ret;

		if (si_pi->enable_ppm) {
			papm_parm = &si_pi->papm_parm;
			memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
			papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
			papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
			papm_parm->dGPU_T_Warning = cpu_to_be32(95);
			papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
			papm_parm->PlatformPowerLimit = 0xffffffff;	/* effectively unlimited */
			papm_parm->NearTDPLimitPAPM = 0xffffffff;

			ret = si_copy_bytes_to_smc(rdev, si_pi->papm_cfg_table_start,
						   (u8 *)papm_parm,
						   sizeof(PP_SIslands_PAPMParameters),
						   si_pi->sram_end);
			if (ret)
				return ret;
		}
	}
	return 0;
}
2201
2202static int si_populate_smc_tdp_limits_2(struct radeon_device *rdev,
2203 struct radeon_ps *radeon_state)
2204{
2205 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2206 struct si_power_info *si_pi = si_get_pi(rdev);
2207
2208 if (ni_pi->enable_power_containment) {
2209 SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
2210 u32 scaling_factor = si_get_smc_power_scaling_factor(rdev);
2211 int ret;
2212
2213 memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
2214
2215 smc_table->dpm2Params.NearTDPLimit =
2216 cpu_to_be32(si_scale_power_for_smc(rdev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
2217 smc_table->dpm2Params.SafePowerLimit =
2218 cpu_to_be32(si_scale_power_for_smc((rdev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
2219
2220 ret = si_copy_bytes_to_smc(rdev,
2221 (si_pi->state_table_start +
2222 offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
2223 offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
2224 (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
2225 sizeof(u32) * 2,
2226 si_pi->sram_end);
2227 if (ret)
2228 return ret;
2229 }
2230
2231 return 0;
2232}
2233
2234static u16 si_calculate_power_efficiency_ratio(struct radeon_device *rdev,
2235 const u16 prev_std_vddc,
2236 const u16 curr_std_vddc)
2237{
2238 u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
2239 u64 prev_vddc = (u64)prev_std_vddc;
2240 u64 curr_vddc = (u64)curr_std_vddc;
2241 u64 pwr_efficiency_ratio, n, d;
2242
2243 if ((prev_vddc == 0) || (curr_vddc == 0))
2244 return 0;
2245
2246 n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
2247 d = prev_vddc * prev_vddc;
2248 pwr_efficiency_ratio = div64_u64(n, d);
2249
2250 if (pwr_efficiency_ratio > (u64)0xFFFF)
2251 return 0;
2252
2253 return (u16)pwr_efficiency_ratio;
2254}
2255
2256static bool si_should_disable_uvd_powertune(struct radeon_device *rdev,
2257 struct radeon_ps *radeon_state)
2258{
2259 struct si_power_info *si_pi = si_get_pi(rdev);
2260
2261 if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
2262 radeon_state->vclk && radeon_state->dclk)
2263 return true;
2264
2265 return false;
2266}
2267
/*
 * Fill the per-level DPM2 power-containment fields of an SMC software
 * state: pulse-skip budget (MaxPS), near-TDP ramp rates, and the power
 * efficiency ratio between each pair of adjacent levels.
 * Returns 0 on success (or when containment is disabled), negative errno
 * on inconsistent state or failed voltage-table lookups.
 */
static int si_populate_power_containment_values(struct radeon_device *rdev,
						struct radeon_ps *radeon_state,
						SISLANDS_SMC_SWSTATE *smc_state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	SISLANDS_SMC_VOLTAGE_VALUE vddc;
	u32 prev_sclk;
	u32 max_sclk;
	u32 min_sclk;
	u16 prev_std_vddc;
	u16 curr_std_vddc;
	int i;
	u16 pwr_efficiency_ratio;
	u8 max_ps_percent;
	bool disable_uvd_power_tune;
	int ret;

	if (ni_pi->enable_power_containment == false)
		return 0;

	if (state->performance_level_count == 0)
		return -EINVAL;

	/* caller must have sized the SMC state to match the radeon state */
	if (smc_state->levelCount != state->performance_level_count)
		return -EINVAL;

	disable_uvd_power_tune = si_should_disable_uvd_powertune(rdev, radeon_state);

	/* level 0 (lowest) is never throttled: all DPM2 knobs stay zero */
	smc_state->levels[0].dpm2.MaxPS = 0;
	smc_state->levels[0].dpm2.NearTDPDec = 0;
	smc_state->levels[0].dpm2.AboveSafeInc = 0;
	smc_state->levels[0].dpm2.BelowSafeInc = 0;
	smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;

	for (i = 1; i < state->performance_level_count; i++) {
		prev_sclk = state->performance_levels[i-1].sclk;
		max_sclk = state->performance_levels[i].sclk;
		/* _M percentage for the first step up, _H for the rest */
		if (i == 1)
			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
		else
			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;

		/* levels must be sorted by ascending sclk */
		if (prev_sclk > max_sclk)
			return -EINVAL;

		if ((max_ps_percent == 0) ||
		    (prev_sclk == max_sclk) ||
		    disable_uvd_power_tune) {
			min_sclk = max_sclk;	/* no pulse skipping */
		} else if (i == 1) {
			min_sclk = prev_sclk;
		} else {
			min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
		}

		/* never throttle below the lowest performance level */
		if (min_sclk < state->performance_levels[0].sclk)
			min_sclk = state->performance_levels[0].sclk;

		if (min_sclk == 0)
			return -EINVAL;

		/* standardized VDDC of the previous level... */
		ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
						state->performance_levels[i-1].vddc, &vddc);
		if (ret)
			return ret;

		ret = si_get_std_voltage_value(rdev, &vddc, &prev_std_vddc);
		if (ret)
			return ret;

		/* ...and of the current level */
		ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
						state->performance_levels[i].vddc, &vddc);
		if (ret)
			return ret;

		ret = si_get_std_voltage_value(rdev, &vddc, &curr_std_vddc);
		if (ret)
			return ret;

		pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(rdev,
									   prev_std_vddc, curr_std_vddc);

		/* MaxPS: share of pulses that may be skipped at this level */
		smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
		smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
		smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
		smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
		smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
	}

	return 0;
}
2361
/*
 * Program per-level SQ power-throttle values into the SMC software state.
 * Ramping is applied only to levels at or above sq_ramping_threshold and
 * only when each SISLANDS_DPM2_SQ_RAMP_* constant fits its register
 * field; otherwise the fields are written fully masked.
 */
static int si_populate_sq_ramping_values(struct radeon_device *rdev,
					 struct radeon_ps *radeon_state,
					 SISLANDS_SMC_SWSTATE *smc_state)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	u32 sq_power_throttle, sq_power_throttle2;
	bool enable_sq_ramping = ni_pi->enable_sq_ramping;
	int i;

	if (state->performance_level_count == 0)
		return -EINVAL;

	if (smc_state->levelCount != state->performance_level_count)
		return -EINVAL;

	if (rdev->pm.dpm.sq_ramping_threshold == 0)
		return -EINVAL;

	/* disable ramping if any constant would overflow its field */
	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
		enable_sq_ramping = false;

	if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
		enable_sq_ramping = false;

	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
		enable_sq_ramping = false;

	if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
		enable_sq_ramping = false;

	/*
	 * NOTE(review): unlike the four '>' checks above, this one uses '<='
	 * and the NISLANDS_* constant (carried over from ni_dpm.c), so it
	 * disables ramping when the ratio *does* fit the field.  Looks
	 * inverted -- confirm the original intent before changing.
	 */
	if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
		enable_sq_ramping = false;

	for (i = 0; i < state->performance_level_count; i++) {
		sq_power_throttle = 0;
		sq_power_throttle2 = 0;

		if ((state->performance_levels[i].sclk >= rdev->pm.dpm.sq_ramping_threshold) &&
		    enable_sq_ramping) {
			sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
			sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
			sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
			sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
			sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
		} else {
			/* saturate every field when ramping is not used */
			sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
			sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
		}

		smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
		smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
	}

	return 0;
}
2418
2419static int si_enable_power_containment(struct radeon_device *rdev,
2420 struct radeon_ps *radeon_new_state,
2421 bool enable)
2422{
2423 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2424 PPSMC_Result smc_result;
2425 int ret = 0;
2426
2427 if (ni_pi->enable_power_containment) {
2428 if (enable) {
2429 if (!si_should_disable_uvd_powertune(rdev, radeon_new_state)) {
2430 smc_result = si_send_msg_to_smc(rdev, PPSMC_TDPClampingActive);
2431 if (smc_result != PPSMC_Result_OK) {
2432 ret = -EINVAL;
2433 ni_pi->pc_enabled = false;
2434 } else {
2435 ni_pi->pc_enabled = true;
2436 }
2437 }
2438 } else {
2439 smc_result = si_send_msg_to_smc(rdev, PPSMC_TDPClampingInactive);
2440 if (smc_result != PPSMC_Result_OK)
2441 ret = -EINVAL;
2442 ni_pi->pc_enabled = false;
2443 }
2444 }
2445
2446 return ret;
2447}
2448
/*
 * Build the digital temperature estimation (DTE) configuration from
 * si_pi->dte_data and upload it to SMC SRAM.  Returns 0 when DTE is
 * disabled or on success, negative errno otherwise.
 */
static int si_initialize_smc_dte_tables(struct radeon_device *rdev)
{
	struct si_power_info *si_pi = si_get_pi(rdev);
	int ret = 0;
	struct si_dte_data *dte_data = &si_pi->dte_data;
	Smc_SIslands_DTE_Configuration *dte_tables = NULL;
	u32 table_size;
	u8 tdep_count;
	u32 i;

	/* defensive only: dte_data points at an embedded member, never NULL */
	if (dte_data == NULL)
		si_pi->enable_dte = false;

	if (si_pi->enable_dte == false)
		return 0;

	/* k is unsigned, so this is effectively a zero check */
	if (dte_data->k <= 0)
		return -EINVAL;

	dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
	if (dte_tables == NULL) {
		si_pi->enable_dte = false;
		return -ENOMEM;
	}

	/* clamp filter stages and Tdep entries to the SMC table limits */
	table_size = dte_data->k;

	if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
		table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;

	tdep_count = dte_data->tdep_count;
	if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
		tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;

	/* multi-byte fields are stored big-endian for the SMC */
	dte_tables->K = cpu_to_be32(table_size);
	dte_tables->T0 = cpu_to_be32(dte_data->t0);
	dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
	dte_tables->WindowSize = dte_data->window_size;
	dte_tables->temp_select = dte_data->temp_select;
	dte_tables->DTE_mode = dte_data->dte_mode;
	dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);

	/* NOTE(review): with Tdep entries the last tau/R stage is dropped,
	 * presumably replaced by the Tdep arrays below -- confirm */
	if (tdep_count > 0)
		table_size--;

	for (i = 0; i < table_size; i++) {
		dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
		dte_tables->R[i] = cpu_to_be32(dte_data->r[i]);
	}

	dte_tables->Tdep_count = tdep_count;

	/* temperature-dependent tau/R entries with their limits */
	for (i = 0; i < (u32)tdep_count; i++) {
		dte_tables->T_limits[i] = dte_data->t_limits[i];
		dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
		dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
	}

	ret = si_copy_bytes_to_smc(rdev, si_pi->dte_table_start, (u8 *)dte_tables,
				   sizeof(Smc_SIslands_DTE_Configuration), si_pi->sram_end);
	kfree(dte_tables);

	return ret;
}
2513
2514static int si_get_cac_std_voltage_max_min(struct radeon_device *rdev,
2515 u16 *max, u16 *min)
2516{
2517 struct si_power_info *si_pi = si_get_pi(rdev);
2518 struct radeon_cac_leakage_table *table =
2519 &rdev->pm.dpm.dyn_state.cac_leakage_table;
2520 u32 i;
2521 u32 v0_loadline;
2522
2523
2524 if (table == NULL)
2525 return -EINVAL;
2526
2527 *max = 0;
2528 *min = 0xFFFF;
2529
2530 for (i = 0; i < table->count; i++) {
2531 if (table->entries[i].vddc > *max)
2532 *max = table->entries[i].vddc;
2533 if (table->entries[i].vddc < *min)
2534 *min = table->entries[i].vddc;
2535 }
2536
2537 if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
2538 return -EINVAL;
2539
2540 v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;
2541
2542 if (v0_loadline > 0xFFFFUL)
2543 return -EINVAL;
2544
2545 *min = (u16)v0_loadline;
2546
2547 if ((*min > *max) || (*max == 0) || (*min == 0))
2548 return -EINVAL;
2549
2550 return 0;
2551}
2552
/*
 * Voltage step for the leakage LUT: (max - min) divided by the number of
 * LUT voltage entries, rounded up so the table spans the whole range.
 */
static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
{
	return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
		SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
}
2558
/*
 * Fill the SMC CAC leakage LUT with temperature- and voltage-dependent
 * values: rows index temperatures (t0 + i*t_step, scaled by 1000),
 * columns index voltages, stored low-to-high.
 * Note: vddc_min is accepted for signature symmetry with the simplified
 * variant but unused; columns come from vddc_max and vddc_step.
 */
static int si_init_dte_leakage_table(struct radeon_device *rdev,
				     PP_SIslands_CacConfig *cac_tables,
				     u16 vddc_max, u16 vddc_min, u16 vddc_step,
				     u16 t0, u16 t_step)
{
	struct si_power_info *si_pi = si_get_pi(rdev);
	u32 leakage;
	unsigned int i, j;
	s32 t;
	u32 smc_leakage;
	u32 scaling_factor;
	u16 voltage;

	scaling_factor = si_get_smc_power_scaling_factor(rdev);

	for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) {
		t = (1000 * (i * t_step + t0));	/* temperature * 1000 */

		for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
			voltage = vddc_max - (vddc_step * j);

			si_calculate_leakage_for_v_and_t(rdev,
							 &si_pi->powertune_data->leakage_coefficients,
							 voltage,
							 t,
							 si_pi->dyn_powertune_data.cac_leakage,
							 &leakage);

			/* scale for the SMC and saturate to 16 bits */
			smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;

			if (smc_leakage > 0xFFFF)
				smc_leakage = 0xFFFF;

			/* j walks high-to-low voltage; LUT stores low-to-high */
			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
				cpu_to_be16((u16)smc_leakage);
		}
	}
	return 0;
}
2598
2599static int si_init_simplified_leakage_table(struct radeon_device *rdev,
2600 PP_SIslands_CacConfig *cac_tables,
2601 u16 vddc_max, u16 vddc_min, u16 vddc_step)
2602{
2603 struct si_power_info *si_pi = si_get_pi(rdev);
2604 u32 leakage;
2605 unsigned int i, j;
2606 u32 smc_leakage;
2607 u32 scaling_factor;
2608 u16 voltage;
2609
2610 scaling_factor = si_get_smc_power_scaling_factor(rdev);
2611
2612 for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
2613 voltage = vddc_max - (vddc_step * j);
2614
2615 si_calculate_leakage_for_v(rdev,
2616 &si_pi->powertune_data->leakage_coefficients,
2617 si_pi->powertune_data->fixed_kt,
2618 voltage,
2619 si_pi->dyn_powertune_data.cac_leakage,
2620 &leakage);
2621
2622 smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
2623
2624 if (smc_leakage > 0xFFFF)
2625 smc_leakage = 0xFFFF;
2626
2627 for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++)
2628 cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
2629 cpu_to_be16((u16)smc_leakage);
2630 }
2631 return 0;
2632}
2633
/*
 * Build the CAC (leakage) configuration and upload it to SMC SRAM.
 * Best-effort: on any failure CAC and power containment are disabled but
 * 0 is still returned, so DPM bring-up continues without them.
 */
static int si_initialize_smc_cac_tables(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct si_power_info *si_pi = si_get_pi(rdev);
	PP_SIslands_CacConfig *cac_tables = NULL;
	u16 vddc_max, vddc_min, vddc_step;
	u16 t0, t_step;
	u32 load_line_slope, reg;
	int ret = 0;
	u32 ticks_per_us = radeon_get_xclk(rdev) / 100;

	if (ni_pi->enable_cac == false)
		return 0;

	cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
	if (!cac_tables)
		return -ENOMEM;

	/* program the CAC measurement window */
	reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
	reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
	WREG32(CG_CAC_CTRL, reg);

	si_pi->dyn_powertune_data.cac_leakage = rdev->pm.dpm.cac_leakage;
	si_pi->dyn_powertune_data.dc_pwr_value =
		si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
	si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(rdev);
	si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;

	si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;	/* 80, scaled by 1000 */

	ret = si_get_cac_std_voltage_max_min(rdev, &vddc_max, &vddc_min);
	if (ret)
		goto done_free;

	/* recompute vddc_min so the LUT exactly spans down from vddc_max */
	vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
	vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
	t_step = 4;	/* temperature LUT: starts at t0, steps by t_step */
	t0 = 60;

	/* full temperature-dependent model vs. fixed-temperature fallback */
	if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
		ret = si_init_dte_leakage_table(rdev, cac_tables,
						vddc_max, vddc_min, vddc_step,
						t0, t_step);
	else
		ret = si_init_simplified_leakage_table(rdev, cac_tables,
						       vddc_max, vddc_min, vddc_step);
	if (ret)
		goto done_free;

	load_line_slope = ((u32)rdev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;

	/* multi-byte fields are stored big-endian for the SMC */
	cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
	cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
	cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
	cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
	cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
	cac_tables->R_LL = cpu_to_be32(load_line_slope);
	cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
	cac_tables->calculation_repeats = cpu_to_be32(2);
	cac_tables->dc_cac = cpu_to_be32(0);
	cac_tables->log2_PG_LKG_SCALE = 12;
	cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
	cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
	cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);

	ret = si_copy_bytes_to_smc(rdev, si_pi->cac_table_start, (u8 *)cac_tables,
				   sizeof(PP_SIslands_CacConfig), si_pi->sram_end);

	if (ret)
		goto done_free;

	ret = si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);

done_free:
	if (ret) {
		/* fall back to running without CAC / power containment */
		ni_pi->enable_cac = false;
		ni_pi->enable_power_containment = false;
	}

	kfree(cac_tables);

	return 0;
}
2717
2718static int si_program_cac_config_registers(struct radeon_device *rdev,
2719 const struct si_cac_config_reg *cac_config_regs)
2720{
2721 const struct si_cac_config_reg *config_regs = cac_config_regs;
2722 u32 data = 0, offset;
2723
2724 if (!config_regs)
2725 return -EINVAL;
2726
2727 while (config_regs->offset != 0xFFFFFFFF) {
2728 switch (config_regs->type) {
2729 case SISLANDS_CACCONFIG_CGIND:
2730 offset = SMC_CG_IND_START + config_regs->offset;
2731 if (offset < SMC_CG_IND_END)
2732 data = RREG32_SMC(offset);
2733 break;
2734 default:
2735 data = RREG32(config_regs->offset << 2);
2736 break;
2737 }
2738
2739 data &= ~config_regs->mask;
2740 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
2741
2742 switch (config_regs->type) {
2743 case SISLANDS_CACCONFIG_CGIND:
2744 offset = SMC_CG_IND_START + config_regs->offset;
2745 if (offset < SMC_CG_IND_END)
2746 WREG32_SMC(offset, data);
2747 break;
2748 default:
2749 WREG32(config_regs->offset << 2, data);
2750 break;
2751 }
2752 config_regs++;
2753 }
2754 return 0;
2755}
2756
2757static int si_initialize_hardware_cac_manager(struct radeon_device *rdev)
2758{
2759 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2760 struct si_power_info *si_pi = si_get_pi(rdev);
2761 int ret;
2762
2763 if ((ni_pi->enable_cac == false) ||
2764 (ni_pi->cac_configuration_required == false))
2765 return 0;
2766
2767 ret = si_program_cac_config_registers(rdev, si_pi->lcac_config);
2768 if (ret)
2769 return ret;
2770 ret = si_program_cac_config_registers(rdev, si_pi->cac_override);
2771 if (ret)
2772 return ret;
2773 ret = si_program_cac_config_registers(rdev, si_pi->cac_weights);
2774 if (ret)
2775 return ret;
2776
2777 return 0;
2778}
2779
/*
 * Enable or disable CAC via SMC messages, together with optional
 * long-term averaging and DTE.  Ordering matters: on enable, long-term
 * averaging is requested before EnableCac and DTE after it; the disable
 * sequence is the reverse.  The enable path is skipped for UVD states.
 */
static int si_enable_smc_cac(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     bool enable)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct si_power_info *si_pi = si_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (ni_pi->enable_cac) {
		if (enable) {
			if (!si_should_disable_uvd_powertune(rdev, radeon_new_state)) {
				if (ni_pi->support_cac_long_term_average) {
					smc_result = si_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgEnable);
					/* not fatal: just drop long-term averaging */
					if (smc_result != PPSMC_Result_OK)
						ni_pi->support_cac_long_term_average = false;
				}

				smc_result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
					ni_pi->cac_enabled = false;
				} else {
					ni_pi->cac_enabled = true;
				}

				if (si_pi->enable_dte) {
					smc_result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
					if (smc_result != PPSMC_Result_OK)
						ret = -EINVAL;
				}
			}
		} else if (ni_pi->cac_enabled) {
			/* disable path ignores the SMC result codes */
			if (si_pi->enable_dte)
				smc_result = si_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			smc_result = si_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);

			ni_pi->cac_enabled = false;

			if (ni_pi->support_cac_long_term_average)
				smc_result = si_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgDisable);
		}
	}
	return ret;
}
2826
/*
 * Precompute SPLL divider settings for 256 sclk points (stepped by 512
 * sclk units per entry), pack them into the SMC SPLL divider table and
 * upload it.  On any failure, power containment is disabled.
 */
static int si_init_smc_spll_table(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct si_power_info *si_pi = si_get_pi(rdev);
	SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
	SISLANDS_SMC_SCLK_VALUE sclk_params;
	u32 fb_div, p_div;
	u32 clk_s, clk_v;
	u32 sclk = 0;
	int ret = 0;
	u32 tmp;
	int i;

	if (si_pi->spll_table_start == 0)
		return -EINVAL;

	spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
	if (spll_table == NULL)
		return -ENOMEM;

	for (i = 0; i < 256; i++) {
		ret = si_calculate_sclk_params(rdev, sclk, &sclk_params);
		if (ret)
			break;

		/* extract the divider fields from the computed PLL registers */
		p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
		fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
		clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
		clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;

		/*
		 * NOTE(review): this clears the low 13 bits of the already
		 * extracted feedback divider before halving it; verify
		 * against the SMC table format before touching.
		 */
		fb_div &= ~0x00001FFF;
		fb_div >>= 1;
		clk_v >>= 6;

		/* reject values that cannot be packed into their fields */
		if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
			ret = -EINVAL;
		if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
			ret = -EINVAL;
		if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
			ret = -EINVAL;
		if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
			ret = -EINVAL;

		if (ret)
			break;

		/* pack {fbdiv|pdiv} and {clkv|clks}, stored big-endian */
		tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
			((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
		spll_table->freq[i] = cpu_to_be32(tmp);

		tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
			((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
		spll_table->ss[i] = cpu_to_be32(tmp);

		sclk += 512;	/* next table point */
	}


	if (!ret)
		ret = si_copy_bytes_to_smc(rdev, si_pi->spll_table_start,
					   (u8 *)spll_table, sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
					   si_pi->sram_end);

	if (ret)
		ni_pi->enable_power_containment = false;

	kfree(spll_table);

	return ret;
}
2897
/*
 * Clamp and normalize a requested power state in place before it is
 * programmed: enforce the AC/DC clock+voltage limits, keep sclk/vddc
 * (and mclk/vddci) non-decreasing across levels, pin a single mclk and
 * vddci when more than one crtc is active, then run the btc_* voltage
 * dependency and delta rules.  Also recomputes ps->dc_compatible.
 */
static void si_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ni_ps *ps = ni_get_ps(rps);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 mclk, sclk;
	u16 vddc, vddci;
	int i;

	/* with multiple active crtcs, a single mclk is enforced below */
	if (rdev->pm.dpm.new_active_crtc_count > 1)
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	/* sweep downward so vddc never decreases from low to high level */
	for (i = ps->performance_level_count - 2; i >= 0; i--) {
		if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
			ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
	}
	/* on battery, clamp every level to the DC limits */
	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
			if (ps->performance_levels[i].vddc > max_limits->vddc)
				ps->performance_levels[i].vddc = max_limits->vddc;
			if (ps->performance_levels[i].vddci > max_limits->vddci)
				ps->performance_levels[i].vddci = max_limits->vddci;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		/* take mclk/vddci from the highest level for the low state */
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
		vddc = ps->performance_levels[0].vddc;
		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
	} else {
		sclk = ps->performance_levels[0].sclk;
		mclk = ps->performance_levels[0].mclk;
		vddc = ps->performance_levels[0].vddc;
		vddci = ps->performance_levels[0].vddci;
	}

	/* adjusted low state */
	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;
	ps->performance_levels[0].vddc = vddc;
	ps->performance_levels[0].vddci = vddci;

	/* keep sclk and vddc non-decreasing going up the levels */
	for (i = 1; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
			ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
		if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
			ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
	}

	if (disable_mclk_switching) {
		/* pin every level to the maximum mclk and single vddci */
		mclk = ps->performance_levels[0].mclk;
		for (i = 1; i < ps->performance_level_count; i++) {
			if (mclk < ps->performance_levels[i].mclk)
				mclk = ps->performance_levels[i].mclk;
		}
		for (i = 0; i < ps->performance_level_count; i++) {
			ps->performance_levels[i].mclk = mclk;
			ps->performance_levels[i].vddci = vddci;
		}
	} else {
		/* otherwise just keep mclk/vddci non-decreasing */
		for (i = 1; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
		}
	}

	/* fix up clock/voltage combinations against the board limits */
	for (i = 0; i < ps->performance_level_count; i++)
		btc_adjust_clock_combinations(rdev, max_limits,
					      &ps->performance_levels[i]);

	/* apply the sclk/mclk/dispclk voltage dependency tables */
	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
						   ps->performance_levels[i].sclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddci, &ps->performance_levels[i].vddci);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
						   rdev->clock.current_dispclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
	}

	/* apply the vddc/vddci delta rules per level */
	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_delta_rules(rdev,
					      max_limits->vddc, max_limits->vddci,
					      &ps->performance_levels[i].vddc,
					      &ps->performance_levels[i].vddci);
	}

	/* DC compatible only if no level exceeds the DC vddc limit */
	ps->dc_compatible = true;
	for (i = 0; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].vddc > rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
			ps->dc_compatible = false;
	}

}
3014
#if 0
/* Read one dword from the SMC soft-register area (currently unused,
 * kept for debugging symmetry with si_write_smc_soft_register). */
static int si_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct si_power_info *si_pi = si_get_pi(rdev);

	return si_read_smc_sram_dword(rdev,
				      si_pi->soft_regs_start + reg_offset, value,
				      si_pi->sram_end);
}
#endif
3026
3027static int si_write_smc_soft_register(struct radeon_device *rdev,
3028 u16 reg_offset, u32 value)
3029{
3030 struct si_power_info *si_pi = si_get_pi(rdev);
3031
3032 return si_write_smc_sram_dword(rdev,
3033 si_pi->soft_regs_start + reg_offset,
3034 value, si_pi->sram_end);
3035}
3036
3037static bool si_is_special_1gb_platform(struct radeon_device *rdev)
3038{
3039 bool ret = false;
3040 u32 tmp, width, row, column, bank, density;
3041 bool is_memory_gddr5, is_special;
3042
3043 tmp = RREG32(MC_SEQ_MISC0);
3044 is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
3045 is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT))
3046 & (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
3047
3048 WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
3049 width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
3050
3051 tmp = RREG32(MC_ARB_RAMCFG);
3052 row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
3053 column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
3054 bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
3055
3056 density = (1 << (row + column - 20 + bank)) * width;
3057
3058 if ((rdev->pdev->device == 0x6819) &&
3059 is_memory_gddr5 && is_special && (density == 0x400))
3060 ret = true;
3061
3062 return ret;
3063}
3064
3065static void si_get_leakage_vddc(struct radeon_device *rdev)
3066{
3067 struct si_power_info *si_pi = si_get_pi(rdev);
3068 u16 vddc, count = 0;
3069 int i, ret;
3070
3071 for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
3072 ret = radeon_atom_get_leakage_vddc_based_on_leakage_idx(rdev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);
3073
3074 if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
3075 si_pi->leakage_voltage.entries[count].voltage = vddc;
3076 si_pi->leakage_voltage.entries[count].leakage_index =
3077 SISLANDS_LEAKAGE_INDEX0 + i;
3078 count++;
3079 }
3080 }
3081 si_pi->leakage_voltage.count = count;
3082}
3083
3084static int si_get_leakage_voltage_from_leakage_index(struct radeon_device *rdev,
3085 u32 index, u16 *leakage_voltage)
3086{
3087 struct si_power_info *si_pi = si_get_pi(rdev);
3088 int i;
3089
3090 if (leakage_voltage == NULL)
3091 return -EINVAL;
3092
3093 if ((index & 0xff00) != 0xff00)
3094 return -EINVAL;
3095
3096 if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
3097 return -EINVAL;
3098
3099 if (index < SISLANDS_LEAKAGE_INDEX0)
3100 return -EINVAL;
3101
3102 for (i = 0; i < si_pi->leakage_voltage.count; i++) {
3103 if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
3104 *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
3105 return 0;
3106 }
3107 }
3108 return -EAGAIN;
3109}
3110
3111static void si_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
3112{
3113 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3114 bool want_thermal_protection;
3115 enum radeon_dpm_event_src dpm_event_src;
3116
3117 switch (sources) {
3118 case 0:
3119 default:
3120 want_thermal_protection = false;
3121 break;
3122 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
3123 want_thermal_protection = true;
3124 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
3125 break;
3126 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
3127 want_thermal_protection = true;
3128 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
3129 break;
3130 case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
3131 (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
3132 want_thermal_protection = true;
3133 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
3134 break;
3135 }
3136
3137 if (want_thermal_protection) {
3138 WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
3139 if (pi->thermal_protection)
3140 WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
3141 } else {
3142 WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
3143 }
3144}
3145
3146static void si_enable_auto_throttle_source(struct radeon_device *rdev,
3147 enum radeon_dpm_auto_throttle_src source,
3148 bool enable)
3149{
3150 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3151
3152 if (enable) {
3153 if (!(pi->active_auto_throttle_sources & (1 << source))) {
3154 pi->active_auto_throttle_sources |= 1 << source;
3155 si_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
3156 }
3157 } else {
3158 if (pi->active_auto_throttle_sources & (1 << source)) {
3159 pi->active_auto_throttle_sources &= ~(1 << source);
3160 si_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
3161 }
3162 }
3163}
3164
/* Turn on the global dynamic power management state machine. */
static void si_start_dpm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
}
3169
/* Turn off the global dynamic power management state machine. */
static void si_stop_dpm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}
3174
3175static void si_enable_sclk_control(struct radeon_device *rdev, bool enable)
3176{
3177 if (enable)
3178 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
3179 else
3180 WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
3181
3182}
3183
#if 0
/*
 * si_notify_hardware_of_thermal_state - enable the SMC thermal interrupt
 * when leaving the critical thermal level (level 0).
 * Currently compiled out (unused).
 */
static int si_notify_hardware_of_thermal_state(struct radeon_device *rdev,
					       u32 thermal_level)
{
	PPSMC_Result ret;

	if (thermal_level == 0) {
		ret = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
		if (ret == PPSMC_Result_OK)
			return 0;
		else
			return -EINVAL;
	}
	return 0;
}

/* Warn the SMC that a GPU reset (TDR) is about to happen. Unused. */
static void si_notify_hardware_vpu_recovery_event(struct radeon_device *rdev)
{
	si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
}
#endif
3205
#if 0
/* Tell the SMC we are running on AC power. Currently compiled out. */
static int si_notify_hw_of_powersource(struct radeon_device *rdev, bool ac_power)
{
	if (ac_power)
		return (si_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
			0 : -EINVAL;

	return 0;
}
#endif
3216
/*
 * Send @msg to the SMC with @parameter passed through the scratch
 * register; returns the raw SMC result code.
 */
static PPSMC_Result si_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_SCRATCH0, parameter);
	return si_send_msg_to_smc(rdev, msg);
}
3223
3224static int si_restrict_performance_levels_before_switch(struct radeon_device *rdev)
3225{
3226 if (si_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
3227 return -EINVAL;
3228
3229 return (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
3230 0 : -EINVAL;
3231}
3232
#if 0
/* Re-enable all performance levels after a state switch. Compiled out. */
static int si_unrestrict_performance_levels_after_switch(struct radeon_device *rdev)
{
	if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
		return -EINVAL;

	return (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
#endif
3243
3244static int si_set_boot_state(struct radeon_device *rdev)
3245{
3246 return (si_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
3247 0 : -EINVAL;
3248}
3249
3250static int si_set_sw_state(struct radeon_device *rdev)
3251{
3252 return (si_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
3253 0 : -EINVAL;
3254}
3255
3256static int si_halt_smc(struct radeon_device *rdev)
3257{
3258 if (si_send_msg_to_smc(rdev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
3259 return -EINVAL;
3260
3261 return (si_wait_for_smc_inactive(rdev) == PPSMC_Result_OK) ?
3262 0 : -EINVAL;
3263}
3264
3265static int si_resume_smc(struct radeon_device *rdev)
3266{
3267 if (si_send_msg_to_smc(rdev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
3268 return -EINVAL;
3269
3270 return (si_send_msg_to_smc(rdev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
3271 0 : -EINVAL;
3272}
3273
/* Program the reset-vector jump, then release the SMC from reset and
 * start its clock so it begins executing the loaded firmware. */
static void si_dpm_start_smc(struct radeon_device *rdev)
{
	si_program_jump_on_start(rdev);
	si_start_smc(rdev);
	si_start_smc_clock(rdev);
}
3280
/* Put the SMC back into reset and stop its clock. */
static void si_dpm_stop_smc(struct radeon_device *rdev)
{
	si_reset_smc(rdev);
	si_stop_smc_clock(rdev);
}
3286
3287static int si_process_firmware_header(struct radeon_device *rdev)
3288{
3289 struct si_power_info *si_pi = si_get_pi(rdev);
3290 u32 tmp;
3291 int ret;
3292
3293 ret = si_read_smc_sram_dword(rdev,
3294 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3295 SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
3296 &tmp, si_pi->sram_end);
3297 if (ret)
3298 return ret;
3299
3300 si_pi->state_table_start = tmp;
3301
3302 ret = si_read_smc_sram_dword(rdev,
3303 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3304 SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
3305 &tmp, si_pi->sram_end);
3306 if (ret)
3307 return ret;
3308
3309 si_pi->soft_regs_start = tmp;
3310
3311 ret = si_read_smc_sram_dword(rdev,
3312 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3313 SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
3314 &tmp, si_pi->sram_end);
3315 if (ret)
3316 return ret;
3317
3318 si_pi->mc_reg_table_start = tmp;
3319
3320 ret = si_read_smc_sram_dword(rdev,
3321 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3322 SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
3323 &tmp, si_pi->sram_end);
3324 if (ret)
3325 return ret;
3326
3327 si_pi->arb_table_start = tmp;
3328
3329 ret = si_read_smc_sram_dword(rdev,
3330 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3331 SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
3332 &tmp, si_pi->sram_end);
3333 if (ret)
3334 return ret;
3335
3336 si_pi->cac_table_start = tmp;
3337
3338 ret = si_read_smc_sram_dword(rdev,
3339 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3340 SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
3341 &tmp, si_pi->sram_end);
3342 if (ret)
3343 return ret;
3344
3345 si_pi->dte_table_start = tmp;
3346
3347 ret = si_read_smc_sram_dword(rdev,
3348 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3349 SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
3350 &tmp, si_pi->sram_end);
3351 if (ret)
3352 return ret;
3353
3354 si_pi->spll_table_start = tmp;
3355
3356 ret = si_read_smc_sram_dword(rdev,
3357 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3358 SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
3359 &tmp, si_pi->sram_end);
3360 if (ret)
3361 return ret;
3362
3363 si_pi->papm_cfg_table_start = tmp;
3364
3365 return ret;
3366}
3367
/*
 * si_read_clock_registers - snapshot the spll/mpll clock registers
 *
 * Cache the boot-up values of the engine and memory PLL registers so
 * later state programming can start from known-good settings.
 */
static void si_read_clock_registers(struct radeon_device *rdev)
{
	struct si_power_info *si_pi = si_get_pi(rdev);

	si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
	si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
	si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
	si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
	si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
	si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
	si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}
3388
3389static void si_enable_thermal_protection(struct radeon_device *rdev,
3390 bool enable)
3391{
3392 if (enable)
3393 WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
3394 else
3395 WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
3396}
3397
/* Enable static power management (used for the ACPI low-power state). */
static void si_enable_acpi_power_management(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}
3402
#if 0
/* Ask the SMC to enter its minimum-power (ULP) state. Compiled out. */
static int si_enter_ulp_state(struct radeon_device *rdev)
{
	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

/* Resume the SMC from minimum power and poll until it acknowledges.
 * Compiled out. */
static int si_exit_ulp_state(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif
3430
3431static int si_notify_smc_display_change(struct radeon_device *rdev,
3432 bool has_display)
3433{
3434 PPSMC_Msg msg = has_display ?
3435 PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
3436
3437 return (si_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?
3438 0 : -EINVAL;
3439}
3440
3441static void si_program_response_times(struct radeon_device *rdev)
3442{
3443 u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
3444 u32 vddc_dly, acpi_dly, vbi_dly;
3445 u32 reference_clock;
3446
3447 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
3448
3449 voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
3450 backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
3451
3452 if (voltage_response_time == 0)
3453 voltage_response_time = 1000;
3454
3455 acpi_delay_time = 15000;
3456 vbi_time_out = 100000;
3457
3458 reference_clock = radeon_get_xclk(rdev);
3459
3460 vddc_dly = (voltage_response_time * reference_clock) / 100;
3461 acpi_dly = (acpi_delay_time * reference_clock) / 100;
3462 vbi_dly = (vbi_time_out * reference_clock) / 100;
3463
3464 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
3465 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
3466 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
3467 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
3468}
3469
/* Configure sclk deep sleep: select the deep-sleep clock divider and
 * make autoscale clear on spread-spectrum events. */
static void si_program_ds_registers(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	u32 tmp = 1; /* XXX: 0x10 on tahiti A0 */

	if (eg_pi->sclk_deep_sleep) {
		WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
		WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
			 ~AUTOSCALE_ON_SS_CLEAR);
	}
}
3481
/*
 * si_program_display_gap - set up display gap handling for mclk switching
 *
 * Use VBLANK_OR_WM gaps for each active display (IGNORE otherwise),
 * make DISP1 slow select point at an active crtc, and finally tell the
 * SMC whether any display is active.
 */
static void si_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp, pipe;
	int i;

	tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);

	if (rdev->pm.dpm.new_active_crtc_count > 1)
		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);

	WREG32(CG_DISPLAY_GAP_CNTL, tmp);

	tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
	pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;

	/* re-point slow select if its crtc is no longer active */
	if ((rdev->pm.dpm.new_active_crtc_count > 0) &&
	    (!(rdev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
		/* find the first active crtc */
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.dpm.new_active_crtcs & (1 << i))
				break;
		}
		if (i == rdev->num_crtc)
			pipe = 0;
		else
			pipe = i;

		tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
		tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
		WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
	}

	si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
}
3522
3523static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
3524{
3525 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3526
3527 if (enable) {
3528 if (pi->sclk_ss)
3529 WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
3530 } else {
3531 WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
3532 WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
3533 }
3534}
3535
/*
 * si_setup_bsp - compute and program the bootup state timing parameters
 *
 * Derive the bsp/bsu (and performance-state pbsp/pbsu) dividers from
 * the asi/pasi intervals and the reference clock, then program the
 * bootup values into CG_BSP.
 */
static void si_setup_bsp(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 xclk = radeon_get_xclk(rdev);

	r600_calculate_u_and_p(pi->asi,
			       xclk,
			       16,
			       &pi->bsp,
			       &pi->bsu);

	r600_calculate_u_and_p(pi->pasi,
			       xclk,
			       16,
			       &pi->pbsp,
			       &pi->pbsu);


	pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
	pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);

	WREG32(CG_BSP, pi->dsp);
}
3559
/* Program the default gap interval counter state time. */
static void si_program_git(struct radeon_device *rdev)
{
	WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
}
3564
/*
 * si_program_tp - program temperature-dependent clock scaling
 *
 * Load the up/down temperature counters and select the trend-detection
 * mode.  td is currently fixed at the default, so only the matching
 * branches below take effect.
 */
static void si_program_tp(struct radeon_device *rdev)
{
	int i;
	enum r600_td td = R600_TD_DFLT;

	for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
		WREG32(CG_FFCT_0 + (i * 4), (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));

	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);

	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);

	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}
3584
/* Program the default temperature pipeline counter. */
static void si_program_tpp(struct radeon_device *rdev)
{
	WREG32(CG_TPC, R600_TPC_DFLT);
}
3589
/* Program the default static state timing parameters. */
static void si_program_sstp(struct radeon_device *rdev)
{
	WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}
3594
/* Set the mclk-change display gaps to IGNORE for both displays. */
static void si_enable_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);

	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
	tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
		DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
}
3604
/* Program the voltage control timing from the cached vrc value. */
static void si_program_vc(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	WREG32(CG_FTV, pi->vrc);
}
3611
/* Clear the voltage control timing register. */
static void si_clear_vc(struct radeon_device *rdev)
{
	WREG32(CG_FTV, 0);
}
3616
3617static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
3618{
3619 u8 mc_para_index;
3620
3621 if (memory_clock < 10000)
3622 mc_para_index = 0;
3623 else if (memory_clock >= 80000)
3624 mc_para_index = 0x0f;
3625 else
3626 mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
3627 return mc_para_index;
3628}
3629
3630static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
3631{
3632 u8 mc_para_index;
3633
3634 if (strobe_mode) {
3635 if (memory_clock < 12500)
3636 mc_para_index = 0x00;
3637 else if (memory_clock > 47500)
3638 mc_para_index = 0x0f;
3639 else
3640 mc_para_index = (u8)((memory_clock - 10000) / 2500);
3641 } else {
3642 if (memory_clock < 65000)
3643 mc_para_index = 0x00;
3644 else if (memory_clock > 135000)
3645 mc_para_index = 0x0f;
3646 else
3647 mc_para_index = (u8)((memory_clock - 60000) / 5000);
3648 }
3649 return mc_para_index;
3650}
3651
3652static u8 si_get_strobe_mode_settings(struct radeon_device *rdev, u32 mclk)
3653{
3654 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3655 bool strobe_mode = false;
3656 u8 result = 0;
3657
3658 if (mclk <= pi->mclk_strobe_mode_threshold)
3659 strobe_mode = true;
3660
3661 if (pi->mem_gddr5)
3662 result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
3663 else
3664 result = si_get_ddr3_mclk_frequency_ratio(mclk);
3665
3666 if (strobe_mode)
3667 result |= SISLANDS_SMC_STROBE_ENABLE;
3668
3669 return result;
3670}
3671
3672static int si_upload_firmware(struct radeon_device *rdev)
3673{
3674 struct si_power_info *si_pi = si_get_pi(rdev);
3675 int ret;
3676
3677 si_reset_smc(rdev);
3678 si_stop_smc_clock(rdev);
3679
3680 ret = si_load_smc_ucode(rdev, si_pi->sram_end);
3681
3682 return ret;
3683}
3684
3685static bool si_validate_phase_shedding_tables(struct radeon_device *rdev,
3686 const struct atom_voltage_table *table,
3687 const struct radeon_phase_shedding_limits_table *limits)
3688{
3689 u32 data, num_bits, num_levels;
3690
3691 if ((table == NULL) || (limits == NULL))
3692 return false;
3693
3694 data = table->mask_low;
3695
3696 num_bits = hweight32(data);
3697
3698 if (num_bits == 0)
3699 return false;
3700
3701 num_levels = (1 << num_bits);
3702
3703 if (table->count != num_levels)
3704 return false;
3705
3706 if (limits->count != (num_levels - 1))
3707 return false;
3708
3709 return true;
3710}
3711
3712static void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
3713 struct atom_voltage_table *voltage_table)
3714{
3715 unsigned int i, diff;
3716
3717 if (voltage_table->count <= SISLANDS_MAX_NO_VREG_STEPS)
3718 return;
3719
3720 diff = voltage_table->count - SISLANDS_MAX_NO_VREG_STEPS;
3721
3722 for (i= 0; i < SISLANDS_MAX_NO_VREG_STEPS; i++)
3723 voltage_table->entries[i] = voltage_table->entries[i + diff];
3724
3725 voltage_table->count = SISLANDS_MAX_NO_VREG_STEPS;
3726}
3727
/*
 * si_construct_voltage_tables - fetch and trim the voltage tables
 *
 * Pull the vddc/vddci/mvdd GPIO tables and the vddc phase shedding
 * table from the vbios, trimming each to the SMC state table limit.
 * vddc is mandatory; a missing mvdd or phase shed table just disables
 * the corresponding control.
 */
static int si_construct_voltage_tables(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct si_power_info *si_pi = si_get_pi(rdev);
	int ret;

	ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
					    VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
	if (ret)
		return ret;

	if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
		si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddc_voltage_table);

	if (eg_pi->vddci_control) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
						    VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
		if (ret)
			return ret;

		if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
			si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddci_voltage_table);
	}

	if (pi->mvdd_control) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
						    VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);

		/* no usable mvdd table -> give up on mvdd control */
		if (ret) {
			pi->mvdd_control = false;
			return ret;
		}

		if (si_pi->mvdd_voltage_table.count == 0) {
			pi->mvdd_control = false;
			return -EINVAL;
		}

		if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
			si_trim_voltage_table_to_fit_state_table(rdev, &si_pi->mvdd_voltage_table);
	}

	if (si_pi->vddc_phase_shed_control) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
						    VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
		if (ret)
			si_pi->vddc_phase_shed_control = false;

		/* a missing or oversized table just disables phase shedding */
		if ((si_pi->vddc_phase_shed_table.count == 0) ||
		    (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
			si_pi->vddc_phase_shed_control = false;
	}

	return 0;
}
3784
3785static void si_populate_smc_voltage_table(struct radeon_device *rdev,
3786 const struct atom_voltage_table *voltage_table,
3787 SISLANDS_SMC_STATETABLE *table)
3788{
3789 unsigned int i;
3790
3791 for (i = 0; i < voltage_table->count; i++)
3792 table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
3793}
3794
/*
 * si_populate_smc_voltage_tables - fill the SMC state table voltage data
 *
 * Copy the SMIO patterns and voltage masks for vddc/vddci/mvdd into the
 * SMC state table, record the max vddc index, and (when validated)
 * program the phase shedding mask and delay.
 */
static int si_populate_smc_voltage_tables(struct radeon_device *rdev,
					  SISLANDS_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct si_power_info *si_pi = si_get_pi(rdev);
	u8 i;

	if (eg_pi->vddc_voltage_table.count) {
		si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
		table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);

		/* first entry at or above max_vddc_in_table caps the pptable index */
		for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
			if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
				table->maxVDDCIndexInPPTable = i;
				break;
			}
		}
	}

	if (eg_pi->vddci_voltage_table.count) {
		si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);

		table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
	}


	if (si_pi->mvdd_voltage_table.count) {
		si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);

		table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
			cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
	}

	if (si_pi->vddc_phase_shed_control) {
		if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
			si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);

			table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
				cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);

			si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
						   (u32)si_pi->vddc_phase_shed_table.phase_delay);
		} else {
			/* inconsistent tables -> disable phase shedding */
			si_pi->vddc_phase_shed_control = false;
		}
	}

	return 0;
}
3848
3849static int si_populate_voltage_value(struct radeon_device *rdev,
3850 const struct atom_voltage_table *table,
3851 u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
3852{
3853 unsigned int i;
3854
3855 for (i = 0; i < table->count; i++) {
3856 if (value <= table->entries[i].value) {
3857 voltage->index = (u8)i;
3858 voltage->value = cpu_to_be16(table->entries[i].value);
3859 break;
3860 }
3861 }
3862
3863 if (i >= table->count)
3864 return -EINVAL;
3865
3866 return 0;
3867}
3868
3869static int si_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
3870 SISLANDS_SMC_VOLTAGE_VALUE *voltage)
3871{
3872 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3873 struct si_power_info *si_pi = si_get_pi(rdev);
3874
3875 if (pi->mvdd_control) {
3876 if (mclk <= pi->mvdd_split_frequency)
3877 voltage->index = 0;
3878 else
3879 voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
3880
3881 voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
3882 }
3883 return 0;
3884}
3885
/*
 * si_get_std_voltage_value - translate an SMC voltage into a standard vddc
 *
 * Defaults to the raw voltage.  With a cac leakage table present:
 * NEW_CAC_VOLTAGE platforms map the voltage through the sclk dependency
 * table (exact match first, then first entry at or above), clamping to
 * the last leakage entry; otherwise the voltage index selects the
 * leakage entry directly.
 */
static int si_get_std_voltage_value(struct radeon_device *rdev,
				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
				    u16 *std_voltage)
{
	u16 v_index;
	bool voltage_found = false;
	*std_voltage = be16_to_cpu(voltage->value);

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
			if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
				return -EINVAL;

			/* pass 1: exact voltage match in the sclk dependency table */
			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
				if (be16_to_cpu(voltage->value) ==
				    (u16)rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
					voltage_found = true;
					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
						*std_voltage =
							rdev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
					else
						/* clamp to the last leakage entry */
						*std_voltage =
							rdev->pm.dpm.dyn_state.cac_leakage_table.entries[rdev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
					break;
				}
			}

			/* pass 2: first entry at or above the requested voltage */
			if (!voltage_found) {
				for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
					if (be16_to_cpu(voltage->value) <=
					    (u16)rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
						voltage_found = true;
						if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
							*std_voltage =
								rdev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
						else
							*std_voltage =
								rdev->pm.dpm.dyn_state.cac_leakage_table.entries[rdev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
						break;
					}
				}
			}
		} else {
			if ((u32)voltage->index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
				*std_voltage = rdev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
		}
	}

	return 0;
}
3936
/*
 * Fill @voltage with an explicit (index, value) pair; the value is
 * stored big-endian for the SMC.  Always succeeds.
 */
static int si_populate_std_voltage_value(struct radeon_device *rdev,
					 u16 value, u8 index,
					 SISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	voltage->index = index;
	voltage->value = cpu_to_be16(value);

	return 0;
}
3946
3947static int si_populate_phase_shedding_value(struct radeon_device *rdev,
3948 const struct radeon_phase_shedding_limits_table *limits,
3949 u16 voltage, u32 sclk, u32 mclk,
3950 SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
3951{
3952 unsigned int i;
3953
3954 for (i = 0; i < limits->count; i++) {
3955 if ((voltage <= limits->entries[i].voltage) &&
3956 (sclk <= limits->entries[i].sclk) &&
3957 (mclk <= limits->entries[i].mclk))
3958 break;
3959 }
3960
3961 smc_voltage->phase_settings = (u8)i;
3962
3963 return 0;
3964}
3965
3966static int si_init_arb_table_index(struct radeon_device *rdev)
3967{
3968 struct si_power_info *si_pi = si_get_pi(rdev);
3969 u32 tmp;
3970 int ret;
3971
3972 ret = si_read_smc_sram_dword(rdev, si_pi->arb_table_start, &tmp, si_pi->sram_end);
3973 if (ret)
3974 return ret;
3975
3976 tmp &= 0x00FFFFFF;
3977 tmp |= MC_CG_ARB_FREQ_F1 << 24;
3978
3979 return si_write_smc_sram_dword(rdev, si_pi->arb_table_start, tmp, si_pi->sram_end);
3980}
3981
/* Copy MC arb register set F0 into F1 and switch the MC to F1. */
static int si_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
{
	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
3986
3987static int si_reset_to_default(struct radeon_device *rdev)
3988{
3989 return (si_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
3990 0 : -EINVAL;
3991}
3992
3993static int si_force_switch_to_arb_f0(struct radeon_device *rdev)
3994{
3995 struct si_power_info *si_pi = si_get_pi(rdev);
3996 u32 tmp;
3997 int ret;
3998
3999 ret = si_read_smc_sram_dword(rdev, si_pi->arb_table_start,
4000 &tmp, si_pi->sram_end);
4001 if (ret)
4002 return ret;
4003
4004 tmp = (tmp >> 24) & 0xff;
4005
4006 if (tmp == MC_CG_ARB_FREQ_F0)
4007 return 0;
4008
4009 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
4010}
4011
/*
 * si_calculate_memory_refresh_rate - derive the MC arb refresh rate
 *
 * Compute the refresh count (in 64-cycle units) for @engine_clock from
 * the dram row count (from MC_ARB_RAMCFG for gddr5, a fixed constant
 * for ddr3) and the refresh interval encoded in MC_SEQ_MISC0.
 */
static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev,
					    u32 engine_clock)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 dram_rows;
	u32 dram_refresh_rate;
	u32 mc_arb_rfsh_rate;
	u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;

	if (pi->mem_gddr5)
		dram_rows = 1 << (tmp + 10);
	else
		dram_rows = DDR3_DRAM_ROWS;

	dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
	mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;

	return mc_arb_rfsh_rate;
}
4031
/*
 * si_populate_memory_timing_parameters - fill one arb register set
 *
 * Program the MC dram timings for the performance level's clocks via
 * atombios, then read the resulting timing registers back into
 * @arb_regs (big-endian where the SMC expects it).
 */
static int si_populate_memory_timing_parameters(struct radeon_device *rdev,
						struct rv7xx_pl *pl,
						SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
{
	u32 dram_timing;
	u32 dram_timing2;
	u32 burst_time;

	arb_regs->mc_arb_rfsh_rate =
		(u8)si_calculate_memory_refresh_rate(rdev, pl->sclk);

	radeon_atom_set_engine_dram_timings(rdev,
					    pl->sclk,
					    pl->mclk);

	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;

	arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing);
	arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
	arb_regs->mc_arb_burst_time = (u8)burst_time;

	return 0;
}
4057
/*
 * si_do_program_memory_timing_parameters - upload arb sets for a state
 *
 * Build one MC arb dram timing register set per performance level and
 * copy each into SMC SRAM starting at @first_arb_set.  Stops at the
 * first failure and returns its error code.
 */
static int si_do_program_memory_timing_parameters(struct radeon_device *rdev,
						  struct radeon_ps *radeon_state,
						  unsigned int first_arb_set)
{
	struct si_power_info *si_pi = si_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
	int i, ret = 0;

	for (i = 0; i < state->performance_level_count; i++) {
		ret = si_populate_memory_timing_parameters(rdev, &state->performance_levels[i], &arb_regs);
		if (ret)
			break;
		ret = si_copy_bytes_to_smc(rdev,
					   si_pi->arb_table_start +
					   offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
					   sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
					   (u8 *)&arb_regs,
					   sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
					   si_pi->sram_end);
		if (ret)
			break;
	}

	return ret;
}
4084
/* Upload the arb register sets for the new driver power state. */
static int si_program_memory_timing_parameters(struct radeon_device *rdev,
					       struct radeon_ps *radeon_new_state)
{
	return si_do_program_memory_timing_parameters(rdev, radeon_new_state,
						      SISLANDS_DRIVER_STATE_ARB_INDEX);
}
4091
4092static int si_populate_initial_mvdd_value(struct radeon_device *rdev,
4093 struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
4094{
4095 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4096 struct si_power_info *si_pi = si_get_pi(rdev);
4097
4098 if (pi->mvdd_control)
4099 return si_populate_voltage_value(rdev, &si_pi->mvdd_voltage_table,
4100 si_pi->mvdd_bootup_value, voltage);
4101
4102 return 0;
4103}
4104
4105static int si_populate_smc_initial_state(struct radeon_device *rdev,
4106 struct radeon_ps *radeon_initial_state,
4107 SISLANDS_SMC_STATETABLE *table)
4108{
4109 struct ni_ps *initial_state = ni_get_ps(radeon_initial_state);
4110 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4111 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
4112 struct si_power_info *si_pi = si_get_pi(rdev);
4113 u32 reg;
4114 int ret;
4115
4116 table->initialState.levels[0].mclk.vDLL_CNTL =
4117 cpu_to_be32(si_pi->clock_registers.dll_cntl);
4118 table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
4119 cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
4120 table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
4121 cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
4122 table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
4123 cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
4124 table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
4125 cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
4126 table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
4127 cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
4128 table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
4129 cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
4130 table->initialState.levels[0].mclk.vMPLL_SS =
4131 cpu_to_be32(si_pi->clock_registers.mpll_ss1);
4132 table->initialState.levels[0].mclk.vMPLL_SS2 =
4133 cpu_to_be32(si_pi->clock_registers.mpll_ss2);
4134
4135 table->initialState.levels[0].mclk.mclk_value =
4136 cpu_to_be32(initial_state->performance_levels[0].mclk);
4137
4138 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
4139 cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
4140 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
4141 cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
4142 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
4143 cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
4144 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
4145 cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
4146 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
4147 cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
4148 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
4149 cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
4150
4151 table->initialState.levels[0].sclk.sclk_value =
4152 cpu_to_be32(initial_state->performance_levels[0].sclk);
4153
4154 table->initialState.levels[0].arbRefreshState =
4155 SISLANDS_INITIAL_STATE_ARB_INDEX;
4156
4157 table->initialState.levels[0].ACIndex = 0;
4158
4159 ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
4160 initial_state->performance_levels[0].vddc,
4161 &table->initialState.levels[0].vddc);
4162
4163 if (!ret) {
4164 u16 std_vddc;
4165
4166 ret = si_get_std_voltage_value(rdev,
4167 &table->initialState.levels[0].vddc,
4168 &std_vddc);
4169 if (!ret)
4170 si_populate_std_voltage_value(rdev, std_vddc,
4171 table->initialState.levels[0].vddc.index,
4172 &table->initialState.levels[0].std_vddc);
4173 }
4174
4175 if (eg_pi->vddci_control)
4176 si_populate_voltage_value(rdev,
4177 &eg_pi->vddci_voltage_table,
4178 initial_state->performance_levels[0].vddci,
4179 &table->initialState.levels[0].vddci);
4180
4181 if (si_pi->vddc_phase_shed_control)
4182 si_populate_phase_shedding_value(rdev,
4183 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
4184 initial_state->performance_levels[0].vddc,
4185 initial_state->performance_levels[0].sclk,
4186 initial_state->performance_levels[0].mclk,
4187 &table->initialState.levels[0].vddc);
4188
4189 si_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);
4190
4191 reg = CG_R(0xffff) | CG_L(0);
4192 table->initialState.levels[0].aT = cpu_to_be32(reg);
4193
4194 table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
4195
4196 table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
4197
4198 if (pi->mem_gddr5) {
4199 table->initialState.levels[0].strobeMode =
4200 si_get_strobe_mode_settings(rdev,
4201 initial_state->performance_levels[0].mclk);
4202
4203 if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
4204 table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
4205 else
4206 table->initialState.levels[0].mcFlags = 0;
4207 }
4208
4209 table->initialState.levelCount = 1;
4210
4211 table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
4212
4213 table->initialState.levels[0].dpm2.MaxPS = 0;
4214 table->initialState.levels[0].dpm2.NearTDPDec = 0;
4215 table->initialState.levels[0].dpm2.AboveSafeInc = 0;
4216 table->initialState.levels[0].dpm2.BelowSafeInc = 0;
4217 table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
4218
4219 reg = MIN_POWER_MASK | MAX_POWER_MASK;
4220 table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
4221
4222 reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
4223 table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
4224
4225 return 0;
4226}
4227
/*
 * si_populate_smc_acpi_state - fill in the ACPI (deepest idle) entry of the
 * SMC state table.
 *
 * Starts from a copy of the already-populated initial state, then overrides
 * voltages, PCIe gen and the PLL/DLL control registers so that both engine
 * and memory clocks are parked: the memory DCK pads are put in reset with
 * power-down disabled, DLL bypass is cleared, the SCLK mux is forced to
 * source 4, and mclk_value/sclk_value are zeroed.
 *
 * Voltage-table lookup failures are deliberately ignored (best effort),
 * mirroring the initial-state population path.  Always returns 0.
 */
static int si_populate_smc_acpi_state(struct radeon_device *rdev,
				      SISLANDS_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct si_power_info *si_pi = si_get_pi(rdev);
	/* Start from the clock-register values captured at driver init. */
	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
	u32 reg;
	int ret;

	/* ACPI state is the initial state minus the DC flag. */
	table->ACPIState = table->initialState;

	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc) {
		/* Board provides a dedicated ACPI vddc level. */
		ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
						pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
		if (!ret) {
			u16 std_vddc;

			ret = si_get_std_voltage_value(rdev,
						       &table->ACPIState.levels[0].vddc, &std_vddc);
			if (!ret)
				si_populate_std_voltage_value(rdev, std_vddc,
							      table->ACPIState.levels[0].vddc.index,
							      &table->ACPIState.levels[0].std_vddc);
		}
		table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;

		if (si_pi->vddc_phase_shed_control) {
			/* sclk/mclk of 0: phase shedding limits looked up at idle. */
			si_populate_phase_shedding_value(rdev,
							 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
							 pi->acpi_vddc,
							 0,
							 0,
							 &table->ACPIState.levels[0].vddc);
		}
	} else {
		/* No ACPI vddc: fall back to the lowest vddc in the table. */
		ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
						pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
		if (!ret) {
			u16 std_vddc;

			ret = si_get_std_voltage_value(rdev,
						       &table->ACPIState.levels[0].vddc, &std_vddc);

			if (!ret)
				si_populate_std_voltage_value(rdev, std_vddc,
							      table->ACPIState.levels[0].vddc.index,
							      &table->ACPIState.levels[0].std_vddc);
		}
		/* Drop the link to the slowest supported PCIe gen while idle. */
		table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(rdev,
										    si_pi->sys_pcie_mask,
										    si_pi->boot_pcie_gen,
										    RADEON_PCIE_GEN1);

		if (si_pi->vddc_phase_shed_control)
			si_populate_phase_shedding_value(rdev,
							 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
							 pi->min_vddc_in_table,
							 0,
							 0,
							 &table->ACPIState.levels[0].vddc);
	}

	/* NOTE(review): vddci population is gated on pi->acpi_vddc rather
	 * than on eg_pi->vddci_control / eg_pi->acpi_vddci alone -- confirm
	 * this coupling is intentional.
	 */
	if (pi->acpi_vddc) {
		if (eg_pi->acpi_vddci)
			si_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
						  eg_pi->acpi_vddci,
						  &table->ACPIState.levels[0].vddci);
	}

	/* Hold the memory DCK pads in reset, power-down disabled. */
	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);

	/* Select sclk source 4 while in ACPI state. */
	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	/* SMC tables are big-endian; swap every register image. */
	table->ACPIState.levels[0].mclk.vDLL_CNTL =
		cpu_to_be32(dll_cntl);
	table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
		cpu_to_be32(mclk_pwrmgt_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
		cpu_to_be32(mpll_ad_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
		cpu_to_be32(mpll_dq_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
		cpu_to_be32(mpll_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
		cpu_to_be32(mpll_func_cntl_1);
	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
		cpu_to_be32(mpll_func_cntl_2);
	table->ACPIState.levels[0].mclk.vMPLL_SS =
		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
	table->ACPIState.levels[0].mclk.vMPLL_SS2 =
		cpu_to_be32(si_pi->clock_registers.mpll_ss2);

	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
		cpu_to_be32(spll_func_cntl);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
		cpu_to_be32(spll_func_cntl_2);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
		cpu_to_be32(spll_func_cntl_3);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
		cpu_to_be32(spll_func_cntl_4);

	/* Clocks are effectively off in ACPI state. */
	table->ACPIState.levels[0].mclk.mclk_value = 0;
	table->ACPIState.levels[0].sclk.sclk_value = 0;

	si_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);

	if (eg_pi->dynamic_ac_timing)
		table->ACPIState.levels[0].ACIndex = 0;

	/* DPM2 power-containment parameters are neutralized for ACPI. */
	table->ACPIState.levels[0].dpm2.MaxPS = 0;
	table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
	table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
	table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
	table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;

	/* SQ power throttling left at its widest (mask) settings. */
	reg = MIN_POWER_MASK | MAX_POWER_MASK;
	table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);

	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
	table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);

	return 0;
}
4368
4369static int si_populate_ulv_state(struct radeon_device *rdev,
4370 SISLANDS_SMC_SWSTATE *state)
4371{
4372 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
4373 struct si_power_info *si_pi = si_get_pi(rdev);
4374 struct si_ulv_param *ulv = &si_pi->ulv;
4375 u32 sclk_in_sr = 1350; /* ??? */
4376 int ret;
4377
4378 ret = si_convert_power_level_to_smc(rdev, &ulv->pl,
4379 &state->levels[0]);
4380 if (!ret) {
4381 if (eg_pi->sclk_deep_sleep) {
4382 if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
4383 state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
4384 else
4385 state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
4386 }
4387 if (ulv->one_pcie_lane_in_ulv)
4388 state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
4389 state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
4390 state->levels[0].ACIndex = 1;
4391 state->levels[0].std_vddc = state->levels[0].vddc;
4392 state->levelCount = 1;
4393
4394 state->flags |= PPSMC_SWSTATE_FLAG_DC;
4395 }
4396
4397 return ret;
4398}
4399
4400static int si_program_ulv_memory_timing_parameters(struct radeon_device *rdev)
4401{
4402 struct si_power_info *si_pi = si_get_pi(rdev);
4403 struct si_ulv_param *ulv = &si_pi->ulv;
4404 SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
4405 int ret;
4406
4407 ret = si_populate_memory_timing_parameters(rdev, &ulv->pl,
4408 &arb_regs);
4409 if (ret)
4410 return ret;
4411
4412 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
4413 ulv->volt_change_delay);
4414
4415 ret = si_copy_bytes_to_smc(rdev,
4416 si_pi->arb_table_start +
4417 offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
4418 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
4419 (u8 *)&arb_regs,
4420 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
4421 si_pi->sram_end);
4422
4423 return ret;
4424}
4425
4426static void si_get_mvdd_configuration(struct radeon_device *rdev)
4427{
4428 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4429
4430 pi->mvdd_split_frequency = 30000;
4431}
4432
/*
 * si_init_smc_table - build and upload the master SMC state table.
 *
 * Populates the voltage tables, global system flags (thermal protection
 * type, DC/GPIO/regulator-hot capabilities, GDDR5), the initial and ACPI
 * states, copies the initial state into driverState, programs the arb
 * timing for the boot state and, when available, the ULV state, then
 * uploads the whole table into SMC RAM.
 *
 * Returns 0 on success or the first failing sub-step's error code.
 */
static int si_init_smc_table(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct si_power_info *si_pi = si_get_pi(rdev);
	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
	const struct si_ulv_param *ulv = &si_pi->ulv;
	SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
	int ret;
	u32 lane_width;
	u32 vr_hot_gpio;

	si_populate_smc_voltage_tables(rdev, table);

	/* Map the board's thermal sensor type onto the SMC's notion. */
	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
		break;
	case THERMAL_TYPE_NONE:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
		break;
	default:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
		break;
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
		/* Regulator-hot is skipped on these two device IDs --
		 * presumably a board-specific quirk; origin not visible here.
		 */
		if ((rdev->pdev->device != 0x6818) && (rdev->pdev->device != 0x6819))
			table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (pi->mem_gddr5)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
		table->systemFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
		/* NOTE(review): the GPIO number is read out of
		 * backbias_response_time -- looks like a field reuse; confirm.
		 */
		vr_hot_gpio = rdev->pm.dpm.backbias_response_time;
		si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
					   vr_hot_gpio);
	}

	ret = si_populate_smc_initial_state(rdev, radeon_boot_state, table);
	if (ret)
		return ret;

	ret = si_populate_smc_acpi_state(rdev, table);
	if (ret)
		return ret;

	/* The driver state starts out identical to the initial state. */
	table->driverState = table->initialState;

	ret = si_do_program_memory_timing_parameters(rdev, radeon_boot_state,
						     SISLANDS_INITIAL_STATE_ARB_INDEX);
	if (ret)
		return ret;

	if (ulv->supported && ulv->pl.vddc) {
		ret = si_populate_ulv_state(rdev, &table->ULVState);
		if (ret)
			return ret;

		ret = si_program_ulv_memory_timing_parameters(rdev);
		if (ret)
			return ret;

		WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
		WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);

		lane_width = radeon_get_pcie_lanes(rdev);
		si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
	} else {
		/* No usable ULV: make the ULV slot a copy of the initial state. */
		table->ULVState = table->initialState;
	}

	return si_copy_bytes_to_smc(rdev, si_pi->state_table_start,
				    (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
				    si_pi->sram_end);
}
4520
/*
 * si_calculate_sclk_params - derive the SPLL register values for a given
 * engine clock.
 *
 * Asks the VBIOS for engine PLL dividers, computes the 14.14 fixed-point
 * feedback divider, and optionally programs spread spectrum when engine SS
 * info is available.  Results are written to @sclk in CPU byte order;
 * si_populate_sclk_value() does the big-endian conversion.
 *
 * Returns 0 on success, or the error from the atom divider lookup.
 */
static int si_calculate_sclk_params(struct radeon_device *rdev,
				    u32 engine_clock,
				    SISLANDS_SMC_SCLK_VALUE *sclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct si_power_info *si_pi = si_get_pi(rdev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
	u64 tmp;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;

	/* fbdiv = clk * refdiv * postdiv / refclk, in 14.14 fixed point
	 * (16384 = 2^14); 64-bit math to avoid overflow before do_div.
	 */
	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
	do_div(tmp, reference_clock);
	fbdiv = (u32) tmp;

	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);

	/* Source the sclk from mux input 2. */
	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(2);

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	if (pi->sclk_ss) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		/* Only enable SS when the VBIOS carries engine SS data
		 * for this VCO frequency.
		 */
		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	/* Hand back everything in CPU byte order. */
	sclk->sclk_value = engine_clock;
	sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
	sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
	sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
	sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
	sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;

	return 0;
}
4590
4591static int si_populate_sclk_value(struct radeon_device *rdev,
4592 u32 engine_clock,
4593 SISLANDS_SMC_SCLK_VALUE *sclk)
4594{
4595 SISLANDS_SMC_SCLK_VALUE sclk_tmp;
4596 int ret;
4597
4598 ret = si_calculate_sclk_params(rdev, engine_clock, &sclk_tmp);
4599 if (!ret) {
4600 sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
4601 sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
4602 sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
4603 sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
4604 sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
4605 sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
4606 sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
4607 }
4608
4609 return ret;
4610}
4611
/*
 * si_populate_mclk_value - derive the MPLL/DLL register values for a memory
 * clock and store them in @mclk in big-endian SMC layout.
 *
 * @strobe_mode:  pass-through to the atom MPLL divider query.
 * @dll_state_on: whether the memory DCK pads stay powered (PDNB set).
 *
 * Optionally programs memory spread spectrum when the VBIOS provides SS
 * data for the nominal frequency.  Returns 0 on success or the error from
 * the atom MPLL divider lookup.
 */
static int si_populate_mclk_value(struct radeon_device *rdev,
				  u32 engine_clock,
				  u32 memory_clock,
				  SISLANDS_SMC_MCLK_VALUE *mclk,
				  bool strobe_mode,
				  bool dll_state_on)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct si_power_info *si_pi = si_get_pi(rdev);
	/* Start from the register values captured at driver init. */
	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
	u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1;
	u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~BWCTRL_MASK;
	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);

	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);

	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);

	if (pi->mem_gddr5) {
		/* GDDR5 also needs the DQ branch post divider/source set. */
		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
			YCLK_POST_DIV(mpll_param.post_div);
	}

	if (pi->mclk_ss) {
		struct radeon_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = rdev->clock.mpll.reference_freq;

		/* Nominal frequency: x4 data rate for GDDR5, x2 otherwise. */
		if (pi->mem_gddr5)
			freq_nom = memory_clock * 4;
		else
			freq_nom = memory_clock * 2;

		/* tmp = (freq_nom / refclk)^2, used to scale clkv below. */
		tmp = freq_nom / reference_clock;
		tmp = tmp * tmp;
		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clkv);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clks);
		}
	}

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);

	/* PDNB set => pads stay powered while in this level. */
	if (dll_state_on)
		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
	else
		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	/* SMC tables are big-endian. */
	mclk->mclk_value = cpu_to_be32(memory_clock);
	mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
	mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
	mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);

	return 0;
}
4700
4701static void si_populate_smc_sp(struct radeon_device *rdev,
4702 struct radeon_ps *radeon_state,
4703 SISLANDS_SMC_SWSTATE *smc_state)
4704{
4705 struct ni_ps *ps = ni_get_ps(radeon_state);
4706 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4707 int i;
4708
4709 for (i = 0; i < ps->performance_level_count - 1; i++)
4710 smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
4711
4712 smc_state->levels[ps->performance_level_count - 1].bSP =
4713 cpu_to_be32(pi->psp);
4714}
4715
/*
 * si_convert_power_level_to_smc - translate one driver performance level
 * (struct rv7xx_pl) into the SMC's hardware level representation.
 *
 * Fills in the PCIe gen, sclk/mclk register images, memory controller
 * flags (stutter/EDC/strobe mode), all voltage fields and the max
 * powered-up CU count.  Returns 0 on success or the first sub-step error.
 */
static int si_convert_power_level_to_smc(struct radeon_device *rdev,
					 struct rv7xx_pl *pl,
					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct si_power_info *si_pi = si_get_pi(rdev);
	int ret;
	bool dll_state_on;
	u16 std_vddc;
	/* Hard-coded false: the SISLANDS_SMC_MC_PG_EN branch below is
	 * currently dead (placeholder for GMC powergating).
	 */
	bool gmc_pg = false;

	/* A forced PCIe gen (from a performance request) overrides the
	 * level's own setting.
	 */
	if (eg_pi->pcie_performance_request &&
	    (si_pi->force_pcie_gen != RADEON_PCIE_GEN_INVALID))
		level->gen2PCIE = (u8)si_pi->force_pcie_gen;
	else
		level->gen2PCIE = (u8)pl->pcie_gen;

	ret = si_populate_sclk_value(rdev, pl->sclk, &level->sclk);
	if (ret)
		return ret;

	level->mcFlags =  0;

	/* mclk stutter: only below the threshold, with UVD idle, display
	 * stutter enabled, and at most two active CRTCs.
	 */
	if (pi->mclk_stutter_mode_threshold &&
	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
	    !eg_pi->uvd_enabled &&
	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
	    (rdev->pm.dpm.new_active_crtc_count <= 2)) {
		level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;

		if (gmc_pg)
			level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
	}

	if (pi->mem_gddr5) {
		if (pl->mclk > pi->mclk_edc_enable_threshold)
			level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;

		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
			level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;

		level->strobeMode = si_get_strobe_mode_settings(rdev, pl->mclk);

		/* DLL on/off is sampled from MC fuse registers; which bit
		 * applies depends on the mclk ratio vs the MC_SEQ_MISC7
		 * threshold when strobe mode is active.
		 */
		if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
			if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = false;
		}
	} else {
		level->strobeMode = si_get_strobe_mode_settings(rdev,
								pl->mclk);

		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
	}

	ret = si_populate_mclk_value(rdev,
				     pl->sclk,
				     pl->mclk,
				     &level->mclk,
				     (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
	if (ret)
		return ret;

	ret = si_populate_voltage_value(rdev,
					&eg_pi->vddc_voltage_table,
					pl->vddc, &level->vddc);
	if (ret)
		return ret;


	ret = si_get_std_voltage_value(rdev, &level->vddc, &std_vddc);
	if (ret)
		return ret;

	ret = si_populate_std_voltage_value(rdev, std_vddc,
					    level->vddc.index, &level->std_vddc);
	if (ret)
		return ret;

	if (eg_pi->vddci_control) {
		ret = si_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
						pl->vddci, &level->vddci);
		if (ret)
			return ret;
	}

	if (si_pi->vddc_phase_shed_control) {
		ret = si_populate_phase_shedding_value(rdev,
						       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						       pl->vddc,
						       pl->sclk,
						       pl->mclk,
						       &level->vddc);
		if (ret)
			return ret;
	}

	level->MaxPoweredUpCU = si_pi->max_cu;

	ret = si_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);

	return ret;
}
4824
/*
 * si_populate_smc_t - compute the aT (up/down residency) thresholds for
 * every level of a software state.
 *
 * For each adjacent pair of levels, r600_calculate_at() yields a low/high
 * threshold pair from the sclk ratio; these are scaled by the behaviour
 * state period (bsp/pbsp) into the CG_R (down) and CG_L (up) fields.  On
 * calculator failure a linear fallback threshold is used instead.
 *
 * Returns -EINVAL when the state has 9 or more levels, 0 otherwise.
 */
static int si_populate_smc_t(struct radeon_device *rdev,
			     struct radeon_ps *radeon_state,
			     SISLANDS_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	u32 a_t;
	u32 t_l, t_h;
	u32 high_bsp;
	int i, ret;

	if (state->performance_level_count >= 9)
		return -EINVAL;

	/* Single-level state: never switch down (CG_R saturated). */
	if (state->performance_level_count < 2) {
		a_t = CG_R(0xffff) | CG_L(0);
		smc_state->levels[0].aT = cpu_to_be32(a_t);
		return 0;
	}

	smc_state->levels[0].aT = cpu_to_be32(0);

	for (i = 0; i <= state->performance_level_count - 2; i++) {
		ret = r600_calculate_at(
			(50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
			100 * R600_AH_DFLT,
			state->performance_levels[i + 1].sclk,
			state->performance_levels[i].sclk,
			&t_l,
			&t_h);

		if (ret) {
			/* Fallback: linear thresholds around (i+1)*1000. */
			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
		}

		/* Merge the down-threshold into level i (preserve its CG_L). */
		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
		a_t |= CG_R(t_l * pi->bsp / 20000);
		smc_state->levels[i].aT = cpu_to_be32(a_t);

		/* The topmost transition uses the performance bsp. */
		high_bsp = (i == state->performance_level_count - 2) ?
			pi->pbsp : pi->bsp;
		a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
	}

	return 0;
}
4873
4874static int si_disable_ulv(struct radeon_device *rdev)
4875{
4876 struct si_power_info *si_pi = si_get_pi(rdev);
4877 struct si_ulv_param *ulv = &si_pi->ulv;
4878
4879 if (ulv->supported)
4880 return (si_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
4881 0 : -EINVAL;
4882
4883 return 0;
4884}
4885
4886static bool si_is_state_ulv_compatible(struct radeon_device *rdev,
4887 struct radeon_ps *radeon_state)
4888{
4889 const struct si_power_info *si_pi = si_get_pi(rdev);
4890 const struct si_ulv_param *ulv = &si_pi->ulv;
4891 const struct ni_ps *state = ni_get_ps(radeon_state);
4892 int i;
4893
4894 if (state->performance_levels[0].mclk != ulv->pl.mclk)
4895 return false;
4896
4897 /* XXX validate against display requirements! */
4898
4899 for (i = 0; i < rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
4900 if (rdev->clock.current_dispclk <=
4901 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
4902 if (ulv->pl.vddc <
4903 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
4904 return false;
4905 }
4906 }
4907
4908 if ((radeon_state->vclk != 0) || (radeon_state->dclk != 0))
4909 return false;
4910
4911 return true;
4912}
4913
4914static int si_set_power_state_conditionally_enable_ulv(struct radeon_device *rdev,
4915 struct radeon_ps *radeon_new_state)
4916{
4917 const struct si_power_info *si_pi = si_get_pi(rdev);
4918 const struct si_ulv_param *ulv = &si_pi->ulv;
4919
4920 if (ulv->supported) {
4921 if (si_is_state_ulv_compatible(rdev, radeon_new_state))
4922 return (si_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
4923 0 : -EINVAL;
4924 }
4925 return 0;
4926}
4927
/*
 * si_convert_power_state_to_smc - translate a whole driver power state into
 * an SMC software state.
 *
 * Tracks UVD activity, sets DC/UVD flags, converts each performance level
 * (deep-sleep flags, arb index, display watermark, AC timing index),
 * programs the watermark threshold soft register, and finally fills in the
 * switch periods, power-containment and SQ ramping values.  Power
 * containment / SQ ramping failures are non-fatal: the corresponding
 * feature is simply disabled.
 *
 * Returns -EINVAL when the state has too many levels, otherwise the result
 * of si_populate_smc_t().
 */
static int si_convert_power_state_to_smc(struct radeon_device *rdev,
					 struct radeon_ps *radeon_state,
					 SISLANDS_SMC_SWSTATE *smc_state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct si_power_info *si_pi = si_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	int i, ret;
	u32 threshold;
	u32 sclk_in_sr = 1350; /* ??? */

	if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
		return -EINVAL;

	/* Watermark threshold = 100% of the top level's sclk (the *100/100
	 * keeps the percentage-scaling shape used elsewhere).
	 */
	threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;

	/* Non-zero vclk+dclk means UVD is in use for this state. */
	if (radeon_state->vclk && radeon_state->dclk) {
		eg_pi->uvd_enabled = true;
		if (eg_pi->smu_uvd_hs)
			smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
	} else {
		eg_pi->uvd_enabled = false;
	}

	if (state->dc_compatible)
		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;

	smc_state->levelCount = 0;
	for (i = 0; i < state->performance_level_count; i++) {
		if (eg_pi->sclk_deep_sleep) {
			/* Deep sleep applies to level 0 always, and to the
			 * rest only when allowed above the low level.
			 */
			if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
				if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
				else
					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
			}
		}

		ret = si_convert_power_level_to_smc(rdev, &state->performance_levels[i],
						    &smc_state->levels[i]);
		/* Driver-state levels occupy consecutive arb table slots. */
		smc_state->levels[i].arbRefreshState =
			(u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);

		if (ret)
			return ret;

		if (ni_pi->enable_power_containment)
			smc_state->levels[i].displayWatermark =
				(state->performance_levels[i].sclk < threshold) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
		else
			smc_state->levels[i].displayWatermark = (i < 2) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;

		if (eg_pi->dynamic_ac_timing)
			smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
		else
			smc_state->levels[i].ACIndex = 0;

		smc_state->levelCount++;
	}

	si_write_smc_soft_register(rdev,
				   SI_SMC_SOFT_REGISTER_watermark_threshold,
				   threshold / 512);

	si_populate_smc_sp(rdev, radeon_state, smc_state);

	/* Failures below disable the feature rather than fail the state. */
	ret = si_populate_power_containment_values(rdev, radeon_state, smc_state);
	if (ret)
		ni_pi->enable_power_containment = false;

	ret = si_populate_sq_ramping_values(rdev, radeon_state, smc_state);
	if (ret)
		ni_pi->enable_sq_ramping = false;

	return si_populate_smc_t(rdev, radeon_state, smc_state);
}
5007
5008static int si_upload_sw_state(struct radeon_device *rdev,
5009 struct radeon_ps *radeon_new_state)
5010{
5011 struct si_power_info *si_pi = si_get_pi(rdev);
5012 struct ni_ps *new_state = ni_get_ps(radeon_new_state);
5013 int ret;
5014 u32 address = si_pi->state_table_start +
5015 offsetof(SISLANDS_SMC_STATETABLE, driverState);
5016 u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
5017 ((new_state->performance_level_count - 1) *
5018 sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
5019 SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
5020
5021 memset(smc_state, 0, state_size);
5022
5023 ret = si_convert_power_state_to_smc(rdev, radeon_new_state, smc_state);
5024 if (ret)
5025 return ret;
5026
5027 ret = si_copy_bytes_to_smc(rdev, address, (u8 *)smc_state,
5028 state_size, si_pi->sram_end);
5029
5030 return ret;
5031}
5032
5033static int si_upload_ulv_state(struct radeon_device *rdev)
5034{
5035 struct si_power_info *si_pi = si_get_pi(rdev);
5036 struct si_ulv_param *ulv = &si_pi->ulv;
5037 int ret = 0;
5038
5039 if (ulv->supported && ulv->pl.vddc) {
5040 u32 address = si_pi->state_table_start +
5041 offsetof(SISLANDS_SMC_STATETABLE, ULVState);
5042 SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
5043 u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
5044
5045 memset(smc_state, 0, state_size);
5046
5047 ret = si_populate_ulv_state(rdev, smc_state);
5048 if (!ret)
5049 ret = si_copy_bytes_to_smc(rdev, address, (u8 *)smc_state,
5050 state_size, si_pi->sram_end);
5051 }
5052
5053 return ret;
5054}
5055
5056static int si_upload_smc_data(struct radeon_device *rdev)
5057{
5058 struct radeon_crtc *radeon_crtc = NULL;
5059 int i;
5060
5061 if (rdev->pm.dpm.new_active_crtc_count == 0)
5062 return 0;
5063
5064 for (i = 0; i < rdev->num_crtc; i++) {
5065 if (rdev->pm.dpm.new_active_crtcs & (1 << i)) {
5066 radeon_crtc = rdev->mode_info.crtcs[i];
5067 break;
5068 }
5069 }
5070
5071 if (radeon_crtc == NULL)
5072 return 0;
5073
5074 if (radeon_crtc->line_time <= 0)
5075 return 0;
5076
5077 if (si_write_smc_soft_register(rdev,
5078 SI_SMC_SOFT_REGISTER_crtc_index,
5079 radeon_crtc->crtc_id) != PPSMC_Result_OK)
5080 return 0;
5081
5082 if (si_write_smc_soft_register(rdev,
5083 SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
5084 radeon_crtc->wm_high / radeon_crtc->line_time) != PPSMC_Result_OK)
5085 return 0;
5086
5087 if (si_write_smc_soft_register(rdev,
5088 SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
5089 radeon_crtc->wm_low / radeon_crtc->line_time) != PPSMC_Result_OK)
5090 return 0;
5091
5092 return 0;
5093}
5094
5095static int si_set_mc_special_registers(struct radeon_device *rdev,
5096 struct si_mc_reg_table *table)
5097{
5098 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
5099 u8 i, j, k;
5100 u32 temp_reg;
5101
5102 for (i = 0, j = table->last; i < table->last; i++) {
5103 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5104 return -EINVAL;
5105 switch (table->mc_reg_address[i].s1 << 2) {
5106 case MC_SEQ_MISC1:
5107 temp_reg = RREG32(MC_PMG_CMD_EMRS);
5108 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
5109 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
5110 for (k = 0; k < table->num_entries; k++)
5111 table->mc_reg_table_entry[k].mc_data[j] =
5112 ((temp_reg & 0xffff0000)) |
5113 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
5114 j++;
5115 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5116 return -EINVAL;
5117
5118 temp_reg = RREG32(MC_PMG_CMD_MRS);
5119 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
5120 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
5121 for (k = 0; k < table->num_entries; k++) {
5122 table->mc_reg_table_entry[k].mc_data[j] =
5123 (temp_reg & 0xffff0000) |
5124 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
5125 if (!pi->mem_gddr5)
5126 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
5127 }
5128 j++;
5129 if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5130 return -EINVAL;
5131
5132 if (!pi->mem_gddr5) {
5133 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
5134 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
5135 for (k = 0; k < table->num_entries; k++)
5136 table->mc_reg_table_entry[k].mc_data[j] =
5137 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
5138 j++;
5139 if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5140 return -EINVAL;
5141 }
5142 break;
5143 case MC_SEQ_RESERVE_M:
5144 temp_reg = RREG32(MC_PMG_CMD_MRS1);
5145 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
5146 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
5147 for(k = 0; k < table->num_entries; k++)
5148 table->mc_reg_table_entry[k].mc_data[j] =
5149 (temp_reg & 0xffff0000) |
5150 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
5151 j++;
5152 if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5153 return -EINVAL;
5154 break;
5155 default:
5156 break;
5157 }
5158 }
5159
5160 table->last = j;
5161
5162 return 0;
5163}
5164
5165static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
5166{
5167 bool result = true;
5168
5169 switch (in_reg) {
5170 case MC_SEQ_RAS_TIMING >> 2:
5171 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
5172 break;
5173 case MC_SEQ_CAS_TIMING >> 2:
5174 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
5175 break;
5176 case MC_SEQ_MISC_TIMING >> 2:
5177 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
5178 break;
5179 case MC_SEQ_MISC_TIMING2 >> 2:
5180 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
5181 break;
5182 case MC_SEQ_RD_CTL_D0 >> 2:
5183 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
5184 break;
5185 case MC_SEQ_RD_CTL_D1 >> 2:
5186 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
5187 break;
5188 case MC_SEQ_WR_CTL_D0 >> 2:
5189 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
5190 break;
5191 case MC_SEQ_WR_CTL_D1 >> 2:
5192 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
5193 break;
5194 case MC_PMG_CMD_EMRS >> 2:
5195 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
5196 break;
5197 case MC_PMG_CMD_MRS >> 2:
5198 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
5199 break;
5200 case MC_PMG_CMD_MRS1 >> 2:
5201 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
5202 break;
5203 case MC_SEQ_PMG_TIMING >> 2:
5204 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
5205 break;
5206 case MC_PMG_CMD_MRS2 >> 2:
5207 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
5208 break;
5209 case MC_SEQ_WR_CTL_2 >> 2:
5210 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
5211 break;
5212 default:
5213 result = false;
5214 break;
5215 }
5216
5217 return result;
5218}
5219
5220static void si_set_valid_flag(struct si_mc_reg_table *table)
5221{
5222 u8 i, j;
5223
5224 for (i = 0; i < table->last; i++) {
5225 for (j = 1; j < table->num_entries; j++) {
5226 if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
5227 table->valid_flag |= 1 << i;
5228 break;
5229 }
5230 }
5231 }
5232}
5233
5234static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
5235{
5236 u32 i;
5237 u16 address;
5238
5239 for (i = 0; i < table->last; i++)
5240 table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
5241 address : table->mc_reg_address[i].s1;
5242
5243}
5244
5245static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
5246 struct si_mc_reg_table *si_table)
5247{
5248 u8 i, j;
5249
5250 if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5251 return -EINVAL;
5252 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
5253 return -EINVAL;
5254
5255 for (i = 0; i < table->last; i++)
5256 si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
5257 si_table->last = table->last;
5258
5259 for (i = 0; i < table->num_entries; i++) {
5260 si_table->mc_reg_table_entry[i].mclk_max =
5261 table->mc_reg_table_entry[i].mclk_max;
5262 for (j = 0; j < table->last; j++) {
5263 si_table->mc_reg_table_entry[i].mc_data[j] =
5264 table->mc_reg_table_entry[i].mc_data[j];
5265 }
5266 }
5267 si_table->num_entries = table->num_entries;
5268
5269 return 0;
5270}
5271
/*
 * Build the driver's MC (memory controller) register table used for
 * dynamic AC timing:
 *  1. seed the _LP shadow registers from the live primary registers,
 *  2. read the VBIOS MC register table for the installed memory module,
 *  3. copy it into the SI representation, fill in the shadow (s0)
 *     offsets, append the derived "special" registers, and flag the
 *     columns that vary between entries.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * any of the table-building steps.
 */
static int si_initialize_mc_reg_table(struct radeon_device *rdev)
{
	struct si_power_info *si_pi = si_get_pi(rdev);
	struct atom_mc_reg_table *table;
	struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(rdev);
	int ret;

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* Initialize each _LP shadow register with the current hardware
	 * value of its primary counterpart. */
	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));

	/* Parse the VBIOS MC register table for this memory module. */
	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
	if (ret)
		goto init_mc_done;

	ret = si_copy_vbios_mc_reg_table(table, si_table);
	if (ret)
		goto init_mc_done;

	si_set_s0_mc_reg_index(si_table);

	ret = si_set_mc_special_registers(rdev, si_table);
	if (ret)
		goto init_mc_done;

	si_set_valid_flag(si_table);

init_mc_done:
	kfree(table);

	return ret;

}
5321
5322static void si_populate_mc_reg_addresses(struct radeon_device *rdev,
5323 SMC_SIslands_MCRegisters *mc_reg_table)
5324{
5325 struct si_power_info *si_pi = si_get_pi(rdev);
5326 u32 i, j;
5327
5328 for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
5329 if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
5330 if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
5331 break;
5332 mc_reg_table->address[i].s0 =
5333 cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
5334 mc_reg_table->address[i].s1 =
5335 cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
5336 i++;
5337 }
5338 }
5339 mc_reg_table->last = (u8)i;
5340}
5341
5342static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
5343 SMC_SIslands_MCRegisterSet *data,
5344 u32 num_entries, u32 valid_flag)
5345{
5346 u32 i, j;
5347
5348 for(i = 0, j = 0; j < num_entries; j++) {
5349 if (valid_flag & (1 << j)) {
5350 data->value[i] = cpu_to_be32(entry->mc_data[j]);
5351 i++;
5352 }
5353 }
5354}
5355
5356static void si_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
5357 struct rv7xx_pl *pl,
5358 SMC_SIslands_MCRegisterSet *mc_reg_table_data)
5359{
5360 struct si_power_info *si_pi = si_get_pi(rdev);
5361 u32 i = 0;
5362
5363 for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
5364 if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
5365 break;
5366 }
5367
5368 if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
5369 --i;
5370
5371 si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
5372 mc_reg_table_data, si_pi->mc_reg_table.last,
5373 si_pi->mc_reg_table.valid_flag);
5374}
5375
5376static void si_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
5377 struct radeon_ps *radeon_state,
5378 SMC_SIslands_MCRegisters *mc_reg_table)
5379{
5380 struct ni_ps *state = ni_get_ps(radeon_state);
5381 int i;
5382
5383 for (i = 0; i < state->performance_level_count; i++) {
5384 si_convert_mc_reg_table_entry_to_smc(rdev,
5385 &state->performance_levels[i],
5386 &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
5387 }
5388}
5389
/*
 * Build the complete SMC MC register table for the boot state and copy
 * it into SMC RAM: the address list, then the initial, ACPI, ULV and
 * per-driver-state data slots.
 *
 * Returns 0 on success or the error from si_copy_bytes_to_smc().
 */
static int si_populate_mc_reg_table(struct radeon_device *rdev,
				    struct radeon_ps *radeon_boot_state)
{
	struct ni_ps *boot_state = ni_get_ps(radeon_boot_state);
	struct si_power_info *si_pi = si_get_pi(rdev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;

	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));

	si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_seq_index, 1);

	si_populate_mc_reg_addresses(rdev, smc_mc_reg_table);

	/* Initial slot: registers for the boot state's first level. */
	si_convert_mc_reg_table_entry_to_smc(rdev, &boot_state->performance_levels[0],
					     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);

	/* ACPI slot: lowest (first) MC register table entry. */
	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
				&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
				si_pi->mc_reg_table.last,
				si_pi->mc_reg_table.valid_flag);

	/* ULV slot: the ULV level if usable, otherwise the lowest entry. */
	if (ulv->supported && ulv->pl.vddc != 0)
		si_convert_mc_reg_table_entry_to_smc(rdev, &ulv->pl,
						     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
	else
		si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
					&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
					si_pi->mc_reg_table.last,
					si_pi->mc_reg_table.valid_flag);

	si_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, smc_mc_reg_table);

	return si_copy_bytes_to_smc(rdev, si_pi->mc_reg_table_start,
				    (u8 *)smc_mc_reg_table,
				    sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
}
5427
5428static int si_upload_mc_reg_table(struct radeon_device *rdev,
5429 struct radeon_ps *radeon_new_state)
5430{
5431 struct ni_ps *new_state = ni_get_ps(radeon_new_state);
5432 struct si_power_info *si_pi = si_get_pi(rdev);
5433 u32 address = si_pi->mc_reg_table_start +
5434 offsetof(SMC_SIslands_MCRegisters,
5435 data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
5436 SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
5437
5438 memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
5439
5440 si_convert_mc_reg_table_to_smc(rdev, radeon_new_state, smc_mc_reg_table);
5441
5442
5443 return si_copy_bytes_to_smc(rdev, address,
5444 (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
5445 sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
5446 si_pi->sram_end);
5447
5448}
5449
5450static void si_enable_voltage_control(struct radeon_device *rdev, bool enable)
5451{
5452 if (enable)
5453 WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
5454 else
5455 WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
5456}
5457
5458static enum radeon_pcie_gen si_get_maximum_link_speed(struct radeon_device *rdev,
5459 struct radeon_ps *radeon_state)
5460{
5461 struct ni_ps *state = ni_get_ps(radeon_state);
5462 int i;
5463 u16 pcie_speed, max_speed = 0;
5464
5465 for (i = 0; i < state->performance_level_count; i++) {
5466 pcie_speed = state->performance_levels[i].pcie_gen;
5467 if (max_speed < pcie_speed)
5468 max_speed = pcie_speed;
5469 }
5470 return max_speed;
5471}
5472
5473static u16 si_get_current_pcie_speed(struct radeon_device *rdev)
5474{
5475 u32 speed_cntl;
5476
5477 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
5478 speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
5479
5480 return (u16)speed_cntl;
5481}
5482
/*
 * If the new state needs a faster PCIe link than the current one, ask
 * the platform (via ACPI, when available) to raise the link speed before
 * the state switch.  On failure the desired gen is recorded in
 * force_pcie_gen so the switch is retried at a speed we can reach.
 * If the new state needs a slower link, only flag that a notification
 * is required after the switch (see the _after_state_change helper).
 */
static void si_request_link_speed_change_before_state_change(struct radeon_device *rdev,
							     struct radeon_ps *radeon_new_state,
							     struct radeon_ps *radeon_current_state)
{
	struct si_power_info *si_pi = si_get_pi(rdev);
	enum radeon_pcie_gen target_link_speed = si_get_maximum_link_speed(rdev, radeon_new_state);
	enum radeon_pcie_gen current_link_speed;

	if (si_pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
		current_link_speed = si_get_maximum_link_speed(rdev, radeon_current_state);
	else
		current_link_speed = si_pi->force_pcie_gen;

	si_pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
	si_pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#if defined(CONFIG_ACPI)
		case RADEON_PCIE_GEN3:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			/* gen3 refused: fall back to gen2, and fall through
			 * to request it unless we are already there. */
			si_pi->force_pcie_gen = RADEON_PCIE_GEN2;
			if (current_link_speed == RADEON_PCIE_GEN2)
				break;
			/* fall through */
		case RADEON_PCIE_GEN2:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
			/* fall through */
#endif
		default:
			/* Request failed (or no ACPI): stay at the speed the
			 * link is running at now. */
			si_pi->force_pcie_gen = si_get_current_pcie_speed(rdev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			si_pi->pspp_notify_required = true;
	}
}
5520
5521static void si_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
5522 struct radeon_ps *radeon_new_state,
5523 struct radeon_ps *radeon_current_state)
5524{
5525 struct si_power_info *si_pi = si_get_pi(rdev);
5526 enum radeon_pcie_gen target_link_speed = si_get_maximum_link_speed(rdev, radeon_new_state);
5527 u8 request;
5528
5529 if (si_pi->pspp_notify_required) {
5530 if (target_link_speed == RADEON_PCIE_GEN3)
5531 request = PCIE_PERF_REQ_PECI_GEN3;
5532 else if (target_link_speed == RADEON_PCIE_GEN2)
5533 request = PCIE_PERF_REQ_PECI_GEN2;
5534 else
5535 request = PCIE_PERF_REQ_PECI_GEN1;
5536
5537 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5538 (si_get_current_pcie_speed(rdev) > 0))
5539 return;
5540
5541#if defined(CONFIG_ACPI)
5542 radeon_acpi_pcie_performance_request(rdev, request, false);
5543#endif
5544 }
5545}
5546
#if 0
/*
 * Currently compiled out / unused: ask the SMC to allow or veto sclk
 * deep sleep.  When @ds_status_on is true the throttle override is
 * cancelled (deep sleep allowed); otherwise the override is asserted.
 * @count_write is unused.  Returns 0 on success or when deep sleep is
 * not enabled, -EINVAL if the SMC rejects the message.
 */
static int si_ds_request(struct radeon_device *rdev,
			 bool ds_status_on, u32 count_write)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	if (eg_pi->sclk_deep_sleep) {
		if (ds_status_on)
			return (si_send_msg_to_smc(rdev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
				PPSMC_Result_OK) ?
				0 : -EINVAL;
		else
			return (si_send_msg_to_smc(rdev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
				PPSMC_Result_OK) ? 0 : -EINVAL;
	}
	return 0;
}
#endif
5565
5566static void si_set_max_cu_value(struct radeon_device *rdev)
5567{
5568 struct si_power_info *si_pi = si_get_pi(rdev);
5569
5570 if (rdev->family == CHIP_VERDE) {
5571 switch (rdev->pdev->device) {
5572 case 0x6820:
5573 case 0x6825:
5574 case 0x6821:
5575 case 0x6823:
5576 case 0x6827:
5577 si_pi->max_cu = 10;
5578 break;
5579 case 0x682D:
5580 case 0x6824:
5581 case 0x682F:
5582 case 0x6826:
5583 si_pi->max_cu = 8;
5584 break;
5585 case 0x6828:
5586 case 0x6830:
5587 case 0x6831:
5588 case 0x6838:
5589 case 0x6839:
5590 case 0x683D:
5591 si_pi->max_cu = 10;
5592 break;
5593 case 0x683B:
5594 case 0x683F:
5595 case 0x6829:
5596 si_pi->max_cu = 8;
5597 break;
5598 default:
5599 si_pi->max_cu = 0;
5600 break;
5601 }
5602 } else {
5603 si_pi->max_cu = 0;
5604 }
5605}
5606
5607static int si_patch_single_dependency_table_based_on_leakage(struct radeon_device *rdev,
5608 struct radeon_clock_voltage_dependency_table *table)
5609{
5610 u32 i;
5611 int j;
5612 u16 leakage_voltage;
5613
5614 if (table) {
5615 for (i = 0; i < table->count; i++) {
5616 switch (si_get_leakage_voltage_from_leakage_index(rdev,
5617 table->entries[i].v,
5618 &leakage_voltage)) {
5619 case 0:
5620 table->entries[i].v = leakage_voltage;
5621 break;
5622 case -EAGAIN:
5623 return -EINVAL;
5624 case -EINVAL:
5625 default:
5626 break;
5627 }
5628 }
5629
5630 for (j = (table->count - 2); j >= 0; j--) {
5631 table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
5632 table->entries[j].v : table->entries[j + 1].v;
5633 }
5634 }
5635 return 0;
5636}
5637
5638static int si_patch_dependency_tables_based_on_leakage(struct radeon_device *rdev)
5639{
5640 int ret = 0;
5641
5642 ret = si_patch_single_dependency_table_based_on_leakage(rdev,
5643 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5644 ret = si_patch_single_dependency_table_based_on_leakage(rdev,
5645 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5646 ret = si_patch_single_dependency_table_based_on_leakage(rdev,
5647 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5648 return ret;
5649}
5650
5651static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
5652 struct radeon_ps *radeon_new_state,
5653 struct radeon_ps *radeon_current_state)
5654{
5655 u32 lane_width;
5656 u32 new_lane_width =
5657 (radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
5658 u32 current_lane_width =
5659 (radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
5660
5661 if (new_lane_width != current_lane_width) {
5662 radeon_set_pcie_lanes(rdev, new_lane_width);
5663 lane_width = radeon_get_pcie_lanes(rdev);
5664 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
5665 }
5666}
5667
/*
 * One-time ASIC-level DPM setup: detect the memory type, snapshot the
 * boot-time clock registers, and enable ACPI power management.
 */
void si_dpm_setup_asic(struct radeon_device *rdev)
{
	rv770_get_memory_type(rdev);
	si_read_clock_registers(rdev);
	si_enable_acpi_power_management(rdev);
}
5674
5675static int si_set_thermal_temperature_range(struct radeon_device *rdev,
5676 int min_temp, int max_temp)
5677{
5678 int low_temp = 0 * 1000;
5679 int high_temp = 255 * 1000;
5680
5681 if (low_temp < min_temp)
5682 low_temp = min_temp;
5683 if (high_temp > max_temp)
5684 high_temp = max_temp;
5685 if (high_temp < low_temp) {
5686 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
5687 return -EINVAL;
5688 }
5689
5690 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
5691 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
5692 WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
5693
5694 rdev->pm.dpm.thermal.min_temp = low_temp;
5695 rdev->pm.dpm.thermal.max_temp = high_temp;
5696
5697 return 0;
5698}
5699
/*
 * Bring up dynamic power management: configure voltage/AC-timing/spread
 * spectrum support, upload and start the SMC firmware, populate all SMC
 * tables for the boot state, and finally start DPM and the thermal
 * interrupt.  The step order follows the SMC programming sequence and
 * must not be rearranged.
 *
 * Returns 0 on success; -EINVAL if the SMC is already running; otherwise
 * the error of the first failing step.
 */
int si_dpm_enable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	if (si_is_smc_running(rdev))
		return -EINVAL;
	if (pi->voltage_control)
		si_enable_voltage_control(rdev, true);
	if (pi->mvdd_control)
		si_get_mvdd_configuration(rdev);
	if (pi->voltage_control) {
		ret = si_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("si_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (eg_pi->dynamic_ac_timing) {
		/* Non-fatal: fall back to static AC timing on failure. */
		ret = si_initialize_mc_reg_table(rdev);
		if (ret)
			eg_pi->dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		si_enable_spread_spectrum(rdev, true);
	if (pi->thermal_protection)
		si_enable_thermal_protection(rdev, true);
	si_setup_bsp(rdev);
	si_program_git(rdev);
	si_program_tp(rdev);
	si_program_tpp(rdev);
	si_program_sstp(rdev);
	si_enable_display_gap(rdev);
	si_program_vc(rdev);
	ret = si_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("si_upload_firmware failed\n");
		return ret;
	}
	ret = si_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("si_process_firmware_header failed\n");
		return ret;
	}
	ret = si_initial_switch_from_arb_f0_to_f1(rdev);
	if (ret) {
		DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = si_init_smc_table(rdev);
	if (ret) {
		DRM_ERROR("si_init_smc_table failed\n");
		return ret;
	}
	ret = si_init_smc_spll_table(rdev);
	if (ret) {
		DRM_ERROR("si_init_smc_spll_table failed\n");
		return ret;
	}
	ret = si_init_arb_table_index(rdev);
	if (ret) {
		DRM_ERROR("si_init_arb_table_index failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = si_populate_mc_reg_table(rdev, boot_ps);
		if (ret) {
			DRM_ERROR("si_populate_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = si_initialize_smc_cac_tables(rdev);
	if (ret) {
		DRM_ERROR("si_initialize_smc_cac_tables failed\n");
		return ret;
	}
	ret = si_initialize_hardware_cac_manager(rdev);
	if (ret) {
		DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
		return ret;
	}
	ret = si_initialize_smc_dte_tables(rdev);
	if (ret) {
		DRM_ERROR("si_initialize_smc_dte_tables failed\n");
		return ret;
	}
	ret = si_populate_smc_tdp_limits(rdev, boot_ps);
	if (ret) {
		DRM_ERROR("si_populate_smc_tdp_limits failed\n");
		return ret;
	}
	ret = si_populate_smc_tdp_limits_2(rdev, boot_ps);
	if (ret) {
		DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
		return ret;
	}
	si_program_response_times(rdev);
	si_program_ds_registers(rdev);
	si_dpm_start_smc(rdev);
	ret = si_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("si_notify_smc_display_change failed\n");
		return ret;
	}
	si_enable_sclk_control(rdev, true);
	si_start_dpm(rdev);

	/* Hook up the thermal interrupt when the internal sensor is usable. */
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		PPSMC_Result result;

		ret = si_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
		result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);

		if (result != PPSMC_Result_OK)
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
	}

	si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ni_update_current_ps(rdev, boot_ps);

	return 0;
}
5830
/*
 * Tear down dynamic power management: disable ULV, power containment,
 * CAC and spread spectrum; stop DPM and the SMC; and return the memory
 * arbiter to its default (F0) set.  No-op if the SMC is not running.
 * The teardown order mirrors si_dpm_enable() in reverse and must not be
 * rearranged.
 */
void si_dpm_disable(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	if (!si_is_smc_running(rdev))
		return;
	si_disable_ulv(rdev);
	si_clear_vc(rdev);
	if (pi->thermal_protection)
		si_enable_thermal_protection(rdev, false);
	si_enable_power_containment(rdev, boot_ps, false);
	si_enable_smc_cac(rdev, boot_ps, false);
	si_enable_spread_spectrum(rdev, false);
	si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	si_stop_dpm(rdev);
	si_reset_to_default(rdev);
	si_dpm_stop_smc(rdev);
	si_force_switch_to_arb_f0(rdev);

	/* Fall back to tracking the boot state as current. */
	ni_update_current_ps(rdev, boot_ps);
}
5853
/*
 * Prepare for a power state switch: copy the requested state into the
 * driver's working copy (eg_pi->requested_rps via ni_update_requested_ps)
 * and clamp/adjust it to the hardware limits.  Always returns 0.
 */
int si_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	ni_update_requested_ps(rdev, new_ps);

	si_apply_state_adjust_rules(rdev, &eg_pi->requested_rps);

	return 0;
}
5866
5867static int si_power_control_set_level(struct radeon_device *rdev)
5868{
5869 struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
5870 int ret;
5871
5872 ret = si_restrict_performance_levels_before_switch(rdev);
5873 if (ret)
5874 return ret;
5875 ret = si_halt_smc(rdev);
5876 if (ret)
5877 return ret;
5878 ret = si_populate_smc_tdp_limits(rdev, new_ps);
5879 if (ret)
5880 return ret;
5881 ret = si_populate_smc_tdp_limits_2(rdev, new_ps);
5882 if (ret)
5883 return ret;
5884 ret = si_resume_smc(rdev);
5885 if (ret)
5886 return ret;
5887 ret = si_set_sw_state(rdev);
5888 if (ret)
5889 return ret;
5890 return 0;
5891}
5892
/*
 * Switch the hardware from the current power state to the requested one.
 * Sequence: drop out of ULV, optionally raise the PCIe link, halt the
 * SMC while the new state/ULV/MC tables are uploaded, re-program memory
 * timings and lane width, resume the SMC and commit the state, then
 * re-enable CAC/power containment and apply TDP limits.  The order
 * follows the SMC programming sequence and must not be rearranged.
 *
 * Returns 0 on success or the error of the first failing step (each is
 * logged via DRM_ERROR).
 */
int si_dpm_set_power_state(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct radeon_ps *new_ps = &eg_pi->requested_rps;
	struct radeon_ps *old_ps = &eg_pi->current_rps;
	int ret;

	ret = si_disable_ulv(rdev);
	if (ret) {
		DRM_ERROR("si_disable_ulv failed\n");
		return ret;
	}
	ret = si_restrict_performance_levels_before_switch(rdev);
	if (ret) {
		DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
		return ret;
	}
	if (eg_pi->pcie_performance_request)
		si_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
	ni_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
	ret = si_enable_power_containment(rdev, new_ps, false);
	if (ret) {
		DRM_ERROR("si_enable_power_containment failed\n");
		return ret;
	}
	ret = si_enable_smc_cac(rdev, new_ps, false);
	if (ret) {
		DRM_ERROR("si_enable_smc_cac failed\n");
		return ret;
	}
	/* SMC must be halted while the state tables are rewritten. */
	ret = si_halt_smc(rdev);
	if (ret) {
		DRM_ERROR("si_halt_smc failed\n");
		return ret;
	}
	ret = si_upload_sw_state(rdev, new_ps);
	if (ret) {
		DRM_ERROR("si_upload_sw_state failed\n");
		return ret;
	}
	ret = si_upload_smc_data(rdev);
	if (ret) {
		DRM_ERROR("si_upload_smc_data failed\n");
		return ret;
	}
	ret = si_upload_ulv_state(rdev);
	if (ret) {
		DRM_ERROR("si_upload_ulv_state failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = si_upload_mc_reg_table(rdev, new_ps);
		if (ret) {
			DRM_ERROR("si_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = si_program_memory_timing_parameters(rdev, new_ps);
	if (ret) {
		DRM_ERROR("si_program_memory_timing_parameters failed\n");
		return ret;
	}
	si_set_pcie_lane_width_in_smc(rdev, new_ps, old_ps);

	ret = si_resume_smc(rdev);
	if (ret) {
		DRM_ERROR("si_resume_smc failed\n");
		return ret;
	}
	ret = si_set_sw_state(rdev);
	if (ret) {
		DRM_ERROR("si_set_sw_state failed\n");
		return ret;
	}
	ni_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
	if (eg_pi->pcie_performance_request)
		si_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
	ret = si_set_power_state_conditionally_enable_ulv(rdev, new_ps);
	if (ret) {
		DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
		return ret;
	}
	ret = si_enable_smc_cac(rdev, new_ps, true);
	if (ret) {
		DRM_ERROR("si_enable_smc_cac failed\n");
		return ret;
	}
	ret = si_enable_power_containment(rdev, new_ps, true);
	if (ret) {
		DRM_ERROR("si_enable_power_containment failed\n");
		return ret;
	}

	ret = si_power_control_set_level(rdev);
	if (ret) {
		DRM_ERROR("si_power_control_set_level failed\n");
		return ret;
	}

#if 0
	/* XXX */
	ret = si_unrestrict_performance_levels_after_switch(rdev);
	if (ret) {
		DRM_ERROR("si_unrestrict_performance_levels_after_switch failed\n");
		return ret;
	}
#endif

	return 0;
}
6003
6004void si_dpm_post_set_power_state(struct radeon_device *rdev)
6005{
6006 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
6007 struct radeon_ps *new_ps = &eg_pi->requested_rps;
6008
6009 ni_update_current_ps(rdev, new_ps);
6010}
6011
6012
/*
 * Return the ASIC to its boot power state: restrict performance levels,
 * leave ULV, and switch the SMC to the boot state.
 */
void si_dpm_reset_asic(struct radeon_device *rdev)
{
	si_restrict_performance_levels_before_switch(rdev);
	si_disable_ulv(rdev);
	si_set_boot_state(rdev);
}
6019
/*
 * React to a display configuration change by re-programming the display
 * gap parameters the SMC uses for mclk switching.
 */
void si_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	si_program_display_gap(rdev);
}
6024
/* Overlay of every PowerPlay info table revision found in the VBIOS;
 * the actual layout is selected by the table's frev/crev. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Overlay of the per-ASIC-family clock info entries; SI parsing below
 * uses the .si member. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
};

/* Overlay of the two PPLib power state layouts (v1 and v2). */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
6046
/*
 * Parse the non-clock portion of a PPLib power state: capability flags,
 * classification, and UVD clocks.  Tables newer than VER1 carry explicit
 * VCLK/DCLK values; older tables fall back to the RV770 defaults for UVD
 * states and 0 otherwise.  Also records the boot and UVD states on rdev.
 */
static void si_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
		rps->vclk = RV770_DEFAULT_VCLK_FREQ;
		rps->dclk = RV770_DEFAULT_DCLK_FREQ;
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		rdev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}
6072
/*
 * Parse one SI clock-info entry into performance level @index of the
 * state: engine/memory clocks, voltages, flags and PCIe gen.  Also
 * patches leakage-index voltages, records ACPI/ULV levels, tracks the
 * min/max VDDC seen, overrides the boot state with the default clocks
 * and voltages, and captures the AC performance limits.
 */
static void si_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct si_power_info *si_pi = si_get_pi(rdev);
	struct ni_ps *ps = ni_get_ps(rps);
	u16 leakage_voltage;
	struct rv7xx_pl *pl = &ps->performance_levels[index];
	int ret;

	ps->performance_level_count = index + 1;

	/* Clocks are split into a 16-bit low part and an 8-bit high part. */
	pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
	pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
	pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;

	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
	pl->flags = le32_to_cpu(clock_info->si.ulFlags);
	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
						 si_pi->sys_pcie_mask,
						 si_pi->boot_pcie_gen,
						 clock_info->si.ucPCIEGen);

	/* patch up vddc if necessary */
	ret = si_get_leakage_voltage_from_leakage_index(rdev, pl->vddc,
							&leakage_voltage);
	if (ret == 0)
		pl->vddc = leakage_voltage;

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_vddc = pl->vddc;
		eg_pi->acpi_vddci = pl->vddci;
		si_pi->acpi_pcie_gen = pl->pcie_gen;
	}

	/* The first level of a ULV-classified state defines the ULV level. */
	if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
	    index == 0) {
		/* XXX disable for A0 tahiti */
		si_pi->ulv.supported = true;
		si_pi->ulv.pl = *pl;
		si_pi->ulv.one_pcie_lane_in_ulv = false;
		si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
		si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
		si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
	}

	if (pi->min_vddc_in_table > pl->vddc)
		pi->min_vddc_in_table = pl->vddc;

	if (pi->max_vddc_in_table < pl->vddc)
		pi->max_vddc_in_table = pl->vddc;

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		u16 vddc, vddci, mvdd;
		radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
		pl->mclk = rdev->clock.default_mclk;
		pl->sclk = rdev->clock.default_sclk;
		pl->vddc = vddc;
		pl->vddci = vddci;
		si_pi->mvdd_bootup_value = mvdd;
	}

	/* The "performance" UI state defines the AC clock/voltage limits. */
	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
	}
}
6148
6149static int si_parse_power_table(struct radeon_device *rdev)
6150{
6151 struct radeon_mode_info *mode_info = &rdev->mode_info;
6152 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
6153 union pplib_power_state *power_state;
6154 int i, j, k, non_clock_array_index, clock_array_index;
6155 union pplib_clock_info *clock_info;
6156 struct _StateArray *state_array;
6157 struct _ClockInfoArray *clock_info_array;
6158 struct _NonClockInfoArray *non_clock_info_array;
6159 union power_info *power_info;
6160 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
6161 u16 data_offset;
6162 u8 frev, crev;
6163 u8 *power_state_offset;
6164 struct ni_ps *ps;
6165
6166 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
6167 &frev, &crev, &data_offset))
6168 return -EINVAL;
6169 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
6170
6171 state_array = (struct _StateArray *)
6172 (mode_info->atom_context->bios + data_offset +
6173 le16_to_cpu(power_info->pplib.usStateArrayOffset));
6174 clock_info_array = (struct _ClockInfoArray *)
6175 (mode_info->atom_context->bios + data_offset +
6176 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
6177 non_clock_info_array = (struct _NonClockInfoArray *)
6178 (mode_info->atom_context->bios + data_offset +
6179 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
6180
6181 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
6182 state_array->ucNumEntries, GFP_KERNEL);
6183 if (!rdev->pm.dpm.ps)
6184 return -ENOMEM;
6185 power_state_offset = (u8 *)state_array->states;
6186 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
6187 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
6188 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
6189 for (i = 0; i < state_array->ucNumEntries; i++) {
6190 power_state = (union pplib_power_state *)power_state_offset;
6191 non_clock_array_index = power_state->v2.nonClockInfoIndex;
6192 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
6193 &non_clock_info_array->nonClockInfo[non_clock_array_index];
6194 if (!rdev->pm.power_state[i].clock_info)
6195 return -EINVAL;
6196 ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL);
6197 if (ps == NULL) {
6198 kfree(rdev->pm.dpm.ps);
6199 return -ENOMEM;
6200 }
6201 rdev->pm.dpm.ps[i].ps_priv = ps;
6202 si_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
6203 non_clock_info,
6204 non_clock_info_array->ucEntrySize);
6205 k = 0;
6206 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
6207 clock_array_index = power_state->v2.clockInfoIndex[j];
6208 if (clock_array_index >= clock_info_array->ucNumEntries)
6209 continue;
6210 if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
6211 break;
6212 clock_info = (union pplib_clock_info *)
6213 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
6214 si_parse_pplib_clock_info(rdev,
6215 &rdev->pm.dpm.ps[i], k,
6216 clock_info);
6217 k++;
6218 }
6219 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
6220 }
6221 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
6222 return 0;
6223}
6224
6225int si_dpm_init(struct radeon_device *rdev)
6226{
6227 struct rv7xx_power_info *pi;
6228 struct evergreen_power_info *eg_pi;
6229 struct ni_power_info *ni_pi;
6230 struct si_power_info *si_pi;
6231 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
6232 u16 data_offset, size;
6233 u8 frev, crev;
6234 struct atom_clock_dividers dividers;
6235 int ret;
6236 u32 mask;
6237
6238 si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
6239 if (si_pi == NULL)
6240 return -ENOMEM;
6241 rdev->pm.dpm.priv = si_pi;
6242 ni_pi = &si_pi->ni;
6243 eg_pi = &ni_pi->eg;
6244 pi = &eg_pi->rv7xx;
6245
6246 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
6247 if (ret)
6248 si_pi->sys_pcie_mask = 0;
6249 else
6250 si_pi->sys_pcie_mask = mask;
6251 si_pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
6252 si_pi->boot_pcie_gen = si_get_current_pcie_speed(rdev);
6253
6254 si_set_max_cu_value(rdev);
6255
6256 rv770_get_max_vddc(rdev);
6257 si_get_leakage_vddc(rdev);
6258 si_patch_dependency_tables_based_on_leakage(rdev);
6259
6260 pi->acpi_vddc = 0;
6261 eg_pi->acpi_vddci = 0;
6262 pi->min_vddc_in_table = 0;
6263 pi->max_vddc_in_table = 0;
6264
6265 ret = si_parse_power_table(rdev);
6266 if (ret)
6267 return ret;
6268 ret = r600_parse_extended_power_table(rdev);
6269 if (ret)
6270 return ret;
6271
6272 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
6273 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
6274 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
6275 r600_free_extended_power_table(rdev);
6276 return -ENOMEM;
6277 }
6278 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
6279 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
6280 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
6281 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
6282 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
6283 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
6284 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
6285 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
6286 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
6287
6288 if (rdev->pm.dpm.voltage_response_time == 0)
6289 rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
6290 if (rdev->pm.dpm.backbias_response_time == 0)
6291 rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
6292
6293 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
6294 0, false, &dividers);
6295 if (ret)
6296 pi->ref_div = dividers.ref_div + 1;
6297 else
6298 pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
6299
6300 eg_pi->smu_uvd_hs = false;
6301
6302 pi->mclk_strobe_mode_threshold = 40000;
6303 if (si_is_special_1gb_platform(rdev))
6304 pi->mclk_stutter_mode_threshold = 0;
6305 else
6306 pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
6307 pi->mclk_edc_enable_threshold = 40000;
6308 eg_pi->mclk_edc_wr_enable_threshold = 40000;
6309
6310 ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
6311
6312 pi->voltage_control =
6313 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_GPIO_LUT);
6314
6315 pi->mvdd_control =
6316 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, VOLTAGE_OBJ_GPIO_LUT);
6317
6318 eg_pi->vddci_control =
6319 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, VOLTAGE_OBJ_GPIO_LUT);
6320
6321 si_pi->vddc_phase_shed_control =
6322 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);
6323
6324 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
6325 &frev, &crev, &data_offset)) {
6326 pi->sclk_ss = true;
6327 pi->mclk_ss = true;
6328 pi->dynamic_ss = true;
6329 } else {
6330 pi->sclk_ss = false;
6331 pi->mclk_ss = false;
6332 pi->dynamic_ss = true;
6333 }
6334
6335 pi->asi = RV770_ASI_DFLT;
6336 pi->pasi = CYPRESS_HASI_DFLT;
6337 pi->vrc = SISLANDS_VRC_DFLT;
6338
6339 pi->gfx_clock_gating = true;
6340
6341 eg_pi->sclk_deep_sleep = true;
6342 si_pi->sclk_deep_sleep_above_low = false;
6343
6344 if (pi->gfx_clock_gating &&
6345 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
6346 pi->thermal_protection = true;
6347 else
6348 pi->thermal_protection = false;
6349
6350 eg_pi->dynamic_ac_timing = true;
6351
6352 eg_pi->light_sleep = true;
6353#if defined(CONFIG_ACPI)
6354 eg_pi->pcie_performance_request =
6355 radeon_acpi_is_pcie_performance_request_supported(rdev);
6356#else
6357 eg_pi->pcie_performance_request = false;
6358#endif
6359
6360 si_pi->sram_end = SMC_RAM_END;
6361
6362 rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
6363 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
6364 rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
6365 rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
6366 rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
6367 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
6368 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
6369
6370 si_initialize_powertune_defaults(rdev);
6371
6372 return 0;
6373}
6374
6375void si_dpm_fini(struct radeon_device *rdev)
6376{
6377 int i;
6378
6379 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
6380 kfree(rdev->pm.dpm.ps[i].ps_priv);
6381 }
6382 kfree(rdev->pm.dpm.ps);
6383 kfree(rdev->pm.dpm.priv);
6384 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
6385 r600_free_extended_power_table(rdev);
6386}
6387
diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h
new file mode 100644
index 000000000000..4ce5032cdf49
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_dpm.h
@@ -0,0 +1,227 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __SI_DPM_H__
24#define __SI_DPM_H__
25
26#include "ni_dpm.h"
27#include "sislands_smc.h"
28
/* Addressing mode for a CAC config register entry: direct MMIO register
 * (MMR) or CG indirect (presumably via the SMC indirect space — TODO
 * confirm against the CAC programming code).
 */
enum si_cac_config_reg_type
{
	SISLANDS_CACCONFIG_MMR = 0,
	SISLANDS_CACCONFIG_CGIND,
	SISLANDS_CACCONFIG_MAX
};
35
/* One CAC configuration register entry (offset/mask/shift/value plus the
 * addressing type above); presumably applied as a masked field write —
 * TODO confirm against the consumer.
 */
struct si_cac_config_reg
{
	u32 offset;
	u32 mask;
	u32 shift;
	u32 value;
	enum si_cac_config_reg_type type;
};
44
/* Static PowerTune tuning parameters; si_power_info holds a const
 * pointer to one of these (selected per asic elsewhere).
 */
struct si_powertune_data
{
	u32 cac_window;
	u32 l2_lta_window_size_default;
	u8 lts_truncate_default;
	u8 shift_n_default;
	u8 operating_temp;
	struct ni_leakage_coeffients leakage_coefficients;
	u32 fixed_kt;
	u32 lkge_lut_v0_percent;
	u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
	bool enable_powertune_by_default;
};
58
/* Mutable PowerTune state embedded by value in si_power_info (contrast
 * with the const si_powertune_data pointer there).
 */
struct si_dyn_powertune_data
{
	u32 cac_leakage;
	s32 leakage_minimum_temperature;
	u32 wintime;
	u32 l2_lta_window_size;
	u8 lts_truncate;
	u8 shift_n;
	u8 dc_pwr_value;
	bool disable_uvd_powertune;
};
70
/* DTE (thermal estimation) filter/threshold configuration.  Array sizes
 * come from the SMC interface headers; the semantics of the individual
 * coefficients are SMC-defined and not documented here.
 */
struct si_dte_data
{
	u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
	u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
	u32 k;
	u32 t0;
	u32 max_t;
	u8 window_size;
	u8 temp_select;
	u8 dte_mode;
	u8 tdep_count;
	u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
	u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
	u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
	u32 t_threshold;
	bool enable_dte_by_default;
};
88
/* Snapshot of clock/PLL related register values; the field names mirror
 * the CG_SPLL_* / MPLL_* / DLL_CNTL / MCLK_PWRMGT_CNTL registers defined
 * in sid.h.
 */
struct si_clock_registers {
	u32 cg_spll_func_cntl;
	u32 cg_spll_func_cntl_2;
	u32 cg_spll_func_cntl_3;
	u32 cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2;
	u32 dll_cntl;
	u32 mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl;
	u32 mpll_func_cntl;
	u32 mpll_func_cntl_1;
	u32 mpll_func_cntl_2;
	u32 mpll_ss1;
	u32 mpll_ss2;
};
106
/* One row of memory-controller register values; presumably valid up to
 * @mclk_max (the highest memory clock the row applies to) — TODO confirm.
 */
struct si_mc_reg_entry {
	u32 mclk_max;
	u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
};
111
/* Driver-side MC register table: @last used address slots, @num_entries
 * rows, plus a validity bitmask; presumably staged into the SMC-side
 * smc_mc_reg_table in si_power_info — TODO confirm.
 */
struct si_mc_reg_table {
	u8 last;
	u8 num_entries;
	u16 valid_flag;
	struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
	SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
};
119
120#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0
121#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1
122#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2
123#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 3
124
/* Maps one vbios leakage index to the real voltage it stands for. */
struct si_leakage_voltage_entry
{
	u16 voltage;
	u16 leakage_index;
};
130
131#define SISLANDS_LEAKAGE_INDEX0 0xff01
132#define SISLANDS_MAX_LEAKAGE_COUNT 4
133
/* Table of leakage-index -> voltage mappings, consumed by
 * si_get_leakage_voltage_from_leakage_index() when patching up vddc
 * during power-table parsing.
 */
struct si_leakage_voltage
{
	u16 count;
	struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
};
139
140#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5
141
/* ULV (ultra-low voltage) state parameters; seeded from the lowest
 * performance level of a ULV-classified power state in
 * si_parse_pplib_clock_info(), with the SISLANDS_*_DFLT defaults below.
 */
struct si_ulv_param {
	bool supported;
	u32 cg_ulv_control;
	u32 cg_ulv_parameter;
	u32 volt_change_delay;
	struct rv7xx_pl pl;
	bool one_pcie_lane_in_ulv;
};
150
/* Top-level SI dpm driver state, hung off rdev->pm.dpm.priv (allocated
 * in si_dpm_init()).  It embeds the ni -> evergreen -> rv7xx power-info
 * structs so shared helpers for older generations can operate on a
 * pointer to the leading member — hence "must be first".
 */
struct si_power_info {
	/* must be first! */
	struct ni_power_info ni;
	struct si_clock_registers clock_registers;
	struct si_mc_reg_table mc_reg_table;
	struct atom_voltage_table mvdd_voltage_table;
	struct atom_voltage_table vddc_phase_shed_table;
	struct si_leakage_voltage leakage_voltage;
	/* bootup MVDD, taken from the vbios boot state during table parsing */
	u16 mvdd_bootup_value;
	struct si_ulv_param ulv;
	u32 max_cu;
	/* pcie gen */
	enum radeon_pcie_gen force_pcie_gen;
	enum radeon_pcie_gen boot_pcie_gen;
	enum radeon_pcie_gen acpi_pcie_gen;
	u32 sys_pcie_mask;
	/* flags */
	bool enable_dte;
	bool enable_ppm;
	bool vddc_phase_shed_control;
	bool pspp_notify_required;
	bool sclk_deep_sleep_above_low;
	/* smc offsets */
	u32 sram_end;
	u32 state_table_start;
	u32 soft_regs_start;
	u32 mc_reg_table_start;
	u32 arb_table_start;
	u32 cac_table_start;
	u32 dte_table_start;
	u32 spll_table_start;
	u32 papm_cfg_table_start;
	/* CAC stuff */
	const struct si_cac_config_reg *cac_weights;
	const struct si_cac_config_reg *lcac_config;
	const struct si_cac_config_reg *cac_override;
	const struct si_powertune_data *powertune_data;
	struct si_dyn_powertune_data dyn_powertune_data;
	/* DTE stuff */
	struct si_dte_data dte_data;
	/* scratch structs */
	SMC_SIslands_MCRegisters smc_mc_reg_table;
	SISLANDS_SMC_STATETABLE smc_statetable;
	PP_SIslands_PAPMParameters papm_parm;
};
196
197#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
198#define SISLANDS_ACPI_STATE_ARB_INDEX 1
199#define SISLANDS_ULV_STATE_ARB_INDEX 2
200#define SISLANDS_DRIVER_STATE_ARB_INDEX 3
201
202#define SISLANDS_DPM2_MAX_PULSE_SKIP 256
203
204#define SISLANDS_DPM2_NEAR_TDP_DEC 10
205#define SISLANDS_DPM2_ABOVE_SAFE_INC 5
206#define SISLANDS_DPM2_BELOW_SAFE_INC 20
207
208#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT 80
209
210#define SISLANDS_DPM2_MAXPS_PERCENT_H 99
211#define SISLANDS_DPM2_MAXPS_PERCENT_M 99
212
213#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
214#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER 0x12
215#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
216#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE 0x1E
217#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO 0xF
218
219#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN 10
220
221#define SISLANDS_VRC_DFLT 0xC000B3
222#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT 1687
223#define SISLANDS_CGULVPARAMETER_DFLT 0x00040035
224#define SISLANDS_CGULVCONTROL_DFLT 0x1f007550
225
226
227#endif
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
new file mode 100644
index 000000000000..5f524c0a541e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -0,0 +1,284 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include <linux/firmware.h>
26#include "drmP.h"
27#include "radeon.h"
28#include "sid.h"
29#include "ppsmc.h"
30#include "radeon_ucode.h"
31
32int si_set_smc_sram_address(struct radeon_device *rdev,
33 u32 smc_address, u32 limit)
34{
35 if (smc_address & 3)
36 return -EINVAL;
37 if ((smc_address + 3) > limit)
38 return -EINVAL;
39
40 WREG32(SMC_IND_INDEX_0, smc_address);
41 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
42
43 return 0;
44}
45
46int si_copy_bytes_to_smc(struct radeon_device *rdev,
47 u32 smc_start_address,
48 const u8 *src, u32 byte_count, u32 limit)
49{
50 int ret;
51 u32 data, original_data, addr, extra_shift;
52
53 if (smc_start_address & 3)
54 return -EINVAL;
55 if ((smc_start_address + byte_count) > limit)
56 return -EINVAL;
57
58 addr = smc_start_address;
59
60 while (byte_count >= 4) {
61 /* SMC address space is BE */
62 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
63
64 ret = si_set_smc_sram_address(rdev, addr, limit);
65 if (ret)
66 return ret;
67
68 WREG32(SMC_IND_DATA_0, data);
69
70 src += 4;
71 byte_count -= 4;
72 addr += 4;
73 }
74
75 /* RMW for the final bytes */
76 if (byte_count > 0) {
77 data = 0;
78
79 ret = si_set_smc_sram_address(rdev, addr, limit);
80 if (ret)
81 return ret;
82
83 original_data = RREG32(SMC_IND_DATA_0);
84
85 extra_shift = 8 * (4 - byte_count);
86
87 while (byte_count > 0) {
88 /* SMC address space is BE */
89 data = (data << 8) + *src++;
90 byte_count--;
91 }
92
93 data <<= extra_shift;
94
95 data |= (original_data & ~((~0UL) << extra_shift));
96
97 ret = si_set_smc_sram_address(rdev, addr, limit);
98 if (ret)
99 return ret;
100
101 WREG32(SMC_IND_DATA_0, data);
102 }
103 return 0;
104}
105
106void si_start_smc(struct radeon_device *rdev)
107{
108 u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
109
110 tmp &= ~RST_REG;
111
112 WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
113}
114
115void si_reset_smc(struct radeon_device *rdev)
116{
117 u32 tmp;
118
119 RREG32(CB_CGTT_SCLK_CTRL);
120 RREG32(CB_CGTT_SCLK_CTRL);
121 RREG32(CB_CGTT_SCLK_CTRL);
122 RREG32(CB_CGTT_SCLK_CTRL);
123
124 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
125 tmp |= RST_REG;
126 WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
127}
128
129int si_program_jump_on_start(struct radeon_device *rdev)
130{
131 static u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
132
133 return si_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
134}
135
136void si_stop_smc_clock(struct radeon_device *rdev)
137{
138 u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
139
140 tmp |= CK_DISABLE;
141
142 WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
143}
144
145void si_start_smc_clock(struct radeon_device *rdev)
146{
147 u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
148
149 tmp &= ~CK_DISABLE;
150
151 WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
152}
153
154bool si_is_smc_running(struct radeon_device *rdev)
155{
156 u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
157 u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
158
159 if (!(rst & RST_REG) && !(clk & CK_DISABLE))
160 return true;
161
162 return false;
163}
164
165PPSMC_Result si_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
166{
167 u32 tmp;
168 int i;
169
170 if (!si_is_smc_running(rdev))
171 return PPSMC_Result_Failed;
172
173 WREG32(SMC_MESSAGE_0, msg);
174
175 for (i = 0; i < rdev->usec_timeout; i++) {
176 tmp = RREG32(SMC_RESP_0);
177 if (tmp != 0)
178 break;
179 udelay(1);
180 }
181 tmp = RREG32(SMC_RESP_0);
182
183 return (PPSMC_Result)tmp;
184}
185
186PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev)
187{
188 u32 tmp;
189 int i;
190
191 if (!si_is_smc_running(rdev))
192 return PPSMC_Result_OK;
193
194 for (i = 0; i < rdev->usec_timeout; i++) {
195 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
196 if ((tmp & CKEN) == 0)
197 break;
198 udelay(1);
199 }
200
201 return PPSMC_Result_OK;
202}
203
204int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
205{
206 u32 ucode_start_address;
207 u32 ucode_size;
208 const u8 *src;
209 u32 data;
210
211 if (!rdev->smc_fw)
212 return -EINVAL;
213
214 switch (rdev->family) {
215 case CHIP_TAHITI:
216 ucode_start_address = TAHITI_SMC_UCODE_START;
217 ucode_size = TAHITI_SMC_UCODE_SIZE;
218 break;
219 case CHIP_PITCAIRN:
220 ucode_start_address = PITCAIRN_SMC_UCODE_START;
221 ucode_size = PITCAIRN_SMC_UCODE_SIZE;
222 break;
223 case CHIP_VERDE:
224 ucode_start_address = VERDE_SMC_UCODE_START;
225 ucode_size = VERDE_SMC_UCODE_SIZE;
226 break;
227 case CHIP_OLAND:
228 ucode_start_address = OLAND_SMC_UCODE_START;
229 ucode_size = OLAND_SMC_UCODE_SIZE;
230 break;
231 case CHIP_HAINAN:
232 ucode_start_address = HAINAN_SMC_UCODE_START;
233 ucode_size = HAINAN_SMC_UCODE_SIZE;
234 break;
235 default:
236 DRM_ERROR("unknown asic in smc ucode loader\n");
237 BUG();
238 }
239
240 if (ucode_size & 3)
241 return -EINVAL;
242
243 src = (const u8 *)rdev->smc_fw->data;
244 WREG32(SMC_IND_INDEX_0, ucode_start_address);
245 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
246 while (ucode_size >= 4) {
247 /* SMC address space is BE */
248 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
249
250 WREG32(SMC_IND_DATA_0, data);
251
252 src += 4;
253 ucode_size -= 4;
254 }
255 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
256
257 return 0;
258}
259
260int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
261 u32 *value, u32 limit)
262{
263 int ret;
264
265 ret = si_set_smc_sram_address(rdev, smc_address, limit);
266 if (ret)
267 return ret;
268
269 *value = RREG32(SMC_IND_DATA_0);
270 return 0;
271}
272
273int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
274 u32 value, u32 limit)
275{
276 int ret;
277
278 ret = si_set_smc_sram_address(rdev, smc_address, limit);
279 if (ret)
280 return ret;
281
282 WREG32(SMC_IND_DATA_0, value);
283 return 0;
284}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 8f2d7d4f9b28..299d657d0168 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -30,6 +30,94 @@
30#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 30#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
31#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001 31#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
32 32
33#define SI_MAX_SH_GPRS 256
34#define SI_MAX_TEMP_GPRS 16
35#define SI_MAX_SH_THREADS 256
36#define SI_MAX_SH_STACK_ENTRIES 4096
37#define SI_MAX_FRC_EOV_CNT 16384
38#define SI_MAX_BACKENDS 8
39#define SI_MAX_BACKENDS_MASK 0xFF
40#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
41#define SI_MAX_SIMDS 12
42#define SI_MAX_SIMDS_MASK 0x0FFF
43#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
44#define SI_MAX_PIPES 8
45#define SI_MAX_PIPES_MASK 0xFF
46#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
47#define SI_MAX_LDS_NUM 0xFFFF
48#define SI_MAX_TCC 16
49#define SI_MAX_TCC_MASK 0xFFFF
50
51/* SMC IND accessor regs */
52#define SMC_IND_INDEX_0 0x200
53#define SMC_IND_DATA_0 0x204
54
55#define SMC_IND_ACCESS_CNTL 0x228
56# define AUTO_INCREMENT_IND_0 (1 << 0)
57#define SMC_MESSAGE_0 0x22c
58#define SMC_RESP_0 0x230
59
60/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
61#define SMC_CG_IND_START 0xc0030000
62#define SMC_CG_IND_END 0xc0040000
63
64#define CG_CGTT_LOCAL_0 0x400
65#define CG_CGTT_LOCAL_1 0x401
66
67/* SMC IND registers */
68#define SMC_SYSCON_RESET_CNTL 0x80000000
69# define RST_REG (1 << 0)
70#define SMC_SYSCON_CLOCK_CNTL_0 0x80000004
71# define CK_DISABLE (1 << 0)
72# define CKEN (1 << 24)
73
74#define VGA_HDP_CONTROL 0x328
75#define VGA_MEMORY_DISABLE (1 << 4)
76
77#define DCCG_DISP_SLOW_SELECT_REG 0x4fc
78#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
79#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
80#define DCCG_DISP1_SLOW_SELECT_SHIFT 0
81#define DCCG_DISP2_SLOW_SELECT(x) ((x) << 4)
82#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
83#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
84
85#define CG_SPLL_FUNC_CNTL 0x600
86#define SPLL_RESET (1 << 0)
87#define SPLL_SLEEP (1 << 1)
88#define SPLL_BYPASS_EN (1 << 3)
89#define SPLL_REF_DIV(x) ((x) << 4)
90#define SPLL_REF_DIV_MASK (0x3f << 4)
91#define SPLL_PDIV_A(x) ((x) << 20)
92#define SPLL_PDIV_A_MASK (0x7f << 20)
93#define SPLL_PDIV_A_SHIFT 20
94#define CG_SPLL_FUNC_CNTL_2 0x604
95#define SCLK_MUX_SEL(x) ((x) << 0)
96#define SCLK_MUX_SEL_MASK (0x1ff << 0)
97#define CG_SPLL_FUNC_CNTL_3 0x608
98#define SPLL_FB_DIV(x) ((x) << 0)
99#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
100#define SPLL_FB_DIV_SHIFT 0
101#define SPLL_DITHEN (1 << 28)
102#define CG_SPLL_FUNC_CNTL_4 0x60c
103
104#define SPLL_CNTL_MODE 0x618
105# define SPLL_REFCLK_SEL(x) ((x) << 8)
106# define SPLL_REFCLK_SEL_MASK 0xFF00
107
108#define CG_SPLL_SPREAD_SPECTRUM 0x620
109#define SSEN (1 << 0)
110#define CLK_S(x) ((x) << 4)
111#define CLK_S_MASK (0xfff << 4)
112#define CLK_S_SHIFT 4
113#define CG_SPLL_SPREAD_SPECTRUM_2 0x624
114#define CLK_V(x) ((x) << 0)
115#define CLK_V_MASK (0x3ffffff << 0)
116#define CLK_V_SHIFT 0
117
118#define CG_SPLL_AUTOSCALE_CNTL 0x62c
119# define AUTOSCALE_ON_SS_CLEAR (1 << 9)
120
33/* discrete uvd clocks */ 121/* discrete uvd clocks */
34#define CG_UPLL_FUNC_CNTL 0x634 122#define CG_UPLL_FUNC_CNTL 0x634
35# define UPLL_RESET_MASK 0x00000001 123# define UPLL_RESET_MASK 0x00000001
@@ -59,6 +147,45 @@
59#define CG_UPLL_SPREAD_SPECTRUM 0x650 147#define CG_UPLL_SPREAD_SPECTRUM 0x650
60# define SSEN_MASK 0x00000001 148# define SSEN_MASK 0x00000001
61 149
150#define MPLL_BYPASSCLK_SEL 0x65c
151# define MPLL_CLKOUT_SEL(x) ((x) << 8)
152# define MPLL_CLKOUT_SEL_MASK 0xFF00
153
154#define CG_CLKPIN_CNTL 0x660
155# define XTALIN_DIVIDE (1 << 1)
156# define BCLK_AS_XCLK (1 << 2)
157#define CG_CLKPIN_CNTL_2 0x664
158# define FORCE_BIF_REFCLK_EN (1 << 3)
159# define MUX_TCLK_TO_XCLK (1 << 8)
160
161#define THM_CLK_CNTL 0x66c
162# define CMON_CLK_SEL(x) ((x) << 0)
163# define CMON_CLK_SEL_MASK 0xFF
164# define TMON_CLK_SEL(x) ((x) << 8)
165# define TMON_CLK_SEL_MASK 0xFF00
166#define MISC_CLK_CNTL 0x670
167# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
168# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
169# define ZCLK_SEL(x) ((x) << 8)
170# define ZCLK_SEL_MASK 0xFF00
171
172#define CG_THERMAL_CTRL 0x700
173#define DPM_EVENT_SRC(x) ((x) << 0)
174#define DPM_EVENT_SRC_MASK (7 << 0)
175#define DIG_THERM_DPM(x) ((x) << 14)
176#define DIG_THERM_DPM_MASK 0x003FC000
177#define DIG_THERM_DPM_SHIFT 14
178
179#define CG_THERMAL_INT 0x708
180#define DIG_THERM_INTH(x) ((x) << 8)
181#define DIG_THERM_INTH_MASK 0x0000FF00
182#define DIG_THERM_INTH_SHIFT 8
183#define DIG_THERM_INTL(x) ((x) << 16)
184#define DIG_THERM_INTL_MASK 0x00FF0000
185#define DIG_THERM_INTL_SHIFT 16
186#define THERM_INT_MASK_HIGH (1 << 24)
187#define THERM_INT_MASK_LOW (1 << 25)
188
62#define CG_MULT_THERMAL_STATUS 0x714 189#define CG_MULT_THERMAL_STATUS 0x714
63#define ASIC_MAX_TEMP(x) ((x) << 0) 190#define ASIC_MAX_TEMP(x) ((x) << 0)
64#define ASIC_MAX_TEMP_MASK 0x000001ff 191#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -67,31 +194,85 @@
67#define CTF_TEMP_MASK 0x0003fe00 194#define CTF_TEMP_MASK 0x0003fe00
68#define CTF_TEMP_SHIFT 9 195#define CTF_TEMP_SHIFT 9
69 196
70#define SI_MAX_SH_GPRS 256 197#define GENERAL_PWRMGT 0x780
71#define SI_MAX_TEMP_GPRS 16 198# define GLOBAL_PWRMGT_EN (1 << 0)
72#define SI_MAX_SH_THREADS 256 199# define STATIC_PM_EN (1 << 1)
73#define SI_MAX_SH_STACK_ENTRIES 4096 200# define THERMAL_PROTECTION_DIS (1 << 2)
74#define SI_MAX_FRC_EOV_CNT 16384 201# define THERMAL_PROTECTION_TYPE (1 << 3)
75#define SI_MAX_BACKENDS 8 202# define SW_SMIO_INDEX(x) ((x) << 6)
76#define SI_MAX_BACKENDS_MASK 0xFF 203# define SW_SMIO_INDEX_MASK (1 << 6)
77#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F 204# define SW_SMIO_INDEX_SHIFT 6
78#define SI_MAX_SIMDS 12 205# define VOLT_PWRMGT_EN (1 << 10)
79#define SI_MAX_SIMDS_MASK 0x0FFF 206# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
80#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF 207#define CG_TPC 0x784
81#define SI_MAX_PIPES 8 208#define SCLK_PWRMGT_CNTL 0x788
82#define SI_MAX_PIPES_MASK 0xFF 209# define SCLK_PWRMGT_OFF (1 << 0)
83#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F 210# define SCLK_LOW_D1 (1 << 1)
84#define SI_MAX_LDS_NUM 0xFFFF 211# define FIR_RESET (1 << 4)
85#define SI_MAX_TCC 16 212# define FIR_FORCE_TREND_SEL (1 << 5)
86#define SI_MAX_TCC_MASK 0xFFFF 213# define FIR_TREND_MODE (1 << 6)
87 214# define DYN_GFX_CLK_OFF_EN (1 << 7)
88#define VGA_HDP_CONTROL 0x328 215# define GFX_CLK_FORCE_ON (1 << 8)
89#define VGA_MEMORY_DISABLE (1 << 4) 216# define GFX_CLK_REQUEST_OFF (1 << 9)
90 217# define GFX_CLK_FORCE_OFF (1 << 10)
91#define CG_CLKPIN_CNTL 0x660 218# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
92# define XTALIN_DIVIDE (1 << 1) 219# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
93#define CG_CLKPIN_CNTL_2 0x664 220# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
94# define MUX_TCLK_TO_XCLK (1 << 8) 221# define DYN_LIGHT_SLEEP_EN (1 << 14)
222
223#define CG_FTV 0x7bc
224
225#define CG_FFCT_0 0x7c0
226# define UTC_0(x) ((x) << 0)
227# define UTC_0_MASK (0x3ff << 0)
228# define DTC_0(x) ((x) << 10)
229# define DTC_0_MASK (0x3ff << 10)
230
231#define CG_BSP 0x7fc
232# define BSP(x) ((x) << 0)
233# define BSP_MASK (0xffff << 0)
234# define BSU(x) ((x) << 16)
235# define BSU_MASK (0xf << 16)
236#define CG_AT 0x800
237# define CG_R(x) ((x) << 0)
238# define CG_R_MASK (0xffff << 0)
239# define CG_L(x) ((x) << 16)
240# define CG_L_MASK (0xffff << 16)
241
242#define CG_GIT 0x804
243# define CG_GICST(x) ((x) << 0)
244# define CG_GICST_MASK (0xffff << 0)
245# define CG_GIPOT(x) ((x) << 16)
246# define CG_GIPOT_MASK (0xffff << 16)
247
248#define CG_SSP 0x80c
249# define SST(x) ((x) << 0)
250# define SST_MASK (0xffff << 0)
251# define SSTU(x) ((x) << 16)
252# define SSTU_MASK (0xf << 16)
253
254#define CG_DISPLAY_GAP_CNTL 0x828
255# define DISP1_GAP(x) ((x) << 0)
256# define DISP1_GAP_MASK (3 << 0)
257# define DISP2_GAP(x) ((x) << 2)
258# define DISP2_GAP_MASK (3 << 2)
259# define VBI_TIMER_COUNT(x) ((x) << 4)
260# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
261# define VBI_TIMER_UNIT(x) ((x) << 20)
262# define VBI_TIMER_UNIT_MASK (7 << 20)
263# define DISP1_GAP_MCHG(x) ((x) << 24)
264# define DISP1_GAP_MCHG_MASK (3 << 24)
265# define DISP2_GAP_MCHG(x) ((x) << 26)
266# define DISP2_GAP_MCHG_MASK (3 << 26)
267
268#define CG_ULV_CONTROL 0x878
269#define CG_ULV_PARAMETER 0x87c
270
271#define SMC_SCRATCH0 0x884
272
273#define CG_CAC_CTRL 0x8b8
274# define CAC_WINDOW(x) ((x) << 0)
275# define CAC_WINDOW_MASK 0x00ffffff
95 276
96#define DMIF_ADDR_CONFIG 0xBD4 277#define DMIF_ADDR_CONFIG 0xBD4
97 278
@@ -203,6 +384,10 @@
203#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C 384#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
204#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580 385#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580
205 386
387#define VM_L2_CG 0x15c0
388#define MC_CG_ENABLE (1 << 18)
389#define MC_LS_ENABLE (1 << 19)
390
206#define MC_SHARED_CHMAP 0x2004 391#define MC_SHARED_CHMAP 0x2004
207#define NOOFCHAN_SHIFT 12 392#define NOOFCHAN_SHIFT 12
208#define NOOFCHAN_MASK 0x0000f000 393#define NOOFCHAN_MASK 0x0000f000
@@ -228,6 +413,17 @@
228 413
229#define MC_SHARED_BLACKOUT_CNTL 0x20ac 414#define MC_SHARED_BLACKOUT_CNTL 0x20ac
230 415
416#define MC_HUB_MISC_HUB_CG 0x20b8
417#define MC_HUB_MISC_VM_CG 0x20bc
418
419#define MC_HUB_MISC_SIP_CG 0x20c0
420
421#define MC_XPB_CLK_GAT 0x2478
422
423#define MC_CITF_MISC_RD_CG 0x2648
424#define MC_CITF_MISC_WR_CG 0x264c
425#define MC_CITF_MISC_VM_CG 0x2650
426
231#define MC_ARB_RAMCFG 0x2760 427#define MC_ARB_RAMCFG 0x2760
232#define NOOFBANK_SHIFT 0 428#define NOOFBANK_SHIFT 0
233#define NOOFBANK_MASK 0x00000003 429#define NOOFBANK_MASK 0x00000003
@@ -243,6 +439,23 @@
243#define NOOFGROUPS_SHIFT 12 439#define NOOFGROUPS_SHIFT 12
244#define NOOFGROUPS_MASK 0x00001000 440#define NOOFGROUPS_MASK 0x00001000
245 441
442#define MC_ARB_DRAM_TIMING 0x2774
443#define MC_ARB_DRAM_TIMING2 0x2778
444
445#define MC_ARB_BURST_TIME 0x2808
446#define STATE0(x) ((x) << 0)
447#define STATE0_MASK (0x1f << 0)
448#define STATE0_SHIFT 0
449#define STATE1(x) ((x) << 5)
450#define STATE1_MASK (0x1f << 5)
451#define STATE1_SHIFT 5
452#define STATE2(x) ((x) << 10)
453#define STATE2_MASK (0x1f << 10)
454#define STATE2_SHIFT 10
455#define STATE3(x) ((x) << 15)
456#define STATE3_MASK (0x1f << 15)
457#define STATE3_SHIFT 15
458
246#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x2808 459#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x2808
247#define TRAIN_DONE_D0 (1 << 30) 460#define TRAIN_DONE_D0 (1 << 30)
248#define TRAIN_DONE_D1 (1 << 31) 461#define TRAIN_DONE_D1 (1 << 31)
@@ -250,13 +463,105 @@
250#define MC_SEQ_SUP_CNTL 0x28c8 463#define MC_SEQ_SUP_CNTL 0x28c8
251#define RUN_MASK (1 << 0) 464#define RUN_MASK (1 << 0)
252#define MC_SEQ_SUP_PGM 0x28cc 465#define MC_SEQ_SUP_PGM 0x28cc
466#define MC_PMG_AUTO_CMD 0x28d0
253 467
254#define MC_IO_PAD_CNTL_D0 0x29d0 468#define MC_IO_PAD_CNTL_D0 0x29d0
255#define MEM_FALL_OUT_CMD (1 << 8) 469#define MEM_FALL_OUT_CMD (1 << 8)
256 470
471#define MC_SEQ_RAS_TIMING 0x28a0
472#define MC_SEQ_CAS_TIMING 0x28a4
473#define MC_SEQ_MISC_TIMING 0x28a8
474#define MC_SEQ_MISC_TIMING2 0x28ac
475#define MC_SEQ_PMG_TIMING 0x28b0
476#define MC_SEQ_RD_CTL_D0 0x28b4
477#define MC_SEQ_RD_CTL_D1 0x28b8
478#define MC_SEQ_WR_CTL_D0 0x28bc
479#define MC_SEQ_WR_CTL_D1 0x28c0
480
481#define MC_SEQ_MISC0 0x2a00
482#define MC_SEQ_MISC0_VEN_ID_SHIFT 8
483#define MC_SEQ_MISC0_VEN_ID_MASK 0x00000f00
484#define MC_SEQ_MISC0_VEN_ID_VALUE 3
485#define MC_SEQ_MISC0_REV_ID_SHIFT 12
486#define MC_SEQ_MISC0_REV_ID_MASK 0x0000f000
487#define MC_SEQ_MISC0_REV_ID_VALUE 1
488#define MC_SEQ_MISC0_GDDR5_SHIFT 28
489#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
490#define MC_SEQ_MISC0_GDDR5_VALUE 5
491#define MC_SEQ_MISC1 0x2a04
492#define MC_SEQ_RESERVE_M 0x2a08
493#define MC_PMG_CMD_EMRS 0x2a0c
494
257#define MC_SEQ_IO_DEBUG_INDEX 0x2a44 495#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
258#define MC_SEQ_IO_DEBUG_DATA 0x2a48 496#define MC_SEQ_IO_DEBUG_DATA 0x2a48
259 497
498#define MC_SEQ_MISC5 0x2a54
499#define MC_SEQ_MISC6 0x2a58
500
501#define MC_SEQ_MISC7 0x2a64
502
503#define MC_SEQ_RAS_TIMING_LP 0x2a6c
504#define MC_SEQ_CAS_TIMING_LP 0x2a70
505#define MC_SEQ_MISC_TIMING_LP 0x2a74
506#define MC_SEQ_MISC_TIMING2_LP 0x2a78
507#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
508#define MC_SEQ_WR_CTL_D1_LP 0x2a80
509#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
510#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
511
512#define MC_PMG_CMD_MRS 0x2aac
513
514#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
515#define MC_SEQ_RD_CTL_D1_LP 0x2b20
516
517#define MC_PMG_CMD_MRS1 0x2b44
518#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
519#define MC_SEQ_PMG_TIMING_LP 0x2b4c
520
521#define MC_SEQ_WR_CTL_2 0x2b54
522#define MC_SEQ_WR_CTL_2_LP 0x2b58
523#define MC_PMG_CMD_MRS2 0x2b5c
524#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
525
526#define MCLK_PWRMGT_CNTL 0x2ba0
527# define DLL_SPEED(x) ((x) << 0)
528# define DLL_SPEED_MASK (0x1f << 0)
529# define DLL_READY (1 << 6)
530# define MC_INT_CNTL (1 << 7)
531# define MRDCK0_PDNB (1 << 8)
532# define MRDCK1_PDNB (1 << 9)
533# define MRDCK0_RESET (1 << 16)
534# define MRDCK1_RESET (1 << 17)
535# define DLL_READY_READ (1 << 24)
536#define DLL_CNTL 0x2ba4
537# define MRDCK0_BYPASS (1 << 24)
538# define MRDCK1_BYPASS (1 << 25)
539
540#define MPLL_FUNC_CNTL 0x2bb4
541#define BWCTRL(x) ((x) << 20)
542#define BWCTRL_MASK (0xff << 20)
543#define MPLL_FUNC_CNTL_1 0x2bb8
544#define VCO_MODE(x) ((x) << 0)
545#define VCO_MODE_MASK (3 << 0)
546#define CLKFRAC(x) ((x) << 4)
547#define CLKFRAC_MASK (0xfff << 4)
548#define CLKF(x) ((x) << 16)
549#define CLKF_MASK (0xfff << 16)
550#define MPLL_FUNC_CNTL_2 0x2bbc
551#define MPLL_AD_FUNC_CNTL 0x2bc0
552#define YCLK_POST_DIV(x) ((x) << 0)
553#define YCLK_POST_DIV_MASK (7 << 0)
554#define MPLL_DQ_FUNC_CNTL 0x2bc4
555#define YCLK_SEL(x) ((x) << 4)
556#define YCLK_SEL_MASK (1 << 4)
557
558#define MPLL_SS1 0x2bcc
559#define CLKV(x) ((x) << 0)
560#define CLKV_MASK (0x3ffffff << 0)
561#define MPLL_SS2 0x2bd0
562#define CLKS(x) ((x) << 0)
563#define CLKS_MASK (0xfff << 0)
564
260#define HDP_HOST_PATH_CNTL 0x2C00 565#define HDP_HOST_PATH_CNTL 0x2C00
261#define HDP_NONSURFACE_BASE 0x2C04 566#define HDP_NONSURFACE_BASE 0x2C04
262#define HDP_NONSURFACE_INFO 0x2C08 567#define HDP_NONSURFACE_INFO 0x2C08
@@ -266,6 +571,8 @@
266#define HDP_MISC_CNTL 0x2F4C 571#define HDP_MISC_CNTL 0x2F4C
267#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) 572#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
268 573
574#define ATC_MISC_CG 0x3350
575
269#define IH_RB_CNTL 0x3e00 576#define IH_RB_CNTL 0x3e00
270# define IH_RB_ENABLE (1 << 0) 577# define IH_RB_ENABLE (1 << 0)
271# define IH_IB_SIZE(x) ((x) << 1) /* log2 */ 578# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
@@ -424,6 +731,9 @@
424# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) 731# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
425# define DC_HPDx_EN (1 << 28) 732# define DC_HPDx_EN (1 << 28)
426 733
734#define DPG_PIPE_STUTTER_CONTROL 0x6cd4
735# define STUTTER_ENABLE (1 << 0)
736
427/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */ 737/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
428#define CRTC_STATUS_FRAME_COUNT 0x6e98 738#define CRTC_STATUS_FRAME_COUNT 0x6e98
429 739
@@ -599,6 +909,24 @@
599 909
600#define SQC_CACHES 0x8C08 910#define SQC_CACHES 0x8C08
601 911
912#define SQ_POWER_THROTTLE 0x8e58
913#define MIN_POWER(x) ((x) << 0)
914#define MIN_POWER_MASK (0x3fff << 0)
915#define MIN_POWER_SHIFT 0
916#define MAX_POWER(x) ((x) << 16)
917#define MAX_POWER_MASK (0x3fff << 16)
918#define MAX_POWER_SHIFT 0
919#define SQ_POWER_THROTTLE2 0x8e5c
920#define MAX_POWER_DELTA(x) ((x) << 0)
921#define MAX_POWER_DELTA_MASK (0x3fff << 0)
922#define MAX_POWER_DELTA_SHIFT 0
923#define STI_SIZE(x) ((x) << 16)
924#define STI_SIZE_MASK (0x3ff << 16)
925#define STI_SIZE_SHIFT 16
926#define LTI_RATIO(x) ((x) << 27)
927#define LTI_RATIO_MASK (0xf << 27)
928#define LTI_RATIO_SHIFT 27
929
602#define SX_DEBUG_1 0x9060 930#define SX_DEBUG_1 0x9060
603 931
604#define SPI_STATIC_THREAD_MGMT_1 0x90E0 932#define SPI_STATIC_THREAD_MGMT_1 0x90E0
@@ -616,6 +944,11 @@
616#define CGTS_USER_TCC_DISABLE 0x914C 944#define CGTS_USER_TCC_DISABLE 0x914C
617#define TCC_DISABLE_MASK 0xFFFF0000 945#define TCC_DISABLE_MASK 0xFFFF0000
618#define TCC_DISABLE_SHIFT 16 946#define TCC_DISABLE_SHIFT 16
947#define CGTS_SM_CTRL_REG 0x9150
948#define OVERRIDE (1 << 21)
949#define LS_OVERRIDE (1 << 22)
950
951#define SPI_LB_CU_MASK 0x9354
619 952
620#define TA_CNTL_AUX 0x9508 953#define TA_CNTL_AUX 0x9508
621 954
@@ -705,6 +1038,8 @@
705#define CB_PERFCOUNTER3_SELECT0 0x9a38 1038#define CB_PERFCOUNTER3_SELECT0 0x9a38
706#define CB_PERFCOUNTER3_SELECT1 0x9a3c 1039#define CB_PERFCOUNTER3_SELECT1 0x9a3c
707 1040
1041#define CB_CGTT_SCLK_CTRL 0x9a60
1042
708#define GC_USER_RB_BACKEND_DISABLE 0x9B7C 1043#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
709#define BACKEND_DISABLE_MASK 0x00FF0000 1044#define BACKEND_DISABLE_MASK 0x00FF0000
710#define BACKEND_DISABLE_SHIFT 16 1045#define BACKEND_DISABLE_SHIFT 16
@@ -762,6 +1097,9 @@
762# define CP_RINGID1_INT_STAT (1 << 30) 1097# define CP_RINGID1_INT_STAT (1 << 30)
763# define CP_RINGID0_INT_STAT (1 << 31) 1098# define CP_RINGID0_INT_STAT (1 << 31)
764 1099
1100#define CP_MEM_SLP_CNTL 0xC1E4
1101# define CP_MEM_LS_EN (1 << 0)
1102
765#define CP_DEBUG 0xC1FC 1103#define CP_DEBUG 0xC1FC
766 1104
767#define RLC_CNTL 0xC300 1105#define RLC_CNTL 0xC300
@@ -769,6 +1107,7 @@
769#define RLC_RL_BASE 0xC304 1107#define RLC_RL_BASE 0xC304
770#define RLC_RL_SIZE 0xC308 1108#define RLC_RL_SIZE 0xC308
771#define RLC_LB_CNTL 0xC30C 1109#define RLC_LB_CNTL 0xC30C
1110# define LOAD_BALANCE_ENABLE (1 << 0)
772#define RLC_SAVE_AND_RESTORE_BASE 0xC310 1111#define RLC_SAVE_AND_RESTORE_BASE 0xC310
773#define RLC_LB_CNTR_MAX 0xC314 1112#define RLC_LB_CNTR_MAX 0xC314
774#define RLC_LB_CNTR_INIT 0xC318 1113#define RLC_LB_CNTR_INIT 0xC318
@@ -783,6 +1122,56 @@
783#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340 1122#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340
784#define RLC_MC_CNTL 0xC344 1123#define RLC_MC_CNTL 0xC344
785#define RLC_UCODE_CNTL 0xC348 1124#define RLC_UCODE_CNTL 0xC348
1125#define RLC_STAT 0xC34C
1126# define RLC_BUSY_STATUS (1 << 0)
1127# define GFX_POWER_STATUS (1 << 1)
1128# define GFX_CLOCK_STATUS (1 << 2)
1129# define GFX_LS_STATUS (1 << 3)
1130
1131#define RLC_PG_CNTL 0xC35C
1132# define GFX_PG_ENABLE (1 << 0)
1133# define GFX_PG_SRC (1 << 1)
1134
1135#define RLC_CGTT_MGCG_OVERRIDE 0xC400
1136#define RLC_CGCG_CGLS_CTRL 0xC404
1137# define CGCG_EN (1 << 0)
1138# define CGLS_EN (1 << 1)
1139
1140#define RLC_TTOP_D 0xC414
1141# define RLC_PUD(x) ((x) << 0)
1142# define RLC_PUD_MASK (0xff << 0)
1143# define RLC_PDD(x) ((x) << 8)
1144# define RLC_PDD_MASK (0xff << 8)
1145# define RLC_TTPD(x) ((x) << 16)
1146# define RLC_TTPD_MASK (0xff << 16)
1147# define RLC_MSD(x) ((x) << 24)
1148# define RLC_MSD_MASK (0xff << 24)
1149
1150#define RLC_LB_INIT_CU_MASK 0xC41C
1151
1152#define RLC_PG_AO_CU_MASK 0xC42C
1153#define RLC_MAX_PG_CU 0xC430
1154# define MAX_PU_CU(x) ((x) << 0)
1155# define MAX_PU_CU_MASK (0xff << 0)
1156#define RLC_AUTO_PG_CTRL 0xC434
1157# define AUTO_PG_EN (1 << 0)
1158# define GRBM_REG_SGIT(x) ((x) << 3)
1159# define GRBM_REG_SGIT_MASK (0xffff << 3)
1160# define PG_AFTER_GRBM_REG_ST(x) ((x) << 19)
1161# define PG_AFTER_GRBM_REG_ST_MASK (0x1fff << 19)
1162
1163#define RLC_SERDES_WR_MASTER_MASK_0 0xC454
1164#define RLC_SERDES_WR_MASTER_MASK_1 0xC458
1165#define RLC_SERDES_WR_CTRL 0xC45C
1166
1167#define RLC_SERDES_MASTER_BUSY_0 0xC464
1168#define RLC_SERDES_MASTER_BUSY_1 0xC468
1169
1170#define RLC_GCPM_GENERAL_3 0xC478
1171
1172#define DB_RENDER_CONTROL 0x28000
1173
1174#define DB_DEPTH_INFO 0x2803c
786 1175
787#define PA_SC_RASTER_CONFIG 0x28350 1176#define PA_SC_RASTER_CONFIG 0x28350
788# define RASTER_CONFIG_RB_MAP_0 0 1177# define RASTER_CONFIG_RB_MAP_0 0
@@ -829,6 +1218,146 @@
829# define THREAD_TRACE_FLUSH (54 << 0) 1218# define THREAD_TRACE_FLUSH (54 << 0)
830# define THREAD_TRACE_FINISH (55 << 0) 1219# define THREAD_TRACE_FINISH (55 << 0)
831 1220
1221/* PIF PHY0 registers idx/data 0x8/0xc */
1222#define PB0_PIF_CNTL 0x10
1223# define LS2_EXIT_TIME(x) ((x) << 17)
1224# define LS2_EXIT_TIME_MASK (0x7 << 17)
1225# define LS2_EXIT_TIME_SHIFT 17
1226#define PB0_PIF_PAIRING 0x11
1227# define MULTI_PIF (1 << 25)
1228#define PB0_PIF_PWRDOWN_0 0x12
1229# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
1230# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
1231# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
1232# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
1233# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
1234# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
1235# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
1236# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
1237# define PLL_RAMP_UP_TIME_0_SHIFT 24
1238#define PB0_PIF_PWRDOWN_1 0x13
1239# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
1240# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
1241# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
1242# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
1243# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
1244# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
1245# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
1246# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
1247# define PLL_RAMP_UP_TIME_1_SHIFT 24
1248
1249#define PB0_PIF_PWRDOWN_2 0x17
1250# define PLL_POWER_STATE_IN_TXS2_2(x) ((x) << 7)
1251# define PLL_POWER_STATE_IN_TXS2_2_MASK (0x7 << 7)
1252# define PLL_POWER_STATE_IN_TXS2_2_SHIFT 7
1253# define PLL_POWER_STATE_IN_OFF_2(x) ((x) << 10)
1254# define PLL_POWER_STATE_IN_OFF_2_MASK (0x7 << 10)
1255# define PLL_POWER_STATE_IN_OFF_2_SHIFT 10
1256# define PLL_RAMP_UP_TIME_2(x) ((x) << 24)
1257# define PLL_RAMP_UP_TIME_2_MASK (0x7 << 24)
1258# define PLL_RAMP_UP_TIME_2_SHIFT 24
1259#define PB0_PIF_PWRDOWN_3 0x18
1260# define PLL_POWER_STATE_IN_TXS2_3(x) ((x) << 7)
1261# define PLL_POWER_STATE_IN_TXS2_3_MASK (0x7 << 7)
1262# define PLL_POWER_STATE_IN_TXS2_3_SHIFT 7
1263# define PLL_POWER_STATE_IN_OFF_3(x) ((x) << 10)
1264# define PLL_POWER_STATE_IN_OFF_3_MASK (0x7 << 10)
1265# define PLL_POWER_STATE_IN_OFF_3_SHIFT 10
1266# define PLL_RAMP_UP_TIME_3(x) ((x) << 24)
1267# define PLL_RAMP_UP_TIME_3_MASK (0x7 << 24)
1268# define PLL_RAMP_UP_TIME_3_SHIFT 24
1269/* PIF PHY1 registers idx/data 0x10/0x14 */
1270#define PB1_PIF_CNTL 0x10
1271#define PB1_PIF_PAIRING 0x11
1272#define PB1_PIF_PWRDOWN_0 0x12
1273#define PB1_PIF_PWRDOWN_1 0x13
1274
1275#define PB1_PIF_PWRDOWN_2 0x17
1276#define PB1_PIF_PWRDOWN_3 0x18
1277/* PCIE registers idx/data 0x30/0x34 */
1278#define PCIE_CNTL2 0x1c /* PCIE */
1279# define SLV_MEM_LS_EN (1 << 16)
1280# define MST_MEM_LS_EN (1 << 18)
1281# define REPLAY_MEM_LS_EN (1 << 19)
1282#define PCIE_LC_STATUS1 0x28 /* PCIE */
1283# define LC_REVERSE_RCVR (1 << 0)
1284# define LC_REVERSE_XMIT (1 << 1)
1285# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
1286# define LC_OPERATING_LINK_WIDTH_SHIFT 2
1287# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
1288# define LC_DETECTED_LINK_WIDTH_SHIFT 5
1289
1290#define PCIE_P_CNTL 0x40 /* PCIE */
1291# define P_IGNORE_EDB_ERR (1 << 6)
1292
1293/* PCIE PORT registers idx/data 0x38/0x3c */
1294#define PCIE_LC_CNTL 0xa0
1295# define LC_L0S_INACTIVITY(x) ((x) << 8)
1296# define LC_L0S_INACTIVITY_MASK (0xf << 8)
1297# define LC_L0S_INACTIVITY_SHIFT 8
1298# define LC_L1_INACTIVITY(x) ((x) << 12)
1299# define LC_L1_INACTIVITY_MASK (0xf << 12)
1300# define LC_L1_INACTIVITY_SHIFT 12
1301# define LC_PMI_TO_L1_DIS (1 << 16)
1302# define LC_ASPM_TO_L1_DIS (1 << 24)
1303#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
1304# define LC_LINK_WIDTH_SHIFT 0
1305# define LC_LINK_WIDTH_MASK 0x7
1306# define LC_LINK_WIDTH_X0 0
1307# define LC_LINK_WIDTH_X1 1
1308# define LC_LINK_WIDTH_X2 2
1309# define LC_LINK_WIDTH_X4 3
1310# define LC_LINK_WIDTH_X8 4
1311# define LC_LINK_WIDTH_X16 6
1312# define LC_LINK_WIDTH_RD_SHIFT 4
1313# define LC_LINK_WIDTH_RD_MASK 0x70
1314# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
1315# define LC_RECONFIG_NOW (1 << 8)
1316# define LC_RENEGOTIATION_SUPPORT (1 << 9)
1317# define LC_RENEGOTIATE_EN (1 << 10)
1318# define LC_SHORT_RECONFIG_EN (1 << 11)
1319# define LC_UPCONFIGURE_SUPPORT (1 << 12)
1320# define LC_UPCONFIGURE_DIS (1 << 13)
1321# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
1322# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
1323# define LC_DYN_LANES_PWR_STATE_SHIFT 21
1324#define PCIE_LC_N_FTS_CNTL 0xa3 /* PCIE_P */
1325# define LC_XMIT_N_FTS(x) ((x) << 0)
1326# define LC_XMIT_N_FTS_MASK (0xff << 0)
1327# define LC_XMIT_N_FTS_SHIFT 0
1328# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
1329# define LC_N_FTS_MASK (0xff << 24)
1330#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
1331# define LC_GEN2_EN_STRAP (1 << 0)
1332# define LC_GEN3_EN_STRAP (1 << 1)
1333# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
1334# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
1335# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
1336# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
1337# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
1338# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
1339# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
1340# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
1341# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
1342# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
1343# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
1344# define LC_CURRENT_DATA_RATE_SHIFT 13
1345# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
1346# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
1347# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
1348# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
1349# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
1350
1351#define PCIE_LC_CNTL2 0xb1
1352# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
1353# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
1354
1355#define PCIE_LC_CNTL3 0xb5 /* PCIE_P */
1356# define LC_GO_TO_RECOVERY (1 << 30)
1357#define PCIE_LC_CNTL4 0xb6 /* PCIE_P */
1358# define LC_REDO_EQ (1 << 5)
1359# define LC_SET_QUIESCE (1 << 13)
1360
832/* 1361/*
833 * UVD 1362 * UVD
834 */ 1363 */
@@ -838,6 +1367,21 @@
838#define UVD_RBC_RB_RPTR 0xF690 1367#define UVD_RBC_RB_RPTR 0xF690
839#define UVD_RBC_RB_WPTR 0xF694 1368#define UVD_RBC_RB_WPTR 0xF694
840 1369
1370#define UVD_CGC_CTRL 0xF4B0
1371# define DCM (1 << 0)
1372# define CG_DT(x) ((x) << 2)
1373# define CG_DT_MASK (0xf << 2)
1374# define CLK_OD(x) ((x) << 6)
1375# define CLK_OD_MASK (0x1f << 6)
1376
1377 /* UVD CTX indirect */
1378#define UVD_CGC_MEM_CTRL 0xC0
1379#define UVD_CGC_CTRL2 0xC1
1380# define DYN_OR_EN (1 << 0)
1381# define DYN_RR_EN (1 << 1)
1382# define G_DIV_ID(x) ((x) << 2)
1383# define G_DIV_ID_MASK (0x7 << 2)
1384
841/* 1385/*
842 * PM4 1386 * PM4
843 */ 1387 */
@@ -1082,6 +1626,11 @@
1082# define DMA_IDLE (1 << 0) 1626# define DMA_IDLE (1 << 0)
1083#define DMA_TILING_CONFIG 0xd0b8 1627#define DMA_TILING_CONFIG 0xd0b8
1084 1628
1629#define DMA_PG 0xd0d4
1630# define PG_CNTL_ENABLE (1 << 0)
1631#define DMA_PGFSM_CONFIG 0xd0d8
1632#define DMA_PGFSM_WRITE 0xd0dc
1633
1085#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \ 1634#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
1086 (((b) & 0x1) << 26) | \ 1635 (((b) & 0x1) << 26) | \
1087 (((t) & 0x1) << 23) | \ 1636 (((t) & 0x1) << 23) | \
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
new file mode 100644
index 000000000000..5578e9837026
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -0,0 +1,397 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef PP_SISLANDS_SMC_H
24#define PP_SISLANDS_SMC_H
25
26#include "ppsmc.h"
27
28#pragma pack(push, 1)
29
30#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
31
32struct PP_SIslands_Dpm2PerfLevel
33{
34 uint8_t MaxPS;
35 uint8_t TgtAct;
36 uint8_t MaxPS_StepInc;
37 uint8_t MaxPS_StepDec;
38 uint8_t PSSamplingTime;
39 uint8_t NearTDPDec;
40 uint8_t AboveSafeInc;
41 uint8_t BelowSafeInc;
42 uint8_t PSDeltaLimit;
43 uint8_t PSDeltaWin;
44 uint16_t PwrEfficiencyRatio;
45 uint8_t Reserved[4];
46};
47
48typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel;
49
50struct PP_SIslands_DPM2Status
51{
52 uint32_t dpm2Flags;
53 uint8_t CurrPSkip;
54 uint8_t CurrPSkipPowerShift;
55 uint8_t CurrPSkipTDP;
56 uint8_t CurrPSkipOCP;
57 uint8_t MaxSPLLIndex;
58 uint8_t MinSPLLIndex;
59 uint8_t CurrSPLLIndex;
60 uint8_t InfSweepMode;
61 uint8_t InfSweepDir;
62 uint8_t TDPexceeded;
63 uint8_t reserved;
64 uint8_t SwitchDownThreshold;
65 uint32_t SwitchDownCounter;
66 uint32_t SysScalingFactor;
67};
68
69typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status;
70
71struct PP_SIslands_DPM2Parameters
72{
73 uint32_t TDPLimit;
74 uint32_t NearTDPLimit;
75 uint32_t SafePowerLimit;
76 uint32_t PowerBoostLimit;
77 uint32_t MinLimitDelta;
78};
79typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters;
80
81struct PP_SIslands_PAPMStatus
82{
83 uint32_t EstimatedDGPU_T;
84 uint32_t EstimatedDGPU_P;
85 uint32_t EstimatedAPU_T;
86 uint32_t EstimatedAPU_P;
87 uint8_t dGPU_T_Limit_Exceeded;
88 uint8_t reserved[3];
89};
90typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;
91
92struct PP_SIslands_PAPMParameters
93{
94 uint32_t NearTDPLimitTherm;
95 uint32_t NearTDPLimitPAPM;
96 uint32_t PlatformPowerLimit;
97 uint32_t dGPU_T_Limit;
98 uint32_t dGPU_T_Warning;
99 uint32_t dGPU_T_Hysteresis;
100};
101typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;
102
103struct SISLANDS_SMC_SCLK_VALUE
104{
105 uint32_t vCG_SPLL_FUNC_CNTL;
106 uint32_t vCG_SPLL_FUNC_CNTL_2;
107 uint32_t vCG_SPLL_FUNC_CNTL_3;
108 uint32_t vCG_SPLL_FUNC_CNTL_4;
109 uint32_t vCG_SPLL_SPREAD_SPECTRUM;
110 uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
111 uint32_t sclk_value;
112};
113
114typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;
115
116struct SISLANDS_SMC_MCLK_VALUE
117{
118 uint32_t vMPLL_FUNC_CNTL;
119 uint32_t vMPLL_FUNC_CNTL_1;
120 uint32_t vMPLL_FUNC_CNTL_2;
121 uint32_t vMPLL_AD_FUNC_CNTL;
122 uint32_t vMPLL_DQ_FUNC_CNTL;
123 uint32_t vMCLK_PWRMGT_CNTL;
124 uint32_t vDLL_CNTL;
125 uint32_t vMPLL_SS;
126 uint32_t vMPLL_SS2;
127 uint32_t mclk_value;
128};
129
130typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;
131
132struct SISLANDS_SMC_VOLTAGE_VALUE
133{
134 uint16_t value;
135 uint8_t index;
136 uint8_t phase_settings;
137};
138
139typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;
140
141struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
142{
143 uint8_t ACIndex;
144 uint8_t displayWatermark;
145 uint8_t gen2PCIE;
146 uint8_t UVDWatermark;
147 uint8_t VCEWatermark;
148 uint8_t strobeMode;
149 uint8_t mcFlags;
150 uint8_t padding;
151 uint32_t aT;
152 uint32_t bSP;
153 SISLANDS_SMC_SCLK_VALUE sclk;
154 SISLANDS_SMC_MCLK_VALUE mclk;
155 SISLANDS_SMC_VOLTAGE_VALUE vddc;
156 SISLANDS_SMC_VOLTAGE_VALUE mvdd;
157 SISLANDS_SMC_VOLTAGE_VALUE vddci;
158 SISLANDS_SMC_VOLTAGE_VALUE std_vddc;
159 uint8_t hysteresisUp;
160 uint8_t hysteresisDown;
161 uint8_t stateFlags;
162 uint8_t arbRefreshState;
163 uint32_t SQPowerThrottle;
164 uint32_t SQPowerThrottle_2;
165 uint32_t MaxPoweredUpCU;
166 SISLANDS_SMC_VOLTAGE_VALUE high_temp_vddc;
167 SISLANDS_SMC_VOLTAGE_VALUE low_temp_vddc;
168 uint32_t reserved[2];
169 PP_SIslands_Dpm2PerfLevel dpm2;
170};
171
172#define SISLANDS_SMC_STROBE_RATIO 0x0F
173#define SISLANDS_SMC_STROBE_ENABLE 0x10
174
175#define SISLANDS_SMC_MC_EDC_RD_FLAG 0x01
176#define SISLANDS_SMC_MC_EDC_WR_FLAG 0x02
177#define SISLANDS_SMC_MC_RTT_ENABLE 0x04
178#define SISLANDS_SMC_MC_STUTTER_EN 0x08
179#define SISLANDS_SMC_MC_PG_EN 0x10
180
181typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;
182
183struct SISLANDS_SMC_SWSTATE
184{
185 uint8_t flags;
186 uint8_t levelCount;
187 uint8_t padding2;
188 uint8_t padding3;
189 SISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1];
190};
191
192typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
193
194#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
195#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
196#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
197#define SISLANDS_SMC_VOLTAGEMASK_MAX 4
198
199struct SISLANDS_SMC_VOLTAGEMASKTABLE
200{
201 uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
202};
203
204typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
205
206#define SISLANDS_MAX_NO_VREG_STEPS 32
207
208struct SISLANDS_SMC_STATETABLE
209{
210 uint8_t thermalProtectType;
211 uint8_t systemFlags;
212 uint8_t maxVDDCIndexInPPTable;
213 uint8_t extraFlags;
214 uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
215 SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
216 SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
217 PP_SIslands_DPM2Parameters dpm2Params;
218 SISLANDS_SMC_SWSTATE initialState;
219 SISLANDS_SMC_SWSTATE ACPIState;
220 SISLANDS_SMC_SWSTATE ULVState;
221 SISLANDS_SMC_SWSTATE driverState;
222 SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
223};
224
225typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
226
227#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0
228#define SI_SMC_SOFT_REGISTER_delay_vreg 0xC
229#define SI_SMC_SOFT_REGISTER_delay_acpi 0x28
230#define SI_SMC_SOFT_REGISTER_seq_index 0x5C
231#define SI_SMC_SOFT_REGISTER_mvdd_chg_time 0x60
232#define SI_SMC_SOFT_REGISTER_mclk_switch_lim 0x70
233#define SI_SMC_SOFT_REGISTER_watermark_threshold 0x78
234#define SI_SMC_SOFT_REGISTER_phase_shedding_delay 0x88
235#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay 0x8C
236#define SI_SMC_SOFT_REGISTER_mc_block_delay 0x98
237#define SI_SMC_SOFT_REGISTER_ticks_per_us 0xA8
238#define SI_SMC_SOFT_REGISTER_crtc_index 0xC4
239#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8
240#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC
241#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4
242#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC
243#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100
244
245#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
246#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
247
248#define SMC_SISLANDS_SCALE_I 7
249#define SMC_SISLANDS_SCALE_R 12
250
251struct PP_SIslands_CacConfig
252{
253 uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
254 uint32_t lkge_lut_V0;
255 uint32_t lkge_lut_Vstep;
256 uint32_t WinTime;
257 uint32_t R_LL;
258 uint32_t calculation_repeats;
259 uint32_t l2numWin_TDP;
260 uint32_t dc_cac;
261 uint8_t lts_truncate_n;
262 uint8_t SHIFT_N;
263 uint8_t log2_PG_LKG_SCALE;
264 uint8_t cac_temp;
265 uint32_t lkge_lut_T0;
266 uint32_t lkge_lut_Tstep;
267};
268
269typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;
270
271#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
272#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
273
274struct SMC_SIslands_MCRegisterAddress
275{
276 uint16_t s0;
277 uint16_t s1;
278};
279
280typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;
281
282struct SMC_SIslands_MCRegisterSet
283{
284 uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
285};
286
287typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;
288
289struct SMC_SIslands_MCRegisters
290{
291 uint8_t last;
292 uint8_t reserved[3];
293 SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
294 SMC_SIslands_MCRegisterSet data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
295};
296
297typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;
298
299struct SMC_SIslands_MCArbDramTimingRegisterSet
300{
301 uint32_t mc_arb_dram_timing;
302 uint32_t mc_arb_dram_timing2;
303 uint8_t mc_arb_rfsh_rate;
304 uint8_t mc_arb_burst_time;
305 uint8_t padding[2];
306};
307
308typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;
309
310struct SMC_SIslands_MCArbDramTimingRegisters
311{
312 uint8_t arb_current;
313 uint8_t reserved[3];
314 SMC_SIslands_MCArbDramTimingRegisterSet data[16];
315};
316
317typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;
318
319struct SMC_SISLANDS_SPLL_DIV_TABLE
320{
321 uint32_t freq[256];
322 uint32_t ss[256];
323};
324
325#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK 0x01ffffff
326#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
327#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK 0xfe000000
328#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT 25
329#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK 0x000fffff
330#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT 0
331#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK 0xfff00000
332#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT 20
333
334typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
335
336#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5
337
338#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16
339
340struct Smc_SIslands_DTE_Configuration
341{
342 uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
343 uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
344 uint32_t K;
345 uint32_t T0;
346 uint32_t MaxT;
347 uint8_t WindowSize;
348 uint8_t Tdep_count;
349 uint8_t temp_select;
350 uint8_t DTE_mode;
351 uint8_t T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
352 uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
353 uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
354 uint32_t Tthreshold;
355};
356
357typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;
358
359#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1
360
361#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000
362
363#define SISLANDS_SMC_FIRMWARE_HEADER_version 0x0
364#define SISLANDS_SMC_FIRMWARE_HEADER_flags 0x4
365#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters 0xC
366#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable 0x10
367#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable 0x14
368#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable 0x18
369#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable 0x24
370#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30
371#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable 0x38
372#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration 0x40
373#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters 0x48
374
375#pragma pack(pop)
376
377int si_set_smc_sram_address(struct radeon_device *rdev,
378 u32 smc_address, u32 limit);
379int si_copy_bytes_to_smc(struct radeon_device *rdev,
380 u32 smc_start_address,
381 const u8 *src, u32 byte_count, u32 limit);
382void si_start_smc(struct radeon_device *rdev);
383void si_reset_smc(struct radeon_device *rdev);
384int si_program_jump_on_start(struct radeon_device *rdev);
385void si_stop_smc_clock(struct radeon_device *rdev);
386void si_start_smc_clock(struct radeon_device *rdev);
387bool si_is_smc_running(struct radeon_device *rdev);
388PPSMC_Result si_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
389PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev);
390int si_load_smc_ucode(struct radeon_device *rdev, u32 limit);
391int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
392 u32 *value, u32 limit);
393int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
394 u32 value, u32 limit);
395
396#endif
397
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
new file mode 100644
index 000000000000..dbad293bfed5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -0,0 +1,1801 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "sumod.h"
27#include "r600_dpm.h"
28#include "cypress_dpm.h"
29#include "sumo_dpm.h"
30
31#define SUMO_MAX_DEEPSLEEP_DIVIDER_ID 5
32#define SUMO_MINIMUM_ENGINE_CLOCK 800
33#define BOOST_DPM_LEVEL 7
34
/* Per-TC default up-trend values; written into CG_FFCT_n by sumo_program_tp(). */
static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] =
{
	SUMO_UTC_DFLT_00,
	SUMO_UTC_DFLT_01,
	SUMO_UTC_DFLT_02,
	SUMO_UTC_DFLT_03,
	SUMO_UTC_DFLT_04,
	SUMO_UTC_DFLT_05,
	SUMO_UTC_DFLT_06,
	SUMO_UTC_DFLT_07,
	SUMO_UTC_DFLT_08,
	SUMO_UTC_DFLT_09,
	SUMO_UTC_DFLT_10,
	SUMO_UTC_DFLT_11,
	SUMO_UTC_DFLT_12,
	SUMO_UTC_DFLT_13,
	SUMO_UTC_DFLT_14,
};
53
/* Per-TC default down-trend values; written into CG_FFCT_n by sumo_program_tp(). */
static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] =
{
	SUMO_DTC_DFLT_00,
	SUMO_DTC_DFLT_01,
	SUMO_DTC_DFLT_02,
	SUMO_DTC_DFLT_03,
	SUMO_DTC_DFLT_04,
	SUMO_DTC_DFLT_05,
	SUMO_DTC_DFLT_06,
	SUMO_DTC_DFLT_07,
	SUMO_DTC_DFLT_08,
	SUMO_DTC_DFLT_09,
	SUMO_DTC_DFLT_10,
	SUMO_DTC_DFLT_11,
	SUMO_DTC_DFLT_12,
	SUMO_DTC_DFLT_13,
	SUMO_DTC_DFLT_14,
};
72
73struct sumo_ps *sumo_get_ps(struct radeon_ps *rps)
74{
75 struct sumo_ps *ps = rps->ps_priv;
76
77 return ps;
78}
79
80struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev)
81{
82 struct sumo_power_info *pi = rdev->pm.dpm.priv;
83
84 return pi;
85}
86
/*
 * Enable/disable dynamic GFX clock gating.
 * On disable, the clock is pulsed force-on then released so the gater is
 * left in a known state; the trailing RREG32 is presumably a posting read
 * to flush the writes -- TODO confirm against register spec.
 */
static void sumo_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
		RREG32(GB_ADDR_CONFIG);
	}
}
98
99#define CGCG_CGTT_LOCAL0_MASK 0xE5BFFFFF
100#define CGCG_CGTT_LOCAL1_MASK 0xEFFF07FF
101
102static void sumo_mg_clockgating_enable(struct radeon_device *rdev, bool enable)
103{
104 u32 local0;
105 u32 local1;
106
107 local0 = RREG32(CG_CGTT_LOCAL_0);
108 local1 = RREG32(CG_CGTT_LOCAL_1);
109
110 if (enable) {
111 WREG32(CG_CGTT_LOCAL_0, (0 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
112 WREG32(CG_CGTT_LOCAL_1, (0 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
113 } else {
114 WREG32(CG_CGTT_LOCAL_0, (0xFFFFFFFF & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
115 WREG32(CG_CGTT_LOCAL_1, (0xFFFFCFFF & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
116 }
117}
118
/*
 * Program the GIT interval from the reference clock.
 * r600_calculate_u_and_p() fills both p and u, but only p is written here
 * (CG_GICST field); u is required by the helper's signature.
 */
static void sumo_program_git(struct radeon_device *rdev)
{
	u32 p, u;
	u32 xclk = radeon_get_xclk(rdev);

	r600_calculate_u_and_p(SUMO_GICST_DFLT,
			       xclk, 16, &p, &u);

	WREG32_P(CG_GIT, CG_GICST(p), ~CG_GICST_MASK);
}
129
/*
 * Program CG_GCOOR: fixed phase count (grs = 25% of 256 = 64) plus the
 * p/u pair derived from the reference clock.
 */
static void sumo_program_grsd(struct radeon_device *rdev)
{
	u32 p, u;
	u32 xclk = radeon_get_xclk(rdev);
	u32 grs = 256 * 25 / 100;	/* = 64 */

	r600_calculate_u_and_p(1, xclk, 14, &p, &u);

	WREG32(CG_GCOOR, PHC(grs) | SDC(p) | SU(u));
}
140
/* One-time clock-gating timer setup: GIT interval, then GRSD coordinates. */
void sumo_gfx_clockgating_initialize(struct radeon_device *rdev)
{
	sumo_program_git(rdev);
	sumo_program_grsd(rdev);
}
146
/*
 * One-time GFX power-gating sequencer setup.
 * PALM gets its own timing constants and sequences; other families use
 * generic values. The RCU_PWR_GATING_CNTL block is programmed in three
 * passes (PGS(1), PGS(4), PGS(5)), each followed by sumo_smu_pg_init().
 * Statement order follows the required hardware programming sequence.
 */
static void sumo_gfx_powergating_initialize(struct radeon_device *rdev)
{
	u32 rcu_pwr_gating_cntl;
	u32 p, u;
	u32 p_c, p_p, d_p;
	u32 r_t, i_t;
	u32 xclk = radeon_get_xclk(rdev);

	/* per-family sequencer timing parameters */
	if (rdev->family == CHIP_PALM) {
		p_c = 4;
		d_p = 10;
		r_t = 10;
		i_t = 4;
		p_p = 50 + 1000/200 + 6 * 32;
	} else {
		p_c = 16;
		d_p = 50;
		r_t = 50;
		i_t = 50;
		p_p = 113;
	}

	WREG32(CG_SCRATCH2, 0x01B60A17);	/* magic value; meaning not documented here */

	r600_calculate_u_and_p(SUMO_GFXPOWERGATINGT_DFLT,
			       xclk, 16, &p, &u);

	WREG32_P(CG_PWR_GATING_CNTL, PGP(p) | PGU(u),
		 ~(PGP_MASK | PGU_MASK));

	r600_calculate_u_and_p(SUMO_VOLTAGEDROPT_DFLT,
			       xclk, 16, &p, &u);

	WREG32_P(CG_CG_VOLTAGE_CNTL, PGP(p) | PGU(u),
		 ~(PGP_MASK | PGU_MASK));

	/* power-gating sequence registers (PALM-specific vs generic) */
	if (rdev->family == CHIP_PALM) {
		WREG32_RCU(RCU_PWR_GATING_SEQ0, 0x10103210);
		WREG32_RCU(RCU_PWR_GATING_SEQ1, 0x10101010);
	} else {
		WREG32_RCU(RCU_PWR_GATING_SEQ0, 0x76543210);
		WREG32_RCU(RCU_PWR_GATING_SEQ1, 0xFEDCBA98);
	}

	/* pass 1: PGS(1) */
	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
	rcu_pwr_gating_cntl &=
		~(RSVD_MASK | PCV_MASK | PGS_MASK);
	rcu_pwr_gating_cntl |= PCV(p_c) | PGS(1) | PWR_GATING_EN;
	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl &= ~PCP_MASK;
		rcu_pwr_gating_cntl |= PCP(0x77);
	}
	WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);

	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
	rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
	rcu_pwr_gating_cntl |= MPPU(p_p) | MPPD(50);
	WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);

	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
	rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
	rcu_pwr_gating_cntl |= DPPU(d_p) | DPPD(50);
	WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);

	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_4);
	rcu_pwr_gating_cntl &= ~(RT_MASK | IT_MASK);
	rcu_pwr_gating_cntl |= RT(r_t) | IT(i_t);
	WREG32_RCU(RCU_PWR_GATING_CNTL_4, rcu_pwr_gating_cntl);

	if (rdev->family == CHIP_PALM)
		WREG32_RCU(RCU_PWR_GATING_CNTL_5, 0xA02);

	sumo_smu_pg_init(rdev);

	/* pass 2: PGS(4) */
	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
	rcu_pwr_gating_cntl &=
		~(RSVD_MASK | PCV_MASK | PGS_MASK);
	rcu_pwr_gating_cntl |= PCV(p_c) | PGS(4) | PWR_GATING_EN;
	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl &= ~PCP_MASK;
		rcu_pwr_gating_cntl |= PCP(0x77);
	}
	WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);

	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
		rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
		rcu_pwr_gating_cntl |= MPPU(113) | MPPD(50);
		WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);

		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
		rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
		rcu_pwr_gating_cntl |= DPPU(16) | DPPD(50);
		WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);
	}

	sumo_smu_pg_init(rdev);

	/* pass 3: PGS(5); note PCV differs per family here */
	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
	rcu_pwr_gating_cntl &=
		~(RSVD_MASK | PCV_MASK | PGS_MASK);
	rcu_pwr_gating_cntl |= PGS(5) | PWR_GATING_EN;

	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl |= PCV(4);
		rcu_pwr_gating_cntl &= ~PCP_MASK;
		rcu_pwr_gating_cntl |= PCP(0x77);
	} else
		rcu_pwr_gating_cntl |= PCV(11);
	WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);

	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
		rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
		rcu_pwr_gating_cntl |= MPPU(113) | MPPD(50);
		WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);

		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
		rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
		rcu_pwr_gating_cntl |= DPPU(22) | DPPD(50);
		WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);
	}

	sumo_smu_pg_init(rdev);
}
272
/*
 * Enable/disable dynamic GFX power-down. The read of GB_ADDR_CONFIG on
 * the disable path is presumably a posting read -- TODO confirm.
 */
static void sumo_gfx_powergating_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_PWR_GATING_CNTL, DYN_PWR_DOWN_EN, ~DYN_PWR_DOWN_EN);
	else {
		WREG32_P(CG_PWR_GATING_CNTL, 0, ~DYN_PWR_DOWN_EN);
		RREG32(GB_ADDR_CONFIG);
	}
}
282
/*
 * Enable all configured clock/power gating features.
 * Initialization runs before the corresponding enable for both GFX clock
 * and power gating; keep that ordering. Always returns 0.
 */
static int sumo_enable_clock_power_gating(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	if (pi->enable_gfx_clock_gating)
		sumo_gfx_clockgating_initialize(rdev);
	if (pi->enable_gfx_power_gating)
		sumo_gfx_powergating_initialize(rdev);
	if (pi->enable_mg_clock_gating)
		sumo_mg_clockgating_enable(rdev, true);
	if (pi->enable_gfx_clock_gating)
		sumo_gfx_clockgating_enable(rdev, true);
	if (pi->enable_gfx_power_gating)
		sumo_gfx_powergating_enable(rdev, true);

	return 0;
}
300
/* Disable whichever gating features were enabled (reverse of enable order). */
static void sumo_disable_clock_power_gating(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	if (pi->enable_gfx_clock_gating)
		sumo_gfx_clockgating_enable(rdev, false);
	if (pi->enable_gfx_power_gating)
		sumo_gfx_powergating_enable(rdev, false);
	if (pi->enable_mg_clock_gating)
		sumo_mg_clockgating_enable(rdev, false);
}
312
313static void sumo_calculate_bsp(struct radeon_device *rdev,
314 u32 high_clk)
315{
316 struct sumo_power_info *pi = sumo_get_pi(rdev);
317 u32 xclk = radeon_get_xclk(rdev);
318
319 pi->pasi = 65535 * 100 / high_clk;
320 pi->asi = 65535 * 100 / high_clk;
321
322 r600_calculate_u_and_p(pi->asi,
323 xclk, 16, &pi->bsp, &pi->bsu);
324
325 r600_calculate_u_and_p(pi->pasi,
326 xclk, 16, &pi->pbsp, &pi->pbsu);
327
328 pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
329 pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
330}
331
/* Seed CG_BSP_0 with the cached psp value computed by sumo_calculate_bsp(). */
static void sumo_init_bsp(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	WREG32(CG_BSP_0, pi->psp);
}
338
339
/*
 * Program CG_BSP_n for all levels of the state: dsp for levels
 * 0..num_levels-2, psp for the top level.
 * Note: the loop variable i is reused after the loop; when num_levels == 1
 * the loop body never runs and CG_BSP_0 gets psp directly.
 */
static void sumo_program_bsp(struct radeon_device *rdev,
			     struct radeon_ps *rps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct sumo_ps *ps = sumo_get_ps(rps);
	u32 i;
	u32 highest_engine_clock = ps->levels[ps->num_levels - 1].sclk;

	/* with boost, the boost level's sclk bounds the calculation */
	if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
		highest_engine_clock = pi->boost_pl.sclk;

	sumo_calculate_bsp(rdev, highest_engine_clock);

	for (i = 0; i < ps->num_levels - 1; i++)
		WREG32(CG_BSP_0 + (i * 4), pi->dsp);

	WREG32(CG_BSP_0 + (i * 4), pi->psp);

	if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
		WREG32(CG_BSP_0 + (BOOST_DPM_LEVEL * 4), pi->psp);
}
361
362static void sumo_write_at(struct radeon_device *rdev,
363 u32 index, u32 value)
364{
365 if (index == 0)
366 WREG32(CG_AT_0, value);
367 else if (index == 1)
368 WREG32(CG_AT_1, value);
369 else if (index == 2)
370 WREG32(CG_AT_2, value);
371 else if (index == 3)
372 WREG32(CG_AT_3, value);
373 else if (index == 4)
374 WREG32(CG_AT_4, value);
375 else if (index == 5)
376 WREG32(CG_AT_5, value);
377 else if (index == 6)
378 WREG32(CG_AT_6, value);
379 else if (index == 7)
380 WREG32(CG_AT_7, value);
381}
382
/*
 * Program the CG_AT activity thresholds for each level of the state.
 * asi scales by the level's sclk; the top level (and the boost level)
 * uses pasi instead of asi.
 * NOTE(review): r[] and l[] are sized SUMO_MAX_HARDWARE_POWERLEVELS but
 * only entries 0-4 are initialized -- assumes num_levels <= 5; confirm
 * SUMO_MAX_HARDWARE_POWERLEVELS == 5 in sumo_dpm.h.
 */
static void sumo_program_at(struct radeon_device *rdev,
			    struct radeon_ps *rps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct sumo_ps *ps = sumo_get_ps(rps);
	u32 asi;
	u32 i;
	u32 m_a;
	u32 a_t;
	u32 r[SUMO_MAX_HARDWARE_POWERLEVELS];
	u32 l[SUMO_MAX_HARDWARE_POWERLEVELS];

	r[0] = SUMO_R_DFLT0;
	r[1] = SUMO_R_DFLT1;
	r[2] = SUMO_R_DFLT2;
	r[3] = SUMO_R_DFLT3;
	r[4] = SUMO_R_DFLT4;

	l[0] = SUMO_L_DFLT0;
	l[1] = SUMO_L_DFLT1;
	l[2] = SUMO_L_DFLT2;
	l[3] = SUMO_L_DFLT3;
	l[4] = SUMO_L_DFLT4;

	for (i = 0; i < ps->num_levels; i++) {
		/* top level uses the performance asi */
		asi = (i == ps->num_levels - 1) ? pi->pasi : pi->asi;

		m_a = asi * ps->levels[i].sclk / 100;

		a_t = CG_R(m_a * r[i] / 100) | CG_L(m_a * l[i] / 100);

		sumo_write_at(rdev, i, a_t);
	}

	if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) {
		asi = pi->pasi;

		m_a = asi * pi->boost_pl.sclk / 100;

		a_t = CG_R(m_a * r[ps->num_levels - 1] / 100) |
		    CG_L(m_a * l[ps->num_levels - 1] / 100);

		sumo_write_at(rdev, BOOST_DPM_LEVEL, a_t);
	}
}
428
429static void sumo_program_tp(struct radeon_device *rdev)
430{
431 int i;
432 enum r600_td td = R600_TD_DFLT;
433
434 for (i = 0; i < SUMO_PM_NUMBER_OF_TC; i++) {
435 WREG32_P(CG_FFCT_0 + (i * 4), UTC_0(sumo_utc[i]), ~UTC_0_MASK);
436 WREG32_P(CG_FFCT_0 + (i * 4), DTC_0(sumo_dtc[i]), ~DTC_0_MASK);
437 }
438
439 if (td == R600_TD_AUTO)
440 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
441 else
442 WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
443
444 if (td == R600_TD_UP)
445 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
446
447 if (td == R600_TD_DOWN)
448 WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
449}
450
/* Write the voltage-ramp control value into CG_FTV. */
void sumo_program_vc(struct radeon_device *rdev, u32 vrc)
{
	WREG32(CG_FTV, vrc);
}
455
/* Clear CG_FTV (undo sumo_program_vc). */
void sumo_clear_vc(struct radeon_device *rdev)
{
	WREG32(CG_FTV, 0);
}
460
/* Program CG_SSP (SST/SSTU) from the default interval and reference clock. */
void sumo_program_sstp(struct radeon_device *rdev)
{
	u32 p, u;
	u32 xclk = radeon_get_xclk(rdev);

	r600_calculate_u_and_p(SUMO_SST_DFLT,
			       xclk, 16, &p, &u);

	WREG32(CG_SSP, SSTU(u) | SST(p));
}
471
472static void sumo_set_divider_value(struct radeon_device *rdev,
473 u32 index, u32 divider)
474{
475 u32 reg_index = index / 4;
476 u32 field_index = index % 4;
477
478 if (field_index == 0)
479 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
480 SCLK_FSTATE_0_DIV(divider), ~SCLK_FSTATE_0_DIV_MASK);
481 else if (field_index == 1)
482 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
483 SCLK_FSTATE_1_DIV(divider), ~SCLK_FSTATE_1_DIV_MASK);
484 else if (field_index == 2)
485 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
486 SCLK_FSTATE_2_DIV(divider), ~SCLK_FSTATE_2_DIV_MASK);
487 else if (field_index == 3)
488 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
489 SCLK_FSTATE_3_DIV(divider), ~SCLK_FSTATE_3_DIV_MASK);
490}
491
492static void sumo_set_ds_dividers(struct radeon_device *rdev,
493 u32 index, u32 divider)
494{
495 struct sumo_power_info *pi = sumo_get_pi(rdev);
496
497 if (pi->enable_sclk_ds) {
498 u32 dpm_ctrl = RREG32(CG_SCLK_DPM_CTRL_6);
499
500 dpm_ctrl &= ~(0x7 << (index * 3));
501 dpm_ctrl |= (divider << (index * 3));
502 WREG32(CG_SCLK_DPM_CTRL_6, dpm_ctrl);
503 }
504}
505
506static void sumo_set_ss_dividers(struct radeon_device *rdev,
507 u32 index, u32 divider)
508{
509 struct sumo_power_info *pi = sumo_get_pi(rdev);
510
511 if (pi->enable_sclk_ds) {
512 u32 dpm_ctrl = RREG32(CG_SCLK_DPM_CTRL_11);
513
514 dpm_ctrl &= ~(0x7 << (index * 3));
515 dpm_ctrl |= (divider << (index * 3));
516 WREG32(CG_SCLK_DPM_CTRL_11, dpm_ctrl);
517 }
518}
519
/* Set the 2-bit voltage index for one DPM level in CG_DPM_VOLTAGE_CNTL. */
static void sumo_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
	u32 voltage_cntl = RREG32(CG_DPM_VOLTAGE_CNTL);

	voltage_cntl &= ~(DPM_STATE0_LEVEL_MASK << (index * 2));
	voltage_cntl |= (vid << (DPM_STATE0_LEVEL_SHIFT + index * 2));
	WREG32(CG_DPM_VOLTAGE_CNTL, voltage_cntl);
}
528
/*
 * Set the per-level GNB-slow bit in CG_SCLK_DPM_CTRL_3.
 * When the driver's NB p-state policy is disabled, the bit is forced to 1
 * regardless of the requested value.
 */
static void sumo_set_allos_gnb_slow(struct radeon_device *rdev, u32 index, u32 gnb_slow)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 temp = gnb_slow;
	u32 cg_sclk_dpm_ctrl_3;

	if (pi->driver_nbps_policy_disable)
		temp = 1;

	cg_sclk_dpm_ctrl_3 = RREG32(CG_SCLK_DPM_CTRL_3);
	cg_sclk_dpm_ctrl_3 &= ~(GNB_SLOW_FSTATE_0_MASK << index);
	cg_sclk_dpm_ctrl_3 |= (temp << (GNB_SLOW_FSTATE_0_SHIFT + index));

	WREG32(CG_SCLK_DPM_CTRL_3, cg_sclk_dpm_ctrl_3);
}
544
/*
 * Program one SCLK power level: engine-clock divider, voltage index,
 * sleep dividers, GNB-slow policy and (when boosting) the TDP limit.
 * If either sleep divider index is 0 for this level, deep sleep is turned
 * off globally; otherwise the dividers are set and deep sleep (re)enabled.
 * Silently bails out if the ATOM divider computation fails.
 */
static void sumo_program_power_level(struct radeon_device *rdev,
				     struct sumo_pl *pl, u32 index)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	int ret;
	struct atom_clock_dividers dividers;
	u32 ds_en = RREG32(DEEP_SLEEP_CNTL) & ENABLE_DS;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     pl->sclk, false, &dividers);
	if (ret)
		return;

	sumo_set_divider_value(rdev, index, dividers.post_div);

	sumo_set_vid(rdev, index, pl->vddc_index);

	if (pl->ss_divider_index == 0 || pl->ds_divider_index == 0) {
		/* this level can't deep-sleep: disable DS if it was on */
		if (ds_en)
			WREG32_P(DEEP_SLEEP_CNTL, 0, ~ENABLE_DS);
	} else {
		sumo_set_ss_dividers(rdev, index, pl->ss_divider_index);
		sumo_set_ds_dividers(rdev, index, pl->ds_divider_index);

		if (!ds_en)
			WREG32_P(DEEP_SLEEP_CNTL, ENABLE_DS, ~ENABLE_DS);
	}

	sumo_set_allos_gnb_slow(rdev, index, pl->allow_gnb_slow);

	if (pi->enable_boost)
		sumo_set_tdp_limit(rdev, index, pl->sclk_dpm_tdp_limit);
}
578
579static void sumo_power_level_enable(struct radeon_device *rdev, u32 index, bool enable)
580{
581 u32 reg_index = index / 4;
582 u32 field_index = index % 4;
583
584 if (field_index == 0)
585 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
586 enable ? SCLK_FSTATE_0_VLD : 0, ~SCLK_FSTATE_0_VLD);
587 else if (field_index == 1)
588 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
589 enable ? SCLK_FSTATE_1_VLD : 0, ~SCLK_FSTATE_1_VLD);
590 else if (field_index == 2)
591 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
592 enable ? SCLK_FSTATE_2_VLD : 0, ~SCLK_FSTATE_2_VLD);
593 else if (field_index == 3)
594 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
595 enable ? SCLK_FSTATE_3_VLD : 0, ~SCLK_FSTATE_3_VLD);
596}
597
598static bool sumo_dpm_enabled(struct radeon_device *rdev)
599{
600 if (RREG32(CG_SCLK_DPM_CTRL_3) & DPM_SCLK_ENABLE)
601 return true;
602 else
603 return false;
604}
605
/* Turn on SCLK DPM. */
static void sumo_start_dpm(struct radeon_device *rdev)
{
	WREG32_P(CG_SCLK_DPM_CTRL_3, DPM_SCLK_ENABLE, ~DPM_SCLK_ENABLE);
}
610
/* Turn off SCLK DPM. */
static void sumo_stop_dpm(struct radeon_device *rdev)
{
	WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~DPM_SCLK_ENABLE);
}
615
616static void sumo_set_forced_mode(struct radeon_device *rdev, bool enable)
617{
618 if (enable)
619 WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_SCLK_STATE_EN, ~FORCE_SCLK_STATE_EN);
620 else
621 WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~FORCE_SCLK_STATE_EN);
622}
623
/*
 * Enable forced mode and busy-wait (up to usec_timeout) for the
 * overclock-detect bit to confirm the switch took effect.
 */
static void sumo_set_forced_mode_enabled(struct radeon_device *rdev)
{
	int i;

	sumo_set_forced_mode(rdev, true);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SCLK_STATUS) & SCLK_OVERCLK_DETECT)
			break;
		udelay(1);
	}
}
635
/*
 * Busy-wait (up to usec_timeout each) for both the current SCLK index and
 * the current index fields to report level 0.
 */
static void sumo_wait_for_level_0(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) == 0)
			break;
		udelay(1);
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) == 0)
			break;
		udelay(1);
	}
}
651
/* Convenience wrapper: leave forced-SCLK-state mode. */
static void sumo_set_forced_mode_disabled(struct radeon_device *rdev)
{
	sumo_set_forced_mode(rdev, false);
}
656
/* Convenience wrapper: mark power level 0 valid. */
static void sumo_enable_power_level_0(struct radeon_device *rdev)
{
	sumo_power_level_enable(rdev, 0, true);
}
661
662static void sumo_patch_boost_state(struct radeon_device *rdev,
663 struct radeon_ps *rps)
664{
665 struct sumo_power_info *pi = sumo_get_pi(rdev);
666 struct sumo_ps *new_ps = sumo_get_ps(rps);
667
668 if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) {
669 pi->boost_pl = new_ps->levels[new_ps->num_levels - 1];
670 pi->boost_pl.sclk = pi->sys_info.boost_sclk;
671 pi->boost_pl.vddc_index = pi->sys_info.boost_vid_2bit;
672 pi->boost_pl.sclk_dpm_tdp_limit = pi->sys_info.sclk_dpm_tdp_limit_boost;
673 }
674}
675
676static void sumo_pre_notify_alt_vddnb_change(struct radeon_device *rdev,
677 struct radeon_ps *new_rps,
678 struct radeon_ps *old_rps)
679{
680 struct sumo_ps *new_ps = sumo_get_ps(new_rps);
681 struct sumo_ps *old_ps = sumo_get_ps(old_rps);
682 u32 nbps1_old = 0;
683 u32 nbps1_new = 0;
684
685 if (old_ps != NULL)
686 nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
687
688 nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
689
690 if (nbps1_old == 1 && nbps1_new == 0)
691 sumo_smu_notify_alt_vddnb_change(rdev, 0, 0);
692}
693
694static void sumo_post_notify_alt_vddnb_change(struct radeon_device *rdev,
695 struct radeon_ps *new_rps,
696 struct radeon_ps *old_rps)
697{
698 struct sumo_ps *new_ps = sumo_get_ps(new_rps);
699 struct sumo_ps *old_ps = sumo_get_ps(old_rps);
700 u32 nbps1_old = 0;
701 u32 nbps1_new = 0;
702
703 if (old_ps != NULL)
704 nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)? 1 : 0;
705
706 nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)? 1 : 0;
707
708 if (nbps1_old == 0 && nbps1_new == 1)
709 sumo_smu_notify_alt_vddnb_change(rdev, 1, 1);
710}
711
712static void sumo_enable_boost(struct radeon_device *rdev,
713 struct radeon_ps *rps,
714 bool enable)
715{
716 struct sumo_ps *new_ps = sumo_get_ps(rps);
717
718 if (enable) {
719 if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
720 sumo_boost_state_enable(rdev, true);
721 } else
722 sumo_boost_state_enable(rdev, false);
723}
724
/* Select which SCLK level is used while forced mode is active. */
static void sumo_set_forced_level(struct radeon_device *rdev, u32 index)
{
	WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_SCLK_STATE(index), ~FORCE_SCLK_STATE_MASK);
}
729
/* Convenience wrapper: force SCLK level 0. */
static void sumo_set_forced_level_0(struct radeon_device *rdev)
{
	sumo_set_forced_level(rdev, 0);
}
734
/*
 * Rewrite the low byte of CG_SCLK_DPM_CTRL_4: a single bit for the
 * state's highest level, plus the BOOST_DPM_LEVEL bit when the state is
 * boost-capable. The upper 24 bits are preserved.
 */
static void sumo_program_wl(struct radeon_device *rdev,
			    struct radeon_ps *rps)
{
	struct sumo_ps *new_ps = sumo_get_ps(rps);
	u32 dpm_ctrl4 = RREG32(CG_SCLK_DPM_CTRL_4);

	dpm_ctrl4 &= 0xFFFFFF00;
	dpm_ctrl4 |= (1 << (new_ps->num_levels - 1));

	if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
		dpm_ctrl4 |= (1 << BOOST_DPM_LEVEL);

	WREG32(CG_SCLK_DPM_CTRL_4, dpm_ctrl4);
}
749
/*
 * Program and validate every level of the new state, invalidate any
 * leftover levels the old state used beyond that, and program the boost
 * level when the new state is boost-capable. With no old state, one
 * previously-programmed level is assumed.
 */
static void sumo_program_power_levels_0_to_n(struct radeon_device *rdev,
					     struct radeon_ps *new_rps,
					     struct radeon_ps *old_rps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct sumo_ps *new_ps = sumo_get_ps(new_rps);
	struct sumo_ps *old_ps = sumo_get_ps(old_rps);
	u32 i;
	u32 n_current_state_levels = (old_ps == NULL) ? 1 : old_ps->num_levels;

	for (i = 0; i < new_ps->num_levels; i++) {
		sumo_program_power_level(rdev, &new_ps->levels[i], i);
		sumo_power_level_enable(rdev, i, true);
	}

	/* disable stale levels from the previous state */
	for (i = new_ps->num_levels; i < n_current_state_levels; i++)
		sumo_power_level_enable(rdev, i, false);

	if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
		sumo_program_power_level(rdev, &pi->boost_pl, BOOST_DPM_LEVEL);
}
771
/* Enable static power management for the ACPI state. */
static void sumo_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}
776
/* Select level 0 as the SCLK state entered at bootup. */
static void sumo_program_power_level_enter_state(struct radeon_device *rdev)
{
	WREG32_P(CG_SCLK_DPM_CTRL_5, SCLK_FSTATE_BOOTUP(0), ~SCLK_FSTATE_BOOTUP_MASK);
}
781
/*
 * Program the ACPI power level: engine-clock divider from the cached
 * acpi_pl.sclk, with ACPI voltage control disabled. Silently bails out
 * if the ATOM divider computation fails.
 */
static void sumo_program_acpi_power_level(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     pi->acpi_pl.sclk,
					     false, &dividers);
	if (ret)
		return;

	WREG32_P(CG_ACPI_CNTL, SCLK_ACPI_DIV(dividers.post_div), ~SCLK_ACPI_DIV_MASK);
	WREG32_P(CG_ACPI_VOLTAGE_CNTL, 0, ~ACPI_VOLTAGE_EN);
}
797
/*
 * Restore the bootup configuration: program the boot level as level 0,
 * clear all level-enable bits in the low byte of CG_SCLK_DPM_CTRL_4, and
 * invalidate levels 1-7.
 */
static void sumo_program_bootup_state(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 dpm_ctrl4 = RREG32(CG_SCLK_DPM_CTRL_4);
	u32 i;

	sumo_program_power_level(rdev, &pi->boot_pl, 0);

	dpm_ctrl4 &= 0xFFFFFF00;
	WREG32(CG_SCLK_DPM_CTRL_4, dpm_ctrl4);

	for (i = 1; i < 8; i++)
		sumo_power_level_enable(rdev, i, false);
}
812
/*
 * Set UVD clocks before the engine clock changes, but only when the UVD
 * clocks actually change and the new state's top sclk is lower than the
 * current one (the rising case is handled after the switch instead).
 * Note: old_rps is dereferenced unconditionally -- caller must pass a
 * valid previous state.
 */
static void sumo_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
						    struct radeon_ps *new_rps,
						    struct radeon_ps *old_rps)
{
	struct sumo_ps *new_ps = sumo_get_ps(new_rps);
	struct sumo_ps *current_ps = sumo_get_ps(old_rps);

	if ((new_rps->vclk == old_rps->vclk) &&
	    (new_rps->dclk == old_rps->dclk))
		return;

	if (new_ps->levels[new_ps->num_levels - 1].sclk >=
	    current_ps->levels[current_ps->num_levels - 1].sclk)
		return;

	radeon_set_uvd_clocks(rdev, new_rps->vclk, new_rps->dclk);
}
830
/*
 * Counterpart of the "before" helper: set UVD clocks after the engine
 * clock changes, when the new state's top sclk is not lower than the
 * current one.
 */
static void sumo_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
						   struct radeon_ps *new_rps,
						   struct radeon_ps *old_rps)
{
	struct sumo_ps *new_ps = sumo_get_ps(new_rps);
	struct sumo_ps *current_ps = sumo_get_ps(old_rps);

	if ((new_rps->vclk == old_rps->vclk) &&
	    (new_rps->dclk == old_rps->dclk))
		return;

	if (new_ps->levels[new_ps->num_levels - 1].sclk <
	    current_ps->levels[current_ps->num_levels - 1].sclk)
		return;

	radeon_set_uvd_clocks(rdev, new_rps->vclk, new_rps->dclk);
}
848
/*
 * Select who owns display-phy powergating (driver vs. atom).
 * Deliberately a no-op for now: the #if 0 stanza shows how the driver
 * would claim/release control via bit 2 of DOUT_SCRATCH3.
 */
void sumo_take_smu_control(struct radeon_device *rdev, bool enable)
{
/* This bit selects who handles display phy powergating.
 * Clear the bit to let atom handle it.
 * Set it to let the driver handle it.
 * For now we just let atom handle it.
 */
#if 0
	u32 v = RREG32(DOUT_SCRATCH3);

	if (enable)
		v |= 0x4;
	else
		v &= 0xFFFFFFFB;

	WREG32(DOUT_SCRATCH3, v);
#endif
}
867
/*
 * Enable/disable sclk deep sleep. On enable, the hold time (HS) is
 * clamped to 4095 and bypass-ratio disable (R_DIS) is cleared; CNTL2 is
 * written before CNTL.
 * NOTE(review): "deep_sleep_cntl2 &= INOUT_C_MASK" keeps only the
 * INOUT_C bits and clears everything else -- including LB_UFP_EN set just
 * above. Every other mask use in this file is "&= ~MASK"; looks like it
 * may be missing a '~'. Confirm against the register spec before changing.
 */
static void sumo_enable_sclk_ds(struct radeon_device *rdev, bool enable)
{
	if (enable) {
		u32 deep_sleep_cntl = RREG32(DEEP_SLEEP_CNTL);
		u32 deep_sleep_cntl2 = RREG32(DEEP_SLEEP_CNTL2);
		u32 t = 1;

		deep_sleep_cntl &= ~R_DIS;
		deep_sleep_cntl &= ~HS_MASK;
		deep_sleep_cntl |= HS(t > 4095 ? 4095 : t);

		deep_sleep_cntl2 |= LB_UFP_EN;
		deep_sleep_cntl2 &= INOUT_C_MASK;
		deep_sleep_cntl2 |= INOUT_C(0xf);

		WREG32(DEEP_SLEEP_CNTL2, deep_sleep_cntl2);
		WREG32(DEEP_SLEEP_CNTL, deep_sleep_cntl);
	} else
		WREG32_P(DEEP_SLEEP_CNTL, 0, ~ENABLE_DS);
}
888
/* Bootup activity thresholds: CG_AT_0 gets R = 0xffff, L = 0. */
static void sumo_program_bootup_at(struct radeon_device *rdev)
{
	WREG32_P(CG_AT_0, CG_R(0xffff), ~CG_R_MASK);
	WREG32_P(CG_AT_0, CG_L(0), ~CG_L_MASK);
}
894
/* Hold the FIR activity monitor in reset. */
static void sumo_reset_am(struct radeon_device *rdev)
{
	WREG32_P(SCLK_PWRMGT_CNTL, FIR_RESET, ~FIR_RESET);
}
899
/* Release the FIR activity monitor from reset. */
static void sumo_start_am(struct radeon_device *rdev)
{
	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_RESET);
}
904
/*
 * Program the TT_TP/TT_TU timer fields of CG_SCLK_DPM_CTRL_5 from a fixed
 * 1000-unit interval and the reference clock.
 */
static void sumo_program_ttp(struct radeon_device *rdev)
{
	u32 xclk = radeon_get_xclk(rdev);
	u32 p, u;
	u32 cg_sclk_dpm_ctrl_5 = RREG32(CG_SCLK_DPM_CTRL_5);

	r600_calculate_u_and_p(1000,
			       xclk, 16, &p, &u);

	cg_sclk_dpm_ctrl_5 &= ~(TT_TP_MASK | TT_TU_MASK);
	cg_sclk_dpm_ctrl_5 |= TT_TP(p) | TT_TU(u);

	WREG32(CG_SCLK_DPM_CTRL_5, cg_sclk_dpm_ctrl_5);
}
919
/*
 * Program the GNB thermal-throttle trigger: pi->thermal_auto_throttling
 * plus a fixed 49-unit offset; the GNB_THERMTHRO mask bits are cleared.
 */
static void sumo_program_ttt(struct radeon_device *rdev)
{
	u32 cg_sclk_dpm_ctrl_3 = RREG32(CG_SCLK_DPM_CTRL_3);
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	cg_sclk_dpm_ctrl_3 &= ~(GNB_TT_MASK | GNB_THERMTHRO_MASK);
	cg_sclk_dpm_ctrl_3 |= GNB_TT(pi->thermal_auto_throttling + 49);

	WREG32(CG_SCLK_DPM_CTRL_3, cg_sclk_dpm_ctrl_3);
}
930
931
/*
 * Switch between DPM-driven and CG-driven voltage control. The write
 * order differs per direction so exactly one controller is enabled at
 * any point during the transition.
 */
static void sumo_enable_voltage_scaling(struct radeon_device *rdev, bool enable)
{
	if (enable) {
		WREG32_P(CG_DPM_VOLTAGE_CNTL, DPM_VOLTAGE_EN, ~DPM_VOLTAGE_EN);
		WREG32_P(CG_CG_VOLTAGE_CNTL, 0, ~CG_VOLTAGE_EN);
	} else {
		WREG32_P(CG_CG_VOLTAGE_CNTL, CG_VOLTAGE_EN, ~CG_VOLTAGE_EN);
		WREG32_P(CG_DPM_VOLTAGE_CNTL, 0, ~DPM_VOLTAGE_EN);
	}
}
942
/* Mask CNB thermal-throttle events from affecting SCLK. */
static void sumo_override_cnb_thermal_events(struct radeon_device *rdev)
{
	WREG32_P(CG_SCLK_DPM_CTRL_3, CNB_THERMTHRO_MASK_SCLK,
		 ~CNB_THERMTHRO_MASK_SCLK);
}
948
/*
 * Program the DC_HDC/DC_HU timer fields of CG_SCLK_DPM_CTRL_4 from a
 * fixed 100000-unit interval and the reference clock.
 */
static void sumo_program_dc_hto(struct radeon_device *rdev)
{
	u32 cg_sclk_dpm_ctrl_4 = RREG32(CG_SCLK_DPM_CTRL_4);
	u32 p, u;
	u32 xclk = radeon_get_xclk(rdev);

	r600_calculate_u_and_p(100000,
			       xclk, 14, &p, &u);

	cg_sclk_dpm_ctrl_4 &= ~(DC_HDC_MASK | DC_HU_MASK);
	cg_sclk_dpm_ctrl_4 |= DC_HDC(p) | DC_HU(u);

	WREG32(CG_SCLK_DPM_CTRL_4, cg_sclk_dpm_ctrl_4);
}
963
964static void sumo_force_nbp_state(struct radeon_device *rdev,
965 struct radeon_ps *rps)
966{
967 struct sumo_power_info *pi = sumo_get_pi(rdev);
968 struct sumo_ps *new_ps = sumo_get_ps(rps);
969
970 if (!pi->driver_nbps_policy_disable) {
971 if (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)
972 WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_NB_PSTATE_1, ~FORCE_NB_PSTATE_1);
973 else
974 WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~FORCE_NB_PSTATE_1);
975 }
976}
977
/* Divider id n means divide by 2^n. */
u32 sumo_get_sleep_divider_from_id(u32 id)
{
	return 1 << id;
}
982
983u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
984 u32 sclk,
985 u32 min_sclk_in_sr)
986{
987 struct sumo_power_info *pi = sumo_get_pi(rdev);
988 u32 i;
989 u32 temp;
990 u32 min = (min_sclk_in_sr > SUMO_MINIMUM_ENGINE_CLOCK) ?
991 min_sclk_in_sr : SUMO_MINIMUM_ENGINE_CLOCK;
992
993 if (sclk < min)
994 return 0;
995
996 if (!pi->enable_sclk_ds)
997 return 0;
998
999 for (i = SUMO_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
1000 temp = sclk / sumo_get_sleep_divider_from_id(i);
1001
1002 if (temp >= min || i == 0)
1003 break;
1004 }
1005 return i;
1006}
1007
1008static u32 sumo_get_valid_engine_clock(struct radeon_device *rdev,
1009 u32 lower_limit)
1010{
1011 struct sumo_power_info *pi = sumo_get_pi(rdev);
1012 u32 i;
1013
1014 for (i = 0; i < pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries; i++) {
1015 if (pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency >= lower_limit)
1016 return pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency;
1017 }
1018
1019 return pi->sys_info.sclk_voltage_mapping_table.entries[pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1].sclk_frequency;
1020}
1021
/*
 * Rewrite level 0 of a thermal state so it is no hotter than what is
 * currently running (or the boot level when no current state), then
 * recompute its sleep-divider indices with the usual clamping rules:
 * ds <= ss + 1, ss != ds (decrement ss when they collide and ss > 1),
 * and if either index ends up 0 the other is zeroed too.
 */
static void sumo_patch_thermal_state(struct radeon_device *rdev,
				     struct sumo_ps *ps,
				     struct sumo_ps *current_ps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
	u32 current_vddc;
	u32 current_sclk;
	u32 current_index = 0;

	if (current_ps) {
		current_vddc = current_ps->levels[current_index].vddc_index;
		current_sclk = current_ps->levels[current_index].sclk;
	} else {
		/* no current state: fall back to the boot level */
		current_vddc = pi->boot_pl.vddc_index;
		current_sclk = pi->boot_pl.sclk;
	}

	ps->levels[0].vddc_index = current_vddc;

	/* never raise the clock for a thermal state */
	if (ps->levels[0].sclk > current_sclk)
		ps->levels[0].sclk = current_sclk;

	ps->levels[0].ss_divider_index =
		sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[0].sclk, sclk_in_sr);

	ps->levels[0].ds_divider_index =
		sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[0].sclk, SUMO_MINIMUM_ENGINE_CLOCK);

	if (ps->levels[0].ds_divider_index > ps->levels[0].ss_divider_index + 1)
		ps->levels[0].ds_divider_index = ps->levels[0].ss_divider_index + 1;

	if (ps->levels[0].ss_divider_index == ps->levels[0].ds_divider_index) {
		if (ps->levels[0].ss_divider_index > 1)
			ps->levels[0].ss_divider_index = ps->levels[0].ss_divider_index - 1;
	}

	if (ps->levels[0].ss_divider_index == 0)
		ps->levels[0].ds_divider_index = 0;

	if (ps->levels[0].ds_divider_index == 0)
		ps->levels[0].ss_divider_index = 0;
}
1065
/*
 * Massage a requested power state before it is programmed: thermal states
 * are clamped to the current operating point, boost/NBPS1 flags are
 * derived from the state classification, and every level gets its sclk
 * floor and sleep-divider indices fixed up.
 */
static void sumo_apply_state_adjust_rules(struct radeon_device *rdev,
					  struct radeon_ps *new_rps,
					  struct radeon_ps *old_rps)
{
	struct sumo_ps *ps = sumo_get_ps(new_rps);
	struct sumo_ps *current_ps = sumo_get_ps(old_rps);
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 min_voltage = 0; /* ??? */
	u32 min_sclk = pi->sys_info.min_sclk; /* XXX check against disp reqs */
	u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
	u32 i;

	/* thermal states only get clamped; no other rules apply */
	if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
		return sumo_patch_thermal_state(rdev, ps, current_ps);

	if (pi->enable_boost) {
		if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE)
			ps->flags |= SUMO_POWERSTATE_FLAGS_BOOST_STATE;
	}

	/* low-demand state classes are pinned to north-bridge power state 1 */
	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) ||
	    (new_rps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ||
	    (new_rps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE))
		ps->flags |= SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE;

	for (i = 0; i < ps->num_levels; i++) {
		/* NOTE(review): min_voltage is 0 and vddc_index is unsigned,
		 * so this clamp can never trigger as written */
		if (ps->levels[i].vddc_index < min_voltage)
			ps->levels[i].vddc_index = min_voltage;

		if (ps->levels[i].sclk < min_sclk)
			ps->levels[i].sclk =
				sumo_get_valid_engine_clock(rdev, min_sclk);

		ps->levels[i].ss_divider_index =
			sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[i].sclk, sclk_in_sr);

		ps->levels[i].ds_divider_index =
			sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[i].sclk, SUMO_MINIMUM_ENGINE_CLOCK);

		/* deep-sleep divider may be at most one step above the sleep divider */
		if (ps->levels[i].ds_divider_index > ps->levels[i].ss_divider_index + 1)
			ps->levels[i].ds_divider_index = ps->levels[i].ss_divider_index + 1;

		/* equal divider indices are not allowed unless both end up zero */
		if (ps->levels[i].ss_divider_index == ps->levels[i].ds_divider_index) {
			if (ps->levels[i].ss_divider_index > 1)
				ps->levels[i].ss_divider_index = ps->levels[i].ss_divider_index - 1;
		}

		if (ps->levels[i].ss_divider_index == 0)
			ps->levels[i].ds_divider_index = 0;

		if (ps->levels[i].ds_divider_index == 0)
			ps->levels[i].ss_divider_index = 0;

		/* gnb slowdown: always on when forced to NBPS1, never for
		 * UVD/MVC states or the highest performance level */
		if (ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)
			ps->levels[i].allow_gnb_slow = 1;
		else if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ||
			 (new_rps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC))
			ps->levels[i].allow_gnb_slow = 0;
		else if (i == ps->num_levels - 1)
			ps->levels[i].allow_gnb_slow = 0;
		else
			ps->levels[i].allow_gnb_slow = 1;
	}
}
1130
/* Hand control back from the driver to the SMU on teardown. */
static void sumo_cleanup_asic(struct radeon_device *rdev)
{
	sumo_take_smu_control(rdev, false);
}
1135
/*
 * Put the UVD clocks under driver control and drop them to an idle rate.
 * Clearing the DIR_CNTL_EN bits disables the direct control path for
 * VCLK/DCLK before the clocks are reprogrammed.
 */
static void sumo_uvd_init(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(CG_VCLK_CNTL);
	tmp &= ~VCLK_DIR_CNTL_EN;
	WREG32(CG_VCLK_CNTL, tmp);

	tmp = RREG32(CG_DCLK_CNTL);
	tmp &= ~DCLK_DIR_CNTL_EN;
	WREG32(CG_DCLK_CNTL, tmp);

	/* 100 Mhz */
	/* NOTE(review): 10000 presumably means 100 MHz in 10 kHz units —
	 * confirm against radeon_set_uvd_clocks() */
	radeon_set_uvd_clocks(rdev, 10000, 10000);
}
1151
1152static int sumo_set_thermal_temperature_range(struct radeon_device *rdev,
1153 int min_temp, int max_temp)
1154{
1155 int low_temp = 0 * 1000;
1156 int high_temp = 255 * 1000;
1157
1158 if (low_temp < min_temp)
1159 low_temp = min_temp;
1160 if (high_temp > max_temp)
1161 high_temp = max_temp;
1162 if (high_temp < low_temp) {
1163 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
1164 return -EINVAL;
1165 }
1166
1167 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(49 + (high_temp / 1000)), ~DIG_THERM_INTH_MASK);
1168 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(49 + (low_temp / 1000)), ~DIG_THERM_INTL_MASK);
1169
1170 rdev->pm.dpm.thermal.min_temp = low_temp;
1171 rdev->pm.dpm.thermal.max_temp = high_temp;
1172
1173 return 0;
1174}
1175
/*
 * Record rps as the current power state.  Both the generic and the
 * sumo-private structs are copied by value, and ps_priv is re-pointed
 * at the local copy so the pair stays self-contained.
 */
static void sumo_update_current_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct sumo_ps *new_ps = sumo_get_ps(rps);
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}
1186
/*
 * Record rps as the requested power state (copy-by-value, with ps_priv
 * re-pointed at the private copy), mirroring sumo_update_current_ps().
 */
static void sumo_update_requested_ps(struct radeon_device *rdev,
				     struct radeon_ps *rps)
{
	struct sumo_ps *new_ps = sumo_get_ps(rps);
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}
1197
/*
 * Bring up dynamic power management: gating, bootup state, arbitration
 * and thermal-throttle programming, then start DPM and the optional
 * sclk deep-sleep / boost features, and finally hook up the internal
 * thermal interrupt.  The call order follows the hardware bring-up
 * sequence and should not be rearranged.
 *
 * Returns -EINVAL if DPM is already enabled, otherwise 0 or the error
 * from clock-gating setup / thermal range programming.
 */
int sumo_dpm_enable(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	int ret;

	if (sumo_dpm_enabled(rdev))
		return -EINVAL;

	ret = sumo_enable_clock_power_gating(rdev);
	if (ret)
		return ret;
	sumo_program_bootup_state(rdev);
	sumo_init_bsp(rdev);
	sumo_reset_am(rdev);
	sumo_program_tp(rdev);
	sumo_program_bootup_at(rdev);
	sumo_start_am(rdev);
	if (pi->enable_auto_thermal_throttling) {
		sumo_program_ttp(rdev);
		sumo_program_ttt(rdev);
	}
	sumo_program_dc_hto(rdev);
	sumo_program_power_level_enter_state(rdev);
	sumo_enable_voltage_scaling(rdev, true);
	sumo_program_sstp(rdev);
	sumo_program_vc(rdev, SUMO_VRC_DFLT);
	sumo_override_cnb_thermal_events(rdev);
	sumo_start_dpm(rdev);
	sumo_wait_for_level_0(rdev);
	if (pi->enable_sclk_ds)
		sumo_enable_sclk_ds(rdev, true);
	if (pi->enable_boost)
		sumo_enable_boost_timer(rdev);

	/* enable the thermal interrupt only when an internal sensor exists */
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	/* we come up in the boot state */
	sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

	return 0;
}
1245
/*
 * Tear down dynamic power management: drop back to level 0, stop DPM,
 * disable voltage scaling and the thermal interrupt, and record the
 * boot state as current.  No-op if DPM is not enabled.
 */
void sumo_dpm_disable(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	if (!sumo_dpm_enabled(rdev))
		return;
	sumo_disable_clock_power_gating(rdev);
	if (pi->enable_sclk_ds)
		sumo_enable_sclk_ds(rdev, false);
	sumo_clear_vc(rdev);
	sumo_wait_for_level_0(rdev);
	sumo_stop_dpm(rdev);
	sumo_enable_voltage_scaling(rdev, false);

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		rdev->irq.dpm_thermal = false;
		radeon_irq_set(rdev);
	}

	sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}
1268
/*
 * Latch the globally requested power state into the sumo private copy
 * and (optionally) run the state adjustment rules against the current
 * state before the switch is performed.  Always returns 0.
 */
int sumo_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	sumo_update_requested_ps(rdev, new_ps);

	if (pi->enable_dynamic_patch_ps)
		sumo_apply_state_adjust_rules(rdev,
					      &pi->requested_rps,
					      &pi->current_rps);

	return 0;
}
1284
/*
 * Switch from the current to the requested power state.  UVD clocks are
 * adjusted around the engine-clock change, boost is disabled during the
 * transition, and the forced-mode enable/disable toggling follows the
 * required hardware sequence — do not reorder.  Always returns 0.
 */
int sumo_dpm_set_power_state(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;

	if (pi->enable_dpm)
		sumo_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
	if (pi->enable_boost) {
		/* boost is turned off while reprogramming, re-enabled below */
		sumo_enable_boost(rdev, new_ps, false);
		sumo_patch_boost_state(rdev, new_ps);
	}
	if (pi->enable_dpm) {
		sumo_pre_notify_alt_vddnb_change(rdev, new_ps, old_ps);
		sumo_enable_power_level_0(rdev);
		sumo_set_forced_level_0(rdev);
		sumo_set_forced_mode_enabled(rdev);
		sumo_wait_for_level_0(rdev);
		sumo_program_power_levels_0_to_n(rdev, new_ps, old_ps);
		sumo_program_wl(rdev, new_ps);
		sumo_program_bsp(rdev, new_ps);
		sumo_program_at(rdev, new_ps);
		sumo_force_nbp_state(rdev, new_ps);
		sumo_set_forced_mode_disabled(rdev);
		sumo_set_forced_mode_enabled(rdev);
		sumo_set_forced_mode_disabled(rdev);
		sumo_post_notify_alt_vddnb_change(rdev, new_ps, old_ps);
	}
	if (pi->enable_boost)
		sumo_enable_boost(rdev, new_ps, true);
	if (pi->enable_dpm)
		sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);

	return 0;
}
1320
/* After a successful switch, promote the requested state to current. */
void sumo_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	sumo_update_current_ps(rdev, new_ps);
}
1328
/*
 * Force the asic back to the bootup power state at level 0.  The
 * forced-mode toggle sequence mirrors sumo_dpm_set_power_state().
 */
void sumo_dpm_reset_asic(struct radeon_device *rdev)
{
	sumo_program_bootup_state(rdev);
	sumo_enable_power_level_0(rdev);
	sumo_set_forced_level_0(rdev);
	sumo_set_forced_mode_enabled(rdev);
	sumo_wait_for_level_0(rdev);
	sumo_set_forced_mode_disabled(rdev);
	sumo_set_forced_mode_enabled(rdev);
	sumo_set_forced_mode_disabled(rdev);
}
1340
/*
 * One-time asic setup for DPM: memory arbiter init, SMC firmware
 * version query, ACPI power level programming, taking SMU control,
 * and UVD clock initialization.
 */
void sumo_dpm_setup_asic(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	sumo_initialize_m3_arb(rdev);
	pi->fw_version = sumo_get_running_fw_version(rdev);
	DRM_INFO("Found smc ucode version: 0x%08x\n", pi->fw_version);
	sumo_program_acpi_power_level(rdev);
	sumo_enable_acpi_pm(rdev);
	sumo_take_smu_control(rdev, true);
	sumo_uvd_init(rdev);
}
1353
/* Display configuration changes need no DPM action on this asic. */
void sumo_dpm_display_configuration_changed(struct radeon_device *rdev)
{

}
1358
/* Overlay types for the variable-revision ATOM BIOS power tables; the
 * member actually used is selected by the table revision at parse time. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Per-asic-family clock info layouts within the pplib table. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

/* v1/v2 pplib power state layouts. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
1379
/* Rewrite a parsed boot state to a single level equal to the boot
 * power level with all flags cleared. */
static void sumo_patch_boot_state(struct radeon_device *rdev,
				  struct sumo_ps *ps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	ps->num_levels = 1;
	ps->flags = 0;
	ps->levels[0] = pi->boot_pl;
}
1389
/*
 * Fill the generic radeon_ps fields (caps, classification, UVD clocks)
 * from a pplib non-clock info entry, and register the boot/UVD states
 * with the dpm core when the classification flags say so.
 */
static void sumo_parse_pplib_non_clock_info(struct radeon_device *rdev,
					    struct radeon_ps *rps,
					    struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					    u8 table_rev)
{
	struct sumo_ps *ps = sumo_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	/* UVD clocks only exist in table revisions newer than VER1 */
	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.dpm.boot_ps = rps;
		sumo_patch_boot_state(rdev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}
1416
/*
 * Fill power level 'index' of a state from a pplib sumo clock info
 * entry (sclk is split into a 16-bit low and 8-bit high part in the
 * BIOS table) and bump the state's level count.
 */
static void sumo_parse_pplib_clock_info(struct radeon_device *rdev,
					struct radeon_ps *rps, int index,
					union pplib_clock_info *clock_info)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct sumo_ps *ps = sumo_get_ps(rps);
	struct sumo_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;
	pl->sclk_dpm_tdp_limit = clock_info->sumo.tdpLimit;

	ps->num_levels = index + 1;

	/* fixed divider ids used when sclk deep sleep is enabled */
	if (pi->enable_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 4;
	}
}
1439
1440static int sumo_parse_power_table(struct radeon_device *rdev)
1441{
1442 struct radeon_mode_info *mode_info = &rdev->mode_info;
1443 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
1444 union pplib_power_state *power_state;
1445 int i, j, k, non_clock_array_index, clock_array_index;
1446 union pplib_clock_info *clock_info;
1447 struct _StateArray *state_array;
1448 struct _ClockInfoArray *clock_info_array;
1449 struct _NonClockInfoArray *non_clock_info_array;
1450 union power_info *power_info;
1451 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
1452 u16 data_offset;
1453 u8 frev, crev;
1454 u8 *power_state_offset;
1455 struct sumo_ps *ps;
1456
1457 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
1458 &frev, &crev, &data_offset))
1459 return -EINVAL;
1460 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1461
1462 state_array = (struct _StateArray *)
1463 (mode_info->atom_context->bios + data_offset +
1464 le16_to_cpu(power_info->pplib.usStateArrayOffset));
1465 clock_info_array = (struct _ClockInfoArray *)
1466 (mode_info->atom_context->bios + data_offset +
1467 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
1468 non_clock_info_array = (struct _NonClockInfoArray *)
1469 (mode_info->atom_context->bios + data_offset +
1470 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
1471
1472 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
1473 state_array->ucNumEntries, GFP_KERNEL);
1474 if (!rdev->pm.dpm.ps)
1475 return -ENOMEM;
1476 power_state_offset = (u8 *)state_array->states;
1477 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
1478 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
1479 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
1480 for (i = 0; i < state_array->ucNumEntries; i++) {
1481 power_state = (union pplib_power_state *)power_state_offset;
1482 non_clock_array_index = power_state->v2.nonClockInfoIndex;
1483 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
1484 &non_clock_info_array->nonClockInfo[non_clock_array_index];
1485 if (!rdev->pm.power_state[i].clock_info)
1486 return -EINVAL;
1487 ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
1488 if (ps == NULL) {
1489 kfree(rdev->pm.dpm.ps);
1490 return -ENOMEM;
1491 }
1492 rdev->pm.dpm.ps[i].ps_priv = ps;
1493 k = 0;
1494 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
1495 clock_array_index = power_state->v2.clockInfoIndex[j];
1496 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
1497 break;
1498 clock_info = (union pplib_clock_info *)
1499 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
1500 sumo_parse_pplib_clock_info(rdev,
1501 &rdev->pm.dpm.ps[i], k,
1502 clock_info);
1503 k++;
1504 }
1505 sumo_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
1506 non_clock_info,
1507 non_clock_info_array->ucEntrySize);
1508 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
1509 }
1510 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
1511 return 0;
1512}
1513
1514u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
1515 struct sumo_vid_mapping_table *vid_mapping_table,
1516 u32 vid_2bit)
1517{
1518 u32 i;
1519
1520 for (i = 0; i < vid_mapping_table->num_entries; i++) {
1521 if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
1522 return vid_mapping_table->entries[i].vid_7bit;
1523 }
1524
1525 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
1526}
1527
1528static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev,
1529 u32 vid_2bit)
1530{
1531 struct sumo_power_info *pi = sumo_get_pi(rdev);
1532 u32 vid_7bit = sumo_convert_vid2_to_vid7(rdev, &pi->sys_info.vid_mapping_table, vid_2bit);
1533
1534 if (vid_7bit > 0x7C)
1535 return 0;
1536
1537 return (15500 - vid_7bit * 125 + 5) / 10;
1538}
1539
1540static void sumo_construct_display_voltage_mapping_table(struct radeon_device *rdev,
1541 struct sumo_disp_clock_voltage_mapping_table *disp_clk_voltage_mapping_table,
1542 ATOM_CLK_VOLT_CAPABILITY *table)
1543{
1544 u32 i;
1545
1546 for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
1547 if (table[i].ulMaximumSupportedCLK == 0)
1548 break;
1549
1550 disp_clk_voltage_mapping_table->display_clock_frequency[i] =
1551 table[i].ulMaximumSupportedCLK;
1552 }
1553
1554 disp_clk_voltage_mapping_table->num_max_voltage_levels = i;
1555
1556 if (disp_clk_voltage_mapping_table->num_max_voltage_levels == 0) {
1557 disp_clk_voltage_mapping_table->display_clock_frequency[0] = 80000;
1558 disp_clk_voltage_mapping_table->num_max_voltage_levels = 1;
1559 }
1560}
1561
1562void sumo_construct_sclk_voltage_mapping_table(struct radeon_device *rdev,
1563 struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
1564 ATOM_AVAILABLE_SCLK_LIST *table)
1565{
1566 u32 i;
1567 u32 n = 0;
1568 u32 prev_sclk = 0;
1569
1570 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
1571 if (table[i].ulSupportedSCLK > prev_sclk) {
1572 sclk_voltage_mapping_table->entries[n].sclk_frequency =
1573 table[i].ulSupportedSCLK;
1574 sclk_voltage_mapping_table->entries[n].vid_2bit =
1575 table[i].usVoltageIndex;
1576 prev_sclk = table[i].ulSupportedSCLK;
1577 n++;
1578 }
1579 }
1580
1581 sclk_voltage_mapping_table->num_max_dpm_entries = n;
1582}
1583
/*
 * Build the 2-bit -> 7-bit VID mapping table from the ATOM available-
 * sclk list, then compact it: empty slots (vid_7bit == 0) are filled by
 * shifting later non-empty entries down, and num_entries is set to the
 * count of populated slots.
 */
void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
				      struct sumo_vid_mapping_table *vid_mapping_table,
				      ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i, j;

	/* populate slots indexed by the 2-bit voltage index */
	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK != 0) {
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
				table[i].usVoltageID;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
				table[i].usVoltageIndex;
		}
	}

	/* compact: pull the next non-empty entry into each empty slot */
	for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
		if (vid_mapping_table->entries[i].vid_7bit == 0) {
			for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
				if (vid_mapping_table->entries[j].vid_7bit != 0) {
					vid_mapping_table->entries[i] =
						vid_mapping_table->entries[j];
					vid_mapping_table->entries[j].vid_7bit = 0;
					break;
				}
			}

			/* nothing left to pull down -> table ends here */
			if (j == SUMO_MAX_NUMBER_VOLTAGES)
				break;
		}
	}

	vid_mapping_table->num_entries = i;
}
1617
/* Overlay for the revisioned ATOM integrated-system-info table; only
 * the V6 layout is accepted by sumo_parse_sys_info_table(). */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
};
1624
1625static int sumo_parse_sys_info_table(struct radeon_device *rdev)
1626{
1627 struct sumo_power_info *pi = sumo_get_pi(rdev);
1628 struct radeon_mode_info *mode_info = &rdev->mode_info;
1629 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
1630 union igp_info *igp_info;
1631 u8 frev, crev;
1632 u16 data_offset;
1633 int i;
1634
1635 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1636 &frev, &crev, &data_offset)) {
1637 igp_info = (union igp_info *)(mode_info->atom_context->bios +
1638 data_offset);
1639
1640 if (crev != 6) {
1641 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
1642 return -EINVAL;
1643 }
1644 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_6.ulBootUpEngineClock);
1645 pi->sys_info.min_sclk = le32_to_cpu(igp_info->info_6.ulMinEngineClock);
1646 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_6.ulBootUpUMAClock);
1647 pi->sys_info.bootup_nb_voltage_index =
1648 le16_to_cpu(igp_info->info_6.usBootUpNBVoltage);
1649 if (igp_info->info_6.ucHtcTmpLmt == 0)
1650 pi->sys_info.htc_tmp_lmt = 203;
1651 else
1652 pi->sys_info.htc_tmp_lmt = igp_info->info_6.ucHtcTmpLmt;
1653 if (igp_info->info_6.ucHtcHystLmt == 0)
1654 pi->sys_info.htc_hyst_lmt = 5;
1655 else
1656 pi->sys_info.htc_hyst_lmt = igp_info->info_6.ucHtcHystLmt;
1657 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
1658 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
1659 }
1660 for (i = 0; i < NUMBER_OF_M3ARB_PARAM_SETS; i++) {
1661 pi->sys_info.csr_m3_arb_cntl_default[i] =
1662 le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_DEFAULT[i]);
1663 pi->sys_info.csr_m3_arb_cntl_uvd[i] =
1664 le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_UVD[i]);
1665 pi->sys_info.csr_m3_arb_cntl_fs3d[i] =
1666 le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_FS3D[i]);
1667 }
1668 pi->sys_info.sclk_dpm_boost_margin =
1669 le32_to_cpu(igp_info->info_6.SclkDpmBoostMargin);
1670 pi->sys_info.sclk_dpm_throttle_margin =
1671 le32_to_cpu(igp_info->info_6.SclkDpmThrottleMargin);
1672 pi->sys_info.sclk_dpm_tdp_limit_pg =
1673 le16_to_cpu(igp_info->info_6.SclkDpmTdpLimitPG);
1674 pi->sys_info.gnb_tdp_limit = le16_to_cpu(igp_info->info_6.GnbTdpLimit);
1675 pi->sys_info.sclk_dpm_tdp_limit_boost =
1676 le16_to_cpu(igp_info->info_6.SclkDpmTdpLimitBoost);
1677 pi->sys_info.boost_sclk = le32_to_cpu(igp_info->info_6.ulBoostEngineCLock);
1678 pi->sys_info.boost_vid_2bit = igp_info->info_6.ulBoostVid_2bit;
1679 if (igp_info->info_6.EnableBoost)
1680 pi->sys_info.enable_boost = true;
1681 else
1682 pi->sys_info.enable_boost = false;
1683 sumo_construct_display_voltage_mapping_table(rdev,
1684 &pi->sys_info.disp_clk_voltage_mapping_table,
1685 igp_info->info_6.sDISPCLK_Voltage);
1686 sumo_construct_sclk_voltage_mapping_table(rdev,
1687 &pi->sys_info.sclk_voltage_mapping_table,
1688 igp_info->info_6.sAvail_SCLK);
1689 sumo_construct_vid_mapping_table(rdev, &pi->sys_info.vid_mapping_table,
1690 igp_info->info_6.sAvail_SCLK);
1691
1692 }
1693 return 0;
1694}
1695
/*
 * Derive the boot and ACPI power levels from the parsed system info
 * (ACPI is a copy of boot), and seed the current state with a single
 * boot level.
 */
static void sumo_construct_boot_and_acpi_state(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->acpi_pl = pi->boot_pl;
	pi->current_ps.num_levels = 1;
	pi->current_ps.levels[0] = pi->boot_pl;
}
1709
/*
 * Allocate and populate the sumo private DPM state: feature flags (some
 * keyed off the chip/hw revision), the BIOS system-info and power
 * tables, and the boot/ACPI states.  Returns 0 or a parse error.
 */
int sumo_dpm_init(struct radeon_device *rdev)
{
	struct sumo_power_info *pi;
	u32 hw_rev = (RREG32(HW_REV) & ATI_REV_ID_MASK) >> ATI_REV_ID_SHIFT;
	int ret;

	pi = kzalloc(sizeof(struct sumo_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	pi->driver_nbps_policy_disable = false;
	/* early PALM revisions cannot gfx-power-gate while UVD is active */
	if ((rdev->family == CHIP_PALM) && (hw_rev < 3))
		pi->disable_gfx_power_gating_in_uvd = true;
	else
		pi->disable_gfx_power_gating_in_uvd = false;
	pi->enable_alt_vddnb = true;
	pi->enable_sclk_ds = true;
	pi->enable_dynamic_m3_arbiter = false;
	pi->enable_dynamic_patch_ps = true;
	pi->enable_gfx_power_gating = true;
	pi->enable_gfx_clock_gating = true;
	pi->enable_mg_clock_gating = true;
	pi->enable_auto_thermal_throttling = true;

	/* NOTE(review): the error returns below leave pi attached to
	 * rdev->pm.dpm.priv without freeing it — presumably the caller's
	 * teardown path releases it; verify against radeon_pm.c */
	ret = sumo_parse_sys_info_table(rdev);
	if (ret)
		return ret;

	sumo_construct_boot_and_acpi_state(rdev);

	ret = sumo_parse_power_table(rdev);
	if (ret)
		return ret;

	pi->pasi = CYPRESS_HASI_DFLT;
	pi->asi = RV770_ASI_DFLT;
	pi->thermal_auto_throttling = pi->sys_info.htc_tmp_lmt;
	pi->enable_boost = pi->sys_info.enable_boost;
	pi->enable_dpm = true;

	return 0;
}
1753
/*
 * Debug dump of one power state: classification, caps, UVD clocks, and
 * every level's sclk plus its vddc index converted to a voltage value.
 */
void sumo_dpm_print_power_state(struct radeon_device *rdev,
				struct radeon_ps *rps)
{
	int i;
	struct sumo_ps *ps = sumo_get_ps(rps);

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct sumo_pl *pl = &ps->levels[i];
		printk("\t\tpower level %d    sclk: %u vddc: %u\n",
		       i, pl->sclk,
		       sumo_convert_voltage_index_to_value(rdev, pl->vddc_index));
	}
	r600_dpm_print_ps_status(rdev, rps);
}
1771
/*
 * Release everything sumo_dpm_init() allocated: the per-state private
 * structs, the state array, and the private power info.
 */
void sumo_dpm_fini(struct radeon_device *rdev)
{
	int i;

	sumo_cleanup_asic(rdev); /* ??? */

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
}
1784
1785u32 sumo_dpm_get_sclk(struct radeon_device *rdev, bool low)
1786{
1787 struct sumo_power_info *pi = sumo_get_pi(rdev);
1788 struct sumo_ps *requested_state = sumo_get_ps(&pi->requested_rps);
1789
1790 if (low)
1791 return requested_state->levels[0].sclk;
1792 else
1793 return requested_state->levels[requested_state->num_levels - 1].sclk;
1794}
1795
1796u32 sumo_dpm_get_mclk(struct radeon_device *rdev, bool low)
1797{
1798 struct sumo_power_info *pi = sumo_get_pi(rdev);
1799
1800 return pi->sys_info.bootup_uma_clk;
1801}
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.h b/drivers/gpu/drm/radeon/sumo_dpm.h
new file mode 100644
index 000000000000..07dda299c784
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sumo_dpm.h
@@ -0,0 +1,220 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __SUMO_DPM_H__
24#define __SUMO_DPM_H__
25
26#include "atom.h"
27
28#define SUMO_MAX_HARDWARE_POWERLEVELS 5
29#define SUMO_PM_NUMBER_OF_TC 15
30
/* One hardware power level of a sumo power state. */
struct sumo_pl {
	u32 sclk;			/* engine clock */
	u32 vddc_index;			/* 2-bit voltage index into the vid mapping table */
	u32 ds_divider_index;		/* deep-sleep sclk divider id */
	u32 ss_divider_index;		/* sleep sclk divider id */
	u32 allow_gnb_slow;		/* 1 = gnb may slow down at this level */
	u32 sclk_dpm_tdp_limit;		/* per-level TDP limit from the BIOS table */
};
39
/* used for the flags field */
#define SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE (1 << 0)	/* pin NB power state 1 */
#define SUMO_POWERSTATE_FLAGS_BOOST_STATE	(1 << 1)	/* boost-capable state */

/* Driver-side representation of one parsed power state. */
struct sumo_ps {
	struct sumo_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];
	u32 num_levels;		/* populated entries in levels[] */
	/* flags */
	u32 flags;
};
50
#define NUMBER_OF_M3ARB_PARAM_SETS 10
#define SUMO_MAX_NUMBER_VOLTAGES 4

/* Max supported display clock per voltage level (see
 * sumo_construct_display_voltage_mapping_table()). */
struct sumo_disp_clock_voltage_mapping_table {
	u32 num_max_voltage_levels;
	u32 display_clock_frequency[SUMO_MAX_NUMBER_VOLTAGES];
};

/* 2-bit voltage index <-> 7-bit VID pairing. */
struct sumo_vid_mapping_entry {
	u16 vid_2bit;
	u16 vid_7bit;
};

struct sumo_vid_mapping_table {
	u32 num_entries;	/* populated entries, set by sumo_construct_vid_mapping_table() */
	struct sumo_vid_mapping_entry entries[SUMO_MAX_NUMBER_VOLTAGES];
};

/* sclk -> 2-bit voltage index pairing, sorted ascending by sclk. */
struct sumo_sclk_voltage_mapping_entry {
	u32 sclk_frequency;
	u16 vid_2bit;
	u16 rsv;		/* padding/reserved */
};

struct sumo_sclk_voltage_mapping_table {
	u32 num_max_dpm_entries;
	struct sumo_sclk_voltage_mapping_entry entries[SUMO_MAX_HARDWARE_POWERLEVELS];
};
79
/* Platform parameters read from the ATOM integrated-system-info (V6)
 * table by sumo_parse_sys_info_table(). */
struct sumo_sys_info {
	u32 bootup_sclk;		/* engine clock at boot */
	u32 min_sclk;			/* minimum supported engine clock */
	u32 bootup_uma_clk;		/* UMA memory clock */
	u16 bootup_nb_voltage_index;
	u8 htc_tmp_lmt;			/* thermal throttle limit (203 default) */
	u8 htc_hyst_lmt;		/* thermal hysteresis (5 default) */
	struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
	struct sumo_disp_clock_voltage_mapping_table disp_clk_voltage_mapping_table;
	struct sumo_vid_mapping_table vid_mapping_table;
	/* memory (M3) arbiter register sets: default / UVD / fullscreen-3D */
	u32 csr_m3_arb_cntl_default[NUMBER_OF_M3ARB_PARAM_SETS];
	u32 csr_m3_arb_cntl_uvd[NUMBER_OF_M3ARB_PARAM_SETS];
	u32 csr_m3_arb_cntl_fs3d[NUMBER_OF_M3ARB_PARAM_SETS];
	u32 sclk_dpm_boost_margin;
	u32 sclk_dpm_throttle_margin;
	u32 sclk_dpm_tdp_limit_pg;
	u32 gnb_tdp_limit;
	u32 sclk_dpm_tdp_limit_boost;
	u32 boost_sclk;			/* boosted engine clock */
	u32 boost_vid_2bit;
	bool enable_boost;		/* BIOS says boost is usable */
};
102
/* Private DPM state hung off rdev->pm.dpm.priv (see sumo_get_pi()). */
struct sumo_power_info {
	u32 asi;			/* arbiter state index defaults */
	u32 pasi;
	u32 bsp;			/* behavior state parameters */
	u32 bsu;
	u32 pbsp;
	u32 pbsu;
	u32 dsp;
	u32 psp;
	u32 thermal_auto_throttling;	/* taken from sys_info.htc_tmp_lmt */
	u32 uvd_m3_arbiter;
	u32 fw_version;			/* running SMC firmware version */
	struct sumo_sys_info sys_info;
	struct sumo_pl acpi_pl;		/* ACPI power level (copy of boot) */
	struct sumo_pl boot_pl;		/* bootup power level */
	struct sumo_pl boost_pl;
	/* feature enables, initialized in sumo_dpm_init() */
	bool disable_gfx_power_gating_in_uvd;
	bool driver_nbps_policy_disable;
	bool enable_alt_vddnb;
	bool enable_dynamic_m3_arbiter;
	bool enable_gfx_clock_gating;
	bool enable_gfx_power_gating;
	bool enable_mg_clock_gating;
	bool enable_sclk_ds;
	bool enable_auto_thermal_throttling;
	bool enable_dynamic_patch_ps;
	bool enable_dpm;
	bool enable_boost;
	/* private copies of the current/requested states (see
	 * sumo_update_current_ps()/sumo_update_requested_ps()) */
	struct radeon_ps current_rps;
	struct sumo_ps current_ps;
	struct radeon_ps requested_rps;
	struct sumo_ps requested_ps;
};
136
/* Per-TC default parameters (15 sets, SUMO_PM_NUMBER_OF_TC); UTC/DTC
 * presumably up/down thresholds for the arbitration state machine —
 * TODO confirm against the programming sequence in sumo_dpm.c. */
#define SUMO_UTC_DFLT_00                     0x48
#define SUMO_UTC_DFLT_01                     0x44
#define SUMO_UTC_DFLT_02                     0x44
#define SUMO_UTC_DFLT_03                     0x44
#define SUMO_UTC_DFLT_04                     0x44
#define SUMO_UTC_DFLT_05                     0x44
#define SUMO_UTC_DFLT_06                     0x44
#define SUMO_UTC_DFLT_07                     0x44
#define SUMO_UTC_DFLT_08                     0x44
#define SUMO_UTC_DFLT_09                     0x44
#define SUMO_UTC_DFLT_10                     0x44
#define SUMO_UTC_DFLT_11                     0x44
#define SUMO_UTC_DFLT_12                     0x44
#define SUMO_UTC_DFLT_13                     0x44
#define SUMO_UTC_DFLT_14                     0x44

#define SUMO_DTC_DFLT_00                     0x48
#define SUMO_DTC_DFLT_01                     0x44
#define SUMO_DTC_DFLT_02                     0x44
#define SUMO_DTC_DFLT_03                     0x44
#define SUMO_DTC_DFLT_04                     0x44
#define SUMO_DTC_DFLT_05                     0x44
#define SUMO_DTC_DFLT_06                     0x44
#define SUMO_DTC_DFLT_07                     0x44
#define SUMO_DTC_DFLT_08                     0x44
#define SUMO_DTC_DFLT_09                     0x44
#define SUMO_DTC_DFLT_10                     0x44
#define SUMO_DTC_DFLT_11                     0x44
#define SUMO_DTC_DFLT_12                     0x44
#define SUMO_DTC_DFLT_13                     0x44
#define SUMO_DTC_DFLT_14                     0x44

#define SUMO_AH_DFLT               5

/* per-level R/L defaults (5 hardware power levels) */
#define SUMO_R_DFLT0               70
#define SUMO_R_DFLT1               70
#define SUMO_R_DFLT2               70
#define SUMO_R_DFLT3               70
#define SUMO_R_DFLT4               100

#define SUMO_L_DFLT0               0
#define SUMO_L_DFLT1               20
#define SUMO_L_DFLT2               20
#define SUMO_L_DFLT3               20
#define SUMO_L_DFLT4               20
#define SUMO_VRC_DFLT              0x30033		/* passed to sumo_program_vc() */
#define SUMO_MGCGTTLOCAL0_DFLT     0
#define SUMO_MGCGTTLOCAL1_DFLT     0
#define SUMO_GICST_DFLT            19
#define SUMO_SST_DFLT              8
#define SUMO_VOLTAGEDROPT_DFLT     1
#define SUMO_GFXPOWERGATINGT_DFLT  100
189
/* sumo_dpm.c — helpers shared with sumo_smc.c and the trinity driver */
void sumo_gfx_clockgating_initialize(struct radeon_device *rdev);
void sumo_program_vc(struct radeon_device *rdev, u32 vrc);
void sumo_clear_vc(struct radeon_device *rdev);
void sumo_program_sstp(struct radeon_device *rdev);
void sumo_take_smu_control(struct radeon_device *rdev, bool enable);
void sumo_construct_sclk_voltage_mapping_table(struct radeon_device *rdev,
					       struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
					       ATOM_AVAILABLE_SCLK_LIST *table);
void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
				      struct sumo_vid_mapping_table *vid_mapping_table,
				      ATOM_AVAILABLE_SCLK_LIST *table);
u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
			      struct sumo_vid_mapping_table *vid_mapping_table,
			      u32 vid_2bit);
u32 sumo_get_sleep_divider_from_id(u32 id);
u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
					 u32 sclk,
					 u32 min_sclk_in_sr);

/* sumo_smc.c — SMU mailbox helpers */
void sumo_initialize_m3_arb(struct radeon_device *rdev);
void sumo_smu_pg_init(struct radeon_device *rdev);
void sumo_set_tdp_limit(struct radeon_device *rdev, u32 index, u32 tdp_limit);
void sumo_smu_notify_alt_vddnb_change(struct radeon_device *rdev,
				      bool powersaving, bool force_nbps1);
void sumo_boost_state_enable(struct radeon_device *rdev, bool enable);
void sumo_enable_boost_timer(struct radeon_device *rdev);
u32 sumo_get_running_fw_version(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/sumo_smc.c b/drivers/gpu/drm/radeon/sumo_smc.c
new file mode 100644
index 000000000000..18abba5b5810
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sumo_smc.c
@@ -0,0 +1,222 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "sumod.h"
27#include "sumo_dpm.h"
28#include "ppsmc.h"
29
30#define SUMO_SMU_SERVICE_ROUTINE_PG_INIT 1
31#define SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY 27
32#define SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20 20
33
34struct sumo_ps *sumo_get_ps(struct radeon_ps *rps);
35struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev);
36
/* sumo_send_msg_to_smu - ring the SMU doorbell and wait for completion
 *
 * @rdev: radeon device
 * @id: SMU service routine index to invoke
 *
 * Handshake: wait for any previous request to finish (INT_DONE), latch the
 * service index together with INT_REQ, wait for the request bit to latch,
 * for the SMU to acknowledge (INT_ACK) and to complete (INT_DONE), then
 * deassert INT_REQ.  Each poll is bounded by rdev->usec_timeout; a timeout
 * is not reported to the caller (best-effort, matching the other SMU paths).
 */
static void sumo_send_msg_to_smu(struct radeon_device *rdev, u32 id)
{
	u32 gfx_int_req;
	int i;

	/* wait for the SMU to be idle (previous request done) */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(GFX_INT_STATUS) & INT_DONE)
			break;
		udelay(1);
	}

	/* post the service index with the request bit set */
	gfx_int_req = SERV_INDEX(id) | INT_REQ;
	WREG32(GFX_INT_REQ, gfx_int_req);

	/* wait for the request bit to latch in the register */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(GFX_INT_REQ) & INT_REQ)
			break;
		udelay(1);
	}

	/* wait for the SMU to acknowledge the request... */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(GFX_INT_STATUS) & INT_ACK)
			break;
		udelay(1);
	}

	/* ...and to finish servicing it */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(GFX_INT_STATUS) & INT_DONE)
			break;
		udelay(1);
	}

	/* deassert the request bit to complete the handshake */
	gfx_int_req &= ~INT_REQ;
	WREG32(GFX_INT_REQ, gfx_int_req);
}
72
73void sumo_initialize_m3_arb(struct radeon_device *rdev)
74{
75 struct sumo_power_info *pi = sumo_get_pi(rdev);
76 u32 i;
77
78 if (!pi->enable_dynamic_m3_arbiter)
79 return;
80
81 for (i = 0; i < NUMBER_OF_M3ARB_PARAM_SETS; i++)
82 WREG32_RCU(MCU_M3ARB_PARAMS + (i * 4),
83 pi->sys_info.csr_m3_arb_cntl_default[i]);
84
85 for (; i < NUMBER_OF_M3ARB_PARAM_SETS * 2; i++)
86 WREG32_RCU(MCU_M3ARB_PARAMS + (i * 4),
87 pi->sys_info.csr_m3_arb_cntl_uvd[i % NUMBER_OF_M3ARB_PARAM_SETS]);
88
89 for (; i < NUMBER_OF_M3ARB_PARAM_SETS * 3; i++)
90 WREG32_RCU(MCU_M3ARB_PARAMS + (i * 4),
91 pi->sys_info.csr_m3_arb_cntl_fs3d[i % NUMBER_OF_M3ARB_PARAM_SETS]);
92}
93
94static bool sumo_is_alt_vddnb_supported(struct radeon_device *rdev)
95{
96 struct sumo_power_info *pi = sumo_get_pi(rdev);
97 bool return_code = false;
98
99 if (!pi->enable_alt_vddnb)
100 return return_code;
101
102 if ((rdev->family == CHIP_SUMO) || (rdev->family == CHIP_SUMO2)) {
103 if (pi->fw_version >= 0x00010C00)
104 return_code = true;
105 }
106
107 return return_code;
108}
109
110void sumo_smu_notify_alt_vddnb_change(struct radeon_device *rdev,
111 bool powersaving, bool force_nbps1)
112{
113 u32 param = 0;
114
115 if (!sumo_is_alt_vddnb_supported(rdev))
116 return;
117
118 if (powersaving)
119 param |= 1;
120
121 if (force_nbps1)
122 param |= 2;
123
124 WREG32_RCU(RCU_ALTVDDNB_NOTIFY, param);
125
126 sumo_send_msg_to_smu(rdev, SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY);
127}
128
/* sumo_smu_pg_init - ask the SMU firmware to run its power-gating
 * initialization service routine. */
void sumo_smu_pg_init(struct radeon_device *rdev)
{
	sumo_send_msg_to_smu(rdev, SUMO_SMU_SERVICE_ROUTINE_PG_INIT);
}
133
134static u32 sumo_power_of_4(u32 unit)
135{
136 u32 ret = 1;
137 u32 i;
138
139 for (i = 0; i < unit; i++)
140 ret *= 4;
141
142 return ret;
143}
144
/* sumo_enable_boost_timer - program the GNB power-report timer and the
 * boost/throttle margins, then start the SMU boost service (routine 20).
 *
 * The timer period is derived from the xclk reference and the LCLK scaling
 * prescaler currently programmed in RCU_LCLK_SCALING_CNTL: one timer tick
 * is 4^prescaler xclk cycles.
 */
void sumo_enable_boost_timer(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 period, unit, timer_value;
	u32 xclk = radeon_get_xclk(rdev);

	/* reuse whatever prescaler is already programmed */
	unit = (RREG32_RCU(RCU_LCLK_SCALING_CNTL) & LCLK_SCALING_TIMER_PRESCALER_MASK)
		>> LCLK_SCALING_TIMER_PRESCALER_SHIFT;

	/* 100 * (xclk / 100 / ...) rounds xclk down to a multiple of 100 first */
	period = 100 * (xclk / 100 / sumo_power_of_4(unit));

	/* pack period (bits 31:16) and prescaler (bits 7:4) */
	timer_value = (period << 16) | (unit << 4);

	WREG32_RCU(RCU_GNB_PWR_REP_TIMER_CNTL, timer_value);
	WREG32_RCU(RCU_BOOST_MARGIN, pi->sys_info.sclk_dpm_boost_margin);
	WREG32_RCU(RCU_THROTTLE_MARGIN, pi->sys_info.sclk_dpm_throttle_margin);
	WREG32_RCU(GNB_TDP_LIMIT, pi->sys_info.gnb_tdp_limit);
	WREG32_RCU(RCU_SclkDpmTdpLimitPG, pi->sys_info.sclk_dpm_tdp_limit_pg);

	/* kick the SMU to start boost handling with the new parameters */
	sumo_send_msg_to_smu(rdev, SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20);
}
166
167void sumo_set_tdp_limit(struct radeon_device *rdev, u32 index, u32 tdp_limit)
168{
169 u32 regoffset = 0;
170 u32 shift = 0;
171 u32 mask = 0xFFF;
172 u32 sclk_dpm_tdp_limit;
173
174 switch (index) {
175 case 0:
176 regoffset = RCU_SclkDpmTdpLimit01;
177 shift = 16;
178 break;
179 case 1:
180 regoffset = RCU_SclkDpmTdpLimit01;
181 shift = 0;
182 break;
183 case 2:
184 regoffset = RCU_SclkDpmTdpLimit23;
185 shift = 16;
186 break;
187 case 3:
188 regoffset = RCU_SclkDpmTdpLimit23;
189 shift = 0;
190 break;
191 case 4:
192 regoffset = RCU_SclkDpmTdpLimit47;
193 shift = 16;
194 break;
195 case 7:
196 regoffset = RCU_SclkDpmTdpLimit47;
197 shift = 0;
198 break;
199 default:
200 break;
201 }
202
203 sclk_dpm_tdp_limit = RREG32_RCU(regoffset);
204 sclk_dpm_tdp_limit &= ~(mask << shift);
205 sclk_dpm_tdp_limit |= (tdp_limit << shift);
206 WREG32_RCU(regoffset, sclk_dpm_tdp_limit);
207}
208
209void sumo_boost_state_enable(struct radeon_device *rdev, bool enable)
210{
211 u32 boost_disable = RREG32_RCU(RCU_GPU_BOOST_DISABLE);
212
213 boost_disable &= 0xFFFFFFFE;
214 boost_disable |= (enable ? 0 : 1);
215 WREG32_RCU(RCU_GPU_BOOST_DISABLE, boost_disable);
216}
217
/* sumo_get_running_fw_version - read the version of the SMU firmware
 * currently running, as reported in RCU_FW_VERSION. */
u32 sumo_get_running_fw_version(struct radeon_device *rdev)
{
	return RREG32_RCU(RCU_FW_VERSION);
}
222
diff --git a/drivers/gpu/drm/radeon/sumod.h b/drivers/gpu/drm/radeon/sumod.h
new file mode 100644
index 000000000000..7c9c2d4b86c0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sumod.h
@@ -0,0 +1,372 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef _SUMOD_H_
25#define _SUMOD_H_
26
27/* pm registers */
28
29/* rcu */
30#define RCU_FW_VERSION 0x30c
31
32#define RCU_PWR_GATING_SEQ0 0x408
33#define RCU_PWR_GATING_SEQ1 0x40c
34#define RCU_PWR_GATING_CNTL 0x410
35# define PWR_GATING_EN (1 << 0)
36# define RSVD_MASK (0x3 << 1)
37# define PCV(x) ((x) << 3)
38# define PCV_MASK (0x1f << 3)
39# define PCV_SHIFT 3
40# define PCP(x) ((x) << 8)
41# define PCP_MASK (0xf << 8)
42# define PCP_SHIFT 8
43# define RPW(x) ((x) << 16)
44# define RPW_MASK (0xf << 16)
45# define RPW_SHIFT 16
46# define ID(x) ((x) << 24)
47# define ID_MASK (0xf << 24)
48# define ID_SHIFT 24
49# define PGS(x) ((x) << 28)
50# define PGS_MASK (0xf << 28)
51# define PGS_SHIFT 28
52
53#define RCU_ALTVDDNB_NOTIFY 0x430
54#define RCU_LCLK_SCALING_CNTL 0x434
55# define LCLK_SCALING_EN (1 << 0)
56# define LCLK_SCALING_TYPE (1 << 1)
57# define LCLK_SCALING_TIMER_PRESCALER(x) ((x) << 4)
58# define LCLK_SCALING_TIMER_PRESCALER_MASK (0xf << 4)
59# define LCLK_SCALING_TIMER_PRESCALER_SHIFT 4
60# define LCLK_SCALING_TIMER_PERIOD(x) ((x) << 16)
61# define LCLK_SCALING_TIMER_PERIOD_MASK (0xf << 16)
62# define LCLK_SCALING_TIMER_PERIOD_SHIFT 16
63
64#define RCU_PWR_GATING_CNTL_2 0x4a0
65# define MPPU(x) ((x) << 0)
66# define MPPU_MASK (0xffff << 0)
67# define MPPU_SHIFT 0
68# define MPPD(x) ((x) << 16)
69# define MPPD_MASK (0xffff << 16)
70# define MPPD_SHIFT 16
71#define RCU_PWR_GATING_CNTL_3 0x4a4
72# define DPPU(x) ((x) << 0)
73# define DPPU_MASK (0xffff << 0)
74# define DPPU_SHIFT 0
75# define DPPD(x) ((x) << 16)
76# define DPPD_MASK (0xffff << 16)
77# define DPPD_SHIFT 16
78#define RCU_PWR_GATING_CNTL_4 0x4a8
79# define RT(x) ((x) << 0)
80# define RT_MASK (0xffff << 0)
81# define RT_SHIFT 0
82# define IT(x) ((x) << 16)
83# define IT_MASK (0xffff << 16)
84# define IT_SHIFT 16
85
/* RCU_PWR_GATING_CNTL_5/MCU_M3ARB_INDEX (0x504) and
 * RCU_GPU_BOOST_DISABLE/MCU_M3ARB_PARAMS (0x508) intentionally
 * alias the same register offsets. */
87#define RCU_PWR_GATING_CNTL_5 0x504
88#define RCU_GPU_BOOST_DISABLE 0x508
89
90#define MCU_M3ARB_INDEX 0x504
91#define MCU_M3ARB_PARAMS 0x508
92
93#define RCU_GNB_PWR_REP_TIMER_CNTL 0x50C
94
95#define RCU_SclkDpmTdpLimit01 0x514
96#define RCU_SclkDpmTdpLimit23 0x518
97#define RCU_SclkDpmTdpLimit47 0x51C
98#define RCU_SclkDpmTdpLimitPG 0x520
99
100#define GNB_TDP_LIMIT 0x540
101#define RCU_BOOST_MARGIN 0x544
102#define RCU_THROTTLE_MARGIN 0x548
103
104#define SMU_PCIE_PG_ARGS 0x58C
105#define SMU_PCIE_PG_ARGS_2 0x598
106#define SMU_PCIE_PG_ARGS_3 0x59C
107
108/* mmio */
109#define RCU_STATUS 0x11c
110# define GMC_PWR_GATER_BUSY (1 << 8)
111# define GFX_PWR_GATER_BUSY (1 << 9)
112# define UVD_PWR_GATER_BUSY (1 << 10)
113# define PCIE_PWR_GATER_BUSY (1 << 11)
114# define GMC_PWR_GATER_STATE (1 << 12)
115# define GFX_PWR_GATER_STATE (1 << 13)
116# define UVD_PWR_GATER_STATE (1 << 14)
117# define PCIE_PWR_GATER_STATE (1 << 15)
118# define GFX1_PWR_GATER_BUSY (1 << 16)
119# define GFX2_PWR_GATER_BUSY (1 << 17)
120# define GFX1_PWR_GATER_STATE (1 << 18)
121# define GFX2_PWR_GATER_STATE (1 << 19)
122
123#define GFX_INT_REQ 0x120
124# define INT_REQ (1 << 0)
125# define SERV_INDEX(x) ((x) << 1)
126# define SERV_INDEX_MASK (0xff << 1)
127# define SERV_INDEX_SHIFT 1
128#define GFX_INT_STATUS 0x124
129# define INT_ACK (1 << 0)
130# define INT_DONE (1 << 1)
131
132#define CG_SCLK_CNTL 0x600
133# define SCLK_DIVIDER(x) ((x) << 0)
134# define SCLK_DIVIDER_MASK (0x7f << 0)
135# define SCLK_DIVIDER_SHIFT 0
136#define CG_SCLK_STATUS 0x604
137# define SCLK_OVERCLK_DETECT (1 << 2)
138
139#define CG_DCLK_CNTL 0x610
140# define DCLK_DIVIDER_MASK 0x7f
141# define DCLK_DIR_CNTL_EN (1 << 8)
142#define CG_DCLK_STATUS 0x614
143# define DCLK_STATUS (1 << 0)
144#define CG_VCLK_CNTL 0x618
145# define VCLK_DIVIDER_MASK 0x7f
146# define VCLK_DIR_CNTL_EN (1 << 8)
147#define CG_VCLK_STATUS 0x61c
148
149#define GENERAL_PWRMGT 0x63c
150# define STATIC_PM_EN (1 << 1)
151
152#define SCLK_PWRMGT_CNTL 0x644
153# define SCLK_PWRMGT_OFF (1 << 0)
154# define SCLK_LOW_D1 (1 << 1)
155# define FIR_RESET (1 << 4)
156# define FIR_FORCE_TREND_SEL (1 << 5)
157# define FIR_TREND_MODE (1 << 6)
158# define DYN_GFX_CLK_OFF_EN (1 << 7)
159# define GFX_CLK_FORCE_ON (1 << 8)
160# define GFX_CLK_REQUEST_OFF (1 << 9)
161# define GFX_CLK_FORCE_OFF (1 << 10)
162# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
163# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
164# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
165# define GFX_VOLTAGE_CHANGE_EN (1 << 16)
166# define GFX_VOLTAGE_CHANGE_MODE (1 << 17)
167
168#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
169# define TARG_SCLK_INDEX(x) ((x) << 6)
170# define TARG_SCLK_INDEX_MASK (0x7 << 6)
171# define TARG_SCLK_INDEX_SHIFT 6
172# define CURR_SCLK_INDEX(x) ((x) << 9)
173# define CURR_SCLK_INDEX_MASK (0x7 << 9)
174# define CURR_SCLK_INDEX_SHIFT 9
175# define TARG_INDEX(x) ((x) << 12)
176# define TARG_INDEX_MASK (0x7 << 12)
177# define TARG_INDEX_SHIFT 12
178# define CURR_INDEX(x) ((x) << 15)
179# define CURR_INDEX_MASK (0x7 << 15)
180# define CURR_INDEX_SHIFT 15
181
182#define CG_SCLK_DPM_CTRL 0x684
183# define SCLK_FSTATE_0_DIV(x) ((x) << 0)
184# define SCLK_FSTATE_0_DIV_MASK (0x7f << 0)
185# define SCLK_FSTATE_0_DIV_SHIFT 0
186# define SCLK_FSTATE_0_VLD (1 << 7)
187# define SCLK_FSTATE_1_DIV(x) ((x) << 8)
188# define SCLK_FSTATE_1_DIV_MASK (0x7f << 8)
189# define SCLK_FSTATE_1_DIV_SHIFT 8
190# define SCLK_FSTATE_1_VLD (1 << 15)
191# define SCLK_FSTATE_2_DIV(x) ((x) << 16)
192# define SCLK_FSTATE_2_DIV_MASK (0x7f << 16)
193# define SCLK_FSTATE_2_DIV_SHIFT 16
194# define SCLK_FSTATE_2_VLD (1 << 23)
195# define SCLK_FSTATE_3_DIV(x) ((x) << 24)
196# define SCLK_FSTATE_3_DIV_MASK (0x7f << 24)
197# define SCLK_FSTATE_3_DIV_SHIFT 24
198# define SCLK_FSTATE_3_VLD (1 << 31)
199#define CG_SCLK_DPM_CTRL_2 0x688
200#define CG_GCOOR 0x68c
201# define PHC(x) ((x) << 0)
202# define PHC_MASK (0x1f << 0)
203# define PHC_SHIFT 0
204# define SDC(x) ((x) << 9)
205# define SDC_MASK (0x3ff << 9)
206# define SDC_SHIFT 9
207# define SU(x) ((x) << 23)
208# define SU_MASK (0xf << 23)
209# define SU_SHIFT 23
210# define DIV_ID(x) ((x) << 28)
211# define DIV_ID_MASK (0x7 << 28)
212# define DIV_ID_SHIFT 28
213
214#define CG_FTV 0x690
215#define CG_FFCT_0 0x694
216# define UTC_0(x) ((x) << 0)
217# define UTC_0_MASK (0x3ff << 0)
218# define UTC_0_SHIFT 0
219# define DTC_0(x) ((x) << 10)
220# define DTC_0_MASK (0x3ff << 10)
221# define DTC_0_SHIFT 10
222
223#define CG_GIT 0x6d8
224# define CG_GICST(x) ((x) << 0)
225# define CG_GICST_MASK (0xffff << 0)
226# define CG_GICST_SHIFT 0
227# define CG_GIPOT(x) ((x) << 16)
228# define CG_GIPOT_MASK (0xffff << 16)
229# define CG_GIPOT_SHIFT 16
230
231#define CG_SCLK_DPM_CTRL_3 0x6e0
232# define FORCE_SCLK_STATE(x) ((x) << 0)
233# define FORCE_SCLK_STATE_MASK (0x7 << 0)
234# define FORCE_SCLK_STATE_SHIFT 0
235# define FORCE_SCLK_STATE_EN (1 << 3)
236# define GNB_TT(x) ((x) << 8)
237# define GNB_TT_MASK (0xff << 8)
238# define GNB_TT_SHIFT 8
239# define GNB_THERMTHRO_MASK (1 << 16)
240# define CNB_THERMTHRO_MASK_SCLK (1 << 17)
241# define DPM_SCLK_ENABLE (1 << 18)
242# define GNB_SLOW_FSTATE_0_MASK (1 << 23)
243# define GNB_SLOW_FSTATE_0_SHIFT 23
244# define FORCE_NB_PSTATE_1 (1 << 31)
245
246#define CG_SSP 0x6e8
247# define SST(x) ((x) << 0)
248# define SST_MASK (0xffff << 0)
249# define SST_SHIFT 0
250# define SSTU(x) ((x) << 16)
251# define SSTU_MASK (0xffff << 16)
252# define SSTU_SHIFT 16
253
254#define CG_ACPI_CNTL 0x70c
255# define SCLK_ACPI_DIV(x) ((x) << 0)
256# define SCLK_ACPI_DIV_MASK (0x7f << 0)
257# define SCLK_ACPI_DIV_SHIFT 0
258
259#define CG_SCLK_DPM_CTRL_4 0x71c
260# define DC_HDC(x) ((x) << 14)
261# define DC_HDC_MASK (0x3fff << 14)
262# define DC_HDC_SHIFT 14
263# define DC_HU(x) ((x) << 28)
264# define DC_HU_MASK (0xf << 28)
265# define DC_HU_SHIFT 28
266#define CG_SCLK_DPM_CTRL_5 0x720
267# define SCLK_FSTATE_BOOTUP(x) ((x) << 0)
268# define SCLK_FSTATE_BOOTUP_MASK (0x7 << 0)
269# define SCLK_FSTATE_BOOTUP_SHIFT 0
270# define TT_TP(x) ((x) << 3)
271# define TT_TP_MASK (0xffff << 3)
272# define TT_TP_SHIFT 3
273# define TT_TU(x) ((x) << 19)
274# define TT_TU_MASK (0xff << 19)
275# define TT_TU_SHIFT 19
276#define CG_SCLK_DPM_CTRL_6 0x724
277#define CG_AT_0 0x728
278# define CG_R(x) ((x) << 0)
279# define CG_R_MASK (0xffff << 0)
280# define CG_R_SHIFT 0
281# define CG_L(x) ((x) << 16)
282# define CG_L_MASK (0xffff << 16)
283# define CG_L_SHIFT 16
284#define CG_AT_1 0x72c
285#define CG_AT_2 0x730
286#define CG_THERMAL_INT 0x734
287#define DIG_THERM_INTH(x) ((x) << 8)
288#define DIG_THERM_INTH_MASK 0x0000FF00
289#define DIG_THERM_INTH_SHIFT 8
290#define DIG_THERM_INTL(x) ((x) << 16)
291#define DIG_THERM_INTL_MASK 0x00FF0000
292#define DIG_THERM_INTL_SHIFT 16
293#define THERM_INT_MASK_HIGH (1 << 24)
294#define THERM_INT_MASK_LOW (1 << 25)
295#define CG_AT_3 0x738
296#define CG_AT_4 0x73c
297#define CG_AT_5 0x740
298#define CG_AT_6 0x744
299#define CG_AT_7 0x748
300
301#define CG_BSP_0 0x750
302# define BSP(x) ((x) << 0)
303# define BSP_MASK (0xffff << 0)
304# define BSP_SHIFT 0
305# define BSU(x) ((x) << 16)
306# define BSU_MASK (0xf << 16)
307# define BSU_SHIFT 16
308
309#define CG_CG_VOLTAGE_CNTL 0x770
310# define REQ (1 << 0)
311# define LEVEL(x) ((x) << 1)
312# define LEVEL_MASK (0x3 << 1)
313# define LEVEL_SHIFT 1
314# define CG_VOLTAGE_EN (1 << 3)
315# define FORCE (1 << 4)
316# define PERIOD(x) ((x) << 8)
317# define PERIOD_MASK (0xffff << 8)
318# define PERIOD_SHIFT 8
319# define UNIT(x) ((x) << 24)
320# define UNIT_MASK (0xf << 24)
321# define UNIT_SHIFT 24
322
323#define CG_ACPI_VOLTAGE_CNTL 0x780
324# define ACPI_VOLTAGE_EN (1 << 8)
325
326#define CG_DPM_VOLTAGE_CNTL 0x788
327# define DPM_STATE0_LEVEL_MASK (0x3 << 0)
328# define DPM_STATE0_LEVEL_SHIFT 0
329# define DPM_VOLTAGE_EN (1 << 16)
330
331#define CG_PWR_GATING_CNTL 0x7ac
332# define DYN_PWR_DOWN_EN (1 << 0)
333# define ACPI_PWR_DOWN_EN (1 << 1)
334# define GFX_CLK_OFF_PWR_DOWN_EN (1 << 2)
335# define IOC_DISGPU_PWR_DOWN_EN (1 << 3)
336# define FORCE_POWR_ON (1 << 4)
337# define PGP(x) ((x) << 8)
338# define PGP_MASK (0xffff << 8)
339# define PGP_SHIFT 8
340# define PGU(x) ((x) << 24)
341# define PGU_MASK (0xf << 24)
342# define PGU_SHIFT 24
343
344#define CG_CGTT_LOCAL_0 0x7d0
345#define CG_CGTT_LOCAL_1 0x7d4
346
347#define DEEP_SLEEP_CNTL 0x818
348# define R_DIS (1 << 3)
349# define HS(x) ((x) << 4)
350# define HS_MASK (0xfff << 4)
351# define HS_SHIFT 4
352# define ENABLE_DS (1 << 31)
353#define DEEP_SLEEP_CNTL2 0x81c
354# define LB_UFP_EN (1 << 0)
355# define INOUT_C(x) ((x) << 4)
356# define INOUT_C_MASK (0xff << 4)
357# define INOUT_C_SHIFT 4
358
359#define CG_SCRATCH2 0x824
360
361#define CG_SCLK_DPM_CTRL_11 0x830
362
363#define HW_REV 0x5564
364# define ATI_REV_ID_MASK (0xf << 28)
365# define ATI_REV_ID_SHIFT 28
366/* 0 = A0, 1 = A1, 2 = B0, 3 = C0, etc. */
367
368#define DOUT_SCRATCH3 0x611c
369
370#define GB_ADDR_CONFIG 0x98f8
371
372#endif
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
new file mode 100644
index 000000000000..fce825e112ff
--- /dev/null
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -0,0 +1,1887 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "trinityd.h"
27#include "r600_dpm.h"
28#include "trinity_dpm.h"
29
30#define TRINITY_MAX_DEEPSLEEP_DIVIDER_ID 5
31#define TRINITY_MINIMUM_ENGINE_CLOCK 800
32#define SCLK_MIN_DIV_INTV_SHIFT 12
33#define TRINITY_DISPCLK_BYPASS_THRESHOLD 10000
34
35#ifndef TRINITY_MGCG_SEQUENCE
36#define TRINITY_MGCG_SEQUENCE 100
37
38static const u32 trinity_mgcg_shls_default[] =
39{
40 /* Register, Value, Mask */
41 0x0000802c, 0xc0000000, 0xffffffff,
42 0x00003fc4, 0xc0000000, 0xffffffff,
43 0x00005448, 0x00000100, 0xffffffff,
44 0x000055e4, 0x00000100, 0xffffffff,
45 0x0000160c, 0x00000100, 0xffffffff,
46 0x00008984, 0x06000100, 0xffffffff,
47 0x0000c164, 0x00000100, 0xffffffff,
48 0x00008a18, 0x00000100, 0xffffffff,
49 0x0000897c, 0x06000100, 0xffffffff,
50 0x00008b28, 0x00000100, 0xffffffff,
51 0x00009144, 0x00800200, 0xffffffff,
52 0x00009a60, 0x00000100, 0xffffffff,
53 0x00009868, 0x00000100, 0xffffffff,
54 0x00008d58, 0x00000100, 0xffffffff,
55 0x00009510, 0x00000100, 0xffffffff,
56 0x0000949c, 0x00000100, 0xffffffff,
57 0x00009654, 0x00000100, 0xffffffff,
58 0x00009030, 0x00000100, 0xffffffff,
59 0x00009034, 0x00000100, 0xffffffff,
60 0x00009038, 0x00000100, 0xffffffff,
61 0x0000903c, 0x00000100, 0xffffffff,
62 0x00009040, 0x00000100, 0xffffffff,
63 0x0000a200, 0x00000100, 0xffffffff,
64 0x0000a204, 0x00000100, 0xffffffff,
65 0x0000a208, 0x00000100, 0xffffffff,
66 0x0000a20c, 0x00000100, 0xffffffff,
67 0x00009744, 0x00000100, 0xffffffff,
68 0x00003f80, 0x00000100, 0xffffffff,
69 0x0000a210, 0x00000100, 0xffffffff,
70 0x0000a214, 0x00000100, 0xffffffff,
71 0x000004d8, 0x00000100, 0xffffffff,
72 0x00009664, 0x00000100, 0xffffffff,
73 0x00009698, 0x00000100, 0xffffffff,
74 0x000004d4, 0x00000200, 0xffffffff,
75 0x000004d0, 0x00000000, 0xffffffff,
76 0x000030cc, 0x00000104, 0xffffffff,
77 0x0000d0c0, 0x00000100, 0xffffffff,
78 0x0000d8c0, 0x00000100, 0xffffffff,
79 0x0000951c, 0x00010000, 0xffffffff,
80 0x00009160, 0x00030002, 0xffffffff,
81 0x00009164, 0x00050004, 0xffffffff,
82 0x00009168, 0x00070006, 0xffffffff,
83 0x00009178, 0x00070000, 0xffffffff,
84 0x0000917c, 0x00030002, 0xffffffff,
85 0x00009180, 0x00050004, 0xffffffff,
86 0x0000918c, 0x00010006, 0xffffffff,
87 0x00009190, 0x00090008, 0xffffffff,
88 0x00009194, 0x00070000, 0xffffffff,
89 0x00009198, 0x00030002, 0xffffffff,
90 0x0000919c, 0x00050004, 0xffffffff,
91 0x000091a8, 0x00010006, 0xffffffff,
92 0x000091ac, 0x00090008, 0xffffffff,
93 0x000091b0, 0x00070000, 0xffffffff,
94 0x000091b4, 0x00030002, 0xffffffff,
95 0x000091b8, 0x00050004, 0xffffffff,
96 0x000091c4, 0x00010006, 0xffffffff,
97 0x000091c8, 0x00090008, 0xffffffff,
98 0x000091cc, 0x00070000, 0xffffffff,
99 0x000091d0, 0x00030002, 0xffffffff,
100 0x000091d4, 0x00050004, 0xffffffff,
101 0x000091e0, 0x00010006, 0xffffffff,
102 0x000091e4, 0x00090008, 0xffffffff,
103 0x000091e8, 0x00000000, 0xffffffff,
104 0x000091ec, 0x00070000, 0xffffffff,
105 0x000091f0, 0x00030002, 0xffffffff,
106 0x000091f4, 0x00050004, 0xffffffff,
107 0x00009200, 0x00010006, 0xffffffff,
108 0x00009204, 0x00090008, 0xffffffff,
109 0x00009208, 0x00070000, 0xffffffff,
110 0x0000920c, 0x00030002, 0xffffffff,
111 0x00009210, 0x00050004, 0xffffffff,
112 0x0000921c, 0x00010006, 0xffffffff,
113 0x00009220, 0x00090008, 0xffffffff,
114 0x00009294, 0x00000000, 0xffffffff
115};
116
/* MGCG enable sequence: {register, value, mask} triplets applied by
 * trinity_program_clk_gating_hw_sequence(). */
static const u32 trinity_mgcg_shls_enable[] =
{
	/* Register, Value, Mask */
	0x0000802c, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000000, 0xffffffff,
	0x000008fc, 0x00000000, 0x000133FF,
	0x000008f8, 0x00000001, 0xffffffff,
	0x000008fc, 0x00000000, 0xE00B03FC,
	0x00009150, 0x96944200, 0xffffffff
};
127
/* MGCG disable sequence: {register, value, mask} triplets applied by
 * trinity_program_clk_gating_hw_sequence(). */
static const u32 trinity_mgcg_shls_disable[] =
{
	/* Register, Value, Mask */
	0x0000802c, 0xc0000000, 0xffffffff,
	0x00009150, 0x00600000, 0xffffffff,
	0x000008f8, 0x00000000, 0xffffffff,
	0x000008fc, 0xffffffff, 0x000133FF,
	0x000008f8, 0x00000001, 0xffffffff,
	0x000008fc, 0xffffffff, 0xE00B03FC
};
138#endif
139
140#ifndef TRINITY_SYSLS_SEQUENCE
141#define TRINITY_SYSLS_SEQUENCE 100
142
/* System light-sleep default register state: {register, value, mask}
 * triplets applied by trinity_program_clk_gating_hw_sequence(). */
static const u32 trinity_sysls_default[] =
{
	/* Register, Value, Mask */
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x0000d8bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x00002f50, 0x00000404, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x0000641c, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00006dfc, 0x00000000, 0xffffffff
};
164
/* System light-sleep disable sequence: {register, value, mask} triplets
 * applied by trinity_program_clk_gating_hw_sequence(). */
static const u32 trinity_sysls_disable[] =
{
	/* Register, Value, Mask */
	0x0000d0c0, 0x00000000, 0xffffffff,
	0x0000d8c0, 0x00000000, 0xffffffff,
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x0000d8bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x00041401, 0xffffffff,
	0x0000264c, 0x00040400, 0xffffffff,
	0x00002648, 0x00040400, 0xffffffff,
	0x00002650, 0x00040400, 0xffffffff,
	0x000020b8, 0x00040400, 0xffffffff,
	0x000020bc, 0x00040400, 0xffffffff,
	0x000020c0, 0x00040c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680000, 0xffffffff,
	0x00002f50, 0x00000404, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x0000641c, 0x00007ffd, 0xffffffff,
	0x00000c7c, 0x0000ff00, 0xffffffff,
	0x00006dfc, 0x0000007f, 0xffffffff
};
188
/* System light-sleep enable sequence: {register, value, mask} triplets
 * applied by trinity_program_clk_gating_hw_sequence(). */
static const u32 trinity_sysls_enable[] =
{
	/* Register, Value, Mask */
	0x000055e8, 0x00000001, 0xffffffff,
	0x0000d0bc, 0x00000100, 0xffffffff,
	0x0000d8bc, 0x00000100, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x00002f50, 0x00000903, 0xffffffff,
	0x000004c8, 0x00000000, 0xffffffff,
	0x0000641c, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00006dfc, 0x00000000, 0xffffffff
};
210#endif
211
212static const u32 trinity_override_mgpg_sequences[] =
213{
214 /* Register, Value */
215 0x00000200, 0xE030032C,
216 0x00000204, 0x00000FFF,
217 0x00000200, 0xE0300058,
218 0x00000204, 0x00030301,
219 0x00000200, 0xE0300054,
220 0x00000204, 0x500010FF,
221 0x00000200, 0xE0300074,
222 0x00000204, 0x00030301,
223 0x00000200, 0xE0300070,
224 0x00000204, 0x500010FF,
225 0x00000200, 0xE0300090,
226 0x00000204, 0x00030301,
227 0x00000200, 0xE030008C,
228 0x00000204, 0x500010FF,
229 0x00000200, 0xE03000AC,
230 0x00000204, 0x00030301,
231 0x00000200, 0xE03000A8,
232 0x00000204, 0x500010FF,
233 0x00000200, 0xE03000C8,
234 0x00000204, 0x00030301,
235 0x00000200, 0xE03000C4,
236 0x00000204, 0x500010FF,
237 0x00000200, 0xE03000E4,
238 0x00000204, 0x00030301,
239 0x00000200, 0xE03000E0,
240 0x00000204, 0x500010FF,
241 0x00000200, 0xE0300100,
242 0x00000204, 0x00030301,
243 0x00000200, 0xE03000FC,
244 0x00000204, 0x500010FF,
245 0x00000200, 0xE0300058,
246 0x00000204, 0x00030303,
247 0x00000200, 0xE0300054,
248 0x00000204, 0x600010FF,
249 0x00000200, 0xE0300074,
250 0x00000204, 0x00030303,
251 0x00000200, 0xE0300070,
252 0x00000204, 0x600010FF,
253 0x00000200, 0xE0300090,
254 0x00000204, 0x00030303,
255 0x00000200, 0xE030008C,
256 0x00000204, 0x600010FF,
257 0x00000200, 0xE03000AC,
258 0x00000204, 0x00030303,
259 0x00000200, 0xE03000A8,
260 0x00000204, 0x600010FF,
261 0x00000200, 0xE03000C8,
262 0x00000204, 0x00030303,
263 0x00000200, 0xE03000C4,
264 0x00000204, 0x600010FF,
265 0x00000200, 0xE03000E4,
266 0x00000204, 0x00030303,
267 0x00000200, 0xE03000E0,
268 0x00000204, 0x600010FF,
269 0x00000200, 0xE0300100,
270 0x00000204, 0x00030303,
271 0x00000200, 0xE03000FC,
272 0x00000204, 0x600010FF,
273 0x00000200, 0xE0300058,
274 0x00000204, 0x00030303,
275 0x00000200, 0xE0300054,
276 0x00000204, 0x700010FF,
277 0x00000200, 0xE0300074,
278 0x00000204, 0x00030303,
279 0x00000200, 0xE0300070,
280 0x00000204, 0x700010FF,
281 0x00000200, 0xE0300090,
282 0x00000204, 0x00030303,
283 0x00000200, 0xE030008C,
284 0x00000204, 0x700010FF,
285 0x00000200, 0xE03000AC,
286 0x00000204, 0x00030303,
287 0x00000200, 0xE03000A8,
288 0x00000204, 0x700010FF,
289 0x00000200, 0xE03000C8,
290 0x00000204, 0x00030303,
291 0x00000200, 0xE03000C4,
292 0x00000204, 0x700010FF,
293 0x00000200, 0xE03000E4,
294 0x00000204, 0x00030303,
295 0x00000200, 0xE03000E0,
296 0x00000204, 0x700010FF,
297 0x00000200, 0xE0300100,
298 0x00000204, 0x00030303,
299 0x00000200, 0xE03000FC,
300 0x00000204, 0x700010FF,
301 0x00000200, 0xE0300058,
302 0x00000204, 0x00010303,
303 0x00000200, 0xE0300054,
304 0x00000204, 0x800010FF,
305 0x00000200, 0xE0300074,
306 0x00000204, 0x00010303,
307 0x00000200, 0xE0300070,
308 0x00000204, 0x800010FF,
309 0x00000200, 0xE0300090,
310 0x00000204, 0x00010303,
311 0x00000200, 0xE030008C,
312 0x00000204, 0x800010FF,
313 0x00000200, 0xE03000AC,
314 0x00000204, 0x00010303,
315 0x00000200, 0xE03000A8,
316 0x00000204, 0x800010FF,
317 0x00000200, 0xE03000C4,
318 0x00000204, 0x800010FF,
319 0x00000200, 0xE03000C8,
320 0x00000204, 0x00010303,
321 0x00000200, 0xE03000E4,
322 0x00000204, 0x00010303,
323 0x00000200, 0xE03000E0,
324 0x00000204, 0x800010FF,
325 0x00000200, 0xE0300100,
326 0x00000204, 0x00010303,
327 0x00000200, 0xE03000FC,
328 0x00000204, 0x800010FF,
329 0x00000200, 0x0001f198,
330 0x00000204, 0x0003ffff,
331 0x00000200, 0x0001f19C,
332 0x00000204, 0x3fffffff,
333 0x00000200, 0xE030032C,
334 0x00000204, 0x00000000,
335};
336
337static void trinity_program_clk_gating_hw_sequence(struct radeon_device *rdev,
338 const u32 *seq, u32 count);
339static void trinity_override_dynamic_mg_powergating(struct radeon_device *rdev);
340static void trinity_apply_state_adjust_rules(struct radeon_device *rdev,
341 struct radeon_ps *new_rps,
342 struct radeon_ps *old_rps);
343
344struct trinity_ps *trinity_get_ps(struct radeon_ps *rps)
345{
346 struct trinity_ps *ps = rps->ps_priv;
347
348 return ps;
349}
350
351struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev)
352{
353 struct trinity_power_info *pi = rdev->pm.dpm.priv;
354
355 return pi;
356}
357
/* trinity_gfx_powergating_initialize - one-time setup of gfx power gating
 *
 * Programs the power-gating state divider from the 25 MHz engine PLL
 * dividers, the power-gating period/unit (SP/SU) for a 500 us interval
 * relative to xclk, and the gfx idle poll threshold (CG_GIPOT).  On parts
 * where the override is requested and hw_rev reads 0, also applies the
 * dynamic MG powergating override sequence.  Bails out silently if the
 * PLL divider lookup fails.
 */
static void trinity_gfx_powergating_initialize(struct radeon_device *rdev)
{
	struct trinity_power_info *pi = trinity_get_pi(rdev);
	u32 p, u;
	u32 value;
	struct atom_clock_dividers dividers;
	u32 xclk = radeon_get_xclk(rdev);
	u32 sssd = 1;
	int ret;
	u32 hw_rev = (RREG32(HW_REV) & ATI_REV_ID_MASK) >> ATI_REV_ID_SHIFT;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     25000, false, &dividers);
	if (ret)
		return;

	/* update SSSD and the divider, keep everything else */
	value = RREG32_SMC(GFX_POWER_GATING_CNTL);
	value &= ~(SSSD_MASK | PDS_DIV_MASK);
	if (sssd)
		value |= SSSD(1);
	value |= PDS_DIV(dividers.post_div);
	WREG32_SMC(GFX_POWER_GATING_CNTL, value);

	/* 500 us power-gating interval expressed as period/unit of xclk */
	r600_calculate_u_and_p(500, xclk, 16, &p, &u);

	WREG32(CG_PG_CTRL, SP(p) | SU(u));

	WREG32_P(CG_GIPOTS, CG_GIPOT(p), ~CG_GIPOT_MASK);

	/* XXX double check hw_rev */
	if (pi->override_dynamic_mgpg && (hw_rev == 0))
		trinity_override_dynamic_mg_powergating(rdev);

}
392
393#define CGCG_CGTT_LOCAL0_MASK 0xFFFF33FF
394#define CGCG_CGTT_LOCAL1_MASK 0xFFFB0FFE
395#define CGTS_SM_CTRL_REG_DISABLE 0x00600000
396#define CGTS_SM_CTRL_REG_ENABLE 0x96944200
397
/* trinity_mg_clockgating_enable - toggle medium-grain clock gating
 *
 * Enable: program the CGTT local override bits first, then switch
 * CGTS_SM_CTRL_REG to the gated value.  Disable: reverse order, forcing
 * the SM clocks on before restoring the local overrides.  Only the bits
 * covered by CGCG_CGTT_LOCAL*_MASK are rewritten; the rest are preserved.
 */
static void trinity_mg_clockgating_enable(struct radeon_device *rdev,
					  bool enable)
{
	u32 local0;
	u32 local1;

	if (enable) {
		local0 = RREG32_CG(CG_CGTT_LOCAL_0);
		local1 = RREG32_CG(CG_CGTT_LOCAL_1);

		/* apply the gated override values inside the mask */
		WREG32_CG(CG_CGTT_LOCAL_0,
			  (0x00380000 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
		WREG32_CG(CG_CGTT_LOCAL_1,
			  (0x0E000000 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );

		WREG32(CGTS_SM_CTRL_REG, CGTS_SM_CTRL_REG_ENABLE);
	} else {
		/* force clocks on first, then set all override bits */
		WREG32(CGTS_SM_CTRL_REG, CGTS_SM_CTRL_REG_DISABLE);

		local0 = RREG32_CG(CG_CGTT_LOCAL_0);
		local1 = RREG32_CG(CG_CGTT_LOCAL_1);

		WREG32_CG(CG_CGTT_LOCAL_0,
			  CGCG_CGTT_LOCAL0_MASK | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
		WREG32_CG(CG_CGTT_LOCAL_1,
			  CGCG_CGTT_LOCAL1_MASK | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
	}
}
426
427static void trinity_mg_clockgating_initialize(struct radeon_device *rdev)
428{
429 u32 count;
430 const u32 *seq = NULL;
431
432 seq = &trinity_mgcg_shls_default[0];
433 count = sizeof(trinity_mgcg_shls_default) / (3 * sizeof(u32));
434
435 trinity_program_clk_gating_hw_sequence(rdev, seq, count);
436}
437
/* Enable/disable dynamic gfx clock gating.
 * The disable path clears the dynamic-off enable, pulses GFX_CLK_FORCE_ON,
 * and then reads GB_ADDR_CONFIG — presumably to post/flush the writes.
 */
static void trinity_gfx_clockgating_enable(struct radeon_device *rdev,
					   bool enable)
{
	if (enable) {
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
		RREG32(GB_ADDR_CONFIG);
	}
}
450
451static void trinity_program_clk_gating_hw_sequence(struct radeon_device *rdev,
452 const u32 *seq, u32 count)
453{
454 u32 i, length = count * 3;
455
456 for (i = 0; i < length; i += 3)
457 WREG32_P(seq[i], seq[i+1], ~seq[i+2]);
458}
459
460static void trinity_program_override_mgpg_sequences(struct radeon_device *rdev,
461 const u32 *seq, u32 count)
462{
463 u32 i, length = count * 2;
464
465 for (i = 0; i < length; i += 2)
466 WREG32(seq[i], seq[i+1]);
467
468}
469
470static void trinity_override_dynamic_mg_powergating(struct radeon_device *rdev)
471{
472 u32 count;
473 const u32 *seq = NULL;
474
475 seq = &trinity_override_mgpg_sequences[0];
476 count = sizeof(trinity_override_mgpg_sequences) / (2 * sizeof(u32));
477
478 trinity_program_override_mgpg_sequences(rdev, seq, count);
479}
480
481static void trinity_ls_clockgating_enable(struct radeon_device *rdev,
482 bool enable)
483{
484 u32 count;
485 const u32 *seq = NULL;
486
487 if (enable) {
488 seq = &trinity_sysls_enable[0];
489 count = sizeof(trinity_sysls_enable) / (3 * sizeof(u32));
490 } else {
491 seq = &trinity_sysls_disable[0];
492 count = sizeof(trinity_sysls_disable) / (3 * sizeof(u32));
493 }
494
495 trinity_program_clk_gating_hw_sequence(rdev, seq, count);
496}
497
/* Enable/disable dynamic gfx power-down.
 * On enable, if the RB backend disable fuse is set, a flag is noted in
 * SMU scratch before turning on DYN_PWR_DOWN_EN.
 */
static void trinity_gfx_powergating_enable(struct radeon_device *rdev,
					   bool enable)
{
	if (enable) {
		if (RREG32_SMC(CC_SMU_TST_EFUSE1_MISC) & RB_BACKEND_DISABLE_MASK)
			WREG32_SMC(SMU_SCRATCH_A, (RREG32_SMC(SMU_SCRATCH_A) | 0x01));

		WREG32_P(SCLK_PWRMGT_CNTL, DYN_PWR_DOWN_EN, ~DYN_PWR_DOWN_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_PWR_DOWN_EN);
		RREG32(GB_ADDR_CONFIG);	/* read back, presumably to post the write */
	}
}
511
/* Enable/disable dynamic medium grain power gating in the SMU.
 * Enable sets PM_I_CNTL_1.DS_PG_CNTL before SMU_S_PG_CNTL.DS_PG_EN;
 * disable clears the two fields in the opposite order.  The MGPG
 * configuration is (re)applied in both cases at the end.
 */
static void trinity_gfx_dynamic_mgpg_enable(struct radeon_device *rdev,
					    bool enable)
{
	u32 value;

	if (enable) {
		value = RREG32_SMC(PM_I_CNTL_1);
		value &= ~DS_PG_CNTL_MASK;
		value |= DS_PG_CNTL(1);
		WREG32_SMC(PM_I_CNTL_1, value);

		value = RREG32_SMC(SMU_S_PG_CNTL);
		value &= ~DS_PG_EN_MASK;
		value |= DS_PG_EN(1);
		WREG32_SMC(SMU_S_PG_CNTL, value);
	} else {
		value = RREG32_SMC(SMU_S_PG_CNTL);
		value &= ~DS_PG_EN_MASK;
		WREG32_SMC(SMU_S_PG_CNTL, value);

		value = RREG32_SMC(PM_I_CNTL_1);
		value &= ~DS_PG_CNTL_MASK;
		WREG32_SMC(PM_I_CNTL_1, value);
	}

	trinity_gfx_dynamic_mgpg_config(rdev);

}
540
/* Turn on every clock/power gating feature selected in the power info,
 * in dependency order: initialize gating parameters first, then enable
 * LS/MG clock gating, gfx clock gating, dynamic MGPG, and finally gfx
 * power gating.
 */
static void trinity_enable_clock_power_gating(struct radeon_device *rdev)
{
	struct trinity_power_info *pi = trinity_get_pi(rdev);

	if (pi->enable_gfx_clock_gating)
		sumo_gfx_clockgating_initialize(rdev);
	if (pi->enable_mg_clock_gating)
		trinity_mg_clockgating_initialize(rdev);
	if (pi->enable_gfx_power_gating)
		trinity_gfx_powergating_initialize(rdev);
	if (pi->enable_mg_clock_gating) {
		trinity_ls_clockgating_enable(rdev, true);
		trinity_mg_clockgating_enable(rdev, true);
	}
	if (pi->enable_gfx_clock_gating)
		trinity_gfx_clockgating_enable(rdev, true);
	if (pi->enable_gfx_dynamic_mgpg)
		trinity_gfx_dynamic_mgpg_enable(rdev, true);
	if (pi->enable_gfx_power_gating)
		trinity_gfx_powergating_enable(rdev, true);
}
562
/* Turn off the enabled clock/power gating features in the reverse order
 * of trinity_enable_clock_power_gating().
 */
static void trinity_disable_clock_power_gating(struct radeon_device *rdev)
{
	struct trinity_power_info *pi = trinity_get_pi(rdev);

	if (pi->enable_gfx_power_gating)
		trinity_gfx_powergating_enable(rdev, false);
	if (pi->enable_gfx_dynamic_mgpg)
		trinity_gfx_dynamic_mgpg_enable(rdev, false);
	if (pi->enable_gfx_clock_gating)
		trinity_gfx_clockgating_enable(rdev, false);
	if (pi->enable_mg_clock_gating) {
		trinity_mg_clockgating_enable(rdev, false);
		trinity_ls_clockgating_enable(rdev, false);
	}
}
578
/* Program the engine clock divider for DPM state @index, and the
 * power-down divider derived from sclk/2.  A failed atom divider lookup
 * aborts silently, possibly leaving the state partially programmed.
 */
static void trinity_set_divider_value(struct radeon_device *rdev,
				      u32 index, u32 sclk)
{
	struct atom_clock_dividers dividers;
	int ret;
	u32 value;
	u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     sclk, false, &dividers);
	if (ret)
		return;

	/* main sclk divider for this state */
	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix);
	value &= ~CLK_DIVIDER_MASK;
	value |= CLK_DIVIDER(dividers.post_div);
	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     sclk/2, false, &dividers);
	if (ret)
		return;

	/* power-down divider, computed from half the engine clock */
	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_PG_CNTL + ix);
	value &= ~PD_SCLK_DIVIDER_MASK;
	value |= PD_SCLK_DIVIDER(dividers.post_div);
	WREG32_SMC(SMU_SCLK_DPM_STATE_0_PG_CNTL + ix, value);
}
607
608static void trinity_set_ds_dividers(struct radeon_device *rdev,
609 u32 index, u32 divider)
610{
611 u32 value;
612 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
613
614 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix);
615 value &= ~DS_DIV_MASK;
616 value |= DS_DIV(divider);
617 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix, value);
618}
619
620static void trinity_set_ss_dividers(struct radeon_device *rdev,
621 u32 index, u32 divider)
622{
623 u32 value;
624 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
625
626 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix);
627 value &= ~DS_SH_DIV_MASK;
628 value |= DS_SH_DIV(divider);
629 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix, value);
630}
631
/* Program the voltage id for DPM state @index: convert the 2-bit vid to
 * the 7-bit encoding via the sysinfo mapping table, write it, then clear
 * the LVRT field of the same register.
 */
static void trinity_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
	struct trinity_power_info *pi = trinity_get_pi(rdev);
	u32 vid_7bit = sumo_convert_vid2_to_vid7(rdev, &pi->sys_info.vid_mapping_table, vid);
	u32 value;
	u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;

	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix);
	value &= ~VID_MASK;
	value |= VID(vid_7bit);
	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);

	/* LVRT is forced to 0 for every state */
	value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix);
	value &= ~LVRT_MASK;
	value |= LVRT(0);
	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);
}
649
650static void trinity_set_allos_gnb_slow(struct radeon_device *rdev,
651 u32 index, u32 gnb_slow)
652{
653 u32 value;
654 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
655
656 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_3 + ix);
657 value &= ~GNB_SLOW_MASK;
658 value |= GNB_SLOW(gnb_slow);
659 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_3 + ix, value);
660}
661
662static void trinity_set_force_nbp_state(struct radeon_device *rdev,
663 u32 index, u32 force_nbp_state)
664{
665 u32 value;
666 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
667
668 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_3 + ix);
669 value &= ~FORCE_NBPS1_MASK;
670 value |= FORCE_NBPS1(force_nbp_state);
671 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_3 + ix, value);
672}
673
674static void trinity_set_display_wm(struct radeon_device *rdev,
675 u32 index, u32 wm)
676{
677 u32 value;
678 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
679
680 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix);
681 value &= ~DISPLAY_WM_MASK;
682 value |= DISPLAY_WM(wm);
683 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix, value);
684}
685
686static void trinity_set_vce_wm(struct radeon_device *rdev,
687 u32 index, u32 wm)
688{
689 u32 value;
690 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
691
692 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix);
693 value &= ~VCE_WM_MASK;
694 value |= VCE_WM(wm);
695 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix, value);
696}
697
698static void trinity_set_at(struct radeon_device *rdev,
699 u32 index, u32 at)
700{
701 u32 value;
702 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
703
704 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_AT + ix);
705 value &= ~AT_MASK;
706 value |= AT(at);
707 WREG32_SMC(SMU_SCLK_DPM_STATE_0_AT + ix, value);
708}
709
/* Program every field of one power level into the SMU DPM state table
 * slot @index.  Indices beyond the hardware level count are silently
 * ignored.
 */
static void trinity_program_power_level(struct radeon_device *rdev,
					struct trinity_pl *pl, u32 index)
{
	struct trinity_power_info *pi = trinity_get_pi(rdev);

	if (index >= SUMO_MAX_HARDWARE_POWERLEVELS)
		return;

	trinity_set_divider_value(rdev, index, pl->sclk);
	trinity_set_vid(rdev, index, pl->vddc_index);
	trinity_set_ss_dividers(rdev, index, pl->ss_divider_index);
	trinity_set_ds_dividers(rdev, index, pl->ds_divider_index);
	trinity_set_allos_gnb_slow(rdev, index, pl->allow_gnb_slow);
	trinity_set_force_nbp_state(rdev, index, pl->force_nbp_state);
	trinity_set_display_wm(rdev, index, pl->display_wm);
	trinity_set_vce_wm(rdev, index, pl->vce_wm);
	trinity_set_at(rdev, index, pi->at[index]);	/* per-level activity threshold */
}
728
729static void trinity_power_level_enable_disable(struct radeon_device *rdev,
730 u32 index, bool enable)
731{
732 u32 value;
733 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
734
735 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix);
736 value &= ~STATE_VALID_MASK;
737 if (enable)
738 value |= STATE_VALID(1);
739 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);
740}
741
742static bool trinity_dpm_enabled(struct radeon_device *rdev)
743{
744 if (RREG32_SMC(SMU_SCLK_DPM_CNTL) & SCLK_DPM_EN(1))
745 return true;
746 else
747 return false;
748}
749
/* Start SCLK DPM: enable DPM with boot state 0 and voltage changes,
 * turn on global power management, clear CG_CG_VOLTAGE_CNTL.EN (set
 * again by trinity_stop_dpm()), and notify the SMU.
 */
static void trinity_start_dpm(struct radeon_device *rdev)
{
	u32 value = RREG32_SMC(SMU_SCLK_DPM_CNTL);

	value &= ~(SCLK_DPM_EN_MASK | SCLK_DPM_BOOT_STATE_MASK | VOLTAGE_CHG_EN_MASK);
	value |= SCLK_DPM_EN(1) | SCLK_DPM_BOOT_STATE(0) | VOLTAGE_CHG_EN(1);
	WREG32_SMC(SMU_SCLK_DPM_CNTL, value);

	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
	WREG32_P(CG_CG_VOLTAGE_CNTL, 0, ~EN);

	trinity_dpm_config(rdev, true);
}
763
/* Poll (bounded by usec_timeout) until dynamic PM is enabled and both
 * the target and current profile indices have settled at state 0.
 * Timeouts are silent; each loop simply falls through.
 */
static void trinity_wait_for_dpm_enabled(struct radeon_device *rdev)
{
	int i;

	/* wait for dynamic power management to switch on */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SCLK_PWRMGT_CNTL) & DYNAMIC_PM_EN)
			break;
		udelay(1);
	}
	/* wait for the target state to reach level 0 */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_STATE_MASK) == 0)
			break;
		udelay(1);
	}
	/* wait for the current state to reach level 0 */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_MASK) == 0)
			break;
		udelay(1);
	}
}
784
/* Stop SCLK DPM: re-assert CG_CG_VOLTAGE_CNTL.EN, clear the DPM and
 * voltage-change enables, and notify the SMU.
 */
static void trinity_stop_dpm(struct radeon_device *rdev)
{
	u32 sclk_dpm_cntl;

	WREG32_P(CG_CG_VOLTAGE_CNTL, EN, ~EN);

	sclk_dpm_cntl = RREG32_SMC(SMU_SCLK_DPM_CNTL);
	sclk_dpm_cntl &= ~(SCLK_DPM_EN_MASK | VOLTAGE_CHG_EN_MASK);
	WREG32_SMC(SMU_SCLK_DPM_CNTL, sclk_dpm_cntl);

	trinity_dpm_config(rdev, false);
}
797
/* Release the sclk/busy activity counters from reset (start activity
 * monitoring).
 */
static void trinity_start_am(struct radeon_device *rdev)
{
	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~(RESET_SCLK_CNT | RESET_BUSY_CNT));
}
802
/* Hold the sclk/busy activity counters in reset (stop activity
 * monitoring); counterpart of trinity_start_am().
 */
static void trinity_reset_am(struct radeon_device *rdev)
{
	WREG32_P(SCLK_PWRMGT_CNTL, RESET_SCLK_CNT | RESET_BUSY_CNT,
		 ~(RESET_SCLK_CNT | RESET_BUSY_CNT));
}
808
/* Poll (bounded by usec_timeout) until the current profile index reads
 * back as state 0; times out silently.
 */
static void trinity_wait_for_level_0(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_MASK) == 0)
			break;
		udelay(1);
	}
}
819
/* Mark power level 0 valid in the SMU state table. */
static void trinity_enable_power_level_0(struct radeon_device *rdev)
{
	trinity_power_level_enable_disable(rdev, 0, true);
}
824
/* Force the SMU into DPM state 0. */
static void trinity_force_level_0(struct radeon_device *rdev)
{
	trinity_dpm_force_state(rdev, 0);
}
829
/* Lift any forced DPM level and let the SMU pick states again. */
static void trinity_unforce_levels(struct radeon_device *rdev)
{
	trinity_dpm_no_forced_level(rdev);
}
834
835static void trinity_program_power_levels_0_to_n(struct radeon_device *rdev,
836 struct radeon_ps *new_rps,
837 struct radeon_ps *old_rps)
838{
839 struct trinity_ps *new_ps = trinity_get_ps(new_rps);
840 struct trinity_ps *old_ps = trinity_get_ps(old_rps);
841 u32 i;
842 u32 n_current_state_levels = (old_ps == NULL) ? 1 : old_ps->num_levels;
843
844 for (i = 0; i < new_ps->num_levels; i++) {
845 trinity_program_power_level(rdev, &new_ps->levels[i], i);
846 trinity_power_level_enable_disable(rdev, i, true);
847 }
848
849 for (i = new_ps->num_levels; i < n_current_state_levels; i++)
850 trinity_power_level_enable_disable(rdev, i, false);
851}
852
/* Program the boot power level into slot 0 and invalidate the remaining
 * slots (8 appears to be the number of SMU DPM state slots — note it is
 * larger than SUMO_MAX_HARDWARE_POWERLEVELS; TODO confirm).
 */
static void trinity_program_bootup_state(struct radeon_device *rdev)
{
	struct trinity_power_info *pi = trinity_get_pi(rdev);
	u32 i;

	trinity_program_power_level(rdev, &pi->boot_pl, 0);
	trinity_power_level_enable_disable(rdev, 0, true);

	for (i = 1; i < 8; i++)
		trinity_power_level_enable_disable(rdev, i, false);
}
864
865static void trinity_setup_uvd_clock_table(struct radeon_device *rdev,
866 struct radeon_ps *rps)
867{
868 struct trinity_ps *ps = trinity_get_ps(rps);
869 u32 uvdstates = (ps->vclk_low_divider |
870 ps->vclk_high_divider << 8 |
871 ps->dclk_low_divider << 16 |
872 ps->dclk_high_divider << 24);
873
874 WREG32_SMC(SMU_UVD_DPM_STATES, uvdstates);
875}
876
/* Program the UVD DPM evaluation interval: scale @interval against xclk
 * via r600_calculate_u_and_p(), then round the result up to whole PM_TP
 * periods.
 */
static void trinity_setup_uvd_dpm_interval(struct radeon_device *rdev,
					   u32 interval)
{
	u32 p, u;
	u32 tp = RREG32_SMC(PM_TP);
	u32 val;
	u32 xclk = radeon_get_xclk(rdev);

	r600_calculate_u_and_p(interval, xclk, 16, &p, &u);

	/* ceiling division: number of tp periods covering p */
	val = (p + tp - 1) / tp;

	WREG32_SMC(SMU_UVD_DPM_CNTL, val);
}
891
892static bool trinity_uvd_clocks_zero(struct radeon_ps *rps)
893{
894 if ((rps->vclk == 0) && (rps->dclk == 0))
895 return true;
896 else
897 return false;
898}
899
900static bool trinity_uvd_clocks_equal(struct radeon_ps *rps1,
901 struct radeon_ps *rps2)
902{
903 struct trinity_ps *ps1 = trinity_get_ps(rps1);
904 struct trinity_ps *ps2 = trinity_get_ps(rps2);
905
906 if ((rps1->vclk == rps2->vclk) &&
907 (rps1->dclk == rps2->dclk) &&
908 (ps1->vclk_low_divider == ps2->vclk_low_divider) &&
909 (ps1->vclk_high_divider == ps2->vclk_high_divider) &&
910 (ps1->dclk_low_divider == ps2->dclk_low_divider) &&
911 (ps1->dclk_high_divider == ps2->dclk_high_divider))
912 return true;
913 else
914 return false;
915}
916
/* Reconcile UVD clocks for a state transition.
 * With UVD DPM: stop the UVD DPM interval when UVD turns off; otherwise
 * program the divider table, and on an off->on transition additionally
 * clear bit 1 of CG_MISC_REG, set the clocks, and start a 3000-unit DPM
 * interval.  Without UVD DPM: just set the clocks when they change to a
 * non-zero value.
 */
static void trinity_setup_uvd_clocks(struct radeon_device *rdev,
				     struct radeon_ps *new_rps,
				     struct radeon_ps *old_rps)
{
	struct trinity_power_info *pi = trinity_get_pi(rdev);

	if (pi->uvd_dpm) {
		if (trinity_uvd_clocks_zero(new_rps) &&
		    !trinity_uvd_clocks_zero(old_rps)) {
			/* UVD turning off: stop the UVD DPM interval */
			trinity_setup_uvd_dpm_interval(rdev, 0);
		} else if (!trinity_uvd_clocks_zero(new_rps)) {
			trinity_setup_uvd_clock_table(rdev, new_rps);

			if (trinity_uvd_clocks_zero(old_rps)) {
				/* UVD turning on */
				u32 tmp = RREG32(CG_MISC_REG);
				tmp &= 0xfffffffd;	/* clear bit 1 */
				WREG32(CG_MISC_REG, tmp);

				radeon_set_uvd_clocks(rdev, new_rps->vclk, new_rps->dclk);

				trinity_setup_uvd_dpm_interval(rdev, 3000);
			}
		}
		trinity_uvd_dpm_config(rdev);
	} else {
		/* no UVD DPM: program the clocks directly when they change */
		if (trinity_uvd_clocks_zero(new_rps) ||
		    trinity_uvd_clocks_equal(new_rps, old_rps))
			return;

		radeon_set_uvd_clocks(rdev, new_rps->vclk, new_rps->dclk);
	}
}
949
950static void trinity_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
951 struct radeon_ps *new_rps,
952 struct radeon_ps *old_rps)
953{
954 struct trinity_ps *new_ps = trinity_get_ps(new_rps);
955 struct trinity_ps *current_ps = trinity_get_ps(new_rps);
956
957 if (new_ps->levels[new_ps->num_levels - 1].sclk >=
958 current_ps->levels[current_ps->num_levels - 1].sclk)
959 return;
960
961 trinity_setup_uvd_clocks(rdev, new_rps, old_rps);
962}
963
/* Reprogram the UVD clocks *after* the engine clock change, but only
 * when the engine clock did not drop (the dropping case is handled
 * beforehand by trinity_set_uvd_clock_before_set_eng_clock()).
 */
static void trinity_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
						      struct radeon_ps *new_rps,
						      struct radeon_ps *old_rps)
{
	struct trinity_ps *new_ps = trinity_get_ps(new_rps);
	struct trinity_ps *current_ps = trinity_get_ps(old_rps);

	if (new_ps->levels[new_ps->num_levels - 1].sclk <
	    current_ps->levels[current_ps->num_levels - 1].sclk)
		return;

	trinity_setup_uvd_clocks(rdev, new_rps, old_rps);
}
977
/* Program the thermal throttle high/low trip points from the configured
 * auto-throttle temperature.  The +49 offset and *8 scaling match the
 * DIG_THERM programming in trinity_set_thermal_temperature_range().
 */
static void trinity_program_ttt(struct radeon_device *rdev)
{
	struct trinity_power_info *pi = trinity_get_pi(rdev);
	u32 value = RREG32_SMC(SMU_SCLK_DPM_TTT);

	value &= ~(HT_MASK | LT_MASK);
	value |= HT((pi->thermal_auto_throttling + 49) * 8);
	value |= LT((pi->thermal_auto_throttling + 49 - pi->sys_info.htc_hyst_lmt) * 8);
	WREG32_SMC(SMU_SCLK_DPM_TTT, value);
}
988
989static void trinity_enable_att(struct radeon_device *rdev)
990{
991 u32 value = RREG32_SMC(SMU_SCLK_DPM_TT_CNTL);
992
993 value &= ~SCLK_TT_EN_MASK;
994 value |= SCLK_TT_EN(1);
995 WREG32_SMC(SMU_SCLK_DPM_TT_CNTL, value);
996}
997
/* Program the SCLK DPM evaluation interval: scale a 400-unit interval
 * against xclk, round up to whole PM_TP periods, and store the result
 * in PM_I_CNTL_1.
 */
static void trinity_program_sclk_dpm(struct radeon_device *rdev)
{
	u32 p, u;
	u32 tp = RREG32_SMC(PM_TP);
	u32 ni;
	u32 xclk = radeon_get_xclk(rdev);
	u32 value;

	r600_calculate_u_and_p(400, xclk, 16, &p, &u);

	/* ceiling division: number of tp periods covering p */
	ni = (p + tp - 1) / tp;

	value = RREG32_SMC(PM_I_CNTL_1);
	value &= ~SCLK_DPM_MASK;
	value |= SCLK_DPM(ni);
	WREG32_SMC(PM_I_CNTL_1, value);
}
1015
1016static int trinity_set_thermal_temperature_range(struct radeon_device *rdev,
1017 int min_temp, int max_temp)
1018{
1019 int low_temp = 0 * 1000;
1020 int high_temp = 255 * 1000;
1021
1022 if (low_temp < min_temp)
1023 low_temp = min_temp;
1024 if (high_temp > max_temp)
1025 high_temp = max_temp;
1026 if (high_temp < low_temp) {
1027 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
1028 return -EINVAL;
1029 }
1030
1031 WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTH(49 + (high_temp / 1000)), ~DIG_THERM_INTH_MASK);
1032 WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTL(49 + (low_temp / 1000)), ~DIG_THERM_INTL_MASK);
1033
1034 rdev->pm.dpm.thermal.min_temp = low_temp;
1035 rdev->pm.dpm.thermal.max_temp = high_temp;
1036
1037 return 0;
1038}
1039
1040static void trinity_update_current_ps(struct radeon_device *rdev,
1041 struct radeon_ps *rps)
1042{
1043 struct trinity_ps *new_ps = trinity_get_ps(rps);
1044 struct trinity_power_info *pi = trinity_get_pi(rdev);
1045
1046 pi->current_rps = *rps;
1047 pi->current_ps = *new_ps;
1048 pi->current_rps.ps_priv = &pi->current_ps;
1049}
1050
1051static void trinity_update_requested_ps(struct radeon_device *rdev,
1052 struct radeon_ps *rps)
1053{
1054 struct trinity_ps *new_ps = trinity_get_ps(rps);
1055 struct trinity_power_info *pi = trinity_get_pi(rdev);
1056
1057 pi->requested_rps = *rps;
1058 pi->requested_ps = *new_ps;
1059 pi->requested_rps.ps_priv = &pi->requested_ps;
1060}
1061
1062int trinity_dpm_enable(struct radeon_device *rdev)
1063{
1064 struct trinity_power_info *pi = trinity_get_pi(rdev);
1065 int ret;
1066
1067 trinity_acquire_mutex(rdev);
1068
1069 if (trinity_dpm_enabled(rdev)) {
1070 trinity_release_mutex(rdev);
1071 return -EINVAL;
1072 }
1073
1074 trinity_enable_clock_power_gating(rdev);
1075 trinity_program_bootup_state(rdev);
1076 sumo_program_vc(rdev, 0x00C00033);
1077 trinity_start_am(rdev);
1078 if (pi->enable_auto_thermal_throttling) {
1079 trinity_program_ttt(rdev);
1080 trinity_enable_att(rdev);
1081 }
1082 trinity_program_sclk_dpm(rdev);
1083 trinity_start_dpm(rdev);
1084 trinity_wait_for_dpm_enabled(rdev);
1085 trinity_release_mutex(rdev);
1086
1087 if (rdev->irq.installed &&
1088 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1089 ret = trinity_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1090 if (ret) {
1091 trinity_release_mutex(rdev);
1092 return ret;
1093 }
1094 rdev->irq.dpm_thermal = true;
1095 radeon_irq_set(rdev);
1096 }
1097
1098 trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
1099
1100 return 0;
1101}
1102
/* Tear down DPM: disable gating, clear voltage control, drop to level 0,
 * stop DPM and the activity monitor, then disable the thermal interrupt
 * and reset the current state to the boot state.  No-op when DPM is not
 * enabled.
 */
void trinity_dpm_disable(struct radeon_device *rdev)
{
	trinity_acquire_mutex(rdev);
	if (!trinity_dpm_enabled(rdev)) {
		trinity_release_mutex(rdev);
		return;
	}
	trinity_disable_clock_power_gating(rdev);
	sumo_clear_vc(rdev);
	trinity_wait_for_level_0(rdev);
	trinity_stop_dpm(rdev);
	trinity_reset_am(rdev);
	trinity_release_mutex(rdev);

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		rdev->irq.dpm_thermal = false;
		radeon_irq_set(rdev);
	}

	trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}
1125
1126static void trinity_get_min_sclk_divider(struct radeon_device *rdev)
1127{
1128 struct trinity_power_info *pi = trinity_get_pi(rdev);
1129
1130 pi->min_sclk_did =
1131 (RREG32_SMC(CC_SMU_MISC_FUSES) & MinSClkDid_MASK) >> MinSClkDid_SHIFT;
1132}
1133
/* Program the NB P-state configuration for the new state (low/high
 * selections for the power-gated Dpm0 case and the DpmX case).  No-op
 * unless NB DPM is enabled in sysinfo.
 */
static void trinity_setup_nbp_sim(struct radeon_device *rdev,
				  struct radeon_ps *rps)
{
	struct trinity_power_info *pi = trinity_get_pi(rdev);
	struct trinity_ps *new_ps = trinity_get_ps(rps);
	u32 nbpsconfig;

	if (pi->sys_info.nb_dpm_enable) {
		nbpsconfig = RREG32_SMC(NB_PSTATE_CONFIG);
		nbpsconfig &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK | DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
		nbpsconfig |= (Dpm0PgNbPsLo(new_ps->Dpm0PgNbPsLo) |
			       Dpm0PgNbPsHi(new_ps->Dpm0PgNbPsHi) |
			       DpmXNbPsLo(new_ps->DpmXNbPsLo) |
			       DpmXNbPsHi(new_ps->DpmXNbPsHi));
		WREG32_SMC(NB_PSTATE_CONFIG, nbpsconfig);
	}
}
1151
1152int trinity_dpm_pre_set_power_state(struct radeon_device *rdev)
1153{
1154 struct trinity_power_info *pi = trinity_get_pi(rdev);
1155 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
1156 struct radeon_ps *new_ps = &requested_ps;
1157
1158 trinity_update_requested_ps(rdev, new_ps);
1159
1160 trinity_apply_state_adjust_rules(rdev,
1161 &pi->requested_rps,
1162 &pi->current_rps);
1163
1164 return 0;
1165}
1166
/* Perform the actual state transition under the hw mutex:
 * UVD clocks first if the engine clock is dropping, force level 0,
 * reprogram NB P-states and all power levels, release the force, and
 * finally the UVD clocks for the non-dropping case.  Always returns 0.
 */
int trinity_dpm_set_power_state(struct radeon_device *rdev)
{
	struct trinity_power_info *pi = trinity_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;

	trinity_acquire_mutex(rdev);
	if (pi->enable_dpm) {
		trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
		trinity_enable_power_level_0(rdev);
		trinity_force_level_0(rdev);
		trinity_wait_for_level_0(rdev);
		trinity_setup_nbp_sim(rdev, new_ps);
		trinity_program_power_levels_0_to_n(rdev, new_ps, old_ps);
		trinity_force_level_0(rdev);
		trinity_unforce_levels(rdev);
		trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
	}
	trinity_release_mutex(rdev);

	return 0;
}
1189
1190void trinity_dpm_post_set_power_state(struct radeon_device *rdev)
1191{
1192 struct trinity_power_info *pi = trinity_get_pi(rdev);
1193 struct radeon_ps *new_ps = &pi->requested_rps;
1194
1195 trinity_update_current_ps(rdev, new_ps);
1196}
1197
/* One-time asic setup under the hw mutex: program SSTP, hand control to
 * the SMU, and cache the minimum sclk divider from the fuses.
 */
void trinity_dpm_setup_asic(struct radeon_device *rdev)
{
	trinity_acquire_mutex(rdev);
	sumo_program_sstp(rdev);
	sumo_take_smu_control(rdev, true);
	trinity_get_min_sclk_divider(rdev);
	trinity_release_mutex(rdev);
}
1206
/* Drop back to the bootup power state: force level 0, reprogram the
 * bootup state into the table, then release the forced level.  No-op
 * unless DPM is enabled.
 */
void trinity_dpm_reset_asic(struct radeon_device *rdev)
{
	struct trinity_power_info *pi = trinity_get_pi(rdev);

	trinity_acquire_mutex(rdev);
	if (pi->enable_dpm) {
		trinity_enable_power_level_0(rdev);
		trinity_force_level_0(rdev);
		trinity_wait_for_level_0(rdev);
		trinity_program_bootup_state(rdev);
		trinity_force_level_0(rdev);
		trinity_unforce_levels(rdev);
	}
	trinity_release_mutex(rdev);
}
1222
1223static u16 trinity_convert_voltage_index_to_value(struct radeon_device *rdev,
1224 u32 vid_2bit)
1225{
1226 struct trinity_power_info *pi = trinity_get_pi(rdev);
1227 u32 vid_7bit = sumo_convert_vid2_to_vid7(rdev, &pi->sys_info.vid_mapping_table, vid_2bit);
1228 u32 svi_mode = (RREG32_SMC(PM_CONFIG) & SVI_Mode) ? 1 : 0;
1229 u32 step = (svi_mode == 0) ? 1250 : 625;
1230 u32 delta = vid_7bit * step + 50;
1231
1232 if (delta > 155000)
1233 return 0;
1234
1235 return (155000 - delta) / 100;
1236}
1237
1238static void trinity_patch_boot_state(struct radeon_device *rdev,
1239 struct trinity_ps *ps)
1240{
1241 struct trinity_power_info *pi = trinity_get_pi(rdev);
1242
1243 ps->num_levels = 1;
1244 ps->nbps_flags = 0;
1245 ps->bapm_flags = 0;
1246 ps->levels[0] = pi->boot_pl;
1247}
1248
1249static u8 trinity_calculate_vce_wm(struct radeon_device *rdev, u32 sclk)
1250{
1251 if (sclk < 20000)
1252 return 1;
1253 return 0;
1254}
1255
1256static void trinity_construct_boot_state(struct radeon_device *rdev)
1257{
1258 struct trinity_power_info *pi = trinity_get_pi(rdev);
1259
1260 pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
1261 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
1262 pi->boot_pl.ds_divider_index = 0;
1263 pi->boot_pl.ss_divider_index = 0;
1264 pi->boot_pl.allow_gnb_slow = 1;
1265 pi->boot_pl.force_nbp_state = 0;
1266 pi->boot_pl.display_wm = 0;
1267 pi->boot_pl.vce_wm = 0;
1268 pi->current_ps.num_levels = 1;
1269 pi->current_ps.levels[0] = pi->boot_pl;
1270}
1271
1272static u8 trinity_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1273 u32 sclk, u32 min_sclk_in_sr)
1274{
1275 struct trinity_power_info *pi = trinity_get_pi(rdev);
1276 u32 i;
1277 u32 temp;
1278 u32 min = (min_sclk_in_sr > TRINITY_MINIMUM_ENGINE_CLOCK) ?
1279 min_sclk_in_sr : TRINITY_MINIMUM_ENGINE_CLOCK;
1280
1281 if (sclk < min)
1282 return 0;
1283
1284 if (!pi->enable_sclk_ds)
1285 return 0;
1286
1287 for (i = TRINITY_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
1288 temp = sclk / sumo_get_sleep_divider_from_id(i);
1289 if (temp >= min || i == 0)
1290 break;
1291 }
1292
1293 return (u8)i;
1294}
1295
1296static u32 trinity_get_valid_engine_clock(struct radeon_device *rdev,
1297 u32 lower_limit)
1298{
1299 struct trinity_power_info *pi = trinity_get_pi(rdev);
1300 u32 i;
1301
1302 for (i = 0; i < pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries; i++) {
1303 if (pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency >= lower_limit)
1304 return pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency;
1305 }
1306
1307 if (i == pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries)
1308 DRM_ERROR("engine clock out of range!");
1309
1310 return 0;
1311}
1312
/* Rewrite level 0 of a thermal state so it never exceeds the currently
 * running level's clock/voltage (falling back to the boot level when no
 * current state exists), and fill in the derived divider/watermark
 * fields.
 */
static void trinity_patch_thermal_state(struct radeon_device *rdev,
					struct trinity_ps *ps,
					struct trinity_ps *current_ps)
{
	struct trinity_power_info *pi = trinity_get_pi(rdev);
	u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
	u32 current_vddc;
	u32 current_sclk;
	u32 current_index = 0;

	if (current_ps) {
		current_vddc = current_ps->levels[current_index].vddc_index;
		current_sclk = current_ps->levels[current_index].sclk;
	} else {
		/* no current state: fall back to the boot level */
		current_vddc = pi->boot_pl.vddc_index;
		current_sclk = pi->boot_pl.sclk;
	}

	ps->levels[0].vddc_index = current_vddc;

	/* never run the thermal state faster than the current clock */
	if (ps->levels[0].sclk > current_sclk)
		ps->levels[0].sclk = current_sclk;

	ps->levels[0].ds_divider_index =
		trinity_get_sleep_divider_id_from_clock(rdev, ps->levels[0].sclk, sclk_in_sr);
	ps->levels[0].ss_divider_index = ps->levels[0].ds_divider_index;
	ps->levels[0].allow_gnb_slow = 1;
	ps->levels[0].force_nbp_state = 0;
	ps->levels[0].display_wm = 0;
	ps->levels[0].vce_wm =
		trinity_calculate_vce_wm(rdev, ps->levels[0].sclk);
}
1345
1346static u8 trinity_calculate_display_wm(struct radeon_device *rdev,
1347 struct trinity_ps *ps, u32 index)
1348{
1349 if (ps == NULL || ps->num_levels <= 1)
1350 return 0;
1351 else if (ps->num_levels == 2) {
1352 if (index == 0)
1353 return 0;
1354 else
1355 return 1;
1356 } else {
1357 if (index == 0)
1358 return 0;
1359 else if (ps->levels[index].sclk < 30000)
1360 return 0;
1361 else
1362 return 1;
1363 }
1364}
1365
1366static u32 trinity_get_uvd_clock_index(struct radeon_device *rdev,
1367 struct radeon_ps *rps)
1368{
1369 struct trinity_power_info *pi = trinity_get_pi(rdev);
1370 u32 i = 0;
1371
1372 for (i = 0; i < 4; i++) {
1373 if ((rps->vclk == pi->sys_info.uvd_clock_table_entries[i].vclk) &&
1374 (rps->dclk == pi->sys_info.uvd_clock_table_entries[i].dclk))
1375 break;
1376 }
1377
1378 if (i >= 4) {
1379 DRM_ERROR("UVD clock index not found!\n");
1380 i = 3;
1381 }
1382 return i;
1383}
1384
/* For UVD states with UVD DPM enabled, derive the low/high divider pairs
 * from the sysinfo UVD clock table: the state's own clocks select
 * high_index, and a reduced companion entry selects low_index.
 * NOTE(review): the high_index entry feeds the *_low_divider fields and
 * vice versa — presumably intentional (a higher clock implies a lower
 * divider), but worth confirming against the hw docs.
 */
static void trinity_adjust_uvd_state(struct radeon_device *rdev,
				     struct radeon_ps *rps)
{
	struct trinity_ps *ps = trinity_get_ps(rps);
	struct trinity_power_info *pi = trinity_get_pi(rdev);
	u32 high_index = 0;
	u32 low_index = 0;

	if (pi->uvd_dpm && r600_is_uvd_state(rps->class, rps->class2)) {
		high_index = trinity_get_uvd_clock_index(rdev, rps);

		/* companion entry: one step down for the two highest entries */
		switch(high_index) {
		case 3:
		case 2:
			low_index = 1;
			break;
		case 1:
		case 0:
		default:
			low_index = 0;
			break;
		}

		ps->vclk_low_divider =
			pi->sys_info.uvd_clock_table_entries[high_index].vclk_did;
		ps->dclk_low_divider =
			pi->sys_info.uvd_clock_table_entries[high_index].dclk_did;
		ps->vclk_high_divider =
			pi->sys_info.uvd_clock_table_entries[low_index].vclk_did;
		ps->dclk_high_divider =
			pi->sys_info.uvd_clock_table_entries[low_index].dclk_did;
	}
}
1418
1419
1420
/* Massage a requested state so it satisfies the hardware constraints:
 * thermal states are collapsed to a clamped single level; UVD dividers
 * are derived; every level is clamped to the minimum voltage/sclk and
 * gets fresh divider/watermark fields; battery/HD/SD states disable
 * BAPM; and NB P-state selections are filled in when NB DPM is enabled.
 */
static void trinity_apply_state_adjust_rules(struct radeon_device *rdev,
					     struct radeon_ps *new_rps,
					     struct radeon_ps *old_rps)
{
	struct trinity_ps *ps = trinity_get_ps(new_rps);
	struct trinity_ps *current_ps = trinity_get_ps(old_rps);
	struct trinity_power_info *pi = trinity_get_pi(rdev);
	u32 min_voltage = 0; /* ??? */
	u32 min_sclk = pi->sys_info.min_sclk; /* XXX check against disp reqs */
	u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
	u32 i;
	bool force_high;
	u32 num_active_displays = rdev->pm.dpm.new_active_crtc_count;

	if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
		return trinity_patch_thermal_state(rdev, ps, current_ps);

	trinity_adjust_uvd_state(rdev, new_rps);

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].vddc_index < min_voltage)
			ps->levels[i].vddc_index = min_voltage;

		if (ps->levels[i].sclk < min_sclk)
			ps->levels[i].sclk =
				trinity_get_valid_engine_clock(rdev, min_sclk);

		ps->levels[i].ds_divider_index =
			sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[i].sclk, sclk_in_sr);

		ps->levels[i].ss_divider_index = ps->levels[i].ds_divider_index;

		ps->levels[i].allow_gnb_slow = 1;
		ps->levels[i].force_nbp_state = 0;
		ps->levels[i].display_wm =
			trinity_calculate_display_wm(rdev, ps, i);
		/* NOTE(review): uses levels[0].sclk for every level — looks like
		 * it might want levels[i].sclk; confirm before changing
		 */
		ps->levels[i].vce_wm =
			trinity_calculate_vce_wm(rdev, ps->levels[0].sclk);
	}

	if ((new_rps->class & (ATOM_PPLIB_CLASSIFICATION_HDSTATE | ATOM_PPLIB_CLASSIFICATION_SDSTATE)) ||
	    ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY))
		ps->bapm_flags |= TRINITY_POWERSTATE_FLAGS_BAPM_DISABLE;

	if (pi->sys_info.nb_dpm_enable) {
		ps->Dpm0PgNbPsLo = 0x1;
		ps->Dpm0PgNbPsHi = 0x0;
		ps->DpmXNbPsLo = 0x2;
		ps->DpmXNbPsHi = 0x1;

		if ((new_rps->class & (ATOM_PPLIB_CLASSIFICATION_HDSTATE | ATOM_PPLIB_CLASSIFICATION_SDSTATE)) ||
		    ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)) {
			/* HD playback on single-channel, or 3+ displays, forces
			 * the higher NB P-state selections
			 */
			force_high = ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) ||
				      ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) &&
				       (pi->sys_info.uma_channel_number == 1)));
			force_high = (num_active_displays >= 3) || force_high;
			ps->Dpm0PgNbPsLo = force_high ? 0x2 : 0x3;
			ps->Dpm0PgNbPsHi = 0x1;
			ps->DpmXNbPsLo = force_high ? 0x2 : 0x3;
			ps->DpmXNbPsHi = 0x2;
			ps->levels[ps->num_levels - 1].allow_gnb_slow = 0;
		}
	}
}
1485
/* Hand SMU control back to the firmware on teardown (see trinity_dpm_fini()). */
static void trinity_cleanup_asic(struct radeon_device *rdev)
{
	sumo_take_smu_control(rdev, false);
}
1490
#if 0
/* Currently compiled out: would drop the DCE voltage adjustment before a
 * display reconfiguration on platforms using voltage_drop_in_dce.  Kept
 * for reference until the dpm/modeset interaction is restructured. */
static void trinity_pre_display_configuration_change(struct radeon_device *rdev)
{
	struct trinity_power_info *pi = trinity_get_pi(rdev);

	if (pi->voltage_drop_in_dce)
		trinity_dce_enable_voltage_adjustment(rdev, false);
}
#endif
1500
/*
 * trinity_add_dccac_value - update the SMC's display-controller CAC value.
 *
 * Computes a contribution from the display clock and the number of active
 * CRTCs, normalized by the averaging window currently programmed in
 * GPU_CAC_AVRG_CNTL, and writes it to DC_CAC_VALUE.
 */
static void trinity_add_dccac_value(struct radeon_device *rdev)
{
	u32 gpu_cac_avrg_cntl_window_size;
	u32 num_active_displays = rdev->pm.dpm.new_active_crtc_count;
	/* presumably converts 10 kHz units to MHz — TODO confirm units */
	u64 disp_clk = rdev->clock.default_dispclk / 100;
	u32 dc_cac_value;

	gpu_cac_avrg_cntl_window_size =
		(RREG32_SMC(GPU_CAC_AVRG_CNTL) & WINDOW_SIZE_MASK) >> WINDOW_SIZE_SHIFT;

	/* 14213 is a hardware-derived coefficient; the 64-bit disp_clk keeps
	 * the squared product from overflowing before the window shift. */
	dc_cac_value = (u32)((14213 * disp_clk * disp_clk * (u64)num_active_displays) >>
			     (32 - gpu_cac_avrg_cntl_window_size));

	WREG32_SMC(DC_CAC_VALUE, dc_cac_value);
}
1516
1517void trinity_dpm_display_configuration_changed(struct radeon_device *rdev)
1518{
1519 struct trinity_power_info *pi = trinity_get_pi(rdev);
1520
1521 if (pi->voltage_drop_in_dce)
1522 trinity_dce_enable_voltage_adjustment(rdev, true);
1523 trinity_add_dccac_value(rdev);
1524}
1525
/* Overlay views of the variable-revision ATOM PowerPlay table; the
 * revision parsed from the VBIOS selects which member is valid. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Per-ASIC-family clock-info layouts; trinity uses the sumo layout. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

/* v1/v2 power-state layouts; the parser below reads the v2 form. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
1546
1547static void trinity_parse_pplib_non_clock_info(struct radeon_device *rdev,
1548 struct radeon_ps *rps,
1549 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
1550 u8 table_rev)
1551{
1552 struct trinity_ps *ps = trinity_get_ps(rps);
1553
1554 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
1555 rps->class = le16_to_cpu(non_clock_info->usClassification);
1556 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
1557
1558 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
1559 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
1560 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
1561 } else {
1562 rps->vclk = 0;
1563 rps->dclk = 0;
1564 }
1565
1566 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
1567 rdev->pm.dpm.boot_ps = rps;
1568 trinity_patch_boot_state(rdev, ps);
1569 }
1570 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
1571 rdev->pm.dpm.uvd_ps = rps;
1572}
1573
1574static void trinity_parse_pplib_clock_info(struct radeon_device *rdev,
1575 struct radeon_ps *rps, int index,
1576 union pplib_clock_info *clock_info)
1577{
1578 struct trinity_power_info *pi = trinity_get_pi(rdev);
1579 struct trinity_ps *ps = trinity_get_ps(rps);
1580 struct trinity_pl *pl = &ps->levels[index];
1581 u32 sclk;
1582
1583 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
1584 sclk |= clock_info->sumo.ucEngineClockHigh << 16;
1585 pl->sclk = sclk;
1586 pl->vddc_index = clock_info->sumo.vddcIndex;
1587
1588 ps->num_levels = index + 1;
1589
1590 if (pi->enable_sclk_ds) {
1591 pl->ds_divider_index = 5;
1592 pl->ss_divider_index = 5;
1593 }
1594}
1595
1596static int trinity_parse_power_table(struct radeon_device *rdev)
1597{
1598 struct radeon_mode_info *mode_info = &rdev->mode_info;
1599 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
1600 union pplib_power_state *power_state;
1601 int i, j, k, non_clock_array_index, clock_array_index;
1602 union pplib_clock_info *clock_info;
1603 struct _StateArray *state_array;
1604 struct _ClockInfoArray *clock_info_array;
1605 struct _NonClockInfoArray *non_clock_info_array;
1606 union power_info *power_info;
1607 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
1608 u16 data_offset;
1609 u8 frev, crev;
1610 u8 *power_state_offset;
1611 struct sumo_ps *ps;
1612
1613 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
1614 &frev, &crev, &data_offset))
1615 return -EINVAL;
1616 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1617
1618 state_array = (struct _StateArray *)
1619 (mode_info->atom_context->bios + data_offset +
1620 le16_to_cpu(power_info->pplib.usStateArrayOffset));
1621 clock_info_array = (struct _ClockInfoArray *)
1622 (mode_info->atom_context->bios + data_offset +
1623 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
1624 non_clock_info_array = (struct _NonClockInfoArray *)
1625 (mode_info->atom_context->bios + data_offset +
1626 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
1627
1628 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
1629 state_array->ucNumEntries, GFP_KERNEL);
1630 if (!rdev->pm.dpm.ps)
1631 return -ENOMEM;
1632 power_state_offset = (u8 *)state_array->states;
1633 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
1634 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
1635 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
1636 for (i = 0; i < state_array->ucNumEntries; i++) {
1637 power_state = (union pplib_power_state *)power_state_offset;
1638 non_clock_array_index = power_state->v2.nonClockInfoIndex;
1639 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
1640 &non_clock_info_array->nonClockInfo[non_clock_array_index];
1641 if (!rdev->pm.power_state[i].clock_info)
1642 return -EINVAL;
1643 ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
1644 if (ps == NULL) {
1645 kfree(rdev->pm.dpm.ps);
1646 return -ENOMEM;
1647 }
1648 rdev->pm.dpm.ps[i].ps_priv = ps;
1649 k = 0;
1650 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
1651 clock_array_index = power_state->v2.clockInfoIndex[j];
1652 if (clock_array_index >= clock_info_array->ucNumEntries)
1653 continue;
1654 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
1655 break;
1656 clock_info = (union pplib_clock_info *)
1657 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
1658 trinity_parse_pplib_clock_info(rdev,
1659 &rdev->pm.dpm.ps[i], k,
1660 clock_info);
1661 k++;
1662 }
1663 trinity_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
1664 non_clock_info,
1665 non_clock_info_array->ucEntrySize);
1666 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
1667 }
1668 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
1669 return 0;
1670}
1671
/* Overlay views of the IntegratedSystemInfo table revisions; trinity
 * requires the rev-7 layout (see trinity_parse_sys_info_table()). */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
};
1679
1680static u32 trinity_convert_did_to_freq(struct radeon_device *rdev, u8 did)
1681{
1682 struct trinity_power_info *pi = trinity_get_pi(rdev);
1683 u32 divider;
1684
1685 if (did >= 8 && did <= 0x3f)
1686 divider = did * 25;
1687 else if (did > 0x3f && did <= 0x5f)
1688 divider = (did - 64) * 50 + 1600;
1689 else if (did > 0x5f && did <= 0x7e)
1690 divider = (did - 96) * 100 + 3200;
1691 else if (did == 0x7f)
1692 divider = 128 * 100;
1693 else
1694 return 10000;
1695
1696 return ((pi->sys_info.dentist_vco_freq * 100) + (divider - 1)) / divider;
1697}
1698
/*
 * trinity_parse_sys_info_table - cache IGP system info from the VBIOS.
 *
 * Reads the rev-7 IntegratedSystemInfo table into pi->sys_info: bootup
 * clocks, thermal limits, NB p-state clocks/voltages, the sclk voltage
 * mapping tables and the UVD DPM clock table.  Returns 0 on success or
 * -EINVAL for an unsupported table revision.
 * NOTE(review): a missing table header also returns 0, leaving sys_info
 * zeroed — confirm callers tolerate that.
 */
static int trinity_parse_sys_info_table(struct radeon_device *rdev)
{
	struct trinity_power_info *pi = trinity_get_pi(rdev);
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		/* Trinity only understands the rev-7 layout. */
		if (crev != 7) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_7.ulBootUpEngineClock);
		pi->sys_info.min_sclk = le32_to_cpu(igp_info->info_7.ulMinEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_7.ulBootUpUMAClock);
		pi->sys_info.dentist_vco_freq = le32_to_cpu(igp_info->info_7.ulDentistVCOFreq);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_7.usBootUpNBVoltage);
		/* Fall back to defaults when the VBIOS leaves the thermal
		 * limits unset. */
		if (igp_info->info_7.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_7.ucHtcTmpLmt;
		if (igp_info->info_7.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_7.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		if (pi->enable_nbps_policy)
			pi->sys_info.nb_dpm_enable = igp_info->info_7.ucNBDPMEnable;
		else
			pi->sys_info.nb_dpm_enable = 0;

		for (i = 0; i < TRINITY_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_mclk[i] = le32_to_cpu(igp_info->info_7.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_nclk[i] = le32_to_cpu(igp_info->info_7.ulNbpStateNClkFreq[i]);
		}

		pi->sys_info.nbp_voltage_index[0] = le16_to_cpu(igp_info->info_7.usNBP0Voltage);
		pi->sys_info.nbp_voltage_index[1] = le16_to_cpu(igp_info->info_7.usNBP1Voltage);
		pi->sys_info.nbp_voltage_index[2] = le16_to_cpu(igp_info->info_7.usNBP2Voltage);
		pi->sys_info.nbp_voltage_index[3] = le16_to_cpu(igp_info->info_7.usNBP3Voltage);

		/* With NB DPM disabled, pin every NB p-state to state 0. */
		if (!pi->sys_info.nb_dpm_enable) {
			for (i = 1; i < TRINITY_NUM_NBPSTATES; i++) {
				pi->sys_info.nbp_mclk[i] = pi->sys_info.nbp_mclk[0];
				pi->sys_info.nbp_nclk[i] = pi->sys_info.nbp_nclk[0];
				pi->sys_info.nbp_voltage_index[i] = pi->sys_info.nbp_voltage_index[0];
			}
		}

		pi->sys_info.uma_channel_number = igp_info->info_7.ucUMAChannelNumber;

		sumo_construct_sclk_voltage_mapping_table(rdev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_7.sAvail_SCLK);
		sumo_construct_vid_mapping_table(rdev, &pi->sys_info.vid_mapping_table,
						 igp_info->info_7.sAvail_SCLK);

		/* Cache the four UVD DPM state divider IDs... */
		pi->sys_info.uvd_clock_table_entries[0].vclk_did =
			igp_info->info_7.ucDPMState0VclkFid;
		pi->sys_info.uvd_clock_table_entries[1].vclk_did =
			igp_info->info_7.ucDPMState1VclkFid;
		pi->sys_info.uvd_clock_table_entries[2].vclk_did =
			igp_info->info_7.ucDPMState2VclkFid;
		pi->sys_info.uvd_clock_table_entries[3].vclk_did =
			igp_info->info_7.ucDPMState3VclkFid;

		pi->sys_info.uvd_clock_table_entries[0].dclk_did =
			igp_info->info_7.ucDPMState0DclkFid;
		pi->sys_info.uvd_clock_table_entries[1].dclk_did =
			igp_info->info_7.ucDPMState1DclkFid;
		pi->sys_info.uvd_clock_table_entries[2].dclk_did =
			igp_info->info_7.ucDPMState2DclkFid;
		pi->sys_info.uvd_clock_table_entries[3].dclk_did =
			igp_info->info_7.ucDPMState3DclkFid;

		/* ...and pre-convert them to frequencies. */
		for (i = 0; i < 4; i++) {
			pi->sys_info.uvd_clock_table_entries[i].vclk =
				trinity_convert_did_to_freq(rdev,
							    pi->sys_info.uvd_clock_table_entries[i].vclk_did);
			pi->sys_info.uvd_clock_table_entries[i].dclk =
				trinity_convert_did_to_freq(rdev,
							    pi->sys_info.uvd_clock_table_entries[i].dclk_did);
		}
	}
	return 0;
}
1799
/*
 * trinity_dpm_init - allocate and initialize trinity DPM private state.
 *
 * Allocates trinity_power_info (freed in trinity_dpm_fini()), sets the
 * default feature flags, parses the VBIOS system-info and power tables,
 * and constructs the boot state.  Returns 0 or a negative errno.
 * NOTE(review): on parse failure pi stays attached to rdev->pm.dpm.priv;
 * it is only reclaimed if trinity_dpm_fini() runs — confirm the caller
 * does that on error.
 */
int trinity_dpm_init(struct radeon_device *rdev)
{
	struct trinity_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct trinity_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	/* Default 'at' tuning value for every hardware level. */
	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	/* Feature defaults. */
	pi->enable_nbps_policy = true;
	pi->enable_sclk_ds = true;
	pi->enable_gfx_power_gating = true;
	pi->enable_gfx_clock_gating = true;
	pi->enable_mg_clock_gating = true;
	pi->enable_gfx_dynamic_mgpg = true; /* ??? */
	pi->override_dynamic_mgpg = true;
	pi->enable_auto_thermal_throttling = true;
	pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */
	pi->uvd_dpm = true; /* ??? */

	ret = trinity_parse_sys_info_table(rdev);
	if (ret)
		return ret;

	trinity_construct_boot_state(rdev);

	ret = trinity_parse_power_table(rdev);
	if (ret)
		return ret;

	/* Throttle at the HTC temperature limit read from the VBIOS. */
	pi->thermal_auto_throttling = pi->sys_info.htc_tmp_lmt;
	pi->enable_dpm = true;

	return 0;
}
1839
1840void trinity_dpm_print_power_state(struct radeon_device *rdev,
1841 struct radeon_ps *rps)
1842{
1843 int i;
1844 struct trinity_ps *ps = trinity_get_ps(rps);
1845
1846 r600_dpm_print_class_info(rps->class, rps->class2);
1847 r600_dpm_print_cap_info(rps->caps);
1848 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
1849 for (i = 0; i < ps->num_levels; i++) {
1850 struct trinity_pl *pl = &ps->levels[i];
1851 printk("\t\tpower level %d sclk: %u vddc: %u\n",
1852 i, pl->sclk,
1853 trinity_convert_voltage_index_to_value(rdev, pl->vddc_index));
1854 }
1855 r600_dpm_print_ps_status(rdev, rps);
1856}
1857
1858void trinity_dpm_fini(struct radeon_device *rdev)
1859{
1860 int i;
1861
1862 trinity_cleanup_asic(rdev); /* ??? */
1863
1864 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1865 kfree(rdev->pm.dpm.ps[i].ps_priv);
1866 }
1867 kfree(rdev->pm.dpm.ps);
1868 kfree(rdev->pm.dpm.priv);
1869}
1870
1871u32 trinity_dpm_get_sclk(struct radeon_device *rdev, bool low)
1872{
1873 struct trinity_power_info *pi = trinity_get_pi(rdev);
1874 struct trinity_ps *requested_state = trinity_get_ps(&pi->requested_rps);
1875
1876 if (low)
1877 return requested_state->levels[0].sclk;
1878 else
1879 return requested_state->levels[requested_state->num_levels - 1].sclk;
1880}
1881
1882u32 trinity_dpm_get_mclk(struct radeon_device *rdev, bool low)
1883{
1884 struct trinity_power_info *pi = trinity_get_pi(rdev);
1885
1886 return pi->sys_info.bootup_uma_clk;
1887}
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h
new file mode 100644
index 000000000000..c621b843aab5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/trinity_dpm.h
@@ -0,0 +1,131 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __TRINITY_DPM_H__
24#define __TRINITY_DPM_H__
25
26#include "sumo_dpm.h"
27
28#define TRINITY_SIZEOF_DPM_STATE_TABLE (SMU_SCLK_DPM_STATE_1_CNTL_0 - SMU_SCLK_DPM_STATE_0_CNTL_0)
29
/* One SCLK DPM performance level as programmed into the SMU state table. */
struct trinity_pl {
	u32 sclk;		/* engine clock */
	u8 vddc_index;		/* voltage index (see trinity_convert_voltage_index_to_value()) */
	u8 ds_divider_index;	/* deep-sleep divider select */
	u8 ss_divider_index;	/* copied from ds_divider_index by the adjust rules */
	u8 allow_gnb_slow;	/* allow GNB to slow in this level */
	u8 force_nbp_state;	/* force the NB p-state */
	u8 display_wm;		/* display watermark */
	u8 vce_wm;		/* VCE watermark */
};
40
41#define TRINITY_POWERSTATE_FLAGS_NBPS_FORCEHIGH (1 << 0)
42#define TRINITY_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1 << 1)
43#define TRINITY_POWERSTATE_FLAGS_NBPS_LOCKTOLOW (1 << 2)
44
45#define TRINITY_POWERSTATE_FLAGS_BAPM_DISABLE (1 << 0)
46
/* Driver-private power state: per-level entries plus the NB p-state and
 * UVD divider selections filled in by the state-adjust rules. */
struct trinity_ps {
	u32 num_levels;		/* valid entries in levels[] */
	struct trinity_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];

	u32 nbps_flags;		/* TRINITY_POWERSTATE_FLAGS_NBPS_* */
	u32 bapm_flags;		/* TRINITY_POWERSTATE_FLAGS_BAPM_* */

	/* NB p-state selections (match the NB_PSTATE_CONFIG fields). */
	u8 Dpm0PgNbPsLo;
	u8 Dpm0PgNbPsHi;
	u8 DpmXNbPsLo;
	u8 DpmXNbPsHi;

	/* UVD VCLK/DCLK divider IDs chosen by trinity_adjust_uvd_state(). */
	u32 vclk_low_divider;
	u32 vclk_high_divider;
	u32 dclk_low_divider;
	u32 dclk_high_divider;
};
64
65#define TRINITY_NUM_NBPSTATES 4
66
/* One UVD DPM state: divider IDs from the VBIOS plus the frequencies
 * derived from them via trinity_convert_did_to_freq(). */
struct trinity_uvd_clock_table_entry
{
	u32 vclk;	/* derived video clock */
	u32 dclk;	/* derived display/decode clock */
	u8 vclk_did;	/* divider ID from the IGP table */
	u8 dclk_did;	/* divider ID from the IGP table */
	u8 rsv[2];	/* padding */
};
75
/* Platform data cached from the VBIOS rev-7 IntegratedSystemInfo table
 * by trinity_parse_sys_info_table(). */
struct trinity_sys_info {
	u32 bootup_uma_clk;
	u32 bootup_sclk;
	u32 min_sclk;
	u32 dentist_vco_freq;
	u32 nb_dpm_enable;
	u32 nbp_mclk[TRINITY_NUM_NBPSTATES];	/* NB p-state memory clocks */
	u32 nbp_nclk[TRINITY_NUM_NBPSTATES];	/* NB p-state NCLK frequencies */
	u16 nbp_voltage_index[TRINITY_NUM_NBPSTATES];
	u16 bootup_nb_voltage_index;
	u8 htc_tmp_lmt;		/* thermal limit (defaults to 203 if unset) */
	u8 htc_hyst_lmt;	/* thermal hysteresis (defaults to 5 if unset) */
	struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
	struct sumo_vid_mapping_table vid_mapping_table;
	u32 uma_channel_number;
	struct trinity_uvd_clock_table_entry uvd_clock_table_entries[4];
};
93
/* Top-level trinity DPM private data, hung off rdev->pm.dpm.priv
 * (allocated in trinity_dpm_init(), freed in trinity_dpm_fini()). */
struct trinity_power_info {
	u32 at[SUMO_MAX_HARDWARE_POWERLEVELS];	/* per-level tuning, TRINITY_AT_DFLT */
	u32 dpm_interval;
	u32 thermal_auto_throttling;	/* taken from sys_info.htc_tmp_lmt */
	struct trinity_sys_info sys_info;
	struct trinity_pl boot_pl;
	u32 min_sclk_did;
	/* Feature flags; defaults set in trinity_dpm_init(). */
	bool enable_nbps_policy;
	bool voltage_drop_in_dce;
	bool override_dynamic_mgpg;
	bool enable_gfx_clock_gating;
	bool enable_gfx_power_gating;
	bool enable_mg_clock_gating;
	bool enable_gfx_dynamic_mgpg;
	bool enable_auto_thermal_throttling;
	bool enable_dpm;
	bool enable_sclk_ds;
	bool uvd_dpm;
	/* Current and requested states tracked across transitions. */
	struct radeon_ps current_rps;
	struct trinity_ps current_ps;
	struct radeon_ps requested_rps;
	struct trinity_ps requested_ps;
};
117
118#define TRINITY_AT_DFLT 30
119
120/* trinity_smc.c */
121int trinity_dpm_config(struct radeon_device *rdev, bool enable);
122int trinity_uvd_dpm_config(struct radeon_device *rdev);
123int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
124int trinity_dpm_no_forced_level(struct radeon_device *rdev);
125int trinity_dce_enable_voltage_adjustment(struct radeon_device *rdev,
126 bool enable);
127int trinity_gfx_dynamic_mgpg_config(struct radeon_device *rdev);
128void trinity_acquire_mutex(struct radeon_device *rdev);
129void trinity_release_mutex(struct radeon_device *rdev);
130
131#endif
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
new file mode 100644
index 000000000000..85f86a29513c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "trinityd.h"
27#include "trinity_dpm.h"
28#include "ppsmc.h"
29
30struct trinity_ps *trinity_get_ps(struct radeon_ps *rps);
31struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev);
32
33static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
34{
35 int i;
36 u32 v = 0;
37
38 WREG32(SMC_MESSAGE_0, id);
39 for (i = 0; i < rdev->usec_timeout; i++) {
40 if (RREG32(SMC_RESP_0) != 0)
41 break;
42 udelay(1);
43 }
44 v = RREG32(SMC_RESP_0);
45
46 if (v != 1) {
47 if (v == 0xFF) {
48 DRM_ERROR("SMC failed to handle the message!\n");
49 return -EINVAL;
50 } else if (v == 0xFE) {
51 DRM_ERROR("Unknown SMC message!\n");
52 return -EINVAL;
53 }
54 }
55
56 return 0;
57}
58
59int trinity_dpm_config(struct radeon_device *rdev, bool enable)
60{
61 if (enable)
62 WREG32_SMC(SMU_SCRATCH0, 1);
63 else
64 WREG32_SMC(SMU_SCRATCH0, 0);
65
66 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Config);
67}
68
/* Stage the target state index @n in SCRATCH0 and ask the SMC to force it. */
int trinity_dpm_force_state(struct radeon_device *rdev, u32 n)
{
	WREG32_SMC(SMU_SCRATCH0, n);

	return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DPM_ForceState);
}
75
/* Notify the SMC to apply the UVD DPM configuration. */
int trinity_uvd_dpm_config(struct radeon_device *rdev)
{
	return trinity_notify_message_to_smu(rdev, PPSMC_MSG_UVD_DPM_Config);
}
80
/* Release a previously forced DPM state (see trinity_dpm_force_state()). */
int trinity_dpm_no_forced_level(struct radeon_device *rdev)
{
	return trinity_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
}
85
86int trinity_dce_enable_voltage_adjustment(struct radeon_device *rdev,
87 bool enable)
88{
89 if (enable)
90 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DCE_AllowVoltageAdjustment);
91 else
92 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DCE_RemoveVoltageAdjustment);
93}
94
/* Ask the SMC to configure SIMD power gating. */
int trinity_gfx_dynamic_mgpg_config(struct radeon_device *rdev)
{
	return trinity_notify_message_to_smu(rdev, PPSMC_MSG_PG_SIMD_Config);
}
99
/*
 * trinity_acquire_mutex - request the SMC register mutex.
 *
 * Raises SMC_INT_REQ and busy-waits (up to rdev->usec_timeout us) for the
 * firmware's acknowledgement.  NOTE(review): a timeout is not reported;
 * the caller proceeds regardless — confirm this is acceptable.
 */
void trinity_acquire_mutex(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_INT_REQ, 1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(SMC_INT_REQ) & 0xffff) == 1)
			break;
		udelay(1);
	}
}
111
/* Drop the SMC register mutex acquired by trinity_acquire_mutex(). */
void trinity_release_mutex(struct radeon_device *rdev)
{
	WREG32(SMC_INT_REQ, 0);
}
diff --git a/drivers/gpu/drm/radeon/trinityd.h b/drivers/gpu/drm/radeon/trinityd.h
new file mode 100644
index 000000000000..fd32e2771755
--- /dev/null
+++ b/drivers/gpu/drm/radeon/trinityd.h
@@ -0,0 +1,228 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef _TRINITYD_H_
25#define _TRINITYD_H_
26
27/* pm registers */
28
29/* cg */
30#define CG_CGTT_LOCAL_0 0x0
31#define CG_CGTT_LOCAL_1 0x1
32
33/* smc */
34#define SMU_SCLK_DPM_STATE_0_CNTL_0 0x1f000
35# define STATE_VALID(x) ((x) << 0)
36# define STATE_VALID_MASK (0xff << 0)
37# define STATE_VALID_SHIFT 0
38# define CLK_DIVIDER(x) ((x) << 8)
39# define CLK_DIVIDER_MASK (0xff << 8)
40# define CLK_DIVIDER_SHIFT 8
41# define VID(x) ((x) << 16)
42# define VID_MASK (0xff << 16)
43# define VID_SHIFT 16
44# define LVRT(x) ((x) << 24)
45# define LVRT_MASK (0xff << 24)
46# define LVRT_SHIFT 24
47#define SMU_SCLK_DPM_STATE_0_CNTL_1 0x1f004
48# define DS_DIV(x) ((x) << 0)
49# define DS_DIV_MASK (0xff << 0)
50# define DS_DIV_SHIFT 0
51# define DS_SH_DIV(x) ((x) << 8)
52# define DS_SH_DIV_MASK (0xff << 8)
53# define DS_SH_DIV_SHIFT 8
54# define DISPLAY_WM(x) ((x) << 16)
55# define DISPLAY_WM_MASK (0xff << 16)
56# define DISPLAY_WM_SHIFT 16
57# define VCE_WM(x) ((x) << 24)
58# define VCE_WM_MASK (0xff << 24)
59# define VCE_WM_SHIFT 24
60
61#define SMU_SCLK_DPM_STATE_0_CNTL_3 0x1f00c
62# define GNB_SLOW(x) ((x) << 0)
63# define GNB_SLOW_MASK (0xff << 0)
64# define GNB_SLOW_SHIFT 0
65# define FORCE_NBPS1(x) ((x) << 8)
66# define FORCE_NBPS1_MASK (0xff << 8)
67# define FORCE_NBPS1_SHIFT 8
68#define SMU_SCLK_DPM_STATE_0_AT 0x1f010
69# define AT(x) ((x) << 0)
70# define AT_MASK (0xff << 0)
71# define AT_SHIFT 0
72
73#define SMU_SCLK_DPM_STATE_0_PG_CNTL 0x1f014
74# define PD_SCLK_DIVIDER(x) ((x) << 16)
75# define PD_SCLK_DIVIDER_MASK (0xff << 16)
76# define PD_SCLK_DIVIDER_SHIFT 16
77
78#define SMU_SCLK_DPM_STATE_1_CNTL_0 0x1f020
79
80#define SMU_SCLK_DPM_CNTL 0x1f100
81# define SCLK_DPM_EN(x) ((x) << 0)
82# define SCLK_DPM_EN_MASK (0xff << 0)
83# define SCLK_DPM_EN_SHIFT 0
84# define SCLK_DPM_BOOT_STATE(x) ((x) << 16)
85# define SCLK_DPM_BOOT_STATE_MASK (0xff << 16)
86# define SCLK_DPM_BOOT_STATE_SHIFT 16
87# define VOLTAGE_CHG_EN(x) ((x) << 24)
88# define VOLTAGE_CHG_EN_MASK (0xff << 24)
89# define VOLTAGE_CHG_EN_SHIFT 24
90
91#define SMU_SCLK_DPM_TT_CNTL 0x1f108
92# define SCLK_TT_EN(x) ((x) << 0)
93# define SCLK_TT_EN_MASK (0xff << 0)
94# define SCLK_TT_EN_SHIFT 0
95#define SMU_SCLK_DPM_TTT 0x1f10c
96# define LT(x) ((x) << 0)
97# define LT_MASK (0xffff << 0)
98# define LT_SHIFT 0
99# define HT(x) ((x) << 16)
100# define HT_MASK (0xffff << 16)
101# define HT_SHIFT 16
102
103#define SMU_UVD_DPM_STATES 0x1f1a0
104#define SMU_UVD_DPM_CNTL 0x1f1a4
105
106#define SMU_S_PG_CNTL 0x1f118
107# define DS_PG_EN(x) ((x) << 16)
108# define DS_PG_EN_MASK (0xff << 16)
109# define DS_PG_EN_SHIFT 16
110
111#define GFX_POWER_GATING_CNTL 0x1f38c
112# define PDS_DIV(x) ((x) << 0)
113# define PDS_DIV_MASK (0xff << 0)
114# define PDS_DIV_SHIFT 0
115# define SSSD(x) ((x) << 8)
116# define SSSD_MASK (0xff << 8)
117# define SSSD_SHIFT 8
118
119#define PM_CONFIG 0x1f428
120# define SVI_Mode (1 << 29)
121
122#define PM_I_CNTL_1 0x1f464
123# define SCLK_DPM(x) ((x) << 0)
124# define SCLK_DPM_MASK (0xff << 0)
125# define SCLK_DPM_SHIFT 0
126# define DS_PG_CNTL(x) ((x) << 16)
127# define DS_PG_CNTL_MASK (0xff << 16)
128# define DS_PG_CNTL_SHIFT 16
129#define PM_TP 0x1f468
130
131#define NB_PSTATE_CONFIG 0x1f5f8
132# define Dpm0PgNbPsLo(x) ((x) << 0)
133# define Dpm0PgNbPsLo_MASK (3 << 0)
134# define Dpm0PgNbPsLo_SHIFT 0
135# define Dpm0PgNbPsHi(x) ((x) << 2)
136# define Dpm0PgNbPsHi_MASK (3 << 2)
137# define Dpm0PgNbPsHi_SHIFT 2
138# define DpmXNbPsLo(x) ((x) << 4)
139# define DpmXNbPsLo_MASK (3 << 4)
140# define DpmXNbPsLo_SHIFT 4
141# define DpmXNbPsHi(x) ((x) << 6)
142# define DpmXNbPsHi_MASK (3 << 6)
143# define DpmXNbPsHi_SHIFT 6
144
145#define DC_CAC_VALUE 0x1f908
146
147#define GPU_CAC_AVRG_CNTL 0x1f920
148# define WINDOW_SIZE(x) ((x) << 0)
149# define WINDOW_SIZE_MASK (0xff << 0)
150# define WINDOW_SIZE_SHIFT 0
151
152#define CC_SMU_MISC_FUSES 0xe0001004
153# define MinSClkDid(x) ((x) << 2)
154# define MinSClkDid_MASK (0x7f << 2)
155# define MinSClkDid_SHIFT 2
156
157#define CC_SMU_TST_EFUSE1_MISC 0xe000101c
158# define RB_BACKEND_DISABLE(x) ((x) << 16)
159# define RB_BACKEND_DISABLE_MASK (3 << 16)
160# define RB_BACKEND_DISABLE_SHIFT 16
161
162#define SMU_SCRATCH_A 0xe0003024
163
164#define SMU_SCRATCH0 0xe0003040
165
166/* mmio */
167#define SMC_INT_REQ 0x220
168
169#define SMC_MESSAGE_0 0x22c
170#define SMC_RESP_0 0x230
171
172#define GENERAL_PWRMGT 0x670
173# define GLOBAL_PWRMGT_EN (1 << 0)
174
175#define SCLK_PWRMGT_CNTL 0x678
176# define DYN_PWR_DOWN_EN (1 << 2)
177# define RESET_BUSY_CNT (1 << 4)
178# define RESET_SCLK_CNT (1 << 5)
179# define DYN_GFX_CLK_OFF_EN (1 << 7)
180# define GFX_CLK_FORCE_ON (1 << 8)
181# define DYNAMIC_PM_EN (1 << 21)
182
183#define TARGET_AND_CURRENT_PROFILE_INDEX 0x684
184# define TARGET_STATE(x) ((x) << 0)
185# define TARGET_STATE_MASK (0xf << 0)
186# define TARGET_STATE_SHIFT 0
187# define CURRENT_STATE(x) ((x) << 4)
188# define CURRENT_STATE_MASK (0xf << 4)
189# define CURRENT_STATE_SHIFT 4
190
191#define CG_GIPOTS 0x6d8
192# define CG_GIPOT(x) ((x) << 16)
193# define CG_GIPOT_MASK (0xffff << 16)
194# define CG_GIPOT_SHIFT 16
195
196#define CG_PG_CTRL 0x6e0
197# define SP(x) ((x) << 0)
198# define SP_MASK (0xffff << 0)
199# define SP_SHIFT 0
200# define SU(x) ((x) << 16)
201# define SU_MASK (0xffff << 16)
202# define SU_SHIFT 16
203
204#define CG_MISC_REG 0x708
205
206#define CG_THERMAL_INT_CTRL 0x738
207# define DIG_THERM_INTH(x) ((x) << 0)
208# define DIG_THERM_INTH_MASK (0xff << 0)
209# define DIG_THERM_INTH_SHIFT 0
210# define DIG_THERM_INTL(x) ((x) << 8)
211# define DIG_THERM_INTL_MASK (0xff << 8)
212# define DIG_THERM_INTL_SHIFT 8
213# define THERM_INTH_MASK (1 << 24)
214# define THERM_INTL_MASK (1 << 25)
215
216#define CG_CG_VOLTAGE_CNTL 0x770
217# define EN (1 << 9)
218
219#define HW_REV 0x5564
220# define ATI_REV_ID_MASK (0xf << 28)
221# define ATI_REV_ID_SHIFT 28
222/* 0 = A0, 1 = A1, 2 = B0, 3 = C0, etc. */
223
224#define CGTS_SM_CTRL_REG 0x9150
225
226#define GB_ADDR_CONFIG 0x98f8
227
228#endif
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index 0ead502e17d2..f5e1168c7647 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -20,10 +20,13 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: Dave Airlie 22 * Authors: Dave Airlie
23 * Christian König
23 */ 24 */
24#ifndef DRM_FIXED_H 25#ifndef DRM_FIXED_H
25#define DRM_FIXED_H 26#define DRM_FIXED_H
26 27
28#include <linux/math64.h>
29
27typedef union dfixed { 30typedef union dfixed {
28 u32 full; 31 u32 full;
29} fixed20_12; 32} fixed20_12;
@@ -65,4 +68,95 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
65 tmp /= 2; 68 tmp /= 2;
66 return lower_32_bits(tmp); 69 return lower_32_bits(tmp);
67} 70}
71
/* s64-based 32.32 fixed-point helpers: 32 integer bits, 32 fractional bits. */
#define DRM_FIXED_POINT		32			/* fractional bit count */
#define DRM_FIXED_ONE		(1ULL << DRM_FIXED_POINT)	/* 1.0 in 32.32 */
#define DRM_FIXED_DECIMAL_MASK	(DRM_FIXED_ONE - 1)	/* fractional-part bits */
#define DRM_FIXED_DIGITS_MASK	(~DRM_FIXED_DECIMAL_MASK)	/* integer-part bits */
76
77static inline s64 drm_int2fixp(int a)
78{
79 return ((s64)a) << DRM_FIXED_POINT;
80}
81
82static inline int drm_fixp2int(int64_t a)
83{
84 return ((s64)a) >> DRM_FIXED_POINT;
85}
86
87static inline s64 drm_fixp_msbset(int64_t a)
88{
89 unsigned shift, sign = (a >> 63) & 1;
90
91 for (shift = 62; shift > 0; --shift)
92 if ((a >> shift) != sign)
93 return shift;
94
95 return 0;
96}
97
98static inline s64 drm_fixp_mul(s64 a, s64 b)
99{
100 unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b);
101 s64 result;
102
103 if (shift > 63) {
104 shift = shift - 63;
105 a >>= shift >> 1;
106 b >>= shift >> 1;
107 } else
108 shift = 0;
109
110 result = a * b;
111
112 if (shift > DRM_FIXED_POINT)
113 return result << (shift - DRM_FIXED_POINT);
114
115 if (shift < DRM_FIXED_POINT)
116 return result >> (DRM_FIXED_POINT - shift);
117
118 return result;
119}
120
121static inline s64 drm_fixp_div(s64 a, s64 b)
122{
123 unsigned shift = 63 - drm_fixp_msbset(a);
124 s64 result;
125
126 a <<= shift;
127
128 if (shift < DRM_FIXED_POINT)
129 b >>= (DRM_FIXED_POINT - shift);
130
131 result = div64_s64(a, b);
132
133 if (shift > DRM_FIXED_POINT)
134 return result >> (shift - DRM_FIXED_POINT);
135
136 return result;
137}
138
139static inline s64 drm_fixp_exp(s64 x)
140{
141 s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
142 s64 sum = DRM_FIXED_ONE, term, y = x;
143 u64 count = 1;
144
145 if (x < 0)
146 y = -1 * x;
147
148 term = y;
149
150 while (term >= tolerance) {
151 sum = sum + term;
152 count = count + 1;
153 term = drm_fixp_mul(term, div64_s64(y, count));
154 }
155
156 if (x < 0)
157 sum = drm_fixp_div(1, sum);
158
159 return sum;
160}
161
68#endif 162#endif
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index bb1bc485390b..34efaf64cc87 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -152,6 +152,14 @@
152 {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 152 {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
153 {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 153 {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
154 {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 154 {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
155 {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
156 {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
157 {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
158 {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
159 {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
160 {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
161 {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
162 {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
155 {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 163 {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
156 {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 164 {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
157 {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 165 {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -580,6 +588,22 @@
580 {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 588 {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
581 {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 589 {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
582 {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 590 {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
591 {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
592 {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
593 {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
594 {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
595 {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
596 {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
597 {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
598 {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
599 {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
600 {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
601 {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
602 {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
603 {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
604 {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
605 {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
606 {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
583 {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 607 {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
584 {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 608 {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
585 {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 609 {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \