diff options
author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/gpu/drm/radeon | |
parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) |
Merge branch 'mpi-master' into wip-k-fmlpwip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/gpu/drm/radeon')
93 files changed, 15459 insertions, 3782 deletions
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig index 1c02d23f6fcc..ea92bbe3ed37 100644 --- a/drivers/gpu/drm/radeon/Kconfig +++ b/drivers/gpu/drm/radeon/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config DRM_RADEON_KMS | 1 | config DRM_RADEON_KMS |
2 | bool "Enable modesetting on radeon by default - NEW DRIVER" | 2 | bool "Enable modesetting on radeon by default - NEW DRIVER" |
3 | depends on DRM_RADEON | 3 | depends on DRM_RADEON |
4 | select BACKLIGHT_CLASS_DEVICE | ||
4 | help | 5 | help |
5 | Choose this option if you want kernel modesetting enabled by default. | 6 | Choose this option if you want kernel modesetting enabled by default. |
6 | 7 | ||
@@ -27,11 +28,4 @@ config DRM_RADEON_KMS | |||
27 | The kernel will also perform security check on command stream | 28 | The kernel will also perform security check on command stream |
28 | provided by the user, we want to catch and forbid any illegal use | 29 | provided by the user, we want to catch and forbid any illegal use |
29 | of the GPU such as DMA into random system memory or into memory | 30 | of the GPU such as DMA into random system memory or into memory |
30 | not owned by the process supplying the command stream. This part | 31 | not owned by the process supplying the command stream. |
31 | of the code is still incomplete and this why we propose that patch | ||
32 | as a staging driver addition, future security might forbid current | ||
33 | experimental userspace to run. | ||
34 | |||
35 | This code support the following hardware : R1XX,R2XX,R3XX,R4XX,R5XX | ||
36 | (radeon up to X1950). Works is underway to provide support for R6XX, | ||
37 | R7XX and newer hardware (radeon from HD2XXX to HD4XXX). | ||
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index aebe00875041..3896ef811102 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -36,6 +36,9 @@ $(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable | |||
36 | $(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable | 36 | $(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable |
37 | $(call if_changed,mkregtable) | 37 | $(call if_changed,mkregtable) |
38 | 38 | ||
39 | $(obj)/cayman_reg_safe.h: $(src)/reg_srcs/cayman $(obj)/mkregtable | ||
40 | $(call if_changed,mkregtable) | ||
41 | |||
39 | $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h | 42 | $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h |
40 | 43 | ||
41 | $(obj)/r200.o: $(obj)/r200_reg_safe.h | 44 | $(obj)/r200.o: $(obj)/r200_reg_safe.h |
@@ -50,7 +53,7 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h | |||
50 | 53 | ||
51 | $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h | 54 | $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h |
52 | 55 | ||
53 | $(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h | 56 | $(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h |
54 | 57 | ||
55 | radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ | 58 | radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ |
56 | radeon_irq.o r300_cmdbuf.o r600_cp.o | 59 | radeon_irq.o r300_cmdbuf.o r600_cp.o |
@@ -65,10 +68,13 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ | |||
65 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ | 68 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ |
66 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ | 69 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ |
67 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ | 70 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ |
68 | evergreen.o evergreen_cs.o | 71 | evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ |
72 | radeon_trace_points.o ni.o cayman_blit_shaders.o | ||
69 | 73 | ||
70 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 74 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
71 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o | 75 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o |
72 | radeon-$(CONFIG_ACPI) += radeon_acpi.o | 76 | radeon-$(CONFIG_ACPI) += radeon_acpi.o |
73 | 77 | ||
74 | obj-$(CONFIG_DRM_RADEON)+= radeon.o | 78 | obj-$(CONFIG_DRM_RADEON)+= radeon.o |
79 | |||
80 | CFLAGS_radeon_trace_points.o := -I$(src) \ No newline at end of file | ||
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h index c714179d1bfa..c61c3fe9fb98 100644 --- a/drivers/gpu/drm/radeon/ObjectID.h +++ b/drivers/gpu/drm/radeon/ObjectID.h | |||
@@ -37,6 +37,8 @@ | |||
37 | #define GRAPH_OBJECT_TYPE_CONNECTOR 0x3 | 37 | #define GRAPH_OBJECT_TYPE_CONNECTOR 0x3 |
38 | #define GRAPH_OBJECT_TYPE_ROUTER 0x4 | 38 | #define GRAPH_OBJECT_TYPE_ROUTER 0x4 |
39 | /* deleted */ | 39 | /* deleted */ |
40 | #define GRAPH_OBJECT_TYPE_DISPLAY_PATH 0x6 | ||
41 | #define GRAPH_OBJECT_TYPE_GENERIC 0x7 | ||
40 | 42 | ||
41 | /****************************************************/ | 43 | /****************************************************/ |
42 | /* Encoder Object ID Definition */ | 44 | /* Encoder Object ID Definition */ |
@@ -64,6 +66,9 @@ | |||
64 | #define ENCODER_OBJECT_ID_VT1623 0x10 | 66 | #define ENCODER_OBJECT_ID_VT1623 0x10 |
65 | #define ENCODER_OBJECT_ID_HDMI_SI1930 0x11 | 67 | #define ENCODER_OBJECT_ID_HDMI_SI1930 0x11 |
66 | #define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12 | 68 | #define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12 |
69 | #define ENCODER_OBJECT_ID_ALMOND 0x22 | ||
70 | #define ENCODER_OBJECT_ID_TRAVIS 0x23 | ||
71 | #define ENCODER_OBJECT_ID_NUTMEG 0x22 | ||
67 | /* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */ | 72 | /* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */ |
68 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13 | 73 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13 |
69 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14 | 74 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14 |
@@ -108,6 +113,7 @@ | |||
108 | #define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13 | 113 | #define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13 |
109 | #define CONNECTOR_OBJECT_ID_eDP 0x14 | 114 | #define CONNECTOR_OBJECT_ID_eDP 0x14 |
110 | #define CONNECTOR_OBJECT_ID_MXM 0x15 | 115 | #define CONNECTOR_OBJECT_ID_MXM 0x15 |
116 | #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 | ||
111 | 117 | ||
112 | /* deleted */ | 118 | /* deleted */ |
113 | 119 | ||
@@ -124,6 +130,7 @@ | |||
124 | #define GENERIC_OBJECT_ID_GLSYNC 0x01 | 130 | #define GENERIC_OBJECT_ID_GLSYNC 0x01 |
125 | #define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02 | 131 | #define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02 |
126 | #define GENERIC_OBJECT_ID_MXM_OPM 0x03 | 132 | #define GENERIC_OBJECT_ID_MXM_OPM 0x03 |
133 | #define GENERIC_OBJECT_ID_STEREO_PIN 0x04 //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin | ||
127 | 134 | ||
128 | /****************************************************/ | 135 | /****************************************************/ |
129 | /* Graphics Object ENUM ID Definition */ | 136 | /* Graphics Object ENUM ID Definition */ |
@@ -360,6 +367,26 @@ | |||
360 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 367 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
361 | ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT) | 368 | ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT) |
362 | 369 | ||
370 | #define ENCODER_ALMOND_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
371 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
372 | ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT) | ||
373 | |||
374 | #define ENCODER_ALMOND_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
375 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | ||
376 | ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT) | ||
377 | |||
378 | #define ENCODER_TRAVIS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
379 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
380 | ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT) | ||
381 | |||
382 | #define ENCODER_TRAVIS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
383 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | ||
384 | ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT) | ||
385 | |||
386 | #define ENCODER_NUTMEG_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
387 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
388 | ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT) | ||
389 | |||
363 | /****************************************************/ | 390 | /****************************************************/ |
364 | /* Connector Object ID definition - Shared with BIOS */ | 391 | /* Connector Object ID definition - Shared with BIOS */ |
365 | /****************************************************/ | 392 | /****************************************************/ |
@@ -421,6 +448,14 @@ | |||
421 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 448 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
422 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) | 449 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) |
423 | 450 | ||
451 | #define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
452 | GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ | ||
453 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) | ||
454 | |||
455 | #define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
456 | GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\ | ||
457 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) | ||
458 | |||
424 | #define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 459 | #define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
425 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 460 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
426 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) | 461 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) |
@@ -512,6 +547,7 @@ | |||
512 | #define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 547 | #define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
513 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 548 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
514 | CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) | 549 | CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) |
550 | |||
515 | #define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 551 | #define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
516 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 552 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
517 | CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) | 553 | CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) |
@@ -593,6 +629,14 @@ | |||
593 | GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\ | 629 | GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\ |
594 | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC | 630 | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC |
595 | 631 | ||
632 | #define CONNECTOR_LVDS_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
633 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
634 | CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT) | ||
635 | |||
636 | #define CONNECTOR_LVDS_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
637 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | ||
638 | CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT) | ||
639 | |||
596 | /****************************************************/ | 640 | /****************************************************/ |
597 | /* Router Object ID definition - Shared with BIOS */ | 641 | /* Router Object ID definition - Shared with BIOS */ |
598 | /****************************************************/ | 642 | /****************************************************/ |
@@ -621,6 +665,10 @@ | |||
621 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 665 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
622 | GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT) | 666 | GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT) |
623 | 667 | ||
668 | #define GENERICOBJECT_STEREO_PIN_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\ | ||
669 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
670 | GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT) | ||
671 | |||
624 | /****************************************************/ | 672 | /****************************************************/ |
625 | /* Object Cap definition - Shared with BIOS */ | 673 | /* Object Cap definition - Shared with BIOS */ |
626 | /****************************************************/ | 674 | /****************************************************/ |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 8e421f644a54..ebdb0fdb8348 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include "atom.h" | 32 | #include "atom.h" |
33 | #include "atom-names.h" | 33 | #include "atom-names.h" |
34 | #include "atom-bits.h" | 34 | #include "atom-bits.h" |
35 | #include "radeon.h" | ||
35 | 36 | ||
36 | #define ATOM_COND_ABOVE 0 | 37 | #define ATOM_COND_ABOVE 0 |
37 | #define ATOM_COND_ABOVEOREQUAL 1 | 38 | #define ATOM_COND_ABOVEOREQUAL 1 |
@@ -101,7 +102,9 @@ static void debug_print_spaces(int n) | |||
101 | static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | 102 | static uint32_t atom_iio_execute(struct atom_context *ctx, int base, |
102 | uint32_t index, uint32_t data) | 103 | uint32_t index, uint32_t data) |
103 | { | 104 | { |
105 | struct radeon_device *rdev = ctx->card->dev->dev_private; | ||
104 | uint32_t temp = 0xCDCDCDCD; | 106 | uint32_t temp = 0xCDCDCDCD; |
107 | |||
105 | while (1) | 108 | while (1) |
106 | switch (CU8(base)) { | 109 | switch (CU8(base)) { |
107 | case ATOM_IIO_NOP: | 110 | case ATOM_IIO_NOP: |
@@ -112,6 +115,8 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | |||
112 | base += 3; | 115 | base += 3; |
113 | break; | 116 | break; |
114 | case ATOM_IIO_WRITE: | 117 | case ATOM_IIO_WRITE: |
118 | if (rdev->family == CHIP_RV515) | ||
119 | (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); | ||
115 | ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); | 120 | ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); |
116 | base += 3; | 121 | base += 3; |
117 | break; | 122 | break; |
@@ -130,7 +135,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | |||
130 | case ATOM_IIO_MOVE_INDEX: | 135 | case ATOM_IIO_MOVE_INDEX: |
131 | temp &= | 136 | temp &= |
132 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << | 137 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << |
133 | CU8(base + 2)); | 138 | CU8(base + 3)); |
134 | temp |= | 139 | temp |= |
135 | ((index >> CU8(base + 2)) & | 140 | ((index >> CU8(base + 2)) & |
136 | (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + | 141 | (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + |
@@ -140,7 +145,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | |||
140 | case ATOM_IIO_MOVE_DATA: | 145 | case ATOM_IIO_MOVE_DATA: |
141 | temp &= | 146 | temp &= |
142 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << | 147 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << |
143 | CU8(base + 2)); | 148 | CU8(base + 3)); |
144 | temp |= | 149 | temp |= |
145 | ((data >> CU8(base + 2)) & | 150 | ((data >> CU8(base + 2)) & |
146 | (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + | 151 | (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + |
@@ -150,7 +155,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | |||
150 | case ATOM_IIO_MOVE_ATTR: | 155 | case ATOM_IIO_MOVE_ATTR: |
151 | temp &= | 156 | temp &= |
152 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << | 157 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << |
153 | CU8(base + 2)); | 158 | CU8(base + 3)); |
154 | temp |= | 159 | temp |= |
155 | ((ctx-> | 160 | ((ctx-> |
156 | io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - | 161 | io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - |
@@ -647,12 +652,12 @@ static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg) | |||
647 | 652 | ||
648 | static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) | 653 | static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) |
649 | { | 654 | { |
650 | uint8_t count = U8((*ptr)++); | 655 | unsigned count = U8((*ptr)++); |
651 | SDEBUG(" count: %d\n", count); | 656 | SDEBUG(" count: %d\n", count); |
652 | if (arg == ATOM_UNIT_MICROSEC) | 657 | if (arg == ATOM_UNIT_MICROSEC) |
653 | udelay(count); | 658 | udelay(count); |
654 | else | 659 | else |
655 | schedule_timeout_uninterruptible(msecs_to_jiffies(count)); | 660 | msleep(count); |
656 | } | 661 | } |
657 | 662 | ||
658 | static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg) | 663 | static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg) |
@@ -733,16 +738,16 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) | |||
733 | static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) | 738 | static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) |
734 | { | 739 | { |
735 | uint8_t attr = U8((*ptr)++); | 740 | uint8_t attr = U8((*ptr)++); |
736 | uint32_t dst, src1, src2, saved; | 741 | uint32_t dst, mask, src, saved; |
737 | int dptr = *ptr; | 742 | int dptr = *ptr; |
738 | SDEBUG(" dst: "); | 743 | SDEBUG(" dst: "); |
739 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 744 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
740 | SDEBUG(" src1: "); | 745 | mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); |
741 | src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); | 746 | SDEBUG(" mask: 0x%08x", mask); |
742 | SDEBUG(" src2: "); | 747 | SDEBUG(" src: "); |
743 | src2 = atom_get_src(ctx, attr, ptr); | 748 | src = atom_get_src(ctx, attr, ptr); |
744 | dst &= src1; | 749 | dst &= mask; |
745 | dst |= src2; | 750 | dst |= src; |
746 | SDEBUG(" dst: "); | 751 | SDEBUG(" dst: "); |
747 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | 752 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
748 | } | 753 | } |
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index fe359a239df3..1b50ad8919d5 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h | |||
@@ -73,8 +73,18 @@ | |||
73 | #define ATOM_PPLL1 0 | 73 | #define ATOM_PPLL1 0 |
74 | #define ATOM_PPLL2 1 | 74 | #define ATOM_PPLL2 1 |
75 | #define ATOM_DCPLL 2 | 75 | #define ATOM_DCPLL 2 |
76 | #define ATOM_PPLL0 2 | ||
77 | #define ATOM_EXT_PLL1 8 | ||
78 | #define ATOM_EXT_PLL2 9 | ||
79 | #define ATOM_EXT_CLOCK 10 | ||
76 | #define ATOM_PPLL_INVALID 0xFF | 80 | #define ATOM_PPLL_INVALID 0xFF |
77 | 81 | ||
82 | #define ENCODER_REFCLK_SRC_P1PLL 0 | ||
83 | #define ENCODER_REFCLK_SRC_P2PLL 1 | ||
84 | #define ENCODER_REFCLK_SRC_DCPLL 2 | ||
85 | #define ENCODER_REFCLK_SRC_EXTCLK 3 | ||
86 | #define ENCODER_REFCLK_SRC_INVALID 0xFF | ||
87 | |||
78 | #define ATOM_SCALER1 0 | 88 | #define ATOM_SCALER1 0 |
79 | #define ATOM_SCALER2 1 | 89 | #define ATOM_SCALER2 1 |
80 | 90 | ||
@@ -192,6 +202,9 @@ typedef struct _ATOM_COMMON_TABLE_HEADER | |||
192 | /*Image can't be updated, while Driver needs to carry the new table! */ | 202 | /*Image can't be updated, while Driver needs to carry the new table! */ |
193 | }ATOM_COMMON_TABLE_HEADER; | 203 | }ATOM_COMMON_TABLE_HEADER; |
194 | 204 | ||
205 | /****************************************************************************/ | ||
206 | // Structure stores the ROM header. | ||
207 | /****************************************************************************/ | ||
195 | typedef struct _ATOM_ROM_HEADER | 208 | typedef struct _ATOM_ROM_HEADER |
196 | { | 209 | { |
197 | ATOM_COMMON_TABLE_HEADER sHeader; | 210 | ATOM_COMMON_TABLE_HEADER sHeader; |
@@ -221,6 +234,9 @@ typedef struct _ATOM_ROM_HEADER | |||
221 | #define USHORT void* | 234 | #define USHORT void* |
222 | #endif | 235 | #endif |
223 | 236 | ||
237 | /****************************************************************************/ | ||
238 | // Structures used in Command.mtb | ||
239 | /****************************************************************************/ | ||
224 | typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{ | 240 | typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{ |
225 | USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1 | 241 | USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1 |
226 | USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON | 242 | USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON |
@@ -312,6 +328,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{ | |||
312 | #define SetUniphyInstance ASIC_StaticPwrMgtStatusChange | 328 | #define SetUniphyInstance ASIC_StaticPwrMgtStatusChange |
313 | #define HPDInterruptService ReadHWAssistedI2CStatus | 329 | #define HPDInterruptService ReadHWAssistedI2CStatus |
314 | #define EnableVGA_Access GetSCLKOverMCLKRatio | 330 | #define EnableVGA_Access GetSCLKOverMCLKRatio |
331 | #define GetDispObjectInfo EnableYUV | ||
315 | 332 | ||
316 | typedef struct _ATOM_MASTER_COMMAND_TABLE | 333 | typedef struct _ATOM_MASTER_COMMAND_TABLE |
317 | { | 334 | { |
@@ -357,6 +374,24 @@ typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER | |||
357 | /****************************************************************************/ | 374 | /****************************************************************************/ |
358 | #define COMPUTE_MEMORY_PLL_PARAM 1 | 375 | #define COMPUTE_MEMORY_PLL_PARAM 1 |
359 | #define COMPUTE_ENGINE_PLL_PARAM 2 | 376 | #define COMPUTE_ENGINE_PLL_PARAM 2 |
377 | #define ADJUST_MC_SETTING_PARAM 3 | ||
378 | |||
379 | /****************************************************************************/ | ||
380 | // Structures used by AdjustMemoryControllerTable | ||
381 | /****************************************************************************/ | ||
382 | typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ | ||
383 | { | ||
384 | #if ATOM_BIG_ENDIAN | ||
385 | ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block | ||
386 | ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0] | ||
387 | ULONG ulClockFreq:24; | ||
388 | #else | ||
389 | ULONG ulClockFreq:24; | ||
390 | ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0] | ||
391 | ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block | ||
392 | #endif | ||
393 | }ATOM_ADJUST_MEMORY_CLOCK_FREQ; | ||
394 | #define POINTER_RETURN_FLAG 0x80 | ||
360 | 395 | ||
361 | typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS | 396 | typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS |
362 | { | 397 | { |
@@ -440,6 +475,26 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 | |||
440 | #endif | 475 | #endif |
441 | }COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4; | 476 | }COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4; |
442 | 477 | ||
478 | typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 | ||
479 | { | ||
480 | union | ||
481 | { | ||
482 | ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter | ||
483 | ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter | ||
484 | }; | ||
485 | UCHAR ucRefDiv; //Output Parameter | ||
486 | UCHAR ucPostDiv; //Output Parameter | ||
487 | union | ||
488 | { | ||
489 | UCHAR ucCntlFlag; //Output Flags | ||
490 | UCHAR ucInputFlag; //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode | ||
491 | }; | ||
492 | UCHAR ucReserved; | ||
493 | }COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5; | ||
494 | |||
495 | // ucInputFlag | ||
496 | #define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode | ||
497 | |||
443 | typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER | 498 | typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER |
444 | { | 499 | { |
445 | ATOM_COMPUTE_CLOCK_FREQ ulClock; | 500 | ATOM_COMPUTE_CLOCK_FREQ ulClock; |
@@ -583,6 +638,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS | |||
583 | #define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01 | 638 | #define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01 |
584 | #define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00 | 639 | #define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00 |
585 | #define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01 | 640 | #define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01 |
641 | #define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ 0x02 | ||
586 | #define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04 | 642 | #define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04 |
587 | #define ATOM_ENCODER_CONFIG_LINKA 0x00 | 643 | #define ATOM_ENCODER_CONFIG_LINKA 0x00 |
588 | #define ATOM_ENCODER_CONFIG_LINKB 0x04 | 644 | #define ATOM_ENCODER_CONFIG_LINKB 0x04 |
@@ -608,6 +664,9 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS | |||
608 | #define ATOM_ENCODER_MODE_TV 13 | 664 | #define ATOM_ENCODER_MODE_TV 13 |
609 | #define ATOM_ENCODER_MODE_CV 14 | 665 | #define ATOM_ENCODER_MODE_CV 14 |
610 | #define ATOM_ENCODER_MODE_CRT 15 | 666 | #define ATOM_ENCODER_MODE_CRT 15 |
667 | #define ATOM_ENCODER_MODE_DVO 16 | ||
668 | #define ATOM_ENCODER_MODE_DP_SST ATOM_ENCODER_MODE_DP // For DP1.2 | ||
669 | #define ATOM_ENCODER_MODE_DP_MST 5 // For DP1.2 | ||
611 | 670 | ||
612 | typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 | 671 | typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 |
613 | { | 672 | { |
@@ -661,52 +720,126 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 | |||
661 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08 | 720 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08 |
662 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09 | 721 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09 |
663 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a | 722 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a |
723 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3 0x13 | ||
664 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b | 724 | #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b |
665 | #define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c | 725 | #define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c |
666 | #define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d | 726 | #define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d |
667 | #define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e | 727 | #define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e |
668 | #define ATOM_ENCODER_CMD_SETUP 0x0f | 728 | #define ATOM_ENCODER_CMD_SETUP 0x0f |
729 | #define ATOM_ENCODER_CMD_SETUP_PANEL_MODE 0x10 | ||
669 | 730 | ||
670 | // ucStatus | 731 | // ucStatus |
671 | #define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10 | 732 | #define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10 |
672 | #define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00 | 733 | #define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00 |
673 | 734 | ||
735 | //ucTableFormatRevision=1 | ||
736 | //ucTableContentRevision=3 | ||
674 | // Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver | 737 | // Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver |
675 | typedef struct _ATOM_DIG_ENCODER_CONFIG_V3 | 738 | typedef struct _ATOM_DIG_ENCODER_CONFIG_V3 |
676 | { | 739 | { |
677 | #if ATOM_BIG_ENDIAN | 740 | #if ATOM_BIG_ENDIAN |
678 | UCHAR ucReserved1:1; | 741 | UCHAR ucReserved1:1; |
679 | UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F | 742 | UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F) |
680 | UCHAR ucReserved:3; | 743 | UCHAR ucReserved:3; |
681 | UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz | 744 | UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz |
682 | #else | 745 | #else |
683 | UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz | 746 | UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz |
684 | UCHAR ucReserved:3; | 747 | UCHAR ucReserved:3; |
685 | UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F | 748 | UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F) |
686 | UCHAR ucReserved1:1; | 749 | UCHAR ucReserved1:1; |
687 | #endif | 750 | #endif |
688 | }ATOM_DIG_ENCODER_CONFIG_V3; | 751 | }ATOM_DIG_ENCODER_CONFIG_V3; |
689 | 752 | ||
753 | #define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03 | ||
754 | #define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00 | ||
755 | #define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01 | ||
690 | #define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70 | 756 | #define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70 |
691 | 757 | #define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER 0x00 | |
758 | #define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER 0x10 | ||
759 | #define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER 0x20 | ||
760 | #define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER 0x30 | ||
761 | #define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER 0x40 | ||
762 | #define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER 0x50 | ||
692 | 763 | ||
693 | typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3 | 764 | typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3 |
694 | { | 765 | { |
695 | USHORT usPixelClock; // in 10KHz; for bios convenient | 766 | USHORT usPixelClock; // in 10KHz; for bios convenient |
696 | ATOM_DIG_ENCODER_CONFIG_V3 acConfig; | 767 | ATOM_DIG_ENCODER_CONFIG_V3 acConfig; |
697 | UCHAR ucAction; | 768 | UCHAR ucAction; |
698 | UCHAR ucEncoderMode; | 769 | union { |
770 | UCHAR ucEncoderMode; | ||
699 | // =0: DP encoder | 771 | // =0: DP encoder |
700 | // =1: LVDS encoder | 772 | // =1: LVDS encoder |
701 | // =2: DVI encoder | 773 | // =2: DVI encoder |
702 | // =3: HDMI encoder | 774 | // =3: HDMI encoder |
703 | // =4: SDVO encoder | 775 | // =4: SDVO encoder |
704 | // =5: DP audio | 776 | // =5: DP audio |
777 | UCHAR ucPanelMode; // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE | ||
778 | // =0: external DP | ||
779 | // =1: internal DP2 | ||
780 | // =0x11: internal DP1 for NutMeg/Travis DP translator | ||
781 | }; | ||
705 | UCHAR ucLaneNum; // how many lanes to enable | 782 | UCHAR ucLaneNum; // how many lanes to enable |
706 | UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP | 783 | UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP |
707 | UCHAR ucReserved; | 784 | UCHAR ucReserved; |
708 | }DIG_ENCODER_CONTROL_PARAMETERS_V3; | 785 | }DIG_ENCODER_CONTROL_PARAMETERS_V3; |
709 | 786 | ||
787 | //ucTableFormatRevision=1 | ||
788 | //ucTableContentRevision=4 | ||
789 | // start from NI | ||
790 | // Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver | ||
791 | typedef struct _ATOM_DIG_ENCODER_CONFIG_V4 | ||
792 | { | ||
793 | #if ATOM_BIG_ENDIAN | ||
794 | UCHAR ucReserved1:1; | ||
795 | UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F) | ||
796 | UCHAR ucReserved:2; | ||
797 | UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz <= Changed comparing to previous version | ||
798 | #else | ||
799 | UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz <= Changed comparing to previous version | ||
800 | UCHAR ucReserved:2; | ||
801 | UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F) | ||
802 | UCHAR ucReserved1:1; | ||
803 | #endif | ||
804 | }ATOM_DIG_ENCODER_CONFIG_V4; | ||
805 | |||
806 | #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK 0x03 | ||
807 | #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00 | ||
808 | #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01 | ||
809 | #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02 | ||
810 | #define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70 | ||
811 | #define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00 | ||
812 | #define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10 | ||
813 | #define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER 0x20 | ||
814 | #define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30 | ||
815 | #define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40 | ||
816 | #define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50 | ||
817 | |||
818 | typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4 | ||
819 | { | ||
820 | USHORT usPixelClock; // in 10KHz; for bios convenient | ||
821 | union{ | ||
822 | ATOM_DIG_ENCODER_CONFIG_V4 acConfig; | ||
823 | UCHAR ucConfig; | ||
824 | }; | ||
825 | UCHAR ucAction; | ||
826 | union { | ||
827 | UCHAR ucEncoderMode; | ||
828 | // =0: DP encoder | ||
829 | // =1: LVDS encoder | ||
830 | // =2: DVI encoder | ||
831 | // =3: HDMI encoder | ||
832 | // =4: SDVO encoder | ||
833 | // =5: DP audio | ||
834 | UCHAR ucPanelMode; // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE | ||
835 | // =0: external DP | ||
836 | // =1: internal DP2 | ||
837 | // =0x11: internal DP1 for NutMeg/Travis DP translator | ||
838 | }; | ||
839 | UCHAR ucLaneNum; // how many lanes to enable | ||
840 | UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP | ||
841 | UCHAR ucHPD_ID; // HPD ID (1-6). =0 means to skip HDP programming. New comparing to previous version | ||
842 | }DIG_ENCODER_CONTROL_PARAMETERS_V4; | ||
710 | 843 | ||
711 | // define ucBitPerColor: | 844 | // define ucBitPerColor: |
712 | #define PANEL_BPC_UNDEFINE 0x00 | 845 | #define PANEL_BPC_UNDEFINE 0x00 |
@@ -716,6 +849,11 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3 | |||
716 | #define PANEL_12BIT_PER_COLOR 0x04 | 849 | #define PANEL_12BIT_PER_COLOR 0x04 |
717 | #define PANEL_16BIT_PER_COLOR 0x05 | 850 | #define PANEL_16BIT_PER_COLOR 0x05 |
718 | 851 | ||
852 | //define ucPanelMode | ||
853 | #define DP_PANEL_MODE_EXTERNAL_DP_MODE 0x00 | ||
854 | #define DP_PANEL_MODE_INTERNAL_DP2_MODE 0x01 | ||
855 | #define DP_PANEL_MODE_INTERNAL_DP1_MODE 0x11 | ||
856 | |||
719 | /****************************************************************************/ | 857 | /****************************************************************************/ |
720 | // Structures used by UNIPHYTransmitterControlTable | 858 | // Structures used by UNIPHYTransmitterControlTable |
721 | // LVTMATransmitterControlTable | 859 | // LVTMATransmitterControlTable |
@@ -893,6 +1031,7 @@ typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3 | |||
893 | #endif | 1031 | #endif |
894 | }ATOM_DIG_TRANSMITTER_CONFIG_V3; | 1032 | }ATOM_DIG_TRANSMITTER_CONFIG_V3; |
895 | 1033 | ||
1034 | |||
896 | typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 | 1035 | typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 |
897 | { | 1036 | { |
898 | union | 1037 | union |
@@ -936,6 +1075,150 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 | |||
936 | #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD | 1075 | #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD |
937 | #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF | 1076 | #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF |
938 | 1077 | ||
1078 | |||
1079 | /****************************************************************************/ | ||
1080 | // Structures used by UNIPHYTransmitterControlTable V1.4 | ||
1081 | // ASIC Families: NI | ||
1082 | // ucTableFormatRevision=1 | ||
1083 | // ucTableContentRevision=4 | ||
1084 | /****************************************************************************/ | ||
1085 | typedef struct _ATOM_DP_VS_MODE_V4 | ||
1086 | { | ||
1087 | UCHAR ucLaneSel; | ||
1088 | union | ||
1089 | { | ||
1090 | UCHAR ucLaneSet; | ||
1091 | struct { | ||
1092 | #if ATOM_BIG_ENDIAN | ||
1093 | UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4 | ||
1094 | UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level | ||
1095 | UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level | ||
1096 | #else | ||
1097 | UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level | ||
1098 | UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level | ||
1099 | UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4 | ||
1100 | #endif | ||
1101 | }; | ||
1102 | }; | ||
1103 | }ATOM_DP_VS_MODE_V4; | ||
1104 | |||
1105 | typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4 | ||
1106 | { | ||
1107 | #if ATOM_BIG_ENDIAN | ||
1108 | UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) | ||
1109 | // =1 Dig Transmitter 2 ( Uniphy CD ) | ||
1110 | // =2 Dig Transmitter 3 ( Uniphy EF ) | ||
1111 | UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New | ||
1112 | UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F | ||
1113 | UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E | ||
1114 | // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F | ||
1115 | UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) | ||
1116 | UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector | ||
1117 | #else | ||
1118 | UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector | ||
1119 | UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) | ||
1120 | UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E | ||
1121 | // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F | ||
1122 | UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F | ||
1123 | UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New | ||
1124 | UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) | ||
1125 | // =1 Dig Transmitter 2 ( Uniphy CD ) | ||
1126 | // =2 Dig Transmitter 3 ( Uniphy EF ) | ||
1127 | #endif | ||
1128 | }ATOM_DIG_TRANSMITTER_CONFIG_V4; | ||
1129 | |||
1130 | typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 | ||
1131 | { | ||
1132 | union | ||
1133 | { | ||
1134 | USHORT usPixelClock; // in 10KHz; for bios convenient | ||
1135 | USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h | ||
1136 | ATOM_DP_VS_MODE_V4 asMode; // DP Voltage swing mode Redefined comparing to previous version | ||
1137 | }; | ||
1138 | union | ||
1139 | { | ||
1140 | ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig; | ||
1141 | UCHAR ucConfig; | ||
1142 | }; | ||
1143 | UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX | ||
1144 | UCHAR ucLaneNum; | ||
1145 | UCHAR ucReserved[3]; | ||
1146 | }DIG_TRANSMITTER_CONTROL_PARAMETERS_V4; | ||
1147 | |||
1148 | //ucConfig | ||
1149 | //Bit0 | ||
1150 | #define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR 0x01 | ||
1151 | //Bit1 | ||
1152 | #define ATOM_TRANSMITTER_CONFIG_V4_COHERENT 0x02 | ||
1153 | //Bit2 | ||
1154 | #define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK 0x04 | ||
1155 | #define ATOM_TRANSMITTER_CONFIG_V4_LINKA 0x00 | ||
1156 | #define ATOM_TRANSMITTER_CONFIG_V4_LINKB 0x04 | ||
1157 | // Bit3 | ||
1158 | #define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK 0x08 | ||
1159 | #define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER 0x00 | ||
1160 | #define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER 0x08 | ||
1161 | // Bit5:4 | ||
1162 | #define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 0x30 | ||
1163 | #define ATOM_TRANSMITTER_CONFIG_V4_P1PLL 0x00 | ||
1164 | #define ATOM_TRANSMITTER_CONFIG_V4_P2PLL 0x10 | ||
1165 | #define ATOM_TRANSMITTER_CONFIG_V4_DCPLL 0x20 // New in _V4 | ||
1166 | #define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT 0x30 // Changed comparing to V3 | ||
1167 | // Bit7:6 | ||
1168 | #define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK 0xC0 | ||
1169 | #define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1 0x00 //AB | ||
1170 | #define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2 0x40 //CD | ||
1171 | #define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF | ||
1172 | |||
1173 | |||
1174 | /****************************************************************************/ | ||
1175 | // Structures used by ExternalEncoderControlTable V1.3 | ||
1176 | // ASIC Families: Evergreen, Llano, NI | ||
1177 | // ucTableFormatRevision=1 | ||
1178 | // ucTableContentRevision=3 | ||
1179 | /****************************************************************************/ | ||
1180 | |||
1181 | typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 | ||
1182 | { | ||
1183 | union{ | ||
1184 | USHORT usPixelClock; // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT | ||
1185 | USHORT usConnectorId; // connector id, valid when ucAction = INIT | ||
1186 | }; | ||
1187 | UCHAR ucConfig; // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT | ||
1188 | UCHAR ucAction; // | ||
1189 | UCHAR ucEncoderMode; // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT | ||
1190 | UCHAR ucLaneNum; // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT | ||
1191 | UCHAR ucBitPerColor; // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= DP | ||
1192 | UCHAR ucReserved; | ||
1193 | }EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3; | ||
1194 | |||
1195 | // ucAction | ||
1196 | #define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT 0x00 | ||
1197 | #define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT 0x01 | ||
1198 | #define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT 0x07 | ||
1199 | #define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP 0x0f | ||
1200 | #define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10 | ||
1201 | #define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11 | ||
1202 | #define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12 | ||
1203 | #define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP 0x14 | ||
1204 | |||
1205 | // ucConfig | ||
1206 | #define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03 | ||
1207 | #define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00 | ||
1208 | #define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01 | ||
1209 | #define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ 0x02 | ||
1210 | #define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK 0x70 | ||
1211 | #define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1 0x00 | ||
1212 | #define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2 0x10 | ||
1213 | #define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3 0x20 | ||
1214 | |||
1215 | typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 | ||
1216 | { | ||
1217 | EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder; | ||
1218 | ULONG ulReserved[2]; | ||
1219 | }EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3; | ||
1220 | |||
1221 | |||
939 | /****************************************************************************/ | 1222 | /****************************************************************************/ |
940 | // Structures used by DAC1OuputControlTable | 1223 | // Structures used by DAC1OuputControlTable |
941 | // DAC2OuputControlTable | 1224 | // DAC2OuputControlTable |
@@ -1142,6 +1425,7 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V2 | |||
1142 | #define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10 | 1425 | #define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10 |
1143 | #define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20 | 1426 | #define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20 |
1144 | 1427 | ||
1428 | |||
1145 | typedef struct _PIXEL_CLOCK_PARAMETERS_V3 | 1429 | typedef struct _PIXEL_CLOCK_PARAMETERS_V3 |
1146 | { | 1430 | { |
1147 | USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) | 1431 | USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) |
@@ -1202,6 +1486,55 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V5 | |||
1202 | #define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08 | 1486 | #define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08 |
1203 | #define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10 | 1487 | #define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10 |
1204 | 1488 | ||
1489 | typedef struct _CRTC_PIXEL_CLOCK_FREQ | ||
1490 | { | ||
1491 | #if ATOM_BIG_ENDIAN | ||
1492 | ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to | ||
1493 | // drive the pixel clock. not used for DCPLL case. | ||
1494 | ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing. | ||
1495 | // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version. | ||
1496 | #else | ||
1497 | ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing. | ||
1498 | // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version. | ||
1499 | ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to | ||
1500 | // drive the pixel clock. not used for DCPLL case. | ||
1501 | #endif | ||
1502 | }CRTC_PIXEL_CLOCK_FREQ; | ||
1503 | |||
1504 | typedef struct _PIXEL_CLOCK_PARAMETERS_V6 | ||
1505 | { | ||
1506 | union{ | ||
1507 | CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; // pixel clock and CRTC id frequency | ||
1508 | ULONG ulDispEngClkFreq; // dispclk frequency | ||
1509 | }; | ||
1510 | USHORT usFbDiv; // feedback divider integer part. | ||
1511 | UCHAR ucPostDiv; // post divider. | ||
1512 | UCHAR ucRefDiv; // Reference divider | ||
1513 | UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL | ||
1514 | UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h, | ||
1515 | // indicate which graphic encoder will be used. | ||
1516 | UCHAR ucEncoderMode; // Encoder mode: | ||
1517 | UCHAR ucMiscInfo; // bit[0]= Force program PPLL | ||
1518 | // bit[1]= when VGA timing is used. | ||
1519 | // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp | ||
1520 | // bit[4]= RefClock source for PPLL. | ||
1521 | // =0: XTLAIN( default mode ) | ||
1522 | // =1: other external clock source, which is pre-defined | ||
1523 | // by VBIOS depend on the feature required. | ||
1524 | // bit[7:5]: reserved. | ||
1525 | ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 ) | ||
1526 | |||
1527 | }PIXEL_CLOCK_PARAMETERS_V6; | ||
1528 | |||
1529 | #define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL 0x01 | ||
1530 | #define PIXEL_CLOCK_V6_MISC_VGA_MODE 0x02 | ||
1531 | #define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c | ||
1532 | #define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00 | ||
1533 | #define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04 | ||
1534 | #define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08 | ||
1535 | #define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c | ||
1536 | #define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10 | ||
1537 | |||
1205 | typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2 | 1538 | typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2 |
1206 | { | 1539 | { |
1207 | PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput; | 1540 | PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput; |
@@ -1241,10 +1574,11 @@ typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS | |||
1241 | typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 | 1574 | typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 |
1242 | { | 1575 | { |
1243 | USHORT usPixelClock; // target pixel clock | 1576 | USHORT usPixelClock; // target pixel clock |
1244 | UCHAR ucTransmitterID; // transmitter id defined in objectid.h | 1577 | UCHAR ucTransmitterID; // GPU transmitter id defined in objectid.h |
1245 | UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI | 1578 | UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI |
1246 | UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX | 1579 | UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX |
1247 | UCHAR ucReserved[3]; | 1580 | UCHAR ucExtTransmitterID; // external encoder id. |
1581 | UCHAR ucReserved[2]; | ||
1248 | }ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3; | 1582 | }ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3; |
1249 | 1583 | ||
1250 | // usDispPllConfig v1.2 for RoadRunner | 1584 | // usDispPllConfig v1.2 for RoadRunner |
@@ -1314,7 +1648,7 @@ typedef struct _GET_ENGINE_CLOCK_PARAMETERS | |||
1314 | typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS | 1648 | typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS |
1315 | { | 1649 | { |
1316 | USHORT usPrescale; //Ratio between Engine clock and I2C clock | 1650 | USHORT usPrescale; //Ratio between Engine clock and I2C clock |
1317 | USHORT usVRAMAddress; //Adress in Frame Buffer where to pace raw EDID | 1651 | USHORT usVRAMAddress; //Address in Frame Buffer where to pace raw EDID |
1318 | USHORT usStatus; //When use output: lower byte EDID checksum, high byte hardware status | 1652 | USHORT usStatus; //When use output: lower byte EDID checksum, high byte hardware status |
1319 | //WHen use input: lower byte as 'byte to read':currently limited to 128byte or 1byte | 1653 | //WHen use input: lower byte as 'byte to read':currently limited to 128byte or 1byte |
1320 | UCHAR ucSlaveAddr; //Read from which slave | 1654 | UCHAR ucSlaveAddr; //Read from which slave |
@@ -1358,6 +1692,7 @@ typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS | |||
1358 | /**************************************************************************/ | 1692 | /**************************************************************************/ |
1359 | #define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS | 1693 | #define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS |
1360 | 1694 | ||
1695 | |||
1361 | /****************************************************************************/ | 1696 | /****************************************************************************/ |
1362 | // Structures used by PowerConnectorDetectionTable | 1697 | // Structures used by PowerConnectorDetectionTable |
1363 | /****************************************************************************/ | 1698 | /****************************************************************************/ |
@@ -1438,6 +1773,31 @@ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 | |||
1438 | #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00 | 1773 | #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00 |
1439 | #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8 | 1774 | #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8 |
1440 | 1775 | ||
1776 | // Used by DCE5.0 | ||
1777 | typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 | ||
1778 | { | ||
1779 | USHORT usSpreadSpectrumAmountFrac; // SS_AMOUNT_DSFRAC New in DCE5.0 | ||
1780 | UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread. | ||
1781 | // Bit[1]: 1-Ext. 0-Int. | ||
1782 | // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL | ||
1783 | // Bits[7:4] reserved | ||
1784 | UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE | ||
1785 | USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8] | ||
1786 | USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC | ||
1787 | }ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3; | ||
1788 | |||
1789 | #define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD 0x00 | ||
1790 | #define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD 0x01 | ||
1791 | #define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD 0x02 | ||
1792 | #define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK 0x0c | ||
1793 | #define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00 | ||
1794 | #define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04 | ||
1795 | #define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08 | ||
1796 | #define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF | ||
1797 | #define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0 | ||
1798 | #define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00 | ||
1799 | #define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT 8 | ||
1800 | |||
1441 | #define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL | 1801 | #define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL |
1442 | 1802 | ||
1443 | /**************************************************************************/ | 1803 | /**************************************************************************/ |
@@ -1706,7 +2066,7 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES | |||
1706 | USHORT StandardVESA_Timing; // Only used by Bios | 2066 | USHORT StandardVESA_Timing; // Only used by Bios |
1707 | USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4 | 2067 | USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4 |
1708 | USHORT DAC_Info; // Will be obsolete from R600 | 2068 | USHORT DAC_Info; // Will be obsolete from R600 |
1709 | USHORT LVDS_Info; // Shared by various SW components,latest version 1.1 | 2069 | USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info |
1710 | USHORT TMDS_Info; // Will be obsolete from R600 | 2070 | USHORT TMDS_Info; // Will be obsolete from R600 |
1711 | USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1 | 2071 | USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1 |
1712 | USHORT SupportedDevicesInfo; // Will be obsolete from R600 | 2072 | USHORT SupportedDevicesInfo; // Will be obsolete from R600 |
@@ -1736,12 +2096,16 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES | |||
1736 | USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1 | 2096 | USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1 |
1737 | }ATOM_MASTER_LIST_OF_DATA_TABLES; | 2097 | }ATOM_MASTER_LIST_OF_DATA_TABLES; |
1738 | 2098 | ||
2099 | // For backward compatible | ||
2100 | #define LVDS_Info LCD_Info | ||
2101 | |||
1739 | typedef struct _ATOM_MASTER_DATA_TABLE | 2102 | typedef struct _ATOM_MASTER_DATA_TABLE |
1740 | { | 2103 | { |
1741 | ATOM_COMMON_TABLE_HEADER sHeader; | 2104 | ATOM_COMMON_TABLE_HEADER sHeader; |
1742 | ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables; | 2105 | ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables; |
1743 | }ATOM_MASTER_DATA_TABLE; | 2106 | }ATOM_MASTER_DATA_TABLE; |
1744 | 2107 | ||
2108 | |||
1745 | /****************************************************************************/ | 2109 | /****************************************************************************/ |
1746 | // Structure used in MultimediaCapabilityInfoTable | 2110 | // Structure used in MultimediaCapabilityInfoTable |
1747 | /****************************************************************************/ | 2111 | /****************************************************************************/ |
@@ -1776,11 +2140,12 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO | |||
1776 | UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) | 2140 | UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) |
1777 | }ATOM_MULTIMEDIA_CONFIG_INFO; | 2141 | }ATOM_MULTIMEDIA_CONFIG_INFO; |
1778 | 2142 | ||
2143 | |||
1779 | /****************************************************************************/ | 2144 | /****************************************************************************/ |
1780 | // Structures used in FirmwareInfoTable | 2145 | // Structures used in FirmwareInfoTable |
1781 | /****************************************************************************/ | 2146 | /****************************************************************************/ |
1782 | 2147 | ||
1783 | // usBIOSCapability Defintion: | 2148 | // usBIOSCapability Definition: |
1784 | // Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; | 2149 | // Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; |
1785 | // Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; | 2150 | // Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; |
1786 | // Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; | 2151 | // Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; |
@@ -2031,8 +2396,47 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_1 | |||
2031 | UCHAR ucReserved4[3]; | 2396 | UCHAR ucReserved4[3]; |
2032 | }ATOM_FIRMWARE_INFO_V2_1; | 2397 | }ATOM_FIRMWARE_INFO_V2_1; |
2033 | 2398 | ||
2399 | //the structure below to be used from NI | ||
2400 | //ucTableFormatRevision=2 | ||
2401 | //ucTableContentRevision=2 | ||
2402 | typedef struct _ATOM_FIRMWARE_INFO_V2_2 | ||
2403 | { | ||
2404 | ATOM_COMMON_TABLE_HEADER sHeader; | ||
2405 | ULONG ulFirmwareRevision; | ||
2406 | ULONG ulDefaultEngineClock; //In 10Khz unit | ||
2407 | ULONG ulDefaultMemoryClock; //In 10Khz unit | ||
2408 | ULONG ulReserved[2]; | ||
2409 | ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit* | ||
2410 | ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit* | ||
2411 | ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit | ||
2412 | ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock ? | ||
2413 | ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage. | ||
2414 | UCHAR ucReserved3; //Was ucASICMaxTemperature; | ||
2415 | UCHAR ucMinAllowedBL_Level; | ||
2416 | USHORT usBootUpVDDCVoltage; //In MV unit | ||
2417 | USHORT usLcdMinPixelClockPLL_Output; // In MHz unit | ||
2418 | USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit | ||
2419 | ULONG ulReserved4; //Was ulAsicMaximumVoltage | ||
2420 | ULONG ulMinPixelClockPLL_Output; //In 10Khz unit | ||
2421 | ULONG ulReserved5; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input | ||
2422 | ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input | ||
2423 | ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output | ||
2424 | USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC | ||
2425 | USHORT usMinPixelClockPLL_Input; //In 10Khz unit | ||
2426 | USHORT usMaxPixelClockPLL_Input; //In 10Khz unit | ||
2427 | USHORT usBootUpVDDCIVoltage; //In unit of mv; Was usMinPixelClockPLL_Output; | ||
2428 | ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; | ||
2429 | USHORT usCoreReferenceClock; //In 10Khz unit | ||
2430 | USHORT usMemoryReferenceClock; //In 10Khz unit | ||
2431 | USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock | ||
2432 | UCHAR ucMemoryModule_ID; //Indicate what is the board design | ||
2433 | UCHAR ucReserved9[3]; | ||
2434 | USHORT usBootUpMVDDCVoltage; //In unit of mv; Was usMinPixelClockPLL_Output; | ||
2435 | USHORT usReserved12; | ||
2436 | ULONG ulReserved10[3]; // New added comparing to previous version | ||
2437 | }ATOM_FIRMWARE_INFO_V2_2; | ||
2034 | 2438 | ||
2035 | #define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1 | 2439 | #define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2 |
2036 | 2440 | ||
2037 | /****************************************************************************/ | 2441 | /****************************************************************************/ |
2038 | // Structures used in IntegratedSystemInfoTable | 2442 | // Structures used in IntegratedSystemInfoTable |
@@ -2212,7 +2616,7 @@ ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pi | |||
2212 | ucDockingPinBit: which bit in this register to read the pin status; | 2616 | ucDockingPinBit: which bit in this register to read the pin status; |
2213 | ucDockingPinPolarity:Polarity of the pin when docked; | 2617 | ucDockingPinPolarity:Polarity of the pin when docked; |
2214 | 2618 | ||
2215 | ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0 | 2619 | ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0 |
2216 | 2620 | ||
2217 | usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%. | 2621 | usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%. |
2218 | 2622 | ||
@@ -2250,6 +2654,14 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep | |||
2250 | usMinDownStreamHTLinkWidth: same as above. | 2654 | usMinDownStreamHTLinkWidth: same as above. |
2251 | */ | 2655 | */ |
2252 | 2656 | ||
2657 | // ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo - CPU type definition | ||
2658 | #define INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU 0 | ||
2659 | #define INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN 1 | ||
2660 | #define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2 | ||
2661 | #define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3 | ||
2662 | #define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4 | ||
2663 | |||
2664 | #define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH // this deff reflects max defined CPU code | ||
2253 | 2665 | ||
2254 | #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001 | 2666 | #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001 |
2255 | #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002 | 2667 | #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002 |
@@ -2778,8 +3190,88 @@ typedef struct _ATOM_LVDS_INFO_V12 | |||
2778 | #define PANEL_RANDOM_DITHER 0x80 | 3190 | #define PANEL_RANDOM_DITHER 0x80 |
2779 | #define PANEL_RANDOM_DITHER_MASK 0x80 | 3191 | #define PANEL_RANDOM_DITHER_MASK 0x80 |
2780 | 3192 | ||
3193 | #define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 // no need to change this | ||
3194 | |||
3195 | /****************************************************************************/ | ||
3196 | // Structures used by LCD_InfoTable V1.3 Note: previous version was called ATOM_LVDS_INFO_V12 | ||
3197 | // ASIC Families: NI | ||
3198 | // ucTableFormatRevision=1 | ||
3199 | // ucTableContentRevision=3 | ||
3200 | /****************************************************************************/ | ||
3201 | typedef struct _ATOM_LCD_INFO_V13 | ||
3202 | { | ||
3203 | ATOM_COMMON_TABLE_HEADER sHeader; | ||
3204 | ATOM_DTD_FORMAT sLCDTiming; | ||
3205 | USHORT usExtInfoTableOffset; | ||
3206 | USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec. | ||
3207 | ULONG ulReserved0; | ||
3208 | UCHAR ucLCD_Misc; // Reorganized in V13 | ||
3209 | // Bit0: {=0:single, =1:dual}, | ||
3210 | // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888} // was {=0:666RGB, =1:888RGB}, | ||
3211 | // Bit3:2: {Grey level} | ||
3212 | // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h) | ||
3213 | // Bit7 Reserved. was for ATOM_PANEL_MISC_API_ENABLED, still need it? | ||
3214 | UCHAR ucPanelDefaultRefreshRate; | ||
3215 | UCHAR ucPanelIdentification; | ||
3216 | UCHAR ucSS_Id; | ||
3217 | USHORT usLCDVenderID; | ||
3218 | USHORT usLCDProductID; | ||
3219 | UCHAR ucLCDPanel_SpecialHandlingCap; // Reorganized in V13 | ||
3220 | // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own | ||
3221 | // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED | ||
3222 | // Bit2: a quick reference whether an embadded panel (LCD1 ) is LVDS (0) or eDP (1) | ||
3223 | // Bit7-3: Reserved | ||
3224 | UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable | ||
3225 | USHORT usBacklightPWM; // Backlight PWM in Hz. New in _V13 | ||
3226 | |||
3227 | UCHAR ucPowerSequenceDIGONtoDE_in4Ms; | ||
3228 | UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms; | ||
3229 | UCHAR ucPowerSequenceDEtoDIGON_in4Ms; | ||
3230 | UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms; | ||
3231 | |||
3232 | UCHAR ucOffDelay_in4Ms; | ||
3233 | UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms; | ||
3234 | UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms; | ||
3235 | UCHAR ucReserved1; | ||
3236 | |||
3237 | ULONG ulReserved[4]; | ||
3238 | }ATOM_LCD_INFO_V13; | ||
3239 | |||
3240 | #define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13 | ||
3241 | |||
3242 | //Definitions for ucLCD_Misc | ||
3243 | #define ATOM_PANEL_MISC_V13_DUAL 0x00000001 | ||
3244 | #define ATOM_PANEL_MISC_V13_FPDI 0x00000002 | ||
3245 | #define ATOM_PANEL_MISC_V13_GREY_LEVEL 0x0000000C | ||
3246 | #define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT 2 | ||
3247 | #define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK 0x70 | ||
3248 | #define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR 0x10 | ||
3249 | #define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR 0x20 | ||
3250 | |||
3251 | //Color Bit Depth definition in EDID V1.4 @BYTE 14h | ||
3252 | //Bit 6 5 4 | ||
3253 | // 0 0 0 - Color bit depth is undefined | ||
3254 | // 0 0 1 - 6 Bits per Primary Color | ||
3255 | // 0 1 0 - 8 Bits per Primary Color | ||
3256 | // 0 1 1 - 10 Bits per Primary Color | ||
3257 | // 1 0 0 - 12 Bits per Primary Color | ||
3258 | // 1 0 1 - 14 Bits per Primary Color | ||
3259 | // 1 1 0 - 16 Bits per Primary Color | ||
3260 | // 1 1 1 - Reserved | ||
3261 | |||
3262 | //Definitions for ucLCDPanel_SpecialHandlingCap: | ||
3263 | |||
3264 | //Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. | ||
3265 | //Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL | ||
3266 | #define LCDPANEL_CAP_V13_READ_EDID 0x1 // = LCDPANEL_CAP_READ_EDID no change comparing to previous version | ||
3267 | |||
3268 | //If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together | ||
3269 | //with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static | ||
3270 | //refresh rate switch by SW. This is only valid from ATOM_LVDS_INFO_V12 | ||
3271 | #define LCDPANEL_CAP_V13_DRR_SUPPORTED 0x2 // = LCDPANEL_CAP_DRR_SUPPORTED no change comparing to previous version | ||
2781 | 3272 | ||
2782 | #define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 | 3273 | //Use this cap bit for a quick reference whether an embadded panel (LCD1 ) is LVDS or eDP. |
3274 | #define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP no change comparing to previous version | ||
2783 | 3275 | ||
2784 | typedef struct _ATOM_PATCH_RECORD_MODE | 3276 | typedef struct _ATOM_PATCH_RECORD_MODE |
2785 | { | 3277 | { |
@@ -2868,7 +3360,7 @@ typedef struct _ATOM_SPREAD_SPECTRUM_INFO | |||
2868 | /****************************************************************************/ | 3360 | /****************************************************************************/ |
2869 | // Structure used in AnalogTV_InfoTable (Top level) | 3361 | // Structure used in AnalogTV_InfoTable (Top level) |
2870 | /****************************************************************************/ | 3362 | /****************************************************************************/ |
2871 | //ucTVBootUpDefaultStd definiton: | 3363 | //ucTVBootUpDefaultStd definition: |
2872 | 3364 | ||
2873 | //ATOM_TV_NTSC 1 | 3365 | //ATOM_TV_NTSC 1 |
2874 | //ATOM_TV_NTSCJ 2 | 3366 | //ATOM_TV_NTSCJ 2 |
@@ -2944,9 +3436,9 @@ typedef struct _ATOM_DPCD_INFO | |||
2944 | #define MAX_DTD_MODE_IN_VRAM 6 | 3436 | #define MAX_DTD_MODE_IN_VRAM 6 |
2945 | #define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT) | 3437 | #define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT) |
2946 | #define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) | 3438 | #define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) |
2947 | #define DFP_ENCODER_TYPE_OFFSET 0x80 | 3439 | //20 bytes for Encoder Type and DPCD in STD EDID area |
2948 | #define DP_ENCODER_LANE_NUM_OFFSET 0x84 | 3440 | #define DFP_ENCODER_TYPE_OFFSET (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20) |
2949 | #define DP_ENCODER_LINK_RATE_OFFSET 0x88 | 3441 | #define ATOM_DP_DPCD_OFFSET (DFP_ENCODER_TYPE_OFFSET + 4 ) |
2950 | 3442 | ||
2951 | #define ATOM_HWICON1_SURFACE_ADDR 0 | 3443 | #define ATOM_HWICON1_SURFACE_ADDR 0 |
2952 | #define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE) | 3444 | #define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE) |
@@ -2997,14 +3489,16 @@ typedef struct _ATOM_DPCD_INFO | |||
2997 | #define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) | 3489 | #define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) |
2998 | #define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) | 3490 | #define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) |
2999 | 3491 | ||
3000 | #define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE) | 3492 | #define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) |
3001 | 3493 | ||
3002 | #define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256) | 3494 | #define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 1024) |
3003 | #define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512 | 3495 | #define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START + 512 |
3004 | 3496 | ||
3005 | //The size below is in Kb! | 3497 | //The size below is in Kb! |
3006 | #define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) | 3498 | #define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) |
3007 | 3499 | ||
3500 | #define ATOM_VRAM_RESERVE_V2_SIZE 32 | ||
3501 | |||
3008 | #define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L | 3502 | #define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L |
3009 | #define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 | 3503 | #define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 |
3010 | #define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 | 3504 | #define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 |
@@ -3206,6 +3700,15 @@ typedef struct _ATOM_DISPLAY_OBJECT_PATH | |||
3206 | USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. | 3700 | USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. |
3207 | }ATOM_DISPLAY_OBJECT_PATH; | 3701 | }ATOM_DISPLAY_OBJECT_PATH; |
3208 | 3702 | ||
3703 | typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH | ||
3704 | { | ||
3705 | USHORT usDeviceTag; //supported device | ||
3706 | USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH | ||
3707 | USHORT usConnObjectId; //Connector Object ID | ||
3708 | USHORT usGPUObjectId; //GPU ID | ||
3709 | USHORT usGraphicObjIds[2]; //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder | ||
3710 | }ATOM_DISPLAY_EXTERNAL_OBJECT_PATH; | ||
3711 | |||
3209 | typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE | 3712 | typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE |
3210 | { | 3713 | { |
3211 | UCHAR ucNumOfDispPath; | 3714 | UCHAR ucNumOfDispPath; |
@@ -3261,6 +3764,47 @@ typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset | |||
3261 | #define EXT_AUXDDC_LUTINDEX_7 7 | 3764 | #define EXT_AUXDDC_LUTINDEX_7 7 |
3262 | #define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1) | 3765 | #define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1) |
3263 | 3766 | ||
3767 | //ucChannelMapping are defined as following | ||
3768 | //for DP connector, eDP, DP to VGA/LVDS | ||
3769 | //Bit[1:0]: Define which pin connect to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 | ||
3770 | //Bit[3:2]: Define which pin connect to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 | ||
3771 | //Bit[5:4]: Define which pin connect to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 | ||
3772 | //Bit[7:6]: Define which pin connect to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 | ||
3773 | typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING | ||
3774 | { | ||
3775 | #if ATOM_BIG_ENDIAN | ||
3776 | UCHAR ucDP_Lane3_Source:2; | ||
3777 | UCHAR ucDP_Lane2_Source:2; | ||
3778 | UCHAR ucDP_Lane1_Source:2; | ||
3779 | UCHAR ucDP_Lane0_Source:2; | ||
3780 | #else | ||
3781 | UCHAR ucDP_Lane0_Source:2; | ||
3782 | UCHAR ucDP_Lane1_Source:2; | ||
3783 | UCHAR ucDP_Lane2_Source:2; | ||
3784 | UCHAR ucDP_Lane3_Source:2; | ||
3785 | #endif | ||
3786 | }ATOM_DP_CONN_CHANNEL_MAPPING; | ||
3787 | |||
3788 | //for DVI/HDMI, in dual link case, both links have to have same mapping. | ||
3789 | //Bit[1:0]: Define which pin connect to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 | ||
3790 | //Bit[3:2]: Define which pin connect to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 | ||
3791 | //Bit[5:4]: Define which pin connect to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 | ||
3792 | //Bit[7:6]: Define which pin connect to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 | ||
3793 | typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING | ||
3794 | { | ||
3795 | #if ATOM_BIG_ENDIAN | ||
3796 | UCHAR ucDVI_CLK_Source:2; | ||
3797 | UCHAR ucDVI_DATA0_Source:2; | ||
3798 | UCHAR ucDVI_DATA1_Source:2; | ||
3799 | UCHAR ucDVI_DATA2_Source:2; | ||
3800 | #else | ||
3801 | UCHAR ucDVI_DATA2_Source:2; | ||
3802 | UCHAR ucDVI_DATA1_Source:2; | ||
3803 | UCHAR ucDVI_DATA0_Source:2; | ||
3804 | UCHAR ucDVI_CLK_Source:2; | ||
3805 | #endif | ||
3806 | }ATOM_DVI_CONN_CHANNEL_MAPPING; | ||
3807 | |||
3264 | typedef struct _EXT_DISPLAY_PATH | 3808 | typedef struct _EXT_DISPLAY_PATH |
3265 | { | 3809 | { |
3266 | USHORT usDeviceTag; //A bit vector to show what devices are supported | 3810 | USHORT usDeviceTag; //A bit vector to show what devices are supported |
@@ -3269,7 +3813,13 @@ typedef struct _EXT_DISPLAY_PATH | |||
3269 | UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT | 3813 | UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT |
3270 | UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT | 3814 | UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT |
3271 | USHORT usExtEncoderObjId; //external encoder object id | 3815 | USHORT usExtEncoderObjId; //external encoder object id |
3272 | USHORT usReserved[3]; | 3816 | union{ |
3817 | UCHAR ucChannelMapping; // if ucChannelMapping=0, using default one to one mapping | ||
3818 | ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping; | ||
3819 | ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping; | ||
3820 | }; | ||
3821 | UCHAR ucReserved; | ||
3822 | USHORT usReserved[2]; | ||
3273 | }EXT_DISPLAY_PATH; | 3823 | }EXT_DISPLAY_PATH; |
3274 | 3824 | ||
3275 | #define NUMBER_OF_UCHAR_FOR_GUID 16 | 3825 | #define NUMBER_OF_UCHAR_FOR_GUID 16 |
@@ -3281,10 +3831,11 @@ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO | |||
3281 | UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string | 3831 | UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string |
3282 | EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries. | 3832 | EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries. |
3283 | UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0. | 3833 | UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0. |
3284 | UCHAR Reserved [7]; // for potential expansion | 3834 | UCHAR uc3DStereoPinId; // use for eDP panel |
3835 | UCHAR Reserved [6]; // for potential expansion | ||
3285 | }ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO; | 3836 | }ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO; |
3286 | 3837 | ||
3287 | //Related definitions, all records are differnt but they have a commond header | 3838 | //Related definitions, all records are different but they have a commond header |
3288 | typedef struct _ATOM_COMMON_RECORD_HEADER | 3839 | typedef struct _ATOM_COMMON_RECORD_HEADER |
3289 | { | 3840 | { |
3290 | UCHAR ucRecordType; //An emun to indicate the record type | 3841 | UCHAR ucRecordType; //An emun to indicate the record type |
@@ -3311,10 +3862,11 @@ typedef struct _ATOM_COMMON_RECORD_HEADER | |||
3311 | #define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table | 3862 | #define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table |
3312 | #define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicats the oobject is linked to another obj described by the record | 3863 | #define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicats the oobject is linked to another obj described by the record |
3313 | #define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19 | 3864 | #define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19 |
3865 | #define ATOM_ENCODER_CAP_RECORD_TYPE 20 | ||
3314 | 3866 | ||
3315 | 3867 | ||
3316 | //Must be updated when new record type is added,equal to that record definition! | 3868 | //Must be updated when new record type is added,equal to that record definition! |
3317 | #define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE | 3869 | #define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE |
3318 | 3870 | ||
3319 | typedef struct _ATOM_I2C_RECORD | 3871 | typedef struct _ATOM_I2C_RECORD |
3320 | { | 3872 | { |
@@ -3441,6 +3993,26 @@ typedef struct _ATOM_ENCODER_DVO_CF_RECORD | |||
3441 | UCHAR ucPadding[2]; | 3993 | UCHAR ucPadding[2]; |
3442 | }ATOM_ENCODER_DVO_CF_RECORD; | 3994 | }ATOM_ENCODER_DVO_CF_RECORD; |
3443 | 3995 | ||
3996 | // Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap | ||
3997 | #define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by this path | ||
3998 | |||
3999 | typedef struct _ATOM_ENCODER_CAP_RECORD | ||
4000 | { | ||
4001 | ATOM_COMMON_RECORD_HEADER sheader; | ||
4002 | union { | ||
4003 | USHORT usEncoderCap; | ||
4004 | struct { | ||
4005 | #if ATOM_BIG_ENDIAN | ||
4006 | USHORT usReserved:15; // Bit1-15 may be defined for other capability in future | ||
4007 | USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability. | ||
4008 | #else | ||
4009 | USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability. | ||
4010 | USHORT usReserved:15; // Bit1-15 may be defined for other capability in future | ||
4011 | #endif | ||
4012 | }; | ||
4013 | }; | ||
4014 | }ATOM_ENCODER_CAP_RECORD; | ||
4015 | |||
3444 | // value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle | 4016 | // value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle |
3445 | #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1 | 4017 | #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1 |
3446 | #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2 | 4018 | #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2 |
@@ -3580,6 +4152,11 @@ typedef struct _ATOM_VOLTAGE_CONTROL | |||
3580 | #define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI | 4152 | #define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI |
3581 | #define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage | 4153 | #define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage |
3582 | #define VOLTAGE_CONTROL_ID_DS4402 0x04 | 4154 | #define VOLTAGE_CONTROL_ID_DS4402 0x04 |
4155 | #define VOLTAGE_CONTROL_ID_UP6266 0x05 | ||
4156 | #define VOLTAGE_CONTROL_ID_SCORPIO 0x06 | ||
4157 | #define VOLTAGE_CONTROL_ID_VT1556M 0x07 | ||
4158 | #define VOLTAGE_CONTROL_ID_CHL822x 0x08 | ||
4159 | #define VOLTAGE_CONTROL_ID_VT1586M 0x09 | ||
3583 | 4160 | ||
3584 | typedef struct _ATOM_VOLTAGE_OBJECT | 4161 | typedef struct _ATOM_VOLTAGE_OBJECT |
3585 | { | 4162 | { |
@@ -3670,66 +4247,157 @@ typedef struct _ATOM_POWER_SOURCE_INFO | |||
3670 | #define POWER_SENSOR_GPIO 0x01 | 4247 | #define POWER_SENSOR_GPIO 0x01 |
3671 | #define POWER_SENSOR_I2C 0x02 | 4248 | #define POWER_SENSOR_I2C 0x02 |
3672 | 4249 | ||
4250 | typedef struct _ATOM_CLK_VOLT_CAPABILITY | ||
4251 | { | ||
4252 | ULONG ulVoltageIndex; // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table | ||
4253 | ULONG ulMaximumSupportedCLK; // Maximum clock supported with specified voltage index, unit in 10kHz | ||
4254 | }ATOM_CLK_VOLT_CAPABILITY; | ||
4255 | |||
4256 | typedef struct _ATOM_AVAILABLE_SCLK_LIST | ||
4257 | { | ||
4258 | ULONG ulSupportedSCLK; // Maximum clock supported with specified voltage index, unit in 10kHz | ||
4259 | USHORT usVoltageIndex; // The Voltage Index indicated by FUSE for specified SCLK | ||
4260 | USHORT usVoltageID; // The Voltage ID indicated by FUSE for specified SCLK | ||
4261 | }ATOM_AVAILABLE_SCLK_LIST; | ||
4262 | |||
4263 | // ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition | ||
4264 | #define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE 1 // refer to ulSystemConfig bit[0] | ||
4265 | |||
4266 | // this IntegrateSystemInfoTable is used for Liano/Ontario APU | ||
3673 | typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 | 4267 | typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 |
3674 | { | 4268 | { |
3675 | ATOM_COMMON_TABLE_HEADER sHeader; | 4269 | ATOM_COMMON_TABLE_HEADER sHeader; |
3676 | ULONG ulBootUpEngineClock; | 4270 | ULONG ulBootUpEngineClock; |
3677 | ULONG ulDentistVCOFreq; | 4271 | ULONG ulDentistVCOFreq; |
3678 | ULONG ulBootUpUMAClock; | 4272 | ULONG ulBootUpUMAClock; |
3679 | ULONG ulReserved1[8]; | 4273 | ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4]; |
3680 | ULONG ulBootUpReqDisplayVector; | 4274 | ULONG ulBootUpReqDisplayVector; |
3681 | ULONG ulOtherDisplayMisc; | 4275 | ULONG ulOtherDisplayMisc; |
3682 | ULONG ulGPUCapInfo; | 4276 | ULONG ulGPUCapInfo; |
3683 | ULONG ulReserved2[3]; | 4277 | ULONG ulSB_MMIO_Base_Addr; |
4278 | USHORT usRequestedPWMFreqInHz; | ||
4279 | UCHAR ucHtcTmpLmt; | ||
4280 | UCHAR ucHtcHystLmt; | ||
4281 | ULONG ulMinEngineClock; | ||
3684 | ULONG ulSystemConfig; | 4282 | ULONG ulSystemConfig; |
3685 | ULONG ulCPUCapInfo; | 4283 | ULONG ulCPUCapInfo; |
3686 | USHORT usMaxNBVoltage; | 4284 | USHORT usNBP0Voltage; |
3687 | USHORT usMinNBVoltage; | 4285 | USHORT usNBP1Voltage; |
3688 | USHORT usBootUpNBVoltage; | 4286 | USHORT usBootUpNBVoltage; |
3689 | USHORT usExtDispConnInfoOffset; | 4287 | USHORT usExtDispConnInfoOffset; |
3690 | UCHAR ucHtcTmpLmt; | 4288 | USHORT usPanelRefreshRateRange; |
3691 | UCHAR ucTjOffset; | ||
3692 | UCHAR ucMemoryType; | 4289 | UCHAR ucMemoryType; |
3693 | UCHAR ucUMAChannelNumber; | 4290 | UCHAR ucUMAChannelNumber; |
3694 | ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10]; | 4291 | ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10]; |
3695 | ULONG ulCSR_M3_ARB_CNTL_UVD[10]; | 4292 | ULONG ulCSR_M3_ARB_CNTL_UVD[10]; |
3696 | ULONG ulCSR_M3_ARB_CNTL_FS3D[10]; | 4293 | ULONG ulCSR_M3_ARB_CNTL_FS3D[10]; |
3697 | ULONG ulReserved3[42]; | 4294 | ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5]; |
4295 | ULONG ulGMCRestoreResetTime; | ||
4296 | ULONG ulMinimumNClk; | ||
4297 | ULONG ulIdleNClk; | ||
4298 | ULONG ulDDR_DLL_PowerUpTime; | ||
4299 | ULONG ulDDR_PLL_PowerUpTime; | ||
4300 | USHORT usPCIEClkSSPercentage; | ||
4301 | USHORT usPCIEClkSSType; | ||
4302 | USHORT usLvdsSSPercentage; | ||
4303 | USHORT usLvdsSSpreadRateIn10Hz; | ||
4304 | USHORT usHDMISSPercentage; | ||
4305 | USHORT usHDMISSpreadRateIn10Hz; | ||
4306 | USHORT usDVISSPercentage; | ||
4307 | USHORT usDVISSpreadRateIn10Hz; | ||
4308 | ULONG ulReserved3[21]; | ||
3698 | ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo; | 4309 | ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo; |
3699 | }ATOM_INTEGRATED_SYSTEM_INFO_V6; | 4310 | }ATOM_INTEGRATED_SYSTEM_INFO_V6; |
3700 | 4311 | ||
4312 | // ulGPUCapInfo | ||
4313 | #define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01 | ||
4314 | #define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08 | ||
4315 | |||
4316 | // ulOtherDisplayMisc | ||
4317 | #define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01 | ||
4318 | |||
4319 | |||
3701 | /********************************************************************************************************************** | 4320 | /********************************************************************************************************************** |
3702 | // ATOM_INTEGRATED_SYSTEM_INFO_V6 Description | 4321 | ATOM_INTEGRATED_SYSTEM_INFO_V6 Description |
3703 | //ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. | 4322 | ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. if it is equal 0, then VBIOS use pre-defined bootup engine clock |
3704 | //ulDentistVCOFreq: Dentist VCO clock in 10kHz unit. | 4323 | ulDentistVCOFreq: Dentist VCO clock in 10kHz unit. |
3705 | //ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit. | 4324 | ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit. |
3706 | //ulReserved1[8] Reserved by now, must be 0x0. | 4325 | sDISPCLK_Voltage: Report Display clock voltage requirement. |
3707 | //ulBootUpReqDisplayVector VBIOS boot up display IDs | 4326 | |
3708 | // ATOM_DEVICE_CRT1_SUPPORT 0x0001 | 4327 | ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Liano/Ontaio projects: |
3709 | // ATOM_DEVICE_CRT2_SUPPORT 0x0010 | 4328 | ATOM_DEVICE_CRT1_SUPPORT 0x0001 |
3710 | // ATOM_DEVICE_DFP1_SUPPORT 0x0008 | 4329 | ATOM_DEVICE_CRT2_SUPPORT 0x0010 |
3711 | // ATOM_DEVICE_DFP6_SUPPORT 0x0040 | 4330 | ATOM_DEVICE_DFP1_SUPPORT 0x0008 |
3712 | // ATOM_DEVICE_DFP2_SUPPORT 0x0080 | 4331 | ATOM_DEVICE_DFP6_SUPPORT 0x0040 |
3713 | // ATOM_DEVICE_DFP3_SUPPORT 0x0200 | 4332 | ATOM_DEVICE_DFP2_SUPPORT 0x0080 |
3714 | // ATOM_DEVICE_DFP4_SUPPORT 0x0400 | 4333 | ATOM_DEVICE_DFP3_SUPPORT 0x0200 |
3715 | // ATOM_DEVICE_DFP5_SUPPORT 0x0800 | 4334 | ATOM_DEVICE_DFP4_SUPPORT 0x0400 |
3716 | // ATOM_DEVICE_LCD1_SUPPORT 0x0002 | 4335 | ATOM_DEVICE_DFP5_SUPPORT 0x0800 |
3717 | //ulOtherDisplayMisc Other display related flags, not defined yet. | 4336 | ATOM_DEVICE_LCD1_SUPPORT 0x0002 |
3718 | //ulGPUCapInfo TBD | 4337 | ulOtherDisplayMisc: Other display related flags, not defined yet. |
3719 | //ulReserved2[3] must be 0x0 for the reserved. | 4338 | ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode use cascade PLL mode. |
3720 | //ulSystemConfig TBD | 4339 | =1: TMDS/HDMI Coherent Mode use signel PLL mode. |
3721 | //ulCPUCapInfo TBD | 4340 | bit[3]=0: Enable HW AUX mode detection logic |
3722 | //usMaxNBVoltage High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. | 4341 | =1: Disable HW AUX mode dettion logic |
3723 | //usMinNBVoltage Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. | 4342 | ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage. |
3724 | //usBootUpNBVoltage Boot up NB voltage in unit of mv. | 4343 | |
3725 | //ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register. | 4344 | usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW). |
3726 | //ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed. | 4345 | Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0; |
3727 | //ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved. | 4346 | |
3728 | //ucUMAChannelNumber System memory channel numbers. | 4347 | When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below: |
3729 | //usExtDispConnectionInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table. | 4348 | 1. SW uses the GPU BL PWM output to control the BL, in chis case, this non-zero frequency determines what freq GPU should use; |
3730 | //ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for CSR M3 arbiter for default | 4349 | VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result, |
3731 | //ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback. | 4350 | Changing BL using VBIOS function is functional in both driver and non-driver present environment; |
3732 | //ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications. | 4351 | and enabling VariBri under the driver environment from PP table is optional. |
4352 | |||
4353 | 2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating | ||
4354 | that BL control from GPU is expected. | ||
4355 | VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1 | ||
4356 | Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but | ||
4357 | it's per platform | ||
4358 | and enabling VariBri under the driver environment from PP table is optional. | ||
4359 | |||
4360 | ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt. | ||
4361 | Threshold on value to enter HTC_active state. | ||
4362 | ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt. | ||
4363 | To calculate threshold off value to exit HTC_active state, which is Threshold on vlaue minus ucHtcHystLmt. | ||
4364 | ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings. | ||
4365 | ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled | ||
4366 | =1: PCIE Power Gating Enabled | ||
4367 | Bit[1]=0: DDR-DLL shut-down feature disabled. | ||
4368 | 1: DDR-DLL shut-down feature enabled. | ||
4369 | Bit[2]=0: DDR-PLL Power down feature disabled. | ||
4370 | 1: DDR-PLL Power down feature enabled. | ||
4371 | ulCPUCapInfo: TBD | ||
4372 | usNBP0Voltage: VID for voltage on NB P0 State | ||
4373 | usNBP1Voltage: VID for voltage on NB P1 State | ||
4374 | usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is suffcient to support VBIOS DISPCLK requirement. | ||
4375 | usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure | ||
4376 | usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requestd by the platform, at least two bits need to be set | ||
4377 | to indicate a range. | ||
4378 | SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004 | ||
4379 | SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008 | ||
4380 | SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010 | ||
4381 | SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020 | ||
4382 | ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved. | ||
4383 | ucUMAChannelNumber: System memory channel numbers. | ||
4384 | ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default | ||
4385 | ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback. | ||
4386 | ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications. | ||
4387 | sAvail_SCLK[5]: Arrays to provide available list of SLCK and corresponding voltage, order from low to high | ||
4388 | ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns. | ||
4389 | ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calcualte data reconnection latency. Unit in 10kHz. | ||
4390 | ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz. | ||
4391 | ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns. | ||
4392 | ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns. | ||
4393 | usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 mean 1%. | ||
4394 | usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread. | ||
4395 | usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting. | ||
4396 | usLvdsSSpreadRateIn10Hz: LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting. | ||
4397 | usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting. | ||
4398 | usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting. | ||
4399 | usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting. | ||
4400 | usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting. | ||
3733 | **********************************************************************************************************************/ | 4401 | **********************************************************************************************************************/ |
3734 | 4402 | ||
3735 | /**************************************************************************/ | 4403 | /**************************************************************************/ |
@@ -3790,6 +4458,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT | |||
3790 | #define ASIC_INTERNAL_SS_ON_LVDS 6 | 4458 | #define ASIC_INTERNAL_SS_ON_LVDS 6 |
3791 | #define ASIC_INTERNAL_SS_ON_DP 7 | 4459 | #define ASIC_INTERNAL_SS_ON_DP 7 |
3792 | #define ASIC_INTERNAL_SS_ON_DCPLL 8 | 4460 | #define ASIC_INTERNAL_SS_ON_DCPLL 8 |
4461 | #define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9 | ||
3793 | 4462 | ||
3794 | typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2 | 4463 | typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2 |
3795 | { | 4464 | { |
@@ -3903,8 +4572,9 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 | |||
3903 | #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1 | 4572 | #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1 |
3904 | #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2 | 4573 | #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2 |
3905 | #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3 | 4574 | #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3 |
4575 | #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4 | ||
3906 | 4576 | ||
3907 | //Byte aligned defintion for BIOS usage | 4577 | //Byte aligned definition for BIOS usage |
3908 | #define ATOM_S0_CRT1_MONOb0 0x01 | 4578 | #define ATOM_S0_CRT1_MONOb0 0x01 |
3909 | #define ATOM_S0_CRT1_COLORb0 0x02 | 4579 | #define ATOM_S0_CRT1_COLORb0 0x02 |
3910 | #define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0) | 4580 | #define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0) |
@@ -3970,7 +4640,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 | |||
3970 | #define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L | 4640 | #define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L |
3971 | 4641 | ||
3972 | 4642 | ||
3973 | //Byte aligned defintion for BIOS usage | 4643 | //Byte aligned definition for BIOS usage |
3974 | #define ATOM_S2_TV1_STANDARD_MASKb0 0x0F | 4644 | #define ATOM_S2_TV1_STANDARD_MASKb0 0x0F |
3975 | #define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF | 4645 | #define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF |
3976 | #define ATOM_S2_DEVICE_DPMS_STATEb2 0x01 | 4646 | #define ATOM_S2_DEVICE_DPMS_STATEb2 0x01 |
@@ -4020,7 +4690,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 | |||
4020 | #define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L | 4690 | #define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L |
4021 | #define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L | 4691 | #define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L |
4022 | 4692 | ||
4023 | //Byte aligned defintion for BIOS usage | 4693 | //Byte aligned definition for BIOS usage |
4024 | #define ATOM_S3_CRT1_ACTIVEb0 0x01 | 4694 | #define ATOM_S3_CRT1_ACTIVEb0 0x01 |
4025 | #define ATOM_S3_LCD1_ACTIVEb0 0x02 | 4695 | #define ATOM_S3_LCD1_ACTIVEb0 0x02 |
4026 | #define ATOM_S3_TV1_ACTIVEb0 0x04 | 4696 | #define ATOM_S3_TV1_ACTIVEb0 0x04 |
@@ -4056,7 +4726,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 | |||
4056 | #define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L | 4726 | #define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L |
4057 | #define ATOM_S4_LCD1_REFRESH_SHIFT 8 | 4727 | #define ATOM_S4_LCD1_REFRESH_SHIFT 8 |
4058 | 4728 | ||
4059 | //Byte aligned defintion for BIOS usage | 4729 | //Byte aligned definition for BIOS usage |
4060 | #define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF | 4730 | #define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF |
4061 | #define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0 | 4731 | #define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0 |
4062 | #define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0 | 4732 | #define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0 |
@@ -4135,7 +4805,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 | |||
4135 | #define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L | 4805 | #define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L |
4136 | #define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L | 4806 | #define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L |
4137 | 4807 | ||
4138 | //Byte aligned defintion for BIOS usage | 4808 | //Byte aligned definition for BIOS usage |
4139 | #define ATOM_S6_DEVICE_CHANGEb0 0x01 | 4809 | #define ATOM_S6_DEVICE_CHANGEb0 0x01 |
4140 | #define ATOM_S6_SCALER_CHANGEb0 0x02 | 4810 | #define ATOM_S6_SCALER_CHANGEb0 0x02 |
4141 | #define ATOM_S6_LID_CHANGEb0 0x04 | 4811 | #define ATOM_S6_LID_CHANGEb0 0x04 |
@@ -4376,7 +5046,7 @@ typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION | |||
4376 | 5046 | ||
4377 | typedef struct _MEMORY_CLEAN_UP_PARAMETERS | 5047 | typedef struct _MEMORY_CLEAN_UP_PARAMETERS |
4378 | { | 5048 | { |
4379 | USHORT usMemoryStart; //in 8Kb boundry, offset from memory base address | 5049 | USHORT usMemoryStart; //in 8Kb boundary, offset from memory base address |
4380 | USHORT usMemorySize; //8Kb blocks aligned | 5050 | USHORT usMemorySize; //8Kb blocks aligned |
4381 | }MEMORY_CLEAN_UP_PARAMETERS; | 5051 | }MEMORY_CLEAN_UP_PARAMETERS; |
4382 | #define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS | 5052 | #define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS |
@@ -4529,7 +5199,8 @@ typedef struct _ATOM_INIT_REG_BLOCK{ | |||
4529 | #define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1) | 5199 | #define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1) |
4530 | #define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1) | 5200 | #define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1) |
4531 | #define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1) | 5201 | #define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1) |
4532 | 5202 | //#define ACCESS_MCIODEBUGIND 0x40 //defined in BIOS code | |
5203 | #define ACCESS_PLACEHOLDER 0x80 | ||
4533 | 5204 | ||
4534 | typedef struct _ATOM_MC_INIT_PARAM_TABLE | 5205 | typedef struct _ATOM_MC_INIT_PARAM_TABLE |
4535 | { | 5206 | { |
@@ -4554,6 +5225,10 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE | |||
4554 | #define _32Mx32 0x33 | 5225 | #define _32Mx32 0x33 |
4555 | #define _64Mx8 0x41 | 5226 | #define _64Mx8 0x41 |
4556 | #define _64Mx16 0x42 | 5227 | #define _64Mx16 0x42 |
5228 | #define _64Mx32 0x43 | ||
5229 | #define _128Mx8 0x51 | ||
5230 | #define _128Mx16 0x52 | ||
5231 | #define _256Mx8 0x61 | ||
4557 | 5232 | ||
4558 | #define SAMSUNG 0x1 | 5233 | #define SAMSUNG 0x1 |
4559 | #define INFINEON 0x2 | 5234 | #define INFINEON 0x2 |
@@ -4569,10 +5244,11 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE | |||
4569 | #define QIMONDA INFINEON | 5244 | #define QIMONDA INFINEON |
4570 | #define PROMOS MOSEL | 5245 | #define PROMOS MOSEL |
4571 | #define KRETON INFINEON | 5246 | #define KRETON INFINEON |
5247 | #define ELIXIR NANYA | ||
4572 | 5248 | ||
4573 | /////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// | 5249 | /////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// |
4574 | 5250 | ||
4575 | #define UCODE_ROM_START_ADDRESS 0x1c000 | 5251 | #define UCODE_ROM_START_ADDRESS 0x1b800 |
4576 | #define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode | 5252 | #define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode |
4577 | 5253 | ||
4578 | //uCode block header for reference | 5254 | //uCode block header for reference |
@@ -4903,7 +5579,34 @@ typedef struct _ATOM_VRAM_MODULE_V6 | |||
4903 | ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock | 5579 | ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock |
4904 | }ATOM_VRAM_MODULE_V6; | 5580 | }ATOM_VRAM_MODULE_V6; |
4905 | 5581 | ||
4906 | 5582 | typedef struct _ATOM_VRAM_MODULE_V7 | |
5583 | { | ||
5584 | // Design Specific Values | ||
5585 | ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP | ||
5586 | USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7 | ||
5587 | USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) | ||
5588 | USHORT usReserved; | ||
5589 | UCHAR ucExtMemoryID; // Current memory module ID | ||
5590 | UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5 | ||
5591 | UCHAR ucChannelNum; // Number of mem. channels supported in this module | ||
5592 | UCHAR ucChannelWidth; // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT | ||
5593 | UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 | ||
5594 | UCHAR ucReserve; // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now. | ||
5595 | UCHAR ucMisc; // RANK_OF_THISMEMORY etc. | ||
5596 | UCHAR ucVREFI; // Not used. | ||
5597 | UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2. | ||
5598 | UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble | ||
5599 | UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros | ||
5600 | UCHAR ucReserved[3]; | ||
5601 | // Memory Module specific values | ||
5602 | USHORT usEMRS2Value; // EMRS2/MR2 Value. | ||
5603 | USHORT usEMRS3Value; // EMRS3/MR3 Value. | ||
5604 | UCHAR ucMemoryVenderID; // [7:4] Revision, [3:0] Vendor code | ||
5605 | UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) | ||
5606 | UCHAR ucFIFODepth; // FIFO depth can be detected during vendor detection, here is hardcoded per memory | ||
5607 | UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth | ||
5608 | char strMemPNString[20]; // part number end with '0'. | ||
5609 | }ATOM_VRAM_MODULE_V7; | ||
4907 | 5610 | ||
4908 | typedef struct _ATOM_VRAM_INFO_V2 | 5611 | typedef struct _ATOM_VRAM_INFO_V2 |
4909 | { | 5612 | { |
@@ -4942,6 +5645,20 @@ typedef struct _ATOM_VRAM_INFO_V4 | |||
4942 | // ATOM_INIT_REG_BLOCK aMemAdjust; | 5645 | // ATOM_INIT_REG_BLOCK aMemAdjust; |
4943 | }ATOM_VRAM_INFO_V4; | 5646 | }ATOM_VRAM_INFO_V4; |
4944 | 5647 | ||
5648 | typedef struct _ATOM_VRAM_INFO_HEADER_V2_1 | ||
5649 | { | ||
5650 | ATOM_COMMON_TABLE_HEADER sHeader; | ||
5651 | USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting | ||
5652 | USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting | ||
5653 | USHORT usReserved[4]; | ||
5654 | UCHAR ucNumOfVRAMModule; // indicate number of VRAM module | ||
5655 | UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list | ||
5656 | UCHAR ucVramModuleVer; // indicate ATOM_VRAM_MODUE version | ||
5657 | UCHAR ucReserved; | ||
5658 | ATOM_VRAM_MODULE_V7 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; | ||
5659 | }ATOM_VRAM_INFO_HEADER_V2_1; | ||
5660 | |||
5661 | |||
4945 | typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO | 5662 | typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO |
4946 | { | 5663 | { |
4947 | ATOM_COMMON_TABLE_HEADER sHeader; | 5664 | ATOM_COMMON_TABLE_HEADER sHeader; |
@@ -5182,6 +5899,16 @@ typedef struct _ASIC_TRANSMITTER_INFO | |||
5182 | UCHAR ucReserved; | 5899 | UCHAR ucReserved; |
5183 | }ASIC_TRANSMITTER_INFO; | 5900 | }ASIC_TRANSMITTER_INFO; |
5184 | 5901 | ||
5902 | #define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE 0x01 | ||
5903 | #define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE 0x02 | ||
5904 | #define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK 0xc4 | ||
5905 | #define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A 0x00 | ||
5906 | #define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B 0x04 | ||
5907 | #define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C 0x40 | ||
5908 | #define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D 0x44 | ||
5909 | #define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E 0x80 | ||
5910 | #define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F 0x84 | ||
5911 | |||
5185 | typedef struct _ASIC_ENCODER_INFO | 5912 | typedef struct _ASIC_ENCODER_INFO |
5186 | { | 5913 | { |
5187 | UCHAR ucEncoderID; | 5914 | UCHAR ucEncoderID; |
@@ -5284,6 +6011,28 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS | |||
5284 | /* /obselete */ | 6011 | /* /obselete */ |
5285 | #define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS | 6012 | #define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS |
5286 | 6013 | ||
6014 | |||
6015 | typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2 | ||
6016 | { | ||
6017 | USHORT usExtEncoderObjId; // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION | ||
6018 | UCHAR ucAuxId; | ||
6019 | UCHAR ucAction; | ||
6020 | UCHAR ucSinkType; // Iput and Output parameters. | ||
6021 | UCHAR ucHPDId; // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION | ||
6022 | UCHAR ucReserved[2]; | ||
6023 | }DP_ENCODER_SERVICE_PARAMETERS_V2; | ||
6024 | |||
6025 | typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2 | ||
6026 | { | ||
6027 | DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam; | ||
6028 | PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam; | ||
6029 | }DP_ENCODER_SERVICE_PS_ALLOCATION_V2; | ||
6030 | |||
6031 | // ucAction | ||
6032 | #define DP_SERVICE_V2_ACTION_GET_SINK_TYPE 0x01 | ||
6033 | #define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION 0x02 | ||
6034 | |||
6035 | |||
5287 | // DP_TRAINING_TABLE | 6036 | // DP_TRAINING_TABLE |
5288 | #define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR | 6037 | #define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR |
5289 | #define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 ) | 6038 | #define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 ) |
@@ -5339,6 +6088,7 @@ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2 | |||
5339 | #define SELECT_DCIO_IMPCAL 4 | 6088 | #define SELECT_DCIO_IMPCAL 4 |
5340 | #define SELECT_DCIO_DIG 6 | 6089 | #define SELECT_DCIO_DIG 6 |
5341 | #define SELECT_CRTC_PIXEL_RATE 7 | 6090 | #define SELECT_CRTC_PIXEL_RATE 7 |
6091 | #define SELECT_VGA_BLK 8 | ||
5342 | 6092 | ||
5343 | /****************************************************************************/ | 6093 | /****************************************************************************/ |
5344 | //Portion VI: Definitinos for vbios MC scratch registers that driver used | 6094 | //Portion VI: Definitinos for vbios MC scratch registers that driver used |
@@ -5744,7 +6494,17 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER | |||
5744 | #define ATOM_PP_THERMALCONTROLLER_ADT7473 9 | 6494 | #define ATOM_PP_THERMALCONTROLLER_ADT7473 9 |
5745 | #define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11 | 6495 | #define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11 |
5746 | #define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12 | 6496 | #define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12 |
6497 | #define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen. | ||
6498 | #define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally | ||
6499 | #define ATOM_PP_THERMALCONTROLLER_NISLANDS 15 | ||
6500 | |||
6501 | // Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal. | ||
6502 | // We probably should reserve the bit 0x80 for this use. | ||
6503 | // To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here). | ||
6504 | // The driver can pick the correct internal controller based on the ASIC. | ||
6505 | |||
5747 | #define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller | 6506 | #define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller |
6507 | #define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller | ||
5748 | 6508 | ||
5749 | typedef struct _ATOM_PPLIB_STATE | 6509 | typedef struct _ATOM_PPLIB_STATE |
5750 | { | 6510 | { |
@@ -5841,6 +6601,29 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 | |||
5841 | USHORT usExtendendedHeaderOffset; | 6601 | USHORT usExtendendedHeaderOffset; |
5842 | } ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; | 6602 | } ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; |
5843 | 6603 | ||
6604 | typedef struct _ATOM_PPLIB_POWERPLAYTABLE4 | ||
6605 | { | ||
6606 | ATOM_PPLIB_POWERPLAYTABLE3 basicTable3; | ||
6607 | ULONG ulGoldenPPID; // PPGen use only | ||
6608 | ULONG ulGoldenRevision; // PPGen use only | ||
6609 | USHORT usVddcDependencyOnSCLKOffset; | ||
6610 | USHORT usVddciDependencyOnMCLKOffset; | ||
6611 | USHORT usVddcDependencyOnMCLKOffset; | ||
6612 | USHORT usMaxClockVoltageOnDCOffset; | ||
6613 | USHORT usReserved[2]; | ||
6614 | } ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4; | ||
6615 | |||
6616 | typedef struct _ATOM_PPLIB_POWERPLAYTABLE5 | ||
6617 | { | ||
6618 | ATOM_PPLIB_POWERPLAYTABLE4 basicTable4; | ||
6619 | ULONG ulTDPLimit; | ||
6620 | ULONG ulNearTDPLimit; | ||
6621 | ULONG ulSQRampingThreshold; | ||
6622 | USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table | ||
6623 | ULONG ulCACLeakage; // TBD, this parameter is still under discussion. Change to ulReserved if not needed. | ||
6624 | ULONG ulReserved; | ||
6625 | } ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5; | ||
6626 | |||
5844 | //// ATOM_PPLIB_NONCLOCK_INFO::usClassification | 6627 | //// ATOM_PPLIB_NONCLOCK_INFO::usClassification |
5845 | #define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 | 6628 | #define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 |
5846 | #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 | 6629 | #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 |
@@ -5864,6 +6647,10 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 | |||
5864 | #define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 | 6647 | #define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 |
5865 | #define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 | 6648 | #define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 |
5866 | 6649 | ||
6650 | //// ATOM_PPLIB_NONCLOCK_INFO::usClassification2 | ||
6651 | #define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001 | ||
6652 | #define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002 | ||
6653 | |||
5867 | //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings | 6654 | //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings |
5868 | #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 | 6655 | #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 |
5869 | #define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002 | 6656 | #define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002 |
@@ -5896,9 +6683,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 | |||
5896 | #define ATOM_PPLIB_M3ARB_MASK 0x00060000 | 6683 | #define ATOM_PPLIB_M3ARB_MASK 0x00060000 |
5897 | #define ATOM_PPLIB_M3ARB_SHIFT 17 | 6684 | #define ATOM_PPLIB_M3ARB_SHIFT 17 |
5898 | 6685 | ||
6686 | #define ATOM_PPLIB_ENABLE_DRR 0x00080000 | ||
6687 | |||
6688 | // remaining 16 bits are reserved | ||
6689 | typedef struct _ATOM_PPLIB_THERMAL_STATE | ||
6690 | { | ||
6691 | UCHAR ucMinTemperature; | ||
6692 | UCHAR ucMaxTemperature; | ||
6693 | UCHAR ucThermalAction; | ||
6694 | }ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE; | ||
6695 | |||
5899 | // Contained in an array starting at the offset | 6696 | // Contained in an array starting at the offset |
5900 | // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. | 6697 | // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. |
5901 | // referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex | 6698 | // referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex |
6699 | #define ATOM_PPLIB_NONCLOCKINFO_VER1 12 | ||
6700 | #define ATOM_PPLIB_NONCLOCKINFO_VER2 24 | ||
5902 | typedef struct _ATOM_PPLIB_NONCLOCK_INFO | 6701 | typedef struct _ATOM_PPLIB_NONCLOCK_INFO |
5903 | { | 6702 | { |
5904 | USHORT usClassification; | 6703 | USHORT usClassification; |
@@ -5906,15 +6705,15 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO | |||
5906 | UCHAR ucMaxTemperature; | 6705 | UCHAR ucMaxTemperature; |
5907 | ULONG ulCapsAndSettings; | 6706 | ULONG ulCapsAndSettings; |
5908 | UCHAR ucRequiredPower; | 6707 | UCHAR ucRequiredPower; |
5909 | UCHAR ucUnused1[3]; | 6708 | USHORT usClassification2; |
6709 | ULONG ulVCLK; | ||
6710 | ULONG ulDCLK; | ||
6711 | UCHAR ucUnused[5]; | ||
5910 | } ATOM_PPLIB_NONCLOCK_INFO; | 6712 | } ATOM_PPLIB_NONCLOCK_INFO; |
5911 | 6713 | ||
5912 | // Contained in an array starting at the offset | 6714 | // Contained in an array starting at the offset |
5913 | // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. | 6715 | // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. |
5914 | // referenced from ATOM_PPLIB_STATE::ucClockStateIndices | 6716 | // referenced from ATOM_PPLIB_STATE::ucClockStateIndices |
5915 | #define ATOM_PPLIB_NONCLOCKINFO_VER1 12 | ||
5916 | #define ATOM_PPLIB_NONCLOCKINFO_VER2 24 | ||
5917 | |||
5918 | typedef struct _ATOM_PPLIB_R600_CLOCK_INFO | 6717 | typedef struct _ATOM_PPLIB_R600_CLOCK_INFO |
5919 | { | 6718 | { |
5920 | USHORT usEngineClockLow; | 6719 | USHORT usEngineClockLow; |
@@ -5985,10 +6784,97 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO | |||
5985 | #define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 | 6784 | #define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 |
5986 | #define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 | 6785 | #define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 |
5987 | 6786 | ||
6787 | typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{ | ||
6788 | USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10khz | ||
6789 | UCHAR ucEngineClockHigh; //clockfrequency >> 16. | ||
6790 | UCHAR vddcIndex; //2-bit vddc index; | ||
6791 | UCHAR leakage; //please use 8-bit absolute value, not the 6-bit % value | ||
6792 | //please initalize to 0 | ||
6793 | UCHAR rsv; | ||
6794 | //please initalize to 0 | ||
6795 | USHORT rsv1; | ||
6796 | //please initialize to 0s | ||
6797 | ULONG rsv2[2]; | ||
6798 | }ATOM_PPLIB_SUMO_CLOCK_INFO; | ||
6799 | |||
6800 | |||
6801 | |||
6802 | typedef struct _ATOM_PPLIB_STATE_V2 | ||
6803 | { | ||
6804 | //number of valid dpm levels in this state; Driver uses it to calculate the whole | ||
6805 | //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR) | ||
6806 | UCHAR ucNumDPMLevels; | ||
6807 | |||
6808 | //a index to the array of nonClockInfos | ||
6809 | UCHAR nonClockInfoIndex; | ||
6810 | /** | ||
6811 | * Driver will read the first ucNumDPMLevels in this array | ||
6812 | */ | ||
6813 | UCHAR clockInfoIndex[1]; | ||
6814 | } ATOM_PPLIB_STATE_V2; | ||
6815 | |||
6816 | typedef struct StateArray{ | ||
6817 | //how many states we have | ||
6818 | UCHAR ucNumEntries; | ||
6819 | |||
6820 | ATOM_PPLIB_STATE_V2 states[1]; | ||
6821 | }StateArray; | ||
6822 | |||
6823 | |||
6824 | typedef struct ClockInfoArray{ | ||
6825 | //how many clock levels we have | ||
6826 | UCHAR ucNumEntries; | ||
6827 | |||
6828 | //sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO) | ||
6829 | UCHAR ucEntrySize; | ||
6830 | |||
6831 | //this is for Sumo | ||
6832 | ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1]; | ||
6833 | }ClockInfoArray; | ||
6834 | |||
6835 | typedef struct NonClockInfoArray{ | ||
6836 | |||
6837 | //how many non-clock levels we have. normally should be same as number of states | ||
6838 | UCHAR ucNumEntries; | ||
6839 | //sizeof(ATOM_PPLIB_NONCLOCK_INFO) | ||
6840 | UCHAR ucEntrySize; | ||
6841 | |||
6842 | ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1]; | ||
6843 | }NonClockInfoArray; | ||
6844 | |||
6845 | typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record | ||
6846 | { | ||
6847 | USHORT usClockLow; | ||
6848 | UCHAR ucClockHigh; | ||
6849 | USHORT usVoltage; | ||
6850 | }ATOM_PPLIB_Clock_Voltage_Dependency_Record; | ||
6851 | |||
6852 | typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table | ||
6853 | { | ||
6854 | UCHAR ucNumEntries; // Number of entries. | ||
6855 | ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries. | ||
6856 | }ATOM_PPLIB_Clock_Voltage_Dependency_Table; | ||
6857 | |||
6858 | typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record | ||
6859 | { | ||
6860 | USHORT usSclkLow; | ||
6861 | UCHAR ucSclkHigh; | ||
6862 | USHORT usMclkLow; | ||
6863 | UCHAR ucMclkHigh; | ||
6864 | USHORT usVddc; | ||
6865 | USHORT usVddci; | ||
6866 | }ATOM_PPLIB_Clock_Voltage_Limit_Record; | ||
6867 | |||
6868 | typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table | ||
6869 | { | ||
6870 | UCHAR ucNumEntries; // Number of entries. | ||
6871 | ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries. | ||
6872 | }ATOM_PPLIB_Clock_Voltage_Limit_Table; | ||
6873 | |||
5988 | /**************************************************************************/ | 6874 | /**************************************************************************/ |
5989 | 6875 | ||
5990 | 6876 | ||
5991 | // Following definitions are for compatiblity issue in different SW components. | 6877 | // Following definitions are for compatibility issue in different SW components. |
5992 | #define ATOM_MASTER_DATA_TABLE_REVISION 0x01 | 6878 | #define ATOM_MASTER_DATA_TABLE_REVISION 0x01 |
5993 | #define Object_Info Object_Header | 6879 | #define Object_Info Object_Header |
5994 | #define AdjustARB_SEQ MC_InitParameter | 6880 | #define AdjustARB_SEQ MC_InitParameter |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index cd0290f946cf..9541995e4b21 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -48,29 +48,29 @@ static void atombios_overscan_setup(struct drm_crtc *crtc, | |||
48 | 48 | ||
49 | switch (radeon_crtc->rmx_type) { | 49 | switch (radeon_crtc->rmx_type) { |
50 | case RMX_CENTER: | 50 | case RMX_CENTER: |
51 | args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | 51 | args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); |
52 | args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | 52 | args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); |
53 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | 53 | args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); |
54 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | 54 | args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); |
55 | break; | 55 | break; |
56 | case RMX_ASPECT: | 56 | case RMX_ASPECT: |
57 | a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; | 57 | a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; |
58 | a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; | 58 | a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; |
59 | 59 | ||
60 | if (a1 > a2) { | 60 | if (a1 > a2) { |
61 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | 61 | args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); |
62 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | 62 | args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); |
63 | } else if (a2 > a1) { | 63 | } else if (a2 > a1) { |
64 | args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | 64 | args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); |
65 | args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | 65 | args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); |
66 | } | 66 | } |
67 | break; | 67 | break; |
68 | case RMX_FULL: | 68 | case RMX_FULL: |
69 | default: | 69 | default: |
70 | args.usOverscanRight = radeon_crtc->h_border; | 70 | args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border); |
71 | args.usOverscanLeft = radeon_crtc->h_border; | 71 | args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border); |
72 | args.usOverscanBottom = radeon_crtc->v_border; | 72 | args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border); |
73 | args.usOverscanTop = radeon_crtc->v_border; | 73 | args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border); |
74 | break; | 74 | break; |
75 | } | 75 | } |
76 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 76 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
@@ -253,7 +253,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
253 | case DRM_MODE_DPMS_SUSPEND: | 253 | case DRM_MODE_DPMS_SUSPEND: |
254 | case DRM_MODE_DPMS_OFF: | 254 | case DRM_MODE_DPMS_OFF: |
255 | drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); | 255 | drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); |
256 | atombios_blank_crtc(crtc, ATOM_ENABLE); | 256 | if (radeon_crtc->enabled) |
257 | atombios_blank_crtc(crtc, ATOM_ENABLE); | ||
257 | if (ASIC_IS_DCE3(rdev)) | 258 | if (ASIC_IS_DCE3(rdev)) |
258 | atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); | 259 | atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); |
259 | atombios_enable_crtc(crtc, ATOM_DISABLE); | 260 | atombios_enable_crtc(crtc, ATOM_DISABLE); |
@@ -398,65 +399,106 @@ static void atombios_disable_ss(struct drm_crtc *crtc) | |||
398 | 399 | ||
399 | 400 | ||
400 | union atom_enable_ss { | 401 | union atom_enable_ss { |
401 | ENABLE_LVDS_SS_PARAMETERS legacy; | 402 | ENABLE_LVDS_SS_PARAMETERS lvds_ss; |
403 | ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2; | ||
402 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; | 404 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; |
405 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2; | ||
406 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3; | ||
403 | }; | 407 | }; |
404 | 408 | ||
405 | static void atombios_enable_ss(struct drm_crtc *crtc) | 409 | static void atombios_crtc_program_ss(struct drm_crtc *crtc, |
410 | int enable, | ||
411 | int pll_id, | ||
412 | struct radeon_atom_ss *ss) | ||
406 | { | 413 | { |
407 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
408 | struct drm_device *dev = crtc->dev; | 414 | struct drm_device *dev = crtc->dev; |
409 | struct radeon_device *rdev = dev->dev_private; | 415 | struct radeon_device *rdev = dev->dev_private; |
410 | struct drm_encoder *encoder = NULL; | ||
411 | struct radeon_encoder *radeon_encoder = NULL; | ||
412 | struct radeon_encoder_atom_dig *dig = NULL; | ||
413 | int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); | 416 | int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); |
414 | union atom_enable_ss args; | 417 | union atom_enable_ss args; |
415 | uint16_t percentage = 0; | ||
416 | uint8_t type = 0, step = 0, delay = 0, range = 0; | ||
417 | 418 | ||
418 | /* XXX add ss support for DCE4 */ | 419 | memset(&args, 0, sizeof(args)); |
419 | if (ASIC_IS_DCE4(rdev)) | ||
420 | return; | ||
421 | 420 | ||
422 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 421 | if (ASIC_IS_DCE5(rdev)) { |
423 | if (encoder->crtc == crtc) { | 422 | args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0); |
424 | radeon_encoder = to_radeon_encoder(encoder); | 423 | args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; |
425 | /* only enable spread spectrum on LVDS */ | 424 | switch (pll_id) { |
426 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 425 | case ATOM_PPLL1: |
427 | dig = radeon_encoder->enc_priv; | 426 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; |
428 | if (dig && dig->ss) { | 427 | args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); |
429 | percentage = dig->ss->percentage; | 428 | args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); |
430 | type = dig->ss->type; | 429 | break; |
431 | step = dig->ss->step; | 430 | case ATOM_PPLL2: |
432 | delay = dig->ss->delay; | 431 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL; |
433 | range = dig->ss->range; | 432 | args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); |
434 | } else | 433 | args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); |
435 | return; | 434 | break; |
436 | } else | 435 | case ATOM_DCPLL: |
437 | return; | 436 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL; |
437 | args.v3.usSpreadSpectrumAmount = cpu_to_le16(0); | ||
438 | args.v3.usSpreadSpectrumStep = cpu_to_le16(0); | ||
438 | break; | 439 | break; |
440 | case ATOM_PPLL_INVALID: | ||
441 | return; | ||
439 | } | 442 | } |
440 | } | 443 | args.v3.ucEnable = enable; |
441 | 444 | if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK)) | |
442 | if (!radeon_encoder) | 445 | args.v3.ucEnable = ATOM_DISABLE; |
443 | return; | 446 | } else if (ASIC_IS_DCE4(rdev)) { |
444 | 447 | args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); | |
445 | memset(&args, 0, sizeof(args)); | 448 | args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; |
446 | if (ASIC_IS_AVIVO(rdev)) { | 449 | switch (pll_id) { |
447 | args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage); | 450 | case ATOM_PPLL1: |
448 | args.v1.ucSpreadSpectrumType = type; | 451 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; |
449 | args.v1.ucSpreadSpectrumStep = step; | 452 | args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); |
450 | args.v1.ucSpreadSpectrumDelay = delay; | 453 | args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); |
451 | args.v1.ucSpreadSpectrumRange = range; | 454 | break; |
452 | args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; | 455 | case ATOM_PPLL2: |
453 | args.v1.ucEnable = ATOM_ENABLE; | 456 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL; |
457 | args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); | ||
458 | args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); | ||
459 | break; | ||
460 | case ATOM_DCPLL: | ||
461 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL; | ||
462 | args.v2.usSpreadSpectrumAmount = cpu_to_le16(0); | ||
463 | args.v2.usSpreadSpectrumStep = cpu_to_le16(0); | ||
464 | break; | ||
465 | case ATOM_PPLL_INVALID: | ||
466 | return; | ||
467 | } | ||
468 | args.v2.ucEnable = enable; | ||
469 | if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK)) | ||
470 | args.v2.ucEnable = ATOM_DISABLE; | ||
471 | } else if (ASIC_IS_DCE3(rdev)) { | ||
472 | args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); | ||
473 | args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; | ||
474 | args.v1.ucSpreadSpectrumStep = ss->step; | ||
475 | args.v1.ucSpreadSpectrumDelay = ss->delay; | ||
476 | args.v1.ucSpreadSpectrumRange = ss->range; | ||
477 | args.v1.ucPpll = pll_id; | ||
478 | args.v1.ucEnable = enable; | ||
479 | } else if (ASIC_IS_AVIVO(rdev)) { | ||
480 | if ((enable == ATOM_DISABLE) || (ss->percentage == 0) || | ||
481 | (ss->type & ATOM_EXTERNAL_SS_MASK)) { | ||
482 | atombios_disable_ss(crtc); | ||
483 | return; | ||
484 | } | ||
485 | args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); | ||
486 | args.lvds_ss_2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; | ||
487 | args.lvds_ss_2.ucSpreadSpectrumStep = ss->step; | ||
488 | args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay; | ||
489 | args.lvds_ss_2.ucSpreadSpectrumRange = ss->range; | ||
490 | args.lvds_ss_2.ucEnable = enable; | ||
454 | } else { | 491 | } else { |
455 | args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage); | 492 | if ((enable == ATOM_DISABLE) || (ss->percentage == 0) || |
456 | args.legacy.ucSpreadSpectrumType = type; | 493 | (ss->type & ATOM_EXTERNAL_SS_MASK)) { |
457 | args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; | 494 | atombios_disable_ss(crtc); |
458 | args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; | 495 | return; |
459 | args.legacy.ucEnable = ATOM_ENABLE; | 496 | } |
497 | args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); | ||
498 | args.lvds_ss.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; | ||
499 | args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2; | ||
500 | args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4; | ||
501 | args.lvds_ss.ucEnable = enable; | ||
460 | } | 502 | } |
461 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 503 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
462 | } | 504 | } |
@@ -468,12 +510,15 @@ union adjust_pixel_clock { | |||
468 | 510 | ||
469 | static u32 atombios_adjust_pll(struct drm_crtc *crtc, | 511 | static u32 atombios_adjust_pll(struct drm_crtc *crtc, |
470 | struct drm_display_mode *mode, | 512 | struct drm_display_mode *mode, |
471 | struct radeon_pll *pll) | 513 | struct radeon_pll *pll, |
514 | bool ss_enabled, | ||
515 | struct radeon_atom_ss *ss) | ||
472 | { | 516 | { |
473 | struct drm_device *dev = crtc->dev; | 517 | struct drm_device *dev = crtc->dev; |
474 | struct radeon_device *rdev = dev->dev_private; | 518 | struct radeon_device *rdev = dev->dev_private; |
475 | struct drm_encoder *encoder = NULL; | 519 | struct drm_encoder *encoder = NULL; |
476 | struct radeon_encoder *radeon_encoder = NULL; | 520 | struct radeon_encoder *radeon_encoder = NULL; |
521 | struct drm_connector *connector = NULL; | ||
477 | u32 adjusted_clock = mode->clock; | 522 | u32 adjusted_clock = mode->clock; |
478 | int encoder_mode = 0; | 523 | int encoder_mode = 0; |
479 | u32 dp_clock = mode->clock; | 524 | u32 dp_clock = mode->clock; |
@@ -482,19 +527,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
482 | /* reset the pll flags */ | 527 | /* reset the pll flags */ |
483 | pll->flags = 0; | 528 | pll->flags = 0; |
484 | 529 | ||
485 | /* select the PLL algo */ | ||
486 | if (ASIC_IS_AVIVO(rdev)) { | ||
487 | if (radeon_new_pll == 0) | ||
488 | pll->algo = PLL_ALGO_LEGACY; | ||
489 | else | ||
490 | pll->algo = PLL_ALGO_NEW; | ||
491 | } else { | ||
492 | if (radeon_new_pll == 1) | ||
493 | pll->algo = PLL_ALGO_NEW; | ||
494 | else | ||
495 | pll->algo = PLL_ALGO_LEGACY; | ||
496 | } | ||
497 | |||
498 | if (ASIC_IS_AVIVO(rdev)) { | 530 | if (ASIC_IS_AVIVO(rdev)) { |
499 | if ((rdev->family == CHIP_RS600) || | 531 | if ((rdev->family == CHIP_RS600) || |
500 | (rdev->family == CHIP_RS690) || | 532 | (rdev->family == CHIP_RS690) || |
@@ -506,6 +538,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
506 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 538 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
507 | else | 539 | else |
508 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 540 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
541 | |||
542 | if (rdev->family < CHIP_RV770) | ||
543 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | ||
509 | } else { | 544 | } else { |
510 | pll->flags |= RADEON_PLL_LEGACY; | 545 | pll->flags |= RADEON_PLL_LEGACY; |
511 | 546 | ||
@@ -513,15 +548,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
513 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 548 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
514 | else | 549 | else |
515 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 550 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
516 | |||
517 | } | 551 | } |
518 | 552 | ||
519 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 553 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
520 | if (encoder->crtc == crtc) { | 554 | if (encoder->crtc == crtc) { |
521 | radeon_encoder = to_radeon_encoder(encoder); | 555 | radeon_encoder = to_radeon_encoder(encoder); |
556 | connector = radeon_get_connector_for_encoder(encoder); | ||
557 | if (connector) | ||
558 | bpc = connector->display_info.bpc; | ||
522 | encoder_mode = atombios_get_encoder_mode(encoder); | 559 | encoder_mode = atombios_get_encoder_mode(encoder); |
523 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { | 560 | if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || |
524 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 561 | radeon_encoder_is_dp_bridge(encoder)) { |
525 | if (connector) { | 562 | if (connector) { |
526 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 563 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
527 | struct radeon_connector_atom_dig *dig_connector = | 564 | struct radeon_connector_atom_dig *dig_connector = |
@@ -531,29 +568,26 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
531 | } | 568 | } |
532 | } | 569 | } |
533 | 570 | ||
571 | /* use recommended ref_div for ss */ | ||
572 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
573 | if (ss_enabled) { | ||
574 | if (ss->refdiv) { | ||
575 | pll->flags |= RADEON_PLL_USE_REF_DIV; | ||
576 | pll->reference_div = ss->refdiv; | ||
577 | if (ASIC_IS_AVIVO(rdev)) | ||
578 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
579 | } | ||
580 | } | ||
581 | } | ||
582 | |||
534 | if (ASIC_IS_AVIVO(rdev)) { | 583 | if (ASIC_IS_AVIVO(rdev)) { |
535 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | 584 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ |
536 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | 585 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) |
537 | adjusted_clock = mode->clock * 2; | 586 | adjusted_clock = mode->clock * 2; |
538 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { | 587 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
539 | pll->algo = PLL_ALGO_LEGACY; | ||
540 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; | 588 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; |
541 | } | 589 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) |
542 | /* There is some evidence (often anecdotal) that RV515/RV620 LVDS | 590 | pll->flags |= RADEON_PLL_IS_LCD; |
543 | * (on some boards at least) prefers the legacy algo. I'm not | ||
544 | * sure whether this should handled generically or on a | ||
545 | * case-by-case quirk basis. Both algos should work fine in the | ||
546 | * majority of cases. | ||
547 | */ | ||
548 | if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) && | ||
549 | ((rdev->family == CHIP_RV515) || | ||
550 | (rdev->family == CHIP_RV620))) { | ||
551 | /* allow the user to overrride just in case */ | ||
552 | if (radeon_new_pll == 1) | ||
553 | pll->algo = PLL_ALGO_NEW; | ||
554 | else | ||
555 | pll->algo = PLL_ALGO_LEGACY; | ||
556 | } | ||
557 | } else { | 591 | } else { |
558 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 592 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
559 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | 593 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
@@ -588,14 +622,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
588 | args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); | 622 | args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); |
589 | args.v1.ucTransmitterID = radeon_encoder->encoder_id; | 623 | args.v1.ucTransmitterID = radeon_encoder->encoder_id; |
590 | args.v1.ucEncodeMode = encoder_mode; | 624 | args.v1.ucEncodeMode = encoder_mode; |
591 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | 625 | if (ss_enabled && ss->percentage) |
592 | /* may want to enable SS on DP eventually */ | ||
593 | /* args.v1.ucConfig |= | ||
594 | ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/ | ||
595 | } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { | ||
596 | args.v1.ucConfig |= | 626 | args.v1.ucConfig |= |
597 | ADJUST_DISPLAY_CONFIG_SS_ENABLE; | 627 | ADJUST_DISPLAY_CONFIG_SS_ENABLE; |
598 | } | ||
599 | 628 | ||
600 | atom_execute_table(rdev->mode_info.atom_context, | 629 | atom_execute_table(rdev->mode_info.atom_context, |
601 | index, (uint32_t *)&args); | 630 | index, (uint32_t *)&args); |
@@ -606,13 +635,13 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
606 | args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; | 635 | args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; |
607 | args.v3.sInput.ucEncodeMode = encoder_mode; | 636 | args.v3.sInput.ucEncodeMode = encoder_mode; |
608 | args.v3.sInput.ucDispPllConfig = 0; | 637 | args.v3.sInput.ucDispPllConfig = 0; |
609 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 638 | if (ss_enabled && ss->percentage) |
639 | args.v3.sInput.ucDispPllConfig |= | ||
640 | DISPPLL_CONFIG_SS_ENABLE; | ||
641 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) || | ||
642 | radeon_encoder_is_dp_bridge(encoder)) { | ||
610 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 643 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
611 | |||
612 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | 644 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { |
613 | /* may want to enable SS on DP/eDP eventually */ | ||
614 | /*args.v3.sInput.ucDispPllConfig |= | ||
615 | DISPPLL_CONFIG_SS_ENABLE;*/ | ||
616 | args.v3.sInput.ucDispPllConfig |= | 645 | args.v3.sInput.ucDispPllConfig |= |
617 | DISPPLL_CONFIG_COHERENT_MODE; | 646 | DISPPLL_CONFIG_COHERENT_MODE; |
618 | /* 16200 or 27000 */ | 647 | /* 16200 or 27000 */ |
@@ -632,31 +661,33 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
632 | } | 661 | } |
633 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 662 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
634 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | 663 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { |
635 | /* may want to enable SS on DP/eDP eventually */ | ||
636 | /*args.v3.sInput.ucDispPllConfig |= | ||
637 | DISPPLL_CONFIG_SS_ENABLE;*/ | ||
638 | args.v3.sInput.ucDispPllConfig |= | 664 | args.v3.sInput.ucDispPllConfig |= |
639 | DISPPLL_CONFIG_COHERENT_MODE; | 665 | DISPPLL_CONFIG_COHERENT_MODE; |
640 | /* 16200 or 27000 */ | 666 | /* 16200 or 27000 */ |
641 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | 667 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); |
642 | } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { | 668 | } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) { |
643 | /* want to enable SS on LVDS eventually */ | ||
644 | /*args.v3.sInput.ucDispPllConfig |= | ||
645 | DISPPLL_CONFIG_SS_ENABLE;*/ | ||
646 | } else { | ||
647 | if (mode->clock > 165000) | 669 | if (mode->clock > 165000) |
648 | args.v3.sInput.ucDispPllConfig |= | 670 | args.v3.sInput.ucDispPllConfig |= |
649 | DISPPLL_CONFIG_DUAL_LINK; | 671 | DISPPLL_CONFIG_DUAL_LINK; |
650 | } | 672 | } |
651 | } | 673 | } |
674 | if (radeon_encoder_is_dp_bridge(encoder)) { | ||
675 | struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); | ||
676 | struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); | ||
677 | args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id; | ||
678 | } else | ||
679 | args.v3.sInput.ucExtTransmitterID = 0; | ||
680 | |||
652 | atom_execute_table(rdev->mode_info.atom_context, | 681 | atom_execute_table(rdev->mode_info.atom_context, |
653 | index, (uint32_t *)&args); | 682 | index, (uint32_t *)&args); |
654 | adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; | 683 | adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; |
655 | if (args.v3.sOutput.ucRefDiv) { | 684 | if (args.v3.sOutput.ucRefDiv) { |
685 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
656 | pll->flags |= RADEON_PLL_USE_REF_DIV; | 686 | pll->flags |= RADEON_PLL_USE_REF_DIV; |
657 | pll->reference_div = args.v3.sOutput.ucRefDiv; | 687 | pll->reference_div = args.v3.sOutput.ucRefDiv; |
658 | } | 688 | } |
659 | if (args.v3.sOutput.ucPostDiv) { | 689 | if (args.v3.sOutput.ucPostDiv) { |
690 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
660 | pll->flags |= RADEON_PLL_USE_POST_DIV; | 691 | pll->flags |= RADEON_PLL_USE_POST_DIV; |
661 | pll->post_div = args.v3.sOutput.ucPostDiv; | 692 | pll->post_div = args.v3.sOutput.ucPostDiv; |
662 | } | 693 | } |
@@ -680,9 +711,14 @@ union set_pixel_clock { | |||
680 | PIXEL_CLOCK_PARAMETERS_V2 v2; | 711 | PIXEL_CLOCK_PARAMETERS_V2 v2; |
681 | PIXEL_CLOCK_PARAMETERS_V3 v3; | 712 | PIXEL_CLOCK_PARAMETERS_V3 v3; |
682 | PIXEL_CLOCK_PARAMETERS_V5 v5; | 713 | PIXEL_CLOCK_PARAMETERS_V5 v5; |
714 | PIXEL_CLOCK_PARAMETERS_V6 v6; | ||
683 | }; | 715 | }; |
684 | 716 | ||
685 | static void atombios_crtc_set_dcpll(struct drm_crtc *crtc) | 717 | /* on DCE5, make sure the voltage is high enough to support the |
718 | * required disp clk. | ||
719 | */ | ||
720 | static void atombios_crtc_set_dcpll(struct drm_crtc *crtc, | ||
721 | u32 dispclk) | ||
686 | { | 722 | { |
687 | struct drm_device *dev = crtc->dev; | 723 | struct drm_device *dev = crtc->dev; |
688 | struct radeon_device *rdev = dev->dev_private; | 724 | struct radeon_device *rdev = dev->dev_private; |
@@ -705,9 +741,16 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc) | |||
705 | * SetPixelClock provides the dividers | 741 | * SetPixelClock provides the dividers |
706 | */ | 742 | */ |
707 | args.v5.ucCRTC = ATOM_CRTC_INVALID; | 743 | args.v5.ucCRTC = ATOM_CRTC_INVALID; |
708 | args.v5.usPixelClock = rdev->clock.default_dispclk; | 744 | args.v5.usPixelClock = cpu_to_le16(dispclk); |
709 | args.v5.ucPpll = ATOM_DCPLL; | 745 | args.v5.ucPpll = ATOM_DCPLL; |
710 | break; | 746 | break; |
747 | case 6: | ||
748 | /* if the default dcpll clock is specified, | ||
749 | * SetPixelClock provides the dividers | ||
750 | */ | ||
751 | args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk); | ||
752 | args.v6.ucPpll = ATOM_DCPLL; | ||
753 | break; | ||
711 | default: | 754 | default: |
712 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); | 755 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); |
713 | return; | 756 | return; |
@@ -729,7 +772,10 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc, | |||
729 | u32 ref_div, | 772 | u32 ref_div, |
730 | u32 fb_div, | 773 | u32 fb_div, |
731 | u32 frac_fb_div, | 774 | u32 frac_fb_div, |
732 | u32 post_div) | 775 | u32 post_div, |
776 | int bpc, | ||
777 | bool ss_enabled, | ||
778 | struct radeon_atom_ss *ss) | ||
733 | { | 779 | { |
734 | struct drm_device *dev = crtc->dev; | 780 | struct drm_device *dev = crtc->dev; |
735 | struct radeon_device *rdev = dev->dev_private; | 781 | struct radeon_device *rdev = dev->dev_private; |
@@ -776,6 +822,8 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc, | |||
776 | args.v3.ucPostDiv = post_div; | 822 | args.v3.ucPostDiv = post_div; |
777 | args.v3.ucPpll = pll_id; | 823 | args.v3.ucPpll = pll_id; |
778 | args.v3.ucMiscInfo = (pll_id << 2); | 824 | args.v3.ucMiscInfo = (pll_id << 2); |
825 | if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) | ||
826 | args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC; | ||
779 | args.v3.ucTransmitterId = encoder_id; | 827 | args.v3.ucTransmitterId = encoder_id; |
780 | args.v3.ucEncoderMode = encoder_mode; | 828 | args.v3.ucEncoderMode = encoder_mode; |
781 | break; | 829 | break; |
@@ -787,10 +835,50 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc, | |||
787 | args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); | 835 | args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); |
788 | args.v5.ucPostDiv = post_div; | 836 | args.v5.ucPostDiv = post_div; |
789 | args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */ | 837 | args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */ |
838 | if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) | ||
839 | args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC; | ||
840 | switch (bpc) { | ||
841 | case 8: | ||
842 | default: | ||
843 | args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP; | ||
844 | break; | ||
845 | case 10: | ||
846 | args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP; | ||
847 | break; | ||
848 | } | ||
790 | args.v5.ucTransmitterID = encoder_id; | 849 | args.v5.ucTransmitterID = encoder_id; |
791 | args.v5.ucEncoderMode = encoder_mode; | 850 | args.v5.ucEncoderMode = encoder_mode; |
792 | args.v5.ucPpll = pll_id; | 851 | args.v5.ucPpll = pll_id; |
793 | break; | 852 | break; |
853 | case 6: | ||
854 | args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id; | ||
855 | args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10); | ||
856 | args.v6.ucRefDiv = ref_div; | ||
857 | args.v6.usFbDiv = cpu_to_le16(fb_div); | ||
858 | args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); | ||
859 | args.v6.ucPostDiv = post_div; | ||
860 | args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */ | ||
861 | if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) | ||
862 | args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC; | ||
863 | switch (bpc) { | ||
864 | case 8: | ||
865 | default: | ||
866 | args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP; | ||
867 | break; | ||
868 | case 10: | ||
869 | args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP; | ||
870 | break; | ||
871 | case 12: | ||
872 | args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP; | ||
873 | break; | ||
874 | case 16: | ||
875 | args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP; | ||
876 | break; | ||
877 | } | ||
878 | args.v6.ucTransmitterID = encoder_id; | ||
879 | args.v6.ucEncoderMode = encoder_mode; | ||
880 | args.v6.ucPpll = pll_id; | ||
881 | break; | ||
794 | default: | 882 | default: |
795 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); | 883 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); |
796 | return; | 884 | return; |
@@ -816,6 +904,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
816 | struct radeon_pll *pll; | 904 | struct radeon_pll *pll; |
817 | u32 adjusted_clock; | 905 | u32 adjusted_clock; |
818 | int encoder_mode = 0; | 906 | int encoder_mode = 0; |
907 | struct radeon_atom_ss ss; | ||
908 | bool ss_enabled = false; | ||
909 | int bpc = 8; | ||
819 | 910 | ||
820 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 911 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
821 | if (encoder->crtc == crtc) { | 912 | if (encoder->crtc == crtc) { |
@@ -842,54 +933,166 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
842 | break; | 933 | break; |
843 | } | 934 | } |
844 | 935 | ||
936 | if (radeon_encoder->active_device & | ||
937 | (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { | ||
938 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
939 | struct drm_connector *connector = | ||
940 | radeon_get_connector_for_encoder(encoder); | ||
941 | struct radeon_connector *radeon_connector = | ||
942 | to_radeon_connector(connector); | ||
943 | struct radeon_connector_atom_dig *dig_connector = | ||
944 | radeon_connector->con_priv; | ||
945 | int dp_clock; | ||
946 | bpc = connector->display_info.bpc; | ||
947 | |||
948 | switch (encoder_mode) { | ||
949 | case ATOM_ENCODER_MODE_DP: | ||
950 | /* DP/eDP */ | ||
951 | dp_clock = dig_connector->dp_clock / 10; | ||
952 | if (ASIC_IS_DCE4(rdev)) | ||
953 | ss_enabled = | ||
954 | radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
955 | ASIC_INTERNAL_SS_ON_DP, | ||
956 | dp_clock); | ||
957 | else { | ||
958 | if (dp_clock == 16200) { | ||
959 | ss_enabled = | ||
960 | radeon_atombios_get_ppll_ss_info(rdev, &ss, | ||
961 | ATOM_DP_SS_ID2); | ||
962 | if (!ss_enabled) | ||
963 | ss_enabled = | ||
964 | radeon_atombios_get_ppll_ss_info(rdev, &ss, | ||
965 | ATOM_DP_SS_ID1); | ||
966 | } else | ||
967 | ss_enabled = | ||
968 | radeon_atombios_get_ppll_ss_info(rdev, &ss, | ||
969 | ATOM_DP_SS_ID1); | ||
970 | } | ||
971 | break; | ||
972 | case ATOM_ENCODER_MODE_LVDS: | ||
973 | if (ASIC_IS_DCE4(rdev)) | ||
974 | ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
975 | dig->lcd_ss_id, | ||
976 | mode->clock / 10); | ||
977 | else | ||
978 | ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss, | ||
979 | dig->lcd_ss_id); | ||
980 | break; | ||
981 | case ATOM_ENCODER_MODE_DVI: | ||
982 | if (ASIC_IS_DCE4(rdev)) | ||
983 | ss_enabled = | ||
984 | radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
985 | ASIC_INTERNAL_SS_ON_TMDS, | ||
986 | mode->clock / 10); | ||
987 | break; | ||
988 | case ATOM_ENCODER_MODE_HDMI: | ||
989 | if (ASIC_IS_DCE4(rdev)) | ||
990 | ss_enabled = | ||
991 | radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
992 | ASIC_INTERNAL_SS_ON_HDMI, | ||
993 | mode->clock / 10); | ||
994 | break; | ||
995 | default: | ||
996 | break; | ||
997 | } | ||
998 | } | ||
999 | |||
845 | /* adjust pixel clock as needed */ | 1000 | /* adjust pixel clock as needed */ |
846 | adjusted_clock = atombios_adjust_pll(crtc, mode, pll); | 1001 | adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); |
1002 | |||
1003 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | ||
1004 | /* TV seems to prefer the legacy algo on some boards */ | ||
1005 | radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | ||
1006 | &ref_div, &post_div); | ||
1007 | else if (ASIC_IS_AVIVO(rdev)) | ||
1008 | radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | ||
1009 | &ref_div, &post_div); | ||
1010 | else | ||
1011 | radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | ||
1012 | &ref_div, &post_div); | ||
847 | 1013 | ||
848 | radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | 1014 | atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss); |
849 | &ref_div, &post_div); | ||
850 | 1015 | ||
851 | atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, | 1016 | atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, |
852 | encoder_mode, radeon_encoder->encoder_id, mode->clock, | 1017 | encoder_mode, radeon_encoder->encoder_id, mode->clock, |
853 | ref_div, fb_div, frac_fb_div, post_div); | 1018 | ref_div, fb_div, frac_fb_div, post_div, bpc, ss_enabled, &ss); |
1019 | |||
1020 | if (ss_enabled) { | ||
1021 | /* calculate ss amount and step size */ | ||
1022 | if (ASIC_IS_DCE4(rdev)) { | ||
1023 | u32 step_size; | ||
1024 | u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000; | ||
1025 | ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; | ||
1026 | ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & | ||
1027 | ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK; | ||
1028 | if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) | ||
1029 | step_size = (4 * amount * ref_div * (ss.rate * 2048)) / | ||
1030 | (125 * 25 * pll->reference_freq / 100); | ||
1031 | else | ||
1032 | step_size = (2 * amount * ref_div * (ss.rate * 2048)) / | ||
1033 | (125 * 25 * pll->reference_freq / 100); | ||
1034 | ss.step = step_size; | ||
1035 | } | ||
854 | 1036 | ||
1037 | atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss); | ||
1038 | } | ||
855 | } | 1039 | } |
856 | 1040 | ||
857 | static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, | 1041 | static int dce4_crtc_do_set_base(struct drm_crtc *crtc, |
858 | struct drm_framebuffer *old_fb) | 1042 | struct drm_framebuffer *fb, |
1043 | int x, int y, int atomic) | ||
859 | { | 1044 | { |
860 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 1045 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
861 | struct drm_device *dev = crtc->dev; | 1046 | struct drm_device *dev = crtc->dev; |
862 | struct radeon_device *rdev = dev->dev_private; | 1047 | struct radeon_device *rdev = dev->dev_private; |
863 | struct radeon_framebuffer *radeon_fb; | 1048 | struct radeon_framebuffer *radeon_fb; |
1049 | struct drm_framebuffer *target_fb; | ||
864 | struct drm_gem_object *obj; | 1050 | struct drm_gem_object *obj; |
865 | struct radeon_bo *rbo; | 1051 | struct radeon_bo *rbo; |
866 | uint64_t fb_location; | 1052 | uint64_t fb_location; |
867 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; | 1053 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; |
1054 | u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE); | ||
1055 | u32 tmp, viewport_w, viewport_h; | ||
868 | int r; | 1056 | int r; |
869 | 1057 | ||
870 | /* no fb bound */ | 1058 | /* no fb bound */ |
871 | if (!crtc->fb) { | 1059 | if (!atomic && !crtc->fb) { |
872 | DRM_DEBUG_KMS("No FB bound\n"); | 1060 | DRM_DEBUG_KMS("No FB bound\n"); |
873 | return 0; | 1061 | return 0; |
874 | } | 1062 | } |
875 | 1063 | ||
876 | radeon_fb = to_radeon_framebuffer(crtc->fb); | 1064 | if (atomic) { |
1065 | radeon_fb = to_radeon_framebuffer(fb); | ||
1066 | target_fb = fb; | ||
1067 | } | ||
1068 | else { | ||
1069 | radeon_fb = to_radeon_framebuffer(crtc->fb); | ||
1070 | target_fb = crtc->fb; | ||
1071 | } | ||
877 | 1072 | ||
878 | /* Pin framebuffer & get tilling informations */ | 1073 | /* If atomic, assume fb object is pinned & idle & fenced and |
1074 | * just update base pointers | ||
1075 | */ | ||
879 | obj = radeon_fb->obj; | 1076 | obj = radeon_fb->obj; |
880 | rbo = obj->driver_private; | 1077 | rbo = gem_to_radeon_bo(obj); |
881 | r = radeon_bo_reserve(rbo, false); | 1078 | r = radeon_bo_reserve(rbo, false); |
882 | if (unlikely(r != 0)) | 1079 | if (unlikely(r != 0)) |
883 | return r; | 1080 | return r; |
884 | r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); | 1081 | |
885 | if (unlikely(r != 0)) { | 1082 | if (atomic) |
886 | radeon_bo_unreserve(rbo); | 1083 | fb_location = radeon_bo_gpu_offset(rbo); |
887 | return -EINVAL; | 1084 | else { |
1085 | r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); | ||
1086 | if (unlikely(r != 0)) { | ||
1087 | radeon_bo_unreserve(rbo); | ||
1088 | return -EINVAL; | ||
1089 | } | ||
888 | } | 1090 | } |
1091 | |||
889 | radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); | 1092 | radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); |
890 | radeon_bo_unreserve(rbo); | 1093 | radeon_bo_unreserve(rbo); |
891 | 1094 | ||
892 | switch (crtc->fb->bits_per_pixel) { | 1095 | switch (target_fb->bits_per_pixel) { |
893 | case 8: | 1096 | case 8: |
894 | fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) | | 1097 | fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) | |
895 | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED)); | 1098 | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED)); |
@@ -901,15 +1104,21 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
901 | case 16: | 1104 | case 16: |
902 | fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | | 1105 | fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | |
903 | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); | 1106 | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); |
1107 | #ifdef __BIG_ENDIAN | ||
1108 | fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); | ||
1109 | #endif | ||
904 | break; | 1110 | break; |
905 | case 24: | 1111 | case 24: |
906 | case 32: | 1112 | case 32: |
907 | fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | | 1113 | fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | |
908 | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); | 1114 | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); |
1115 | #ifdef __BIG_ENDIAN | ||
1116 | fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); | ||
1117 | #endif | ||
909 | break; | 1118 | break; |
910 | default: | 1119 | default: |
911 | DRM_ERROR("Unsupported screen depth %d\n", | 1120 | DRM_ERROR("Unsupported screen depth %d\n", |
912 | crtc->fb->bits_per_pixel); | 1121 | target_fb->bits_per_pixel); |
913 | return -EINVAL; | 1122 | return -EINVAL; |
914 | } | 1123 | } |
915 | 1124 | ||
@@ -950,15 +1159,16 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
950 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | 1159 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, |
951 | (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); | 1160 | (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); |
952 | WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); | 1161 | WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); |
1162 | WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); | ||
953 | 1163 | ||
954 | WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); | 1164 | WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); |
955 | WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); | 1165 | WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); |
956 | WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0); | 1166 | WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0); |
957 | WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0); | 1167 | WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0); |
958 | WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width); | 1168 | WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); |
959 | WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height); | 1169 | WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height); |
960 | 1170 | ||
961 | fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); | 1171 | fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8); |
962 | WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); | 1172 | WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); |
963 | WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); | 1173 | WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); |
964 | 1174 | ||
@@ -968,18 +1178,23 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
968 | y &= ~1; | 1178 | y &= ~1; |
969 | WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, | 1179 | WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, |
970 | (x << 16) | y); | 1180 | (x << 16) | y); |
1181 | viewport_w = crtc->mode.hdisplay; | ||
1182 | viewport_h = (crtc->mode.vdisplay + 1) & ~1; | ||
971 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, | 1183 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, |
972 | (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); | 1184 | (viewport_w << 16) | viewport_h); |
973 | 1185 | ||
974 | if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) | 1186 | /* pageflip setup */ |
975 | WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, | 1187 | /* make sure flip is at vb rather than hb */ |
976 | EVERGREEN_INTERLEAVE_EN); | 1188 | tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset); |
977 | else | 1189 | tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN; |
978 | WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); | 1190 | WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); |
1191 | |||
1192 | /* set pageflip to happen anywhere in vblank interval */ | ||
1193 | WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); | ||
979 | 1194 | ||
980 | if (old_fb && old_fb != crtc->fb) { | 1195 | if (!atomic && fb && fb != crtc->fb) { |
981 | radeon_fb = to_radeon_framebuffer(old_fb); | 1196 | radeon_fb = to_radeon_framebuffer(fb); |
982 | rbo = radeon_fb->obj->driver_private; | 1197 | rbo = gem_to_radeon_bo(radeon_fb->obj); |
983 | r = radeon_bo_reserve(rbo, false); | 1198 | r = radeon_bo_reserve(rbo, false); |
984 | if (unlikely(r != 0)) | 1199 | if (unlikely(r != 0)) |
985 | return r; | 1200 | return r; |
@@ -993,8 +1208,9 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
993 | return 0; | 1208 | return 0; |
994 | } | 1209 | } |
995 | 1210 | ||
996 | static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, | 1211 | static int avivo_crtc_do_set_base(struct drm_crtc *crtc, |
997 | struct drm_framebuffer *old_fb) | 1212 | struct drm_framebuffer *fb, |
1213 | int x, int y, int atomic) | ||
998 | { | 1214 | { |
999 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 1215 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
1000 | struct drm_device *dev = crtc->dev; | 1216 | struct drm_device *dev = crtc->dev; |
@@ -1002,33 +1218,50 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
1002 | struct radeon_framebuffer *radeon_fb; | 1218 | struct radeon_framebuffer *radeon_fb; |
1003 | struct drm_gem_object *obj; | 1219 | struct drm_gem_object *obj; |
1004 | struct radeon_bo *rbo; | 1220 | struct radeon_bo *rbo; |
1221 | struct drm_framebuffer *target_fb; | ||
1005 | uint64_t fb_location; | 1222 | uint64_t fb_location; |
1006 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; | 1223 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; |
1224 | u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE; | ||
1225 | u32 tmp, viewport_w, viewport_h; | ||
1007 | int r; | 1226 | int r; |
1008 | 1227 | ||
1009 | /* no fb bound */ | 1228 | /* no fb bound */ |
1010 | if (!crtc->fb) { | 1229 | if (!atomic && !crtc->fb) { |
1011 | DRM_DEBUG_KMS("No FB bound\n"); | 1230 | DRM_DEBUG_KMS("No FB bound\n"); |
1012 | return 0; | 1231 | return 0; |
1013 | } | 1232 | } |
1014 | 1233 | ||
1015 | radeon_fb = to_radeon_framebuffer(crtc->fb); | 1234 | if (atomic) { |
1235 | radeon_fb = to_radeon_framebuffer(fb); | ||
1236 | target_fb = fb; | ||
1237 | } | ||
1238 | else { | ||
1239 | radeon_fb = to_radeon_framebuffer(crtc->fb); | ||
1240 | target_fb = crtc->fb; | ||
1241 | } | ||
1016 | 1242 | ||
1017 | /* Pin framebuffer & get tilling informations */ | ||
1018 | obj = radeon_fb->obj; | 1243 | obj = radeon_fb->obj; |
1019 | rbo = obj->driver_private; | 1244 | rbo = gem_to_radeon_bo(obj); |
1020 | r = radeon_bo_reserve(rbo, false); | 1245 | r = radeon_bo_reserve(rbo, false); |
1021 | if (unlikely(r != 0)) | 1246 | if (unlikely(r != 0)) |
1022 | return r; | 1247 | return r; |
1023 | r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); | 1248 | |
1024 | if (unlikely(r != 0)) { | 1249 | /* If atomic, assume fb object is pinned & idle & fenced and |
1025 | radeon_bo_unreserve(rbo); | 1250 | * just update base pointers |
1026 | return -EINVAL; | 1251 | */ |
1252 | if (atomic) | ||
1253 | fb_location = radeon_bo_gpu_offset(rbo); | ||
1254 | else { | ||
1255 | r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); | ||
1256 | if (unlikely(r != 0)) { | ||
1257 | radeon_bo_unreserve(rbo); | ||
1258 | return -EINVAL; | ||
1259 | } | ||
1027 | } | 1260 | } |
1028 | radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); | 1261 | radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); |
1029 | radeon_bo_unreserve(rbo); | 1262 | radeon_bo_unreserve(rbo); |
1030 | 1263 | ||
1031 | switch (crtc->fb->bits_per_pixel) { | 1264 | switch (target_fb->bits_per_pixel) { |
1032 | case 8: | 1265 | case 8: |
1033 | fb_format = | 1266 | fb_format = |
1034 | AVIVO_D1GRPH_CONTROL_DEPTH_8BPP | | 1267 | AVIVO_D1GRPH_CONTROL_DEPTH_8BPP | |
@@ -1043,16 +1276,22 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
1043 | fb_format = | 1276 | fb_format = |
1044 | AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | | 1277 | AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | |
1045 | AVIVO_D1GRPH_CONTROL_16BPP_RGB565; | 1278 | AVIVO_D1GRPH_CONTROL_16BPP_RGB565; |
1279 | #ifdef __BIG_ENDIAN | ||
1280 | fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT; | ||
1281 | #endif | ||
1046 | break; | 1282 | break; |
1047 | case 24: | 1283 | case 24: |
1048 | case 32: | 1284 | case 32: |
1049 | fb_format = | 1285 | fb_format = |
1050 | AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | | 1286 | AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | |
1051 | AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; | 1287 | AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; |
1288 | #ifdef __BIG_ENDIAN | ||
1289 | fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT; | ||
1290 | #endif | ||
1052 | break; | 1291 | break; |
1053 | default: | 1292 | default: |
1054 | DRM_ERROR("Unsupported screen depth %d\n", | 1293 | DRM_ERROR("Unsupported screen depth %d\n", |
1055 | crtc->fb->bits_per_pixel); | 1294 | target_fb->bits_per_pixel); |
1056 | return -EINVAL; | 1295 | return -EINVAL; |
1057 | } | 1296 | } |
1058 | 1297 | ||
@@ -1088,15 +1327,17 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
1088 | WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + | 1327 | WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + |
1089 | radeon_crtc->crtc_offset, (u32) fb_location); | 1328 | radeon_crtc->crtc_offset, (u32) fb_location); |
1090 | WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); | 1329 | WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); |
1330 | if (rdev->family >= CHIP_R600) | ||
1331 | WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); | ||
1091 | 1332 | ||
1092 | WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); | 1333 | WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); |
1093 | WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); | 1334 | WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); |
1094 | WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0); | 1335 | WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0); |
1095 | WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0); | 1336 | WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0); |
1096 | WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width); | 1337 | WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); |
1097 | WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height); | 1338 | WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height); |
1098 | 1339 | ||
1099 | fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); | 1340 | fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8); |
1100 | WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); | 1341 | WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); |
1101 | WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1); | 1342 | WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1); |
1102 | 1343 | ||
@@ -1106,18 +1347,23 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
1106 | y &= ~1; | 1347 | y &= ~1; |
1107 | WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset, | 1348 | WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset, |
1108 | (x << 16) | y); | 1349 | (x << 16) | y); |
1350 | viewport_w = crtc->mode.hdisplay; | ||
1351 | viewport_h = (crtc->mode.vdisplay + 1) & ~1; | ||
1109 | WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, | 1352 | WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, |
1110 | (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); | 1353 | (viewport_w << 16) | viewport_h); |
1111 | 1354 | ||
1112 | if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) | 1355 | /* pageflip setup */ |
1113 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, | 1356 | /* make sure flip is at vb rather than hb */ |
1114 | AVIVO_D1MODE_INTERLEAVE_EN); | 1357 | tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset); |
1115 | else | 1358 | tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN; |
1116 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); | 1359 | WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); |
1117 | 1360 | ||
1118 | if (old_fb && old_fb != crtc->fb) { | 1361 | /* set pageflip to happen anywhere in vblank interval */ |
1119 | radeon_fb = to_radeon_framebuffer(old_fb); | 1362 | WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); |
1120 | rbo = radeon_fb->obj->driver_private; | 1363 | |
1364 | if (!atomic && fb && fb != crtc->fb) { | ||
1365 | radeon_fb = to_radeon_framebuffer(fb); | ||
1366 | rbo = gem_to_radeon_bo(radeon_fb->obj); | ||
1121 | r = radeon_bo_reserve(rbo, false); | 1367 | r = radeon_bo_reserve(rbo, false); |
1122 | if (unlikely(r != 0)) | 1368 | if (unlikely(r != 0)) |
1123 | return r; | 1369 | return r; |
@@ -1138,11 +1384,26 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
1138 | struct radeon_device *rdev = dev->dev_private; | 1384 | struct radeon_device *rdev = dev->dev_private; |
1139 | 1385 | ||
1140 | if (ASIC_IS_DCE4(rdev)) | 1386 | if (ASIC_IS_DCE4(rdev)) |
1141 | return evergreen_crtc_set_base(crtc, x, y, old_fb); | 1387 | return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0); |
1142 | else if (ASIC_IS_AVIVO(rdev)) | 1388 | else if (ASIC_IS_AVIVO(rdev)) |
1143 | return avivo_crtc_set_base(crtc, x, y, old_fb); | 1389 | return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0); |
1144 | else | 1390 | else |
1145 | return radeon_crtc_set_base(crtc, x, y, old_fb); | 1391 | return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0); |
1392 | } | ||
1393 | |||
1394 | int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, | ||
1395 | struct drm_framebuffer *fb, | ||
1396 | int x, int y, enum mode_set_atomic state) | ||
1397 | { | ||
1398 | struct drm_device *dev = crtc->dev; | ||
1399 | struct radeon_device *rdev = dev->dev_private; | ||
1400 | |||
1401 | if (ASIC_IS_DCE4(rdev)) | ||
1402 | return dce4_crtc_do_set_base(crtc, fb, x, y, 1); | ||
1403 | else if (ASIC_IS_AVIVO(rdev)) | ||
1404 | return avivo_crtc_do_set_base(crtc, fb, x, y, 1); | ||
1405 | else | ||
1406 | return radeon_crtc_do_set_base(crtc, fb, x, y, 1); | ||
1146 | } | 1407 | } |
1147 | 1408 | ||
1148 | /* properly set additional regs when using atombios */ | 1409 | /* properly set additional regs when using atombios */ |
@@ -1179,11 +1440,19 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) | |||
1179 | uint32_t pll_in_use = 0; | 1440 | uint32_t pll_in_use = 0; |
1180 | 1441 | ||
1181 | if (ASIC_IS_DCE4(rdev)) { | 1442 | if (ASIC_IS_DCE4(rdev)) { |
1182 | /* if crtc is driving DP and we have an ext clock, use that */ | ||
1183 | list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { | 1443 | list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { |
1184 | if (test_encoder->crtc && (test_encoder->crtc == crtc)) { | 1444 | if (test_encoder->crtc && (test_encoder->crtc == crtc)) { |
1445 | /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock, | ||
1446 | * depending on the asic: | ||
1447 | * DCE4: PPLL or ext clock | ||
1448 | * DCE5: DCPLL or ext clock | ||
1449 | * | ||
1450 | * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip | ||
1451 | * PPLL/DCPLL programming and only program the DP DTO for the | ||
1452 | * crtc virtual pixel clock. | ||
1453 | */ | ||
1185 | if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) { | 1454 | if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) { |
1186 | if (rdev->clock.dp_extclk) | 1455 | if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk) |
1187 | return ATOM_PPLL_INVALID; | 1456 | return ATOM_PPLL_INVALID; |
1188 | } | 1457 | } |
1189 | } | 1458 | } |
@@ -1230,12 +1499,20 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
1230 | } | 1499 | } |
1231 | } | 1500 | } |
1232 | 1501 | ||
1233 | atombios_disable_ss(crtc); | ||
1234 | /* always set DCPLL */ | 1502 | /* always set DCPLL */ |
1235 | if (ASIC_IS_DCE4(rdev)) | 1503 | if (ASIC_IS_DCE4(rdev)) { |
1236 | atombios_crtc_set_dcpll(crtc); | 1504 | struct radeon_atom_ss ss; |
1505 | bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
1506 | ASIC_INTERNAL_SS_ON_DCPLL, | ||
1507 | rdev->clock.default_dispclk); | ||
1508 | if (ss_enabled) | ||
1509 | atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss); | ||
1510 | /* XXX: DCE5, make sure voltage, dispclk is high enough */ | ||
1511 | atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk); | ||
1512 | if (ss_enabled) | ||
1513 | atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss); | ||
1514 | } | ||
1237 | atombios_crtc_set_pll(crtc, adjusted_mode); | 1515 | atombios_crtc_set_pll(crtc, adjusted_mode); |
1238 | atombios_enable_ss(crtc); | ||
1239 | 1516 | ||
1240 | if (ASIC_IS_DCE4(rdev)) | 1517 | if (ASIC_IS_DCE4(rdev)) |
1241 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | 1518 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); |
@@ -1291,6 +1568,8 @@ static void atombios_crtc_commit(struct drm_crtc *crtc) | |||
1291 | static void atombios_crtc_disable(struct drm_crtc *crtc) | 1568 | static void atombios_crtc_disable(struct drm_crtc *crtc) |
1292 | { | 1569 | { |
1293 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 1570 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
1571 | struct radeon_atom_ss ss; | ||
1572 | |||
1294 | atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | 1573 | atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); |
1295 | 1574 | ||
1296 | switch (radeon_crtc->pll_id) { | 1575 | switch (radeon_crtc->pll_id) { |
@@ -1298,7 +1577,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) | |||
1298 | case ATOM_PPLL2: | 1577 | case ATOM_PPLL2: |
1299 | /* disable the ppll */ | 1578 | /* disable the ppll */ |
1300 | atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, | 1579 | atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, |
1301 | 0, 0, ATOM_DISABLE, 0, 0, 0, 0); | 1580 | 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); |
1302 | break; | 1581 | break; |
1303 | default: | 1582 | default: |
1304 | break; | 1583 | break; |
@@ -1311,6 +1590,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = { | |||
1311 | .mode_fixup = atombios_crtc_mode_fixup, | 1590 | .mode_fixup = atombios_crtc_mode_fixup, |
1312 | .mode_set = atombios_crtc_mode_set, | 1591 | .mode_set = atombios_crtc_mode_set, |
1313 | .mode_set_base = atombios_crtc_set_base, | 1592 | .mode_set_base = atombios_crtc_set_base, |
1593 | .mode_set_base_atomic = atombios_crtc_set_base_atomic, | ||
1314 | .prepare = atombios_crtc_prepare, | 1594 | .prepare = atombios_crtc_prepare, |
1315 | .commit = atombios_crtc_commit, | 1595 | .commit = atombios_crtc_commit, |
1316 | .load_lut = radeon_crtc_load_lut, | 1596 | .load_lut = radeon_crtc_load_lut, |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 4e7778d44b8d..8c0f9e36ff8e 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -43,158 +43,242 @@ static char *pre_emph_names[] = { | |||
43 | "0dB", "3.5dB", "6dB", "9.5dB" | 43 | "0dB", "3.5dB", "6dB", "9.5dB" |
44 | }; | 44 | }; |
45 | 45 | ||
46 | static const int dp_clocks[] = { | 46 | /***** radeon AUX functions *****/ |
47 | 54000, /* 1 lane, 1.62 Ghz */ | 47 | union aux_channel_transaction { |
48 | 90000, /* 1 lane, 2.70 Ghz */ | 48 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; |
49 | 108000, /* 2 lane, 1.62 Ghz */ | 49 | PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; |
50 | 180000, /* 2 lane, 2.70 Ghz */ | ||
51 | 216000, /* 4 lane, 1.62 Ghz */ | ||
52 | 360000, /* 4 lane, 2.70 Ghz */ | ||
53 | }; | 50 | }; |
54 | 51 | ||
55 | static const int num_dp_clocks = sizeof(dp_clocks) / sizeof(int); | 52 | static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, |
53 | u8 *send, int send_bytes, | ||
54 | u8 *recv, int recv_size, | ||
55 | u8 delay, u8 *ack) | ||
56 | { | ||
57 | struct drm_device *dev = chan->dev; | ||
58 | struct radeon_device *rdev = dev->dev_private; | ||
59 | union aux_channel_transaction args; | ||
60 | int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); | ||
61 | unsigned char *base; | ||
62 | int recv_bytes; | ||
63 | |||
64 | memset(&args, 0, sizeof(args)); | ||
56 | 65 | ||
57 | /* common helper functions */ | 66 | base = (unsigned char *)rdev->mode_info.atom_context->scratch; |
58 | static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock) | 67 | |
68 | memcpy(base, send, send_bytes); | ||
69 | |||
70 | args.v1.lpAuxRequest = 0; | ||
71 | args.v1.lpDataOut = 16; | ||
72 | args.v1.ucDataOutLen = 0; | ||
73 | args.v1.ucChannelID = chan->rec.i2c_id; | ||
74 | args.v1.ucDelay = delay / 10; | ||
75 | if (ASIC_IS_DCE4(rdev)) | ||
76 | args.v2.ucHPD_ID = chan->rec.hpd; | ||
77 | |||
78 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
79 | |||
80 | *ack = args.v1.ucReplyStatus; | ||
81 | |||
82 | /* timeout */ | ||
83 | if (args.v1.ucReplyStatus == 1) { | ||
84 | DRM_DEBUG_KMS("dp_aux_ch timeout\n"); | ||
85 | return -ETIMEDOUT; | ||
86 | } | ||
87 | |||
88 | /* flags not zero */ | ||
89 | if (args.v1.ucReplyStatus == 2) { | ||
90 | DRM_DEBUG_KMS("dp_aux_ch flags not zero\n"); | ||
91 | return -EBUSY; | ||
92 | } | ||
93 | |||
94 | /* error */ | ||
95 | if (args.v1.ucReplyStatus == 3) { | ||
96 | DRM_DEBUG_KMS("dp_aux_ch error\n"); | ||
97 | return -EIO; | ||
98 | } | ||
99 | |||
100 | recv_bytes = args.v1.ucDataOutLen; | ||
101 | if (recv_bytes > recv_size) | ||
102 | recv_bytes = recv_size; | ||
103 | |||
104 | if (recv && recv_size) | ||
105 | memcpy(recv, base + 16, recv_bytes); | ||
106 | |||
107 | return recv_bytes; | ||
108 | } | ||
109 | |||
110 | static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, | ||
111 | u16 address, u8 *send, u8 send_bytes, u8 delay) | ||
59 | { | 112 | { |
60 | int i; | 113 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; |
61 | u8 max_link_bw; | 114 | int ret; |
62 | u8 max_lane_count; | 115 | u8 msg[20]; |
116 | int msg_bytes = send_bytes + 4; | ||
117 | u8 ack; | ||
63 | 118 | ||
64 | if (!dpcd) | 119 | if (send_bytes > 16) |
65 | return 0; | 120 | return -1; |
66 | 121 | ||
67 | max_link_bw = dpcd[DP_MAX_LINK_RATE]; | 122 | msg[0] = address; |
68 | max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; | 123 | msg[1] = address >> 8; |
124 | msg[2] = AUX_NATIVE_WRITE << 4; | ||
125 | msg[3] = (msg_bytes << 4) | (send_bytes - 1); | ||
126 | memcpy(&msg[4], send, send_bytes); | ||
69 | 127 | ||
70 | switch (max_link_bw) { | 128 | while (1) { |
71 | case DP_LINK_BW_1_62: | 129 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, |
72 | default: | 130 | msg, msg_bytes, NULL, 0, delay, &ack); |
73 | for (i = 0; i < num_dp_clocks; i++) { | 131 | if (ret < 0) |
74 | if (i % 2) | 132 | return ret; |
75 | continue; | 133 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
76 | switch (max_lane_count) { | 134 | break; |
77 | case 1: | 135 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) |
78 | if (i > 1) | 136 | udelay(400); |
79 | return 0; | 137 | else |
80 | break; | 138 | return -EIO; |
81 | case 2: | ||
82 | if (i > 3) | ||
83 | return 0; | ||
84 | break; | ||
85 | case 4: | ||
86 | default: | ||
87 | break; | ||
88 | } | ||
89 | if (dp_clocks[i] > mode_clock) { | ||
90 | if (i < 2) | ||
91 | return 1; | ||
92 | else if (i < 4) | ||
93 | return 2; | ||
94 | else | ||
95 | return 4; | ||
96 | } | ||
97 | } | ||
98 | break; | ||
99 | case DP_LINK_BW_2_7: | ||
100 | for (i = 0; i < num_dp_clocks; i++) { | ||
101 | switch (max_lane_count) { | ||
102 | case 1: | ||
103 | if (i > 1) | ||
104 | return 0; | ||
105 | break; | ||
106 | case 2: | ||
107 | if (i > 3) | ||
108 | return 0; | ||
109 | break; | ||
110 | case 4: | ||
111 | default: | ||
112 | break; | ||
113 | } | ||
114 | if (dp_clocks[i] > mode_clock) { | ||
115 | if (i < 2) | ||
116 | return 1; | ||
117 | else if (i < 4) | ||
118 | return 2; | ||
119 | else | ||
120 | return 4; | ||
121 | } | ||
122 | } | ||
123 | break; | ||
124 | } | 139 | } |
125 | 140 | ||
126 | return 0; | 141 | return send_bytes; |
127 | } | 142 | } |
128 | 143 | ||
129 | static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock) | 144 | static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, |
145 | u16 address, u8 *recv, int recv_bytes, u8 delay) | ||
130 | { | 146 | { |
131 | int i; | 147 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; |
132 | u8 max_link_bw; | 148 | u8 msg[4]; |
133 | u8 max_lane_count; | 149 | int msg_bytes = 4; |
150 | u8 ack; | ||
151 | int ret; | ||
134 | 152 | ||
135 | if (!dpcd) | 153 | msg[0] = address; |
136 | return 0; | 154 | msg[1] = address >> 8; |
155 | msg[2] = AUX_NATIVE_READ << 4; | ||
156 | msg[3] = (msg_bytes << 4) | (recv_bytes - 1); | ||
157 | |||
158 | while (1) { | ||
159 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, | ||
160 | msg, msg_bytes, recv, recv_bytes, delay, &ack); | ||
161 | if (ret == 0) | ||
162 | return -EPROTO; | ||
163 | if (ret < 0) | ||
164 | return ret; | ||
165 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | ||
166 | return ret; | ||
167 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | ||
168 | udelay(400); | ||
169 | else | ||
170 | return -EIO; | ||
171 | } | ||
172 | } | ||
137 | 173 | ||
138 | max_link_bw = dpcd[DP_MAX_LINK_RATE]; | 174 | static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, |
139 | max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; | 175 | u16 reg, u8 val) |
176 | { | ||
177 | radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0); | ||
178 | } | ||
140 | 179 | ||
141 | switch (max_link_bw) { | 180 | static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector, |
142 | case DP_LINK_BW_1_62: | 181 | u16 reg) |
182 | { | ||
183 | u8 val = 0; | ||
184 | |||
185 | radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0); | ||
186 | |||
187 | return val; | ||
188 | } | ||
189 | |||
190 | int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | ||
191 | u8 write_byte, u8 *read_byte) | ||
192 | { | ||
193 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
194 | struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter; | ||
195 | u16 address = algo_data->address; | ||
196 | u8 msg[5]; | ||
197 | u8 reply[2]; | ||
198 | unsigned retry; | ||
199 | int msg_bytes; | ||
200 | int reply_bytes = 1; | ||
201 | int ret; | ||
202 | u8 ack; | ||
203 | |||
204 | /* Set up the command byte */ | ||
205 | if (mode & MODE_I2C_READ) | ||
206 | msg[2] = AUX_I2C_READ << 4; | ||
207 | else | ||
208 | msg[2] = AUX_I2C_WRITE << 4; | ||
209 | |||
210 | if (!(mode & MODE_I2C_STOP)) | ||
211 | msg[2] |= AUX_I2C_MOT << 4; | ||
212 | |||
213 | msg[0] = address; | ||
214 | msg[1] = address >> 8; | ||
215 | |||
216 | switch (mode) { | ||
217 | case MODE_I2C_WRITE: | ||
218 | msg_bytes = 5; | ||
219 | msg[3] = msg_bytes << 4; | ||
220 | msg[4] = write_byte; | ||
221 | break; | ||
222 | case MODE_I2C_READ: | ||
223 | msg_bytes = 4; | ||
224 | msg[3] = msg_bytes << 4; | ||
225 | break; | ||
143 | default: | 226 | default: |
144 | for (i = 0; i < num_dp_clocks; i++) { | 227 | msg_bytes = 4; |
145 | if (i % 2) | 228 | msg[3] = 3 << 4; |
146 | continue; | ||
147 | switch (max_lane_count) { | ||
148 | case 1: | ||
149 | if (i > 1) | ||
150 | return 0; | ||
151 | break; | ||
152 | case 2: | ||
153 | if (i > 3) | ||
154 | return 0; | ||
155 | break; | ||
156 | case 4: | ||
157 | default: | ||
158 | break; | ||
159 | } | ||
160 | if (dp_clocks[i] > mode_clock) | ||
161 | return 162000; | ||
162 | } | ||
163 | break; | 229 | break; |
164 | case DP_LINK_BW_2_7: | ||
165 | for (i = 0; i < num_dp_clocks; i++) { | ||
166 | switch (max_lane_count) { | ||
167 | case 1: | ||
168 | if (i > 1) | ||
169 | return 0; | ||
170 | break; | ||
171 | case 2: | ||
172 | if (i > 3) | ||
173 | return 0; | ||
174 | break; | ||
175 | case 4: | ||
176 | default: | ||
177 | break; | ||
178 | } | ||
179 | if (dp_clocks[i] > mode_clock) | ||
180 | return (i % 2) ? 270000 : 162000; | ||
181 | } | ||
182 | } | 230 | } |
183 | 231 | ||
184 | return 0; | 232 | for (retry = 0; retry < 4; retry++) { |
185 | } | 233 | ret = radeon_process_aux_ch(auxch, |
234 | msg, msg_bytes, reply, reply_bytes, 0, &ack); | ||
235 | if (ret < 0) { | ||
236 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); | ||
237 | return ret; | ||
238 | } | ||
186 | 239 | ||
187 | int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock) | 240 | switch (ack & AUX_NATIVE_REPLY_MASK) { |
188 | { | 241 | case AUX_NATIVE_REPLY_ACK: |
189 | int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock); | 242 | /* I2C-over-AUX Reply field is only valid |
190 | int bw = dp_lanes_for_mode_clock(dpcd, mode_clock); | 243 | * when paired with AUX ACK. |
244 | */ | ||
245 | break; | ||
246 | case AUX_NATIVE_REPLY_NACK: | ||
247 | DRM_DEBUG_KMS("aux_ch native nack\n"); | ||
248 | return -EREMOTEIO; | ||
249 | case AUX_NATIVE_REPLY_DEFER: | ||
250 | DRM_DEBUG_KMS("aux_ch native defer\n"); | ||
251 | udelay(400); | ||
252 | continue; | ||
253 | default: | ||
254 | DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack); | ||
255 | return -EREMOTEIO; | ||
256 | } | ||
191 | 257 | ||
192 | if ((lanes == 0) || (bw == 0)) | 258 | switch (ack & AUX_I2C_REPLY_MASK) { |
193 | return MODE_CLOCK_HIGH; | 259 | case AUX_I2C_REPLY_ACK: |
260 | if (mode == MODE_I2C_READ) | ||
261 | *read_byte = reply[0]; | ||
262 | return ret; | ||
263 | case AUX_I2C_REPLY_NACK: | ||
264 | DRM_DEBUG_KMS("aux_i2c nack\n"); | ||
265 | return -EREMOTEIO; | ||
266 | case AUX_I2C_REPLY_DEFER: | ||
267 | DRM_DEBUG_KMS("aux_i2c defer\n"); | ||
268 | udelay(400); | ||
269 | break; | ||
270 | default: | ||
271 | DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack); | ||
272 | return -EREMOTEIO; | ||
273 | } | ||
274 | } | ||
194 | 275 | ||
195 | return MODE_OK; | 276 | DRM_ERROR("aux i2c too many retries, giving up\n"); |
277 | return -EREMOTEIO; | ||
196 | } | 278 | } |
197 | 279 | ||
280 | /***** general DP utility functions *****/ | ||
281 | |||
198 | static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) | 282 | static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) |
199 | { | 283 | { |
200 | return link_status[r - DP_LANE0_1_STATUS]; | 284 | return link_status[r - DP_LANE0_1_STATUS]; |
@@ -242,7 +326,7 @@ static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], | |||
242 | return true; | 326 | return true; |
243 | } | 327 | } |
244 | 328 | ||
245 | static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], | 329 | static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], |
246 | int lane) | 330 | int lane) |
247 | 331 | ||
248 | { | 332 | { |
@@ -255,7 +339,7 @@ static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE] | |||
255 | return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; | 339 | return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; |
256 | } | 340 | } |
257 | 341 | ||
258 | static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], | 342 | static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], |
259 | int lane) | 343 | int lane) |
260 | { | 344 | { |
261 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | 345 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); |
@@ -267,22 +351,8 @@ static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_ | |||
267 | return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; | 351 | return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; |
268 | } | 352 | } |
269 | 353 | ||
270 | /* XXX fix me -- chip specific */ | ||
271 | #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 | 354 | #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 |
272 | static u8 dp_pre_emphasis_max(u8 voltage_swing) | 355 | #define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5 |
273 | { | ||
274 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { | ||
275 | case DP_TRAIN_VOLTAGE_SWING_400: | ||
276 | return DP_TRAIN_PRE_EMPHASIS_6; | ||
277 | case DP_TRAIN_VOLTAGE_SWING_600: | ||
278 | return DP_TRAIN_PRE_EMPHASIS_6; | ||
279 | case DP_TRAIN_VOLTAGE_SWING_800: | ||
280 | return DP_TRAIN_PRE_EMPHASIS_3_5; | ||
281 | case DP_TRAIN_VOLTAGE_SWING_1200: | ||
282 | default: | ||
283 | return DP_TRAIN_PRE_EMPHASIS_0; | ||
284 | } | ||
285 | } | ||
286 | 356 | ||
287 | static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], | 357 | static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], |
288 | int lane_count, | 358 | int lane_count, |
@@ -308,10 +378,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], | |||
308 | } | 378 | } |
309 | 379 | ||
310 | if (v >= DP_VOLTAGE_MAX) | 380 | if (v >= DP_VOLTAGE_MAX) |
311 | v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; | 381 | v |= DP_TRAIN_MAX_SWING_REACHED; |
312 | 382 | ||
313 | if (p >= dp_pre_emphasis_max(v)) | 383 | if (p >= DP_PRE_EMPHASIS_MAX) |
314 | p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; | 384 | p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; |
315 | 385 | ||
316 | DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n", | 386 | DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n", |
317 | voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT], | 387 | voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT], |
@@ -321,110 +391,109 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], | |||
321 | train_set[lane] = v | p; | 391 | train_set[lane] = v | p; |
322 | } | 392 | } |
323 | 393 | ||
324 | union aux_channel_transaction { | 394 | /* convert bits per color to bits per pixel */ |
325 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; | 395 | /* get bpc from the EDID */ |
326 | PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; | 396 | static int convert_bpc_to_bpp(int bpc) |
327 | }; | ||
328 | |||
329 | /* radeon aux chan functions */ | ||
330 | bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, | ||
331 | int num_bytes, u8 *read_byte, | ||
332 | u8 read_buf_len, u8 delay) | ||
333 | { | 397 | { |
334 | struct drm_device *dev = chan->dev; | 398 | if (bpc == 0) |
335 | struct radeon_device *rdev = dev->dev_private; | 399 | return 24; |
336 | union aux_channel_transaction args; | 400 | else |
337 | int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); | 401 | return bpc * 3; |
338 | unsigned char *base; | 402 | } |
339 | int retry_count = 0; | ||
340 | |||
341 | memset(&args, 0, sizeof(args)); | ||
342 | |||
343 | base = (unsigned char *)rdev->mode_info.atom_context->scratch; | ||
344 | |||
345 | retry: | ||
346 | memcpy(base, req_bytes, num_bytes); | ||
347 | |||
348 | args.v1.lpAuxRequest = 0; | ||
349 | args.v1.lpDataOut = 16; | ||
350 | args.v1.ucDataOutLen = 0; | ||
351 | args.v1.ucChannelID = chan->rec.i2c_id; | ||
352 | args.v1.ucDelay = delay / 10; | ||
353 | if (ASIC_IS_DCE4(rdev)) | ||
354 | args.v2.ucHPD_ID = chan->rec.hpd; | ||
355 | 403 | ||
356 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 404 | /* get the max pix clock supported by the link rate and lane num */ |
405 | static int dp_get_max_dp_pix_clock(int link_rate, | ||
406 | int lane_num, | ||
407 | int bpp) | ||
408 | { | ||
409 | return (link_rate * lane_num * 8) / bpp; | ||
410 | } | ||
357 | 411 | ||
358 | if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) { | 412 | static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE]) |
359 | if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10) | 413 | { |
360 | goto retry; | 414 | switch (dpcd[DP_MAX_LINK_RATE]) { |
361 | DRM_DEBUG_KMS("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", | 415 | case DP_LINK_BW_1_62: |
362 | req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], | 416 | default: |
363 | chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count); | 417 | return 162000; |
364 | return false; | 418 | case DP_LINK_BW_2_7: |
419 | return 270000; | ||
420 | case DP_LINK_BW_5_4: | ||
421 | return 540000; | ||
365 | } | 422 | } |
423 | } | ||
366 | 424 | ||
367 | if (args.v1.ucDataOutLen && read_byte && read_buf_len) { | 425 | static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE]) |
368 | if (read_buf_len < args.v1.ucDataOutLen) { | 426 | { |
369 | DRM_ERROR("Buffer to small for return answer %d %d\n", | 427 | return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; |
370 | read_buf_len, args.v1.ucDataOutLen); | ||
371 | return false; | ||
372 | } | ||
373 | { | ||
374 | int len = min(read_buf_len, args.v1.ucDataOutLen); | ||
375 | memcpy(read_byte, base + 16, len); | ||
376 | } | ||
377 | } | ||
378 | return true; | ||
379 | } | 428 | } |
380 | 429 | ||
381 | bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address, | 430 | static u8 dp_get_dp_link_rate_coded(int link_rate) |
382 | uint8_t send_bytes, uint8_t *send) | ||
383 | { | 431 | { |
384 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | 432 | switch (link_rate) { |
385 | u8 msg[20]; | 433 | case 162000: |
386 | u8 msg_len, dp_msg_len; | 434 | default: |
387 | bool ret; | 435 | return DP_LINK_BW_1_62; |
436 | case 270000: | ||
437 | return DP_LINK_BW_2_7; | ||
438 | case 540000: | ||
439 | return DP_LINK_BW_5_4; | ||
440 | } | ||
441 | } | ||
388 | 442 | ||
389 | dp_msg_len = 4; | 443 | /***** radeon specific DP functions *****/ |
390 | msg[0] = address; | ||
391 | msg[1] = address >> 8; | ||
392 | msg[2] = AUX_NATIVE_WRITE << 4; | ||
393 | dp_msg_len += send_bytes; | ||
394 | msg[3] = (dp_msg_len << 4) | (send_bytes - 1); | ||
395 | 444 | ||
396 | if (send_bytes > 16) | 445 | /* First get the min lane# when low rate is used according to pixel clock |
397 | return false; | 446 | * (prefer low rate), second check max lane# supported by DP panel, |
447 | * if the max lane# < low rate lane# then use max lane# instead. | ||
448 | */ | ||
449 | static int radeon_dp_get_dp_lane_number(struct drm_connector *connector, | ||
450 | u8 dpcd[DP_DPCD_SIZE], | ||
451 | int pix_clock) | ||
452 | { | ||
453 | int bpp = convert_bpc_to_bpp(connector->display_info.bpc); | ||
454 | int max_link_rate = dp_get_max_link_rate(dpcd); | ||
455 | int max_lane_num = dp_get_max_lane_number(dpcd); | ||
456 | int lane_num; | ||
457 | int max_dp_pix_clock; | ||
458 | |||
459 | for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) { | ||
460 | max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp); | ||
461 | if (pix_clock <= max_dp_pix_clock) | ||
462 | break; | ||
463 | } | ||
398 | 464 | ||
399 | memcpy(&msg[4], send, send_bytes); | 465 | return lane_num; |
400 | msg_len = 4 + send_bytes; | ||
401 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0); | ||
402 | return ret; | ||
403 | } | 466 | } |
404 | 467 | ||
405 | bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address, | 468 | static int radeon_dp_get_dp_link_clock(struct drm_connector *connector, |
406 | uint8_t delay, uint8_t expected_bytes, | 469 | u8 dpcd[DP_DPCD_SIZE], |
407 | uint8_t *read_p) | 470 | int pix_clock) |
408 | { | 471 | { |
409 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | 472 | int bpp = convert_bpc_to_bpp(connector->display_info.bpc); |
410 | u8 msg[20]; | 473 | int lane_num, max_pix_clock; |
411 | u8 msg_len, dp_msg_len; | 474 | |
412 | bool ret = false; | 475 | if (radeon_connector_encoder_is_dp_bridge(connector)) |
413 | msg_len = 4; | 476 | return 270000; |
414 | dp_msg_len = 4; | 477 | |
415 | msg[0] = address; | 478 | lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock); |
416 | msg[1] = address >> 8; | 479 | max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp); |
417 | msg[2] = AUX_NATIVE_READ << 4; | 480 | if (pix_clock <= max_pix_clock) |
418 | msg[3] = (dp_msg_len) << 4; | 481 | return 162000; |
419 | msg[3] |= expected_bytes - 1; | 482 | max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp); |
483 | if (pix_clock <= max_pix_clock) | ||
484 | return 270000; | ||
485 | if (radeon_connector_is_dp12_capable(connector)) { | ||
486 | max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp); | ||
487 | if (pix_clock <= max_pix_clock) | ||
488 | return 540000; | ||
489 | } | ||
420 | 490 | ||
421 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay); | 491 | return dp_get_max_link_rate(dpcd); |
422 | return ret; | ||
423 | } | 492 | } |
424 | 493 | ||
425 | /* radeon dp functions */ | 494 | static u8 radeon_dp_encoder_service(struct radeon_device *rdev, |
426 | static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock, | 495 | int action, int dp_clock, |
427 | uint8_t ucconfig, uint8_t lane_num) | 496 | u8 ucconfig, u8 lane_num) |
428 | { | 497 | { |
429 | DP_ENCODER_SERVICE_PARAMETERS args; | 498 | DP_ENCODER_SERVICE_PARAMETERS args; |
430 | int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); | 499 | int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); |
@@ -454,60 +523,86 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) | |||
454 | { | 523 | { |
455 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | 524 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; |
456 | u8 msg[25]; | 525 | u8 msg[25]; |
457 | int ret; | 526 | int ret, i; |
458 | 527 | ||
459 | ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg); | 528 | ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0); |
460 | if (ret) { | 529 | if (ret > 0) { |
461 | memcpy(dig_connector->dpcd, msg, 8); | 530 | memcpy(dig_connector->dpcd, msg, 8); |
462 | { | 531 | DRM_DEBUG_KMS("DPCD: "); |
463 | int i; | 532 | for (i = 0; i < 8; i++) |
464 | DRM_DEBUG_KMS("DPCD: "); | 533 | DRM_DEBUG_KMS("%02x ", msg[i]); |
465 | for (i = 0; i < 8; i++) | 534 | DRM_DEBUG_KMS("\n"); |
466 | DRM_DEBUG_KMS("%02x ", msg[i]); | ||
467 | DRM_DEBUG_KMS("\n"); | ||
468 | } | ||
469 | return true; | 535 | return true; |
470 | } | 536 | } |
471 | dig_connector->dpcd[0] = 0; | 537 | dig_connector->dpcd[0] = 0; |
472 | return false; | 538 | return false; |
473 | } | 539 | } |
474 | 540 | ||
541 | static void radeon_dp_set_panel_mode(struct drm_encoder *encoder, | ||
542 | struct drm_connector *connector) | ||
543 | { | ||
544 | struct drm_device *dev = encoder->dev; | ||
545 | struct radeon_device *rdev = dev->dev_private; | ||
546 | int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; | ||
547 | |||
548 | if (!ASIC_IS_DCE4(rdev)) | ||
549 | return; | ||
550 | |||
551 | if (radeon_connector_encoder_is_dp_bridge(connector)) | ||
552 | panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; | ||
553 | |||
554 | atombios_dig_encoder_setup(encoder, | ||
555 | ATOM_ENCODER_CMD_SETUP_PANEL_MODE, | ||
556 | panel_mode); | ||
557 | } | ||
558 | |||
475 | void radeon_dp_set_link_config(struct drm_connector *connector, | 559 | void radeon_dp_set_link_config(struct drm_connector *connector, |
476 | struct drm_display_mode *mode) | 560 | struct drm_display_mode *mode) |
477 | { | 561 | { |
478 | struct radeon_connector *radeon_connector; | 562 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
479 | struct radeon_connector_atom_dig *dig_connector; | 563 | struct radeon_connector_atom_dig *dig_connector; |
480 | 564 | ||
481 | if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) && | ||
482 | (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) | ||
483 | return; | ||
484 | |||
485 | radeon_connector = to_radeon_connector(connector); | ||
486 | if (!radeon_connector->con_priv) | 565 | if (!radeon_connector->con_priv) |
487 | return; | 566 | return; |
488 | dig_connector = radeon_connector->con_priv; | 567 | dig_connector = radeon_connector->con_priv; |
489 | 568 | ||
490 | dig_connector->dp_clock = | 569 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
491 | dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock); | 570 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { |
492 | dig_connector->dp_lane_count = | 571 | dig_connector->dp_clock = |
493 | dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock); | 572 | radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); |
573 | dig_connector->dp_lane_count = | ||
574 | radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock); | ||
575 | } | ||
494 | } | 576 | } |
495 | 577 | ||
496 | int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, | 578 | int radeon_dp_mode_valid_helper(struct drm_connector *connector, |
497 | struct drm_display_mode *mode) | 579 | struct drm_display_mode *mode) |
498 | { | 580 | { |
499 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | 581 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
582 | struct radeon_connector_atom_dig *dig_connector; | ||
583 | int dp_clock; | ||
584 | |||
585 | if (!radeon_connector->con_priv) | ||
586 | return MODE_CLOCK_HIGH; | ||
587 | dig_connector = radeon_connector->con_priv; | ||
588 | |||
589 | dp_clock = | ||
590 | radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); | ||
591 | |||
592 | if ((dp_clock == 540000) && | ||
593 | (!radeon_connector_is_dp12_capable(connector))) | ||
594 | return MODE_CLOCK_HIGH; | ||
500 | 595 | ||
501 | return dp_mode_valid(dig_connector->dpcd, mode->clock); | 596 | return MODE_OK; |
502 | } | 597 | } |
503 | 598 | ||
504 | static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector, | 599 | static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector, |
505 | u8 link_status[DP_LINK_STATUS_SIZE]) | 600 | u8 link_status[DP_LINK_STATUS_SIZE]) |
506 | { | 601 | { |
507 | int ret; | 602 | int ret; |
508 | ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100, | 603 | ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, |
509 | DP_LINK_STATUS_SIZE, link_status); | 604 | link_status, DP_LINK_STATUS_SIZE, 100); |
510 | if (!ret) { | 605 | if (ret <= 0) { |
511 | DRM_ERROR("displayport link status failed\n"); | 606 | DRM_ERROR("displayport link status failed\n"); |
512 | return false; | 607 | return false; |
513 | } | 608 | } |
@@ -518,292 +613,309 @@ static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector, | |||
518 | return true; | 613 | return true; |
519 | } | 614 | } |
520 | 615 | ||
521 | bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) | 616 | struct radeon_dp_link_train_info { |
522 | { | 617 | struct radeon_device *rdev; |
523 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | 618 | struct drm_encoder *encoder; |
619 | struct drm_connector *connector; | ||
620 | struct radeon_connector *radeon_connector; | ||
621 | int enc_id; | ||
622 | int dp_clock; | ||
623 | int dp_lane_count; | ||
624 | int rd_interval; | ||
625 | bool tp3_supported; | ||
626 | u8 dpcd[8]; | ||
627 | u8 train_set[4]; | ||
524 | u8 link_status[DP_LINK_STATUS_SIZE]; | 628 | u8 link_status[DP_LINK_STATUS_SIZE]; |
629 | u8 tries; | ||
630 | }; | ||
525 | 631 | ||
526 | if (!atom_dp_get_link_status(radeon_connector, link_status)) | 632 | static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info) |
527 | return false; | 633 | { |
528 | if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) | 634 | /* set the initial vs/emph on the source */ |
529 | return false; | 635 | atombios_dig_transmitter_setup(dp_info->encoder, |
530 | return true; | 636 | ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH, |
637 | 0, dp_info->train_set[0]); /* sets all lanes at once */ | ||
638 | |||
639 | /* set the vs/emph on the sink */ | ||
640 | radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET, | ||
641 | dp_info->train_set, dp_info->dp_lane_count, 0); | ||
531 | } | 642 | } |
532 | 643 | ||
533 | static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state) | 644 | static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp) |
534 | { | 645 | { |
535 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | 646 | int rtp = 0; |
536 | 647 | ||
537 | if (dig_connector->dpcd[0] >= 0x11) { | 648 | /* set training pattern on the source */ |
538 | radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1, | 649 | if (ASIC_IS_DCE4(dp_info->rdev)) { |
539 | &power_state); | 650 | switch (tp) { |
651 | case DP_TRAINING_PATTERN_1: | ||
652 | rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1; | ||
653 | break; | ||
654 | case DP_TRAINING_PATTERN_2: | ||
655 | rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2; | ||
656 | break; | ||
657 | case DP_TRAINING_PATTERN_3: | ||
658 | rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3; | ||
659 | break; | ||
660 | } | ||
661 | atombios_dig_encoder_setup(dp_info->encoder, rtp, 0); | ||
662 | } else { | ||
663 | switch (tp) { | ||
664 | case DP_TRAINING_PATTERN_1: | ||
665 | rtp = 0; | ||
666 | break; | ||
667 | case DP_TRAINING_PATTERN_2: | ||
668 | rtp = 1; | ||
669 | break; | ||
670 | } | ||
671 | radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, | ||
672 | dp_info->dp_clock, dp_info->enc_id, rtp); | ||
540 | } | 673 | } |
541 | } | ||
542 | 674 | ||
543 | static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread) | 675 | /* enable training pattern on the sink */ |
544 | { | 676 | radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp); |
545 | radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1, | ||
546 | &downspread); | ||
547 | } | 677 | } |
548 | 678 | ||
549 | static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector, | 679 | static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) |
550 | u8 link_configuration[DP_LINK_CONFIGURATION_SIZE]) | ||
551 | { | 680 | { |
552 | radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2, | 681 | u8 tmp; |
553 | link_configuration); | ||
554 | } | ||
555 | 682 | ||
556 | static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector, | 683 | /* power up the sink */ |
557 | struct drm_encoder *encoder, | 684 | if (dp_info->dpcd[0] >= 0x11) |
558 | u8 train_set[4]) | 685 | radeon_write_dpcd_reg(dp_info->radeon_connector, |
559 | { | 686 | DP_SET_POWER, DP_SET_POWER_D0); |
560 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | 687 | |
561 | int i; | 688 | /* possibly enable downspread on the sink */ |
689 | if (dp_info->dpcd[3] & 0x1) | ||
690 | radeon_write_dpcd_reg(dp_info->radeon_connector, | ||
691 | DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5); | ||
692 | else | ||
693 | radeon_write_dpcd_reg(dp_info->radeon_connector, | ||
694 | DP_DOWNSPREAD_CTRL, 0); | ||
562 | 695 | ||
563 | for (i = 0; i < dig_connector->dp_lane_count; i++) | 696 | radeon_dp_set_panel_mode(dp_info->encoder, dp_info->connector); |
564 | atombios_dig_transmitter_setup(encoder, | ||
565 | ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH, | ||
566 | i, train_set[i]); | ||
567 | 697 | ||
568 | radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET, | 698 | /* set the lane count on the sink */ |
569 | dig_connector->dp_lane_count, train_set); | 699 | tmp = dp_info->dp_lane_count; |
570 | } | 700 | if (dp_info->dpcd[0] >= 0x11) |
701 | tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | ||
702 | radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); | ||
571 | 703 | ||
572 | static void dp_set_training(struct radeon_connector *radeon_connector, | 704 | /* set the link rate on the sink */ |
573 | u8 training) | 705 | tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock); |
574 | { | 706 | radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp); |
575 | radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET, | ||
576 | 1, &training); | ||
577 | } | ||
578 | 707 | ||
579 | void dp_link_train(struct drm_encoder *encoder, | 708 | /* start training on the source */ |
580 | struct drm_connector *connector) | 709 | if (ASIC_IS_DCE4(dp_info->rdev)) |
581 | { | 710 | atombios_dig_encoder_setup(dp_info->encoder, |
582 | struct drm_device *dev = encoder->dev; | 711 | ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0); |
583 | struct radeon_device *rdev = dev->dev_private; | 712 | else |
584 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 713 | radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START, |
585 | struct radeon_encoder_atom_dig *dig; | 714 | dp_info->dp_clock, dp_info->enc_id, 0); |
586 | struct radeon_connector *radeon_connector; | ||
587 | struct radeon_connector_atom_dig *dig_connector; | ||
588 | int enc_id = 0; | ||
589 | bool clock_recovery, channel_eq; | ||
590 | u8 link_status[DP_LINK_STATUS_SIZE]; | ||
591 | u8 link_configuration[DP_LINK_CONFIGURATION_SIZE]; | ||
592 | u8 tries, voltage; | ||
593 | u8 train_set[4]; | ||
594 | int i; | ||
595 | 715 | ||
596 | if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) && | 716 | /* disable the training pattern on the sink */ |
597 | (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) | 717 | radeon_write_dpcd_reg(dp_info->radeon_connector, |
598 | return; | 718 | DP_TRAINING_PATTERN_SET, |
719 | DP_TRAINING_PATTERN_DISABLE); | ||
599 | 720 | ||
600 | if (!radeon_encoder->enc_priv) | 721 | return 0; |
601 | return; | 722 | } |
602 | dig = radeon_encoder->enc_priv; | ||
603 | 723 | ||
604 | radeon_connector = to_radeon_connector(connector); | 724 | static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info) |
605 | if (!radeon_connector->con_priv) | 725 | { |
606 | return; | 726 | udelay(400); |
607 | dig_connector = radeon_connector->con_priv; | ||
608 | 727 | ||
609 | if (dig->dig_encoder) | 728 | /* disable the training pattern on the sink */ |
610 | enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; | 729 | radeon_write_dpcd_reg(dp_info->radeon_connector, |
611 | else | 730 | DP_TRAINING_PATTERN_SET, |
612 | enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; | 731 | DP_TRAINING_PATTERN_DISABLE); |
613 | if (dig->linkb) | ||
614 | enc_id |= ATOM_DP_CONFIG_LINK_B; | ||
615 | else | ||
616 | enc_id |= ATOM_DP_CONFIG_LINK_A; | ||
617 | 732 | ||
618 | memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); | 733 | /* disable the training pattern on the source */ |
619 | if (dig_connector->dp_clock == 270000) | 734 | if (ASIC_IS_DCE4(dp_info->rdev)) |
620 | link_configuration[0] = DP_LINK_BW_2_7; | 735 | atombios_dig_encoder_setup(dp_info->encoder, |
736 | ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0); | ||
621 | else | 737 | else |
622 | link_configuration[0] = DP_LINK_BW_1_62; | 738 | radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, |
623 | link_configuration[1] = dig_connector->dp_lane_count; | 739 | dp_info->dp_clock, dp_info->enc_id, 0); |
624 | if (dig_connector->dpcd[0] >= 0x11) | ||
625 | link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | ||
626 | 740 | ||
627 | /* power up the sink */ | 741 | return 0; |
628 | dp_set_power(radeon_connector, DP_SET_POWER_D0); | 742 | } |
629 | /* disable the training pattern on the sink */ | ||
630 | dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); | ||
631 | /* set link bw and lanes on the sink */ | ||
632 | dp_set_link_bw_lanes(radeon_connector, link_configuration); | ||
633 | /* disable downspread on the sink */ | ||
634 | dp_set_downspread(radeon_connector, 0); | ||
635 | if (ASIC_IS_DCE4(rdev)) { | ||
636 | /* start training on the source */ | ||
637 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START); | ||
638 | /* set training pattern 1 on the source */ | ||
639 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1); | ||
640 | } else { | ||
641 | /* start training on the source */ | ||
642 | radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START, | ||
643 | dig_connector->dp_clock, enc_id, 0); | ||
644 | /* set training pattern 1 on the source */ | ||
645 | radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, | ||
646 | dig_connector->dp_clock, enc_id, 0); | ||
647 | } | ||
648 | 743 | ||
649 | /* set initial vs/emph */ | 744 | static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info) |
650 | memset(train_set, 0, 4); | 745 | { |
651 | udelay(400); | 746 | bool clock_recovery; |
652 | /* set training pattern 1 on the sink */ | 747 | u8 voltage; |
653 | dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1); | 748 | int i; |
654 | 749 | ||
655 | dp_update_dpvs_emph(radeon_connector, encoder, train_set); | 750 | radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1); |
751 | memset(dp_info->train_set, 0, 4); | ||
752 | radeon_dp_update_vs_emph(dp_info); | ||
753 | |||
754 | udelay(400); | ||
656 | 755 | ||
657 | /* clock recovery loop */ | 756 | /* clock recovery loop */ |
658 | clock_recovery = false; | 757 | clock_recovery = false; |
659 | tries = 0; | 758 | dp_info->tries = 0; |
660 | voltage = 0xff; | 759 | voltage = 0xff; |
661 | for (;;) { | 760 | while (1) { |
662 | udelay(100); | 761 | if (dp_info->rd_interval == 0) |
663 | if (!atom_dp_get_link_status(radeon_connector, link_status)) | 762 | udelay(100); |
763 | else | ||
764 | mdelay(dp_info->rd_interval * 4); | ||
765 | |||
766 | if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) | ||
664 | break; | 767 | break; |
665 | 768 | ||
666 | if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) { | 769 | if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { |
667 | clock_recovery = true; | 770 | clock_recovery = true; |
668 | break; | 771 | break; |
669 | } | 772 | } |
670 | 773 | ||
671 | for (i = 0; i < dig_connector->dp_lane_count; i++) { | 774 | for (i = 0; i < dp_info->dp_lane_count; i++) { |
672 | if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | 775 | if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
673 | break; | 776 | break; |
674 | } | 777 | } |
675 | if (i == dig_connector->dp_lane_count) { | 778 | if (i == dp_info->dp_lane_count) { |
676 | DRM_ERROR("clock recovery reached max voltage\n"); | 779 | DRM_ERROR("clock recovery reached max voltage\n"); |
677 | break; | 780 | break; |
678 | } | 781 | } |
679 | 782 | ||
680 | if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { | 783 | if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { |
681 | ++tries; | 784 | ++dp_info->tries; |
682 | if (tries == 5) { | 785 | if (dp_info->tries == 5) { |
683 | DRM_ERROR("clock recovery tried 5 times\n"); | 786 | DRM_ERROR("clock recovery tried 5 times\n"); |
684 | break; | 787 | break; |
685 | } | 788 | } |
686 | } else | 789 | } else |
687 | tries = 0; | 790 | dp_info->tries = 0; |
688 | 791 | ||
689 | voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | 792 | voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; |
690 | 793 | ||
691 | /* Compute new train_set as requested by sink */ | 794 | /* Compute new train_set as requested by sink */ |
692 | dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set); | 795 | dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set); |
693 | dp_update_dpvs_emph(radeon_connector, encoder, train_set); | 796 | |
797 | radeon_dp_update_vs_emph(dp_info); | ||
694 | } | 798 | } |
695 | if (!clock_recovery) | 799 | if (!clock_recovery) { |
696 | DRM_ERROR("clock recovery failed\n"); | 800 | DRM_ERROR("clock recovery failed\n"); |
697 | else | 801 | return -1; |
802 | } else { | ||
698 | DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n", | 803 | DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n", |
699 | train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, | 804 | dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, |
700 | (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >> | 805 | (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >> |
701 | DP_TRAIN_PRE_EMPHASIS_SHIFT); | 806 | DP_TRAIN_PRE_EMPHASIS_SHIFT); |
807 | return 0; | ||
808 | } | ||
809 | } | ||
702 | 810 | ||
811 | static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info) | ||
812 | { | ||
813 | bool channel_eq; | ||
703 | 814 | ||
704 | /* set training pattern 2 on the sink */ | 815 | if (dp_info->tp3_supported) |
705 | dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2); | 816 | radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3); |
706 | /* set training pattern 2 on the source */ | ||
707 | if (ASIC_IS_DCE4(rdev)) | ||
708 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2); | ||
709 | else | 817 | else |
710 | radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, | 818 | radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2); |
711 | dig_connector->dp_clock, enc_id, 1); | ||
712 | 819 | ||
713 | /* channel equalization loop */ | 820 | /* channel equalization loop */ |
714 | tries = 0; | 821 | dp_info->tries = 0; |
715 | channel_eq = false; | 822 | channel_eq = false; |
716 | for (;;) { | 823 | while (1) { |
717 | udelay(400); | 824 | if (dp_info->rd_interval == 0) |
718 | if (!atom_dp_get_link_status(radeon_connector, link_status)) | 825 | udelay(400); |
826 | else | ||
827 | mdelay(dp_info->rd_interval * 4); | ||
828 | |||
829 | if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) | ||
719 | break; | 830 | break; |
720 | 831 | ||
721 | if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) { | 832 | if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { |
722 | channel_eq = true; | 833 | channel_eq = true; |
723 | break; | 834 | break; |
724 | } | 835 | } |
725 | 836 | ||
726 | /* Try 5 times */ | 837 | /* Try 5 times */ |
727 | if (tries > 5) { | 838 | if (dp_info->tries > 5) { |
728 | DRM_ERROR("channel eq failed: 5 tries\n"); | 839 | DRM_ERROR("channel eq failed: 5 tries\n"); |
729 | break; | 840 | break; |
730 | } | 841 | } |
731 | 842 | ||
732 | /* Compute new train_set as requested by sink */ | 843 | /* Compute new train_set as requested by sink */ |
733 | dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set); | 844 | dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set); |
734 | dp_update_dpvs_emph(radeon_connector, encoder, train_set); | ||
735 | 845 | ||
736 | tries++; | 846 | radeon_dp_update_vs_emph(dp_info); |
847 | dp_info->tries++; | ||
737 | } | 848 | } |
738 | 849 | ||
739 | if (!channel_eq) | 850 | if (!channel_eq) { |
740 | DRM_ERROR("channel eq failed\n"); | 851 | DRM_ERROR("channel eq failed\n"); |
741 | else | 852 | return -1; |
853 | } else { | ||
742 | DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n", | 854 | DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n", |
743 | train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, | 855 | dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, |
744 | (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) | 856 | (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) |
745 | >> DP_TRAIN_PRE_EMPHASIS_SHIFT); | 857 | >> DP_TRAIN_PRE_EMPHASIS_SHIFT); |
746 | 858 | return 0; | |
747 | /* disable the training pattern on the sink */ | 859 | } |
748 | dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); | ||
749 | |||
750 | /* disable the training pattern on the source */ | ||
751 | if (ASIC_IS_DCE4(rdev)) | ||
752 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE); | ||
753 | else | ||
754 | radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, | ||
755 | dig_connector->dp_clock, enc_id, 0); | ||
756 | } | 860 | } |
757 | 861 | ||
758 | int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | 862 | void radeon_dp_link_train(struct drm_encoder *encoder, |
759 | uint8_t write_byte, uint8_t *read_byte) | 863 | struct drm_connector *connector) |
760 | { | 864 | { |
761 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | 865 | struct drm_device *dev = encoder->dev; |
762 | struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter; | 866 | struct radeon_device *rdev = dev->dev_private; |
763 | int ret = 0; | 867 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
764 | uint16_t address = algo_data->address; | 868 | struct radeon_encoder_atom_dig *dig; |
765 | uint8_t msg[5]; | 869 | struct radeon_connector *radeon_connector; |
766 | uint8_t reply[2]; | 870 | struct radeon_connector_atom_dig *dig_connector; |
767 | int msg_len, dp_msg_len; | 871 | struct radeon_dp_link_train_info dp_info; |
768 | int reply_bytes; | 872 | u8 tmp; |
769 | |||
770 | /* Set up the command byte */ | ||
771 | if (mode & MODE_I2C_READ) | ||
772 | msg[2] = AUX_I2C_READ << 4; | ||
773 | else | ||
774 | msg[2] = AUX_I2C_WRITE << 4; | ||
775 | |||
776 | if (!(mode & MODE_I2C_STOP)) | ||
777 | msg[2] |= AUX_I2C_MOT << 4; | ||
778 | 873 | ||
779 | msg[0] = address; | 874 | if (!radeon_encoder->enc_priv) |
780 | msg[1] = address >> 8; | 875 | return; |
876 | dig = radeon_encoder->enc_priv; | ||
781 | 877 | ||
782 | reply_bytes = 1; | 878 | radeon_connector = to_radeon_connector(connector); |
879 | if (!radeon_connector->con_priv) | ||
880 | return; | ||
881 | dig_connector = radeon_connector->con_priv; | ||
783 | 882 | ||
784 | msg_len = 4; | 883 | if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) && |
785 | dp_msg_len = 3; | 884 | (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP)) |
786 | switch (mode) { | 885 | return; |
787 | case MODE_I2C_WRITE: | ||
788 | msg[4] = write_byte; | ||
789 | msg_len++; | ||
790 | dp_msg_len += 2; | ||
791 | break; | ||
792 | case MODE_I2C_READ: | ||
793 | dp_msg_len += 1; | ||
794 | break; | ||
795 | default: | ||
796 | break; | ||
797 | } | ||
798 | 886 | ||
799 | msg[3] = (dp_msg_len) << 4; | 887 | dp_info.enc_id = 0; |
800 | ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0); | 888 | if (dig->dig_encoder) |
889 | dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; | ||
890 | else | ||
891 | dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; | ||
892 | if (dig->linkb) | ||
893 | dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B; | ||
894 | else | ||
895 | dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; | ||
801 | 896 | ||
802 | if (ret) { | 897 | dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL); |
803 | if (read_byte) | 898 | tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT); |
804 | *read_byte = reply[0]; | 899 | if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) |
805 | return reply_bytes; | 900 | dp_info.tp3_supported = true; |
806 | } | 901 | else |
807 | return -EREMOTEIO; | 902 | dp_info.tp3_supported = false; |
903 | |||
904 | memcpy(dp_info.dpcd, dig_connector->dpcd, 8); | ||
905 | dp_info.rdev = rdev; | ||
906 | dp_info.encoder = encoder; | ||
907 | dp_info.connector = connector; | ||
908 | dp_info.radeon_connector = radeon_connector; | ||
909 | dp_info.dp_lane_count = dig_connector->dp_lane_count; | ||
910 | dp_info.dp_clock = dig_connector->dp_clock; | ||
911 | |||
912 | if (radeon_dp_link_train_init(&dp_info)) | ||
913 | goto done; | ||
914 | if (radeon_dp_link_train_cr(&dp_info)) | ||
915 | goto done; | ||
916 | if (radeon_dp_link_train_ce(&dp_info)) | ||
917 | goto done; | ||
918 | done: | ||
919 | if (radeon_dp_link_train_finish(&dp_info)) | ||
920 | return; | ||
808 | } | 921 | } |
809 | |||
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c new file mode 100644 index 000000000000..7b4eeb7b4a8c --- /dev/null +++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c | |||
@@ -0,0 +1,373 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Alex Deucher <alexander.deucher@amd.com> | ||
25 | */ | ||
26 | |||
27 | #include <linux/types.h> | ||
28 | #include <linux/kernel.h> | ||
29 | |||
30 | /* | ||
31 | * evergreen cards need to use the 3D engine to blit data which requires | ||
32 | * quite a bit of hw state setup. Rather than pull the whole 3D driver | ||
33 | * (which normally generates the 3D state) into the DRM, we opt to use | ||
34 | * statically generated state tables. The regsiter state and shaders | ||
35 | * were hand generated to support blitting functionality. See the 3D | ||
36 | * driver or documentation for descriptions of the registers and | ||
37 | * shader instructions. | ||
38 | */ | ||
39 | |||
40 | const u32 cayman_default_state[] = | ||
41 | { | ||
42 | 0xc0066900, | ||
43 | 0x00000000, | ||
44 | 0x00000060, /* DB_RENDER_CONTROL */ | ||
45 | 0x00000000, /* DB_COUNT_CONTROL */ | ||
46 | 0x00000000, /* DB_DEPTH_VIEW */ | ||
47 | 0x0000002a, /* DB_RENDER_OVERRIDE */ | ||
48 | 0x00000000, /* DB_RENDER_OVERRIDE2 */ | ||
49 | 0x00000000, /* DB_HTILE_DATA_BASE */ | ||
50 | |||
51 | 0xc0026900, | ||
52 | 0x0000000a, | ||
53 | 0x00000000, /* DB_STENCIL_CLEAR */ | ||
54 | 0x00000000, /* DB_DEPTH_CLEAR */ | ||
55 | |||
56 | 0xc0036900, | ||
57 | 0x0000000f, | ||
58 | 0x00000000, /* DB_DEPTH_INFO */ | ||
59 | 0x00000000, /* DB_Z_INFO */ | ||
60 | 0x00000000, /* DB_STENCIL_INFO */ | ||
61 | |||
62 | 0xc0016900, | ||
63 | 0x00000080, | ||
64 | 0x00000000, /* PA_SC_WINDOW_OFFSET */ | ||
65 | |||
66 | 0xc00d6900, | ||
67 | 0x00000083, | ||
68 | 0x0000ffff, /* PA_SC_CLIPRECT_RULE */ | ||
69 | 0x00000000, /* PA_SC_CLIPRECT_0_TL */ | ||
70 | 0x20002000, /* PA_SC_CLIPRECT_0_BR */ | ||
71 | 0x00000000, | ||
72 | 0x20002000, | ||
73 | 0x00000000, | ||
74 | 0x20002000, | ||
75 | 0x00000000, | ||
76 | 0x20002000, | ||
77 | 0xaaaaaaaa, /* PA_SC_EDGERULE */ | ||
78 | 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */ | ||
79 | 0x0000000f, /* CB_TARGET_MASK */ | ||
80 | 0x0000000f, /* CB_SHADER_MASK */ | ||
81 | |||
82 | 0xc0226900, | ||
83 | 0x00000094, | ||
84 | 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */ | ||
85 | 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */ | ||
86 | 0x80000000, | ||
87 | 0x20002000, | ||
88 | 0x80000000, | ||
89 | 0x20002000, | ||
90 | 0x80000000, | ||
91 | 0x20002000, | ||
92 | 0x80000000, | ||
93 | 0x20002000, | ||
94 | 0x80000000, | ||
95 | 0x20002000, | ||
96 | 0x80000000, | ||
97 | 0x20002000, | ||
98 | 0x80000000, | ||
99 | 0x20002000, | ||
100 | 0x80000000, | ||
101 | 0x20002000, | ||
102 | 0x80000000, | ||
103 | 0x20002000, | ||
104 | 0x80000000, | ||
105 | 0x20002000, | ||
106 | 0x80000000, | ||
107 | 0x20002000, | ||
108 | 0x80000000, | ||
109 | 0x20002000, | ||
110 | 0x80000000, | ||
111 | 0x20002000, | ||
112 | 0x80000000, | ||
113 | 0x20002000, | ||
114 | 0x80000000, | ||
115 | 0x20002000, | ||
116 | 0x00000000, /* PA_SC_VPORT_ZMIN_0 */ | ||
117 | 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */ | ||
118 | |||
119 | 0xc0016900, | ||
120 | 0x000000d4, | ||
121 | 0x00000000, /* SX_MISC */ | ||
122 | |||
123 | 0xc0026900, | ||
124 | 0x000000d9, | ||
125 | 0x00000000, /* CP_RINGID */ | ||
126 | 0x00000000, /* CP_VMID */ | ||
127 | |||
128 | 0xc0096900, | ||
129 | 0x00000100, | ||
130 | 0x00ffffff, /* VGT_MAX_VTX_INDX */ | ||
131 | 0x00000000, /* VGT_MIN_VTX_INDX */ | ||
132 | 0x00000000, /* VGT_INDX_OFFSET */ | ||
133 | 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */ | ||
134 | 0x00000000, /* SX_ALPHA_TEST_CONTROL */ | ||
135 | 0x00000000, /* CB_BLEND_RED */ | ||
136 | 0x00000000, /* CB_BLEND_GREEN */ | ||
137 | 0x00000000, /* CB_BLEND_BLUE */ | ||
138 | 0x00000000, /* CB_BLEND_ALPHA */ | ||
139 | |||
140 | 0xc0016900, | ||
141 | 0x00000187, | ||
142 | 0x00000100, /* SPI_VS_OUT_ID_0 */ | ||
143 | |||
144 | 0xc0026900, | ||
145 | 0x00000191, | ||
146 | 0x00000100, /* SPI_PS_INPUT_CNTL_0 */ | ||
147 | 0x00000101, /* SPI_PS_INPUT_CNTL_1 */ | ||
148 | |||
149 | 0xc0016900, | ||
150 | 0x000001b1, | ||
151 | 0x00000000, /* SPI_VS_OUT_CONFIG */ | ||
152 | |||
153 | 0xc0106900, | ||
154 | 0x000001b3, | ||
155 | 0x20000001, /* SPI_PS_IN_CONTROL_0 */ | ||
156 | 0x00000000, /* SPI_PS_IN_CONTROL_1 */ | ||
157 | 0x00000000, /* SPI_INTERP_CONTROL_0 */ | ||
158 | 0x00000000, /* SPI_INPUT_Z */ | ||
159 | 0x00000000, /* SPI_FOG_CNTL */ | ||
160 | 0x00100000, /* SPI_BARYC_CNTL */ | ||
161 | 0x00000000, /* SPI_PS_IN_CONTROL_2 */ | ||
162 | 0x00000000, /* SPI_COMPUTE_INPUT_CNTL */ | ||
163 | 0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */ | ||
164 | 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */ | ||
165 | 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */ | ||
166 | 0x00000000, /* SPI_GPR_MGMT */ | ||
167 | 0x00000000, /* SPI_LDS_MGMT */ | ||
168 | 0x00000000, /* SPI_STACK_MGMT */ | ||
169 | 0x00000000, /* SPI_WAVE_MGMT_1 */ | ||
170 | 0x00000000, /* SPI_WAVE_MGMT_2 */ | ||
171 | |||
172 | 0xc0016900, | ||
173 | 0x000001e0, | ||
174 | 0x00000000, /* CB_BLEND0_CONTROL */ | ||
175 | |||
176 | 0xc00e6900, | ||
177 | 0x00000200, | ||
178 | 0x00000000, /* DB_DEPTH_CONTROL */ | ||
179 | 0x00000000, /* DB_EQAA */ | ||
180 | 0x00cc0010, /* CB_COLOR_CONTROL */ | ||
181 | 0x00000210, /* DB_SHADER_CONTROL */ | ||
182 | 0x00010000, /* PA_CL_CLIP_CNTL */ | ||
183 | 0x00000004, /* PA_SU_SC_MODE_CNTL */ | ||
184 | 0x00000100, /* PA_CL_VTE_CNTL */ | ||
185 | 0x00000000, /* PA_CL_VS_OUT_CNTL */ | ||
186 | 0x00000000, /* PA_CL_NANINF_CNTL */ | ||
187 | 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */ | ||
188 | 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */ | ||
189 | 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */ | ||
190 | 0x00000000, /* */ | ||
191 | 0x00000000, /* */ | ||
192 | |||
193 | 0xc0026900, | ||
194 | 0x00000229, | ||
195 | 0x00000000, /* SQ_PGM_START_FS */ | ||
196 | 0x00000000, | ||
197 | |||
198 | 0xc0016900, | ||
199 | 0x0000023b, | ||
200 | 0x00000000, /* SQ_LDS_ALLOC_PS */ | ||
201 | |||
202 | 0xc0066900, | ||
203 | 0x00000240, | ||
204 | 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */ | ||
205 | 0x00000000, | ||
206 | 0x00000000, | ||
207 | 0x00000000, | ||
208 | 0x00000000, | ||
209 | 0x00000000, | ||
210 | |||
211 | 0xc0046900, | ||
212 | 0x00000247, | ||
213 | 0x00000000, /* SQ_GS_VERT_ITEMSIZE */ | ||
214 | 0x00000000, | ||
215 | 0x00000000, | ||
216 | 0x00000000, | ||
217 | |||
218 | 0xc0116900, | ||
219 | 0x00000280, | ||
220 | 0x00000000, /* PA_SU_POINT_SIZE */ | ||
221 | 0x00000000, /* PA_SU_POINT_MINMAX */ | ||
222 | 0x00000008, /* PA_SU_LINE_CNTL */ | ||
223 | 0x00000000, /* PA_SC_LINE_STIPPLE */ | ||
224 | 0x00000000, /* VGT_OUTPUT_PATH_CNTL */ | ||
225 | 0x00000000, /* VGT_HOS_CNTL */ | ||
226 | 0x00000000, | ||
227 | 0x00000000, | ||
228 | 0x00000000, | ||
229 | 0x00000000, | ||
230 | 0x00000000, | ||
231 | 0x00000000, | ||
232 | 0x00000000, | ||
233 | 0x00000000, | ||
234 | 0x00000000, | ||
235 | 0x00000000, | ||
236 | 0x00000000, /* VGT_GS_MODE */ | ||
237 | |||
238 | 0xc0026900, | ||
239 | 0x00000292, | ||
240 | 0x00000000, /* PA_SC_MODE_CNTL_0 */ | ||
241 | 0x00000000, /* PA_SC_MODE_CNTL_1 */ | ||
242 | |||
243 | 0xc0016900, | ||
244 | 0x000002a1, | ||
245 | 0x00000000, /* VGT_PRIMITIVEID_EN */ | ||
246 | |||
247 | 0xc0016900, | ||
248 | 0x000002a5, | ||
249 | 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */ | ||
250 | |||
251 | 0xc0026900, | ||
252 | 0x000002a8, | ||
253 | 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */ | ||
254 | 0x00000000, | ||
255 | |||
256 | 0xc0026900, | ||
257 | 0x000002ad, | ||
258 | 0x00000000, /* VGT_REUSE_OFF */ | ||
259 | 0x00000000, | ||
260 | |||
261 | 0xc0016900, | ||
262 | 0x000002d5, | ||
263 | 0x00000000, /* VGT_SHADER_STAGES_EN */ | ||
264 | |||
265 | 0xc0016900, | ||
266 | 0x000002dc, | ||
267 | 0x0000aa00, /* DB_ALPHA_TO_MASK */ | ||
268 | |||
269 | 0xc0066900, | ||
270 | 0x000002de, | ||
271 | 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */ | ||
272 | 0x00000000, | ||
273 | 0x00000000, | ||
274 | 0x00000000, | ||
275 | 0x00000000, | ||
276 | 0x00000000, | ||
277 | |||
278 | 0xc0026900, | ||
279 | 0x000002e5, | ||
280 | 0x00000000, /* VGT_STRMOUT_CONFIG */ | ||
281 | 0x00000000, | ||
282 | |||
283 | 0xc01b6900, | ||
284 | 0x000002f5, | ||
285 | 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */ | ||
286 | 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */ | ||
287 | 0x00000000, /* PA_SC_LINE_CNTL */ | ||
288 | 0x00000000, /* PA_SC_AA_CONFIG */ | ||
289 | 0x00000005, /* PA_SU_VTX_CNTL */ | ||
290 | 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */ | ||
291 | 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */ | ||
292 | 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */ | ||
293 | 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */ | ||
294 | 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */ | ||
295 | 0x00000000, | ||
296 | 0x00000000, | ||
297 | 0x00000000, | ||
298 | 0x00000000, | ||
299 | 0x00000000, | ||
300 | 0x00000000, | ||
301 | 0x00000000, | ||
302 | 0x00000000, | ||
303 | 0x00000000, | ||
304 | 0x00000000, | ||
305 | 0x00000000, | ||
306 | 0x00000000, | ||
307 | 0x00000000, | ||
308 | 0x00000000, | ||
309 | 0x00000000, | ||
310 | 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */ | ||
311 | 0xffffffff, | ||
312 | |||
313 | 0xc0026900, | ||
314 | 0x00000316, | ||
315 | 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */ | ||
316 | 0x00000010, /* */ | ||
317 | }; | ||
318 | |||
319 | const u32 cayman_vs[] = | ||
320 | { | ||
321 | 0x00000004, | ||
322 | 0x80400400, | ||
323 | 0x0000a03c, | ||
324 | 0x95000688, | ||
325 | 0x00004000, | ||
326 | 0x15000688, | ||
327 | 0x00000000, | ||
328 | 0x88000000, | ||
329 | 0x04000000, | ||
330 | 0x67961001, | ||
331 | #ifdef __BIG_ENDIAN | ||
332 | 0x00020000, | ||
333 | #else | ||
334 | 0x00000000, | ||
335 | #endif | ||
336 | 0x00000000, | ||
337 | 0x04000000, | ||
338 | 0x67961000, | ||
339 | #ifdef __BIG_ENDIAN | ||
340 | 0x00020008, | ||
341 | #else | ||
342 | 0x00000008, | ||
343 | #endif | ||
344 | 0x00000000, | ||
345 | }; | ||
346 | |||
347 | const u32 cayman_ps[] = | ||
348 | { | ||
349 | 0x00000004, | ||
350 | 0xa00c0000, | ||
351 | 0x00000008, | ||
352 | 0x80400000, | ||
353 | 0x00000000, | ||
354 | 0x95000688, | ||
355 | 0x00000000, | ||
356 | 0x88000000, | ||
357 | 0x00380400, | ||
358 | 0x00146b10, | ||
359 | 0x00380000, | ||
360 | 0x20146b10, | ||
361 | 0x00380400, | ||
362 | 0x40146b00, | ||
363 | 0x80380000, | ||
364 | 0x60146b00, | ||
365 | 0x00000010, | ||
366 | 0x000d1000, | ||
367 | 0xb0800000, | ||
368 | 0x00000000, | ||
369 | }; | ||
370 | |||
371 | const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps); | ||
372 | const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs); | ||
373 | const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state); | ||
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.h b/drivers/gpu/drm/radeon/cayman_blit_shaders.h new file mode 100644 index 000000000000..f5d0e9a60267 --- /dev/null +++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.h | |||
@@ -0,0 +1,35 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef CAYMAN_BLIT_SHADERS_H | ||
26 | #define CAYMAN_BLIT_SHADERS_H | ||
27 | |||
28 | extern const u32 cayman_ps[]; | ||
29 | extern const u32 cayman_vs[]; | ||
30 | extern const u32 cayman_default_state[]; | ||
31 | |||
32 | extern const u32 cayman_ps_size, cayman_vs_size; | ||
33 | extern const u32 cayman_default_size; | ||
34 | |||
35 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 2f93d46ae69a..15bd0477a3e8 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -32,26 +32,102 @@ | |||
32 | #include "atom.h" | 32 | #include "atom.h" |
33 | #include "avivod.h" | 33 | #include "avivod.h" |
34 | #include "evergreen_reg.h" | 34 | #include "evergreen_reg.h" |
35 | #include "evergreen_blit_shaders.h" | ||
35 | 36 | ||
36 | #define EVERGREEN_PFP_UCODE_SIZE 1120 | 37 | #define EVERGREEN_PFP_UCODE_SIZE 1120 |
37 | #define EVERGREEN_PM4_UCODE_SIZE 1376 | 38 | #define EVERGREEN_PM4_UCODE_SIZE 1376 |
38 | 39 | ||
39 | static void evergreen_gpu_init(struct radeon_device *rdev); | 40 | static void evergreen_gpu_init(struct radeon_device *rdev); |
40 | void evergreen_fini(struct radeon_device *rdev); | 41 | void evergreen_fini(struct radeon_device *rdev); |
42 | static void evergreen_pcie_gen2_enable(struct radeon_device *rdev); | ||
43 | |||
44 | void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) | ||
45 | { | ||
46 | /* enable the pflip int */ | ||
47 | radeon_irq_kms_pflip_irq_get(rdev, crtc); | ||
48 | } | ||
49 | |||
50 | void evergreen_post_page_flip(struct radeon_device *rdev, int crtc) | ||
51 | { | ||
52 | /* disable the pflip int */ | ||
53 | radeon_irq_kms_pflip_irq_put(rdev, crtc); | ||
54 | } | ||
55 | |||
56 | u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | ||
57 | { | ||
58 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | ||
59 | u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); | ||
60 | |||
61 | /* Lock the graphics update lock */ | ||
62 | tmp |= EVERGREEN_GRPH_UPDATE_LOCK; | ||
63 | WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); | ||
64 | |||
65 | /* update the scanout addresses */ | ||
66 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, | ||
67 | upper_32_bits(crtc_base)); | ||
68 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | ||
69 | (u32)crtc_base); | ||
70 | |||
71 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, | ||
72 | upper_32_bits(crtc_base)); | ||
73 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | ||
74 | (u32)crtc_base); | ||
75 | |||
76 | /* Wait for update_pending to go high. */ | ||
77 | while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)); | ||
78 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); | ||
79 | |||
80 | /* Unlock the lock, so double-buffering can take place inside vblank */ | ||
81 | tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK; | ||
82 | WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); | ||
83 | |||
84 | /* Return current update_pending status: */ | ||
85 | return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING; | ||
86 | } | ||
41 | 87 | ||
42 | /* get temperature in millidegrees */ | 88 | /* get temperature in millidegrees */ |
43 | u32 evergreen_get_temp(struct radeon_device *rdev) | 89 | int evergreen_get_temp(struct radeon_device *rdev) |
44 | { | 90 | { |
45 | u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> | 91 | u32 temp, toffset; |
46 | ASIC_T_SHIFT; | 92 | int actual_temp = 0; |
47 | u32 actual_temp = 0; | 93 | |
48 | 94 | if (rdev->family == CHIP_JUNIPER) { | |
49 | if ((temp >> 10) & 1) | 95 | toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >> |
50 | actual_temp = 0; | 96 | TOFFSET_SHIFT; |
51 | else if ((temp >> 9) & 1) | 97 | temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >> |
52 | actual_temp = 255; | 98 | TS0_ADC_DOUT_SHIFT; |
53 | else | 99 | |
54 | actual_temp = (temp >> 1) & 0xff; | 100 | if (toffset & 0x100) |
101 | actual_temp = temp / 2 - (0x200 - toffset); | ||
102 | else | ||
103 | actual_temp = temp / 2 + toffset; | ||
104 | |||
105 | actual_temp = actual_temp * 1000; | ||
106 | |||
107 | } else { | ||
108 | temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> | ||
109 | ASIC_T_SHIFT; | ||
110 | |||
111 | if (temp & 0x400) | ||
112 | actual_temp = -256; | ||
113 | else if (temp & 0x200) | ||
114 | actual_temp = 255; | ||
115 | else if (temp & 0x100) { | ||
116 | actual_temp = temp & 0x1ff; | ||
117 | actual_temp |= ~0x1ff; | ||
118 | } else | ||
119 | actual_temp = temp & 0xff; | ||
120 | |||
121 | actual_temp = (actual_temp * 1000) / 2; | ||
122 | } | ||
123 | |||
124 | return actual_temp; | ||
125 | } | ||
126 | |||
127 | int sumo_get_temp(struct radeon_device *rdev) | ||
128 | { | ||
129 | u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff; | ||
130 | int actual_temp = temp - 49; | ||
55 | 131 | ||
56 | return actual_temp * 1000; | 132 | return actual_temp * 1000; |
57 | } | 133 | } |
@@ -63,11 +139,22 @@ void evergreen_pm_misc(struct radeon_device *rdev) | |||
63 | struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; | 139 | struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; |
64 | struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; | 140 | struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; |
65 | 141 | ||
66 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { | 142 | if (voltage->type == VOLTAGE_SW) { |
67 | if (voltage->voltage != rdev->pm.current_vddc) { | 143 | /* 0xff01 is a flag rather then an actual voltage */ |
68 | radeon_atom_set_voltage(rdev, voltage->voltage); | 144 | if (voltage->voltage == 0xff01) |
145 | return; | ||
146 | if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) { | ||
147 | radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); | ||
69 | rdev->pm.current_vddc = voltage->voltage; | 148 | rdev->pm.current_vddc = voltage->voltage; |
70 | DRM_DEBUG("Setting: v: %d\n", voltage->voltage); | 149 | DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage); |
150 | } | ||
151 | /* 0xff01 is a flag rather then an actual voltage */ | ||
152 | if (voltage->vddci == 0xff01) | ||
153 | return; | ||
154 | if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) { | ||
155 | radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI); | ||
156 | rdev->pm.current_vddci = voltage->vddci; | ||
157 | DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci); | ||
71 | } | 158 | } |
72 | } | 159 | } |
73 | } | 160 | } |
@@ -284,12 +371,458 @@ void evergreen_hpd_fini(struct radeon_device *rdev) | |||
284 | } | 371 | } |
285 | } | 372 | } |
286 | 373 | ||
374 | /* watermark setup */ | ||
375 | |||
376 | static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, | ||
377 | struct radeon_crtc *radeon_crtc, | ||
378 | struct drm_display_mode *mode, | ||
379 | struct drm_display_mode *other_mode) | ||
380 | { | ||
381 | u32 tmp; | ||
382 | /* | ||
383 | * Line Buffer Setup | ||
384 | * There are 3 line buffers, each one shared by 2 display controllers. | ||
385 | * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between | ||
386 | * the display controllers. The paritioning is done via one of four | ||
387 | * preset allocations specified in bits 2:0: | ||
388 | * first display controller | ||
389 | * 0 - first half of lb (3840 * 2) | ||
390 | * 1 - first 3/4 of lb (5760 * 2) | ||
391 | * 2 - whole lb (7680 * 2), other crtc must be disabled | ||
392 | * 3 - first 1/4 of lb (1920 * 2) | ||
393 | * second display controller | ||
394 | * 4 - second half of lb (3840 * 2) | ||
395 | * 5 - second 3/4 of lb (5760 * 2) | ||
396 | * 6 - whole lb (7680 * 2), other crtc must be disabled | ||
397 | * 7 - last 1/4 of lb (1920 * 2) | ||
398 | */ | ||
399 | /* this can get tricky if we have two large displays on a paired group | ||
400 | * of crtcs. Ideally for multiple large displays we'd assign them to | ||
401 | * non-linked crtcs for maximum line buffer allocation. | ||
402 | */ | ||
403 | if (radeon_crtc->base.enabled && mode) { | ||
404 | if (other_mode) | ||
405 | tmp = 0; /* 1/2 */ | ||
406 | else | ||
407 | tmp = 2; /* whole */ | ||
408 | } else | ||
409 | tmp = 0; | ||
410 | |||
411 | /* second controller of the pair uses second half of the lb */ | ||
412 | if (radeon_crtc->crtc_id % 2) | ||
413 | tmp += 4; | ||
414 | WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp); | ||
415 | |||
416 | if (radeon_crtc->base.enabled && mode) { | ||
417 | switch (tmp) { | ||
418 | case 0: | ||
419 | case 4: | ||
420 | default: | ||
421 | if (ASIC_IS_DCE5(rdev)) | ||
422 | return 4096 * 2; | ||
423 | else | ||
424 | return 3840 * 2; | ||
425 | case 1: | ||
426 | case 5: | ||
427 | if (ASIC_IS_DCE5(rdev)) | ||
428 | return 6144 * 2; | ||
429 | else | ||
430 | return 5760 * 2; | ||
431 | case 2: | ||
432 | case 6: | ||
433 | if (ASIC_IS_DCE5(rdev)) | ||
434 | return 8192 * 2; | ||
435 | else | ||
436 | return 7680 * 2; | ||
437 | case 3: | ||
438 | case 7: | ||
439 | if (ASIC_IS_DCE5(rdev)) | ||
440 | return 2048 * 2; | ||
441 | else | ||
442 | return 1920 * 2; | ||
443 | } | ||
444 | } | ||
445 | |||
446 | /* controller not enabled, so no lb used */ | ||
447 | return 0; | ||
448 | } | ||
449 | |||
450 | static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev) | ||
451 | { | ||
452 | u32 tmp = RREG32(MC_SHARED_CHMAP); | ||
453 | |||
454 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
455 | case 0: | ||
456 | default: | ||
457 | return 1; | ||
458 | case 1: | ||
459 | return 2; | ||
460 | case 2: | ||
461 | return 4; | ||
462 | case 3: | ||
463 | return 8; | ||
464 | } | ||
465 | } | ||
466 | |||
467 | struct evergreen_wm_params { | ||
468 | u32 dram_channels; /* number of dram channels */ | ||
469 | u32 yclk; /* bandwidth per dram data pin in kHz */ | ||
470 | u32 sclk; /* engine clock in kHz */ | ||
471 | u32 disp_clk; /* display clock in kHz */ | ||
472 | u32 src_width; /* viewport width */ | ||
473 | u32 active_time; /* active display time in ns */ | ||
474 | u32 blank_time; /* blank time in ns */ | ||
475 | bool interlaced; /* mode is interlaced */ | ||
476 | fixed20_12 vsc; /* vertical scale ratio */ | ||
477 | u32 num_heads; /* number of active crtcs */ | ||
478 | u32 bytes_per_pixel; /* bytes per pixel display + overlay */ | ||
479 | u32 lb_size; /* line buffer allocated to pipe */ | ||
480 | u32 vtaps; /* vertical scaler taps */ | ||
481 | }; | ||
482 | |||
483 | static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm) | ||
484 | { | ||
485 | /* Calculate DRAM Bandwidth and the part allocated to display. */ | ||
486 | fixed20_12 dram_efficiency; /* 0.7 */ | ||
487 | fixed20_12 yclk, dram_channels, bandwidth; | ||
488 | fixed20_12 a; | ||
489 | |||
490 | a.full = dfixed_const(1000); | ||
491 | yclk.full = dfixed_const(wm->yclk); | ||
492 | yclk.full = dfixed_div(yclk, a); | ||
493 | dram_channels.full = dfixed_const(wm->dram_channels * 4); | ||
494 | a.full = dfixed_const(10); | ||
495 | dram_efficiency.full = dfixed_const(7); | ||
496 | dram_efficiency.full = dfixed_div(dram_efficiency, a); | ||
497 | bandwidth.full = dfixed_mul(dram_channels, yclk); | ||
498 | bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); | ||
499 | |||
500 | return dfixed_trunc(bandwidth); | ||
501 | } | ||
502 | |||
503 | static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm) | ||
504 | { | ||
505 | /* Calculate DRAM Bandwidth and the part allocated to display. */ | ||
506 | fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ | ||
507 | fixed20_12 yclk, dram_channels, bandwidth; | ||
508 | fixed20_12 a; | ||
509 | |||
510 | a.full = dfixed_const(1000); | ||
511 | yclk.full = dfixed_const(wm->yclk); | ||
512 | yclk.full = dfixed_div(yclk, a); | ||
513 | dram_channels.full = dfixed_const(wm->dram_channels * 4); | ||
514 | a.full = dfixed_const(10); | ||
515 | disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */ | ||
516 | disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); | ||
517 | bandwidth.full = dfixed_mul(dram_channels, yclk); | ||
518 | bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); | ||
519 | |||
520 | return dfixed_trunc(bandwidth); | ||
521 | } | ||
522 | |||
523 | static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm) | ||
524 | { | ||
525 | /* Calculate the display Data return Bandwidth */ | ||
526 | fixed20_12 return_efficiency; /* 0.8 */ | ||
527 | fixed20_12 sclk, bandwidth; | ||
528 | fixed20_12 a; | ||
529 | |||
530 | a.full = dfixed_const(1000); | ||
531 | sclk.full = dfixed_const(wm->sclk); | ||
532 | sclk.full = dfixed_div(sclk, a); | ||
533 | a.full = dfixed_const(10); | ||
534 | return_efficiency.full = dfixed_const(8); | ||
535 | return_efficiency.full = dfixed_div(return_efficiency, a); | ||
536 | a.full = dfixed_const(32); | ||
537 | bandwidth.full = dfixed_mul(a, sclk); | ||
538 | bandwidth.full = dfixed_mul(bandwidth, return_efficiency); | ||
539 | |||
540 | return dfixed_trunc(bandwidth); | ||
541 | } | ||
542 | |||
543 | static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm) | ||
544 | { | ||
545 | /* Calculate the DMIF Request Bandwidth */ | ||
546 | fixed20_12 disp_clk_request_efficiency; /* 0.8 */ | ||
547 | fixed20_12 disp_clk, bandwidth; | ||
548 | fixed20_12 a; | ||
549 | |||
550 | a.full = dfixed_const(1000); | ||
551 | disp_clk.full = dfixed_const(wm->disp_clk); | ||
552 | disp_clk.full = dfixed_div(disp_clk, a); | ||
553 | a.full = dfixed_const(10); | ||
554 | disp_clk_request_efficiency.full = dfixed_const(8); | ||
555 | disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); | ||
556 | a.full = dfixed_const(32); | ||
557 | bandwidth.full = dfixed_mul(a, disp_clk); | ||
558 | bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency); | ||
559 | |||
560 | return dfixed_trunc(bandwidth); | ||
561 | } | ||
562 | |||
563 | static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm) | ||
564 | { | ||
565 | /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */ | ||
566 | u32 dram_bandwidth = evergreen_dram_bandwidth(wm); | ||
567 | u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm); | ||
568 | u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm); | ||
569 | |||
570 | return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); | ||
571 | } | ||
572 | |||
573 | static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm) | ||
574 | { | ||
575 | /* Calculate the display mode Average Bandwidth | ||
576 | * DisplayMode should contain the source and destination dimensions, | ||
577 | * timing, etc. | ||
578 | */ | ||
579 | fixed20_12 bpp; | ||
580 | fixed20_12 line_time; | ||
581 | fixed20_12 src_width; | ||
582 | fixed20_12 bandwidth; | ||
583 | fixed20_12 a; | ||
584 | |||
585 | a.full = dfixed_const(1000); | ||
586 | line_time.full = dfixed_const(wm->active_time + wm->blank_time); | ||
587 | line_time.full = dfixed_div(line_time, a); | ||
588 | bpp.full = dfixed_const(wm->bytes_per_pixel); | ||
589 | src_width.full = dfixed_const(wm->src_width); | ||
590 | bandwidth.full = dfixed_mul(src_width, bpp); | ||
591 | bandwidth.full = dfixed_mul(bandwidth, wm->vsc); | ||
592 | bandwidth.full = dfixed_div(bandwidth, line_time); | ||
593 | |||
594 | return dfixed_trunc(bandwidth); | ||
595 | } | ||
596 | |||
597 | static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm) | ||
598 | { | ||
599 | /* First calcualte the latency in ns */ | ||
600 | u32 mc_latency = 2000; /* 2000 ns. */ | ||
601 | u32 available_bandwidth = evergreen_available_bandwidth(wm); | ||
602 | u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; | ||
603 | u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; | ||
604 | u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ | ||
605 | u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + | ||
606 | (wm->num_heads * cursor_line_pair_return_time); | ||
607 | u32 latency = mc_latency + other_heads_data_return_time + dc_latency; | ||
608 | u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; | ||
609 | fixed20_12 a, b, c; | ||
610 | |||
611 | if (wm->num_heads == 0) | ||
612 | return 0; | ||
613 | |||
614 | a.full = dfixed_const(2); | ||
615 | b.full = dfixed_const(1); | ||
616 | if ((wm->vsc.full > a.full) || | ||
617 | ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || | ||
618 | (wm->vtaps >= 5) || | ||
619 | ((wm->vsc.full >= a.full) && wm->interlaced)) | ||
620 | max_src_lines_per_dst_line = 4; | ||
621 | else | ||
622 | max_src_lines_per_dst_line = 2; | ||
623 | |||
624 | a.full = dfixed_const(available_bandwidth); | ||
625 | b.full = dfixed_const(wm->num_heads); | ||
626 | a.full = dfixed_div(a, b); | ||
627 | |||
628 | b.full = dfixed_const(1000); | ||
629 | c.full = dfixed_const(wm->disp_clk); | ||
630 | b.full = dfixed_div(c, b); | ||
631 | c.full = dfixed_const(wm->bytes_per_pixel); | ||
632 | b.full = dfixed_mul(b, c); | ||
633 | |||
634 | lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b)); | ||
635 | |||
636 | a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); | ||
637 | b.full = dfixed_const(1000); | ||
638 | c.full = dfixed_const(lb_fill_bw); | ||
639 | b.full = dfixed_div(c, b); | ||
640 | a.full = dfixed_div(a, b); | ||
641 | line_fill_time = dfixed_trunc(a); | ||
642 | |||
643 | if (line_fill_time < wm->active_time) | ||
644 | return latency; | ||
645 | else | ||
646 | return latency + (line_fill_time - wm->active_time); | ||
647 | |||
648 | } | ||
649 | |||
650 | static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm) | ||
651 | { | ||
652 | if (evergreen_average_bandwidth(wm) <= | ||
653 | (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads)) | ||
654 | return true; | ||
655 | else | ||
656 | return false; | ||
657 | }; | ||
658 | |||
659 | static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm) | ||
660 | { | ||
661 | if (evergreen_average_bandwidth(wm) <= | ||
662 | (evergreen_available_bandwidth(wm) / wm->num_heads)) | ||
663 | return true; | ||
664 | else | ||
665 | return false; | ||
666 | }; | ||
667 | |||
668 | static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm) | ||
669 | { | ||
670 | u32 lb_partitions = wm->lb_size / wm->src_width; | ||
671 | u32 line_time = wm->active_time + wm->blank_time; | ||
672 | u32 latency_tolerant_lines; | ||
673 | u32 latency_hiding; | ||
674 | fixed20_12 a; | ||
675 | |||
676 | a.full = dfixed_const(1); | ||
677 | if (wm->vsc.full > a.full) | ||
678 | latency_tolerant_lines = 1; | ||
679 | else { | ||
680 | if (lb_partitions <= (wm->vtaps + 1)) | ||
681 | latency_tolerant_lines = 1; | ||
682 | else | ||
683 | latency_tolerant_lines = 2; | ||
684 | } | ||
685 | |||
686 | latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); | ||
687 | |||
688 | if (evergreen_latency_watermark(wm) <= latency_hiding) | ||
689 | return true; | ||
690 | else | ||
691 | return false; | ||
692 | } | ||
693 | |||
694 | static void evergreen_program_watermarks(struct radeon_device *rdev, | ||
695 | struct radeon_crtc *radeon_crtc, | ||
696 | u32 lb_size, u32 num_heads) | ||
697 | { | ||
698 | struct drm_display_mode *mode = &radeon_crtc->base.mode; | ||
699 | struct evergreen_wm_params wm; | ||
700 | u32 pixel_period; | ||
701 | u32 line_time = 0; | ||
702 | u32 latency_watermark_a = 0, latency_watermark_b = 0; | ||
703 | u32 priority_a_mark = 0, priority_b_mark = 0; | ||
704 | u32 priority_a_cnt = PRIORITY_OFF; | ||
705 | u32 priority_b_cnt = PRIORITY_OFF; | ||
706 | u32 pipe_offset = radeon_crtc->crtc_id * 16; | ||
707 | u32 tmp, arb_control3; | ||
708 | fixed20_12 a, b, c; | ||
709 | |||
710 | if (radeon_crtc->base.enabled && num_heads && mode) { | ||
711 | pixel_period = 1000000 / (u32)mode->clock; | ||
712 | line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); | ||
713 | priority_a_cnt = 0; | ||
714 | priority_b_cnt = 0; | ||
715 | |||
716 | wm.yclk = rdev->pm.current_mclk * 10; | ||
717 | wm.sclk = rdev->pm.current_sclk * 10; | ||
718 | wm.disp_clk = mode->clock; | ||
719 | wm.src_width = mode->crtc_hdisplay; | ||
720 | wm.active_time = mode->crtc_hdisplay * pixel_period; | ||
721 | wm.blank_time = line_time - wm.active_time; | ||
722 | wm.interlaced = false; | ||
723 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
724 | wm.interlaced = true; | ||
725 | wm.vsc = radeon_crtc->vsc; | ||
726 | wm.vtaps = 1; | ||
727 | if (radeon_crtc->rmx_type != RMX_OFF) | ||
728 | wm.vtaps = 2; | ||
729 | wm.bytes_per_pixel = 4; /* XXX: get this from fb config */ | ||
730 | wm.lb_size = lb_size; | ||
731 | wm.dram_channels = evergreen_get_number_of_dram_channels(rdev); | ||
732 | wm.num_heads = num_heads; | ||
733 | |||
734 | /* set for high clocks */ | ||
735 | latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535); | ||
736 | /* set for low clocks */ | ||
737 | /* wm.yclk = low clk; wm.sclk = low clk */ | ||
738 | latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535); | ||
739 | |||
740 | /* possibly force display priority to high */ | ||
741 | /* should really do this at mode validation time... */ | ||
742 | if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) || | ||
743 | !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || | ||
744 | !evergreen_check_latency_hiding(&wm) || | ||
745 | (rdev->disp_priority == 2)) { | ||
746 | DRM_INFO("force priority to high\n"); | ||
747 | priority_a_cnt |= PRIORITY_ALWAYS_ON; | ||
748 | priority_b_cnt |= PRIORITY_ALWAYS_ON; | ||
749 | } | ||
750 | |||
751 | a.full = dfixed_const(1000); | ||
752 | b.full = dfixed_const(mode->clock); | ||
753 | b.full = dfixed_div(b, a); | ||
754 | c.full = dfixed_const(latency_watermark_a); | ||
755 | c.full = dfixed_mul(c, b); | ||
756 | c.full = dfixed_mul(c, radeon_crtc->hsc); | ||
757 | c.full = dfixed_div(c, a); | ||
758 | a.full = dfixed_const(16); | ||
759 | c.full = dfixed_div(c, a); | ||
760 | priority_a_mark = dfixed_trunc(c); | ||
761 | priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK; | ||
762 | |||
763 | a.full = dfixed_const(1000); | ||
764 | b.full = dfixed_const(mode->clock); | ||
765 | b.full = dfixed_div(b, a); | ||
766 | c.full = dfixed_const(latency_watermark_b); | ||
767 | c.full = dfixed_mul(c, b); | ||
768 | c.full = dfixed_mul(c, radeon_crtc->hsc); | ||
769 | c.full = dfixed_div(c, a); | ||
770 | a.full = dfixed_const(16); | ||
771 | c.full = dfixed_div(c, a); | ||
772 | priority_b_mark = dfixed_trunc(c); | ||
773 | priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; | ||
774 | } | ||
775 | |||
776 | /* select wm A */ | ||
777 | arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset); | ||
778 | tmp = arb_control3; | ||
779 | tmp &= ~LATENCY_WATERMARK_MASK(3); | ||
780 | tmp |= LATENCY_WATERMARK_MASK(1); | ||
781 | WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp); | ||
782 | WREG32(PIPE0_LATENCY_CONTROL + pipe_offset, | ||
783 | (LATENCY_LOW_WATERMARK(latency_watermark_a) | | ||
784 | LATENCY_HIGH_WATERMARK(line_time))); | ||
785 | /* select wm B */ | ||
786 | tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset); | ||
787 | tmp &= ~LATENCY_WATERMARK_MASK(3); | ||
788 | tmp |= LATENCY_WATERMARK_MASK(2); | ||
789 | WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp); | ||
790 | WREG32(PIPE0_LATENCY_CONTROL + pipe_offset, | ||
791 | (LATENCY_LOW_WATERMARK(latency_watermark_b) | | ||
792 | LATENCY_HIGH_WATERMARK(line_time))); | ||
793 | /* restore original selection */ | ||
794 | WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3); | ||
795 | |||
796 | /* write the priority marks */ | ||
797 | WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt); | ||
798 | WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt); | ||
799 | |||
800 | } | ||
801 | |||
287 | void evergreen_bandwidth_update(struct radeon_device *rdev) | 802 | void evergreen_bandwidth_update(struct radeon_device *rdev) |
288 | { | 803 | { |
289 | /* XXX */ | 804 | struct drm_display_mode *mode0 = NULL; |
805 | struct drm_display_mode *mode1 = NULL; | ||
806 | u32 num_heads = 0, lb_size; | ||
807 | int i; | ||
808 | |||
809 | radeon_update_display_priority(rdev); | ||
810 | |||
811 | for (i = 0; i < rdev->num_crtc; i++) { | ||
812 | if (rdev->mode_info.crtcs[i]->base.enabled) | ||
813 | num_heads++; | ||
814 | } | ||
815 | for (i = 0; i < rdev->num_crtc; i += 2) { | ||
816 | mode0 = &rdev->mode_info.crtcs[i]->base.mode; | ||
817 | mode1 = &rdev->mode_info.crtcs[i+1]->base.mode; | ||
818 | lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1); | ||
819 | evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads); | ||
820 | lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0); | ||
821 | evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads); | ||
822 | } | ||
290 | } | 823 | } |
291 | 824 | ||
292 | static int evergreen_mc_wait_for_idle(struct radeon_device *rdev) | 825 | int evergreen_mc_wait_for_idle(struct radeon_device *rdev) |
293 | { | 826 | { |
294 | unsigned i; | 827 | unsigned i; |
295 | u32 tmp; | 828 | u32 tmp; |
@@ -312,6 +845,8 @@ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev) | |||
312 | unsigned i; | 845 | unsigned i; |
313 | u32 tmp; | 846 | u32 tmp; |
314 | 847 | ||
848 | WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); | ||
849 | |||
315 | WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); | 850 | WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); |
316 | for (i = 0; i < rdev->usec_timeout; i++) { | 851 | for (i = 0; i < rdev->usec_timeout; i++) { |
317 | /* read MC_STATUS */ | 852 | /* read MC_STATUS */ |
@@ -352,9 +887,15 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) | |||
352 | SYSTEM_ACCESS_MODE_NOT_IN_SYS | | 887 | SYSTEM_ACCESS_MODE_NOT_IN_SYS | |
353 | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | | 888 | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | |
354 | EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); | 889 | EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); |
355 | WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); | 890 | if (rdev->flags & RADEON_IS_IGP) { |
356 | WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); | 891 | WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp); |
357 | WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); | 892 | WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp); |
893 | WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp); | ||
894 | } else { | ||
895 | WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); | ||
896 | WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); | ||
897 | WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); | ||
898 | } | ||
358 | WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); | 899 | WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); |
359 | WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); | 900 | WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); |
360 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); | 901 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); |
@@ -440,53 +981,73 @@ void evergreen_agp_enable(struct radeon_device *rdev) | |||
440 | WREG32(VM_CONTEXT1_CNTL, 0); | 981 | WREG32(VM_CONTEXT1_CNTL, 0); |
441 | } | 982 | } |
442 | 983 | ||
443 | static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) | 984 | void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) |
444 | { | 985 | { |
445 | save->vga_control[0] = RREG32(D1VGA_CONTROL); | 986 | save->vga_control[0] = RREG32(D1VGA_CONTROL); |
446 | save->vga_control[1] = RREG32(D2VGA_CONTROL); | 987 | save->vga_control[1] = RREG32(D2VGA_CONTROL); |
447 | save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL); | ||
448 | save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL); | ||
449 | save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL); | ||
450 | save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL); | ||
451 | save->vga_render_control = RREG32(VGA_RENDER_CONTROL); | 988 | save->vga_render_control = RREG32(VGA_RENDER_CONTROL); |
452 | save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); | 989 | save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); |
453 | save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); | 990 | save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); |
454 | save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); | 991 | save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); |
455 | save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); | 992 | if (rdev->num_crtc >= 4) { |
456 | save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); | 993 | save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL); |
457 | save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); | 994 | save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL); |
458 | save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); | 995 | save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); |
996 | save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); | ||
997 | } | ||
998 | if (rdev->num_crtc >= 6) { | ||
999 | save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL); | ||
1000 | save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL); | ||
1001 | save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); | ||
1002 | save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); | ||
1003 | } | ||
459 | 1004 | ||
460 | /* Stop all video */ | 1005 | /* Stop all video */ |
461 | WREG32(VGA_RENDER_CONTROL, 0); | 1006 | WREG32(VGA_RENDER_CONTROL, 0); |
462 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); | 1007 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); |
463 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); | 1008 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); |
464 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); | 1009 | if (rdev->num_crtc >= 4) { |
465 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); | 1010 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); |
466 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); | 1011 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); |
467 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); | 1012 | } |
1013 | if (rdev->num_crtc >= 6) { | ||
1014 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); | ||
1015 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); | ||
1016 | } | ||
468 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 1017 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
469 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 1018 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
470 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | 1019 | if (rdev->num_crtc >= 4) { |
471 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | 1020 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); |
472 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | 1021 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); |
473 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | 1022 | } |
1023 | if (rdev->num_crtc >= 6) { | ||
1024 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | ||
1025 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | ||
1026 | } | ||
474 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 1027 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
475 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 1028 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
476 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | 1029 | if (rdev->num_crtc >= 4) { |
477 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | 1030 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); |
478 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | 1031 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); |
479 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | 1032 | } |
1033 | if (rdev->num_crtc >= 6) { | ||
1034 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | ||
1035 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | ||
1036 | } | ||
480 | 1037 | ||
481 | WREG32(D1VGA_CONTROL, 0); | 1038 | WREG32(D1VGA_CONTROL, 0); |
482 | WREG32(D2VGA_CONTROL, 0); | 1039 | WREG32(D2VGA_CONTROL, 0); |
483 | WREG32(EVERGREEN_D3VGA_CONTROL, 0); | 1040 | if (rdev->num_crtc >= 4) { |
484 | WREG32(EVERGREEN_D4VGA_CONTROL, 0); | 1041 | WREG32(EVERGREEN_D3VGA_CONTROL, 0); |
485 | WREG32(EVERGREEN_D5VGA_CONTROL, 0); | 1042 | WREG32(EVERGREEN_D4VGA_CONTROL, 0); |
486 | WREG32(EVERGREEN_D6VGA_CONTROL, 0); | 1043 | } |
1044 | if (rdev->num_crtc >= 6) { | ||
1045 | WREG32(EVERGREEN_D5VGA_CONTROL, 0); | ||
1046 | WREG32(EVERGREEN_D6VGA_CONTROL, 0); | ||
1047 | } | ||
487 | } | 1048 | } |
488 | 1049 | ||
489 | static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) | 1050 | void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) |
490 | { | 1051 | { |
491 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET, | 1052 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET, |
492 | upper_32_bits(rdev->mc.vram_start)); | 1053 | upper_32_bits(rdev->mc.vram_start)); |
@@ -506,41 +1067,44 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_ | |||
506 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, | 1067 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, |
507 | (u32)rdev->mc.vram_start); | 1068 | (u32)rdev->mc.vram_start); |
508 | 1069 | ||
509 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, | 1070 | if (rdev->num_crtc >= 4) { |
510 | upper_32_bits(rdev->mc.vram_start)); | 1071 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, |
511 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, | 1072 | upper_32_bits(rdev->mc.vram_start)); |
512 | upper_32_bits(rdev->mc.vram_start)); | 1073 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, |
513 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, | 1074 | upper_32_bits(rdev->mc.vram_start)); |
514 | (u32)rdev->mc.vram_start); | 1075 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, |
515 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, | 1076 | (u32)rdev->mc.vram_start); |
516 | (u32)rdev->mc.vram_start); | 1077 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, |
517 | 1078 | (u32)rdev->mc.vram_start); | |
518 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, | 1079 | |
519 | upper_32_bits(rdev->mc.vram_start)); | 1080 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, |
520 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, | 1081 | upper_32_bits(rdev->mc.vram_start)); |
521 | upper_32_bits(rdev->mc.vram_start)); | 1082 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, |
522 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, | 1083 | upper_32_bits(rdev->mc.vram_start)); |
523 | (u32)rdev->mc.vram_start); | 1084 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, |
524 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, | 1085 | (u32)rdev->mc.vram_start); |
525 | (u32)rdev->mc.vram_start); | 1086 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, |
526 | 1087 | (u32)rdev->mc.vram_start); | |
527 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, | 1088 | } |
528 | upper_32_bits(rdev->mc.vram_start)); | 1089 | if (rdev->num_crtc >= 6) { |
529 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, | 1090 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, |
530 | upper_32_bits(rdev->mc.vram_start)); | 1091 | upper_32_bits(rdev->mc.vram_start)); |
531 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, | 1092 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, |
532 | (u32)rdev->mc.vram_start); | 1093 | upper_32_bits(rdev->mc.vram_start)); |
533 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, | 1094 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, |
534 | (u32)rdev->mc.vram_start); | 1095 | (u32)rdev->mc.vram_start); |
535 | 1096 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, | |
536 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, | 1097 | (u32)rdev->mc.vram_start); |
537 | upper_32_bits(rdev->mc.vram_start)); | 1098 | |
538 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, | 1099 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, |
539 | upper_32_bits(rdev->mc.vram_start)); | 1100 | upper_32_bits(rdev->mc.vram_start)); |
540 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, | 1101 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, |
541 | (u32)rdev->mc.vram_start); | 1102 | upper_32_bits(rdev->mc.vram_start)); |
542 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, | 1103 | WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, |
543 | (u32)rdev->mc.vram_start); | 1104 | (u32)rdev->mc.vram_start); |
1105 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, | ||
1106 | (u32)rdev->mc.vram_start); | ||
1107 | } | ||
544 | 1108 | ||
545 | WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); | 1109 | WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); |
546 | WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); | 1110 | WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); |
@@ -550,32 +1114,48 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_ | |||
550 | /* Restore video state */ | 1114 | /* Restore video state */ |
551 | WREG32(D1VGA_CONTROL, save->vga_control[0]); | 1115 | WREG32(D1VGA_CONTROL, save->vga_control[0]); |
552 | WREG32(D2VGA_CONTROL, save->vga_control[1]); | 1116 | WREG32(D2VGA_CONTROL, save->vga_control[1]); |
553 | WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]); | 1117 | if (rdev->num_crtc >= 4) { |
554 | WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]); | 1118 | WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]); |
555 | WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]); | 1119 | WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]); |
556 | WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]); | 1120 | } |
1121 | if (rdev->num_crtc >= 6) { | ||
1122 | WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]); | ||
1123 | WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]); | ||
1124 | } | ||
557 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); | 1125 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); |
558 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); | 1126 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); |
559 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); | 1127 | if (rdev->num_crtc >= 4) { |
560 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); | 1128 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); |
561 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); | 1129 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); |
562 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); | 1130 | } |
1131 | if (rdev->num_crtc >= 6) { | ||
1132 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); | ||
1133 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); | ||
1134 | } | ||
563 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]); | 1135 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]); |
564 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]); | 1136 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]); |
565 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); | 1137 | if (rdev->num_crtc >= 4) { |
566 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); | 1138 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); |
567 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); | 1139 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); |
568 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); | 1140 | } |
1141 | if (rdev->num_crtc >= 6) { | ||
1142 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); | ||
1143 | WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); | ||
1144 | } | ||
569 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 1145 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
570 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 1146 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
571 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | 1147 | if (rdev->num_crtc >= 4) { |
572 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | 1148 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); |
573 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | 1149 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); |
574 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | 1150 | } |
1151 | if (rdev->num_crtc >= 6) { | ||
1152 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | ||
1153 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | ||
1154 | } | ||
575 | WREG32(VGA_RENDER_CONTROL, save->vga_render_control); | 1155 | WREG32(VGA_RENDER_CONTROL, save->vga_render_control); |
576 | } | 1156 | } |
577 | 1157 | ||
578 | static void evergreen_mc_program(struct radeon_device *rdev) | 1158 | void evergreen_mc_program(struct radeon_device *rdev) |
579 | { | 1159 | { |
580 | struct evergreen_mc_save save; | 1160 | struct evergreen_mc_save save; |
581 | u32 tmp; | 1161 | u32 tmp; |
@@ -619,11 +1199,17 @@ static void evergreen_mc_program(struct radeon_device *rdev) | |||
619 | rdev->mc.vram_end >> 12); | 1199 | rdev->mc.vram_end >> 12); |
620 | } | 1200 | } |
621 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); | 1201 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); |
1202 | if (rdev->flags & RADEON_IS_IGP) { | ||
1203 | tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; | ||
1204 | tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; | ||
1205 | tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20; | ||
1206 | WREG32(MC_FUS_VM_FB_OFFSET, tmp); | ||
1207 | } | ||
622 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; | 1208 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; |
623 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); | 1209 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); |
624 | WREG32(MC_VM_FB_LOCATION, tmp); | 1210 | WREG32(MC_VM_FB_LOCATION, tmp); |
625 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 1211 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
626 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 1212 | WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); |
627 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); | 1213 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); |
628 | if (rdev->flags & RADEON_IS_AGP) { | 1214 | if (rdev->flags & RADEON_IS_AGP) { |
629 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); | 1215 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); |
@@ -646,6 +1232,22 @@ static void evergreen_mc_program(struct radeon_device *rdev) | |||
646 | /* | 1232 | /* |
647 | * CP. | 1233 | * CP. |
648 | */ | 1234 | */ |
1235 | void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | ||
1236 | { | ||
1237 | /* set to DX10/11 mode */ | ||
1238 | radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0)); | ||
1239 | radeon_ring_write(rdev, 1); | ||
1240 | /* FIXME: implement */ | ||
1241 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | ||
1242 | radeon_ring_write(rdev, | ||
1243 | #ifdef __BIG_ENDIAN | ||
1244 | (2 << 0) | | ||
1245 | #endif | ||
1246 | (ib->gpu_addr & 0xFFFFFFFC)); | ||
1247 | radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); | ||
1248 | radeon_ring_write(rdev, ib->length_dw); | ||
1249 | } | ||
1250 | |||
649 | 1251 | ||
650 | static int evergreen_cp_load_microcode(struct radeon_device *rdev) | 1252 | static int evergreen_cp_load_microcode(struct radeon_device *rdev) |
651 | { | 1253 | { |
@@ -656,7 +1258,11 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev) | |||
656 | return -EINVAL; | 1258 | return -EINVAL; |
657 | 1259 | ||
658 | r700_cp_stop(rdev); | 1260 | r700_cp_stop(rdev); |
659 | WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); | 1261 | WREG32(CP_RB_CNTL, |
1262 | #ifdef __BIG_ENDIAN | ||
1263 | BUF_SWAP_32BIT | | ||
1264 | #endif | ||
1265 | RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); | ||
660 | 1266 | ||
661 | fw_data = (const __be32 *)rdev->pfp_fw->data; | 1267 | fw_data = (const __be32 *)rdev->pfp_fw->data; |
662 | WREG32(CP_PFP_UCODE_ADDR, 0); | 1268 | WREG32(CP_PFP_UCODE_ADDR, 0); |
@@ -677,7 +1283,7 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev) | |||
677 | 1283 | ||
678 | static int evergreen_cp_start(struct radeon_device *rdev) | 1284 | static int evergreen_cp_start(struct radeon_device *rdev) |
679 | { | 1285 | { |
680 | int r; | 1286 | int r, i; |
681 | uint32_t cp_me; | 1287 | uint32_t cp_me; |
682 | 1288 | ||
683 | r = radeon_ring_lock(rdev, 7); | 1289 | r = radeon_ring_lock(rdev, 7); |
@@ -697,16 +1303,44 @@ static int evergreen_cp_start(struct radeon_device *rdev) | |||
697 | cp_me = 0xff; | 1303 | cp_me = 0xff; |
698 | WREG32(CP_ME_CNTL, cp_me); | 1304 | WREG32(CP_ME_CNTL, cp_me); |
699 | 1305 | ||
700 | r = radeon_ring_lock(rdev, 4); | 1306 | r = radeon_ring_lock(rdev, evergreen_default_size + 19); |
701 | if (r) { | 1307 | if (r) { |
702 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); | 1308 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); |
703 | return r; | 1309 | return r; |
704 | } | 1310 | } |
705 | /* init some VGT regs */ | 1311 | |
706 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | 1312 | /* setup clear context state */ |
707 | radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2); | 1313 | radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); |
708 | radeon_ring_write(rdev, 0xe); | 1314 | radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); |
709 | radeon_ring_write(rdev, 0x10); | 1315 | |
1316 | for (i = 0; i < evergreen_default_size; i++) | ||
1317 | radeon_ring_write(rdev, evergreen_default_state[i]); | ||
1318 | |||
1319 | radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); | ||
1320 | radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE); | ||
1321 | |||
1322 | /* set clear context state */ | ||
1323 | radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0)); | ||
1324 | radeon_ring_write(rdev, 0); | ||
1325 | |||
1326 | /* SQ_VTX_BASE_VTX_LOC */ | ||
1327 | radeon_ring_write(rdev, 0xc0026f00); | ||
1328 | radeon_ring_write(rdev, 0x00000000); | ||
1329 | radeon_ring_write(rdev, 0x00000000); | ||
1330 | radeon_ring_write(rdev, 0x00000000); | ||
1331 | |||
1332 | /* Clear consts */ | ||
1333 | radeon_ring_write(rdev, 0xc0036f00); | ||
1334 | radeon_ring_write(rdev, 0x00000bc4); | ||
1335 | radeon_ring_write(rdev, 0xffffffff); | ||
1336 | radeon_ring_write(rdev, 0xffffffff); | ||
1337 | radeon_ring_write(rdev, 0xffffffff); | ||
1338 | |||
1339 | radeon_ring_write(rdev, 0xc0026900); | ||
1340 | radeon_ring_write(rdev, 0x00000316); | ||
1341 | radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ | ||
1342 | radeon_ring_write(rdev, 0x00000010); /* */ | ||
1343 | |||
710 | radeon_ring_unlock_commit(rdev); | 1344 | radeon_ring_unlock_commit(rdev); |
711 | 1345 | ||
712 | return 0; | 1346 | return 0; |
@@ -731,7 +1365,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
731 | 1365 | ||
732 | /* Set ring buffer size */ | 1366 | /* Set ring buffer size */ |
733 | rb_bufsz = drm_order(rdev->cp.ring_size / 8); | 1367 | rb_bufsz = drm_order(rdev->cp.ring_size / 8); |
734 | tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; | 1368 | tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; |
735 | #ifdef __BIG_ENDIAN | 1369 | #ifdef __BIG_ENDIAN |
736 | tmp |= BUF_SWAP_32BIT; | 1370 | tmp |= BUF_SWAP_32BIT; |
737 | #endif | 1371 | #endif |
@@ -745,8 +1379,23 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
745 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); | 1379 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); |
746 | WREG32(CP_RB_RPTR_WR, 0); | 1380 | WREG32(CP_RB_RPTR_WR, 0); |
747 | WREG32(CP_RB_WPTR, 0); | 1381 | WREG32(CP_RB_WPTR, 0); |
748 | WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF); | 1382 | |
749 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); | 1383 | /* set the wb address wether it's enabled or not */ |
1384 | WREG32(CP_RB_RPTR_ADDR, | ||
1385 | #ifdef __BIG_ENDIAN | ||
1386 | RB_RPTR_SWAP(2) | | ||
1387 | #endif | ||
1388 | ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); | ||
1389 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); | ||
1390 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); | ||
1391 | |||
1392 | if (rdev->wb.enabled) | ||
1393 | WREG32(SCRATCH_UMSK, 0xff); | ||
1394 | else { | ||
1395 | tmp |= RB_NO_UPDATE; | ||
1396 | WREG32(SCRATCH_UMSK, 0); | ||
1397 | } | ||
1398 | |||
750 | mdelay(1); | 1399 | mdelay(1); |
751 | WREG32(CP_RB_CNTL, tmp); | 1400 | WREG32(CP_RB_CNTL, tmp); |
752 | 1401 | ||
@@ -813,11 +1462,17 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
813 | switch (rdev->family) { | 1462 | switch (rdev->family) { |
814 | case CHIP_CEDAR: | 1463 | case CHIP_CEDAR: |
815 | case CHIP_REDWOOD: | 1464 | case CHIP_REDWOOD: |
1465 | case CHIP_PALM: | ||
1466 | case CHIP_SUMO: | ||
1467 | case CHIP_SUMO2: | ||
1468 | case CHIP_TURKS: | ||
1469 | case CHIP_CAICOS: | ||
816 | force_no_swizzle = false; | 1470 | force_no_swizzle = false; |
817 | break; | 1471 | break; |
818 | case CHIP_CYPRESS: | 1472 | case CHIP_CYPRESS: |
819 | case CHIP_HEMLOCK: | 1473 | case CHIP_HEMLOCK: |
820 | case CHIP_JUNIPER: | 1474 | case CHIP_JUNIPER: |
1475 | case CHIP_BARTS: | ||
821 | default: | 1476 | default: |
822 | force_no_swizzle = true; | 1477 | force_no_swizzle = true; |
823 | break; | 1478 | break; |
@@ -912,6 +1567,48 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
912 | return backend_map; | 1567 | return backend_map; |
913 | } | 1568 | } |
914 | 1569 | ||
1570 | static void evergreen_program_channel_remap(struct radeon_device *rdev) | ||
1571 | { | ||
1572 | u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; | ||
1573 | |||
1574 | tmp = RREG32(MC_SHARED_CHMAP); | ||
1575 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
1576 | case 0: | ||
1577 | case 1: | ||
1578 | case 2: | ||
1579 | case 3: | ||
1580 | default: | ||
1581 | /* default mapping */ | ||
1582 | mc_shared_chremap = 0x00fac688; | ||
1583 | break; | ||
1584 | } | ||
1585 | |||
1586 | switch (rdev->family) { | ||
1587 | case CHIP_HEMLOCK: | ||
1588 | case CHIP_CYPRESS: | ||
1589 | case CHIP_BARTS: | ||
1590 | tcp_chan_steer_lo = 0x54763210; | ||
1591 | tcp_chan_steer_hi = 0x0000ba98; | ||
1592 | break; | ||
1593 | case CHIP_JUNIPER: | ||
1594 | case CHIP_REDWOOD: | ||
1595 | case CHIP_CEDAR: | ||
1596 | case CHIP_PALM: | ||
1597 | case CHIP_SUMO: | ||
1598 | case CHIP_SUMO2: | ||
1599 | case CHIP_TURKS: | ||
1600 | case CHIP_CAICOS: | ||
1601 | default: | ||
1602 | tcp_chan_steer_lo = 0x76543210; | ||
1603 | tcp_chan_steer_hi = 0x0000ba98; | ||
1604 | break; | ||
1605 | } | ||
1606 | |||
1607 | WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); | ||
1608 | WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); | ||
1609 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
1610 | } | ||
1611 | |||
915 | static void evergreen_gpu_init(struct radeon_device *rdev) | 1612 | static void evergreen_gpu_init(struct radeon_device *rdev) |
916 | { | 1613 | { |
917 | u32 cc_rb_backend_disable = 0; | 1614 | u32 cc_rb_backend_disable = 0; |
@@ -933,7 +1630,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
933 | u32 sq_stack_resource_mgmt_2; | 1630 | u32 sq_stack_resource_mgmt_2; |
934 | u32 sq_stack_resource_mgmt_3; | 1631 | u32 sq_stack_resource_mgmt_3; |
935 | u32 vgt_cache_invalidation; | 1632 | u32 vgt_cache_invalidation; |
936 | u32 hdp_host_path_cntl; | 1633 | u32 hdp_host_path_cntl, tmp; |
937 | int i, j, num_shader_engines, ps_thread_count; | 1634 | int i, j, num_shader_engines, ps_thread_count; |
938 | 1635 | ||
939 | switch (rdev->family) { | 1636 | switch (rdev->family) { |
@@ -1023,6 +1720,138 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1023 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | 1720 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
1024 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | 1721 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
1025 | break; | 1722 | break; |
1723 | case CHIP_PALM: | ||
1724 | rdev->config.evergreen.num_ses = 1; | ||
1725 | rdev->config.evergreen.max_pipes = 2; | ||
1726 | rdev->config.evergreen.max_tile_pipes = 2; | ||
1727 | rdev->config.evergreen.max_simds = 2; | ||
1728 | rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; | ||
1729 | rdev->config.evergreen.max_gprs = 256; | ||
1730 | rdev->config.evergreen.max_threads = 192; | ||
1731 | rdev->config.evergreen.max_gs_threads = 16; | ||
1732 | rdev->config.evergreen.max_stack_entries = 256; | ||
1733 | rdev->config.evergreen.sx_num_of_sets = 4; | ||
1734 | rdev->config.evergreen.sx_max_export_size = 128; | ||
1735 | rdev->config.evergreen.sx_max_export_pos_size = 32; | ||
1736 | rdev->config.evergreen.sx_max_export_smx_size = 96; | ||
1737 | rdev->config.evergreen.max_hw_contexts = 4; | ||
1738 | rdev->config.evergreen.sq_num_cf_insts = 1; | ||
1739 | |||
1740 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; | ||
1741 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | ||
1742 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | ||
1743 | break; | ||
1744 | case CHIP_SUMO: | ||
1745 | rdev->config.evergreen.num_ses = 1; | ||
1746 | rdev->config.evergreen.max_pipes = 4; | ||
1747 | rdev->config.evergreen.max_tile_pipes = 2; | ||
1748 | if (rdev->pdev->device == 0x9648) | ||
1749 | rdev->config.evergreen.max_simds = 3; | ||
1750 | else if ((rdev->pdev->device == 0x9647) || | ||
1751 | (rdev->pdev->device == 0x964a)) | ||
1752 | rdev->config.evergreen.max_simds = 4; | ||
1753 | else | ||
1754 | rdev->config.evergreen.max_simds = 5; | ||
1755 | rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses; | ||
1756 | rdev->config.evergreen.max_gprs = 256; | ||
1757 | rdev->config.evergreen.max_threads = 248; | ||
1758 | rdev->config.evergreen.max_gs_threads = 32; | ||
1759 | rdev->config.evergreen.max_stack_entries = 256; | ||
1760 | rdev->config.evergreen.sx_num_of_sets = 4; | ||
1761 | rdev->config.evergreen.sx_max_export_size = 256; | ||
1762 | rdev->config.evergreen.sx_max_export_pos_size = 64; | ||
1763 | rdev->config.evergreen.sx_max_export_smx_size = 192; | ||
1764 | rdev->config.evergreen.max_hw_contexts = 8; | ||
1765 | rdev->config.evergreen.sq_num_cf_insts = 2; | ||
1766 | |||
1767 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; | ||
1768 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | ||
1769 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | ||
1770 | break; | ||
1771 | case CHIP_SUMO2: | ||
1772 | rdev->config.evergreen.num_ses = 1; | ||
1773 | rdev->config.evergreen.max_pipes = 4; | ||
1774 | rdev->config.evergreen.max_tile_pipes = 4; | ||
1775 | rdev->config.evergreen.max_simds = 2; | ||
1776 | rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; | ||
1777 | rdev->config.evergreen.max_gprs = 256; | ||
1778 | rdev->config.evergreen.max_threads = 248; | ||
1779 | rdev->config.evergreen.max_gs_threads = 32; | ||
1780 | rdev->config.evergreen.max_stack_entries = 512; | ||
1781 | rdev->config.evergreen.sx_num_of_sets = 4; | ||
1782 | rdev->config.evergreen.sx_max_export_size = 256; | ||
1783 | rdev->config.evergreen.sx_max_export_pos_size = 64; | ||
1784 | rdev->config.evergreen.sx_max_export_smx_size = 192; | ||
1785 | rdev->config.evergreen.max_hw_contexts = 8; | ||
1786 | rdev->config.evergreen.sq_num_cf_insts = 2; | ||
1787 | |||
1788 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; | ||
1789 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | ||
1790 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | ||
1791 | break; | ||
1792 | case CHIP_BARTS: | ||
1793 | rdev->config.evergreen.num_ses = 2; | ||
1794 | rdev->config.evergreen.max_pipes = 4; | ||
1795 | rdev->config.evergreen.max_tile_pipes = 8; | ||
1796 | rdev->config.evergreen.max_simds = 7; | ||
1797 | rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; | ||
1798 | rdev->config.evergreen.max_gprs = 256; | ||
1799 | rdev->config.evergreen.max_threads = 248; | ||
1800 | rdev->config.evergreen.max_gs_threads = 32; | ||
1801 | rdev->config.evergreen.max_stack_entries = 512; | ||
1802 | rdev->config.evergreen.sx_num_of_sets = 4; | ||
1803 | rdev->config.evergreen.sx_max_export_size = 256; | ||
1804 | rdev->config.evergreen.sx_max_export_pos_size = 64; | ||
1805 | rdev->config.evergreen.sx_max_export_smx_size = 192; | ||
1806 | rdev->config.evergreen.max_hw_contexts = 8; | ||
1807 | rdev->config.evergreen.sq_num_cf_insts = 2; | ||
1808 | |||
1809 | rdev->config.evergreen.sc_prim_fifo_size = 0x100; | ||
1810 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | ||
1811 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | ||
1812 | break; | ||
1813 | case CHIP_TURKS: | ||
1814 | rdev->config.evergreen.num_ses = 1; | ||
1815 | rdev->config.evergreen.max_pipes = 4; | ||
1816 | rdev->config.evergreen.max_tile_pipes = 4; | ||
1817 | rdev->config.evergreen.max_simds = 6; | ||
1818 | rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses; | ||
1819 | rdev->config.evergreen.max_gprs = 256; | ||
1820 | rdev->config.evergreen.max_threads = 248; | ||
1821 | rdev->config.evergreen.max_gs_threads = 32; | ||
1822 | rdev->config.evergreen.max_stack_entries = 256; | ||
1823 | rdev->config.evergreen.sx_num_of_sets = 4; | ||
1824 | rdev->config.evergreen.sx_max_export_size = 256; | ||
1825 | rdev->config.evergreen.sx_max_export_pos_size = 64; | ||
1826 | rdev->config.evergreen.sx_max_export_smx_size = 192; | ||
1827 | rdev->config.evergreen.max_hw_contexts = 8; | ||
1828 | rdev->config.evergreen.sq_num_cf_insts = 2; | ||
1829 | |||
1830 | rdev->config.evergreen.sc_prim_fifo_size = 0x100; | ||
1831 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | ||
1832 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | ||
1833 | break; | ||
1834 | case CHIP_CAICOS: | ||
1835 | rdev->config.evergreen.num_ses = 1; | ||
1836 | rdev->config.evergreen.max_pipes = 4; | ||
1837 | rdev->config.evergreen.max_tile_pipes = 2; | ||
1838 | rdev->config.evergreen.max_simds = 2; | ||
1839 | rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; | ||
1840 | rdev->config.evergreen.max_gprs = 256; | ||
1841 | rdev->config.evergreen.max_threads = 192; | ||
1842 | rdev->config.evergreen.max_gs_threads = 16; | ||
1843 | rdev->config.evergreen.max_stack_entries = 256; | ||
1844 | rdev->config.evergreen.sx_num_of_sets = 4; | ||
1845 | rdev->config.evergreen.sx_max_export_size = 128; | ||
1846 | rdev->config.evergreen.sx_max_export_pos_size = 32; | ||
1847 | rdev->config.evergreen.sx_max_export_smx_size = 96; | ||
1848 | rdev->config.evergreen.max_hw_contexts = 4; | ||
1849 | rdev->config.evergreen.sq_num_cf_insts = 1; | ||
1850 | |||
1851 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; | ||
1852 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | ||
1853 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | ||
1854 | break; | ||
1026 | } | 1855 | } |
1027 | 1856 | ||
1028 | /* Initialize HDP */ | 1857 | /* Initialize HDP */ |
@@ -1051,7 +1880,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1051 | 1880 | ||
1052 | 1881 | ||
1053 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); | 1882 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); |
1054 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | 1883 | if (rdev->flags & RADEON_IS_IGP) |
1884 | mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG); | ||
1885 | else | ||
1886 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | ||
1055 | 1887 | ||
1056 | switch (rdev->config.evergreen.max_tile_pipes) { | 1888 | switch (rdev->config.evergreen.max_tile_pipes) { |
1057 | case 1: | 1889 | case 1: |
@@ -1164,10 +1996,11 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1164 | switch (rdev->family) { | 1996 | switch (rdev->family) { |
1165 | case CHIP_CYPRESS: | 1997 | case CHIP_CYPRESS: |
1166 | case CHIP_HEMLOCK: | 1998 | case CHIP_HEMLOCK: |
1999 | case CHIP_BARTS: | ||
1167 | gb_backend_map = 0x66442200; | 2000 | gb_backend_map = 0x66442200; |
1168 | break; | 2001 | break; |
1169 | case CHIP_JUNIPER: | 2002 | case CHIP_JUNIPER: |
1170 | gb_backend_map = 0x00006420; | 2003 | gb_backend_map = 0x00002200; |
1171 | break; | 2004 | break; |
1172 | default: | 2005 | default: |
1173 | gb_backend_map = | 2006 | gb_backend_map = |
@@ -1180,12 +2013,47 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1180 | } | 2013 | } |
1181 | } | 2014 | } |
1182 | 2015 | ||
1183 | rdev->config.evergreen.tile_config = gb_addr_config; | 2016 | /* setup tiling info dword. gb_addr_config is not adequate since it does |
2017 | * not have bank info, so create a custom tiling dword. | ||
2018 | * bits 3:0 num_pipes | ||
2019 | * bits 7:4 num_banks | ||
2020 | * bits 11:8 group_size | ||
2021 | * bits 15:12 row_size | ||
2022 | */ | ||
2023 | rdev->config.evergreen.tile_config = 0; | ||
2024 | switch (rdev->config.evergreen.max_tile_pipes) { | ||
2025 | case 1: | ||
2026 | default: | ||
2027 | rdev->config.evergreen.tile_config |= (0 << 0); | ||
2028 | break; | ||
2029 | case 2: | ||
2030 | rdev->config.evergreen.tile_config |= (1 << 0); | ||
2031 | break; | ||
2032 | case 4: | ||
2033 | rdev->config.evergreen.tile_config |= (2 << 0); | ||
2034 | break; | ||
2035 | case 8: | ||
2036 | rdev->config.evergreen.tile_config |= (3 << 0); | ||
2037 | break; | ||
2038 | } | ||
2039 | /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ | ||
2040 | if (rdev->flags & RADEON_IS_IGP) | ||
2041 | rdev->config.evergreen.tile_config |= 1 << 4; | ||
2042 | else | ||
2043 | rdev->config.evergreen.tile_config |= | ||
2044 | ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; | ||
2045 | rdev->config.evergreen.tile_config |= | ||
2046 | ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8; | ||
2047 | rdev->config.evergreen.tile_config |= | ||
2048 | ((gb_addr_config & 0x30000000) >> 28) << 12; | ||
2049 | |||
1184 | WREG32(GB_BACKEND_MAP, gb_backend_map); | 2050 | WREG32(GB_BACKEND_MAP, gb_backend_map); |
1185 | WREG32(GB_ADDR_CONFIG, gb_addr_config); | 2051 | WREG32(GB_ADDR_CONFIG, gb_addr_config); |
1186 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 2052 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
1187 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | 2053 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
1188 | 2054 | ||
2055 | evergreen_program_channel_remap(rdev); | ||
2056 | |||
1189 | num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; | 2057 | num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; |
1190 | grbm_gfx_index = INSTANCE_BROADCAST_WRITES; | 2058 | grbm_gfx_index = INSTANCE_BROADCAST_WRITES; |
1191 | 2059 | ||
@@ -1268,9 +2136,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1268 | GS_PRIO(2) | | 2136 | GS_PRIO(2) | |
1269 | ES_PRIO(3)); | 2137 | ES_PRIO(3)); |
1270 | 2138 | ||
1271 | if (rdev->family == CHIP_CEDAR) | 2139 | switch (rdev->family) { |
2140 | case CHIP_CEDAR: | ||
2141 | case CHIP_PALM: | ||
2142 | case CHIP_SUMO: | ||
2143 | case CHIP_SUMO2: | ||
2144 | case CHIP_CAICOS: | ||
1272 | /* no vertex cache */ | 2145 | /* no vertex cache */ |
1273 | sq_config &= ~VC_ENABLE; | 2146 | sq_config &= ~VC_ENABLE; |
2147 | break; | ||
2148 | default: | ||
2149 | break; | ||
2150 | } | ||
1274 | 2151 | ||
1275 | sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); | 2152 | sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); |
1276 | 2153 | ||
@@ -1282,10 +2159,17 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1282 | sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); | 2159 | sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); |
1283 | sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); | 2160 | sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); |
1284 | 2161 | ||
1285 | if (rdev->family == CHIP_CEDAR) | 2162 | switch (rdev->family) { |
2163 | case CHIP_CEDAR: | ||
2164 | case CHIP_PALM: | ||
2165 | case CHIP_SUMO: | ||
2166 | case CHIP_SUMO2: | ||
1286 | ps_thread_count = 96; | 2167 | ps_thread_count = 96; |
1287 | else | 2168 | break; |
2169 | default: | ||
1288 | ps_thread_count = 128; | 2170 | ps_thread_count = 128; |
2171 | break; | ||
2172 | } | ||
1289 | 2173 | ||
1290 | sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); | 2174 | sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); |
1291 | sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); | 2175 | sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
@@ -1316,14 +2200,23 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1316 | WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | | 2200 | WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | |
1317 | FORCE_EOV_MAX_REZ_CNT(255))); | 2201 | FORCE_EOV_MAX_REZ_CNT(255))); |
1318 | 2202 | ||
1319 | if (rdev->family == CHIP_CEDAR) | 2203 | switch (rdev->family) { |
2204 | case CHIP_CEDAR: | ||
2205 | case CHIP_PALM: | ||
2206 | case CHIP_SUMO: | ||
2207 | case CHIP_SUMO2: | ||
2208 | case CHIP_CAICOS: | ||
1320 | vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); | 2209 | vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); |
1321 | else | 2210 | break; |
2211 | default: | ||
1322 | vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); | 2212 | vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); |
2213 | break; | ||
2214 | } | ||
1323 | vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); | 2215 | vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); |
1324 | WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); | 2216 | WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); |
1325 | 2217 | ||
1326 | WREG32(VGT_GS_VERTEX_REUSE, 16); | 2218 | WREG32(VGT_GS_VERTEX_REUSE, 16); |
2219 | WREG32(PA_SU_LINE_STIPPLE_VALUE, 0); | ||
1327 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); | 2220 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); |
1328 | 2221 | ||
1329 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); | 2222 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); |
@@ -1358,6 +2251,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1358 | for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4) | 2251 | for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4) |
1359 | WREG32(i, 0); | 2252 | WREG32(i, 0); |
1360 | 2253 | ||
2254 | tmp = RREG32(HDP_MISC_CNTL); | ||
2255 | tmp |= HDP_FLUSH_INVALIDATE_CACHE; | ||
2256 | WREG32(HDP_MISC_CNTL, tmp); | ||
2257 | |||
1361 | hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); | 2258 | hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); |
1362 | WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); | 2259 | WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); |
1363 | 2260 | ||
@@ -1374,7 +2271,10 @@ int evergreen_mc_init(struct radeon_device *rdev) | |||
1374 | 2271 | ||
1375 | /* Get VRAM informations */ | 2272 | /* Get VRAM informations */ |
1376 | rdev->mc.vram_is_ddr = true; | 2273 | rdev->mc.vram_is_ddr = true; |
1377 | tmp = RREG32(MC_ARB_RAMCFG); | 2274 | if (rdev->flags & RADEON_IS_IGP) |
2275 | tmp = RREG32(FUS_MC_ARB_RAMCFG); | ||
2276 | else | ||
2277 | tmp = RREG32(MC_ARB_RAMCFG); | ||
1378 | if (tmp & CHANSIZE_OVERRIDE) { | 2278 | if (tmp & CHANSIZE_OVERRIDE) { |
1379 | chansize = 16; | 2279 | chansize = 16; |
1380 | } else if (tmp & CHANSIZE_MASK) { | 2280 | } else if (tmp & CHANSIZE_MASK) { |
@@ -1403,12 +2303,17 @@ int evergreen_mc_init(struct radeon_device *rdev) | |||
1403 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); | 2303 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
1404 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); | 2304 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); |
1405 | /* Setup GPU memory space */ | 2305 | /* Setup GPU memory space */ |
1406 | /* size in MB on evergreen */ | 2306 | if (rdev->flags & RADEON_IS_IGP) { |
1407 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | 2307 | /* size in bytes on fusion */ |
1408 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | 2308 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
2309 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); | ||
2310 | } else { | ||
2311 | /* size in MB on evergreen */ | ||
2312 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | ||
2313 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | ||
2314 | } | ||
1409 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 2315 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
1410 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 2316 | r700_vram_gtt_location(rdev, &rdev->mc); |
1411 | r600_vram_gtt_location(rdev, &rdev->mc); | ||
1412 | radeon_update_bandwidth_info(rdev); | 2317 | radeon_update_bandwidth_info(rdev); |
1413 | 2318 | ||
1414 | return 0; | 2319 | return 0; |
@@ -1416,16 +2321,40 @@ int evergreen_mc_init(struct radeon_device *rdev) | |||
1416 | 2321 | ||
1417 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev) | 2322 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev) |
1418 | { | 2323 | { |
1419 | /* FIXME: implement for evergreen */ | 2324 | u32 srbm_status; |
1420 | return false; | 2325 | u32 grbm_status; |
2326 | u32 grbm_status_se0, grbm_status_se1; | ||
2327 | struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup; | ||
2328 | int r; | ||
2329 | |||
2330 | srbm_status = RREG32(SRBM_STATUS); | ||
2331 | grbm_status = RREG32(GRBM_STATUS); | ||
2332 | grbm_status_se0 = RREG32(GRBM_STATUS_SE0); | ||
2333 | grbm_status_se1 = RREG32(GRBM_STATUS_SE1); | ||
2334 | if (!(grbm_status & GUI_ACTIVE)) { | ||
2335 | r100_gpu_lockup_update(lockup, &rdev->cp); | ||
2336 | return false; | ||
2337 | } | ||
2338 | /* force CP activities */ | ||
2339 | r = radeon_ring_lock(rdev, 2); | ||
2340 | if (!r) { | ||
2341 | /* PACKET2 NOP */ | ||
2342 | radeon_ring_write(rdev, 0x80000000); | ||
2343 | radeon_ring_write(rdev, 0x80000000); | ||
2344 | radeon_ring_unlock_commit(rdev); | ||
2345 | } | ||
2346 | rdev->cp.rptr = RREG32(CP_RB_RPTR); | ||
2347 | return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp); | ||
1421 | } | 2348 | } |
1422 | 2349 | ||
1423 | static int evergreen_gpu_soft_reset(struct radeon_device *rdev) | 2350 | static int evergreen_gpu_soft_reset(struct radeon_device *rdev) |
1424 | { | 2351 | { |
1425 | struct evergreen_mc_save save; | 2352 | struct evergreen_mc_save save; |
1426 | u32 srbm_reset = 0; | ||
1427 | u32 grbm_reset = 0; | 2353 | u32 grbm_reset = 0; |
1428 | 2354 | ||
2355 | if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) | ||
2356 | return 0; | ||
2357 | |||
1429 | dev_info(rdev->dev, "GPU softreset \n"); | 2358 | dev_info(rdev->dev, "GPU softreset \n"); |
1430 | dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", | 2359 | dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", |
1431 | RREG32(GRBM_STATUS)); | 2360 | RREG32(GRBM_STATUS)); |
@@ -1462,16 +2391,6 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev) | |||
1462 | udelay(50); | 2391 | udelay(50); |
1463 | WREG32(GRBM_SOFT_RESET, 0); | 2392 | WREG32(GRBM_SOFT_RESET, 0); |
1464 | (void)RREG32(GRBM_SOFT_RESET); | 2393 | (void)RREG32(GRBM_SOFT_RESET); |
1465 | |||
1466 | /* reset all the system blocks */ | ||
1467 | srbm_reset = SRBM_SOFT_RESET_ALL_MASK; | ||
1468 | |||
1469 | dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset); | ||
1470 | WREG32(SRBM_SOFT_RESET, srbm_reset); | ||
1471 | (void)RREG32(SRBM_SOFT_RESET); | ||
1472 | udelay(50); | ||
1473 | WREG32(SRBM_SOFT_RESET, 0); | ||
1474 | (void)RREG32(SRBM_SOFT_RESET); | ||
1475 | /* Wait a little for things to settle down */ | 2394 | /* Wait a little for things to settle down */ |
1476 | udelay(50); | 2395 | udelay(50); |
1477 | dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", | 2396 | dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", |
@@ -1482,10 +2401,6 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev) | |||
1482 | RREG32(GRBM_STATUS_SE1)); | 2401 | RREG32(GRBM_STATUS_SE1)); |
1483 | dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", | 2402 | dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", |
1484 | RREG32(SRBM_STATUS)); | 2403 | RREG32(SRBM_STATUS)); |
1485 | /* After reset we need to reinit the asic as GPU often endup in an | ||
1486 | * incoherent state. | ||
1487 | */ | ||
1488 | atom_asic_init(rdev->mode_info.atom_context); | ||
1489 | evergreen_mc_resume(rdev, &save); | 2404 | evergreen_mc_resume(rdev, &save); |
1490 | return 0; | 2405 | return 0; |
1491 | } | 2406 | } |
@@ -1525,17 +2440,25 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev) | |||
1525 | WREG32(GRBM_INT_CNTL, 0); | 2440 | WREG32(GRBM_INT_CNTL, 0); |
1526 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 2441 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
1527 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 2442 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
1528 | WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | 2443 | if (rdev->num_crtc >= 4) { |
1529 | WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | 2444 | WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); |
1530 | WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | 2445 | WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); |
1531 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | 2446 | } |
2447 | if (rdev->num_crtc >= 6) { | ||
2448 | WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | ||
2449 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | ||
2450 | } | ||
1532 | 2451 | ||
1533 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 2452 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
1534 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 2453 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
1535 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | 2454 | if (rdev->num_crtc >= 4) { |
1536 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | 2455 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); |
1537 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | 2456 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); |
1538 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | 2457 | } |
2458 | if (rdev->num_crtc >= 6) { | ||
2459 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | ||
2460 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | ||
2461 | } | ||
1539 | 2462 | ||
1540 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); | 2463 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); |
1541 | WREG32(DACB_AUTODETECT_INT_CONTROL, 0); | 2464 | WREG32(DACB_AUTODETECT_INT_CONTROL, 0); |
@@ -1561,9 +2484,10 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
1561 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; | 2484 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; |
1562 | u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; | 2485 | u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; |
1563 | u32 grbm_int_cntl = 0; | 2486 | u32 grbm_int_cntl = 0; |
2487 | u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; | ||
1564 | 2488 | ||
1565 | if (!rdev->irq.installed) { | 2489 | if (!rdev->irq.installed) { |
1566 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | 2490 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
1567 | return -EINVAL; | 2491 | return -EINVAL; |
1568 | } | 2492 | } |
1569 | /* don't enable anything if the ih is disabled */ | 2493 | /* don't enable anything if the ih is disabled */ |
@@ -1584,28 +2508,35 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
1584 | if (rdev->irq.sw_int) { | 2508 | if (rdev->irq.sw_int) { |
1585 | DRM_DEBUG("evergreen_irq_set: sw int\n"); | 2509 | DRM_DEBUG("evergreen_irq_set: sw int\n"); |
1586 | cp_int_cntl |= RB_INT_ENABLE; | 2510 | cp_int_cntl |= RB_INT_ENABLE; |
2511 | cp_int_cntl |= TIME_STAMP_INT_ENABLE; | ||
1587 | } | 2512 | } |
1588 | if (rdev->irq.crtc_vblank_int[0]) { | 2513 | if (rdev->irq.crtc_vblank_int[0] || |
2514 | rdev->irq.pflip[0]) { | ||
1589 | DRM_DEBUG("evergreen_irq_set: vblank 0\n"); | 2515 | DRM_DEBUG("evergreen_irq_set: vblank 0\n"); |
1590 | crtc1 |= VBLANK_INT_MASK; | 2516 | crtc1 |= VBLANK_INT_MASK; |
1591 | } | 2517 | } |
1592 | if (rdev->irq.crtc_vblank_int[1]) { | 2518 | if (rdev->irq.crtc_vblank_int[1] || |
2519 | rdev->irq.pflip[1]) { | ||
1593 | DRM_DEBUG("evergreen_irq_set: vblank 1\n"); | 2520 | DRM_DEBUG("evergreen_irq_set: vblank 1\n"); |
1594 | crtc2 |= VBLANK_INT_MASK; | 2521 | crtc2 |= VBLANK_INT_MASK; |
1595 | } | 2522 | } |
1596 | if (rdev->irq.crtc_vblank_int[2]) { | 2523 | if (rdev->irq.crtc_vblank_int[2] || |
2524 | rdev->irq.pflip[2]) { | ||
1597 | DRM_DEBUG("evergreen_irq_set: vblank 2\n"); | 2525 | DRM_DEBUG("evergreen_irq_set: vblank 2\n"); |
1598 | crtc3 |= VBLANK_INT_MASK; | 2526 | crtc3 |= VBLANK_INT_MASK; |
1599 | } | 2527 | } |
1600 | if (rdev->irq.crtc_vblank_int[3]) { | 2528 | if (rdev->irq.crtc_vblank_int[3] || |
2529 | rdev->irq.pflip[3]) { | ||
1601 | DRM_DEBUG("evergreen_irq_set: vblank 3\n"); | 2530 | DRM_DEBUG("evergreen_irq_set: vblank 3\n"); |
1602 | crtc4 |= VBLANK_INT_MASK; | 2531 | crtc4 |= VBLANK_INT_MASK; |
1603 | } | 2532 | } |
1604 | if (rdev->irq.crtc_vblank_int[4]) { | 2533 | if (rdev->irq.crtc_vblank_int[4] || |
2534 | rdev->irq.pflip[4]) { | ||
1605 | DRM_DEBUG("evergreen_irq_set: vblank 4\n"); | 2535 | DRM_DEBUG("evergreen_irq_set: vblank 4\n"); |
1606 | crtc5 |= VBLANK_INT_MASK; | 2536 | crtc5 |= VBLANK_INT_MASK; |
1607 | } | 2537 | } |
1608 | if (rdev->irq.crtc_vblank_int[5]) { | 2538 | if (rdev->irq.crtc_vblank_int[5] || |
2539 | rdev->irq.pflip[5]) { | ||
1609 | DRM_DEBUG("evergreen_irq_set: vblank 5\n"); | 2540 | DRM_DEBUG("evergreen_irq_set: vblank 5\n"); |
1610 | crtc6 |= VBLANK_INT_MASK; | 2541 | crtc6 |= VBLANK_INT_MASK; |
1611 | } | 2542 | } |
@@ -1643,10 +2574,25 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
1643 | 2574 | ||
1644 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); | 2575 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); |
1645 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); | 2576 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); |
1646 | WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); | 2577 | if (rdev->num_crtc >= 4) { |
1647 | WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); | 2578 | WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); |
1648 | WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); | 2579 | WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); |
1649 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); | 2580 | } |
2581 | if (rdev->num_crtc >= 6) { | ||
2582 | WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); | ||
2583 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); | ||
2584 | } | ||
2585 | |||
2586 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); | ||
2587 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); | ||
2588 | if (rdev->num_crtc >= 4) { | ||
2589 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); | ||
2590 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); | ||
2591 | } | ||
2592 | if (rdev->num_crtc >= 6) { | ||
2593 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); | ||
2594 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); | ||
2595 | } | ||
1650 | 2596 | ||
1651 | WREG32(DC_HPD1_INT_CONTROL, hpd1); | 2597 | WREG32(DC_HPD1_INT_CONTROL, hpd1); |
1652 | WREG32(DC_HPD2_INT_CONTROL, hpd2); | 2598 | WREG32(DC_HPD2_INT_CONTROL, hpd2); |
@@ -1658,79 +2604,96 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
1658 | return 0; | 2604 | return 0; |
1659 | } | 2605 | } |
1660 | 2606 | ||
1661 | static inline void evergreen_irq_ack(struct radeon_device *rdev, | 2607 | static inline void evergreen_irq_ack(struct radeon_device *rdev) |
1662 | u32 *disp_int, | ||
1663 | u32 *disp_int_cont, | ||
1664 | u32 *disp_int_cont2, | ||
1665 | u32 *disp_int_cont3, | ||
1666 | u32 *disp_int_cont4, | ||
1667 | u32 *disp_int_cont5) | ||
1668 | { | 2608 | { |
1669 | u32 tmp; | 2609 | u32 tmp; |
1670 | 2610 | ||
1671 | *disp_int = RREG32(DISP_INTERRUPT_STATUS); | 2611 | rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS); |
1672 | *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); | 2612 | rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); |
1673 | *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); | 2613 | rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); |
1674 | *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); | 2614 | rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); |
1675 | *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); | 2615 | rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); |
1676 | *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); | 2616 | rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); |
1677 | 2617 | rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET); | |
1678 | if (*disp_int & LB_D1_VBLANK_INTERRUPT) | 2618 | rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET); |
2619 | if (rdev->num_crtc >= 4) { | ||
2620 | rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET); | ||
2621 | rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET); | ||
2622 | } | ||
2623 | if (rdev->num_crtc >= 6) { | ||
2624 | rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET); | ||
2625 | rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET); | ||
2626 | } | ||
2627 | |||
2628 | if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
2629 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | ||
2630 | if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
2631 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | ||
2632 | if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) | ||
1679 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); | 2633 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); |
1680 | if (*disp_int & LB_D1_VLINE_INTERRUPT) | 2634 | if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) |
1681 | WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); | 2635 | WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); |
1682 | 2636 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) | |
1683 | if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT) | ||
1684 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); | 2637 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); |
1685 | if (*disp_int_cont & LB_D2_VLINE_INTERRUPT) | 2638 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) |
1686 | WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); | 2639 | WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); |
1687 | 2640 | ||
1688 | if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) | 2641 | if (rdev->num_crtc >= 4) { |
1689 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); | 2642 | if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED) |
1690 | if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT) | 2643 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); |
1691 | WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); | 2644 | if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED) |
1692 | 2645 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | |
1693 | if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) | 2646 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) |
1694 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); | 2647 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); |
1695 | if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT) | 2648 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) |
1696 | WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); | 2649 | WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); |
1697 | 2650 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) | |
1698 | if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) | 2651 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); |
1699 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); | 2652 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) |
1700 | if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT) | 2653 | WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); |
1701 | WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); | 2654 | } |
1702 | 2655 | ||
1703 | if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) | 2656 | if (rdev->num_crtc >= 6) { |
1704 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); | 2657 | if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED) |
1705 | if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT) | 2658 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); |
1706 | WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); | 2659 | if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED) |
1707 | 2660 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | |
1708 | if (*disp_int & DC_HPD1_INTERRUPT) { | 2661 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) |
2662 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); | ||
2663 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) | ||
2664 | WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); | ||
2665 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) | ||
2666 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); | ||
2667 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) | ||
2668 | WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); | ||
2669 | } | ||
2670 | |||
2671 | if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { | ||
1709 | tmp = RREG32(DC_HPD1_INT_CONTROL); | 2672 | tmp = RREG32(DC_HPD1_INT_CONTROL); |
1710 | tmp |= DC_HPDx_INT_ACK; | 2673 | tmp |= DC_HPDx_INT_ACK; |
1711 | WREG32(DC_HPD1_INT_CONTROL, tmp); | 2674 | WREG32(DC_HPD1_INT_CONTROL, tmp); |
1712 | } | 2675 | } |
1713 | if (*disp_int_cont & DC_HPD2_INTERRUPT) { | 2676 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { |
1714 | tmp = RREG32(DC_HPD2_INT_CONTROL); | 2677 | tmp = RREG32(DC_HPD2_INT_CONTROL); |
1715 | tmp |= DC_HPDx_INT_ACK; | 2678 | tmp |= DC_HPDx_INT_ACK; |
1716 | WREG32(DC_HPD2_INT_CONTROL, tmp); | 2679 | WREG32(DC_HPD2_INT_CONTROL, tmp); |
1717 | } | 2680 | } |
1718 | if (*disp_int_cont2 & DC_HPD3_INTERRUPT) { | 2681 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { |
1719 | tmp = RREG32(DC_HPD3_INT_CONTROL); | 2682 | tmp = RREG32(DC_HPD3_INT_CONTROL); |
1720 | tmp |= DC_HPDx_INT_ACK; | 2683 | tmp |= DC_HPDx_INT_ACK; |
1721 | WREG32(DC_HPD3_INT_CONTROL, tmp); | 2684 | WREG32(DC_HPD3_INT_CONTROL, tmp); |
1722 | } | 2685 | } |
1723 | if (*disp_int_cont3 & DC_HPD4_INTERRUPT) { | 2686 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { |
1724 | tmp = RREG32(DC_HPD4_INT_CONTROL); | 2687 | tmp = RREG32(DC_HPD4_INT_CONTROL); |
1725 | tmp |= DC_HPDx_INT_ACK; | 2688 | tmp |= DC_HPDx_INT_ACK; |
1726 | WREG32(DC_HPD4_INT_CONTROL, tmp); | 2689 | WREG32(DC_HPD4_INT_CONTROL, tmp); |
1727 | } | 2690 | } |
1728 | if (*disp_int_cont4 & DC_HPD5_INTERRUPT) { | 2691 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { |
1729 | tmp = RREG32(DC_HPD5_INT_CONTROL); | 2692 | tmp = RREG32(DC_HPD5_INT_CONTROL); |
1730 | tmp |= DC_HPDx_INT_ACK; | 2693 | tmp |= DC_HPDx_INT_ACK; |
1731 | WREG32(DC_HPD5_INT_CONTROL, tmp); | 2694 | WREG32(DC_HPD5_INT_CONTROL, tmp); |
1732 | } | 2695 | } |
1733 | if (*disp_int_cont5 & DC_HPD6_INTERRUPT) { | 2696 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { |
1734 | tmp = RREG32(DC_HPD5_INT_CONTROL); | 2697 | tmp = RREG32(DC_HPD5_INT_CONTROL); |
1735 | tmp |= DC_HPDx_INT_ACK; | 2698 | tmp |= DC_HPDx_INT_ACK; |
1736 | WREG32(DC_HPD6_INT_CONTROL, tmp); | 2699 | WREG32(DC_HPD6_INT_CONTROL, tmp); |
@@ -1739,18 +2702,14 @@ static inline void evergreen_irq_ack(struct radeon_device *rdev, | |||
1739 | 2702 | ||
1740 | void evergreen_irq_disable(struct radeon_device *rdev) | 2703 | void evergreen_irq_disable(struct radeon_device *rdev) |
1741 | { | 2704 | { |
1742 | u32 disp_int, disp_int_cont, disp_int_cont2; | ||
1743 | u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; | ||
1744 | |||
1745 | r600_disable_interrupts(rdev); | 2705 | r600_disable_interrupts(rdev); |
1746 | /* Wait and acknowledge irq */ | 2706 | /* Wait and acknowledge irq */ |
1747 | mdelay(1); | 2707 | mdelay(1); |
1748 | evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, | 2708 | evergreen_irq_ack(rdev); |
1749 | &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); | ||
1750 | evergreen_disable_interrupt_state(rdev); | 2709 | evergreen_disable_interrupt_state(rdev); |
1751 | } | 2710 | } |
1752 | 2711 | ||
1753 | static void evergreen_irq_suspend(struct radeon_device *rdev) | 2712 | void evergreen_irq_suspend(struct radeon_device *rdev) |
1754 | { | 2713 | { |
1755 | evergreen_irq_disable(rdev); | 2714 | evergreen_irq_disable(rdev); |
1756 | r600_rlc_stop(rdev); | 2715 | r600_rlc_stop(rdev); |
@@ -1760,8 +2719,10 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) | |||
1760 | { | 2719 | { |
1761 | u32 wptr, tmp; | 2720 | u32 wptr, tmp; |
1762 | 2721 | ||
1763 | /* XXX use writeback */ | 2722 | if (rdev->wb.enabled) |
1764 | wptr = RREG32(IH_RB_WPTR); | 2723 | wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); |
2724 | else | ||
2725 | wptr = RREG32(IH_RB_WPTR); | ||
1765 | 2726 | ||
1766 | if (wptr & RB_OVERFLOW) { | 2727 | if (wptr & RB_OVERFLOW) { |
1767 | /* When a ring buffer overflow happen start parsing interrupt | 2728 | /* When a ring buffer overflow happen start parsing interrupt |
@@ -1780,56 +2741,55 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) | |||
1780 | 2741 | ||
1781 | int evergreen_irq_process(struct radeon_device *rdev) | 2742 | int evergreen_irq_process(struct radeon_device *rdev) |
1782 | { | 2743 | { |
1783 | u32 wptr = evergreen_get_ih_wptr(rdev); | 2744 | u32 wptr; |
1784 | u32 rptr = rdev->ih.rptr; | 2745 | u32 rptr; |
1785 | u32 src_id, src_data; | 2746 | u32 src_id, src_data; |
1786 | u32 ring_index; | 2747 | u32 ring_index; |
1787 | u32 disp_int, disp_int_cont, disp_int_cont2; | ||
1788 | u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; | ||
1789 | unsigned long flags; | 2748 | unsigned long flags; |
1790 | bool queue_hotplug = false; | 2749 | bool queue_hotplug = false; |
1791 | 2750 | ||
1792 | DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); | 2751 | if (!rdev->ih.enabled || rdev->shutdown) |
1793 | if (!rdev->ih.enabled) | ||
1794 | return IRQ_NONE; | 2752 | return IRQ_NONE; |
1795 | 2753 | ||
1796 | spin_lock_irqsave(&rdev->ih.lock, flags); | 2754 | wptr = evergreen_get_ih_wptr(rdev); |
2755 | rptr = rdev->ih.rptr; | ||
2756 | DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); | ||
1797 | 2757 | ||
2758 | spin_lock_irqsave(&rdev->ih.lock, flags); | ||
1798 | if (rptr == wptr) { | 2759 | if (rptr == wptr) { |
1799 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | 2760 | spin_unlock_irqrestore(&rdev->ih.lock, flags); |
1800 | return IRQ_NONE; | 2761 | return IRQ_NONE; |
1801 | } | 2762 | } |
1802 | if (rdev->shutdown) { | ||
1803 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | ||
1804 | return IRQ_NONE; | ||
1805 | } | ||
1806 | |||
1807 | restart_ih: | 2763 | restart_ih: |
1808 | /* display interrupts */ | 2764 | /* display interrupts */ |
1809 | evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, | 2765 | evergreen_irq_ack(rdev); |
1810 | &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); | ||
1811 | 2766 | ||
1812 | rdev->ih.wptr = wptr; | 2767 | rdev->ih.wptr = wptr; |
1813 | while (rptr != wptr) { | 2768 | while (rptr != wptr) { |
1814 | /* wptr/rptr are in bytes! */ | 2769 | /* wptr/rptr are in bytes! */ |
1815 | ring_index = rptr / 4; | 2770 | ring_index = rptr / 4; |
1816 | src_id = rdev->ih.ring[ring_index] & 0xff; | 2771 | src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; |
1817 | src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; | 2772 | src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; |
1818 | 2773 | ||
1819 | switch (src_id) { | 2774 | switch (src_id) { |
1820 | case 1: /* D1 vblank/vline */ | 2775 | case 1: /* D1 vblank/vline */ |
1821 | switch (src_data) { | 2776 | switch (src_data) { |
1822 | case 0: /* D1 vblank */ | 2777 | case 0: /* D1 vblank */ |
1823 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { | 2778 | if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { |
1824 | drm_handle_vblank(rdev->ddev, 0); | 2779 | if (rdev->irq.crtc_vblank_int[0]) { |
1825 | wake_up(&rdev->irq.vblank_queue); | 2780 | drm_handle_vblank(rdev->ddev, 0); |
1826 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; | 2781 | rdev->pm.vblank_sync = true; |
2782 | wake_up(&rdev->irq.vblank_queue); | ||
2783 | } | ||
2784 | if (rdev->irq.pflip[0]) | ||
2785 | radeon_crtc_handle_flip(rdev, 0); | ||
2786 | rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; | ||
1827 | DRM_DEBUG("IH: D1 vblank\n"); | 2787 | DRM_DEBUG("IH: D1 vblank\n"); |
1828 | } | 2788 | } |
1829 | break; | 2789 | break; |
1830 | case 1: /* D1 vline */ | 2790 | case 1: /* D1 vline */ |
1831 | if (disp_int & LB_D1_VLINE_INTERRUPT) { | 2791 | if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { |
1832 | disp_int &= ~LB_D1_VLINE_INTERRUPT; | 2792 | rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; |
1833 | DRM_DEBUG("IH: D1 vline\n"); | 2793 | DRM_DEBUG("IH: D1 vline\n"); |
1834 | } | 2794 | } |
1835 | break; | 2795 | break; |
@@ -1841,16 +2801,21 @@ restart_ih: | |||
1841 | case 2: /* D2 vblank/vline */ | 2801 | case 2: /* D2 vblank/vline */ |
1842 | switch (src_data) { | 2802 | switch (src_data) { |
1843 | case 0: /* D2 vblank */ | 2803 | case 0: /* D2 vblank */ |
1844 | if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { | 2804 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { |
1845 | drm_handle_vblank(rdev->ddev, 1); | 2805 | if (rdev->irq.crtc_vblank_int[1]) { |
1846 | wake_up(&rdev->irq.vblank_queue); | 2806 | drm_handle_vblank(rdev->ddev, 1); |
1847 | disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; | 2807 | rdev->pm.vblank_sync = true; |
2808 | wake_up(&rdev->irq.vblank_queue); | ||
2809 | } | ||
2810 | if (rdev->irq.pflip[1]) | ||
2811 | radeon_crtc_handle_flip(rdev, 1); | ||
2812 | rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; | ||
1848 | DRM_DEBUG("IH: D2 vblank\n"); | 2813 | DRM_DEBUG("IH: D2 vblank\n"); |
1849 | } | 2814 | } |
1850 | break; | 2815 | break; |
1851 | case 1: /* D2 vline */ | 2816 | case 1: /* D2 vline */ |
1852 | if (disp_int_cont & LB_D2_VLINE_INTERRUPT) { | 2817 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { |
1853 | disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; | 2818 | rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; |
1854 | DRM_DEBUG("IH: D2 vline\n"); | 2819 | DRM_DEBUG("IH: D2 vline\n"); |
1855 | } | 2820 | } |
1856 | break; | 2821 | break; |
@@ -1862,16 +2827,21 @@ restart_ih: | |||
1862 | case 3: /* D3 vblank/vline */ | 2827 | case 3: /* D3 vblank/vline */ |
1863 | switch (src_data) { | 2828 | switch (src_data) { |
1864 | case 0: /* D3 vblank */ | 2829 | case 0: /* D3 vblank */ |
1865 | if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { | 2830 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { |
1866 | drm_handle_vblank(rdev->ddev, 2); | 2831 | if (rdev->irq.crtc_vblank_int[2]) { |
1867 | wake_up(&rdev->irq.vblank_queue); | 2832 | drm_handle_vblank(rdev->ddev, 2); |
1868 | disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; | 2833 | rdev->pm.vblank_sync = true; |
2834 | wake_up(&rdev->irq.vblank_queue); | ||
2835 | } | ||
2836 | if (rdev->irq.pflip[2]) | ||
2837 | radeon_crtc_handle_flip(rdev, 2); | ||
2838 | rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; | ||
1869 | DRM_DEBUG("IH: D3 vblank\n"); | 2839 | DRM_DEBUG("IH: D3 vblank\n"); |
1870 | } | 2840 | } |
1871 | break; | 2841 | break; |
1872 | case 1: /* D3 vline */ | 2842 | case 1: /* D3 vline */ |
1873 | if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { | 2843 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { |
1874 | disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; | 2844 | rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; |
1875 | DRM_DEBUG("IH: D3 vline\n"); | 2845 | DRM_DEBUG("IH: D3 vline\n"); |
1876 | } | 2846 | } |
1877 | break; | 2847 | break; |
@@ -1883,16 +2853,21 @@ restart_ih: | |||
1883 | case 4: /* D4 vblank/vline */ | 2853 | case 4: /* D4 vblank/vline */ |
1884 | switch (src_data) { | 2854 | switch (src_data) { |
1885 | case 0: /* D4 vblank */ | 2855 | case 0: /* D4 vblank */ |
1886 | if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { | 2856 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { |
1887 | drm_handle_vblank(rdev->ddev, 3); | 2857 | if (rdev->irq.crtc_vblank_int[3]) { |
1888 | wake_up(&rdev->irq.vblank_queue); | 2858 | drm_handle_vblank(rdev->ddev, 3); |
1889 | disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; | 2859 | rdev->pm.vblank_sync = true; |
2860 | wake_up(&rdev->irq.vblank_queue); | ||
2861 | } | ||
2862 | if (rdev->irq.pflip[3]) | ||
2863 | radeon_crtc_handle_flip(rdev, 3); | ||
2864 | rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; | ||
1890 | DRM_DEBUG("IH: D4 vblank\n"); | 2865 | DRM_DEBUG("IH: D4 vblank\n"); |
1891 | } | 2866 | } |
1892 | break; | 2867 | break; |
1893 | case 1: /* D4 vline */ | 2868 | case 1: /* D4 vline */ |
1894 | if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { | 2869 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { |
1895 | disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; | 2870 | rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; |
1896 | DRM_DEBUG("IH: D4 vline\n"); | 2871 | DRM_DEBUG("IH: D4 vline\n"); |
1897 | } | 2872 | } |
1898 | break; | 2873 | break; |
@@ -1904,16 +2879,21 @@ restart_ih: | |||
1904 | case 5: /* D5 vblank/vline */ | 2879 | case 5: /* D5 vblank/vline */ |
1905 | switch (src_data) { | 2880 | switch (src_data) { |
1906 | case 0: /* D5 vblank */ | 2881 | case 0: /* D5 vblank */ |
1907 | if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { | 2882 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { |
1908 | drm_handle_vblank(rdev->ddev, 4); | 2883 | if (rdev->irq.crtc_vblank_int[4]) { |
1909 | wake_up(&rdev->irq.vblank_queue); | 2884 | drm_handle_vblank(rdev->ddev, 4); |
1910 | disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; | 2885 | rdev->pm.vblank_sync = true; |
2886 | wake_up(&rdev->irq.vblank_queue); | ||
2887 | } | ||
2888 | if (rdev->irq.pflip[4]) | ||
2889 | radeon_crtc_handle_flip(rdev, 4); | ||
2890 | rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; | ||
1911 | DRM_DEBUG("IH: D5 vblank\n"); | 2891 | DRM_DEBUG("IH: D5 vblank\n"); |
1912 | } | 2892 | } |
1913 | break; | 2893 | break; |
1914 | case 1: /* D5 vline */ | 2894 | case 1: /* D5 vline */ |
1915 | if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { | 2895 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { |
1916 | disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; | 2896 | rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; |
1917 | DRM_DEBUG("IH: D5 vline\n"); | 2897 | DRM_DEBUG("IH: D5 vline\n"); |
1918 | } | 2898 | } |
1919 | break; | 2899 | break; |
@@ -1925,16 +2905,21 @@ restart_ih: | |||
1925 | case 6: /* D6 vblank/vline */ | 2905 | case 6: /* D6 vblank/vline */ |
1926 | switch (src_data) { | 2906 | switch (src_data) { |
1927 | case 0: /* D6 vblank */ | 2907 | case 0: /* D6 vblank */ |
1928 | if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { | 2908 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { |
1929 | drm_handle_vblank(rdev->ddev, 5); | 2909 | if (rdev->irq.crtc_vblank_int[5]) { |
1930 | wake_up(&rdev->irq.vblank_queue); | 2910 | drm_handle_vblank(rdev->ddev, 5); |
1931 | disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; | 2911 | rdev->pm.vblank_sync = true; |
2912 | wake_up(&rdev->irq.vblank_queue); | ||
2913 | } | ||
2914 | if (rdev->irq.pflip[5]) | ||
2915 | radeon_crtc_handle_flip(rdev, 5); | ||
2916 | rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; | ||
1932 | DRM_DEBUG("IH: D6 vblank\n"); | 2917 | DRM_DEBUG("IH: D6 vblank\n"); |
1933 | } | 2918 | } |
1934 | break; | 2919 | break; |
1935 | case 1: /* D6 vline */ | 2920 | case 1: /* D6 vline */ |
1936 | if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { | 2921 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { |
1937 | disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; | 2922 | rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; |
1938 | DRM_DEBUG("IH: D6 vline\n"); | 2923 | DRM_DEBUG("IH: D6 vline\n"); |
1939 | } | 2924 | } |
1940 | break; | 2925 | break; |
@@ -1946,43 +2931,43 @@ restart_ih: | |||
1946 | case 42: /* HPD hotplug */ | 2931 | case 42: /* HPD hotplug */ |
1947 | switch (src_data) { | 2932 | switch (src_data) { |
1948 | case 0: | 2933 | case 0: |
1949 | if (disp_int & DC_HPD1_INTERRUPT) { | 2934 | if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { |
1950 | disp_int &= ~DC_HPD1_INTERRUPT; | 2935 | rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; |
1951 | queue_hotplug = true; | 2936 | queue_hotplug = true; |
1952 | DRM_DEBUG("IH: HPD1\n"); | 2937 | DRM_DEBUG("IH: HPD1\n"); |
1953 | } | 2938 | } |
1954 | break; | 2939 | break; |
1955 | case 1: | 2940 | case 1: |
1956 | if (disp_int_cont & DC_HPD2_INTERRUPT) { | 2941 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { |
1957 | disp_int_cont &= ~DC_HPD2_INTERRUPT; | 2942 | rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; |
1958 | queue_hotplug = true; | 2943 | queue_hotplug = true; |
1959 | DRM_DEBUG("IH: HPD2\n"); | 2944 | DRM_DEBUG("IH: HPD2\n"); |
1960 | } | 2945 | } |
1961 | break; | 2946 | break; |
1962 | case 2: | 2947 | case 2: |
1963 | if (disp_int_cont2 & DC_HPD3_INTERRUPT) { | 2948 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { |
1964 | disp_int_cont2 &= ~DC_HPD3_INTERRUPT; | 2949 | rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; |
1965 | queue_hotplug = true; | 2950 | queue_hotplug = true; |
1966 | DRM_DEBUG("IH: HPD3\n"); | 2951 | DRM_DEBUG("IH: HPD3\n"); |
1967 | } | 2952 | } |
1968 | break; | 2953 | break; |
1969 | case 3: | 2954 | case 3: |
1970 | if (disp_int_cont3 & DC_HPD4_INTERRUPT) { | 2955 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { |
1971 | disp_int_cont3 &= ~DC_HPD4_INTERRUPT; | 2956 | rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; |
1972 | queue_hotplug = true; | 2957 | queue_hotplug = true; |
1973 | DRM_DEBUG("IH: HPD4\n"); | 2958 | DRM_DEBUG("IH: HPD4\n"); |
1974 | } | 2959 | } |
1975 | break; | 2960 | break; |
1976 | case 4: | 2961 | case 4: |
1977 | if (disp_int_cont4 & DC_HPD5_INTERRUPT) { | 2962 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { |
1978 | disp_int_cont4 &= ~DC_HPD5_INTERRUPT; | 2963 | rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; |
1979 | queue_hotplug = true; | 2964 | queue_hotplug = true; |
1980 | DRM_DEBUG("IH: HPD5\n"); | 2965 | DRM_DEBUG("IH: HPD5\n"); |
1981 | } | 2966 | } |
1982 | break; | 2967 | break; |
1983 | case 5: | 2968 | case 5: |
1984 | if (disp_int_cont5 & DC_HPD6_INTERRUPT) { | 2969 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { |
1985 | disp_int_cont5 &= ~DC_HPD6_INTERRUPT; | 2970 | rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; |
1986 | queue_hotplug = true; | 2971 | queue_hotplug = true; |
1987 | DRM_DEBUG("IH: HPD6\n"); | 2972 | DRM_DEBUG("IH: HPD6\n"); |
1988 | } | 2973 | } |
@@ -2000,9 +2985,10 @@ restart_ih: | |||
2000 | break; | 2985 | break; |
2001 | case 181: /* CP EOP event */ | 2986 | case 181: /* CP EOP event */ |
2002 | DRM_DEBUG("IH: CP EOP\n"); | 2987 | DRM_DEBUG("IH: CP EOP\n"); |
2988 | radeon_fence_process(rdev); | ||
2003 | break; | 2989 | break; |
2004 | case 233: /* GUI IDLE */ | 2990 | case 233: /* GUI IDLE */ |
2005 | DRM_DEBUG("IH: CP EOP\n"); | 2991 | DRM_DEBUG("IH: GUI idle\n"); |
2006 | rdev->pm.gui_idle = true; | 2992 | rdev->pm.gui_idle = true; |
2007 | wake_up(&rdev->irq.idle_queue); | 2993 | wake_up(&rdev->irq.idle_queue); |
2008 | break; | 2994 | break; |
@@ -2020,7 +3006,7 @@ restart_ih: | |||
2020 | if (wptr != rdev->ih.wptr) | 3006 | if (wptr != rdev->ih.wptr) |
2021 | goto restart_ih; | 3007 | goto restart_ih; |
2022 | if (queue_hotplug) | 3008 | if (queue_hotplug) |
2023 | queue_work(rdev->wq, &rdev->hotplug_work); | 3009 | schedule_work(&rdev->hotplug_work); |
2024 | rdev->ih.rptr = rptr; | 3010 | rdev->ih.rptr = rptr; |
2025 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | 3011 | WREG32(IH_RB_RPTR, rdev->ih.rptr); |
2026 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | 3012 | spin_unlock_irqrestore(&rdev->ih.lock, flags); |
@@ -2031,12 +3017,31 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
2031 | { | 3017 | { |
2032 | int r; | 3018 | int r; |
2033 | 3019 | ||
2034 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 3020 | /* enable pcie gen2 link */ |
2035 | r = r600_init_microcode(rdev); | 3021 | if (!ASIC_IS_DCE5(rdev)) |
3022 | evergreen_pcie_gen2_enable(rdev); | ||
3023 | |||
3024 | if (ASIC_IS_DCE5(rdev)) { | ||
3025 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | ||
3026 | r = ni_init_microcode(rdev); | ||
3027 | if (r) { | ||
3028 | DRM_ERROR("Failed to load firmware!\n"); | ||
3029 | return r; | ||
3030 | } | ||
3031 | } | ||
3032 | r = ni_mc_load_microcode(rdev); | ||
2036 | if (r) { | 3033 | if (r) { |
2037 | DRM_ERROR("Failed to load firmware!\n"); | 3034 | DRM_ERROR("Failed to load MC firmware!\n"); |
2038 | return r; | 3035 | return r; |
2039 | } | 3036 | } |
3037 | } else { | ||
3038 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
3039 | r = r600_init_microcode(rdev); | ||
3040 | if (r) { | ||
3041 | DRM_ERROR("Failed to load firmware!\n"); | ||
3042 | return r; | ||
3043 | } | ||
3044 | } | ||
2040 | } | 3045 | } |
2041 | 3046 | ||
2042 | evergreen_mc_program(rdev); | 3047 | evergreen_mc_program(rdev); |
@@ -2048,26 +3053,18 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
2048 | return r; | 3053 | return r; |
2049 | } | 3054 | } |
2050 | evergreen_gpu_init(rdev); | 3055 | evergreen_gpu_init(rdev); |
2051 | #if 0 | ||
2052 | if (!rdev->r600_blit.shader_obj) { | ||
2053 | r = r600_blit_init(rdev); | ||
2054 | if (r) { | ||
2055 | DRM_ERROR("radeon: failed blitter (%d).\n", r); | ||
2056 | return r; | ||
2057 | } | ||
2058 | } | ||
2059 | 3056 | ||
2060 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 3057 | r = evergreen_blit_init(rdev); |
2061 | if (unlikely(r != 0)) | ||
2062 | return r; | ||
2063 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | ||
2064 | &rdev->r600_blit.shader_gpu_addr); | ||
2065 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
2066 | if (r) { | 3058 | if (r) { |
2067 | DRM_ERROR("failed to pin blit object %d\n", r); | 3059 | evergreen_blit_fini(rdev); |
2068 | return r; | 3060 | rdev->asic->copy = NULL; |
3061 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | ||
2069 | } | 3062 | } |
2070 | #endif | 3063 | |
3064 | /* allocate wb buffer */ | ||
3065 | r = radeon_wb_init(rdev); | ||
3066 | if (r) | ||
3067 | return r; | ||
2071 | 3068 | ||
2072 | /* Enable IRQ */ | 3069 | /* Enable IRQ */ |
2073 | r = r600_irq_init(rdev); | 3070 | r = r600_irq_init(rdev); |
@@ -2087,8 +3084,6 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
2087 | r = evergreen_cp_resume(rdev); | 3084 | r = evergreen_cp_resume(rdev); |
2088 | if (r) | 3085 | if (r) |
2089 | return r; | 3086 | return r; |
2090 | /* write back buffer are not vital so don't worry about failure */ | ||
2091 | r600_wb_enable(rdev); | ||
2092 | 3087 | ||
2093 | return 0; | 3088 | return 0; |
2094 | } | 3089 | } |
@@ -2097,6 +3092,11 @@ int evergreen_resume(struct radeon_device *rdev) | |||
2097 | { | 3092 | { |
2098 | int r; | 3093 | int r; |
2099 | 3094 | ||
3095 | /* reset the asic, the gfx blocks are often in a bad state | ||
3096 | * after the driver is unloaded or after a resume | ||
3097 | */ | ||
3098 | if (radeon_asic_reset(rdev)) | ||
3099 | dev_warn(rdev->dev, "GPU reset failed !\n"); | ||
2100 | /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, | 3100 | /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, |
2101 | * posting will perform necessary task to bring back GPU into good | 3101 | * posting will perform necessary task to bring back GPU into good |
2102 | * shape. | 3102 | * shape. |
@@ -2106,13 +3106,13 @@ int evergreen_resume(struct radeon_device *rdev) | |||
2106 | 3106 | ||
2107 | r = evergreen_startup(rdev); | 3107 | r = evergreen_startup(rdev); |
2108 | if (r) { | 3108 | if (r) { |
2109 | DRM_ERROR("r600 startup failed on resume\n"); | 3109 | DRM_ERROR("evergreen startup failed on resume\n"); |
2110 | return r; | 3110 | return r; |
2111 | } | 3111 | } |
2112 | 3112 | ||
2113 | r = r600_ib_test(rdev); | 3113 | r = r600_ib_test(rdev); |
2114 | if (r) { | 3114 | if (r) { |
2115 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 3115 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); |
2116 | return r; | 3116 | return r; |
2117 | } | 3117 | } |
2118 | 3118 | ||
@@ -2122,45 +3122,44 @@ int evergreen_resume(struct radeon_device *rdev) | |||
2122 | 3122 | ||
2123 | int evergreen_suspend(struct radeon_device *rdev) | 3123 | int evergreen_suspend(struct radeon_device *rdev) |
2124 | { | 3124 | { |
2125 | #if 0 | ||
2126 | int r; | 3125 | int r; |
2127 | #endif | 3126 | |
2128 | /* FIXME: we should wait for ring to be empty */ | 3127 | /* FIXME: we should wait for ring to be empty */ |
2129 | r700_cp_stop(rdev); | 3128 | r700_cp_stop(rdev); |
2130 | rdev->cp.ready = false; | 3129 | rdev->cp.ready = false; |
2131 | evergreen_irq_suspend(rdev); | 3130 | evergreen_irq_suspend(rdev); |
2132 | r600_wb_disable(rdev); | 3131 | radeon_wb_disable(rdev); |
2133 | evergreen_pcie_gart_disable(rdev); | 3132 | evergreen_pcie_gart_disable(rdev); |
2134 | #if 0 | 3133 | |
2135 | /* unpin shaders bo */ | 3134 | /* unpin shaders bo */ |
2136 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 3135 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
2137 | if (likely(r == 0)) { | 3136 | if (likely(r == 0)) { |
2138 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | 3137 | radeon_bo_unpin(rdev->r600_blit.shader_obj); |
2139 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | 3138 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
2140 | } | 3139 | } |
2141 | #endif | 3140 | |
2142 | return 0; | 3141 | return 0; |
2143 | } | 3142 | } |
2144 | 3143 | ||
2145 | static bool evergreen_card_posted(struct radeon_device *rdev) | 3144 | int evergreen_copy_blit(struct radeon_device *rdev, |
3145 | uint64_t src_offset, uint64_t dst_offset, | ||
3146 | unsigned num_pages, struct radeon_fence *fence) | ||
2146 | { | 3147 | { |
2147 | u32 reg; | 3148 | int r; |
2148 | |||
2149 | /* first check CRTCs */ | ||
2150 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | | ||
2151 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | | ||
2152 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | | ||
2153 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | | ||
2154 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | | ||
2155 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); | ||
2156 | if (reg & EVERGREEN_CRTC_MASTER_EN) | ||
2157 | return true; | ||
2158 | |||
2159 | /* then check MEM_SIZE, in case the crtcs are off */ | ||
2160 | if (RREG32(CONFIG_MEMSIZE)) | ||
2161 | return true; | ||
2162 | 3149 | ||
2163 | return false; | 3150 | mutex_lock(&rdev->r600_blit.mutex); |
3151 | rdev->r600_blit.vb_ib = NULL; | ||
3152 | r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); | ||
3153 | if (r) { | ||
3154 | if (rdev->r600_blit.vb_ib) | ||
3155 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | ||
3156 | mutex_unlock(&rdev->r600_blit.mutex); | ||
3157 | return r; | ||
3158 | } | ||
3159 | evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); | ||
3160 | evergreen_blit_done_copy(rdev, fence); | ||
3161 | mutex_unlock(&rdev->r600_blit.mutex); | ||
3162 | return 0; | ||
2164 | } | 3163 | } |
2165 | 3164 | ||
2166 | /* Plan is to move initialization in that function and use | 3165 | /* Plan is to move initialization in that function and use |
@@ -2173,9 +3172,6 @@ int evergreen_init(struct radeon_device *rdev) | |||
2173 | { | 3172 | { |
2174 | int r; | 3173 | int r; |
2175 | 3174 | ||
2176 | r = radeon_dummy_page_init(rdev); | ||
2177 | if (r) | ||
2178 | return r; | ||
2179 | /* This don't do much */ | 3175 | /* This don't do much */ |
2180 | r = radeon_gem_init(rdev); | 3176 | r = radeon_gem_init(rdev); |
2181 | if (r) | 3177 | if (r) |
@@ -2187,14 +3183,19 @@ int evergreen_init(struct radeon_device *rdev) | |||
2187 | } | 3183 | } |
2188 | /* Must be an ATOMBIOS */ | 3184 | /* Must be an ATOMBIOS */ |
2189 | if (!rdev->is_atom_bios) { | 3185 | if (!rdev->is_atom_bios) { |
2190 | dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); | 3186 | dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n"); |
2191 | return -EINVAL; | 3187 | return -EINVAL; |
2192 | } | 3188 | } |
2193 | r = radeon_atombios_init(rdev); | 3189 | r = radeon_atombios_init(rdev); |
2194 | if (r) | 3190 | if (r) |
2195 | return r; | 3191 | return r; |
3192 | /* reset the asic, the gfx blocks are often in a bad state | ||
3193 | * after the driver is unloaded or after a resume | ||
3194 | */ | ||
3195 | if (radeon_asic_reset(rdev)) | ||
3196 | dev_warn(rdev->dev, "GPU reset failed !\n"); | ||
2196 | /* Post card if necessary */ | 3197 | /* Post card if necessary */ |
2197 | if (!evergreen_card_posted(rdev)) { | 3198 | if (!radeon_card_posted(rdev)) { |
2198 | if (!rdev->bios) { | 3199 | if (!rdev->bios) { |
2199 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | 3200 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); |
2200 | return -EINVAL; | 3201 | return -EINVAL; |
@@ -2246,8 +3247,8 @@ int evergreen_init(struct radeon_device *rdev) | |||
2246 | if (r) { | 3247 | if (r) { |
2247 | dev_err(rdev->dev, "disabling GPU acceleration\n"); | 3248 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
2248 | r700_cp_fini(rdev); | 3249 | r700_cp_fini(rdev); |
2249 | r600_wb_fini(rdev); | ||
2250 | r600_irq_fini(rdev); | 3250 | r600_irq_fini(rdev); |
3251 | radeon_wb_fini(rdev); | ||
2251 | radeon_irq_kms_fini(rdev); | 3252 | radeon_irq_kms_fini(rdev); |
2252 | evergreen_pcie_gart_fini(rdev); | 3253 | evergreen_pcie_gart_fini(rdev); |
2253 | rdev->accel_working = false; | 3254 | rdev->accel_working = false; |
@@ -2269,10 +3270,11 @@ int evergreen_init(struct radeon_device *rdev) | |||
2269 | 3270 | ||
2270 | void evergreen_fini(struct radeon_device *rdev) | 3271 | void evergreen_fini(struct radeon_device *rdev) |
2271 | { | 3272 | { |
2272 | /*r600_blit_fini(rdev);*/ | 3273 | evergreen_blit_fini(rdev); |
2273 | r700_cp_fini(rdev); | 3274 | r700_cp_fini(rdev); |
2274 | r600_wb_fini(rdev); | ||
2275 | r600_irq_fini(rdev); | 3275 | r600_irq_fini(rdev); |
3276 | radeon_wb_fini(rdev); | ||
3277 | radeon_ib_pool_fini(rdev); | ||
2276 | radeon_irq_kms_fini(rdev); | 3278 | radeon_irq_kms_fini(rdev); |
2277 | evergreen_pcie_gart_fini(rdev); | 3279 | evergreen_pcie_gart_fini(rdev); |
2278 | radeon_gem_fini(rdev); | 3280 | radeon_gem_fini(rdev); |
@@ -2282,5 +3284,56 @@ void evergreen_fini(struct radeon_device *rdev) | |||
2282 | radeon_atombios_fini(rdev); | 3284 | radeon_atombios_fini(rdev); |
2283 | kfree(rdev->bios); | 3285 | kfree(rdev->bios); |
2284 | rdev->bios = NULL; | 3286 | rdev->bios = NULL; |
2285 | radeon_dummy_page_fini(rdev); | 3287 | } |
3288 | |||
3289 | static void evergreen_pcie_gen2_enable(struct radeon_device *rdev) | ||
3290 | { | ||
3291 | u32 link_width_cntl, speed_cntl; | ||
3292 | |||
3293 | if (radeon_pcie_gen2 == 0) | ||
3294 | return; | ||
3295 | |||
3296 | if (rdev->flags & RADEON_IS_IGP) | ||
3297 | return; | ||
3298 | |||
3299 | if (!(rdev->flags & RADEON_IS_PCIE)) | ||
3300 | return; | ||
3301 | |||
3302 | /* x2 cards have a special sequence */ | ||
3303 | if (ASIC_IS_X2(rdev)) | ||
3304 | return; | ||
3305 | |||
3306 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3307 | if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) || | ||
3308 | (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { | ||
3309 | |||
3310 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
3311 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; | ||
3312 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
3313 | |||
3314 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3315 | speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; | ||
3316 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
3317 | |||
3318 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3319 | speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT; | ||
3320 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
3321 | |||
3322 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3323 | speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT; | ||
3324 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
3325 | |||
3326 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3327 | speed_cntl |= LC_GEN2_EN_STRAP; | ||
3328 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
3329 | |||
3330 | } else { | ||
3331 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
3332 | /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ | ||
3333 | if (1) | ||
3334 | link_width_cntl |= LC_UPCONFIGURE_DIS; | ||
3335 | else | ||
3336 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; | ||
3337 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
3338 | } | ||
2286 | } | 3339 | } |
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c new file mode 100644 index 000000000000..2eb251858e72 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c | |||
@@ -0,0 +1,988 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Alex Deucher <alexander.deucher@amd.com> | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm.h" | ||
29 | #include "radeon_drm.h" | ||
30 | #include "radeon.h" | ||
31 | |||
32 | #include "evergreend.h" | ||
33 | #include "evergreen_blit_shaders.h" | ||
34 | #include "cayman_blit_shaders.h" | ||
35 | |||
36 | #define DI_PT_RECTLIST 0x11 | ||
37 | #define DI_INDEX_SIZE_16_BIT 0x0 | ||
38 | #define DI_SRC_SEL_AUTO_INDEX 0x2 | ||
39 | |||
40 | #define FMT_8 0x1 | ||
41 | #define FMT_5_6_5 0x8 | ||
42 | #define FMT_8_8_8_8 0x1a | ||
43 | #define COLOR_8 0x1 | ||
44 | #define COLOR_5_6_5 0x8 | ||
45 | #define COLOR_8_8_8_8 0x1a | ||
46 | |||
47 | /* emits 17 */ | ||
48 | static void | ||
49 | set_render_target(struct radeon_device *rdev, int format, | ||
50 | int w, int h, u64 gpu_addr) | ||
51 | { | ||
52 | u32 cb_color_info; | ||
53 | int pitch, slice; | ||
54 | |||
55 | h = ALIGN(h, 8); | ||
56 | if (h < 8) | ||
57 | h = 8; | ||
58 | |||
59 | cb_color_info = ((format << 2) | (1 << 24) | (1 << 8)); | ||
60 | pitch = (w / 8) - 1; | ||
61 | slice = ((w * h) / 64) - 1; | ||
62 | |||
63 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15)); | ||
64 | radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
65 | radeon_ring_write(rdev, gpu_addr >> 8); | ||
66 | radeon_ring_write(rdev, pitch); | ||
67 | radeon_ring_write(rdev, slice); | ||
68 | radeon_ring_write(rdev, 0); | ||
69 | radeon_ring_write(rdev, cb_color_info); | ||
70 | radeon_ring_write(rdev, (1 << 4)); | ||
71 | radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16)); | ||
72 | radeon_ring_write(rdev, 0); | ||
73 | radeon_ring_write(rdev, 0); | ||
74 | radeon_ring_write(rdev, 0); | ||
75 | radeon_ring_write(rdev, 0); | ||
76 | radeon_ring_write(rdev, 0); | ||
77 | radeon_ring_write(rdev, 0); | ||
78 | radeon_ring_write(rdev, 0); | ||
79 | radeon_ring_write(rdev, 0); | ||
80 | } | ||
81 | |||
82 | /* emits 5dw */ | ||
83 | static void | ||
84 | cp_set_surface_sync(struct radeon_device *rdev, | ||
85 | u32 sync_type, u32 size, | ||
86 | u64 mc_addr) | ||
87 | { | ||
88 | u32 cp_coher_size; | ||
89 | |||
90 | if (size == 0xffffffff) | ||
91 | cp_coher_size = 0xffffffff; | ||
92 | else | ||
93 | cp_coher_size = ((size + 255) >> 8); | ||
94 | |||
95 | radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3)); | ||
96 | radeon_ring_write(rdev, sync_type); | ||
97 | radeon_ring_write(rdev, cp_coher_size); | ||
98 | radeon_ring_write(rdev, mc_addr >> 8); | ||
99 | radeon_ring_write(rdev, 10); /* poll interval */ | ||
100 | } | ||
101 | |||
102 | /* emits 11dw + 1 surface sync = 16dw */ | ||
103 | static void | ||
104 | set_shaders(struct radeon_device *rdev) | ||
105 | { | ||
106 | u64 gpu_addr; | ||
107 | |||
108 | /* VS */ | ||
109 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; | ||
110 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3)); | ||
111 | radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
112 | radeon_ring_write(rdev, gpu_addr >> 8); | ||
113 | radeon_ring_write(rdev, 2); | ||
114 | radeon_ring_write(rdev, 0); | ||
115 | |||
116 | /* PS */ | ||
117 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset; | ||
118 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4)); | ||
119 | radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
120 | radeon_ring_write(rdev, gpu_addr >> 8); | ||
121 | radeon_ring_write(rdev, 1); | ||
122 | radeon_ring_write(rdev, 0); | ||
123 | radeon_ring_write(rdev, 2); | ||
124 | |||
125 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; | ||
126 | cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr); | ||
127 | } | ||
128 | |||
129 | /* emits 10 + 1 sync (5) = 15 */ | ||
130 | static void | ||
131 | set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | ||
132 | { | ||
133 | u32 sq_vtx_constant_word2, sq_vtx_constant_word3; | ||
134 | |||
135 | /* high addr, stride */ | ||
136 | sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); | ||
137 | #ifdef __BIG_ENDIAN | ||
138 | sq_vtx_constant_word2 |= (2 << 30); | ||
139 | #endif | ||
140 | /* xyzw swizzles */ | ||
141 | sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12); | ||
142 | |||
143 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); | ||
144 | radeon_ring_write(rdev, 0x580); | ||
145 | radeon_ring_write(rdev, gpu_addr & 0xffffffff); | ||
146 | radeon_ring_write(rdev, 48 - 1); /* size */ | ||
147 | radeon_ring_write(rdev, sq_vtx_constant_word2); | ||
148 | radeon_ring_write(rdev, sq_vtx_constant_word3); | ||
149 | radeon_ring_write(rdev, 0); | ||
150 | radeon_ring_write(rdev, 0); | ||
151 | radeon_ring_write(rdev, 0); | ||
152 | radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30); | ||
153 | |||
154 | if ((rdev->family == CHIP_CEDAR) || | ||
155 | (rdev->family == CHIP_PALM) || | ||
156 | (rdev->family == CHIP_SUMO) || | ||
157 | (rdev->family == CHIP_SUMO2) || | ||
158 | (rdev->family == CHIP_CAICOS)) | ||
159 | cp_set_surface_sync(rdev, | ||
160 | PACKET3_TC_ACTION_ENA, 48, gpu_addr); | ||
161 | else | ||
162 | cp_set_surface_sync(rdev, | ||
163 | PACKET3_VC_ACTION_ENA, 48, gpu_addr); | ||
164 | |||
165 | } | ||
166 | |||
167 | /* emits 10 */ | ||
168 | static void | ||
169 | set_tex_resource(struct radeon_device *rdev, | ||
170 | int format, int w, int h, int pitch, | ||
171 | u64 gpu_addr) | ||
172 | { | ||
173 | u32 sq_tex_resource_word0, sq_tex_resource_word1; | ||
174 | u32 sq_tex_resource_word4, sq_tex_resource_word7; | ||
175 | |||
176 | if (h < 1) | ||
177 | h = 1; | ||
178 | |||
179 | sq_tex_resource_word0 = (1 << 0); /* 2D */ | ||
180 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | | ||
181 | ((w - 1) << 18)); | ||
182 | sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28); | ||
183 | /* xyzw swizzles */ | ||
184 | sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25); | ||
185 | |||
186 | sq_tex_resource_word7 = format | (SQ_TEX_VTX_VALID_TEXTURE << 30); | ||
187 | |||
188 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); | ||
189 | radeon_ring_write(rdev, 0); | ||
190 | radeon_ring_write(rdev, sq_tex_resource_word0); | ||
191 | radeon_ring_write(rdev, sq_tex_resource_word1); | ||
192 | radeon_ring_write(rdev, gpu_addr >> 8); | ||
193 | radeon_ring_write(rdev, gpu_addr >> 8); | ||
194 | radeon_ring_write(rdev, sq_tex_resource_word4); | ||
195 | radeon_ring_write(rdev, 0); | ||
196 | radeon_ring_write(rdev, 0); | ||
197 | radeon_ring_write(rdev, sq_tex_resource_word7); | ||
198 | } | ||
199 | |||
200 | /* emits 12 */ | ||
201 | static void | ||
202 | set_scissors(struct radeon_device *rdev, int x1, int y1, | ||
203 | int x2, int y2) | ||
204 | { | ||
205 | /* workaround some hw bugs */ | ||
206 | if (x2 == 0) | ||
207 | x1 = 1; | ||
208 | if (y2 == 0) | ||
209 | y1 = 1; | ||
210 | if (rdev->family == CHIP_CAYMAN) { | ||
211 | if ((x2 == 1) && (y2 == 1)) | ||
212 | x2 = 2; | ||
213 | } | ||
214 | |||
215 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | ||
216 | radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
217 | radeon_ring_write(rdev, (x1 << 0) | (y1 << 16)); | ||
218 | radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); | ||
219 | |||
220 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | ||
221 | radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
222 | radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31)); | ||
223 | radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); | ||
224 | |||
225 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | ||
226 | radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
227 | radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31)); | ||
228 | radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); | ||
229 | } | ||
230 | |||
231 | /* emits 10 */ | ||
232 | static void | ||
233 | draw_auto(struct radeon_device *rdev) | ||
234 | { | ||
235 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
236 | radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2); | ||
237 | radeon_ring_write(rdev, DI_PT_RECTLIST); | ||
238 | |||
239 | radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); | ||
240 | radeon_ring_write(rdev, | ||
241 | #ifdef __BIG_ENDIAN | ||
242 | (2 << 2) | | ||
243 | #endif | ||
244 | DI_INDEX_SIZE_16_BIT); | ||
245 | |||
246 | radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); | ||
247 | radeon_ring_write(rdev, 1); | ||
248 | |||
249 | radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1)); | ||
250 | radeon_ring_write(rdev, 3); | ||
251 | radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX); | ||
252 | |||
253 | } | ||
254 | |||
255 | /* emits 39 */ | ||
256 | static void | ||
257 | set_default_state(struct radeon_device *rdev) | ||
258 | { | ||
259 | u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3; | ||
260 | u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2; | ||
261 | u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3; | ||
262 | int num_ps_gprs, num_vs_gprs, num_temp_gprs; | ||
263 | int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs; | ||
264 | int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; | ||
265 | int num_hs_threads, num_ls_threads; | ||
266 | int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; | ||
267 | int num_hs_stack_entries, num_ls_stack_entries; | ||
268 | u64 gpu_addr; | ||
269 | int dwords; | ||
270 | |||
271 | /* set clear context state */ | ||
272 | radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0)); | ||
273 | radeon_ring_write(rdev, 0); | ||
274 | |||
275 | if (rdev->family < CHIP_CAYMAN) { | ||
276 | switch (rdev->family) { | ||
277 | case CHIP_CEDAR: | ||
278 | default: | ||
279 | num_ps_gprs = 93; | ||
280 | num_vs_gprs = 46; | ||
281 | num_temp_gprs = 4; | ||
282 | num_gs_gprs = 31; | ||
283 | num_es_gprs = 31; | ||
284 | num_hs_gprs = 23; | ||
285 | num_ls_gprs = 23; | ||
286 | num_ps_threads = 96; | ||
287 | num_vs_threads = 16; | ||
288 | num_gs_threads = 16; | ||
289 | num_es_threads = 16; | ||
290 | num_hs_threads = 16; | ||
291 | num_ls_threads = 16; | ||
292 | num_ps_stack_entries = 42; | ||
293 | num_vs_stack_entries = 42; | ||
294 | num_gs_stack_entries = 42; | ||
295 | num_es_stack_entries = 42; | ||
296 | num_hs_stack_entries = 42; | ||
297 | num_ls_stack_entries = 42; | ||
298 | break; | ||
299 | case CHIP_REDWOOD: | ||
300 | num_ps_gprs = 93; | ||
301 | num_vs_gprs = 46; | ||
302 | num_temp_gprs = 4; | ||
303 | num_gs_gprs = 31; | ||
304 | num_es_gprs = 31; | ||
305 | num_hs_gprs = 23; | ||
306 | num_ls_gprs = 23; | ||
307 | num_ps_threads = 128; | ||
308 | num_vs_threads = 20; | ||
309 | num_gs_threads = 20; | ||
310 | num_es_threads = 20; | ||
311 | num_hs_threads = 20; | ||
312 | num_ls_threads = 20; | ||
313 | num_ps_stack_entries = 42; | ||
314 | num_vs_stack_entries = 42; | ||
315 | num_gs_stack_entries = 42; | ||
316 | num_es_stack_entries = 42; | ||
317 | num_hs_stack_entries = 42; | ||
318 | num_ls_stack_entries = 42; | ||
319 | break; | ||
320 | case CHIP_JUNIPER: | ||
321 | num_ps_gprs = 93; | ||
322 | num_vs_gprs = 46; | ||
323 | num_temp_gprs = 4; | ||
324 | num_gs_gprs = 31; | ||
325 | num_es_gprs = 31; | ||
326 | num_hs_gprs = 23; | ||
327 | num_ls_gprs = 23; | ||
328 | num_ps_threads = 128; | ||
329 | num_vs_threads = 20; | ||
330 | num_gs_threads = 20; | ||
331 | num_es_threads = 20; | ||
332 | num_hs_threads = 20; | ||
333 | num_ls_threads = 20; | ||
334 | num_ps_stack_entries = 85; | ||
335 | num_vs_stack_entries = 85; | ||
336 | num_gs_stack_entries = 85; | ||
337 | num_es_stack_entries = 85; | ||
338 | num_hs_stack_entries = 85; | ||
339 | num_ls_stack_entries = 85; | ||
340 | break; | ||
341 | case CHIP_CYPRESS: | ||
342 | case CHIP_HEMLOCK: | ||
343 | num_ps_gprs = 93; | ||
344 | num_vs_gprs = 46; | ||
345 | num_temp_gprs = 4; | ||
346 | num_gs_gprs = 31; | ||
347 | num_es_gprs = 31; | ||
348 | num_hs_gprs = 23; | ||
349 | num_ls_gprs = 23; | ||
350 | num_ps_threads = 128; | ||
351 | num_vs_threads = 20; | ||
352 | num_gs_threads = 20; | ||
353 | num_es_threads = 20; | ||
354 | num_hs_threads = 20; | ||
355 | num_ls_threads = 20; | ||
356 | num_ps_stack_entries = 85; | ||
357 | num_vs_stack_entries = 85; | ||
358 | num_gs_stack_entries = 85; | ||
359 | num_es_stack_entries = 85; | ||
360 | num_hs_stack_entries = 85; | ||
361 | num_ls_stack_entries = 85; | ||
362 | break; | ||
363 | case CHIP_PALM: | ||
364 | num_ps_gprs = 93; | ||
365 | num_vs_gprs = 46; | ||
366 | num_temp_gprs = 4; | ||
367 | num_gs_gprs = 31; | ||
368 | num_es_gprs = 31; | ||
369 | num_hs_gprs = 23; | ||
370 | num_ls_gprs = 23; | ||
371 | num_ps_threads = 96; | ||
372 | num_vs_threads = 16; | ||
373 | num_gs_threads = 16; | ||
374 | num_es_threads = 16; | ||
375 | num_hs_threads = 16; | ||
376 | num_ls_threads = 16; | ||
377 | num_ps_stack_entries = 42; | ||
378 | num_vs_stack_entries = 42; | ||
379 | num_gs_stack_entries = 42; | ||
380 | num_es_stack_entries = 42; | ||
381 | num_hs_stack_entries = 42; | ||
382 | num_ls_stack_entries = 42; | ||
383 | break; | ||
384 | case CHIP_SUMO: | ||
385 | num_ps_gprs = 93; | ||
386 | num_vs_gprs = 46; | ||
387 | num_temp_gprs = 4; | ||
388 | num_gs_gprs = 31; | ||
389 | num_es_gprs = 31; | ||
390 | num_hs_gprs = 23; | ||
391 | num_ls_gprs = 23; | ||
392 | num_ps_threads = 96; | ||
393 | num_vs_threads = 25; | ||
394 | num_gs_threads = 25; | ||
395 | num_es_threads = 25; | ||
396 | num_hs_threads = 25; | ||
397 | num_ls_threads = 25; | ||
398 | num_ps_stack_entries = 42; | ||
399 | num_vs_stack_entries = 42; | ||
400 | num_gs_stack_entries = 42; | ||
401 | num_es_stack_entries = 42; | ||
402 | num_hs_stack_entries = 42; | ||
403 | num_ls_stack_entries = 42; | ||
404 | break; | ||
405 | case CHIP_SUMO2: | ||
406 | num_ps_gprs = 93; | ||
407 | num_vs_gprs = 46; | ||
408 | num_temp_gprs = 4; | ||
409 | num_gs_gprs = 31; | ||
410 | num_es_gprs = 31; | ||
411 | num_hs_gprs = 23; | ||
412 | num_ls_gprs = 23; | ||
413 | num_ps_threads = 96; | ||
414 | num_vs_threads = 25; | ||
415 | num_gs_threads = 25; | ||
416 | num_es_threads = 25; | ||
417 | num_hs_threads = 25; | ||
418 | num_ls_threads = 25; | ||
419 | num_ps_stack_entries = 85; | ||
420 | num_vs_stack_entries = 85; | ||
421 | num_gs_stack_entries = 85; | ||
422 | num_es_stack_entries = 85; | ||
423 | num_hs_stack_entries = 85; | ||
424 | num_ls_stack_entries = 85; | ||
425 | break; | ||
426 | case CHIP_BARTS: | ||
427 | num_ps_gprs = 93; | ||
428 | num_vs_gprs = 46; | ||
429 | num_temp_gprs = 4; | ||
430 | num_gs_gprs = 31; | ||
431 | num_es_gprs = 31; | ||
432 | num_hs_gprs = 23; | ||
433 | num_ls_gprs = 23; | ||
434 | num_ps_threads = 128; | ||
435 | num_vs_threads = 20; | ||
436 | num_gs_threads = 20; | ||
437 | num_es_threads = 20; | ||
438 | num_hs_threads = 20; | ||
439 | num_ls_threads = 20; | ||
440 | num_ps_stack_entries = 85; | ||
441 | num_vs_stack_entries = 85; | ||
442 | num_gs_stack_entries = 85; | ||
443 | num_es_stack_entries = 85; | ||
444 | num_hs_stack_entries = 85; | ||
445 | num_ls_stack_entries = 85; | ||
446 | break; | ||
447 | case CHIP_TURKS: | ||
448 | num_ps_gprs = 93; | ||
449 | num_vs_gprs = 46; | ||
450 | num_temp_gprs = 4; | ||
451 | num_gs_gprs = 31; | ||
452 | num_es_gprs = 31; | ||
453 | num_hs_gprs = 23; | ||
454 | num_ls_gprs = 23; | ||
455 | num_ps_threads = 128; | ||
456 | num_vs_threads = 20; | ||
457 | num_gs_threads = 20; | ||
458 | num_es_threads = 20; | ||
459 | num_hs_threads = 20; | ||
460 | num_ls_threads = 20; | ||
461 | num_ps_stack_entries = 42; | ||
462 | num_vs_stack_entries = 42; | ||
463 | num_gs_stack_entries = 42; | ||
464 | num_es_stack_entries = 42; | ||
465 | num_hs_stack_entries = 42; | ||
466 | num_ls_stack_entries = 42; | ||
467 | break; | ||
468 | case CHIP_CAICOS: | ||
469 | num_ps_gprs = 93; | ||
470 | num_vs_gprs = 46; | ||
471 | num_temp_gprs = 4; | ||
472 | num_gs_gprs = 31; | ||
473 | num_es_gprs = 31; | ||
474 | num_hs_gprs = 23; | ||
475 | num_ls_gprs = 23; | ||
476 | num_ps_threads = 128; | ||
477 | num_vs_threads = 10; | ||
478 | num_gs_threads = 10; | ||
479 | num_es_threads = 10; | ||
480 | num_hs_threads = 10; | ||
481 | num_ls_threads = 10; | ||
482 | num_ps_stack_entries = 42; | ||
483 | num_vs_stack_entries = 42; | ||
484 | num_gs_stack_entries = 42; | ||
485 | num_es_stack_entries = 42; | ||
486 | num_hs_stack_entries = 42; | ||
487 | num_ls_stack_entries = 42; | ||
488 | break; | ||
489 | } | ||
490 | |||
491 | if ((rdev->family == CHIP_CEDAR) || | ||
492 | (rdev->family == CHIP_PALM) || | ||
493 | (rdev->family == CHIP_SUMO) || | ||
494 | (rdev->family == CHIP_SUMO2) || | ||
495 | (rdev->family == CHIP_CAICOS)) | ||
496 | sq_config = 0; | ||
497 | else | ||
498 | sq_config = VC_ENABLE; | ||
499 | |||
500 | sq_config |= (EXPORT_SRC_C | | ||
501 | CS_PRIO(0) | | ||
502 | LS_PRIO(0) | | ||
503 | HS_PRIO(0) | | ||
504 | PS_PRIO(0) | | ||
505 | VS_PRIO(1) | | ||
506 | GS_PRIO(2) | | ||
507 | ES_PRIO(3)); | ||
508 | |||
509 | sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) | | ||
510 | NUM_VS_GPRS(num_vs_gprs) | | ||
511 | NUM_CLAUSE_TEMP_GPRS(num_temp_gprs)); | ||
512 | sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) | | ||
513 | NUM_ES_GPRS(num_es_gprs)); | ||
514 | sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) | | ||
515 | NUM_LS_GPRS(num_ls_gprs)); | ||
516 | sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) | | ||
517 | NUM_VS_THREADS(num_vs_threads) | | ||
518 | NUM_GS_THREADS(num_gs_threads) | | ||
519 | NUM_ES_THREADS(num_es_threads)); | ||
520 | sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) | | ||
521 | NUM_LS_THREADS(num_ls_threads)); | ||
522 | sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) | | ||
523 | NUM_VS_STACK_ENTRIES(num_vs_stack_entries)); | ||
524 | sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) | | ||
525 | NUM_ES_STACK_ENTRIES(num_es_stack_entries)); | ||
526 | sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) | | ||
527 | NUM_LS_STACK_ENTRIES(num_ls_stack_entries)); | ||
528 | |||
529 | /* disable dyn gprs */ | ||
530 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
531 | radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2); | ||
532 | radeon_ring_write(rdev, 0); | ||
533 | |||
534 | /* setup LDS */ | ||
535 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
536 | radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2); | ||
537 | radeon_ring_write(rdev, 0x10001000); | ||
538 | |||
539 | /* SQ config */ | ||
540 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11)); | ||
541 | radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2); | ||
542 | radeon_ring_write(rdev, sq_config); | ||
543 | radeon_ring_write(rdev, sq_gpr_resource_mgmt_1); | ||
544 | radeon_ring_write(rdev, sq_gpr_resource_mgmt_2); | ||
545 | radeon_ring_write(rdev, sq_gpr_resource_mgmt_3); | ||
546 | radeon_ring_write(rdev, 0); | ||
547 | radeon_ring_write(rdev, 0); | ||
548 | radeon_ring_write(rdev, sq_thread_resource_mgmt); | ||
549 | radeon_ring_write(rdev, sq_thread_resource_mgmt_2); | ||
550 | radeon_ring_write(rdev, sq_stack_resource_mgmt_1); | ||
551 | radeon_ring_write(rdev, sq_stack_resource_mgmt_2); | ||
552 | radeon_ring_write(rdev, sq_stack_resource_mgmt_3); | ||
553 | } | ||
554 | |||
555 | /* CONTEXT_CONTROL */ | ||
556 | radeon_ring_write(rdev, 0xc0012800); | ||
557 | radeon_ring_write(rdev, 0x80000000); | ||
558 | radeon_ring_write(rdev, 0x80000000); | ||
559 | |||
560 | /* SQ_VTX_BASE_VTX_LOC */ | ||
561 | radeon_ring_write(rdev, 0xc0026f00); | ||
562 | radeon_ring_write(rdev, 0x00000000); | ||
563 | radeon_ring_write(rdev, 0x00000000); | ||
564 | radeon_ring_write(rdev, 0x00000000); | ||
565 | |||
566 | /* SET_SAMPLER */ | ||
567 | radeon_ring_write(rdev, 0xc0036e00); | ||
568 | radeon_ring_write(rdev, 0x00000000); | ||
569 | radeon_ring_write(rdev, 0x00000012); | ||
570 | radeon_ring_write(rdev, 0x00000000); | ||
571 | radeon_ring_write(rdev, 0x00000000); | ||
572 | |||
573 | /* set to DX10/11 mode */ | ||
574 | radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0)); | ||
575 | radeon_ring_write(rdev, 1); | ||
576 | |||
577 | /* emit an IB pointing at default state */ | ||
578 | dwords = ALIGN(rdev->r600_blit.state_len, 0x10); | ||
579 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; | ||
580 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | ||
581 | radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); | ||
582 | radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); | ||
583 | radeon_ring_write(rdev, dwords); | ||
584 | |||
585 | } | ||
586 | |||
587 | static inline uint32_t i2f(uint32_t input) | ||
588 | { | ||
589 | u32 result, i, exponent, fraction; | ||
590 | |||
591 | if ((input & 0x3fff) == 0) | ||
592 | result = 0; /* 0 is a special case */ | ||
593 | else { | ||
594 | exponent = 140; /* exponent biased by 127; */ | ||
595 | fraction = (input & 0x3fff) << 10; /* cheat and only | ||
596 | handle numbers below 2^^15 */ | ||
597 | for (i = 0; i < 14; i++) { | ||
598 | if (fraction & 0x800000) | ||
599 | break; | ||
600 | else { | ||
601 | fraction = fraction << 1; /* keep | ||
602 | shifting left until top bit = 1 */ | ||
603 | exponent = exponent - 1; | ||
604 | } | ||
605 | } | ||
606 | result = exponent << 23 | (fraction & 0x7fffff); /* mask | ||
607 | off top bit; assumed 1 */ | ||
608 | } | ||
609 | return result; | ||
610 | } | ||
611 | |||
612 | int evergreen_blit_init(struct radeon_device *rdev) | ||
613 | { | ||
614 | u32 obj_size; | ||
615 | int i, r, dwords; | ||
616 | void *ptr; | ||
617 | u32 packet2s[16]; | ||
618 | int num_packet2s = 0; | ||
619 | |||
620 | /* pin copy shader into vram if already initialized */ | ||
621 | if (rdev->r600_blit.shader_obj) | ||
622 | goto done; | ||
623 | |||
624 | mutex_init(&rdev->r600_blit.mutex); | ||
625 | rdev->r600_blit.state_offset = 0; | ||
626 | |||
627 | if (rdev->family < CHIP_CAYMAN) | ||
628 | rdev->r600_blit.state_len = evergreen_default_size; | ||
629 | else | ||
630 | rdev->r600_blit.state_len = cayman_default_size; | ||
631 | |||
632 | dwords = rdev->r600_blit.state_len; | ||
633 | while (dwords & 0xf) { | ||
634 | packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0)); | ||
635 | dwords++; | ||
636 | } | ||
637 | |||
638 | obj_size = dwords * 4; | ||
639 | obj_size = ALIGN(obj_size, 256); | ||
640 | |||
641 | rdev->r600_blit.vs_offset = obj_size; | ||
642 | if (rdev->family < CHIP_CAYMAN) | ||
643 | obj_size += evergreen_vs_size * 4; | ||
644 | else | ||
645 | obj_size += cayman_vs_size * 4; | ||
646 | obj_size = ALIGN(obj_size, 256); | ||
647 | |||
648 | rdev->r600_blit.ps_offset = obj_size; | ||
649 | if (rdev->family < CHIP_CAYMAN) | ||
650 | obj_size += evergreen_ps_size * 4; | ||
651 | else | ||
652 | obj_size += cayman_ps_size * 4; | ||
653 | obj_size = ALIGN(obj_size, 256); | ||
654 | |||
655 | r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, | ||
656 | &rdev->r600_blit.shader_obj); | ||
657 | if (r) { | ||
658 | DRM_ERROR("evergreen failed to allocate shader\n"); | ||
659 | return r; | ||
660 | } | ||
661 | |||
662 | DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n", | ||
663 | obj_size, | ||
664 | rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); | ||
665 | |||
666 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
667 | if (unlikely(r != 0)) | ||
668 | return r; | ||
669 | r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr); | ||
670 | if (r) { | ||
671 | DRM_ERROR("failed to map blit object %d\n", r); | ||
672 | return r; | ||
673 | } | ||
674 | |||
675 | if (rdev->family < CHIP_CAYMAN) { | ||
676 | memcpy_toio(ptr + rdev->r600_blit.state_offset, | ||
677 | evergreen_default_state, rdev->r600_blit.state_len * 4); | ||
678 | |||
679 | if (num_packet2s) | ||
680 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), | ||
681 | packet2s, num_packet2s * 4); | ||
682 | for (i = 0; i < evergreen_vs_size; i++) | ||
683 | *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]); | ||
684 | for (i = 0; i < evergreen_ps_size; i++) | ||
685 | *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]); | ||
686 | } else { | ||
687 | memcpy_toio(ptr + rdev->r600_blit.state_offset, | ||
688 | cayman_default_state, rdev->r600_blit.state_len * 4); | ||
689 | |||
690 | if (num_packet2s) | ||
691 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), | ||
692 | packet2s, num_packet2s * 4); | ||
693 | for (i = 0; i < cayman_vs_size; i++) | ||
694 | *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]); | ||
695 | for (i = 0; i < cayman_ps_size; i++) | ||
696 | *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]); | ||
697 | } | ||
698 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); | ||
699 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
700 | |||
701 | done: | ||
702 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
703 | if (unlikely(r != 0)) | ||
704 | return r; | ||
705 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | ||
706 | &rdev->r600_blit.shader_gpu_addr); | ||
707 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
708 | if (r) { | ||
709 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); | ||
710 | return r; | ||
711 | } | ||
712 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); | ||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | void evergreen_blit_fini(struct radeon_device *rdev) | ||
717 | { | ||
718 | int r; | ||
719 | |||
720 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | ||
721 | if (rdev->r600_blit.shader_obj == NULL) | ||
722 | return; | ||
723 | /* If we can't reserve the bo, unref should be enough to destroy | ||
724 | * it when it becomes idle. | ||
725 | */ | ||
726 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
727 | if (!r) { | ||
728 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
729 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
730 | } | ||
731 | radeon_bo_unref(&rdev->r600_blit.shader_obj); | ||
732 | } | ||
733 | |||
734 | static int evergreen_vb_ib_get(struct radeon_device *rdev) | ||
735 | { | ||
736 | int r; | ||
737 | r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib); | ||
738 | if (r) { | ||
739 | DRM_ERROR("failed to get IB for vertex buffer\n"); | ||
740 | return r; | ||
741 | } | ||
742 | |||
743 | rdev->r600_blit.vb_total = 64*1024; | ||
744 | rdev->r600_blit.vb_used = 0; | ||
745 | return 0; | ||
746 | } | ||
747 | |||
748 | static void evergreen_vb_ib_put(struct radeon_device *rdev) | ||
749 | { | ||
750 | radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); | ||
751 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | ||
752 | } | ||
753 | |||
754 | int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) | ||
755 | { | ||
756 | int r; | ||
757 | int ring_size, line_size; | ||
758 | int max_size; | ||
759 | /* loops of emits + fence emit possible */ | ||
760 | int dwords_per_loop = 74, num_loops; | ||
761 | |||
762 | r = evergreen_vb_ib_get(rdev); | ||
763 | if (r) | ||
764 | return r; | ||
765 | |||
766 | /* 8 bpp vs 32 bpp for xfer unit */ | ||
767 | if (size_bytes & 3) | ||
768 | line_size = 8192; | ||
769 | else | ||
770 | line_size = 8192 * 4; | ||
771 | |||
772 | max_size = 8192 * line_size; | ||
773 | |||
774 | /* major loops cover the max size transfer */ | ||
775 | num_loops = ((size_bytes + max_size) / max_size); | ||
776 | /* minor loops cover the extra non aligned bits */ | ||
777 | num_loops += ((size_bytes % line_size) ? 1 : 0); | ||
778 | /* calculate number of loops correctly */ | ||
779 | ring_size = num_loops * dwords_per_loop; | ||
780 | /* set default + shaders */ | ||
781 | ring_size += 55; /* shaders + def state */ | ||
782 | ring_size += 10; /* fence emit for VB IB */ | ||
783 | ring_size += 5; /* done copy */ | ||
784 | ring_size += 10; /* fence emit for done copy */ | ||
785 | r = radeon_ring_lock(rdev, ring_size); | ||
786 | if (r) | ||
787 | return r; | ||
788 | |||
789 | set_default_state(rdev); /* 36 */ | ||
790 | set_shaders(rdev); /* 16 */ | ||
791 | return 0; | ||
792 | } | ||
793 | |||
794 | void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) | ||
795 | { | ||
796 | int r; | ||
797 | |||
798 | if (rdev->r600_blit.vb_ib) | ||
799 | evergreen_vb_ib_put(rdev); | ||
800 | |||
801 | if (fence) | ||
802 | r = radeon_fence_emit(rdev, fence); | ||
803 | |||
804 | radeon_ring_unlock_commit(rdev); | ||
805 | } | ||
806 | |||
807 | void evergreen_kms_blit_copy(struct radeon_device *rdev, | ||
808 | u64 src_gpu_addr, u64 dst_gpu_addr, | ||
809 | int size_bytes) | ||
810 | { | ||
811 | int max_bytes; | ||
812 | u64 vb_gpu_addr; | ||
813 | u32 *vb; | ||
814 | |||
815 | DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, | ||
816 | size_bytes, rdev->r600_blit.vb_used); | ||
817 | vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); | ||
818 | if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { | ||
819 | max_bytes = 8192; | ||
820 | |||
821 | while (size_bytes) { | ||
822 | int cur_size = size_bytes; | ||
823 | int src_x = src_gpu_addr & 255; | ||
824 | int dst_x = dst_gpu_addr & 255; | ||
825 | int h = 1; | ||
826 | src_gpu_addr = src_gpu_addr & ~255ULL; | ||
827 | dst_gpu_addr = dst_gpu_addr & ~255ULL; | ||
828 | |||
829 | if (!src_x && !dst_x) { | ||
830 | h = (cur_size / max_bytes); | ||
831 | if (h > 8192) | ||
832 | h = 8192; | ||
833 | if (h == 0) | ||
834 | h = 1; | ||
835 | else | ||
836 | cur_size = max_bytes; | ||
837 | } else { | ||
838 | if (cur_size > max_bytes) | ||
839 | cur_size = max_bytes; | ||
840 | if (cur_size > (max_bytes - dst_x)) | ||
841 | cur_size = (max_bytes - dst_x); | ||
842 | if (cur_size > (max_bytes - src_x)) | ||
843 | cur_size = (max_bytes - src_x); | ||
844 | } | ||
845 | |||
846 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { | ||
847 | WARN_ON(1); | ||
848 | } | ||
849 | |||
850 | vb[0] = i2f(dst_x); | ||
851 | vb[1] = 0; | ||
852 | vb[2] = i2f(src_x); | ||
853 | vb[3] = 0; | ||
854 | |||
855 | vb[4] = i2f(dst_x); | ||
856 | vb[5] = i2f(h); | ||
857 | vb[6] = i2f(src_x); | ||
858 | vb[7] = i2f(h); | ||
859 | |||
860 | vb[8] = i2f(dst_x + cur_size); | ||
861 | vb[9] = i2f(h); | ||
862 | vb[10] = i2f(src_x + cur_size); | ||
863 | vb[11] = i2f(h); | ||
864 | |||
865 | /* src 10 */ | ||
866 | set_tex_resource(rdev, FMT_8, | ||
867 | src_x + cur_size, h, src_x + cur_size, | ||
868 | src_gpu_addr); | ||
869 | |||
870 | /* 5 */ | ||
871 | cp_set_surface_sync(rdev, | ||
872 | PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); | ||
873 | |||
874 | |||
875 | /* dst 17 */ | ||
876 | set_render_target(rdev, COLOR_8, | ||
877 | dst_x + cur_size, h, | ||
878 | dst_gpu_addr); | ||
879 | |||
880 | /* scissors 12 */ | ||
881 | set_scissors(rdev, dst_x, 0, dst_x + cur_size, h); | ||
882 | |||
883 | /* 15 */ | ||
884 | vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; | ||
885 | set_vtx_resource(rdev, vb_gpu_addr); | ||
886 | |||
887 | /* draw 10 */ | ||
888 | draw_auto(rdev); | ||
889 | |||
890 | /* 5 */ | ||
891 | cp_set_surface_sync(rdev, | ||
892 | PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, | ||
893 | cur_size * h, dst_gpu_addr); | ||
894 | |||
895 | vb += 12; | ||
896 | rdev->r600_blit.vb_used += 12 * 4; | ||
897 | |||
898 | src_gpu_addr += cur_size * h; | ||
899 | dst_gpu_addr += cur_size * h; | ||
900 | size_bytes -= cur_size * h; | ||
901 | } | ||
902 | } else { | ||
903 | max_bytes = 8192 * 4; | ||
904 | |||
905 | while (size_bytes) { | ||
906 | int cur_size = size_bytes; | ||
907 | int src_x = (src_gpu_addr & 255); | ||
908 | int dst_x = (dst_gpu_addr & 255); | ||
909 | int h = 1; | ||
910 | src_gpu_addr = src_gpu_addr & ~255ULL; | ||
911 | dst_gpu_addr = dst_gpu_addr & ~255ULL; | ||
912 | |||
913 | if (!src_x && !dst_x) { | ||
914 | h = (cur_size / max_bytes); | ||
915 | if (h > 8192) | ||
916 | h = 8192; | ||
917 | if (h == 0) | ||
918 | h = 1; | ||
919 | else | ||
920 | cur_size = max_bytes; | ||
921 | } else { | ||
922 | if (cur_size > max_bytes) | ||
923 | cur_size = max_bytes; | ||
924 | if (cur_size > (max_bytes - dst_x)) | ||
925 | cur_size = (max_bytes - dst_x); | ||
926 | if (cur_size > (max_bytes - src_x)) | ||
927 | cur_size = (max_bytes - src_x); | ||
928 | } | ||
929 | |||
930 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { | ||
931 | WARN_ON(1); | ||
932 | } | ||
933 | |||
934 | vb[0] = i2f(dst_x / 4); | ||
935 | vb[1] = 0; | ||
936 | vb[2] = i2f(src_x / 4); | ||
937 | vb[3] = 0; | ||
938 | |||
939 | vb[4] = i2f(dst_x / 4); | ||
940 | vb[5] = i2f(h); | ||
941 | vb[6] = i2f(src_x / 4); | ||
942 | vb[7] = i2f(h); | ||
943 | |||
944 | vb[8] = i2f((dst_x + cur_size) / 4); | ||
945 | vb[9] = i2f(h); | ||
946 | vb[10] = i2f((src_x + cur_size) / 4); | ||
947 | vb[11] = i2f(h); | ||
948 | |||
949 | /* src 10 */ | ||
950 | set_tex_resource(rdev, FMT_8_8_8_8, | ||
951 | (src_x + cur_size) / 4, | ||
952 | h, (src_x + cur_size) / 4, | ||
953 | src_gpu_addr); | ||
954 | /* 5 */ | ||
955 | cp_set_surface_sync(rdev, | ||
956 | PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); | ||
957 | |||
958 | /* dst 17 */ | ||
959 | set_render_target(rdev, COLOR_8_8_8_8, | ||
960 | (dst_x + cur_size) / 4, h, | ||
961 | dst_gpu_addr); | ||
962 | |||
963 | /* scissors 12 */ | ||
964 | set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h); | ||
965 | |||
966 | /* Vertex buffer setup 15 */ | ||
967 | vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; | ||
968 | set_vtx_resource(rdev, vb_gpu_addr); | ||
969 | |||
970 | /* draw 10 */ | ||
971 | draw_auto(rdev); | ||
972 | |||
973 | /* 5 */ | ||
974 | cp_set_surface_sync(rdev, | ||
975 | PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, | ||
976 | cur_size * h, dst_gpu_addr); | ||
977 | |||
978 | /* 74 ring dwords per loop */ | ||
979 | vb += 12; | ||
980 | rdev->r600_blit.vb_used += 12 * 4; | ||
981 | |||
982 | src_gpu_addr += cur_size * h; | ||
983 | dst_gpu_addr += cur_size * h; | ||
984 | size_bytes -= cur_size * h; | ||
985 | } | ||
986 | } | ||
987 | } | ||
988 | |||
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c new file mode 100644 index 000000000000..3a10399e0066 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c | |||
@@ -0,0 +1,356 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Alex Deucher <alexander.deucher@amd.com> | ||
25 | */ | ||
26 | |||
27 | #include <linux/types.h> | ||
28 | #include <linux/kernel.h> | ||
29 | |||
30 | /* | ||
31 | * evergreen cards need to use the 3D engine to blit data which requires | ||
32 | * quite a bit of hw state setup. Rather than pull the whole 3D driver | ||
33 | * (which normally generates the 3D state) into the DRM, we opt to use | ||
34 | * statically generated state tables. The regsiter state and shaders | ||
35 | * were hand generated to support blitting functionality. See the 3D | ||
36 | * driver or documentation for descriptions of the registers and | ||
37 | * shader instructions. | ||
38 | */ | ||
39 | |||
40 | const u32 evergreen_default_state[] = | ||
41 | { | ||
42 | 0xc0016900, | ||
43 | 0x0000023b, | ||
44 | 0x00000000, /* SQ_LDS_ALLOC_PS */ | ||
45 | |||
46 | 0xc0066900, | ||
47 | 0x00000240, | ||
48 | 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */ | ||
49 | 0x00000000, | ||
50 | 0x00000000, | ||
51 | 0x00000000, | ||
52 | 0x00000000, | ||
53 | 0x00000000, | ||
54 | |||
55 | 0xc0046900, | ||
56 | 0x00000247, | ||
57 | 0x00000000, /* SQ_GS_VERT_ITEMSIZE */ | ||
58 | 0x00000000, | ||
59 | 0x00000000, | ||
60 | 0x00000000, | ||
61 | |||
62 | 0xc0026900, | ||
63 | 0x00000010, | ||
64 | 0x00000000, /* DB_Z_INFO */ | ||
65 | 0x00000000, /* DB_STENCIL_INFO */ | ||
66 | |||
67 | 0xc0016900, | ||
68 | 0x00000200, | ||
69 | 0x00000000, /* DB_DEPTH_CONTROL */ | ||
70 | |||
71 | 0xc0066900, | ||
72 | 0x00000000, | ||
73 | 0x00000060, /* DB_RENDER_CONTROL */ | ||
74 | 0x00000000, /* DB_COUNT_CONTROL */ | ||
75 | 0x00000000, /* DB_DEPTH_VIEW */ | ||
76 | 0x0000002a, /* DB_RENDER_OVERRIDE */ | ||
77 | 0x00000000, /* DB_RENDER_OVERRIDE2 */ | ||
78 | 0x00000000, /* DB_HTILE_DATA_BASE */ | ||
79 | |||
80 | 0xc0026900, | ||
81 | 0x0000000a, | ||
82 | 0x00000000, /* DB_STENCIL_CLEAR */ | ||
83 | 0x00000000, /* DB_DEPTH_CLEAR */ | ||
84 | |||
85 | 0xc0016900, | ||
86 | 0x000002dc, | ||
87 | 0x0000aa00, /* DB_ALPHA_TO_MASK */ | ||
88 | |||
89 | 0xc0016900, | ||
90 | 0x00000080, | ||
91 | 0x00000000, /* PA_SC_WINDOW_OFFSET */ | ||
92 | |||
93 | 0xc00d6900, | ||
94 | 0x00000083, | ||
95 | 0x0000ffff, /* PA_SC_CLIPRECT_RULE */ | ||
96 | 0x00000000, /* PA_SC_CLIPRECT_0_TL */ | ||
97 | 0x20002000, /* PA_SC_CLIPRECT_0_BR */ | ||
98 | 0x00000000, | ||
99 | 0x20002000, | ||
100 | 0x00000000, | ||
101 | 0x20002000, | ||
102 | 0x00000000, | ||
103 | 0x20002000, | ||
104 | 0xaaaaaaaa, /* PA_SC_EDGERULE */ | ||
105 | 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */ | ||
106 | 0x0000000f, /* CB_TARGET_MASK */ | ||
107 | 0x0000000f, /* CB_SHADER_MASK */ | ||
108 | |||
109 | 0xc0226900, | ||
110 | 0x00000094, | ||
111 | 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */ | ||
112 | 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */ | ||
113 | 0x80000000, | ||
114 | 0x20002000, | ||
115 | 0x80000000, | ||
116 | 0x20002000, | ||
117 | 0x80000000, | ||
118 | 0x20002000, | ||
119 | 0x80000000, | ||
120 | 0x20002000, | ||
121 | 0x80000000, | ||
122 | 0x20002000, | ||
123 | 0x80000000, | ||
124 | 0x20002000, | ||
125 | 0x80000000, | ||
126 | 0x20002000, | ||
127 | 0x80000000, | ||
128 | 0x20002000, | ||
129 | 0x80000000, | ||
130 | 0x20002000, | ||
131 | 0x80000000, | ||
132 | 0x20002000, | ||
133 | 0x80000000, | ||
134 | 0x20002000, | ||
135 | 0x80000000, | ||
136 | 0x20002000, | ||
137 | 0x80000000, | ||
138 | 0x20002000, | ||
139 | 0x80000000, | ||
140 | 0x20002000, | ||
141 | 0x80000000, | ||
142 | 0x20002000, | ||
143 | 0x00000000, /* PA_SC_VPORT_ZMIN_0 */ | ||
144 | 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */ | ||
145 | |||
146 | 0xc0016900, | ||
147 | 0x000000d4, | ||
148 | 0x00000000, /* SX_MISC */ | ||
149 | |||
150 | 0xc0026900, | ||
151 | 0x00000292, | ||
152 | 0x00000000, /* PA_SC_MODE_CNTL_0 */ | ||
153 | 0x00000000, /* PA_SC_MODE_CNTL_1 */ | ||
154 | |||
155 | 0xc0106900, | ||
156 | 0x00000300, | ||
157 | 0x00000000, /* PA_SC_LINE_CNTL */ | ||
158 | 0x00000000, /* PA_SC_AA_CONFIG */ | ||
159 | 0x00000005, /* PA_SU_VTX_CNTL */ | ||
160 | 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */ | ||
161 | 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */ | ||
162 | 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */ | ||
163 | 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */ | ||
164 | 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */ | ||
165 | 0x00000000, /* */ | ||
166 | 0x00000000, /* */ | ||
167 | 0x00000000, /* */ | ||
168 | 0x00000000, /* */ | ||
169 | 0x00000000, /* */ | ||
170 | 0x00000000, /* */ | ||
171 | 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */ | ||
172 | 0xffffffff, /* PA_SC_AA_MASK */ | ||
173 | |||
174 | 0xc00d6900, | ||
175 | 0x00000202, | ||
176 | 0x00cc0010, /* CB_COLOR_CONTROL */ | ||
177 | 0x00000210, /* DB_SHADER_CONTROL */ | ||
178 | 0x00010000, /* PA_CL_CLIP_CNTL */ | ||
179 | 0x00000004, /* PA_SU_SC_MODE_CNTL */ | ||
180 | 0x00000100, /* PA_CL_VTE_CNTL */ | ||
181 | 0x00000000, /* PA_CL_VS_OUT_CNTL */ | ||
182 | 0x00000000, /* PA_CL_NANINF_CNTL */ | ||
183 | 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */ | ||
184 | 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */ | ||
185 | 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */ | ||
186 | 0x00000000, /* */ | ||
187 | 0x00000000, /* */ | ||
188 | 0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */ | ||
189 | |||
190 | 0xc0066900, | ||
191 | 0x000002de, | ||
192 | 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */ | ||
193 | 0x00000000, /* */ | ||
194 | 0x00000000, /* */ | ||
195 | 0x00000000, /* */ | ||
196 | 0x00000000, /* */ | ||
197 | 0x00000000, /* */ | ||
198 | |||
199 | 0xc0016900, | ||
200 | 0x00000229, | ||
201 | 0x00000000, /* SQ_PGM_START_FS */ | ||
202 | |||
203 | 0xc0016900, | ||
204 | 0x0000022a, | ||
205 | 0x00000000, /* SQ_PGM_RESOURCES_FS */ | ||
206 | |||
207 | 0xc0096900, | ||
208 | 0x00000100, | ||
209 | 0x00ffffff, /* VGT_MAX_VTX_INDX */ | ||
210 | 0x00000000, /* */ | ||
211 | 0x00000000, /* */ | ||
212 | 0x00000000, /* */ | ||
213 | 0x00000000, /* SX_ALPHA_TEST_CONTROL */ | ||
214 | 0x00000000, /* CB_BLEND_RED */ | ||
215 | 0x00000000, /* CB_BLEND_GREEN */ | ||
216 | 0x00000000, /* CB_BLEND_BLUE */ | ||
217 | 0x00000000, /* CB_BLEND_ALPHA */ | ||
218 | |||
219 | 0xc0026900, | ||
220 | 0x000002a8, | ||
221 | 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */ | ||
222 | 0x00000000, /* */ | ||
223 | |||
224 | 0xc0026900, | ||
225 | 0x000002ad, | ||
226 | 0x00000000, /* VGT_REUSE_OFF */ | ||
227 | 0x00000000, /* */ | ||
228 | |||
229 | 0xc0116900, | ||
230 | 0x00000280, | ||
231 | 0x00000000, /* PA_SU_POINT_SIZE */ | ||
232 | 0x00000000, /* PA_SU_POINT_MINMAX */ | ||
233 | 0x00000008, /* PA_SU_LINE_CNTL */ | ||
234 | 0x00000000, /* PA_SC_LINE_STIPPLE */ | ||
235 | 0x00000000, /* VGT_OUTPUT_PATH_CNTL */ | ||
236 | 0x00000000, /* VGT_HOS_CNTL */ | ||
237 | 0x00000000, /* */ | ||
238 | 0x00000000, /* */ | ||
239 | 0x00000000, /* */ | ||
240 | 0x00000000, /* */ | ||
241 | 0x00000000, /* */ | ||
242 | 0x00000000, /* */ | ||
243 | 0x00000000, /* */ | ||
244 | 0x00000000, /* */ | ||
245 | 0x00000000, /* */ | ||
246 | 0x00000000, /* */ | ||
247 | 0x00000000, /* VGT_GS_MODE */ | ||
248 | |||
249 | 0xc0016900, | ||
250 | 0x000002a1, | ||
251 | 0x00000000, /* VGT_PRIMITIVEID_EN */ | ||
252 | |||
253 | 0xc0016900, | ||
254 | 0x000002a5, | ||
255 | 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */ | ||
256 | |||
257 | 0xc0016900, | ||
258 | 0x000002d5, | ||
259 | 0x00000000, /* VGT_SHADER_STAGES_EN */ | ||
260 | |||
261 | 0xc0026900, | ||
262 | 0x000002e5, | ||
263 | 0x00000000, /* VGT_STRMOUT_CONFIG */ | ||
264 | 0x00000000, /* */ | ||
265 | |||
266 | 0xc0016900, | ||
267 | 0x000001e0, | ||
268 | 0x00000000, /* CB_BLEND0_CONTROL */ | ||
269 | |||
270 | 0xc0016900, | ||
271 | 0x000001b1, | ||
272 | 0x00000000, /* SPI_VS_OUT_CONFIG */ | ||
273 | |||
274 | 0xc0016900, | ||
275 | 0x00000187, | ||
276 | 0x00000000, /* SPI_VS_OUT_ID_0 */ | ||
277 | |||
278 | 0xc0016900, | ||
279 | 0x00000191, | ||
280 | 0x00000100, /* SPI_PS_INPUT_CNTL_0 */ | ||
281 | |||
282 | 0xc00b6900, | ||
283 | 0x000001b3, | ||
284 | 0x20000001, /* SPI_PS_IN_CONTROL_0 */ | ||
285 | 0x00000000, /* SPI_PS_IN_CONTROL_1 */ | ||
286 | 0x00000000, /* SPI_INTERP_CONTROL_0 */ | ||
287 | 0x00000000, /* SPI_INPUT_Z */ | ||
288 | 0x00000000, /* SPI_FOG_CNTL */ | ||
289 | 0x00100000, /* SPI_BARYC_CNTL */ | ||
290 | 0x00000000, /* SPI_PS_IN_CONTROL_2 */ | ||
291 | 0x00000000, /* */ | ||
292 | 0x00000000, /* */ | ||
293 | 0x00000000, /* */ | ||
294 | 0x00000000, /* */ | ||
295 | |||
296 | 0xc0026900, | ||
297 | 0x00000316, | ||
298 | 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */ | ||
299 | 0x00000010, /* */ | ||
300 | }; | ||
301 | |||
302 | const u32 evergreen_vs[] = | ||
303 | { | ||
304 | 0x00000004, | ||
305 | 0x80800400, | ||
306 | 0x0000a03c, | ||
307 | 0x95000688, | ||
308 | 0x00004000, | ||
309 | 0x15200688, | ||
310 | 0x00000000, | ||
311 | 0x00000000, | ||
312 | 0x3c000000, | ||
313 | 0x67961001, | ||
314 | #ifdef __BIG_ENDIAN | ||
315 | 0x000a0000, | ||
316 | #else | ||
317 | 0x00080000, | ||
318 | #endif | ||
319 | 0x00000000, | ||
320 | 0x1c000000, | ||
321 | 0x67961000, | ||
322 | #ifdef __BIG_ENDIAN | ||
323 | 0x00020008, | ||
324 | #else | ||
325 | 0x00000008, | ||
326 | #endif | ||
327 | 0x00000000, | ||
328 | }; | ||
329 | |||
330 | const u32 evergreen_ps[] = | ||
331 | { | ||
332 | 0x00000003, | ||
333 | 0xa00c0000, | ||
334 | 0x00000008, | ||
335 | 0x80400000, | ||
336 | 0x00000000, | ||
337 | 0x95200688, | ||
338 | 0x00380400, | ||
339 | 0x00146b10, | ||
340 | 0x00380000, | ||
341 | 0x20146b10, | ||
342 | 0x00380400, | ||
343 | 0x40146b00, | ||
344 | 0x80380000, | ||
345 | 0x60146b00, | ||
346 | 0x00000000, | ||
347 | 0x00000000, | ||
348 | 0x00000010, | ||
349 | 0x000d1000, | ||
350 | 0xb0800000, | ||
351 | 0x00000000, | ||
352 | }; | ||
353 | |||
354 | const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps); | ||
355 | const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs); | ||
356 | const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state); | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.h b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h new file mode 100644 index 000000000000..bb8d6c751595 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h | |||
@@ -0,0 +1,35 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef EVERGREEN_BLIT_SHADERS_H | ||
26 | #define EVERGREEN_BLIT_SHADERS_H | ||
27 | |||
28 | extern const u32 evergreen_ps[]; | ||
29 | extern const u32 evergreen_vs[]; | ||
30 | extern const u32 evergreen_default_state[]; | ||
31 | |||
32 | extern const u32 evergreen_ps_size, evergreen_vs_size; | ||
33 | extern const u32 evergreen_default_size; | ||
34 | |||
35 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 345a75a03c96..23d36417158d 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "evergreend.h" | 30 | #include "evergreend.h" |
31 | #include "evergreen_reg_safe.h" | 31 | #include "evergreen_reg_safe.h" |
32 | #include "cayman_reg_safe.h" | ||
32 | 33 | ||
33 | static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, | 34 | static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, |
34 | struct radeon_cs_reloc **cs_reloc); | 35 | struct radeon_cs_reloc **cs_reloc); |
@@ -292,33 +293,28 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
292 | if (wait_reg_mem.type != PACKET_TYPE3 || | 293 | if (wait_reg_mem.type != PACKET_TYPE3 || |
293 | wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { | 294 | wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { |
294 | DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); | 295 | DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); |
295 | r = -EINVAL; | 296 | return -EINVAL; |
296 | return r; | ||
297 | } | 297 | } |
298 | 298 | ||
299 | wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); | 299 | wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); |
300 | /* bit 4 is reg (0) or mem (1) */ | 300 | /* bit 4 is reg (0) or mem (1) */ |
301 | if (wait_reg_mem_info & 0x10) { | 301 | if (wait_reg_mem_info & 0x10) { |
302 | DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); | 302 | DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); |
303 | r = -EINVAL; | 303 | return -EINVAL; |
304 | return r; | ||
305 | } | 304 | } |
306 | /* waiting for value to be equal */ | 305 | /* waiting for value to be equal */ |
307 | if ((wait_reg_mem_info & 0x7) != 0x3) { | 306 | if ((wait_reg_mem_info & 0x7) != 0x3) { |
308 | DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); | 307 | DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); |
309 | r = -EINVAL; | 308 | return -EINVAL; |
310 | return r; | ||
311 | } | 309 | } |
312 | if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) { | 310 | if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) { |
313 | DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); | 311 | DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); |
314 | r = -EINVAL; | 312 | return -EINVAL; |
315 | return r; | ||
316 | } | 313 | } |
317 | 314 | ||
318 | if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) { | 315 | if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) { |
319 | DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); | 316 | DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); |
320 | r = -EINVAL; | 317 | return -EINVAL; |
321 | return r; | ||
322 | } | 318 | } |
323 | 319 | ||
324 | /* jump over the NOP */ | 320 | /* jump over the NOP */ |
@@ -336,8 +332,7 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
336 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 332 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
337 | if (!obj) { | 333 | if (!obj) { |
338 | DRM_ERROR("cannot find crtc %d\n", crtc_id); | 334 | DRM_ERROR("cannot find crtc %d\n", crtc_id); |
339 | r = -EINVAL; | 335 | return -EINVAL; |
340 | goto out; | ||
341 | } | 336 | } |
342 | crtc = obj_to_crtc(obj); | 337 | crtc = obj_to_crtc(obj); |
343 | radeon_crtc = to_radeon_crtc(crtc); | 338 | radeon_crtc = to_radeon_crtc(crtc); |
@@ -362,12 +357,10 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
362 | break; | 357 | break; |
363 | default: | 358 | default: |
364 | DRM_ERROR("unknown crtc reloc\n"); | 359 | DRM_ERROR("unknown crtc reloc\n"); |
365 | r = -EINVAL; | 360 | return -EINVAL; |
366 | goto out; | ||
367 | } | 361 | } |
368 | } | 362 | } |
369 | out: | 363 | return 0; |
370 | return r; | ||
371 | } | 364 | } |
372 | 365 | ||
373 | static int evergreen_packet0_check(struct radeon_cs_parser *p, | 366 | static int evergreen_packet0_check(struct radeon_cs_parser *p, |
@@ -425,21 +418,31 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3 | |||
425 | { | 418 | { |
426 | struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; | 419 | struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; |
427 | struct radeon_cs_reloc *reloc; | 420 | struct radeon_cs_reloc *reloc; |
428 | u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); | 421 | u32 last_reg; |
429 | u32 m, i, tmp, *ib; | 422 | u32 m, i, tmp, *ib; |
430 | int r; | 423 | int r; |
431 | 424 | ||
425 | if (p->rdev->family >= CHIP_CAYMAN) | ||
426 | last_reg = ARRAY_SIZE(cayman_reg_safe_bm); | ||
427 | else | ||
428 | last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); | ||
429 | |||
432 | i = (reg >> 7); | 430 | i = (reg >> 7); |
433 | if (i > last_reg) { | 431 | if (i > last_reg) { |
434 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | 432 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); |
435 | return -EINVAL; | 433 | return -EINVAL; |
436 | } | 434 | } |
437 | m = 1 << ((reg >> 2) & 31); | 435 | m = 1 << ((reg >> 2) & 31); |
438 | if (!(evergreen_reg_safe_bm[i] & m)) | 436 | if (p->rdev->family >= CHIP_CAYMAN) { |
439 | return 0; | 437 | if (!(cayman_reg_safe_bm[i] & m)) |
438 | return 0; | ||
439 | } else { | ||
440 | if (!(evergreen_reg_safe_bm[i] & m)) | ||
441 | return 0; | ||
442 | } | ||
440 | ib = p->ib->ptr; | 443 | ib = p->ib->ptr; |
441 | switch (reg) { | 444 | switch (reg) { |
442 | /* force following reg to 0 in an attemp to disable out buffer | 445 | /* force following reg to 0 in an attempt to disable out buffer |
443 | * which will need us to better understand how it works to perform | 446 | * which will need us to better understand how it works to perform |
444 | * security check on it (Jerome) | 447 | * security check on it (Jerome) |
445 | */ | 448 | */ |
@@ -468,12 +471,42 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3 | |||
468 | case SQ_VSTMP_RING_ITEMSIZE: | 471 | case SQ_VSTMP_RING_ITEMSIZE: |
469 | case VGT_TF_RING_SIZE: | 472 | case VGT_TF_RING_SIZE: |
470 | /* get value to populate the IB don't remove */ | 473 | /* get value to populate the IB don't remove */ |
471 | tmp =radeon_get_ib_value(p, idx); | 474 | /*tmp =radeon_get_ib_value(p, idx); |
472 | ib[idx] = 0; | 475 | ib[idx] = 0;*/ |
476 | break; | ||
477 | case SQ_ESGS_RING_BASE: | ||
478 | case SQ_GSVS_RING_BASE: | ||
479 | case SQ_ESTMP_RING_BASE: | ||
480 | case SQ_GSTMP_RING_BASE: | ||
481 | case SQ_HSTMP_RING_BASE: | ||
482 | case SQ_LSTMP_RING_BASE: | ||
483 | case SQ_PSTMP_RING_BASE: | ||
484 | case SQ_VSTMP_RING_BASE: | ||
485 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
486 | if (r) { | ||
487 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
488 | "0x%04X\n", reg); | ||
489 | return -EINVAL; | ||
490 | } | ||
491 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
473 | break; | 492 | break; |
474 | case DB_DEPTH_CONTROL: | 493 | case DB_DEPTH_CONTROL: |
475 | track->db_depth_control = radeon_get_ib_value(p, idx); | 494 | track->db_depth_control = radeon_get_ib_value(p, idx); |
476 | break; | 495 | break; |
496 | case CAYMAN_DB_EQAA: | ||
497 | if (p->rdev->family < CHIP_CAYMAN) { | ||
498 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
499 | "0x%04X\n", reg); | ||
500 | return -EINVAL; | ||
501 | } | ||
502 | break; | ||
503 | case CAYMAN_DB_DEPTH_INFO: | ||
504 | if (p->rdev->family < CHIP_CAYMAN) { | ||
505 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
506 | "0x%04X\n", reg); | ||
507 | return -EINVAL; | ||
508 | } | ||
509 | break; | ||
477 | case DB_Z_INFO: | 510 | case DB_Z_INFO: |
478 | r = evergreen_cs_packet_next_reloc(p, &reloc); | 511 | r = evergreen_cs_packet_next_reloc(p, &reloc); |
479 | if (r) { | 512 | if (r) { |
@@ -559,9 +592,23 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3 | |||
559 | track->cb_shader_mask = radeon_get_ib_value(p, idx); | 592 | track->cb_shader_mask = radeon_get_ib_value(p, idx); |
560 | break; | 593 | break; |
561 | case PA_SC_AA_CONFIG: | 594 | case PA_SC_AA_CONFIG: |
595 | if (p->rdev->family >= CHIP_CAYMAN) { | ||
596 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
597 | "0x%04X\n", reg); | ||
598 | return -EINVAL; | ||
599 | } | ||
562 | tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK; | 600 | tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK; |
563 | track->nsamples = 1 << tmp; | 601 | track->nsamples = 1 << tmp; |
564 | break; | 602 | break; |
603 | case CAYMAN_PA_SC_AA_CONFIG: | ||
604 | if (p->rdev->family < CHIP_CAYMAN) { | ||
605 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
606 | "0x%04X\n", reg); | ||
607 | return -EINVAL; | ||
608 | } | ||
609 | tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK; | ||
610 | track->nsamples = 1 << tmp; | ||
611 | break; | ||
565 | case CB_COLOR0_VIEW: | 612 | case CB_COLOR0_VIEW: |
566 | case CB_COLOR1_VIEW: | 613 | case CB_COLOR1_VIEW: |
567 | case CB_COLOR2_VIEW: | 614 | case CB_COLOR2_VIEW: |
@@ -942,6 +989,37 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
942 | idx_value = radeon_get_ib_value(p, idx); | 989 | idx_value = radeon_get_ib_value(p, idx); |
943 | 990 | ||
944 | switch (pkt->opcode) { | 991 | switch (pkt->opcode) { |
992 | case PACKET3_SET_PREDICATION: | ||
993 | { | ||
994 | int pred_op; | ||
995 | int tmp; | ||
996 | if (pkt->count != 1) { | ||
997 | DRM_ERROR("bad SET PREDICATION\n"); | ||
998 | return -EINVAL; | ||
999 | } | ||
1000 | |||
1001 | tmp = radeon_get_ib_value(p, idx + 1); | ||
1002 | pred_op = (tmp >> 16) & 0x7; | ||
1003 | |||
1004 | /* for the clear predicate operation */ | ||
1005 | if (pred_op == 0) | ||
1006 | return 0; | ||
1007 | |||
1008 | if (pred_op > 2) { | ||
1009 | DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op); | ||
1010 | return -EINVAL; | ||
1011 | } | ||
1012 | |||
1013 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
1014 | if (r) { | ||
1015 | DRM_ERROR("bad SET PREDICATION\n"); | ||
1016 | return -EINVAL; | ||
1017 | } | ||
1018 | |||
1019 | ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); | ||
1020 | ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff); | ||
1021 | } | ||
1022 | break; | ||
945 | case PACKET3_CONTEXT_CONTROL: | 1023 | case PACKET3_CONTEXT_CONTROL: |
946 | if (pkt->count != 1) { | 1024 | if (pkt->count != 1) { |
947 | DRM_ERROR("bad CONTEXT_CONTROL\n"); | 1025 | DRM_ERROR("bad CONTEXT_CONTROL\n"); |
@@ -956,6 +1034,16 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
956 | return -EINVAL; | 1034 | return -EINVAL; |
957 | } | 1035 | } |
958 | break; | 1036 | break; |
1037 | case CAYMAN_PACKET3_DEALLOC_STATE: | ||
1038 | if (p->rdev->family < CHIP_CAYMAN) { | ||
1039 | DRM_ERROR("bad PACKET3_DEALLOC_STATE\n"); | ||
1040 | return -EINVAL; | ||
1041 | } | ||
1042 | if (pkt->count) { | ||
1043 | DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); | ||
1044 | return -EINVAL; | ||
1045 | } | ||
1046 | break; | ||
959 | case PACKET3_INDEX_BASE: | 1047 | case PACKET3_INDEX_BASE: |
960 | if (pkt->count != 1) { | 1048 | if (pkt->count != 1) { |
961 | DRM_ERROR("bad INDEX_BASE\n"); | 1049 | DRM_ERROR("bad INDEX_BASE\n"); |
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index 2330f3a36fd5..c781c92c3451 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h | |||
@@ -105,6 +105,11 @@ | |||
105 | #define EVERGREEN_GRPH_Y_START 0x6830 | 105 | #define EVERGREEN_GRPH_Y_START 0x6830 |
106 | #define EVERGREEN_GRPH_X_END 0x6834 | 106 | #define EVERGREEN_GRPH_X_END 0x6834 |
107 | #define EVERGREEN_GRPH_Y_END 0x6838 | 107 | #define EVERGREEN_GRPH_Y_END 0x6838 |
108 | #define EVERGREEN_GRPH_UPDATE 0x6844 | ||
109 | # define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2) | ||
110 | # define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16) | ||
111 | #define EVERGREEN_GRPH_FLIP_CONTROL 0x6848 | ||
112 | # define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0) | ||
108 | 113 | ||
109 | /* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */ | 114 | /* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */ |
110 | #define EVERGREEN_CUR_CONTROL 0x6998 | 115 | #define EVERGREEN_CUR_CONTROL 0x6998 |
@@ -178,6 +183,7 @@ | |||
178 | # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) | 183 | # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) |
179 | #define EVERGREEN_CRTC_STATUS 0x6e8c | 184 | #define EVERGREEN_CRTC_STATUS 0x6e8c |
180 | #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 | 185 | #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 |
186 | #define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 | ||
181 | #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 | 187 | #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 |
182 | 188 | ||
183 | #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 | 189 | #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 |
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 9b7532dd30f7..b7b2714f0b32 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -64,6 +64,8 @@ | |||
64 | #define GB_BACKEND_MAP 0x98FC | 64 | #define GB_BACKEND_MAP 0x98FC |
65 | #define DMIF_ADDR_CONFIG 0xBD4 | 65 | #define DMIF_ADDR_CONFIG 0xBD4 |
66 | #define HDP_ADDR_CONFIG 0x2F48 | 66 | #define HDP_ADDR_CONFIG 0x2F48 |
67 | #define HDP_MISC_CNTL 0x2F4C | ||
68 | #define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) | ||
67 | 69 | ||
68 | #define CC_SYS_RB_BACKEND_DISABLE 0x3F88 | 70 | #define CC_SYS_RB_BACKEND_DISABLE 0x3F88 |
69 | #define GC_USER_RB_BACKEND_DISABLE 0x9B7C | 71 | #define GC_USER_RB_BACKEND_DISABLE 0x9B7C |
@@ -98,6 +100,7 @@ | |||
98 | #define BUF_SWAP_32BIT (2 << 16) | 100 | #define BUF_SWAP_32BIT (2 << 16) |
99 | #define CP_RB_RPTR 0x8700 | 101 | #define CP_RB_RPTR 0x8700 |
100 | #define CP_RB_RPTR_ADDR 0xC10C | 102 | #define CP_RB_RPTR_ADDR 0xC10C |
103 | #define RB_RPTR_SWAP(x) ((x) << 0) | ||
101 | #define CP_RB_RPTR_ADDR_HI 0xC110 | 104 | #define CP_RB_RPTR_ADDR_HI 0xC110 |
102 | #define CP_RB_RPTR_WR 0xC108 | 105 | #define CP_RB_RPTR_WR 0xC108 |
103 | #define CP_RB_WPTR 0xC114 | 106 | #define CP_RB_WPTR 0xC114 |
@@ -164,22 +167,32 @@ | |||
164 | #define SE_SC_BUSY (1 << 29) | 167 | #define SE_SC_BUSY (1 << 29) |
165 | #define SE_DB_BUSY (1 << 30) | 168 | #define SE_DB_BUSY (1 << 30) |
166 | #define SE_CB_BUSY (1 << 31) | 169 | #define SE_CB_BUSY (1 << 31) |
167 | 170 | /* evergreen */ | |
171 | #define CG_THERMAL_CTRL 0x72c | ||
172 | #define TOFFSET_MASK 0x00003FE0 | ||
173 | #define TOFFSET_SHIFT 5 | ||
168 | #define CG_MULT_THERMAL_STATUS 0x740 | 174 | #define CG_MULT_THERMAL_STATUS 0x740 |
169 | #define ASIC_T(x) ((x) << 16) | 175 | #define ASIC_T(x) ((x) << 16) |
170 | #define ASIC_T_MASK 0x7FF0000 | 176 | #define ASIC_T_MASK 0x07FF0000 |
171 | #define ASIC_T_SHIFT 16 | 177 | #define ASIC_T_SHIFT 16 |
178 | #define CG_TS0_STATUS 0x760 | ||
179 | #define TS0_ADC_DOUT_MASK 0x000003FF | ||
180 | #define TS0_ADC_DOUT_SHIFT 0 | ||
181 | /* APU */ | ||
182 | #define CG_THERMAL_STATUS 0x678 | ||
172 | 183 | ||
173 | #define HDP_HOST_PATH_CNTL 0x2C00 | 184 | #define HDP_HOST_PATH_CNTL 0x2C00 |
174 | #define HDP_NONSURFACE_BASE 0x2C04 | 185 | #define HDP_NONSURFACE_BASE 0x2C04 |
175 | #define HDP_NONSURFACE_INFO 0x2C08 | 186 | #define HDP_NONSURFACE_INFO 0x2C08 |
176 | #define HDP_NONSURFACE_SIZE 0x2C0C | 187 | #define HDP_NONSURFACE_SIZE 0x2C0C |
188 | #define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 | ||
177 | #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 | 189 | #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 |
178 | #define HDP_TILING_CONFIG 0x2F3C | 190 | #define HDP_TILING_CONFIG 0x2F3C |
179 | 191 | ||
180 | #define MC_SHARED_CHMAP 0x2004 | 192 | #define MC_SHARED_CHMAP 0x2004 |
181 | #define NOOFCHAN_SHIFT 12 | 193 | #define NOOFCHAN_SHIFT 12 |
182 | #define NOOFCHAN_MASK 0x00003000 | 194 | #define NOOFCHAN_MASK 0x00003000 |
195 | #define MC_SHARED_CHREMAP 0x2008 | ||
183 | 196 | ||
184 | #define MC_ARB_RAMCFG 0x2760 | 197 | #define MC_ARB_RAMCFG 0x2760 |
185 | #define NOOFBANK_SHIFT 0 | 198 | #define NOOFBANK_SHIFT 0 |
@@ -195,10 +208,12 @@ | |||
195 | #define BURSTLENGTH_SHIFT 9 | 208 | #define BURSTLENGTH_SHIFT 9 |
196 | #define BURSTLENGTH_MASK 0x00000200 | 209 | #define BURSTLENGTH_MASK 0x00000200 |
197 | #define CHANSIZE_OVERRIDE (1 << 11) | 210 | #define CHANSIZE_OVERRIDE (1 << 11) |
211 | #define FUS_MC_ARB_RAMCFG 0x2768 | ||
198 | #define MC_VM_AGP_TOP 0x2028 | 212 | #define MC_VM_AGP_TOP 0x2028 |
199 | #define MC_VM_AGP_BOT 0x202C | 213 | #define MC_VM_AGP_BOT 0x202C |
200 | #define MC_VM_AGP_BASE 0x2030 | 214 | #define MC_VM_AGP_BASE 0x2030 |
201 | #define MC_VM_FB_LOCATION 0x2024 | 215 | #define MC_VM_FB_LOCATION 0x2024 |
216 | #define MC_FUS_VM_FB_OFFSET 0x2898 | ||
202 | #define MC_VM_MB_L1_TLB0_CNTL 0x2234 | 217 | #define MC_VM_MB_L1_TLB0_CNTL 0x2234 |
203 | #define MC_VM_MB_L1_TLB1_CNTL 0x2238 | 218 | #define MC_VM_MB_L1_TLB1_CNTL 0x2238 |
204 | #define MC_VM_MB_L1_TLB2_CNTL 0x223C | 219 | #define MC_VM_MB_L1_TLB2_CNTL 0x223C |
@@ -215,6 +230,11 @@ | |||
215 | #define MC_VM_MD_L1_TLB0_CNTL 0x2654 | 230 | #define MC_VM_MD_L1_TLB0_CNTL 0x2654 |
216 | #define MC_VM_MD_L1_TLB1_CNTL 0x2658 | 231 | #define MC_VM_MD_L1_TLB1_CNTL 0x2658 |
217 | #define MC_VM_MD_L1_TLB2_CNTL 0x265C | 232 | #define MC_VM_MD_L1_TLB2_CNTL 0x265C |
233 | |||
234 | #define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C | ||
235 | #define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660 | ||
236 | #define FUS_MC_VM_MD_L1_TLB2_CNTL 0x2664 | ||
237 | |||
218 | #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C | 238 | #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C |
219 | #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 | 239 | #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 |
220 | #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 | 240 | #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 |
@@ -235,6 +255,7 @@ | |||
235 | #define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) | 255 | #define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) |
236 | #define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) | 256 | #define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) |
237 | #define PA_SC_LINE_STIPPLE 0x28A0C | 257 | #define PA_SC_LINE_STIPPLE 0x28A0C |
258 | #define PA_SU_LINE_STIPPLE_VALUE 0x8A60 | ||
238 | #define PA_SC_LINE_STIPPLE_STATE 0x8B10 | 259 | #define PA_SC_LINE_STIPPLE_STATE 0x8B10 |
239 | 260 | ||
240 | #define SCRATCH_REG0 0x8500 | 261 | #define SCRATCH_REG0 0x8500 |
@@ -348,6 +369,9 @@ | |||
348 | #define SYNC_WALKER (1 << 25) | 369 | #define SYNC_WALKER (1 << 25) |
349 | #define SYNC_ALIGNER (1 << 26) | 370 | #define SYNC_ALIGNER (1 << 26) |
350 | 371 | ||
372 | #define TCP_CHAN_STEER_LO 0x960c | ||
373 | #define TCP_CHAN_STEER_HI 0x9610 | ||
374 | |||
351 | #define VGT_CACHE_INVALIDATION 0x88C4 | 375 | #define VGT_CACHE_INVALIDATION 0x88C4 |
352 | #define CACHE_INVALIDATION(x) ((x) << 0) | 376 | #define CACHE_INVALIDATION(x) ((x) << 0) |
353 | #define VC_ONLY 0 | 377 | #define VC_ONLY 0 |
@@ -412,6 +436,19 @@ | |||
412 | #define SOFT_RESET_REGBB (1 << 22) | 436 | #define SOFT_RESET_REGBB (1 << 22) |
413 | #define SOFT_RESET_ORB (1 << 23) | 437 | #define SOFT_RESET_ORB (1 << 23) |
414 | 438 | ||
439 | /* display watermarks */ | ||
440 | #define DC_LB_MEMORY_SPLIT 0x6b0c | ||
441 | #define PRIORITY_A_CNT 0x6b18 | ||
442 | #define PRIORITY_MARK_MASK 0x7fff | ||
443 | #define PRIORITY_OFF (1 << 16) | ||
444 | #define PRIORITY_ALWAYS_ON (1 << 20) | ||
445 | #define PRIORITY_B_CNT 0x6b1c | ||
446 | #define PIPE0_ARBITRATION_CONTROL3 0x0bf0 | ||
447 | # define LATENCY_WATERMARK_MASK(x) ((x) << 16) | ||
448 | #define PIPE0_LATENCY_CONTROL 0x0bf4 | ||
449 | # define LATENCY_LOW_WATERMARK(x) ((x) << 0) | ||
450 | # define LATENCY_HIGH_WATERMARK(x) ((x) << 16) | ||
451 | |||
415 | #define IH_RB_CNTL 0x3e00 | 452 | #define IH_RB_CNTL 0x3e00 |
416 | # define IH_RB_ENABLE (1 << 0) | 453 | # define IH_RB_ENABLE (1 << 0) |
417 | # define IH_IB_SIZE(x) ((x) << 1) /* log2 */ | 454 | # define IH_IB_SIZE(x) ((x) << 1) /* log2 */ |
@@ -429,7 +466,7 @@ | |||
429 | #define IH_RB_WPTR_ADDR_LO 0x3e14 | 466 | #define IH_RB_WPTR_ADDR_LO 0x3e14 |
430 | #define IH_CNTL 0x3e18 | 467 | #define IH_CNTL 0x3e18 |
431 | # define ENABLE_INTR (1 << 0) | 468 | # define ENABLE_INTR (1 << 0) |
432 | # define IH_MC_SWAP(x) ((x) << 2) | 469 | # define IH_MC_SWAP(x) ((x) << 1) |
433 | # define IH_MC_SWAP_NONE 0 | 470 | # define IH_MC_SWAP_NONE 0 |
434 | # define IH_MC_SWAP_16BIT 1 | 471 | # define IH_MC_SWAP_16BIT 1 |
435 | # define IH_MC_SWAP_32BIT 2 | 472 | # define IH_MC_SWAP_32BIT 2 |
@@ -510,7 +547,7 @@ | |||
510 | # define LB_D5_VBLANK_INTERRUPT (1 << 3) | 547 | # define LB_D5_VBLANK_INTERRUPT (1 << 3) |
511 | # define DC_HPD5_INTERRUPT (1 << 17) | 548 | # define DC_HPD5_INTERRUPT (1 << 17) |
512 | # define DC_HPD5_RX_INTERRUPT (1 << 18) | 549 | # define DC_HPD5_RX_INTERRUPT (1 << 18) |
513 | #define DISP_INTERRUPT_STATUS_CONTINUE5 0x6050 | 550 | #define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150 |
514 | # define LB_D6_VLINE_INTERRUPT (1 << 2) | 551 | # define LB_D6_VLINE_INTERRUPT (1 << 2) |
515 | # define LB_D6_VBLANK_INTERRUPT (1 << 3) | 552 | # define LB_D6_VBLANK_INTERRUPT (1 << 3) |
516 | # define DC_HPD6_INTERRUPT (1 << 17) | 553 | # define DC_HPD6_INTERRUPT (1 << 17) |
@@ -560,6 +597,44 @@ | |||
560 | # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) | 597 | # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) |
561 | # define DC_HPDx_EN (1 << 28) | 598 | # define DC_HPDx_EN (1 << 28) |
562 | 599 | ||
600 | /* PCIE link stuff */ | ||
601 | #define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ | ||
602 | #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ | ||
603 | # define LC_LINK_WIDTH_SHIFT 0 | ||
604 | # define LC_LINK_WIDTH_MASK 0x7 | ||
605 | # define LC_LINK_WIDTH_X0 0 | ||
606 | # define LC_LINK_WIDTH_X1 1 | ||
607 | # define LC_LINK_WIDTH_X2 2 | ||
608 | # define LC_LINK_WIDTH_X4 3 | ||
609 | # define LC_LINK_WIDTH_X8 4 | ||
610 | # define LC_LINK_WIDTH_X16 6 | ||
611 | # define LC_LINK_WIDTH_RD_SHIFT 4 | ||
612 | # define LC_LINK_WIDTH_RD_MASK 0x70 | ||
613 | # define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) | ||
614 | # define LC_RECONFIG_NOW (1 << 8) | ||
615 | # define LC_RENEGOTIATION_SUPPORT (1 << 9) | ||
616 | # define LC_RENEGOTIATE_EN (1 << 10) | ||
617 | # define LC_SHORT_RECONFIG_EN (1 << 11) | ||
618 | # define LC_UPCONFIGURE_SUPPORT (1 << 12) | ||
619 | # define LC_UPCONFIGURE_DIS (1 << 13) | ||
620 | #define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */ | ||
621 | # define LC_GEN2_EN_STRAP (1 << 0) | ||
622 | # define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1) | ||
623 | # define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5) | ||
624 | # define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6) | ||
625 | # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) | ||
626 | # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 | ||
627 | # define LC_CURRENT_DATA_RATE (1 << 11) | ||
628 | # define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) | ||
629 | # define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) | ||
630 | # define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) | ||
631 | # define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24) | ||
632 | #define MM_CFGREGS_CNTL 0x544c | ||
633 | # define MM_WR_TO_CFG_EN (1 << 3) | ||
634 | #define LINK_CNTL2 0x88 /* F0 */ | ||
635 | # define TARGET_LINK_SPEED_MASK (0xf << 0) | ||
636 | # define SELECTABLE_DEEMPHASIS (1 << 6) | ||
637 | |||
563 | /* | 638 | /* |
564 | * PM4 | 639 | * PM4 |
565 | */ | 640 | */ |
@@ -589,10 +664,11 @@ | |||
589 | #define PACKET3_NOP 0x10 | 664 | #define PACKET3_NOP 0x10 |
590 | #define PACKET3_SET_BASE 0x11 | 665 | #define PACKET3_SET_BASE 0x11 |
591 | #define PACKET3_CLEAR_STATE 0x12 | 666 | #define PACKET3_CLEAR_STATE 0x12 |
592 | #define PACKET3_INDIRECT_BUFFER_SIZE 0x13 | 667 | #define PACKET3_INDEX_BUFFER_SIZE 0x13 |
593 | #define PACKET3_DISPATCH_DIRECT 0x15 | 668 | #define PACKET3_DISPATCH_DIRECT 0x15 |
594 | #define PACKET3_DISPATCH_INDIRECT 0x16 | 669 | #define PACKET3_DISPATCH_INDIRECT 0x16 |
595 | #define PACKET3_INDIRECT_BUFFER_END 0x17 | 670 | #define PACKET3_INDIRECT_BUFFER_END 0x17 |
671 | #define PACKET3_MODE_CONTROL 0x18 | ||
596 | #define PACKET3_SET_PREDICATION 0x20 | 672 | #define PACKET3_SET_PREDICATION 0x20 |
597 | #define PACKET3_REG_RMW 0x21 | 673 | #define PACKET3_REG_RMW 0x21 |
598 | #define PACKET3_COND_EXEC 0x22 | 674 | #define PACKET3_COND_EXEC 0x22 |
@@ -630,14 +706,14 @@ | |||
630 | # define PACKET3_CB8_DEST_BASE_ENA (1 << 15) | 706 | # define PACKET3_CB8_DEST_BASE_ENA (1 << 15) |
631 | # define PACKET3_CB9_DEST_BASE_ENA (1 << 16) | 707 | # define PACKET3_CB9_DEST_BASE_ENA (1 << 16) |
632 | # define PACKET3_CB10_DEST_BASE_ENA (1 << 17) | 708 | # define PACKET3_CB10_DEST_BASE_ENA (1 << 17) |
633 | # define PACKET3_CB11_DEST_BASE_ENA (1 << 17) | 709 | # define PACKET3_CB11_DEST_BASE_ENA (1 << 18) |
634 | # define PACKET3_FULL_CACHE_ENA (1 << 20) | 710 | # define PACKET3_FULL_CACHE_ENA (1 << 20) |
635 | # define PACKET3_TC_ACTION_ENA (1 << 23) | 711 | # define PACKET3_TC_ACTION_ENA (1 << 23) |
636 | # define PACKET3_VC_ACTION_ENA (1 << 24) | 712 | # define PACKET3_VC_ACTION_ENA (1 << 24) |
637 | # define PACKET3_CB_ACTION_ENA (1 << 25) | 713 | # define PACKET3_CB_ACTION_ENA (1 << 25) |
638 | # define PACKET3_DB_ACTION_ENA (1 << 26) | 714 | # define PACKET3_DB_ACTION_ENA (1 << 26) |
639 | # define PACKET3_SH_ACTION_ENA (1 << 27) | 715 | # define PACKET3_SH_ACTION_ENA (1 << 27) |
640 | # define PACKET3_SMX_ACTION_ENA (1 << 28) | 716 | # define PACKET3_SX_ACTION_ENA (1 << 28) |
641 | #define PACKET3_ME_INITIALIZE 0x44 | 717 | #define PACKET3_ME_INITIALIZE 0x44 |
642 | #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) | 718 | #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) |
643 | #define PACKET3_COND_WRITE 0x45 | 719 | #define PACKET3_COND_WRITE 0x45 |
@@ -645,6 +721,8 @@ | |||
645 | #define PACKET3_EVENT_WRITE_EOP 0x47 | 721 | #define PACKET3_EVENT_WRITE_EOP 0x47 |
646 | #define PACKET3_EVENT_WRITE_EOS 0x48 | 722 | #define PACKET3_EVENT_WRITE_EOS 0x48 |
647 | #define PACKET3_PREAMBLE_CNTL 0x4A | 723 | #define PACKET3_PREAMBLE_CNTL 0x4A |
724 | # define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28) | ||
725 | # define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28) | ||
648 | #define PACKET3_RB_OFFSET 0x4B | 726 | #define PACKET3_RB_OFFSET 0x4B |
649 | #define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C | 727 | #define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C |
650 | #define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D | 728 | #define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D |
@@ -691,13 +769,21 @@ | |||
691 | 769 | ||
692 | #define SQ_CONST_MEM_BASE 0x8df8 | 770 | #define SQ_CONST_MEM_BASE 0x8df8 |
693 | 771 | ||
772 | #define SQ_ESGS_RING_BASE 0x8c40 | ||
694 | #define SQ_ESGS_RING_SIZE 0x8c44 | 773 | #define SQ_ESGS_RING_SIZE 0x8c44 |
774 | #define SQ_GSVS_RING_BASE 0x8c48 | ||
695 | #define SQ_GSVS_RING_SIZE 0x8c4c | 775 | #define SQ_GSVS_RING_SIZE 0x8c4c |
776 | #define SQ_ESTMP_RING_BASE 0x8c50 | ||
696 | #define SQ_ESTMP_RING_SIZE 0x8c54 | 777 | #define SQ_ESTMP_RING_SIZE 0x8c54 |
778 | #define SQ_GSTMP_RING_BASE 0x8c58 | ||
697 | #define SQ_GSTMP_RING_SIZE 0x8c5c | 779 | #define SQ_GSTMP_RING_SIZE 0x8c5c |
780 | #define SQ_VSTMP_RING_BASE 0x8c60 | ||
698 | #define SQ_VSTMP_RING_SIZE 0x8c64 | 781 | #define SQ_VSTMP_RING_SIZE 0x8c64 |
782 | #define SQ_PSTMP_RING_BASE 0x8c68 | ||
699 | #define SQ_PSTMP_RING_SIZE 0x8c6c | 783 | #define SQ_PSTMP_RING_SIZE 0x8c6c |
784 | #define SQ_LSTMP_RING_BASE 0x8e10 | ||
700 | #define SQ_LSTMP_RING_SIZE 0x8e14 | 785 | #define SQ_LSTMP_RING_SIZE 0x8e14 |
786 | #define SQ_HSTMP_RING_BASE 0x8e18 | ||
701 | #define SQ_HSTMP_RING_SIZE 0x8e1c | 787 | #define SQ_HSTMP_RING_SIZE 0x8e1c |
702 | #define VGT_TF_RING_SIZE 0x8988 | 788 | #define VGT_TF_RING_SIZE 0x8988 |
703 | 789 | ||
@@ -802,6 +888,11 @@ | |||
802 | #define SQ_ALU_CONST_CACHE_LS_14 0x28f78 | 888 | #define SQ_ALU_CONST_CACHE_LS_14 0x28f78 |
803 | #define SQ_ALU_CONST_CACHE_LS_15 0x28f7c | 889 | #define SQ_ALU_CONST_CACHE_LS_15 0x28f7c |
804 | 890 | ||
891 | #define PA_SC_SCREEN_SCISSOR_TL 0x28030 | ||
892 | #define PA_SC_GENERIC_SCISSOR_TL 0x28240 | ||
893 | #define PA_SC_WINDOW_SCISSOR_TL 0x28204 | ||
894 | #define VGT_PRIMITIVE_TYPE 0x8958 | ||
895 | |||
805 | #define DB_DEPTH_CONTROL 0x28800 | 896 | #define DB_DEPTH_CONTROL 0x28800 |
806 | #define DB_DEPTH_VIEW 0x28008 | 897 | #define DB_DEPTH_VIEW 0x28008 |
807 | #define DB_HTILE_DATA_BASE 0x28014 | 898 | #define DB_HTILE_DATA_BASE 0x28014 |
@@ -1024,5 +1115,14 @@ | |||
1024 | #define SQ_TEX_RESOURCE_WORD6_0 0x30018 | 1115 | #define SQ_TEX_RESOURCE_WORD6_0 0x30018 |
1025 | #define SQ_TEX_RESOURCE_WORD7_0 0x3001c | 1116 | #define SQ_TEX_RESOURCE_WORD7_0 0x3001c |
1026 | 1117 | ||
1118 | /* cayman 3D regs */ | ||
1119 | #define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B0 | ||
1120 | #define CAYMAN_DB_EQAA 0x28804 | ||
1121 | #define CAYMAN_DB_DEPTH_INFO 0x2803C | ||
1122 | #define CAYMAN_PA_SC_AA_CONFIG 0x28BE0 | ||
1123 | #define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0 | ||
1124 | #define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7 | ||
1125 | /* cayman packet3 addition */ | ||
1126 | #define CAYMAN_PACKET3_DEALLOC_STATE 0x14 | ||
1027 | 1127 | ||
1028 | #endif | 1128 | #endif |
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c index 607241c6a8a9..5a82b6b75849 100644 --- a/drivers/gpu/drm/radeon/mkregtable.c +++ b/drivers/gpu/drm/radeon/mkregtable.c | |||
@@ -673,8 +673,10 @@ static int parser_auth(struct table *t, const char *filename) | |||
673 | last_reg = strtol(last_reg_s, NULL, 16); | 673 | last_reg = strtol(last_reg_s, NULL, 16); |
674 | 674 | ||
675 | do { | 675 | do { |
676 | if (fgets(buf, 1024, file) == NULL) | 676 | if (fgets(buf, 1024, file) == NULL) { |
677 | fclose(file); | ||
677 | return -1; | 678 | return -1; |
679 | } | ||
678 | len = strlen(buf); | 680 | len = strlen(buf); |
679 | if (ftell(file) == end) | 681 | if (ftell(file) == end) |
680 | done = 1; | 682 | done = 1; |
@@ -685,6 +687,7 @@ static int parser_auth(struct table *t, const char *filename) | |||
685 | fprintf(stderr, | 687 | fprintf(stderr, |
686 | "Error matching regular expression %d in %s\n", | 688 | "Error matching regular expression %d in %s\n", |
687 | r, filename); | 689 | r, filename); |
690 | fclose(file); | ||
688 | return -1; | 691 | return -1; |
689 | } else { | 692 | } else { |
690 | buf[match[0].rm_eo] = 0; | 693 | buf[match[0].rm_eo] = 0; |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c new file mode 100644 index 000000000000..559dbd412906 --- /dev/null +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -0,0 +1,1594 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Alex Deucher | ||
23 | */ | ||
24 | #include <linux/firmware.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include "drmP.h" | ||
28 | #include "radeon.h" | ||
29 | #include "radeon_asic.h" | ||
30 | #include "radeon_drm.h" | ||
31 | #include "nid.h" | ||
32 | #include "atom.h" | ||
33 | #include "ni_reg.h" | ||
34 | #include "cayman_blit_shaders.h" | ||
35 | |||
36 | extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save); | ||
37 | extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save); | ||
38 | extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev); | ||
39 | extern void evergreen_mc_program(struct radeon_device *rdev); | ||
40 | extern void evergreen_irq_suspend(struct radeon_device *rdev); | ||
41 | extern int evergreen_mc_init(struct radeon_device *rdev); | ||
42 | |||
43 | #define EVERGREEN_PFP_UCODE_SIZE 1120 | ||
44 | #define EVERGREEN_PM4_UCODE_SIZE 1376 | ||
45 | #define EVERGREEN_RLC_UCODE_SIZE 768 | ||
46 | #define BTC_MC_UCODE_SIZE 6024 | ||
47 | |||
48 | #define CAYMAN_PFP_UCODE_SIZE 2176 | ||
49 | #define CAYMAN_PM4_UCODE_SIZE 2176 | ||
50 | #define CAYMAN_RLC_UCODE_SIZE 1024 | ||
51 | #define CAYMAN_MC_UCODE_SIZE 6037 | ||
52 | |||
53 | /* Firmware Names */ | ||
54 | MODULE_FIRMWARE("radeon/BARTS_pfp.bin"); | ||
55 | MODULE_FIRMWARE("radeon/BARTS_me.bin"); | ||
56 | MODULE_FIRMWARE("radeon/BARTS_mc.bin"); | ||
57 | MODULE_FIRMWARE("radeon/BTC_rlc.bin"); | ||
58 | MODULE_FIRMWARE("radeon/TURKS_pfp.bin"); | ||
59 | MODULE_FIRMWARE("radeon/TURKS_me.bin"); | ||
60 | MODULE_FIRMWARE("radeon/TURKS_mc.bin"); | ||
61 | MODULE_FIRMWARE("radeon/CAICOS_pfp.bin"); | ||
62 | MODULE_FIRMWARE("radeon/CAICOS_me.bin"); | ||
63 | MODULE_FIRMWARE("radeon/CAICOS_mc.bin"); | ||
64 | MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin"); | ||
65 | MODULE_FIRMWARE("radeon/CAYMAN_me.bin"); | ||
66 | MODULE_FIRMWARE("radeon/CAYMAN_mc.bin"); | ||
67 | MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin"); | ||
68 | |||
69 | #define BTC_IO_MC_REGS_SIZE 29 | ||
70 | |||
71 | static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { | ||
72 | {0x00000077, 0xff010100}, | ||
73 | {0x00000078, 0x00000000}, | ||
74 | {0x00000079, 0x00001434}, | ||
75 | {0x0000007a, 0xcc08ec08}, | ||
76 | {0x0000007b, 0x00040000}, | ||
77 | {0x0000007c, 0x000080c0}, | ||
78 | {0x0000007d, 0x09000000}, | ||
79 | {0x0000007e, 0x00210404}, | ||
80 | {0x00000081, 0x08a8e800}, | ||
81 | {0x00000082, 0x00030444}, | ||
82 | {0x00000083, 0x00000000}, | ||
83 | {0x00000085, 0x00000001}, | ||
84 | {0x00000086, 0x00000002}, | ||
85 | {0x00000087, 0x48490000}, | ||
86 | {0x00000088, 0x20244647}, | ||
87 | {0x00000089, 0x00000005}, | ||
88 | {0x0000008b, 0x66030000}, | ||
89 | {0x0000008c, 0x00006603}, | ||
90 | {0x0000008d, 0x00000100}, | ||
91 | {0x0000008f, 0x00001c0a}, | ||
92 | {0x00000090, 0xff000001}, | ||
93 | {0x00000094, 0x00101101}, | ||
94 | {0x00000095, 0x00000fff}, | ||
95 | {0x00000096, 0x00116fff}, | ||
96 | {0x00000097, 0x60010000}, | ||
97 | {0x00000098, 0x10010000}, | ||
98 | {0x00000099, 0x00006000}, | ||
99 | {0x0000009a, 0x00001000}, | ||
100 | {0x0000009f, 0x00946a00} | ||
101 | }; | ||
102 | |||
/* TURKS MC io {debug-index, value} register pairs; see barts table
 * above for how ni_mc_load_microcode() consumes these. */
static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};
134 | |||
/* CAICOS MC io {debug-index, value} register pairs; see barts table
 * above for how ni_mc_load_microcode() consumes these. */
static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};
166 | |||
/* CAYMAN MC io {debug-index, value} register pairs; see barts table
 * above for how ni_mc_load_microcode() consumes these. */
static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};
198 | |||
/*
 * ni_mc_load_microcode - load MC ucode into the memory controller
 * @rdev: radeon_device pointer
 *
 * Resets the MC sequencer, programs the per-asic io register table and
 * uploads the MC ucode from rdev->mc_fw, then restarts the sequencer
 * and waits for memory training to complete.
 *
 * Returns 0 on success (or when the load was skipped), -EINVAL if the
 * MC firmware has not been fetched yet.
 */
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	/* MC firmware must have been fetched by ni_init_microcode() first */
	if (!rdev->mc_fw)
		return -EINVAL;

	/* select the io register table and ucode size for this asic */
	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	/* only load the ucode on GDDR5 boards whose MC is not yet running.
	 * NOTE(review): the (running == 0) test makes both "if (running)"
	 * blackout save/restore branches below unreachable dead code --
	 * confirm whether the outer condition or the inner branches reflect
	 * the intended behaviour before touching either.
	 */
	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs: each table row is an {index, data} pair */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode (stored big-endian in the firmware blob) */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete.
		 * NOTE(review): no timeout -- this spins forever if the
		 * hardware never raises MEM_FALL_OUT_CMD. */
		while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
			udelay(10);

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
271 | |||
272 | int ni_init_microcode(struct radeon_device *rdev) | ||
273 | { | ||
274 | struct platform_device *pdev; | ||
275 | const char *chip_name; | ||
276 | const char *rlc_chip_name; | ||
277 | size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size; | ||
278 | char fw_name[30]; | ||
279 | int err; | ||
280 | |||
281 | DRM_DEBUG("\n"); | ||
282 | |||
283 | pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); | ||
284 | err = IS_ERR(pdev); | ||
285 | if (err) { | ||
286 | printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); | ||
287 | return -EINVAL; | ||
288 | } | ||
289 | |||
290 | switch (rdev->family) { | ||
291 | case CHIP_BARTS: | ||
292 | chip_name = "BARTS"; | ||
293 | rlc_chip_name = "BTC"; | ||
294 | pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; | ||
295 | me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; | ||
296 | rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; | ||
297 | mc_req_size = BTC_MC_UCODE_SIZE * 4; | ||
298 | break; | ||
299 | case CHIP_TURKS: | ||
300 | chip_name = "TURKS"; | ||
301 | rlc_chip_name = "BTC"; | ||
302 | pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; | ||
303 | me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; | ||
304 | rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; | ||
305 | mc_req_size = BTC_MC_UCODE_SIZE * 4; | ||
306 | break; | ||
307 | case CHIP_CAICOS: | ||
308 | chip_name = "CAICOS"; | ||
309 | rlc_chip_name = "BTC"; | ||
310 | pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; | ||
311 | me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; | ||
312 | rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; | ||
313 | mc_req_size = BTC_MC_UCODE_SIZE * 4; | ||
314 | break; | ||
315 | case CHIP_CAYMAN: | ||
316 | chip_name = "CAYMAN"; | ||
317 | rlc_chip_name = "CAYMAN"; | ||
318 | pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4; | ||
319 | me_req_size = CAYMAN_PM4_UCODE_SIZE * 4; | ||
320 | rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4; | ||
321 | mc_req_size = CAYMAN_MC_UCODE_SIZE * 4; | ||
322 | break; | ||
323 | default: BUG(); | ||
324 | } | ||
325 | |||
326 | DRM_INFO("Loading %s Microcode\n", chip_name); | ||
327 | |||
328 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); | ||
329 | err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); | ||
330 | if (err) | ||
331 | goto out; | ||
332 | if (rdev->pfp_fw->size != pfp_req_size) { | ||
333 | printk(KERN_ERR | ||
334 | "ni_cp: Bogus length %zu in firmware \"%s\"\n", | ||
335 | rdev->pfp_fw->size, fw_name); | ||
336 | err = -EINVAL; | ||
337 | goto out; | ||
338 | } | ||
339 | |||
340 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); | ||
341 | err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); | ||
342 | if (err) | ||
343 | goto out; | ||
344 | if (rdev->me_fw->size != me_req_size) { | ||
345 | printk(KERN_ERR | ||
346 | "ni_cp: Bogus length %zu in firmware \"%s\"\n", | ||
347 | rdev->me_fw->size, fw_name); | ||
348 | err = -EINVAL; | ||
349 | } | ||
350 | |||
351 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); | ||
352 | err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); | ||
353 | if (err) | ||
354 | goto out; | ||
355 | if (rdev->rlc_fw->size != rlc_req_size) { | ||
356 | printk(KERN_ERR | ||
357 | "ni_rlc: Bogus length %zu in firmware \"%s\"\n", | ||
358 | rdev->rlc_fw->size, fw_name); | ||
359 | err = -EINVAL; | ||
360 | } | ||
361 | |||
362 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); | ||
363 | err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); | ||
364 | if (err) | ||
365 | goto out; | ||
366 | if (rdev->mc_fw->size != mc_req_size) { | ||
367 | printk(KERN_ERR | ||
368 | "ni_mc: Bogus length %zu in firmware \"%s\"\n", | ||
369 | rdev->mc_fw->size, fw_name); | ||
370 | err = -EINVAL; | ||
371 | } | ||
372 | out: | ||
373 | platform_device_unregister(pdev); | ||
374 | |||
375 | if (err) { | ||
376 | if (err != -EINVAL) | ||
377 | printk(KERN_ERR | ||
378 | "ni_cp: Failed to load firmware \"%s\"\n", | ||
379 | fw_name); | ||
380 | release_firmware(rdev->pfp_fw); | ||
381 | rdev->pfp_fw = NULL; | ||
382 | release_firmware(rdev->me_fw); | ||
383 | rdev->me_fw = NULL; | ||
384 | release_firmware(rdev->rlc_fw); | ||
385 | rdev->rlc_fw = NULL; | ||
386 | release_firmware(rdev->mc_fw); | ||
387 | rdev->mc_fw = NULL; | ||
388 | } | ||
389 | return err; | ||
390 | } | ||
391 | |||
392 | /* | ||
393 | * Core functions | ||
394 | */ | ||
/*
 * cayman_get_tile_pipe_to_backend_map - compute the pipe-to-backend mapping
 * @rdev: radeon_device pointer
 * @num_tile_pipes: requested number of tile pipes
 * @num_backends_per_asic: requested number of render backends for the asic
 * @backend_disable_mask_per_asic: in/out backend disable bitmask; may be
 *	rewritten here to match the backend count actually used
 * @num_shader_engines: requested number of shader engines
 *
 * Clamps the requested configuration to the hardware limits recorded in
 * rdev->config.cayman, then builds a 32-bit map with one 4-bit backend
 * id per tile pipe (pipe order optionally swizzled to spread adjacent
 * pipes across backends).
 *
 * Returns the packed backend map.
 */
static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
					       u32 num_tile_pipes,
					       u32 num_backends_per_asic,
					       u32 *backend_disable_mask_per_asic,
					       u32 num_shader_engines)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 num_backends_per_se;
	u32 cur_pipe;
	u32 swizzle_pipe[CAYMAN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	/* force legal values */
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
		num_tile_pipes = rdev->config.cayman.max_tile_pipes;
	if (num_shader_engines < 1)
		num_shader_engines = 1;
	if (num_shader_engines > rdev->config.cayman.max_shader_engines)
		num_shader_engines = rdev->config.cayman.max_shader_engines;
	if (num_backends_per_asic < num_shader_engines)
		num_backends_per_asic = num_shader_engines;
	if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
		num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;

	/* make sure we have the same number of backends per se */
	num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
	/* set up the number of backends per se */
	num_backends_per_se = num_backends_per_asic / num_shader_engines;
	if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
		num_backends_per_se = rdev->config.cayman.max_backends_per_se;
		num_backends_per_asic = num_backends_per_se * num_shader_engines;
	}

	/* create enable mask and count for enabled backends: take the first
	 * num_backends_per_asic backends not flagged in the disable mask */
	for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
		if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends_per_asic)
			break;
	}

	/* force the backends mask to match the current number of backends:
	 * if too few survived the disable mask, rebuild both the enable
	 * mask and the caller's disable mask from scratch, distributing
	 * backends evenly across the shader engines */
	if (enabled_backends_count != num_backends_per_asic) {
		u32 this_backend_enabled;
		u32 shader_engine;
		u32 backend_per_se;

		enabled_backends_mask = 0;
		enabled_backends_count = 0;
		*backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
			/* calc the current se */
			shader_engine = i / rdev->config.cayman.max_backends_per_se;
			/* calc the backend per se */
			backend_per_se = i % rdev->config.cayman.max_backends_per_se;
			/* default to not enabled */
			this_backend_enabled = 0;
			if ((shader_engine < num_shader_engines) &&
			    (backend_per_se < num_backends_per_se))
				this_backend_enabled = 1;
			if (this_backend_enabled) {
				enabled_backends_mask |= (1 << i);
				*backend_disable_mask_per_asic &= ~(1 << i);
				++enabled_backends_count;
			}
		}
	}


	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CAYMAN:
		force_no_swizzle = true;
		break;
	default:
		force_no_swizzle = false;
		break;
	}
	/* even when swizzle is forced off, re-enable it unless the enabled
	 * backends are contiguous (no two adjacent enabled backends) */
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	/* build the pipe visit order; swizzled order interleaves even then
	 * odd pipes so consecutive pipes land on different backends */
	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	/* assign enabled backends round-robin to pipes, skipping disabled
	 * backend ids; 4 bits of backend id per pipe in the result */
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
	}

	return backend_map;
}
570 | |||
571 | static void cayman_program_channel_remap(struct radeon_device *rdev) | ||
572 | { | ||
573 | u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; | ||
574 | |||
575 | tmp = RREG32(MC_SHARED_CHMAP); | ||
576 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
577 | case 0: | ||
578 | case 1: | ||
579 | case 2: | ||
580 | case 3: | ||
581 | default: | ||
582 | /* default mapping */ | ||
583 | mc_shared_chremap = 0x00fac688; | ||
584 | break; | ||
585 | } | ||
586 | |||
587 | switch (rdev->family) { | ||
588 | case CHIP_CAYMAN: | ||
589 | default: | ||
590 | //tcp_chan_steer_lo = 0x54763210 | ||
591 | tcp_chan_steer_lo = 0x76543210; | ||
592 | tcp_chan_steer_hi = 0x0000ba98; | ||
593 | break; | ||
594 | } | ||
595 | |||
596 | WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); | ||
597 | WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); | ||
598 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
599 | } | ||
600 | |||
601 | static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, | ||
602 | u32 disable_mask_per_se, | ||
603 | u32 max_disable_mask_per_se, | ||
604 | u32 num_shader_engines) | ||
605 | { | ||
606 | u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se); | ||
607 | u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se; | ||
608 | |||
609 | if (num_shader_engines == 1) | ||
610 | return disable_mask_per_asic; | ||
611 | else if (num_shader_engines == 2) | ||
612 | return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se); | ||
613 | else | ||
614 | return 0xffffffff; | ||
615 | } | ||
616 | |||
/*
 * cayman_gpu_init - one-time GPU core setup for cayman asics
 * @rdev: radeon_device pointer
 *
 * Fills in rdev->config.cayman from hardware strap registers, computes
 * the tiling / backend configuration, and programs the 3D engine's HW
 * defaults.  The register write order below follows the hardware
 * bring-up sequence and should not be rearranged.
 */
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 gc_user_shader_pipe_config;
	u32 gc_user_rb_backend_disable;
	u32 cgts_user_tcc_disable;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	/* static per-family limits; only cayman is handled here */
	switch (rdev->family) {
	case CHIP_CAYMAN:
	default:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* read hardware strap / user config registers */
	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
	/* NOTE(review): hard-coded rather than read from CGTS_TCC_DISABLE
	 * like the other cc_* values -- confirm this is intentional */
	cgts_tcc_disable = 0xff000000;
	gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
	gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
	cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);

	/* derive active unit counts from the (inverted) disable masks */
	rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
	rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
	rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
	rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
	tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
	rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
	tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
	rdev->config.cayman.backend_disable_mask_per_asic =
		cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
						 rdev->config.cayman.num_shader_engines);
	rdev->config.cayman.backend_map =
		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
						    rdev->config.cayman.num_backends_per_se *
						    rdev->config.cayman.num_shader_engines,
						    &rdev->config.cayman.backend_disable_mask_per_asic,
						    rdev->config.cayman.num_shader_engines);
	tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
	rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
	tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
		rdev->config.cayman.mem_max_burst_length_bytes = 512;
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	/* build GB_ADDR_CONFIG from the derived config rather than reading
	 * it back from hardware (disabled #if 0 path kept for reference) */
	//gb_addr_config = 0x02011003
#if 0
	gb_addr_config = RREG32(GB_ADDR_CONFIG);
#else
	gb_addr_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}

	tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
	gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
	tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
	switch (rdev->config.cayman.num_gpus) {
	case 1:
	default:
		gb_addr_config |= NUM_GPUS(0);
		break;
	case 2:
		gb_addr_config |= NUM_GPUS(1);
		break;
	case 4:
		gb_addr_config |= NUM_GPUS(2);
		break;
	}
	switch (rdev->config.cayman.multi_gpu_tile_size) {
	case 16:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
		break;
	case 32:
	default:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
		break;
	case 64:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
		break;
	case 128:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
		break;
	}
	switch (rdev->config.cayman.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}
#endif

	/* re-derive the config fields from the packed register value so they
	 * are consistent with what was actually encoded */
	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;

	//gb_backend_map = 0x76541032;
#if 0
	gb_backend_map = RREG32(GB_BACKEND_MAP);
#else
	gb_backend_map =
		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
						    rdev->config.cayman.num_backends_per_se *
						    rdev->config.cayman.num_shader_engines,
						    &rdev->config.cayman.backend_disable_mask_per_asic,
						    rdev->config.cayman.num_shader_engines);
#endif
	/* setup tiling info dword. gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0 num_pipes
	 * bits 7:4 num_banks
	 * bits 11:8 group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}
	rdev->config.cayman.tile_config |=
		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	cayman_program_channel_remap(rdev);

	/* primary versions */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);

	/* user versions */
	WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex.
	 * NOTE(review): the loop writes the same OVERRIDE value 16 times --
	 * presumably required by the hardware programming sequence; confirm */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zero-ed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	/* export buffer sizes are programmed in units of 4 entries */
	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));


	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	/* zero out the CB performance counter selects */
	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	/* let the programmed values settle */
	udelay(50);
}
945 | |||
946 | /* | ||
947 | * GART | ||
948 | */ | ||
/*
 * cayman_pcie_gart_tlb_flush - flush the GART TLB
 * @rdev: radeon device
 *
 * Flushes the HDP cache, then asks the VM block to invalidate the
 * TLB for VM context 0 (in VM_INVALIDATE_REQUEST, bits 0-7 select
 * contexts 0-7 respectively).
 */
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}
957 | |||
/*
 * cayman_pcie_gart_enable - set up and enable the GART
 * @rdev: radeon device
 *
 * Pins the page table in VRAM, restores its entries, programs the L1
 * TLB / L2 cache controls and points VM context 0 at the GTT aperture.
 * Contexts 1-7 are left disabled.  Faulting accesses in context 0 are
 * redirected to the dummy page.  Finishes with a TLB flush and marks
 * the GART ready.
 *
 * Returns 0 on success, -EINVAL if the page-table BO is missing, or
 * the error from pinning the table.
 */
int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* replay previously bound pages into the (re)pinned table */
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0: addresses are programmed in 4K-page units (>> 12) */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	/* faults fall back to the dummy page instead of random memory */
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	/* disable context1-7 */
	WREG32(VM_CONTEXT1_CNTL2, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	cayman_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
1001 | |||
/*
 * cayman_pcie_gart_disable - disable GART address translation
 * @rdev: radeon device
 *
 * Disables all VM contexts and the L1 TLB (fragment processing is
 * kept enabled), clears the L2 invalidation register, then unmaps and
 * unpins the page-table BO if it exists.
 */
void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	int r;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control: note ENABLE_L1_TLB is deliberately absent here */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache (ENABLE_L2_CACHE absent: caching is off) */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* release the CPU mapping and VRAM pin of the page table, if any */
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
1030 | |||
/*
 * cayman_pcie_gart_fini - final GART teardown
 * @rdev: radeon device
 *
 * Disables translation, frees the VRAM page-table BO and releases the
 * common GART bookkeeping.  Order matters: hardware must stop using
 * the table before it is freed.
 */
void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
1037 | |||
1038 | /* | ||
1039 | * CP. | ||
1040 | */ | ||
1041 | static void cayman_cp_enable(struct radeon_device *rdev, bool enable) | ||
1042 | { | ||
1043 | if (enable) | ||
1044 | WREG32(CP_ME_CNTL, 0); | ||
1045 | else { | ||
1046 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | ||
1047 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); | ||
1048 | WREG32(SCRATCH_UMSK, 0); | ||
1049 | } | ||
1050 | } | ||
1051 | |||
1052 | static int cayman_cp_load_microcode(struct radeon_device *rdev) | ||
1053 | { | ||
1054 | const __be32 *fw_data; | ||
1055 | int i; | ||
1056 | |||
1057 | if (!rdev->me_fw || !rdev->pfp_fw) | ||
1058 | return -EINVAL; | ||
1059 | |||
1060 | cayman_cp_enable(rdev, false); | ||
1061 | |||
1062 | fw_data = (const __be32 *)rdev->pfp_fw->data; | ||
1063 | WREG32(CP_PFP_UCODE_ADDR, 0); | ||
1064 | for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++) | ||
1065 | WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); | ||
1066 | WREG32(CP_PFP_UCODE_ADDR, 0); | ||
1067 | |||
1068 | fw_data = (const __be32 *)rdev->me_fw->data; | ||
1069 | WREG32(CP_ME_RAM_WADDR, 0); | ||
1070 | for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++) | ||
1071 | WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); | ||
1072 | |||
1073 | WREG32(CP_PFP_UCODE_ADDR, 0); | ||
1074 | WREG32(CP_ME_RAM_WADDR, 0); | ||
1075 | WREG32(CP_ME_RAM_RADDR, 0); | ||
1076 | return 0; | ||
1077 | } | ||
1078 | |||
/*
 * cayman_cp_start - initialize the CP and emit the initial GPU state
 * @rdev: radeon device
 *
 * Sends ME_INITIALIZE, releases the CP, then pushes the default
 * (clear) context state plus a few fixed configuration packets on
 * ring 0.  Other rings are not initialized here yet (see XXX below).
 *
 * Returns 0 on success or the ring-lock error code.
 */
static int cayman_cp_start(struct radeon_device *rdev)
{
	int r, i;

	/* 7 dwords = ME_INITIALIZE header + 6 payload words below */
	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	radeon_ring_write(rdev, 0x0);
	radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cayman_cp_enable(rdev, true);

	/* 19 = number of fixed dwords emitted below around the
	 * cayman_default_state dump; keep in sync with the writes */
	r = radeon_ring_lock(rdev, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(rdev, cayman_default_state[i]);

	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(rdev, 0);

	/* SQ_VTX_BASE_VTX_LOC (raw PACKET3, count 2) */
	radeon_ring_write(rdev, 0xc0026f00);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);

	/* Clear consts */
	radeon_ring_write(rdev, 0xc0036f00);
	radeon_ring_write(rdev, 0x00000bc4);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);

	/* SET_CONTEXT_REG at offset 0x316, two values */
	radeon_ring_write(rdev, 0xc0026900);
	radeon_ring_write(rdev, 0x00000316);
	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(rdev, 0x00000010); /* next context reg -- presumably VGT_OUT_DEALLOC_CNTL, TODO confirm */

	radeon_ring_unlock_commit(rdev);

	/* XXX init other rings */

	return 0;
}
1143 | |||
/*
 * cayman_cp_fini - stop the CP and free its ring
 * @rdev: radeon device
 *
 * Halts the command processor before releasing the ring buffer so the
 * hardware cannot fetch from freed memory.
 */
static void cayman_cp_fini(struct radeon_device *rdev)
{
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev);
}
1149 | |||
/*
 * cayman_cp_resume - reset the CP and (re)program all three ring buffers
 * @rdev: radeon device
 *
 * Soft-resets the CP together with the blocks that must be reset with
 * it, then programs ring 0 (gfx + compute) and the two compute-only
 * rings (1 and 2), starts them via cayman_cp_start() and ring-tests
 * ring 0.  On test failure all rings are marked not ready.
 *
 * Returns 0 on success or a negative error code.
 */
int cayman_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET); /* read back to post the write */
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* NOTE(review): undocumented debug bit 27 -- meaning not derivable here */
	WREG32(CP_DEBUG, (1 << 27));

	/* ring 0 - compute and gfx */
	/* Set ring buffer size (log2 of 8-byte units; page size in bits 8+) */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB0_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);

	rdev->cp.rptr = RREG32(CP_RB0_RPTR);
	rdev->cp.wptr = RREG32(CP_RB0_WPTR);

	/* ring1 - compute only */
	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	/* NOTE(review): unlike ring 0, rings 1/2 never set RB_NO_UPDATE
	 * when writeback is disabled -- confirm this is intentional */
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB1_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB1_CNTL, tmp);

	WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);

	rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
	rdev->cp1.wptr = RREG32(CP_RB1_WPTR);

	/* ring2 - compute only */
	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB2_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB2_CNTL, tmp);

	WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);

	rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
	rdev->cp2.wptr = RREG32(CP_RB2_WPTR);

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->cp.ready = true;
	rdev->cp1.ready = true;
	rdev->cp2.ready = true;
	/* this only tests cp0 */
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		rdev->cp1.ready = false;
		rdev->cp2.ready = false;
		return r;
	}

	return 0;
}
1273 | |||
/*
 * cayman_gpu_is_lockup - heuristically detect a GPU lockup
 * @rdev: radeon device
 *
 * If the graphics block is idle the lockup tracker is simply
 * refreshed and false is returned.  Otherwise the CP is nudged with
 * two NOPs and the shared r100 helper decides, based on ring-pointer
 * progress over time, whether the GPU is locked up.
 */
bool cayman_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status_se0, grbm_status_se1;
	struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
	int r;

	/* NOTE(review): srbm_status and the two per-SE values are read
	 * but never used below -- apparently debug leftovers */
	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		r100_gpu_lockup_update(lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	/* XXX deal with CP0,1,2 */
	rdev->cp.rptr = RREG32(CP_RB0_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
1302 | |||
/*
 * cayman_gpu_soft_reset - soft-reset the gfx pipeline
 * @rdev: radeon device
 *
 * No-op when the graphics block is already idle.  Otherwise stops the
 * MC (saving display state), halts CP fetching, pulses GRBM_SOFT_RESET
 * for every gfx block and restores the MC.  Status registers are
 * logged before and after for debugging.
 *
 * Always returns 0.
 */
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	/* nothing to do if the gfx block is idle */
	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset \n");
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	/* quiesce memory traffic before resetting */
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_GDS |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VGT |
		      SOFT_RESET_IA);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET); /* post the write */
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}
1361 | |||
/*
 * cayman_asic_reset - ASIC-reset callback for cayman
 * @rdev: radeon device
 *
 * Cayman only implements a gfx-block soft reset; delegate to it and
 * propagate the result.
 */
int cayman_asic_reset(struct radeon_device *rdev)
{
	int ret;

	ret = cayman_gpu_soft_reset(rdev);
	return ret;
}
1366 | |||
/*
 * cayman_startup - bring the GPU up to an accelerated state
 * @rdev: radeon device
 *
 * Loads firmware, programs the memory controller, enables the GART,
 * initializes the gfx block, blitter, writeback, interrupts and
 * finally the CP rings.  Shared by the init and resume paths.
 *
 * Returns 0 on success or a negative error code.  NOTE(review): most
 * failure paths return without unwinding earlier steps; callers are
 * expected to perform the teardown.
 */
static int cayman_startup(struct radeon_device *rdev)
{
	int r;

	/* fetch all required firmware blobs once; they stay cached in rdev */
	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
		r = ni_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}
	r = ni_mc_load_microcode(rdev);
	if (r) {
		DRM_ERROR("Failed to load MC firmware!\n");
		return r;
	}

	evergreen_mc_program(rdev);
	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	/* blitter failure is non-fatal: fall back to CPU copies */
	r = evergreen_blit_init(rdev);
	if (r) {
		evergreen_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	return 0;
}
1423 | |||
1424 | int cayman_resume(struct radeon_device *rdev) | ||
1425 | { | ||
1426 | int r; | ||
1427 | |||
1428 | /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, | ||
1429 | * posting will perform necessary task to bring back GPU into good | ||
1430 | * shape. | ||
1431 | */ | ||
1432 | /* post card */ | ||
1433 | atom_asic_init(rdev->mode_info.atom_context); | ||
1434 | |||
1435 | r = cayman_startup(rdev); | ||
1436 | if (r) { | ||
1437 | DRM_ERROR("cayman startup failed on resume\n"); | ||
1438 | return r; | ||
1439 | } | ||
1440 | |||
1441 | r = r600_ib_test(rdev); | ||
1442 | if (r) { | ||
1443 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | ||
1444 | return r; | ||
1445 | } | ||
1446 | |||
1447 | return r; | ||
1448 | |||
1449 | } | ||
1450 | |||
1451 | int cayman_suspend(struct radeon_device *rdev) | ||
1452 | { | ||
1453 | int r; | ||
1454 | |||
1455 | /* FIXME: we should wait for ring to be empty */ | ||
1456 | cayman_cp_enable(rdev, false); | ||
1457 | rdev->cp.ready = false; | ||
1458 | evergreen_irq_suspend(rdev); | ||
1459 | radeon_wb_disable(rdev); | ||
1460 | cayman_pcie_gart_disable(rdev); | ||
1461 | |||
1462 | /* unpin shaders bo */ | ||
1463 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
1464 | if (likely(r == 0)) { | ||
1465 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
1466 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
1467 | } | ||
1468 | |||
1469 | return 0; | ||
1470 | } | ||
1471 | |||
/* Plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call the asic-specific functions. This
 * should also allow us to remove a bunch of callback functions
 * like vram_info.
 */
/*
 * cayman_init - one-time driver initialization for cayman ASICs
 * @rdev: radeon device
 *
 * Performs BIOS/ATOM setup, clock and memory-controller init, ring
 * and IRQ bookkeeping, then attempts full acceleration startup.  If
 * acceleration cannot be brought up, the device is left usable with
 * rdev->accel_working = false.
 *
 * Returns 0 on success or a negative error code.
 */
int cayman_init(struct radeon_device *rdev)
{
	int r;

	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	/* common gart bookkeeping (enable/fini use the cayman_* variants) */
	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		/* NOTE(review): if IB pool init fails, the IB test below is
		 * still attempted -- presumably harmless, but confirm */
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 * NOTE(review): this error path returns without tearing down
	 * what was initialized above -- callers must invoke cayman_fini.
	 */
	if (!rdev->mc_fw) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}
1577 | |||
/*
 * cayman_fini - tear down everything cayman_init() set up
 * @rdev: radeon device
 *
 * Teardown mirrors init in reverse: acceleration engines first (CP,
 * IRQ, writeback, IB pool, GART), then GEM/fence/BO bookkeeping, and
 * finally the ATOM context and the cached BIOS image.
 */
void cayman_fini(struct radeon_device *rdev)
{
	evergreen_blit_fini(rdev);
	cayman_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
1594 | |||
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h new file mode 100644 index 000000000000..5db7b7d6feb0 --- /dev/null +++ b/drivers/gpu/drm/radeon/ni_reg.h | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Alex Deucher | ||
23 | */ | ||
#ifndef __NI_REG_H__
#define __NI_REG_H__

/* northern islands - DCE5 */
/* Display color-pipeline register offsets and field encodings.  Each
 * block below has a mode field for the graphics (GRPH) surface and,
 * at a higher bit position, one for the overlay (OVL) surface; the
 * unshifted values beneath each mode macro are its legal settings. */

#define NI_INPUT_GAMMA_CONTROL                         0x6840
#       define NI_GRPH_INPUT_GAMMA_MODE(x)             (((x) & 0x3) << 0)
#       define NI_INPUT_GAMMA_USE_LUT                  0
#       define NI_INPUT_GAMMA_BYPASS                   1
#       define NI_INPUT_GAMMA_SRGB_24                  2
#       define NI_INPUT_GAMMA_XVYCC_222                3
#       define NI_OVL_INPUT_GAMMA_MODE(x)              (((x) & 0x3) << 4)

#define NI_PRESCALE_GRPH_CONTROL                       0x68b4
#       define NI_GRPH_PRESCALE_BYPASS                 (1 << 4)

#define NI_PRESCALE_OVL_CONTROL                        0x68c4
#       define NI_OVL_PRESCALE_BYPASS                  (1 << 4)

#define NI_INPUT_CSC_CONTROL                           0x68d4
#       define NI_INPUT_CSC_GRPH_MODE(x)               (((x) & 0x3) << 0)
#       define NI_INPUT_CSC_BYPASS                     0
#       define NI_INPUT_CSC_PROG_COEFF                 1
#       define NI_INPUT_CSC_PROG_SHARED_MATRIXA        2
#       define NI_INPUT_CSC_OVL_MODE(x)                (((x) & 0x3) << 4)

#define NI_OUTPUT_CSC_CONTROL                          0x68f0
#       define NI_OUTPUT_CSC_GRPH_MODE(x)              (((x) & 0x7) << 0)
#       define NI_OUTPUT_CSC_BYPASS                    0
#       define NI_OUTPUT_CSC_TV_RGB                    1
#       define NI_OUTPUT_CSC_YCBCR_601                 2
#       define NI_OUTPUT_CSC_YCBCR_709                 3
#       define NI_OUTPUT_CSC_PROG_COEFF                4
#       define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB       5
#       define NI_OUTPUT_CSC_OVL_MODE(x)               (((x) & 0x7) << 4)

#define NI_DEGAMMA_CONTROL                             0x6960
#       define NI_GRPH_DEGAMMA_MODE(x)                 (((x) & 0x3) << 0)
#       define NI_DEGAMMA_BYPASS                       0
#       define NI_DEGAMMA_SRGB_24                      1
#       define NI_DEGAMMA_XVYCC_222                    2
#       define NI_OVL_DEGAMMA_MODE(x)                  (((x) & 0x3) << 4)
#       define NI_ICON_DEGAMMA_MODE(x)                 (((x) & 0x3) << 8)
#       define NI_CURSOR_DEGAMMA_MODE(x)               (((x) & 0x3) << 12)

#define NI_GAMUT_REMAP_CONTROL                         0x6964
#       define NI_GRPH_GAMUT_REMAP_MODE(x)             (((x) & 0x3) << 0)
#       define NI_GAMUT_REMAP_BYPASS                   0
#       define NI_GAMUT_REMAP_PROG_COEFF               1
#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA      2
#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB      3
#       define NI_OVL_GAMUT_REMAP_MODE(x)              (((x) & 0x3) << 4)

#define NI_REGAMMA_CONTROL                             0x6a80
#       define NI_GRPH_REGAMMA_MODE(x)                 (((x) & 0x7) << 0)
#       define NI_REGAMMA_BYPASS                       0
#       define NI_REGAMMA_SRGB_24                      1
#       define NI_REGAMMA_XVYCC_222                    2
#       define NI_REGAMMA_PROG_A                       3
#       define NI_REGAMMA_PROG_B                       4
#       define NI_OVL_REGAMMA_MODE(x)                  (((x) & 0x7) << 4)

#endif
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h new file mode 100644 index 000000000000..4672869cdb26 --- /dev/null +++ b/drivers/gpu/drm/radeon/nid.h | |||
@@ -0,0 +1,538 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Alex Deucher | ||
23 | */ | ||
24 | #ifndef NI_H | ||
25 | #define NI_H | ||
26 | |||
27 | #define CAYMAN_MAX_SH_GPRS 256 | ||
28 | #define CAYMAN_MAX_TEMP_GPRS 16 | ||
29 | #define CAYMAN_MAX_SH_THREADS 256 | ||
30 | #define CAYMAN_MAX_SH_STACK_ENTRIES 4096 | ||
31 | #define CAYMAN_MAX_FRC_EOV_CNT 16384 | ||
32 | #define CAYMAN_MAX_BACKENDS 8 | ||
33 | #define CAYMAN_MAX_BACKENDS_MASK 0xFF | ||
34 | #define CAYMAN_MAX_BACKENDS_PER_SE_MASK 0xF | ||
35 | #define CAYMAN_MAX_SIMDS 16 | ||
36 | #define CAYMAN_MAX_SIMDS_MASK 0xFFFF | ||
37 | #define CAYMAN_MAX_SIMDS_PER_SE_MASK 0xFFF | ||
38 | #define CAYMAN_MAX_PIPES 8 | ||
39 | #define CAYMAN_MAX_PIPES_MASK 0xFF | ||
40 | #define CAYMAN_MAX_LDS_NUM 0xFFFF | ||
41 | #define CAYMAN_MAX_TCC 16 | ||
42 | #define CAYMAN_MAX_TCC_MASK 0xFF | ||
43 | |||
44 | #define DMIF_ADDR_CONFIG 0xBD4 | ||
45 | #define SRBM_STATUS 0x0E50 | ||
46 | |||
47 | #define VM_CONTEXT0_REQUEST_RESPONSE 0x1470 | ||
48 | #define REQUEST_TYPE(x) (((x) & 0xf) << 0) | ||
49 | #define RESPONSE_TYPE_MASK 0x000000F0 | ||
50 | #define RESPONSE_TYPE_SHIFT 4 | ||
51 | #define VM_L2_CNTL 0x1400 | ||
52 | #define ENABLE_L2_CACHE (1 << 0) | ||
53 | #define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) | ||
54 | #define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9) | ||
55 | #define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10) | ||
56 | #define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14) | ||
57 | #define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 18) | ||
58 | /* CONTEXT1_IDENTITY_ACCESS_MODE | ||
59 | * 0 physical = logical | ||
60 | * 1 logical via context1 page table | ||
61 | * 2 inside identity aperture use translation, outside physical = logical | ||
62 | * 3 inside identity aperture physical = logical, outside use translation | ||
63 | */ | ||
64 | #define VM_L2_CNTL2 0x1404 | ||
65 | #define INVALIDATE_ALL_L1_TLBS (1 << 0) | ||
66 | #define INVALIDATE_L2_CACHE (1 << 1) | ||
67 | #define VM_L2_CNTL3 0x1408 | ||
68 | #define BANK_SELECT(x) ((x) << 0) | ||
69 | #define CACHE_UPDATE_MODE(x) ((x) << 6) | ||
70 | #define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20) | ||
71 | #define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15) | ||
72 | #define VM_L2_STATUS 0x140C | ||
73 | #define L2_BUSY (1 << 0) | ||
74 | #define VM_CONTEXT0_CNTL 0x1410 | ||
75 | #define ENABLE_CONTEXT (1 << 0) | ||
76 | #define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) | ||
77 | #define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) | ||
78 | #define VM_CONTEXT1_CNTL 0x1414 | ||
79 | #define VM_CONTEXT0_CNTL2 0x1430 | ||
80 | #define VM_CONTEXT1_CNTL2 0x1434 | ||
81 | #define VM_INVALIDATE_REQUEST 0x1478 | ||
82 | #define VM_INVALIDATE_RESPONSE 0x147c | ||
83 | #define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518 | ||
84 | #define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c | ||
85 | #define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C | ||
86 | #define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C | ||
87 | #define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C | ||
88 | |||
89 | #define MC_SHARED_CHMAP 0x2004 | ||
90 | #define NOOFCHAN_SHIFT 12 | ||
91 | #define NOOFCHAN_MASK 0x00003000 | ||
92 | #define MC_SHARED_CHREMAP 0x2008 | ||
93 | |||
94 | #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 | ||
95 | #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 | ||
96 | #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C | ||
97 | #define MC_VM_MX_L1_TLB_CNTL 0x2064 | ||
98 | #define ENABLE_L1_TLB (1 << 0) | ||
99 | #define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1) | ||
100 | #define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3) | ||
101 | #define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3) | ||
102 | #define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3) | ||
103 | #define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3) | ||
104 | #define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5) | ||
105 | #define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6) | ||
106 | |||
107 | #define MC_SHARED_BLACKOUT_CNTL 0x20ac | ||
108 | #define MC_ARB_RAMCFG 0x2760 | ||
109 | #define NOOFBANK_SHIFT 0 | ||
110 | #define NOOFBANK_MASK 0x00000003 | ||
111 | #define NOOFRANK_SHIFT 2 | ||
112 | #define NOOFRANK_MASK 0x00000004 | ||
113 | #define NOOFROWS_SHIFT 3 | ||
114 | #define NOOFROWS_MASK 0x00000038 | ||
115 | #define NOOFCOLS_SHIFT 6 | ||
116 | #define NOOFCOLS_MASK 0x000000C0 | ||
117 | #define CHANSIZE_SHIFT 8 | ||
118 | #define CHANSIZE_MASK 0x00000100 | ||
119 | #define BURSTLENGTH_SHIFT 9 | ||
120 | #define BURSTLENGTH_MASK 0x00000200 | ||
121 | #define CHANSIZE_OVERRIDE (1 << 11) | ||
122 | #define MC_SEQ_SUP_CNTL 0x28c8 | ||
123 | #define RUN_MASK (1 << 0) | ||
124 | #define MC_SEQ_SUP_PGM 0x28cc | ||
125 | #define MC_IO_PAD_CNTL_D0 0x29d0 | ||
126 | #define MEM_FALL_OUT_CMD (1 << 8) | ||
127 | #define MC_SEQ_MISC0 0x2a00 | ||
128 | #define MC_SEQ_MISC0_GDDR5_SHIFT 28 | ||
129 | #define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 | ||
130 | #define MC_SEQ_MISC0_GDDR5_VALUE 5 | ||
131 | #define MC_SEQ_IO_DEBUG_INDEX 0x2a44 | ||
132 | #define MC_SEQ_IO_DEBUG_DATA 0x2a48 | ||
133 | |||
134 | #define HDP_HOST_PATH_CNTL 0x2C00 | ||
135 | #define HDP_NONSURFACE_BASE 0x2C04 | ||
136 | #define HDP_NONSURFACE_INFO 0x2C08 | ||
137 | #define HDP_NONSURFACE_SIZE 0x2C0C | ||
138 | #define HDP_ADDR_CONFIG 0x2F48 | ||
139 | #define HDP_MISC_CNTL 0x2F4C | ||
140 | #define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) | ||
141 | |||
142 | #define CC_SYS_RB_BACKEND_DISABLE 0x3F88 | ||
143 | #define GC_USER_SYS_RB_BACKEND_DISABLE 0x3F8C | ||
144 | #define CGTS_SYS_TCC_DISABLE 0x3F90 | ||
145 | #define CGTS_USER_SYS_TCC_DISABLE 0x3F94 | ||
146 | |||
147 | #define CONFIG_MEMSIZE 0x5428 | ||
148 | |||
149 | #define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 | ||
150 | #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 | ||
151 | |||
152 | #define GRBM_CNTL 0x8000 | ||
153 | #define GRBM_READ_TIMEOUT(x) ((x) << 0) | ||
154 | #define GRBM_STATUS 0x8010 | ||
155 | #define CMDFIFO_AVAIL_MASK 0x0000000F | ||
156 | #define RING2_RQ_PENDING (1 << 4) | ||
157 | #define SRBM_RQ_PENDING (1 << 5) | ||
158 | #define RING1_RQ_PENDING (1 << 6) | ||
159 | #define CF_RQ_PENDING (1 << 7) | ||
160 | #define PF_RQ_PENDING (1 << 8) | ||
161 | #define GDS_DMA_RQ_PENDING (1 << 9) | ||
162 | #define GRBM_EE_BUSY (1 << 10) | ||
163 | #define SX_CLEAN (1 << 11) | ||
164 | #define DB_CLEAN (1 << 12) | ||
165 | #define CB_CLEAN (1 << 13) | ||
166 | #define TA_BUSY (1 << 14) | ||
167 | #define GDS_BUSY (1 << 15) | ||
168 | #define VGT_BUSY_NO_DMA (1 << 16) | ||
169 | #define VGT_BUSY (1 << 17) | ||
170 | #define IA_BUSY_NO_DMA (1 << 18) | ||
171 | #define IA_BUSY (1 << 19) | ||
172 | #define SX_BUSY (1 << 20) | ||
173 | #define SH_BUSY (1 << 21) | ||
174 | #define SPI_BUSY (1 << 22) | ||
175 | #define SC_BUSY (1 << 24) | ||
176 | #define PA_BUSY (1 << 25) | ||
177 | #define DB_BUSY (1 << 26) | ||
178 | #define CP_COHERENCY_BUSY (1 << 28) | ||
179 | #define CP_BUSY (1 << 29) | ||
180 | #define CB_BUSY (1 << 30) | ||
181 | #define GUI_ACTIVE (1 << 31) | ||
182 | #define GRBM_STATUS_SE0 0x8014 | ||
183 | #define GRBM_STATUS_SE1 0x8018 | ||
184 | #define SE_SX_CLEAN (1 << 0) | ||
185 | #define SE_DB_CLEAN (1 << 1) | ||
186 | #define SE_CB_CLEAN (1 << 2) | ||
187 | #define SE_VGT_BUSY (1 << 23) | ||
188 | #define SE_PA_BUSY (1 << 24) | ||
189 | #define SE_TA_BUSY (1 << 25) | ||
190 | #define SE_SX_BUSY (1 << 26) | ||
191 | #define SE_SPI_BUSY (1 << 27) | ||
192 | #define SE_SH_BUSY (1 << 28) | ||
193 | #define SE_SC_BUSY (1 << 29) | ||
194 | #define SE_DB_BUSY (1 << 30) | ||
195 | #define SE_CB_BUSY (1 << 31) | ||
196 | #define GRBM_SOFT_RESET 0x8020 | ||
197 | #define SOFT_RESET_CP (1 << 0) | ||
198 | #define SOFT_RESET_CB (1 << 1) | ||
199 | #define SOFT_RESET_DB (1 << 3) | ||
200 | #define SOFT_RESET_GDS (1 << 4) | ||
201 | #define SOFT_RESET_PA (1 << 5) | ||
202 | #define SOFT_RESET_SC (1 << 6) | ||
203 | #define SOFT_RESET_SPI (1 << 8) | ||
204 | #define SOFT_RESET_SH (1 << 9) | ||
205 | #define SOFT_RESET_SX (1 << 10) | ||
206 | #define SOFT_RESET_TC (1 << 11) | ||
207 | #define SOFT_RESET_TA (1 << 12) | ||
208 | #define SOFT_RESET_VGT (1 << 14) | ||
209 | #define SOFT_RESET_IA (1 << 15) | ||
210 | |||
211 | #define SCRATCH_REG0 0x8500 | ||
212 | #define SCRATCH_REG1 0x8504 | ||
213 | #define SCRATCH_REG2 0x8508 | ||
214 | #define SCRATCH_REG3 0x850C | ||
215 | #define SCRATCH_REG4 0x8510 | ||
216 | #define SCRATCH_REG5 0x8514 | ||
217 | #define SCRATCH_REG6 0x8518 | ||
218 | #define SCRATCH_REG7 0x851C | ||
219 | #define SCRATCH_UMSK 0x8540 | ||
220 | #define SCRATCH_ADDR 0x8544 | ||
221 | #define CP_SEM_WAIT_TIMER 0x85BC | ||
222 | #define CP_ME_CNTL 0x86D8 | ||
223 | #define CP_ME_HALT (1 << 28) | ||
224 | #define CP_PFP_HALT (1 << 26) | ||
225 | #define CP_RB2_RPTR 0x86f8 | ||
226 | #define CP_RB1_RPTR 0x86fc | ||
227 | #define CP_RB0_RPTR 0x8700 | ||
228 | #define CP_RB_WPTR_DELAY 0x8704 | ||
229 | #define CP_MEQ_THRESHOLDS 0x8764 | ||
230 | #define MEQ1_START(x) ((x) << 0) | ||
231 | #define MEQ2_START(x) ((x) << 8) | ||
232 | #define CP_PERFMON_CNTL 0x87FC | ||
233 | |||
234 | #define VGT_CACHE_INVALIDATION 0x88C4 | ||
235 | #define CACHE_INVALIDATION(x) ((x) << 0) | ||
236 | #define VC_ONLY 0 | ||
237 | #define TC_ONLY 1 | ||
238 | #define VC_AND_TC 2 | ||
239 | #define AUTO_INVLD_EN(x) ((x) << 6) | ||
240 | #define NO_AUTO 0 | ||
241 | #define ES_AUTO 1 | ||
242 | #define GS_AUTO 2 | ||
243 | #define ES_AND_GS_AUTO 3 | ||
244 | #define VGT_GS_VERTEX_REUSE 0x88D4 | ||
245 | |||
246 | #define CC_GC_SHADER_PIPE_CONFIG 0x8950 | ||
247 | #define GC_USER_SHADER_PIPE_CONFIG 0x8954 | ||
248 | #define INACTIVE_QD_PIPES(x) ((x) << 8) | ||
249 | #define INACTIVE_QD_PIPES_MASK 0x0000FF00 | ||
250 | #define INACTIVE_QD_PIPES_SHIFT 8 | ||
251 | #define INACTIVE_SIMDS(x) ((x) << 16) | ||
252 | #define INACTIVE_SIMDS_MASK 0xFFFF0000 | ||
253 | #define INACTIVE_SIMDS_SHIFT 16 | ||
254 | |||
255 | #define VGT_PRIMITIVE_TYPE 0x8958 | ||
256 | #define VGT_NUM_INSTANCES 0x8974 | ||
257 | #define VGT_TF_RING_SIZE 0x8988 | ||
258 | #define VGT_OFFCHIP_LDS_BASE 0x89b4 | ||
259 | |||
260 | #define PA_SC_LINE_STIPPLE_STATE 0x8B10 | ||
261 | #define PA_CL_ENHANCE 0x8A14 | ||
262 | #define CLIP_VTX_REORDER_ENA (1 << 0) | ||
263 | #define NUM_CLIP_SEQ(x) ((x) << 1) | ||
264 | #define PA_SC_FIFO_SIZE 0x8BCC | ||
265 | #define SC_PRIM_FIFO_SIZE(x) ((x) << 0) | ||
266 | #define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12) | ||
267 | #define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20) | ||
268 | #define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24 | ||
269 | #define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) | ||
270 | #define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) | ||
271 | |||
272 | #define SQ_CONFIG 0x8C00 | ||
273 | #define VC_ENABLE (1 << 0) | ||
274 | #define EXPORT_SRC_C (1 << 1) | ||
275 | #define GFX_PRIO(x) ((x) << 2) | ||
276 | #define CS1_PRIO(x) ((x) << 4) | ||
277 | #define CS2_PRIO(x) ((x) << 6) | ||
278 | #define SQ_GPR_RESOURCE_MGMT_1 0x8C04 | ||
279 | #define NUM_PS_GPRS(x) ((x) << 0) | ||
280 | #define NUM_VS_GPRS(x) ((x) << 16) | ||
281 | #define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28) | ||
282 | #define SQ_ESGS_RING_SIZE 0x8c44 | ||
283 | #define SQ_GSVS_RING_SIZE 0x8c4c | ||
284 | #define SQ_ESTMP_RING_BASE 0x8c50 | ||
285 | #define SQ_ESTMP_RING_SIZE 0x8c54 | ||
286 | #define SQ_GSTMP_RING_BASE 0x8c58 | ||
287 | #define SQ_GSTMP_RING_SIZE 0x8c5c | ||
288 | #define SQ_VSTMP_RING_BASE 0x8c60 | ||
289 | #define SQ_VSTMP_RING_SIZE 0x8c64 | ||
290 | #define SQ_PSTMP_RING_BASE 0x8c68 | ||
291 | #define SQ_PSTMP_RING_SIZE 0x8c6c | ||
292 | #define SQ_MS_FIFO_SIZES 0x8CF0 | ||
293 | #define CACHE_FIFO_SIZE(x) ((x) << 0) | ||
294 | #define FETCH_FIFO_HIWATER(x) ((x) << 8) | ||
295 | #define DONE_FIFO_HIWATER(x) ((x) << 16) | ||
296 | #define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24) | ||
297 | #define SQ_LSTMP_RING_BASE 0x8e10 | ||
298 | #define SQ_LSTMP_RING_SIZE 0x8e14 | ||
299 | #define SQ_HSTMP_RING_BASE 0x8e18 | ||
300 | #define SQ_HSTMP_RING_SIZE 0x8e1c | ||
301 | #define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C | ||
302 | #define DYN_GPR_ENABLE (1 << 8) | ||
303 | #define SQ_CONST_MEM_BASE 0x8df8 | ||
304 | |||
305 | #define SX_EXPORT_BUFFER_SIZES 0x900C | ||
306 | #define COLOR_BUFFER_SIZE(x) ((x) << 0) | ||
307 | #define POSITION_BUFFER_SIZE(x) ((x) << 8) | ||
308 | #define SMX_BUFFER_SIZE(x) ((x) << 16) | ||
309 | #define SX_DEBUG_1 0x9058 | ||
310 | #define ENABLE_NEW_SMX_ADDRESS (1 << 16) | ||
311 | |||
312 | #define SPI_CONFIG_CNTL 0x9100 | ||
313 | #define GPR_WRITE_PRIORITY(x) ((x) << 0) | ||
314 | #define SPI_CONFIG_CNTL_1 0x913C | ||
315 | #define VTX_DONE_DELAY(x) ((x) << 0) | ||
316 | #define INTERP_ONE_PRIM_PER_ROW (1 << 4) | ||
317 | #define CRC_SIMD_ID_WADDR_DISABLE (1 << 8) | ||
318 | |||
319 | #define CGTS_TCC_DISABLE 0x9148 | ||
320 | #define CGTS_USER_TCC_DISABLE 0x914C | ||
321 | #define TCC_DISABLE_MASK 0xFFFF0000 | ||
322 | #define TCC_DISABLE_SHIFT 16 | ||
323 | #define CGTS_SM_CTRL_REG 0x9150 | ||
324 | #define OVERRIDE (1 << 21) | ||
325 | |||
326 | #define TA_CNTL_AUX 0x9508 | ||
327 | #define DISABLE_CUBE_WRAP (1 << 0) | ||
328 | #define DISABLE_CUBE_ANISO (1 << 1) | ||
329 | |||
330 | #define TCP_CHAN_STEER_LO 0x960c | ||
331 | #define TCP_CHAN_STEER_HI 0x9610 | ||
332 | |||
333 | #define CC_RB_BACKEND_DISABLE 0x98F4 | ||
334 | #define BACKEND_DISABLE(x) ((x) << 16) | ||
335 | #define GB_ADDR_CONFIG 0x98F8 | ||
336 | #define NUM_PIPES(x) ((x) << 0) | ||
337 | #define NUM_PIPES_MASK 0x00000007 | ||
338 | #define NUM_PIPES_SHIFT 0 | ||
339 | #define PIPE_INTERLEAVE_SIZE(x) ((x) << 4) | ||
340 | #define PIPE_INTERLEAVE_SIZE_MASK 0x00000070 | ||
341 | #define PIPE_INTERLEAVE_SIZE_SHIFT 4 | ||
342 | #define BANK_INTERLEAVE_SIZE(x) ((x) << 8) | ||
343 | #define NUM_SHADER_ENGINES(x) ((x) << 12) | ||
344 | #define NUM_SHADER_ENGINES_MASK 0x00003000 | ||
345 | #define NUM_SHADER_ENGINES_SHIFT 12 | ||
346 | #define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16) | ||
347 | #define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000 | ||
348 | #define SHADER_ENGINE_TILE_SIZE_SHIFT 16 | ||
349 | #define NUM_GPUS(x) ((x) << 20) | ||
350 | #define NUM_GPUS_MASK 0x00700000 | ||
351 | #define NUM_GPUS_SHIFT 20 | ||
352 | #define MULTI_GPU_TILE_SIZE(x) ((x) << 24) | ||
353 | #define MULTI_GPU_TILE_SIZE_MASK 0x03000000 | ||
354 | #define MULTI_GPU_TILE_SIZE_SHIFT 24 | ||
355 | #define ROW_SIZE(x) ((x) << 28) | ||
356 | #define ROW_SIZE_MASK 0x30000000 | ||
357 | #define ROW_SIZE_SHIFT 28 | ||
358 | #define NUM_LOWER_PIPES(x) ((x) << 30) | ||
359 | #define NUM_LOWER_PIPES_MASK 0x40000000 | ||
360 | #define NUM_LOWER_PIPES_SHIFT 30 | ||
361 | #define GB_BACKEND_MAP 0x98FC | ||
362 | |||
363 | #define CB_PERF_CTR0_SEL_0 0x9A20 | ||
364 | #define CB_PERF_CTR0_SEL_1 0x9A24 | ||
365 | #define CB_PERF_CTR1_SEL_0 0x9A28 | ||
366 | #define CB_PERF_CTR1_SEL_1 0x9A2C | ||
367 | #define CB_PERF_CTR2_SEL_0 0x9A30 | ||
368 | #define CB_PERF_CTR2_SEL_1 0x9A34 | ||
369 | #define CB_PERF_CTR3_SEL_0 0x9A38 | ||
370 | #define CB_PERF_CTR3_SEL_1 0x9A3C | ||
371 | |||
372 | #define GC_USER_RB_BACKEND_DISABLE 0x9B7C | ||
373 | #define BACKEND_DISABLE_MASK 0x00FF0000 | ||
374 | #define BACKEND_DISABLE_SHIFT 16 | ||
375 | |||
376 | #define SMX_DC_CTL0 0xA020 | ||
377 | #define USE_HASH_FUNCTION (1 << 0) | ||
378 | #define NUMBER_OF_SETS(x) ((x) << 1) | ||
379 | #define FLUSH_ALL_ON_EVENT (1 << 10) | ||
380 | #define STALL_ON_EVENT (1 << 11) | ||
381 | #define SMX_EVENT_CTL 0xA02C | ||
382 | #define ES_FLUSH_CTL(x) ((x) << 0) | ||
383 | #define GS_FLUSH_CTL(x) ((x) << 3) | ||
384 | #define ACK_FLUSH_CTL(x) ((x) << 6) | ||
385 | #define SYNC_FLUSH_CTL (1 << 8) | ||
386 | |||
387 | #define CP_RB0_BASE 0xC100 | ||
388 | #define CP_RB0_CNTL 0xC104 | ||
389 | #define RB_BUFSZ(x) ((x) << 0) | ||
390 | #define RB_BLKSZ(x) ((x) << 8) | ||
391 | #define RB_NO_UPDATE (1 << 27) | ||
392 | #define RB_RPTR_WR_ENA (1 << 31) | ||
393 | #define BUF_SWAP_32BIT (2 << 16) | ||
394 | #define CP_RB0_RPTR_ADDR 0xC10C | ||
395 | #define CP_RB0_RPTR_ADDR_HI 0xC110 | ||
396 | #define CP_RB0_WPTR 0xC114 | ||
397 | #define CP_RB1_BASE 0xC180 | ||
398 | #define CP_RB1_CNTL 0xC184 | ||
399 | #define CP_RB1_RPTR_ADDR 0xC188 | ||
400 | #define CP_RB1_RPTR_ADDR_HI 0xC18C | ||
401 | #define CP_RB1_WPTR 0xC190 | ||
402 | #define CP_RB2_BASE 0xC194 | ||
403 | #define CP_RB2_CNTL 0xC198 | ||
404 | #define CP_RB2_RPTR_ADDR 0xC19C | ||
405 | #define CP_RB2_RPTR_ADDR_HI 0xC1A0 | ||
406 | #define CP_RB2_WPTR 0xC1A4 | ||
407 | #define CP_PFP_UCODE_ADDR 0xC150 | ||
408 | #define CP_PFP_UCODE_DATA 0xC154 | ||
409 | #define CP_ME_RAM_RADDR 0xC158 | ||
410 | #define CP_ME_RAM_WADDR 0xC15C | ||
411 | #define CP_ME_RAM_DATA 0xC160 | ||
412 | #define CP_DEBUG 0xC1FC | ||
413 | |||
414 | /* | ||
415 | * PM4 | ||
416 | */ | ||
417 | #define PACKET_TYPE0 0 | ||
418 | #define PACKET_TYPE1 1 | ||
419 | #define PACKET_TYPE2 2 | ||
420 | #define PACKET_TYPE3 3 | ||
421 | |||
422 | #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) | ||
423 | #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) | ||
424 | #define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2) | ||
425 | #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) | ||
426 | #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ | ||
427 | (((reg) >> 2) & 0xFFFF) | \ | ||
428 | ((n) & 0x3FFF) << 16) | ||
429 | #define CP_PACKET2 0x80000000 | ||
430 | #define PACKET2_PAD_SHIFT 0 | ||
431 | #define PACKET2_PAD_MASK (0x3fffffff << 0) | ||
432 | |||
433 | #define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) | ||
434 | |||
435 | #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ | ||
436 | (((op) & 0xFF) << 8) | \ | ||
437 | ((n) & 0x3FFF) << 16) | ||
438 | |||
439 | /* Packet 3 types */ | ||
440 | #define PACKET3_NOP 0x10 | ||
441 | #define PACKET3_SET_BASE 0x11 | ||
442 | #define PACKET3_CLEAR_STATE 0x12 | ||
443 | #define PACKET3_INDEX_BUFFER_SIZE 0x13 | ||
444 | #define PACKET3_DEALLOC_STATE 0x14 | ||
445 | #define PACKET3_DISPATCH_DIRECT 0x15 | ||
446 | #define PACKET3_DISPATCH_INDIRECT 0x16 | ||
447 | #define PACKET3_INDIRECT_BUFFER_END 0x17 | ||
448 | #define PACKET3_SET_PREDICATION 0x20 | ||
449 | #define PACKET3_REG_RMW 0x21 | ||
450 | #define PACKET3_COND_EXEC 0x22 | ||
451 | #define PACKET3_PRED_EXEC 0x23 | ||
452 | #define PACKET3_DRAW_INDIRECT 0x24 | ||
453 | #define PACKET3_DRAW_INDEX_INDIRECT 0x25 | ||
454 | #define PACKET3_INDEX_BASE 0x26 | ||
455 | #define PACKET3_DRAW_INDEX_2 0x27 | ||
456 | #define PACKET3_CONTEXT_CONTROL 0x28 | ||
457 | #define PACKET3_DRAW_INDEX_OFFSET 0x29 | ||
458 | #define PACKET3_INDEX_TYPE 0x2A | ||
459 | #define PACKET3_DRAW_INDEX 0x2B | ||
460 | #define PACKET3_DRAW_INDEX_AUTO 0x2D | ||
461 | #define PACKET3_DRAW_INDEX_IMMD 0x2E | ||
462 | #define PACKET3_NUM_INSTANCES 0x2F | ||
463 | #define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30 | ||
464 | #define PACKET3_INDIRECT_BUFFER 0x32 | ||
465 | #define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 | ||
466 | #define PACKET3_DRAW_INDEX_OFFSET_2 0x35 | ||
467 | #define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36 | ||
468 | #define PACKET3_WRITE_DATA 0x37 | ||
469 | #define PACKET3_MEM_SEMAPHORE 0x39 | ||
470 | #define PACKET3_MPEG_INDEX 0x3A | ||
471 | #define PACKET3_WAIT_REG_MEM 0x3C | ||
472 | #define PACKET3_MEM_WRITE 0x3D | ||
473 | #define PACKET3_SURFACE_SYNC 0x43 | ||
474 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) | ||
475 | # define PACKET3_CB1_DEST_BASE_ENA (1 << 7) | ||
476 | # define PACKET3_CB2_DEST_BASE_ENA (1 << 8) | ||
477 | # define PACKET3_CB3_DEST_BASE_ENA (1 << 9) | ||
478 | # define PACKET3_CB4_DEST_BASE_ENA (1 << 10) | ||
479 | # define PACKET3_CB5_DEST_BASE_ENA (1 << 11) | ||
480 | # define PACKET3_CB6_DEST_BASE_ENA (1 << 12) | ||
481 | # define PACKET3_CB7_DEST_BASE_ENA (1 << 13) | ||
482 | # define PACKET3_DB_DEST_BASE_ENA (1 << 14) | ||
483 | # define PACKET3_CB8_DEST_BASE_ENA (1 << 15) | ||
484 | # define PACKET3_CB9_DEST_BASE_ENA (1 << 16) | ||
485 | # define PACKET3_CB10_DEST_BASE_ENA (1 << 17) | ||
486 | # define PACKET3_CB11_DEST_BASE_ENA (1 << 18) | ||
487 | # define PACKET3_FULL_CACHE_ENA (1 << 20) | ||
488 | # define PACKET3_TC_ACTION_ENA (1 << 23) | ||
489 | # define PACKET3_CB_ACTION_ENA (1 << 25) | ||
490 | # define PACKET3_DB_ACTION_ENA (1 << 26) | ||
491 | # define PACKET3_SH_ACTION_ENA (1 << 27) | ||
492 | # define PACKET3_SX_ACTION_ENA (1 << 28) | ||
493 | #define PACKET3_ME_INITIALIZE 0x44 | ||
494 | #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) | ||
495 | #define PACKET3_COND_WRITE 0x45 | ||
496 | #define PACKET3_EVENT_WRITE 0x46 | ||
497 | #define PACKET3_EVENT_WRITE_EOP 0x47 | ||
498 | #define PACKET3_EVENT_WRITE_EOS 0x48 | ||
499 | #define PACKET3_PREAMBLE_CNTL 0x4A | ||
500 | # define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28) | ||
501 | # define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28) | ||
502 | #define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C | ||
503 | #define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D | ||
504 | #define PACKET3_ALU_PS_CONST_UPDATE 0x4E | ||
505 | #define PACKET3_ALU_VS_CONST_UPDATE 0x4F | ||
506 | #define PACKET3_ONE_REG_WRITE 0x57 | ||
507 | #define PACKET3_SET_CONFIG_REG 0x68 | ||
508 | #define PACKET3_SET_CONFIG_REG_START 0x00008000 | ||
509 | #define PACKET3_SET_CONFIG_REG_END 0x0000ac00 | ||
510 | #define PACKET3_SET_CONTEXT_REG 0x69 | ||
511 | #define PACKET3_SET_CONTEXT_REG_START 0x00028000 | ||
512 | #define PACKET3_SET_CONTEXT_REG_END 0x00029000 | ||
513 | #define PACKET3_SET_ALU_CONST 0x6A | ||
514 | /* alu const buffers only; no reg file */ | ||
515 | #define PACKET3_SET_BOOL_CONST 0x6B | ||
516 | #define PACKET3_SET_BOOL_CONST_START 0x0003a500 | ||
517 | #define PACKET3_SET_BOOL_CONST_END 0x0003a518 | ||
518 | #define PACKET3_SET_LOOP_CONST 0x6C | ||
519 | #define PACKET3_SET_LOOP_CONST_START 0x0003a200 | ||
520 | #define PACKET3_SET_LOOP_CONST_END 0x0003a500 | ||
521 | #define PACKET3_SET_RESOURCE 0x6D | ||
522 | #define PACKET3_SET_RESOURCE_START 0x00030000 | ||
523 | #define PACKET3_SET_RESOURCE_END 0x00038000 | ||
524 | #define PACKET3_SET_SAMPLER 0x6E | ||
525 | #define PACKET3_SET_SAMPLER_START 0x0003c000 | ||
526 | #define PACKET3_SET_SAMPLER_END 0x0003c600 | ||
527 | #define PACKET3_SET_CTL_CONST 0x6F | ||
528 | #define PACKET3_SET_CTL_CONST_START 0x0003cff0 | ||
529 | #define PACKET3_SET_CTL_CONST_END 0x0003ff0c | ||
530 | #define PACKET3_SET_RESOURCE_OFFSET 0x70 | ||
531 | #define PACKET3_SET_ALU_CONST_VS 0x71 | ||
532 | #define PACKET3_SET_ALU_CONST_DI 0x72 | ||
533 | #define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 | ||
534 | #define PACKET3_SET_RESOURCE_INDIRECT 0x74 | ||
535 | #define PACKET3_SET_APPEND_CNT 0x75 | ||
536 | |||
537 | #endif | ||
538 | |||
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e59422320bb6..f2204cb1ccdf 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -68,6 +68,39 @@ MODULE_FIRMWARE(FIRMWARE_R520); | |||
68 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 | 68 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
69 | */ | 69 | */ |
70 | 70 | ||
71 | void r100_pre_page_flip(struct radeon_device *rdev, int crtc) | ||
72 | { | ||
73 | /* enable the pflip int */ | ||
74 | radeon_irq_kms_pflip_irq_get(rdev, crtc); | ||
75 | } | ||
76 | |||
77 | void r100_post_page_flip(struct radeon_device *rdev, int crtc) | ||
78 | { | ||
79 | /* disable the pflip int */ | ||
80 | radeon_irq_kms_pflip_irq_put(rdev, crtc); | ||
81 | } | ||
82 | |||
83 | u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | ||
84 | { | ||
85 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | ||
86 | u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; | ||
87 | |||
88 | /* Lock the graphics update lock */ | ||
89 | /* update the scanout addresses */ | ||
90 | WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); | ||
91 | |||
92 | /* Wait for update_pending to go high. */ | ||
93 | while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)); | ||
94 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); | ||
95 | |||
96 | /* Unlock the lock, so double-buffering can take place inside vblank */ | ||
97 | tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK; | ||
98 | WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); | ||
99 | |||
100 | /* Return current update_pending status: */ | ||
101 | return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET; | ||
102 | } | ||
103 | |||
71 | void r100_pm_get_dynpm_state(struct radeon_device *rdev) | 104 | void r100_pm_get_dynpm_state(struct radeon_device *rdev) |
72 | { | 105 | { |
73 | int i; | 106 | int i; |
@@ -442,7 +475,7 @@ int r100_pci_gart_init(struct radeon_device *rdev) | |||
442 | int r; | 475 | int r; |
443 | 476 | ||
444 | if (rdev->gart.table.ram.ptr) { | 477 | if (rdev->gart.table.ram.ptr) { |
445 | WARN(1, "R100 PCI GART already initialized.\n"); | 478 | WARN(1, "R100 PCI GART already initialized\n"); |
446 | return 0; | 479 | return 0; |
447 | } | 480 | } |
448 | /* Initialize common gart structure */ | 481 | /* Initialize common gart structure */ |
@@ -516,7 +549,7 @@ int r100_irq_set(struct radeon_device *rdev) | |||
516 | uint32_t tmp = 0; | 549 | uint32_t tmp = 0; |
517 | 550 | ||
518 | if (!rdev->irq.installed) { | 551 | if (!rdev->irq.installed) { |
519 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | 552 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
520 | WREG32(R_000040_GEN_INT_CNTL, 0); | 553 | WREG32(R_000040_GEN_INT_CNTL, 0); |
521 | return -EINVAL; | 554 | return -EINVAL; |
522 | } | 555 | } |
@@ -526,10 +559,12 @@ int r100_irq_set(struct radeon_device *rdev) | |||
526 | if (rdev->irq.gui_idle) { | 559 | if (rdev->irq.gui_idle) { |
527 | tmp |= RADEON_GUI_IDLE_MASK; | 560 | tmp |= RADEON_GUI_IDLE_MASK; |
528 | } | 561 | } |
529 | if (rdev->irq.crtc_vblank_int[0]) { | 562 | if (rdev->irq.crtc_vblank_int[0] || |
563 | rdev->irq.pflip[0]) { | ||
530 | tmp |= RADEON_CRTC_VBLANK_MASK; | 564 | tmp |= RADEON_CRTC_VBLANK_MASK; |
531 | } | 565 | } |
532 | if (rdev->irq.crtc_vblank_int[1]) { | 566 | if (rdev->irq.crtc_vblank_int[1] || |
567 | rdev->irq.pflip[1]) { | ||
533 | tmp |= RADEON_CRTC2_VBLANK_MASK; | 568 | tmp |= RADEON_CRTC2_VBLANK_MASK; |
534 | } | 569 | } |
535 | if (rdev->irq.hpd[0]) { | 570 | if (rdev->irq.hpd[0]) { |
@@ -600,14 +635,22 @@ int r100_irq_process(struct radeon_device *rdev) | |||
600 | } | 635 | } |
601 | /* Vertical blank interrupts */ | 636 | /* Vertical blank interrupts */ |
602 | if (status & RADEON_CRTC_VBLANK_STAT) { | 637 | if (status & RADEON_CRTC_VBLANK_STAT) { |
603 | drm_handle_vblank(rdev->ddev, 0); | 638 | if (rdev->irq.crtc_vblank_int[0]) { |
604 | rdev->pm.vblank_sync = true; | 639 | drm_handle_vblank(rdev->ddev, 0); |
605 | wake_up(&rdev->irq.vblank_queue); | 640 | rdev->pm.vblank_sync = true; |
641 | wake_up(&rdev->irq.vblank_queue); | ||
642 | } | ||
643 | if (rdev->irq.pflip[0]) | ||
644 | radeon_crtc_handle_flip(rdev, 0); | ||
606 | } | 645 | } |
607 | if (status & RADEON_CRTC2_VBLANK_STAT) { | 646 | if (status & RADEON_CRTC2_VBLANK_STAT) { |
608 | drm_handle_vblank(rdev->ddev, 1); | 647 | if (rdev->irq.crtc_vblank_int[1]) { |
609 | rdev->pm.vblank_sync = true; | 648 | drm_handle_vblank(rdev->ddev, 1); |
610 | wake_up(&rdev->irq.vblank_queue); | 649 | rdev->pm.vblank_sync = true; |
650 | wake_up(&rdev->irq.vblank_queue); | ||
651 | } | ||
652 | if (rdev->irq.pflip[1]) | ||
653 | radeon_crtc_handle_flip(rdev, 1); | ||
611 | } | 654 | } |
612 | if (status & RADEON_FP_DETECT_STAT) { | 655 | if (status & RADEON_FP_DETECT_STAT) { |
613 | queue_hotplug = true; | 656 | queue_hotplug = true; |
@@ -622,7 +665,7 @@ int r100_irq_process(struct radeon_device *rdev) | |||
622 | /* reset gui idle ack. the status bit is broken */ | 665 | /* reset gui idle ack. the status bit is broken */ |
623 | rdev->irq.gui_idle_acked = false; | 666 | rdev->irq.gui_idle_acked = false; |
624 | if (queue_hotplug) | 667 | if (queue_hotplug) |
625 | queue_work(rdev->wq, &rdev->hotplug_work); | 668 | schedule_work(&rdev->hotplug_work); |
626 | if (rdev->msi_enabled) { | 669 | if (rdev->msi_enabled) { |
627 | switch (rdev->family) { | 670 | switch (rdev->family) { |
628 | case CHIP_RS400: | 671 | case CHIP_RS400: |
@@ -675,67 +718,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev, | |||
675 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); | 718 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); |
676 | } | 719 | } |
677 | 720 | ||
678 | int r100_wb_init(struct radeon_device *rdev) | ||
679 | { | ||
680 | int r; | ||
681 | |||
682 | if (rdev->wb.wb_obj == NULL) { | ||
683 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, | ||
684 | RADEON_GEM_DOMAIN_GTT, | ||
685 | &rdev->wb.wb_obj); | ||
686 | if (r) { | ||
687 | dev_err(rdev->dev, "(%d) create WB buffer failed\n", r); | ||
688 | return r; | ||
689 | } | ||
690 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | ||
691 | if (unlikely(r != 0)) | ||
692 | return r; | ||
693 | r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, | ||
694 | &rdev->wb.gpu_addr); | ||
695 | if (r) { | ||
696 | dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r); | ||
697 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
698 | return r; | ||
699 | } | ||
700 | r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | ||
701 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
702 | if (r) { | ||
703 | dev_err(rdev->dev, "(%d) map WB buffer failed\n", r); | ||
704 | return r; | ||
705 | } | ||
706 | } | ||
707 | WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr); | ||
708 | WREG32(R_00070C_CP_RB_RPTR_ADDR, | ||
709 | S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2)); | ||
710 | WREG32(R_000770_SCRATCH_UMSK, 0xff); | ||
711 | return 0; | ||
712 | } | ||
713 | |||
714 | void r100_wb_disable(struct radeon_device *rdev) | ||
715 | { | ||
716 | WREG32(R_000770_SCRATCH_UMSK, 0); | ||
717 | } | ||
718 | |||
719 | void r100_wb_fini(struct radeon_device *rdev) | ||
720 | { | ||
721 | int r; | ||
722 | |||
723 | r100_wb_disable(rdev); | ||
724 | if (rdev->wb.wb_obj) { | ||
725 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | ||
726 | if (unlikely(r != 0)) { | ||
727 | dev_err(rdev->dev, "(%d) can't finish WB\n", r); | ||
728 | return; | ||
729 | } | ||
730 | radeon_bo_kunmap(rdev->wb.wb_obj); | ||
731 | radeon_bo_unpin(rdev->wb.wb_obj); | ||
732 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
733 | radeon_bo_unref(&rdev->wb.wb_obj); | ||
734 | rdev->wb.wb = NULL; | ||
735 | rdev->wb.wb_obj = NULL; | ||
736 | } | ||
737 | } | ||
738 | |||
739 | int r100_copy_blit(struct radeon_device *rdev, | 721 | int r100_copy_blit(struct radeon_device *rdev, |
740 | uint64_t src_offset, | 722 | uint64_t src_offset, |
741 | uint64_t dst_offset, | 723 | uint64_t dst_offset, |
@@ -996,20 +978,32 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
996 | WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); | 978 | WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); |
997 | tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | | 979 | tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | |
998 | REG_SET(RADEON_RB_BLKSZ, rb_blksz) | | 980 | REG_SET(RADEON_RB_BLKSZ, rb_blksz) | |
999 | REG_SET(RADEON_MAX_FETCH, max_fetch) | | 981 | REG_SET(RADEON_MAX_FETCH, max_fetch)); |
1000 | RADEON_RB_NO_UPDATE); | ||
1001 | #ifdef __BIG_ENDIAN | 982 | #ifdef __BIG_ENDIAN |
1002 | tmp |= RADEON_BUF_SWAP_32BIT; | 983 | tmp |= RADEON_BUF_SWAP_32BIT; |
1003 | #endif | 984 | #endif |
1004 | WREG32(RADEON_CP_RB_CNTL, tmp); | 985 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE); |
1005 | 986 | ||
1006 | /* Set ring address */ | 987 | /* Set ring address */ |
1007 | DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr); | 988 | DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr); |
1008 | WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr); | 989 | WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr); |
1009 | /* Force read & write ptr to 0 */ | 990 | /* Force read & write ptr to 0 */ |
1010 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); | 991 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); |
1011 | WREG32(RADEON_CP_RB_RPTR_WR, 0); | 992 | WREG32(RADEON_CP_RB_RPTR_WR, 0); |
1012 | WREG32(RADEON_CP_RB_WPTR, 0); | 993 | WREG32(RADEON_CP_RB_WPTR, 0); |
994 | |||
995 | /* set the wb address whether it's enabled or not */ | ||
996 | WREG32(R_00070C_CP_RB_RPTR_ADDR, | ||
997 | S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2)); | ||
998 | WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET); | ||
999 | |||
1000 | if (rdev->wb.enabled) | ||
1001 | WREG32(R_000770_SCRATCH_UMSK, 0xff); | ||
1002 | else { | ||
1003 | tmp |= RADEON_RB_NO_UPDATE; | ||
1004 | WREG32(R_000770_SCRATCH_UMSK, 0); | ||
1005 | } | ||
1006 | |||
1013 | WREG32(RADEON_CP_RB_CNTL, tmp); | 1007 | WREG32(RADEON_CP_RB_CNTL, tmp); |
1014 | udelay(10); | 1008 | udelay(10); |
1015 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); | 1009 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); |
@@ -1020,8 +1014,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1020 | WREG32(RADEON_CP_CSQ_MODE, | 1014 | WREG32(RADEON_CP_CSQ_MODE, |
1021 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | | 1015 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | |
1022 | REG_SET(RADEON_INDIRECT1_START, indirect1_start)); | 1016 | REG_SET(RADEON_INDIRECT1_START, indirect1_start)); |
1023 | WREG32(0x718, 0); | 1017 | WREG32(RADEON_CP_RB_WPTR_DELAY, 0); |
1024 | WREG32(0x744, 0x00004D4D); | 1018 | WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); |
1025 | WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); | 1019 | WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); |
1026 | radeon_ring_start(rdev); | 1020 | radeon_ring_start(rdev); |
1027 | r = radeon_ring_test(rdev); | 1021 | r = radeon_ring_test(rdev); |
@@ -1030,7 +1024,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1030 | return r; | 1024 | return r; |
1031 | } | 1025 | } |
1032 | rdev->cp.ready = true; | 1026 | rdev->cp.ready = true; |
1033 | rdev->mc.active_vram_size = rdev->mc.real_vram_size; | 1027 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
1034 | return 0; | 1028 | return 0; |
1035 | } | 1029 | } |
1036 | 1030 | ||
@@ -1048,10 +1042,11 @@ void r100_cp_fini(struct radeon_device *rdev) | |||
1048 | void r100_cp_disable(struct radeon_device *rdev) | 1042 | void r100_cp_disable(struct radeon_device *rdev) |
1049 | { | 1043 | { |
1050 | /* Disable ring */ | 1044 | /* Disable ring */ |
1051 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 1045 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
1052 | rdev->cp.ready = false; | 1046 | rdev->cp.ready = false; |
1053 | WREG32(RADEON_CP_CSQ_MODE, 0); | 1047 | WREG32(RADEON_CP_CSQ_MODE, 0); |
1054 | WREG32(RADEON_CP_CSQ_CNTL, 0); | 1048 | WREG32(RADEON_CP_CSQ_CNTL, 0); |
1049 | WREG32(R_000770_SCRATCH_UMSK, 0); | ||
1055 | if (r100_gui_wait_for_idle(rdev)) { | 1050 | if (r100_gui_wait_for_idle(rdev)) { |
1056 | printk(KERN_WARNING "Failed to wait GUI idle while " | 1051 | printk(KERN_WARNING "Failed to wait GUI idle while " |
1057 | "programming pipes. Bad things might happen.\n"); | 1052 | "programming pipes. Bad things might happen.\n"); |
@@ -1210,14 +1205,12 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
1210 | if (waitreloc.reg != RADEON_WAIT_UNTIL || | 1205 | if (waitreloc.reg != RADEON_WAIT_UNTIL || |
1211 | waitreloc.count != 0) { | 1206 | waitreloc.count != 0) { |
1212 | DRM_ERROR("vline wait had illegal wait until segment\n"); | 1207 | DRM_ERROR("vline wait had illegal wait until segment\n"); |
1213 | r = -EINVAL; | 1208 | return -EINVAL; |
1214 | return r; | ||
1215 | } | 1209 | } |
1216 | 1210 | ||
1217 | if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) { | 1211 | if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) { |
1218 | DRM_ERROR("vline wait had illegal wait until\n"); | 1212 | DRM_ERROR("vline wait had illegal wait until\n"); |
1219 | r = -EINVAL; | 1213 | return -EINVAL; |
1220 | return r; | ||
1221 | } | 1214 | } |
1222 | 1215 | ||
1223 | /* jump over the NOP */ | 1216 | /* jump over the NOP */ |
@@ -1235,8 +1228,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
1235 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 1228 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
1236 | if (!obj) { | 1229 | if (!obj) { |
1237 | DRM_ERROR("cannot find crtc %d\n", crtc_id); | 1230 | DRM_ERROR("cannot find crtc %d\n", crtc_id); |
1238 | r = -EINVAL; | 1231 | return -EINVAL; |
1239 | goto out; | ||
1240 | } | 1232 | } |
1241 | crtc = obj_to_crtc(obj); | 1233 | crtc = obj_to_crtc(obj); |
1242 | radeon_crtc = to_radeon_crtc(crtc); | 1234 | radeon_crtc = to_radeon_crtc(crtc); |
@@ -1258,14 +1250,13 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
1258 | break; | 1250 | break; |
1259 | default: | 1251 | default: |
1260 | DRM_ERROR("unknown crtc reloc\n"); | 1252 | DRM_ERROR("unknown crtc reloc\n"); |
1261 | r = -EINVAL; | 1253 | return -EINVAL; |
1262 | goto out; | ||
1263 | } | 1254 | } |
1264 | ib[h_idx] = header; | 1255 | ib[h_idx] = header; |
1265 | ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; | 1256 | ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; |
1266 | } | 1257 | } |
1267 | out: | 1258 | |
1268 | return r; | 1259 | return 0; |
1269 | } | 1260 | } |
1270 | 1261 | ||
1271 | /** | 1262 | /** |
@@ -1415,6 +1406,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1415 | } | 1406 | } |
1416 | track->zb.robj = reloc->robj; | 1407 | track->zb.robj = reloc->robj; |
1417 | track->zb.offset = idx_value; | 1408 | track->zb.offset = idx_value; |
1409 | track->zb_dirty = true; | ||
1418 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1410 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1419 | break; | 1411 | break; |
1420 | case RADEON_RB3D_COLOROFFSET: | 1412 | case RADEON_RB3D_COLOROFFSET: |
@@ -1427,6 +1419,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1427 | } | 1419 | } |
1428 | track->cb[0].robj = reloc->robj; | 1420 | track->cb[0].robj = reloc->robj; |
1429 | track->cb[0].offset = idx_value; | 1421 | track->cb[0].offset = idx_value; |
1422 | track->cb_dirty = true; | ||
1430 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1423 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1431 | break; | 1424 | break; |
1432 | case RADEON_PP_TXOFFSET_0: | 1425 | case RADEON_PP_TXOFFSET_0: |
@@ -1442,6 +1435,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1442 | } | 1435 | } |
1443 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1436 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1444 | track->textures[i].robj = reloc->robj; | 1437 | track->textures[i].robj = reloc->robj; |
1438 | track->tex_dirty = true; | ||
1445 | break; | 1439 | break; |
1446 | case RADEON_PP_CUBIC_OFFSET_T0_0: | 1440 | case RADEON_PP_CUBIC_OFFSET_T0_0: |
1447 | case RADEON_PP_CUBIC_OFFSET_T0_1: | 1441 | case RADEON_PP_CUBIC_OFFSET_T0_1: |
@@ -1459,6 +1453,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1459 | track->textures[0].cube_info[i].offset = idx_value; | 1453 | track->textures[0].cube_info[i].offset = idx_value; |
1460 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1454 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1461 | track->textures[0].cube_info[i].robj = reloc->robj; | 1455 | track->textures[0].cube_info[i].robj = reloc->robj; |
1456 | track->tex_dirty = true; | ||
1462 | break; | 1457 | break; |
1463 | case RADEON_PP_CUBIC_OFFSET_T1_0: | 1458 | case RADEON_PP_CUBIC_OFFSET_T1_0: |
1464 | case RADEON_PP_CUBIC_OFFSET_T1_1: | 1459 | case RADEON_PP_CUBIC_OFFSET_T1_1: |
@@ -1476,6 +1471,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1476 | track->textures[1].cube_info[i].offset = idx_value; | 1471 | track->textures[1].cube_info[i].offset = idx_value; |
1477 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1472 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1478 | track->textures[1].cube_info[i].robj = reloc->robj; | 1473 | track->textures[1].cube_info[i].robj = reloc->robj; |
1474 | track->tex_dirty = true; | ||
1479 | break; | 1475 | break; |
1480 | case RADEON_PP_CUBIC_OFFSET_T2_0: | 1476 | case RADEON_PP_CUBIC_OFFSET_T2_0: |
1481 | case RADEON_PP_CUBIC_OFFSET_T2_1: | 1477 | case RADEON_PP_CUBIC_OFFSET_T2_1: |
@@ -1493,9 +1489,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1493 | track->textures[2].cube_info[i].offset = idx_value; | 1489 | track->textures[2].cube_info[i].offset = idx_value; |
1494 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1490 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1495 | track->textures[2].cube_info[i].robj = reloc->robj; | 1491 | track->textures[2].cube_info[i].robj = reloc->robj; |
1492 | track->tex_dirty = true; | ||
1496 | break; | 1493 | break; |
1497 | case RADEON_RE_WIDTH_HEIGHT: | 1494 | case RADEON_RE_WIDTH_HEIGHT: |
1498 | track->maxy = ((idx_value >> 16) & 0x7FF); | 1495 | track->maxy = ((idx_value >> 16) & 0x7FF); |
1496 | track->cb_dirty = true; | ||
1497 | track->zb_dirty = true; | ||
1499 | break; | 1498 | break; |
1500 | case RADEON_RB3D_COLORPITCH: | 1499 | case RADEON_RB3D_COLORPITCH: |
1501 | r = r100_cs_packet_next_reloc(p, &reloc); | 1500 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1516,9 +1515,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1516 | ib[idx] = tmp; | 1515 | ib[idx] = tmp; |
1517 | 1516 | ||
1518 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; | 1517 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; |
1518 | track->cb_dirty = true; | ||
1519 | break; | 1519 | break; |
1520 | case RADEON_RB3D_DEPTHPITCH: | 1520 | case RADEON_RB3D_DEPTHPITCH: |
1521 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; | 1521 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; |
1522 | track->zb_dirty = true; | ||
1522 | break; | 1523 | break; |
1523 | case RADEON_RB3D_CNTL: | 1524 | case RADEON_RB3D_CNTL: |
1524 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { | 1525 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { |
@@ -1543,6 +1544,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1543 | return -EINVAL; | 1544 | return -EINVAL; |
1544 | } | 1545 | } |
1545 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); | 1546 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); |
1547 | track->cb_dirty = true; | ||
1548 | track->zb_dirty = true; | ||
1546 | break; | 1549 | break; |
1547 | case RADEON_RB3D_ZSTENCILCNTL: | 1550 | case RADEON_RB3D_ZSTENCILCNTL: |
1548 | switch (idx_value & 0xf) { | 1551 | switch (idx_value & 0xf) { |
@@ -1560,6 +1563,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1560 | default: | 1563 | default: |
1561 | break; | 1564 | break; |
1562 | } | 1565 | } |
1566 | track->zb_dirty = true; | ||
1563 | break; | 1567 | break; |
1564 | case RADEON_RB3D_ZPASS_ADDR: | 1568 | case RADEON_RB3D_ZPASS_ADDR: |
1565 | r = r100_cs_packet_next_reloc(p, &reloc); | 1569 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1576,6 +1580,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1576 | uint32_t temp = idx_value >> 4; | 1580 | uint32_t temp = idx_value >> 4; |
1577 | for (i = 0; i < track->num_texture; i++) | 1581 | for (i = 0; i < track->num_texture; i++) |
1578 | track->textures[i].enabled = !!(temp & (1 << i)); | 1582 | track->textures[i].enabled = !!(temp & (1 << i)); |
1583 | track->tex_dirty = true; | ||
1579 | } | 1584 | } |
1580 | break; | 1585 | break; |
1581 | case RADEON_SE_VF_CNTL: | 1586 | case RADEON_SE_VF_CNTL: |
@@ -1590,12 +1595,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1590 | i = (reg - RADEON_PP_TEX_SIZE_0) / 8; | 1595 | i = (reg - RADEON_PP_TEX_SIZE_0) / 8; |
1591 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; | 1596 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; |
1592 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; | 1597 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; |
1598 | track->tex_dirty = true; | ||
1593 | break; | 1599 | break; |
1594 | case RADEON_PP_TEX_PITCH_0: | 1600 | case RADEON_PP_TEX_PITCH_0: |
1595 | case RADEON_PP_TEX_PITCH_1: | 1601 | case RADEON_PP_TEX_PITCH_1: |
1596 | case RADEON_PP_TEX_PITCH_2: | 1602 | case RADEON_PP_TEX_PITCH_2: |
1597 | i = (reg - RADEON_PP_TEX_PITCH_0) / 8; | 1603 | i = (reg - RADEON_PP_TEX_PITCH_0) / 8; |
1598 | track->textures[i].pitch = idx_value + 32; | 1604 | track->textures[i].pitch = idx_value + 32; |
1605 | track->tex_dirty = true; | ||
1599 | break; | 1606 | break; |
1600 | case RADEON_PP_TXFILTER_0: | 1607 | case RADEON_PP_TXFILTER_0: |
1601 | case RADEON_PP_TXFILTER_1: | 1608 | case RADEON_PP_TXFILTER_1: |
@@ -1609,6 +1616,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1609 | tmp = (idx_value >> 27) & 0x7; | 1616 | tmp = (idx_value >> 27) & 0x7; |
1610 | if (tmp == 2 || tmp == 6) | 1617 | if (tmp == 2 || tmp == 6) |
1611 | track->textures[i].roundup_h = false; | 1618 | track->textures[i].roundup_h = false; |
1619 | track->tex_dirty = true; | ||
1612 | break; | 1620 | break; |
1613 | case RADEON_PP_TXFORMAT_0: | 1621 | case RADEON_PP_TXFORMAT_0: |
1614 | case RADEON_PP_TXFORMAT_1: | 1622 | case RADEON_PP_TXFORMAT_1: |
@@ -1661,6 +1669,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1661 | } | 1669 | } |
1662 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); | 1670 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); |
1663 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); | 1671 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); |
1672 | track->tex_dirty = true; | ||
1664 | break; | 1673 | break; |
1665 | case RADEON_PP_CUBIC_FACES_0: | 1674 | case RADEON_PP_CUBIC_FACES_0: |
1666 | case RADEON_PP_CUBIC_FACES_1: | 1675 | case RADEON_PP_CUBIC_FACES_1: |
@@ -1671,6 +1680,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1671 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); | 1680 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); |
1672 | track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); | 1681 | track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); |
1673 | } | 1682 | } |
1683 | track->tex_dirty = true; | ||
1674 | break; | 1684 | break; |
1675 | default: | 1685 | default: |
1676 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | 1686 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", |
@@ -2074,12 +2084,13 @@ int r100_asic_reset(struct radeon_device *rdev) | |||
2074 | { | 2084 | { |
2075 | struct r100_mc_save save; | 2085 | struct r100_mc_save save; |
2076 | u32 status, tmp; | 2086 | u32 status, tmp; |
2087 | int ret = 0; | ||
2077 | 2088 | ||
2078 | r100_mc_stop(rdev, &save); | ||
2079 | status = RREG32(R_000E40_RBBM_STATUS); | 2089 | status = RREG32(R_000E40_RBBM_STATUS); |
2080 | if (!G_000E40_GUI_ACTIVE(status)) { | 2090 | if (!G_000E40_GUI_ACTIVE(status)) { |
2081 | return 0; | 2091 | return 0; |
2082 | } | 2092 | } |
2093 | r100_mc_stop(rdev, &save); | ||
2083 | status = RREG32(R_000E40_RBBM_STATUS); | 2094 | status = RREG32(R_000E40_RBBM_STATUS); |
2084 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); | 2095 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); |
2085 | /* stop CP */ | 2096 | /* stop CP */ |
@@ -2119,11 +2130,11 @@ int r100_asic_reset(struct radeon_device *rdev) | |||
2119 | G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { | 2130 | G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { |
2120 | dev_err(rdev->dev, "failed to reset GPU\n"); | 2131 | dev_err(rdev->dev, "failed to reset GPU\n"); |
2121 | rdev->gpu_lockup = true; | 2132 | rdev->gpu_lockup = true; |
2122 | return -1; | 2133 | ret = -1; |
2123 | } | 2134 | } else |
2135 | dev_info(rdev->dev, "GPU reset succeed\n"); | ||
2124 | r100_mc_resume(rdev, &save); | 2136 | r100_mc_resume(rdev, &save); |
2125 | dev_info(rdev->dev, "GPU reset succeed\n"); | 2137 | return ret; |
2126 | return 0; | ||
2127 | } | 2138 | } |
2128 | 2139 | ||
2129 | void r100_set_common_regs(struct radeon_device *rdev) | 2140 | void r100_set_common_regs(struct radeon_device *rdev) |
@@ -2297,7 +2308,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev) | |||
2297 | /* FIXME we don't use the second aperture yet when we could use it */ | 2308 | /* FIXME we don't use the second aperture yet when we could use it */ |
2298 | if (rdev->mc.visible_vram_size > rdev->mc.aper_size) | 2309 | if (rdev->mc.visible_vram_size > rdev->mc.aper_size) |
2299 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 2310 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
2300 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
2301 | config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); | 2311 | config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); |
2302 | if (rdev->flags & RADEON_IS_IGP) { | 2312 | if (rdev->flags & RADEON_IS_IGP) { |
2303 | uint32_t tom; | 2313 | uint32_t tom; |
@@ -2318,6 +2328,9 @@ void r100_vram_init_sizes(struct radeon_device *rdev) | |||
2318 | /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - | 2328 | /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - |
2319 | * Novell bug 204882 + along with lots of ubuntu ones | 2329 | * Novell bug 204882 + along with lots of ubuntu ones |
2320 | */ | 2330 | */ |
2331 | if (rdev->mc.aper_size > config_aper_size) | ||
2332 | config_aper_size = rdev->mc.aper_size; | ||
2333 | |||
2321 | if (config_aper_size > rdev->mc.real_vram_size) | 2334 | if (config_aper_size > rdev->mc.real_vram_size) |
2322 | rdev->mc.mc_vram_size = config_aper_size; | 2335 | rdev->mc.mc_vram_size = config_aper_size; |
2323 | else | 2336 | else |
@@ -2331,10 +2344,10 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state) | |||
2331 | 2344 | ||
2332 | temp = RREG32(RADEON_CONFIG_CNTL); | 2345 | temp = RREG32(RADEON_CONFIG_CNTL); |
2333 | if (state == false) { | 2346 | if (state == false) { |
2334 | temp &= ~(1<<8); | 2347 | temp &= ~RADEON_CFG_VGA_RAM_EN; |
2335 | temp |= (1<<9); | 2348 | temp |= RADEON_CFG_VGA_IO_DIS; |
2336 | } else { | 2349 | } else { |
2337 | temp &= ~(1<<9); | 2350 | temp &= ~RADEON_CFG_VGA_IO_DIS; |
2338 | } | 2351 | } |
2339 | WREG32(RADEON_CONFIG_CNTL, temp); | 2352 | WREG32(RADEON_CONFIG_CNTL, temp); |
2340 | } | 2353 | } |
@@ -3225,6 +3238,8 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, | |||
3225 | for (u = 0; u < track->num_texture; u++) { | 3238 | for (u = 0; u < track->num_texture; u++) { |
3226 | if (!track->textures[u].enabled) | 3239 | if (!track->textures[u].enabled) |
3227 | continue; | 3240 | continue; |
3241 | if (track->textures[u].lookup_disable) | ||
3242 | continue; | ||
3228 | robj = track->textures[u].robj; | 3243 | robj = track->textures[u].robj; |
3229 | if (robj == NULL) { | 3244 | if (robj == NULL) { |
3230 | DRM_ERROR("No texture bound to unit %u\n", u); | 3245 | DRM_ERROR("No texture bound to unit %u\n", u); |
@@ -3300,9 +3315,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3300 | unsigned long size; | 3315 | unsigned long size; |
3301 | unsigned prim_walk; | 3316 | unsigned prim_walk; |
3302 | unsigned nverts; | 3317 | unsigned nverts; |
3303 | unsigned num_cb = track->num_cb; | 3318 | unsigned num_cb = track->cb_dirty ? track->num_cb : 0; |
3304 | 3319 | ||
3305 | if (!track->zb_cb_clear && !track->color_channel_mask && | 3320 | if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && |
3306 | !track->blend_read_enable) | 3321 | !track->blend_read_enable) |
3307 | num_cb = 0; | 3322 | num_cb = 0; |
3308 | 3323 | ||
@@ -3323,7 +3338,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3323 | return -EINVAL; | 3338 | return -EINVAL; |
3324 | } | 3339 | } |
3325 | } | 3340 | } |
3326 | if (track->z_enabled) { | 3341 | track->cb_dirty = false; |
3342 | |||
3343 | if (track->zb_dirty && track->z_enabled) { | ||
3327 | if (track->zb.robj == NULL) { | 3344 | if (track->zb.robj == NULL) { |
3328 | DRM_ERROR("[drm] No buffer for z buffer !\n"); | 3345 | DRM_ERROR("[drm] No buffer for z buffer !\n"); |
3329 | return -EINVAL; | 3346 | return -EINVAL; |
@@ -3340,6 +3357,28 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3340 | return -EINVAL; | 3357 | return -EINVAL; |
3341 | } | 3358 | } |
3342 | } | 3359 | } |
3360 | track->zb_dirty = false; | ||
3361 | |||
3362 | if (track->aa_dirty && track->aaresolve) { | ||
3363 | if (track->aa.robj == NULL) { | ||
3364 | DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); | ||
3365 | return -EINVAL; | ||
3366 | } | ||
3367 | /* I believe the format comes from colorbuffer0. */ | ||
3368 | size = track->aa.pitch * track->cb[0].cpp * track->maxy; | ||
3369 | size += track->aa.offset; | ||
3370 | if (size > radeon_bo_size(track->aa.robj)) { | ||
3371 | DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " | ||
3372 | "(need %lu have %lu) !\n", i, size, | ||
3373 | radeon_bo_size(track->aa.robj)); | ||
3374 | DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", | ||
3375 | i, track->aa.pitch, track->cb[0].cpp, | ||
3376 | track->aa.offset, track->maxy); | ||
3377 | return -EINVAL; | ||
3378 | } | ||
3379 | } | ||
3380 | track->aa_dirty = false; | ||
3381 | |||
3343 | prim_walk = (track->vap_vf_cntl >> 4) & 0x3; | 3382 | prim_walk = (track->vap_vf_cntl >> 4) & 0x3; |
3344 | if (track->vap_vf_cntl & (1 << 14)) { | 3383 | if (track->vap_vf_cntl & (1 << 14)) { |
3345 | nverts = track->vap_alt_nverts; | 3384 | nverts = track->vap_alt_nverts; |
@@ -3399,13 +3438,23 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3399 | prim_walk); | 3438 | prim_walk); |
3400 | return -EINVAL; | 3439 | return -EINVAL; |
3401 | } | 3440 | } |
3402 | return r100_cs_track_texture_check(rdev, track); | 3441 | |
3442 | if (track->tex_dirty) { | ||
3443 | track->tex_dirty = false; | ||
3444 | return r100_cs_track_texture_check(rdev, track); | ||
3445 | } | ||
3446 | return 0; | ||
3403 | } | 3447 | } |
3404 | 3448 | ||
3405 | void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) | 3449 | void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) |
3406 | { | 3450 | { |
3407 | unsigned i, face; | 3451 | unsigned i, face; |
3408 | 3452 | ||
3453 | track->cb_dirty = true; | ||
3454 | track->zb_dirty = true; | ||
3455 | track->tex_dirty = true; | ||
3456 | track->aa_dirty = true; | ||
3457 | |||
3409 | if (rdev->family < CHIP_R300) { | 3458 | if (rdev->family < CHIP_R300) { |
3410 | track->num_cb = 1; | 3459 | track->num_cb = 1; |
3411 | if (rdev->family <= CHIP_RS200) | 3460 | if (rdev->family <= CHIP_RS200) |
@@ -3419,6 +3468,8 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track | |||
3419 | track->num_texture = 16; | 3468 | track->num_texture = 16; |
3420 | track->maxy = 4096; | 3469 | track->maxy = 4096; |
3421 | track->separate_cube = 0; | 3470 | track->separate_cube = 0; |
3471 | track->aaresolve = false; | ||
3472 | track->aa.robj = NULL; | ||
3422 | } | 3473 | } |
3423 | 3474 | ||
3424 | for (i = 0; i < track->num_cb; i++) { | 3475 | for (i = 0; i < track->num_cb; i++) { |
@@ -3459,6 +3510,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track | |||
3459 | track->textures[i].robj = NULL; | 3510 | track->textures[i].robj = NULL; |
3460 | /* CS IB emission code makes sure texture unit are disabled */ | 3511 | /* CS IB emission code makes sure texture unit are disabled */ |
3461 | track->textures[i].enabled = false; | 3512 | track->textures[i].enabled = false; |
3513 | track->textures[i].lookup_disable = false; | ||
3462 | track->textures[i].roundup_w = true; | 3514 | track->textures[i].roundup_w = true; |
3463 | track->textures[i].roundup_h = true; | 3515 | track->textures[i].roundup_h = true; |
3464 | if (track->separate_cube) | 3516 | if (track->separate_cube) |
@@ -3503,7 +3555,7 @@ int r100_ring_test(struct radeon_device *rdev) | |||
3503 | if (i < rdev->usec_timeout) { | 3555 | if (i < rdev->usec_timeout) { |
3504 | DRM_INFO("ring test succeeded in %d usecs\n", i); | 3556 | DRM_INFO("ring test succeeded in %d usecs\n", i); |
3505 | } else { | 3557 | } else { |
3506 | DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n", | 3558 | DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n", |
3507 | scratch, tmp); | 3559 | scratch, tmp); |
3508 | r = -EINVAL; | 3560 | r = -EINVAL; |
3509 | } | 3561 | } |
@@ -3565,7 +3617,7 @@ int r100_ib_test(struct radeon_device *rdev) | |||
3565 | if (i < rdev->usec_timeout) { | 3617 | if (i < rdev->usec_timeout) { |
3566 | DRM_INFO("ib test succeeded in %u usecs\n", i); | 3618 | DRM_INFO("ib test succeeded in %u usecs\n", i); |
3567 | } else { | 3619 | } else { |
3568 | DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", | 3620 | DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", |
3569 | scratch, tmp); | 3621 | scratch, tmp); |
3570 | r = -EINVAL; | 3622 | r = -EINVAL; |
3571 | } | 3623 | } |
@@ -3585,13 +3637,13 @@ int r100_ib_init(struct radeon_device *rdev) | |||
3585 | 3637 | ||
3586 | r = radeon_ib_pool_init(rdev); | 3638 | r = radeon_ib_pool_init(rdev); |
3587 | if (r) { | 3639 | if (r) { |
3588 | dev_err(rdev->dev, "failled initializing IB pool (%d).\n", r); | 3640 | dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r); |
3589 | r100_ib_fini(rdev); | 3641 | r100_ib_fini(rdev); |
3590 | return r; | 3642 | return r; |
3591 | } | 3643 | } |
3592 | r = r100_ib_test(rdev); | 3644 | r = r100_ib_test(rdev); |
3593 | if (r) { | 3645 | if (r) { |
3594 | dev_err(rdev->dev, "failled testing IB (%d).\n", r); | 3646 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
3595 | r100_ib_fini(rdev); | 3647 | r100_ib_fini(rdev); |
3596 | return r; | 3648 | return r; |
3597 | } | 3649 | } |
@@ -3727,8 +3779,6 @@ static int r100_startup(struct radeon_device *rdev) | |||
3727 | r100_mc_program(rdev); | 3779 | r100_mc_program(rdev); |
3728 | /* Resume clock */ | 3780 | /* Resume clock */ |
3729 | r100_clock_startup(rdev); | 3781 | r100_clock_startup(rdev); |
3730 | /* Initialize GPU configuration (# pipes, ...) */ | ||
3731 | // r100_gpu_init(rdev); | ||
3732 | /* Initialize GART (initialize after TTM so we can allocate | 3782 | /* Initialize GART (initialize after TTM so we can allocate |
3733 | * memory through TTM but finalize after TTM) */ | 3783 | * memory through TTM but finalize after TTM) */ |
3734 | r100_enable_bm(rdev); | 3784 | r100_enable_bm(rdev); |
@@ -3737,21 +3787,24 @@ static int r100_startup(struct radeon_device *rdev) | |||
3737 | if (r) | 3787 | if (r) |
3738 | return r; | 3788 | return r; |
3739 | } | 3789 | } |
3790 | |||
3791 | /* allocate wb buffer */ | ||
3792 | r = radeon_wb_init(rdev); | ||
3793 | if (r) | ||
3794 | return r; | ||
3795 | |||
3740 | /* Enable IRQ */ | 3796 | /* Enable IRQ */ |
3741 | r100_irq_set(rdev); | 3797 | r100_irq_set(rdev); |
3742 | rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 3798 | rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
3743 | /* 1M ring buffer */ | 3799 | /* 1M ring buffer */ |
3744 | r = r100_cp_init(rdev, 1024 * 1024); | 3800 | r = r100_cp_init(rdev, 1024 * 1024); |
3745 | if (r) { | 3801 | if (r) { |
3746 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 3802 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
3747 | return r; | 3803 | return r; |
3748 | } | 3804 | } |
3749 | r = r100_wb_init(rdev); | ||
3750 | if (r) | ||
3751 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
3752 | r = r100_ib_init(rdev); | 3805 | r = r100_ib_init(rdev); |
3753 | if (r) { | 3806 | if (r) { |
3754 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 3807 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); |
3755 | return r; | 3808 | return r; |
3756 | } | 3809 | } |
3757 | return 0; | 3810 | return 0; |
@@ -3782,7 +3835,7 @@ int r100_resume(struct radeon_device *rdev) | |||
3782 | int r100_suspend(struct radeon_device *rdev) | 3835 | int r100_suspend(struct radeon_device *rdev) |
3783 | { | 3836 | { |
3784 | r100_cp_disable(rdev); | 3837 | r100_cp_disable(rdev); |
3785 | r100_wb_disable(rdev); | 3838 | radeon_wb_disable(rdev); |
3786 | r100_irq_disable(rdev); | 3839 | r100_irq_disable(rdev); |
3787 | if (rdev->flags & RADEON_IS_PCI) | 3840 | if (rdev->flags & RADEON_IS_PCI) |
3788 | r100_pci_gart_disable(rdev); | 3841 | r100_pci_gart_disable(rdev); |
@@ -3792,7 +3845,7 @@ int r100_suspend(struct radeon_device *rdev) | |||
3792 | void r100_fini(struct radeon_device *rdev) | 3845 | void r100_fini(struct radeon_device *rdev) |
3793 | { | 3846 | { |
3794 | r100_cp_fini(rdev); | 3847 | r100_cp_fini(rdev); |
3795 | r100_wb_fini(rdev); | 3848 | radeon_wb_fini(rdev); |
3796 | r100_ib_fini(rdev); | 3849 | r100_ib_fini(rdev); |
3797 | radeon_gem_fini(rdev); | 3850 | radeon_gem_fini(rdev); |
3798 | if (rdev->flags & RADEON_IS_PCI) | 3851 | if (rdev->flags & RADEON_IS_PCI) |
@@ -3905,7 +3958,7 @@ int r100_init(struct radeon_device *rdev) | |||
3905 | /* Somethings want wront with the accel init stop accel */ | 3958 | /* Somethings want wront with the accel init stop accel */ |
3906 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 3959 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
3907 | r100_cp_fini(rdev); | 3960 | r100_cp_fini(rdev); |
3908 | r100_wb_fini(rdev); | 3961 | radeon_wb_fini(rdev); |
3909 | r100_ib_fini(rdev); | 3962 | r100_ib_fini(rdev); |
3910 | radeon_irq_kms_fini(rdev); | 3963 | radeon_irq_kms_fini(rdev); |
3911 | if (rdev->flags & RADEON_IS_PCI) | 3964 | if (rdev->flags & RADEON_IS_PCI) |
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index f47cdca1c004..686f9dc5d4bd 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h | |||
@@ -46,19 +46,13 @@ struct r100_cs_track_texture { | |||
46 | unsigned height_11; | 46 | unsigned height_11; |
47 | bool use_pitch; | 47 | bool use_pitch; |
48 | bool enabled; | 48 | bool enabled; |
49 | bool lookup_disable; | ||
49 | bool roundup_w; | 50 | bool roundup_w; |
50 | bool roundup_h; | 51 | bool roundup_h; |
51 | unsigned compress_format; | 52 | unsigned compress_format; |
52 | }; | 53 | }; |
53 | 54 | ||
54 | struct r100_cs_track_limits { | ||
55 | unsigned num_cb; | ||
56 | unsigned num_texture; | ||
57 | unsigned max_levels; | ||
58 | }; | ||
59 | |||
60 | struct r100_cs_track { | 55 | struct r100_cs_track { |
61 | struct radeon_device *rdev; | ||
62 | unsigned num_cb; | 56 | unsigned num_cb; |
63 | unsigned num_texture; | 57 | unsigned num_texture; |
64 | unsigned maxy; | 58 | unsigned maxy; |
@@ -69,14 +63,20 @@ struct r100_cs_track { | |||
69 | unsigned num_arrays; | 63 | unsigned num_arrays; |
70 | unsigned max_indx; | 64 | unsigned max_indx; |
71 | unsigned color_channel_mask; | 65 | unsigned color_channel_mask; |
72 | struct r100_cs_track_array arrays[11]; | 66 | struct r100_cs_track_array arrays[16]; |
73 | struct r100_cs_track_cb cb[R300_MAX_CB]; | 67 | struct r100_cs_track_cb cb[R300_MAX_CB]; |
74 | struct r100_cs_track_cb zb; | 68 | struct r100_cs_track_cb zb; |
69 | struct r100_cs_track_cb aa; | ||
75 | struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; | 70 | struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; |
76 | bool z_enabled; | 71 | bool z_enabled; |
77 | bool separate_cube; | 72 | bool separate_cube; |
78 | bool zb_cb_clear; | 73 | bool zb_cb_clear; |
79 | bool blend_read_enable; | 74 | bool blend_read_enable; |
75 | bool cb_dirty; | ||
76 | bool zb_dirty; | ||
77 | bool tex_dirty; | ||
78 | bool aa_dirty; | ||
79 | bool aaresolve; | ||
80 | }; | 80 | }; |
81 | 81 | ||
82 | int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); | 82 | int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); |
@@ -146,6 +146,12 @@ static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, | |||
146 | ib = p->ib->ptr; | 146 | ib = p->ib->ptr; |
147 | track = (struct r100_cs_track *)p->track; | 147 | track = (struct r100_cs_track *)p->track; |
148 | c = radeon_get_ib_value(p, idx++) & 0x1F; | 148 | c = radeon_get_ib_value(p, idx++) & 0x1F; |
149 | if (c > 16) { | ||
150 | DRM_ERROR("Only 16 vertex buffers are allowed %d\n", | ||
151 | pkt->opcode); | ||
152 | r100_cs_dump_packet(p, pkt); | ||
153 | return -EINVAL; | ||
154 | } | ||
149 | track->num_arrays = c; | 155 | track->num_arrays = c; |
150 | for (i = 0; i < (c - 1); i+=2, idx+=3) { | 156 | for (i = 0; i < (c - 1); i+=2, idx+=3) { |
151 | r = r100_cs_packet_next_reloc(p, &reloc); | 157 | r = r100_cs_packet_next_reloc(p, &reloc); |
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h index b121b6c678d4..eab91760fae0 100644 --- a/drivers/gpu/drm/radeon/r100d.h +++ b/drivers/gpu/drm/radeon/r100d.h | |||
@@ -551,7 +551,7 @@ | |||
551 | #define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) | 551 | #define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) |
552 | #define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) | 552 | #define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) |
553 | #define C_000360_CUR2_LOCK 0x7FFFFFFF | 553 | #define C_000360_CUR2_LOCK 0x7FFFFFFF |
554 | #define R_0003C2_GENMO_WT 0x0003C0 | 554 | #define R_0003C2_GENMO_WT 0x0003C2 |
555 | #define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) | 555 | #define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) |
556 | #define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) | 556 | #define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) |
557 | #define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE | 557 | #define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 0266d72e0a4c..f24058300413 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -184,6 +184,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
184 | } | 184 | } |
185 | track->zb.robj = reloc->robj; | 185 | track->zb.robj = reloc->robj; |
186 | track->zb.offset = idx_value; | 186 | track->zb.offset = idx_value; |
187 | track->zb_dirty = true; | ||
187 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 188 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
188 | break; | 189 | break; |
189 | case RADEON_RB3D_COLOROFFSET: | 190 | case RADEON_RB3D_COLOROFFSET: |
@@ -196,6 +197,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
196 | } | 197 | } |
197 | track->cb[0].robj = reloc->robj; | 198 | track->cb[0].robj = reloc->robj; |
198 | track->cb[0].offset = idx_value; | 199 | track->cb[0].offset = idx_value; |
200 | track->cb_dirty = true; | ||
199 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 201 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
200 | break; | 202 | break; |
201 | case R200_PP_TXOFFSET_0: | 203 | case R200_PP_TXOFFSET_0: |
@@ -214,6 +216,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
214 | } | 216 | } |
215 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 217 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
216 | track->textures[i].robj = reloc->robj; | 218 | track->textures[i].robj = reloc->robj; |
219 | track->tex_dirty = true; | ||
217 | break; | 220 | break; |
218 | case R200_PP_CUBIC_OFFSET_F1_0: | 221 | case R200_PP_CUBIC_OFFSET_F1_0: |
219 | case R200_PP_CUBIC_OFFSET_F2_0: | 222 | case R200_PP_CUBIC_OFFSET_F2_0: |
@@ -257,9 +260,12 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
257 | track->textures[i].cube_info[face - 1].offset = idx_value; | 260 | track->textures[i].cube_info[face - 1].offset = idx_value; |
258 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 261 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
259 | track->textures[i].cube_info[face - 1].robj = reloc->robj; | 262 | track->textures[i].cube_info[face - 1].robj = reloc->robj; |
263 | track->tex_dirty = true; | ||
260 | break; | 264 | break; |
261 | case RADEON_RE_WIDTH_HEIGHT: | 265 | case RADEON_RE_WIDTH_HEIGHT: |
262 | track->maxy = ((idx_value >> 16) & 0x7FF); | 266 | track->maxy = ((idx_value >> 16) & 0x7FF); |
267 | track->cb_dirty = true; | ||
268 | track->zb_dirty = true; | ||
263 | break; | 269 | break; |
264 | case RADEON_RB3D_COLORPITCH: | 270 | case RADEON_RB3D_COLORPITCH: |
265 | r = r100_cs_packet_next_reloc(p, &reloc); | 271 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -280,9 +286,11 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
280 | ib[idx] = tmp; | 286 | ib[idx] = tmp; |
281 | 287 | ||
282 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; | 288 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; |
289 | track->cb_dirty = true; | ||
283 | break; | 290 | break; |
284 | case RADEON_RB3D_DEPTHPITCH: | 291 | case RADEON_RB3D_DEPTHPITCH: |
285 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; | 292 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; |
293 | track->zb_dirty = true; | ||
286 | break; | 294 | break; |
287 | case RADEON_RB3D_CNTL: | 295 | case RADEON_RB3D_CNTL: |
288 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { | 296 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { |
@@ -312,6 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
312 | } | 320 | } |
313 | 321 | ||
314 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); | 322 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); |
323 | track->cb_dirty = true; | ||
324 | track->zb_dirty = true; | ||
315 | break; | 325 | break; |
316 | case RADEON_RB3D_ZSTENCILCNTL: | 326 | case RADEON_RB3D_ZSTENCILCNTL: |
317 | switch (idx_value & 0xf) { | 327 | switch (idx_value & 0xf) { |
@@ -329,6 +339,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
329 | default: | 339 | default: |
330 | break; | 340 | break; |
331 | } | 341 | } |
342 | track->zb_dirty = true; | ||
332 | break; | 343 | break; |
333 | case RADEON_RB3D_ZPASS_ADDR: | 344 | case RADEON_RB3D_ZPASS_ADDR: |
334 | r = r100_cs_packet_next_reloc(p, &reloc); | 345 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -345,6 +356,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
345 | uint32_t temp = idx_value >> 4; | 356 | uint32_t temp = idx_value >> 4; |
346 | for (i = 0; i < track->num_texture; i++) | 357 | for (i = 0; i < track->num_texture; i++) |
347 | track->textures[i].enabled = !!(temp & (1 << i)); | 358 | track->textures[i].enabled = !!(temp & (1 << i)); |
359 | track->tex_dirty = true; | ||
348 | } | 360 | } |
349 | break; | 361 | break; |
350 | case RADEON_SE_VF_CNTL: | 362 | case RADEON_SE_VF_CNTL: |
@@ -369,6 +381,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
369 | i = (reg - R200_PP_TXSIZE_0) / 32; | 381 | i = (reg - R200_PP_TXSIZE_0) / 32; |
370 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; | 382 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; |
371 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; | 383 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; |
384 | track->tex_dirty = true; | ||
372 | break; | 385 | break; |
373 | case R200_PP_TXPITCH_0: | 386 | case R200_PP_TXPITCH_0: |
374 | case R200_PP_TXPITCH_1: | 387 | case R200_PP_TXPITCH_1: |
@@ -378,6 +391,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
378 | case R200_PP_TXPITCH_5: | 391 | case R200_PP_TXPITCH_5: |
379 | i = (reg - R200_PP_TXPITCH_0) / 32; | 392 | i = (reg - R200_PP_TXPITCH_0) / 32; |
380 | track->textures[i].pitch = idx_value + 32; | 393 | track->textures[i].pitch = idx_value + 32; |
394 | track->tex_dirty = true; | ||
381 | break; | 395 | break; |
382 | case R200_PP_TXFILTER_0: | 396 | case R200_PP_TXFILTER_0: |
383 | case R200_PP_TXFILTER_1: | 397 | case R200_PP_TXFILTER_1: |
@@ -394,6 +408,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
394 | tmp = (idx_value >> 27) & 0x7; | 408 | tmp = (idx_value >> 27) & 0x7; |
395 | if (tmp == 2 || tmp == 6) | 409 | if (tmp == 2 || tmp == 6) |
396 | track->textures[i].roundup_h = false; | 410 | track->textures[i].roundup_h = false; |
411 | track->tex_dirty = true; | ||
397 | break; | 412 | break; |
398 | case R200_PP_TXMULTI_CTL_0: | 413 | case R200_PP_TXMULTI_CTL_0: |
399 | case R200_PP_TXMULTI_CTL_1: | 414 | case R200_PP_TXMULTI_CTL_1: |
@@ -432,6 +447,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
432 | track->textures[i].tex_coord_type = 1; | 447 | track->textures[i].tex_coord_type = 1; |
433 | break; | 448 | break; |
434 | } | 449 | } |
450 | track->tex_dirty = true; | ||
435 | break; | 451 | break; |
436 | case R200_PP_TXFORMAT_0: | 452 | case R200_PP_TXFORMAT_0: |
437 | case R200_PP_TXFORMAT_1: | 453 | case R200_PP_TXFORMAT_1: |
@@ -447,6 +463,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
447 | track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); | 463 | track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); |
448 | track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); | 464 | track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); |
449 | } | 465 | } |
466 | if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE) | ||
467 | track->textures[i].lookup_disable = true; | ||
450 | switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { | 468 | switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { |
451 | case R200_TXFORMAT_I8: | 469 | case R200_TXFORMAT_I8: |
452 | case R200_TXFORMAT_RGB332: | 470 | case R200_TXFORMAT_RGB332: |
@@ -486,6 +504,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
486 | } | 504 | } |
487 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); | 505 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); |
488 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); | 506 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); |
507 | track->tex_dirty = true; | ||
489 | break; | 508 | break; |
490 | case R200_PP_CUBIC_FACES_0: | 509 | case R200_PP_CUBIC_FACES_0: |
491 | case R200_PP_CUBIC_FACES_1: | 510 | case R200_PP_CUBIC_FACES_1: |
@@ -499,6 +518,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
499 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); | 518 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); |
500 | track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); | 519 | track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); |
501 | } | 520 | } |
521 | track->tex_dirty = true; | ||
502 | break; | 522 | break; |
503 | default: | 523 | default: |
504 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | 524 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index c827738ad7dd..55a7f190027e 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -69,6 +69,9 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) | |||
69 | mb(); | 69 | mb(); |
70 | } | 70 | } |
71 | 71 | ||
72 | #define R300_PTE_WRITEABLE (1 << 2) | ||
73 | #define R300_PTE_READABLE (1 << 3) | ||
74 | |||
72 | int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 75 | int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
73 | { | 76 | { |
74 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; | 77 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; |
@@ -78,7 +81,7 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
78 | } | 81 | } |
79 | addr = (lower_32_bits(addr) >> 8) | | 82 | addr = (lower_32_bits(addr) >> 8) | |
80 | ((upper_32_bits(addr) & 0xff) << 24) | | 83 | ((upper_32_bits(addr) & 0xff) << 24) | |
81 | 0xc; | 84 | R300_PTE_WRITEABLE | R300_PTE_READABLE; |
82 | /* on x86 we want this to be CPU endian, on powerpc | 85 | /* on x86 we want this to be CPU endian, on powerpc |
83 | * on powerpc without HW swappers, it'll get swapped on way | 86 | * on powerpc without HW swappers, it'll get swapped on way |
84 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */ | 87 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */ |
@@ -91,7 +94,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) | |||
91 | int r; | 94 | int r; |
92 | 95 | ||
93 | if (rdev->gart.table.vram.robj) { | 96 | if (rdev->gart.table.vram.robj) { |
94 | WARN(1, "RV370 PCIE GART already initialized.\n"); | 97 | WARN(1, "RV370 PCIE GART already initialized\n"); |
95 | return 0; | 98 | return 0; |
96 | } | 99 | } |
97 | /* Initialize common gart structure */ | 100 | /* Initialize common gart structure */ |
@@ -135,7 +138,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) | |||
135 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start); | 138 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start); |
136 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); | 139 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); |
137 | /* Clear error */ | 140 | /* Clear error */ |
138 | WREG32_PCIE(0x18, 0); | 141 | WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0); |
139 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); | 142 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
140 | tmp |= RADEON_PCIE_TX_GART_EN; | 143 | tmp |= RADEON_PCIE_TX_GART_EN; |
141 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; | 144 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
@@ -405,12 +408,13 @@ int r300_asic_reset(struct radeon_device *rdev) | |||
405 | { | 408 | { |
406 | struct r100_mc_save save; | 409 | struct r100_mc_save save; |
407 | u32 status, tmp; | 410 | u32 status, tmp; |
411 | int ret = 0; | ||
408 | 412 | ||
409 | r100_mc_stop(rdev, &save); | ||
410 | status = RREG32(R_000E40_RBBM_STATUS); | 413 | status = RREG32(R_000E40_RBBM_STATUS); |
411 | if (!G_000E40_GUI_ACTIVE(status)) { | 414 | if (!G_000E40_GUI_ACTIVE(status)) { |
412 | return 0; | 415 | return 0; |
413 | } | 416 | } |
417 | r100_mc_stop(rdev, &save); | ||
414 | status = RREG32(R_000E40_RBBM_STATUS); | 418 | status = RREG32(R_000E40_RBBM_STATUS); |
415 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); | 419 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); |
416 | /* stop CP */ | 420 | /* stop CP */ |
@@ -433,7 +437,7 @@ int r300_asic_reset(struct radeon_device *rdev) | |||
433 | status = RREG32(R_000E40_RBBM_STATUS); | 437 | status = RREG32(R_000E40_RBBM_STATUS); |
434 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); | 438 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); |
435 | /* resetting the CP seems to be problematic sometimes it end up | 439 | /* resetting the CP seems to be problematic sometimes it end up |
436 | * hard locking the computer, but it's necessary for successfull | 440 | * hard locking the computer, but it's necessary for successful |
437 | * reset more test & playing is needed on R3XX/R4XX to find a | 441 | * reset more test & playing is needed on R3XX/R4XX to find a |
438 | * reliable (if any solution) | 442 | * reliable (if any solution) |
439 | */ | 443 | */ |
@@ -451,11 +455,11 @@ int r300_asic_reset(struct radeon_device *rdev) | |||
451 | if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { | 455 | if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { |
452 | dev_err(rdev->dev, "failed to reset GPU\n"); | 456 | dev_err(rdev->dev, "failed to reset GPU\n"); |
453 | rdev->gpu_lockup = true; | 457 | rdev->gpu_lockup = true; |
454 | return -1; | 458 | ret = -1; |
455 | } | 459 | } else |
460 | dev_info(rdev->dev, "GPU reset succeed\n"); | ||
456 | r100_mc_resume(rdev, &save); | 461 | r100_mc_resume(rdev, &save); |
457 | dev_info(rdev->dev, "GPU reset succeed\n"); | 462 | return ret; |
458 | return 0; | ||
459 | } | 463 | } |
460 | 464 | ||
461 | /* | 465 | /* |
@@ -558,10 +562,7 @@ int rv370_get_pcie_lanes(struct radeon_device *rdev) | |||
558 | 562 | ||
559 | /* FIXME wait for idle */ | 563 | /* FIXME wait for idle */ |
560 | 564 | ||
561 | if (rdev->family < CHIP_R600) | 565 | link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); |
562 | link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); | ||
563 | else | ||
564 | link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); | ||
565 | 566 | ||
566 | switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { | 567 | switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { |
567 | case RADEON_PCIE_LC_LINK_WIDTH_X0: | 568 | case RADEON_PCIE_LC_LINK_WIDTH_X0: |
@@ -666,6 +667,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
666 | } | 667 | } |
667 | track->cb[i].robj = reloc->robj; | 668 | track->cb[i].robj = reloc->robj; |
668 | track->cb[i].offset = idx_value; | 669 | track->cb[i].offset = idx_value; |
670 | track->cb_dirty = true; | ||
669 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 671 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
670 | break; | 672 | break; |
671 | case R300_ZB_DEPTHOFFSET: | 673 | case R300_ZB_DEPTHOFFSET: |
@@ -678,6 +680,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
678 | } | 680 | } |
679 | track->zb.robj = reloc->robj; | 681 | track->zb.robj = reloc->robj; |
680 | track->zb.offset = idx_value; | 682 | track->zb.offset = idx_value; |
683 | track->zb_dirty = true; | ||
681 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 684 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
682 | break; | 685 | break; |
683 | case R300_TX_OFFSET_0: | 686 | case R300_TX_OFFSET_0: |
@@ -716,6 +719,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
716 | tmp |= tile_flags; | 719 | tmp |= tile_flags; |
717 | ib[idx] = tmp; | 720 | ib[idx] = tmp; |
718 | track->textures[i].robj = reloc->robj; | 721 | track->textures[i].robj = reloc->robj; |
722 | track->tex_dirty = true; | ||
719 | break; | 723 | break; |
720 | /* Tracked registers */ | 724 | /* Tracked registers */ |
721 | case 0x2084: | 725 | case 0x2084: |
@@ -742,10 +746,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
742 | if (p->rdev->family < CHIP_RV515) { | 746 | if (p->rdev->family < CHIP_RV515) { |
743 | track->maxy -= 1440; | 747 | track->maxy -= 1440; |
744 | } | 748 | } |
749 | track->cb_dirty = true; | ||
750 | track->zb_dirty = true; | ||
745 | break; | 751 | break; |
746 | case 0x4E00: | 752 | case 0x4E00: |
747 | /* RB3D_CCTL */ | 753 | /* RB3D_CCTL */ |
754 | if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */ | ||
755 | p->rdev->cmask_filp != p->filp) { | ||
756 | DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n"); | ||
757 | return -EINVAL; | ||
758 | } | ||
748 | track->num_cb = ((idx_value >> 5) & 0x3) + 1; | 759 | track->num_cb = ((idx_value >> 5) & 0x3) + 1; |
760 | track->cb_dirty = true; | ||
749 | break; | 761 | break; |
750 | case 0x4E38: | 762 | case 0x4E38: |
751 | case 0x4E3C: | 763 | case 0x4E3C: |
@@ -787,6 +799,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
787 | case 15: | 799 | case 15: |
788 | track->cb[i].cpp = 2; | 800 | track->cb[i].cpp = 2; |
789 | break; | 801 | break; |
802 | case 5: | ||
803 | if (p->rdev->family < CHIP_RV515) { | ||
804 | DRM_ERROR("Invalid color buffer format (%d)!\n", | ||
805 | ((idx_value >> 21) & 0xF)); | ||
806 | return -EINVAL; | ||
807 | } | ||
808 | /* Pass through. */ | ||
790 | case 6: | 809 | case 6: |
791 | track->cb[i].cpp = 4; | 810 | track->cb[i].cpp = 4; |
792 | break; | 811 | break; |
@@ -801,6 +820,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
801 | ((idx_value >> 21) & 0xF)); | 820 | ((idx_value >> 21) & 0xF)); |
802 | return -EINVAL; | 821 | return -EINVAL; |
803 | } | 822 | } |
823 | track->cb_dirty = true; | ||
804 | break; | 824 | break; |
805 | case 0x4F00: | 825 | case 0x4F00: |
806 | /* ZB_CNTL */ | 826 | /* ZB_CNTL */ |
@@ -809,6 +829,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
809 | } else { | 829 | } else { |
810 | track->z_enabled = false; | 830 | track->z_enabled = false; |
811 | } | 831 | } |
832 | track->zb_dirty = true; | ||
812 | break; | 833 | break; |
813 | case 0x4F10: | 834 | case 0x4F10: |
814 | /* ZB_FORMAT */ | 835 | /* ZB_FORMAT */ |
@@ -825,6 +846,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
825 | (idx_value & 0xF)); | 846 | (idx_value & 0xF)); |
826 | return -EINVAL; | 847 | return -EINVAL; |
827 | } | 848 | } |
849 | track->zb_dirty = true; | ||
828 | break; | 850 | break; |
829 | case 0x4F24: | 851 | case 0x4F24: |
830 | /* ZB_DEPTHPITCH */ | 852 | /* ZB_DEPTHPITCH */ |
@@ -848,14 +870,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
848 | ib[idx] = tmp; | 870 | ib[idx] = tmp; |
849 | 871 | ||
850 | track->zb.pitch = idx_value & 0x3FFC; | 872 | track->zb.pitch = idx_value & 0x3FFC; |
873 | track->zb_dirty = true; | ||
851 | break; | 874 | break; |
852 | case 0x4104: | 875 | case 0x4104: |
876 | /* TX_ENABLE */ | ||
853 | for (i = 0; i < 16; i++) { | 877 | for (i = 0; i < 16; i++) { |
854 | bool enabled; | 878 | bool enabled; |
855 | 879 | ||
856 | enabled = !!(idx_value & (1 << i)); | 880 | enabled = !!(idx_value & (1 << i)); |
857 | track->textures[i].enabled = enabled; | 881 | track->textures[i].enabled = enabled; |
858 | } | 882 | } |
883 | track->tex_dirty = true; | ||
859 | break; | 884 | break; |
860 | case 0x44C0: | 885 | case 0x44C0: |
861 | case 0x44C4: | 886 | case 0x44C4: |
@@ -885,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
885 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | 910 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
886 | break; | 911 | break; |
887 | case R300_TX_FORMAT_X16: | 912 | case R300_TX_FORMAT_X16: |
913 | case R300_TX_FORMAT_FL_I16: | ||
888 | case R300_TX_FORMAT_Y8X8: | 914 | case R300_TX_FORMAT_Y8X8: |
889 | case R300_TX_FORMAT_Z5Y6X5: | 915 | case R300_TX_FORMAT_Z5Y6X5: |
890 | case R300_TX_FORMAT_Z6Y5X5: | 916 | case R300_TX_FORMAT_Z6Y5X5: |
@@ -897,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
897 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | 923 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
898 | break; | 924 | break; |
899 | case R300_TX_FORMAT_Y16X16: | 925 | case R300_TX_FORMAT_Y16X16: |
926 | case R300_TX_FORMAT_FL_I16A16: | ||
900 | case R300_TX_FORMAT_Z11Y11X10: | 927 | case R300_TX_FORMAT_Z11Y11X10: |
901 | case R300_TX_FORMAT_Z10Y11X11: | 928 | case R300_TX_FORMAT_Z10Y11X11: |
902 | case R300_TX_FORMAT_W8Z8Y8X8: | 929 | case R300_TX_FORMAT_W8Z8Y8X8: |
@@ -938,8 +965,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
938 | DRM_ERROR("Invalid texture format %u\n", | 965 | DRM_ERROR("Invalid texture format %u\n", |
939 | (idx_value & 0x1F)); | 966 | (idx_value & 0x1F)); |
940 | return -EINVAL; | 967 | return -EINVAL; |
941 | break; | ||
942 | } | 968 | } |
969 | track->tex_dirty = true; | ||
943 | break; | 970 | break; |
944 | case 0x4400: | 971 | case 0x4400: |
945 | case 0x4404: | 972 | case 0x4404: |
@@ -967,6 +994,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
967 | if (tmp == 2 || tmp == 4 || tmp == 6) { | 994 | if (tmp == 2 || tmp == 4 || tmp == 6) { |
968 | track->textures[i].roundup_h = false; | 995 | track->textures[i].roundup_h = false; |
969 | } | 996 | } |
997 | track->tex_dirty = true; | ||
970 | break; | 998 | break; |
971 | case 0x4500: | 999 | case 0x4500: |
972 | case 0x4504: | 1000 | case 0x4504: |
@@ -1004,6 +1032,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1004 | DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); | 1032 | DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); |
1005 | return -EINVAL; | 1033 | return -EINVAL; |
1006 | } | 1034 | } |
1035 | track->tex_dirty = true; | ||
1007 | break; | 1036 | break; |
1008 | case 0x4480: | 1037 | case 0x4480: |
1009 | case 0x4484: | 1038 | case 0x4484: |
@@ -1033,6 +1062,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1033 | track->textures[i].use_pitch = !!tmp; | 1062 | track->textures[i].use_pitch = !!tmp; |
1034 | tmp = (idx_value >> 22) & 0xF; | 1063 | tmp = (idx_value >> 22) & 0xF; |
1035 | track->textures[i].txdepth = tmp; | 1064 | track->textures[i].txdepth = tmp; |
1065 | track->tex_dirty = true; | ||
1036 | break; | 1066 | break; |
1037 | case R300_ZB_ZPASS_ADDR: | 1067 | case R300_ZB_ZPASS_ADDR: |
1038 | r = r100_cs_packet_next_reloc(p, &reloc); | 1068 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1047,6 +1077,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1047 | case 0x4e0c: | 1077 | case 0x4e0c: |
1048 | /* RB3D_COLOR_CHANNEL_MASK */ | 1078 | /* RB3D_COLOR_CHANNEL_MASK */ |
1049 | track->color_channel_mask = idx_value; | 1079 | track->color_channel_mask = idx_value; |
1080 | track->cb_dirty = true; | ||
1050 | break; | 1081 | break; |
1051 | case 0x43a4: | 1082 | case 0x43a4: |
1052 | /* SC_HYPERZ_EN */ | 1083 | /* SC_HYPERZ_EN */ |
@@ -1060,6 +1091,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1060 | case 0x4f1c: | 1091 | case 0x4f1c: |
1061 | /* ZB_BW_CNTL */ | 1092 | /* ZB_BW_CNTL */ |
1062 | track->zb_cb_clear = !!(idx_value & (1 << 5)); | 1093 | track->zb_cb_clear = !!(idx_value & (1 << 5)); |
1094 | track->cb_dirty = true; | ||
1095 | track->zb_dirty = true; | ||
1063 | if (p->rdev->hyperz_filp != p->filp) { | 1096 | if (p->rdev->hyperz_filp != p->filp) { |
1064 | if (idx_value & (R300_HIZ_ENABLE | | 1097 | if (idx_value & (R300_HIZ_ENABLE | |
1065 | R300_RD_COMP_ENABLE | | 1098 | R300_RD_COMP_ENABLE | |
@@ -1071,8 +1104,28 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1071 | case 0x4e04: | 1104 | case 0x4e04: |
1072 | /* RB3D_BLENDCNTL */ | 1105 | /* RB3D_BLENDCNTL */ |
1073 | track->blend_read_enable = !!(idx_value & (1 << 2)); | 1106 | track->blend_read_enable = !!(idx_value & (1 << 2)); |
1107 | track->cb_dirty = true; | ||
1108 | break; | ||
1109 | case R300_RB3D_AARESOLVE_OFFSET: | ||
1110 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1111 | if (r) { | ||
1112 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1113 | idx, reg); | ||
1114 | r100_cs_dump_packet(p, pkt); | ||
1115 | return r; | ||
1116 | } | ||
1117 | track->aa.robj = reloc->robj; | ||
1118 | track->aa.offset = idx_value; | ||
1119 | track->aa_dirty = true; | ||
1120 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | ||
1121 | break; | ||
1122 | case R300_RB3D_AARESOLVE_PITCH: | ||
1123 | track->aa.pitch = idx_value & 0x3FFE; | ||
1124 | track->aa_dirty = true; | ||
1074 | break; | 1125 | break; |
1075 | case 0x4f28: /* ZB_DEPTHCLEARVALUE */ | 1126 | case R300_RB3D_AARESOLVE_CTL: |
1127 | track->aaresolve = idx_value & 0x1; | ||
1128 | track->aa_dirty = true; | ||
1076 | break; | 1129 | break; |
1077 | case 0x4f30: /* ZB_MASK_OFFSET */ | 1130 | case 0x4f30: /* ZB_MASK_OFFSET */ |
1078 | case 0x4f34: /* ZB_ZMASK_PITCH */ | 1131 | case 0x4f34: /* ZB_ZMASK_PITCH */ |
@@ -1199,6 +1252,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p, | |||
1199 | if (p->rdev->hyperz_filp != p->filp) | 1252 | if (p->rdev->hyperz_filp != p->filp) |
1200 | return -EINVAL; | 1253 | return -EINVAL; |
1201 | break; | 1254 | break; |
1255 | case PACKET3_3D_CLEAR_CMASK: | ||
1256 | if (p->rdev->cmask_filp != p->filp) | ||
1257 | return -EINVAL; | ||
1258 | break; | ||
1202 | case PACKET3_NOP: | 1259 | case PACKET3_NOP: |
1203 | break; | 1260 | break; |
1204 | default: | 1261 | default: |
@@ -1332,21 +1389,24 @@ static int r300_startup(struct radeon_device *rdev) | |||
1332 | if (r) | 1389 | if (r) |
1333 | return r; | 1390 | return r; |
1334 | } | 1391 | } |
1392 | |||
1393 | /* allocate wb buffer */ | ||
1394 | r = radeon_wb_init(rdev); | ||
1395 | if (r) | ||
1396 | return r; | ||
1397 | |||
1335 | /* Enable IRQ */ | 1398 | /* Enable IRQ */ |
1336 | r100_irq_set(rdev); | 1399 | r100_irq_set(rdev); |
1337 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 1400 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
1338 | /* 1M ring buffer */ | 1401 | /* 1M ring buffer */ |
1339 | r = r100_cp_init(rdev, 1024 * 1024); | 1402 | r = r100_cp_init(rdev, 1024 * 1024); |
1340 | if (r) { | 1403 | if (r) { |
1341 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 1404 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
1342 | return r; | 1405 | return r; |
1343 | } | 1406 | } |
1344 | r = r100_wb_init(rdev); | ||
1345 | if (r) | ||
1346 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
1347 | r = r100_ib_init(rdev); | 1407 | r = r100_ib_init(rdev); |
1348 | if (r) { | 1408 | if (r) { |
1349 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 1409 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); |
1350 | return r; | 1410 | return r; |
1351 | } | 1411 | } |
1352 | return 0; | 1412 | return 0; |
@@ -1379,7 +1439,7 @@ int r300_resume(struct radeon_device *rdev) | |||
1379 | int r300_suspend(struct radeon_device *rdev) | 1439 | int r300_suspend(struct radeon_device *rdev) |
1380 | { | 1440 | { |
1381 | r100_cp_disable(rdev); | 1441 | r100_cp_disable(rdev); |
1382 | r100_wb_disable(rdev); | 1442 | radeon_wb_disable(rdev); |
1383 | r100_irq_disable(rdev); | 1443 | r100_irq_disable(rdev); |
1384 | if (rdev->flags & RADEON_IS_PCIE) | 1444 | if (rdev->flags & RADEON_IS_PCIE) |
1385 | rv370_pcie_gart_disable(rdev); | 1445 | rv370_pcie_gart_disable(rdev); |
@@ -1391,7 +1451,7 @@ int r300_suspend(struct radeon_device *rdev) | |||
1391 | void r300_fini(struct radeon_device *rdev) | 1451 | void r300_fini(struct radeon_device *rdev) |
1392 | { | 1452 | { |
1393 | r100_cp_fini(rdev); | 1453 | r100_cp_fini(rdev); |
1394 | r100_wb_fini(rdev); | 1454 | radeon_wb_fini(rdev); |
1395 | r100_ib_fini(rdev); | 1455 | r100_ib_fini(rdev); |
1396 | radeon_gem_fini(rdev); | 1456 | radeon_gem_fini(rdev); |
1397 | if (rdev->flags & RADEON_IS_PCIE) | 1457 | if (rdev->flags & RADEON_IS_PCIE) |
@@ -1484,7 +1544,7 @@ int r300_init(struct radeon_device *rdev) | |||
1484 | /* Somethings want wront with the accel init stop accel */ | 1544 | /* Somethings want wront with the accel init stop accel */ |
1485 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 1545 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
1486 | r100_cp_fini(rdev); | 1546 | r100_cp_fini(rdev); |
1487 | r100_wb_fini(rdev); | 1547 | radeon_wb_fini(rdev); |
1488 | r100_ib_fini(rdev); | 1548 | r100_ib_fini(rdev); |
1489 | radeon_irq_kms_fini(rdev); | 1549 | radeon_irq_kms_fini(rdev); |
1490 | if (rdev->flags & RADEON_IS_PCIE) | 1550 | if (rdev->flags & RADEON_IS_PCIE) |
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index 1a0d5362cd79..00c0d2ba22d3 100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h | |||
@@ -608,7 +608,7 @@ | |||
608 | * My guess is that there are two bits for each zbias primitive | 608 | * My guess is that there are two bits for each zbias primitive |
609 | * (FILL, LINE, POINT). | 609 | * (FILL, LINE, POINT). |
610 | * One to enable depth test and one for depth write. | 610 | * One to enable depth test and one for depth write. |
611 | * Yet this doesnt explain why depth writes work ... | 611 | * Yet this doesn't explain why depth writes work ... |
612 | */ | 612 | */ |
613 | #define R300_RE_OCCLUSION_CNTL 0x42B4 | 613 | #define R300_RE_OCCLUSION_CNTL 0x42B4 |
614 | # define R300_OCCLUSION_ON (1<<1) | 614 | # define R300_OCCLUSION_ON (1<<1) |
@@ -817,7 +817,7 @@ | |||
817 | # define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11) | 817 | # define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11) |
818 | # define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11) | 818 | # define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11) |
819 | 819 | ||
820 | /* NOTE: NEAREST doesnt seem to exist. | 820 | /* NOTE: NEAREST doesn't seem to exist. |
821 | * Im not seting MAG_FILTER_MASK and (3 << 11) on for all | 821 | * Im not seting MAG_FILTER_MASK and (3 << 11) on for all |
822 | * anisotropy modes because that would void selected mag filter | 822 | * anisotropy modes because that would void selected mag filter |
823 | */ | 823 | */ |
@@ -1371,6 +1371,8 @@ | |||
1371 | #define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ | 1371 | #define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ |
1372 | #define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ | 1372 | #define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ |
1373 | 1373 | ||
1374 | #define R300_RB3D_AARESOLVE_OFFSET 0x4E80 | ||
1375 | #define R300_RB3D_AARESOLVE_PITCH 0x4E84 | ||
1374 | #define R300_RB3D_AARESOLVE_CTL 0x4E88 | 1376 | #define R300_RB3D_AARESOLVE_CTL 0x4E88 |
1375 | /* gap */ | 1377 | /* gap */ |
1376 | 1378 | ||
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h index 0c036c60d9df..1f519a5ffb8c 100644 --- a/drivers/gpu/drm/radeon/r300d.h +++ b/drivers/gpu/drm/radeon/r300d.h | |||
@@ -54,6 +54,7 @@ | |||
54 | #define PACKET3_3D_DRAW_IMMD_2 0x35 | 54 | #define PACKET3_3D_DRAW_IMMD_2 0x35 |
55 | #define PACKET3_3D_DRAW_INDX_2 0x36 | 55 | #define PACKET3_3D_DRAW_INDX_2 0x36 |
56 | #define PACKET3_3D_CLEAR_HIZ 0x37 | 56 | #define PACKET3_3D_CLEAR_HIZ 0x37 |
57 | #define PACKET3_3D_CLEAR_CMASK 0x38 | ||
57 | #define PACKET3_BITBLT_MULTI 0x9B | 58 | #define PACKET3_BITBLT_MULTI 0x9B |
58 | 59 | ||
59 | #define PACKET0(reg, n) (CP_PACKET0 | \ | 60 | #define PACKET0(reg, n) (CP_PACKET0 | \ |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 59f7bccc5be0..417fab81812f 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -96,7 +96,7 @@ void r420_pipes_init(struct radeon_device *rdev) | |||
96 | "programming pipes. Bad things might happen.\n"); | 96 | "programming pipes. Bad things might happen.\n"); |
97 | } | 97 | } |
98 | /* get max number of pipes */ | 98 | /* get max number of pipes */ |
99 | gb_pipe_select = RREG32(0x402C); | 99 | gb_pipe_select = RREG32(R400_GB_PIPE_SELECT); |
100 | num_pipes = ((gb_pipe_select >> 12) & 3) + 1; | 100 | num_pipes = ((gb_pipe_select >> 12) & 3) + 1; |
101 | 101 | ||
102 | /* SE chips have 1 pipe */ | 102 | /* SE chips have 1 pipe */ |
@@ -248,23 +248,25 @@ static int r420_startup(struct radeon_device *rdev) | |||
248 | return r; | 248 | return r; |
249 | } | 249 | } |
250 | r420_pipes_init(rdev); | 250 | r420_pipes_init(rdev); |
251 | |||
252 | /* allocate wb buffer */ | ||
253 | r = radeon_wb_init(rdev); | ||
254 | if (r) | ||
255 | return r; | ||
256 | |||
251 | /* Enable IRQ */ | 257 | /* Enable IRQ */ |
252 | r100_irq_set(rdev); | 258 | r100_irq_set(rdev); |
253 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 259 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
254 | /* 1M ring buffer */ | 260 | /* 1M ring buffer */ |
255 | r = r100_cp_init(rdev, 1024 * 1024); | 261 | r = r100_cp_init(rdev, 1024 * 1024); |
256 | if (r) { | 262 | if (r) { |
257 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 263 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
258 | return r; | 264 | return r; |
259 | } | 265 | } |
260 | r420_cp_errata_init(rdev); | 266 | r420_cp_errata_init(rdev); |
261 | r = r100_wb_init(rdev); | ||
262 | if (r) { | ||
263 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
264 | } | ||
265 | r = r100_ib_init(rdev); | 267 | r = r100_ib_init(rdev); |
266 | if (r) { | 268 | if (r) { |
267 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 269 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); |
268 | return r; | 270 | return r; |
269 | } | 271 | } |
270 | return 0; | 272 | return 0; |
@@ -302,7 +304,7 @@ int r420_suspend(struct radeon_device *rdev) | |||
302 | { | 304 | { |
303 | r420_cp_errata_fini(rdev); | 305 | r420_cp_errata_fini(rdev); |
304 | r100_cp_disable(rdev); | 306 | r100_cp_disable(rdev); |
305 | r100_wb_disable(rdev); | 307 | radeon_wb_disable(rdev); |
306 | r100_irq_disable(rdev); | 308 | r100_irq_disable(rdev); |
307 | if (rdev->flags & RADEON_IS_PCIE) | 309 | if (rdev->flags & RADEON_IS_PCIE) |
308 | rv370_pcie_gart_disable(rdev); | 310 | rv370_pcie_gart_disable(rdev); |
@@ -314,7 +316,7 @@ int r420_suspend(struct radeon_device *rdev) | |||
314 | void r420_fini(struct radeon_device *rdev) | 316 | void r420_fini(struct radeon_device *rdev) |
315 | { | 317 | { |
316 | r100_cp_fini(rdev); | 318 | r100_cp_fini(rdev); |
317 | r100_wb_fini(rdev); | 319 | radeon_wb_fini(rdev); |
318 | r100_ib_fini(rdev); | 320 | r100_ib_fini(rdev); |
319 | radeon_gem_fini(rdev); | 321 | radeon_gem_fini(rdev); |
320 | if (rdev->flags & RADEON_IS_PCIE) | 322 | if (rdev->flags & RADEON_IS_PCIE) |
@@ -418,7 +420,7 @@ int r420_init(struct radeon_device *rdev) | |||
418 | /* Somethings want wront with the accel init stop accel */ | 420 | /* Somethings want wront with the accel init stop accel */ |
419 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 421 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
420 | r100_cp_fini(rdev); | 422 | r100_cp_fini(rdev); |
421 | r100_wb_fini(rdev); | 423 | radeon_wb_fini(rdev); |
422 | r100_ib_fini(rdev); | 424 | r100_ib_fini(rdev); |
423 | radeon_irq_kms_fini(rdev); | 425 | radeon_irq_kms_fini(rdev); |
424 | if (rdev->flags & RADEON_IS_PCIE) | 426 | if (rdev->flags & RADEON_IS_PCIE) |
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 6ac1f604e29b..fc437059918f 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h | |||
@@ -355,6 +355,8 @@ | |||
355 | #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 | 355 | #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 |
356 | #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 | 356 | #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 |
357 | 357 | ||
358 | #define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4 | ||
359 | |||
358 | /* master controls */ | 360 | /* master controls */ |
359 | #define AVIVO_DC_CRTC_MASTER_EN 0x60f8 | 361 | #define AVIVO_DC_CRTC_MASTER_EN 0x60f8 |
360 | #define AVIVO_DC_CRTC_TV_CONTROL 0x60fc | 362 | #define AVIVO_DC_CRTC_TV_CONTROL 0x60fc |
@@ -409,8 +411,10 @@ | |||
409 | #define AVIVO_D1GRPH_X_END 0x6134 | 411 | #define AVIVO_D1GRPH_X_END 0x6134 |
410 | #define AVIVO_D1GRPH_Y_END 0x6138 | 412 | #define AVIVO_D1GRPH_Y_END 0x6138 |
411 | #define AVIVO_D1GRPH_UPDATE 0x6144 | 413 | #define AVIVO_D1GRPH_UPDATE 0x6144 |
414 | # define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING (1 << 2) | ||
412 | # define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16) | 415 | # define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16) |
413 | #define AVIVO_D1GRPH_FLIP_CONTROL 0x6148 | 416 | #define AVIVO_D1GRPH_FLIP_CONTROL 0x6148 |
417 | # define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0) | ||
414 | 418 | ||
415 | #define AVIVO_D1CUR_CONTROL 0x6400 | 419 | #define AVIVO_D1CUR_CONTROL 0x6400 |
416 | # define AVIVO_D1CURSOR_EN (1 << 0) | 420 | # define AVIVO_D1CURSOR_EN (1 << 0) |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 1458dee902dd..3081d07f8de5 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -79,8 +79,8 @@ static void r520_gpu_init(struct radeon_device *rdev) | |||
79 | WREG32(0x4128, 0xFF); | 79 | WREG32(0x4128, 0xFF); |
80 | } | 80 | } |
81 | r420_pipes_init(rdev); | 81 | r420_pipes_init(rdev); |
82 | gb_pipe_select = RREG32(0x402C); | 82 | gb_pipe_select = RREG32(R400_GB_PIPE_SELECT); |
83 | tmp = RREG32(0x170C); | 83 | tmp = RREG32(R300_DST_PIPE_CONFIG); |
84 | pipe_select_current = (tmp >> 2) & 3; | 84 | pipe_select_current = (tmp >> 2) & 3; |
85 | tmp = (1 << pipe_select_current) | | 85 | tmp = (1 << pipe_select_current) | |
86 | (((gb_pipe_select >> 8) & 0xF) << 4); | 86 | (((gb_pipe_select >> 8) & 0xF) << 4); |
@@ -181,21 +181,24 @@ static int r520_startup(struct radeon_device *rdev) | |||
181 | if (r) | 181 | if (r) |
182 | return r; | 182 | return r; |
183 | } | 183 | } |
184 | |||
185 | /* allocate wb buffer */ | ||
186 | r = radeon_wb_init(rdev); | ||
187 | if (r) | ||
188 | return r; | ||
189 | |||
184 | /* Enable IRQ */ | 190 | /* Enable IRQ */ |
185 | rs600_irq_set(rdev); | 191 | rs600_irq_set(rdev); |
186 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 192 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
187 | /* 1M ring buffer */ | 193 | /* 1M ring buffer */ |
188 | r = r100_cp_init(rdev, 1024 * 1024); | 194 | r = r100_cp_init(rdev, 1024 * 1024); |
189 | if (r) { | 195 | if (r) { |
190 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 196 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
191 | return r; | 197 | return r; |
192 | } | 198 | } |
193 | r = r100_wb_init(rdev); | ||
194 | if (r) | ||
195 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
196 | r = r100_ib_init(rdev); | 199 | r = r100_ib_init(rdev); |
197 | if (r) { | 200 | if (r) { |
198 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 201 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); |
199 | return r; | 202 | return r; |
200 | } | 203 | } |
201 | return 0; | 204 | return 0; |
@@ -295,7 +298,7 @@ int r520_init(struct radeon_device *rdev) | |||
295 | /* Somethings want wront with the accel init stop accel */ | 298 | /* Somethings want wront with the accel init stop accel */ |
296 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 299 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
297 | r100_cp_fini(rdev); | 300 | r100_cp_fini(rdev); |
298 | r100_wb_fini(rdev); | 301 | radeon_wb_fini(rdev); |
299 | r100_ib_fini(rdev); | 302 | r100_ib_fini(rdev); |
300 | radeon_irq_kms_fini(rdev); | 303 | radeon_irq_kms_fini(rdev); |
301 | rv370_pcie_gart_fini(rdev); | 304 | rv370_pcie_gart_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 7b65e4efe8af..bc54b26cb32f 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #define EVERGREEN_PFP_UCODE_SIZE 1120 | 47 | #define EVERGREEN_PFP_UCODE_SIZE 1120 |
48 | #define EVERGREEN_PM4_UCODE_SIZE 1376 | 48 | #define EVERGREEN_PM4_UCODE_SIZE 1376 |
49 | #define EVERGREEN_RLC_UCODE_SIZE 768 | 49 | #define EVERGREEN_RLC_UCODE_SIZE 768 |
50 | #define CAYMAN_RLC_UCODE_SIZE 1024 | ||
50 | 51 | ||
51 | /* Firmware Names */ | 52 | /* Firmware Names */ |
52 | MODULE_FIRMWARE("radeon/R600_pfp.bin"); | 53 | MODULE_FIRMWARE("radeon/R600_pfp.bin"); |
@@ -83,6 +84,13 @@ MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin"); | |||
83 | MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); | 84 | MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); |
84 | MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); | 85 | MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); |
85 | MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); | 86 | MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); |
87 | MODULE_FIRMWARE("radeon/PALM_pfp.bin"); | ||
88 | MODULE_FIRMWARE("radeon/PALM_me.bin"); | ||
89 | MODULE_FIRMWARE("radeon/SUMO_rlc.bin"); | ||
90 | MODULE_FIRMWARE("radeon/SUMO_pfp.bin"); | ||
91 | MODULE_FIRMWARE("radeon/SUMO_me.bin"); | ||
92 | MODULE_FIRMWARE("radeon/SUMO2_pfp.bin"); | ||
93 | MODULE_FIRMWARE("radeon/SUMO2_me.bin"); | ||
86 | 94 | ||
87 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); | 95 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); |
88 | 96 | ||
@@ -91,18 +99,17 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev); | |||
91 | void r600_gpu_init(struct radeon_device *rdev); | 99 | void r600_gpu_init(struct radeon_device *rdev); |
92 | void r600_fini(struct radeon_device *rdev); | 100 | void r600_fini(struct radeon_device *rdev); |
93 | void r600_irq_disable(struct radeon_device *rdev); | 101 | void r600_irq_disable(struct radeon_device *rdev); |
102 | static void r600_pcie_gen2_enable(struct radeon_device *rdev); | ||
94 | 103 | ||
95 | /* get temperature in millidegrees */ | 104 | /* get temperature in millidegrees */ |
96 | u32 rv6xx_get_temp(struct radeon_device *rdev) | 105 | int rv6xx_get_temp(struct radeon_device *rdev) |
97 | { | 106 | { |
98 | u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> | 107 | u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> |
99 | ASIC_T_SHIFT; | 108 | ASIC_T_SHIFT; |
100 | u32 actual_temp = 0; | 109 | int actual_temp = temp & 0xff; |
101 | 110 | ||
102 | if ((temp >> 7) & 1) | 111 | if (temp & 0x100) |
103 | actual_temp = 0; | 112 | actual_temp -= 256; |
104 | else | ||
105 | actual_temp = (temp >> 1) & 0xff; | ||
106 | 113 | ||
107 | return actual_temp * 1000; | 114 | return actual_temp * 1000; |
108 | } | 115 | } |
@@ -583,8 +590,11 @@ void r600_pm_misc(struct radeon_device *rdev) | |||
583 | struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; | 590 | struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; |
584 | 591 | ||
585 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { | 592 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { |
593 | /* 0xff01 is a flag rather then an actual voltage */ | ||
594 | if (voltage->voltage == 0xff01) | ||
595 | return; | ||
586 | if (voltage->voltage != rdev->pm.current_vddc) { | 596 | if (voltage->voltage != rdev->pm.current_vddc) { |
587 | radeon_atom_set_voltage(rdev, voltage->voltage); | 597 | radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); |
588 | rdev->pm.current_vddc = voltage->voltage; | 598 | rdev->pm.current_vddc = voltage->voltage; |
589 | DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage); | 599 | DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage); |
590 | } | 600 | } |
@@ -884,12 +894,15 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) | |||
884 | u32 tmp; | 894 | u32 tmp; |
885 | 895 | ||
886 | /* flush hdp cache so updates hit vram */ | 896 | /* flush hdp cache so updates hit vram */ |
887 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { | 897 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && |
898 | !(rdev->flags & RADEON_IS_AGP)) { | ||
888 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; | 899 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; |
889 | u32 tmp; | 900 | u32 tmp; |
890 | 901 | ||
891 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read | 902 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read |
892 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL | 903 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL |
904 | * This seems to cause problems on some AGP cards. Just use the old | ||
905 | * method for them. | ||
893 | */ | 906 | */ |
894 | WREG32(HDP_DEBUG1, 0); | 907 | WREG32(HDP_DEBUG1, 0); |
895 | tmp = readl((void __iomem *)ptr); | 908 | tmp = readl((void __iomem *)ptr); |
@@ -919,7 +932,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev) | |||
919 | int r; | 932 | int r; |
920 | 933 | ||
921 | if (rdev->gart.table.vram.robj) { | 934 | if (rdev->gart.table.vram.robj) { |
922 | WARN(1, "R600 PCIE GART already initialized.\n"); | 935 | WARN(1, "R600 PCIE GART already initialized\n"); |
923 | return 0; | 936 | return 0; |
924 | } | 937 | } |
925 | /* Initialize common gart structure */ | 938 | /* Initialize common gart structure */ |
@@ -1167,7 +1180,7 @@ static void r600_mc_program(struct radeon_device *rdev) | |||
1167 | * Note: GTT start, end, size should be initialized before calling this | 1180 | * Note: GTT start, end, size should be initialized before calling this |
1168 | * function on AGP platform. | 1181 | * function on AGP platform. |
1169 | */ | 1182 | */ |
1170 | void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | 1183 | static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) |
1171 | { | 1184 | { |
1172 | u64 size_bf, size_af; | 1185 | u64 size_bf, size_af; |
1173 | 1186 | ||
@@ -1201,8 +1214,10 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | |||
1201 | mc->vram_end, mc->real_vram_size >> 20); | 1214 | mc->vram_end, mc->real_vram_size >> 20); |
1202 | } else { | 1215 | } else { |
1203 | u64 base = 0; | 1216 | u64 base = 0; |
1204 | if (rdev->flags & RADEON_IS_IGP) | 1217 | if (rdev->flags & RADEON_IS_IGP) { |
1205 | base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; | 1218 | base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF; |
1219 | base <<= 24; | ||
1220 | } | ||
1206 | radeon_vram_location(rdev, &rdev->mc, base); | 1221 | radeon_vram_location(rdev, &rdev->mc, base); |
1207 | rdev->mc.gtt_base_align = 0; | 1222 | rdev->mc.gtt_base_align = 0; |
1208 | radeon_gtt_location(rdev, mc); | 1223 | radeon_gtt_location(rdev, mc); |
@@ -1248,7 +1263,6 @@ int r600_mc_init(struct radeon_device *rdev) | |||
1248 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); | 1263 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
1249 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); | 1264 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); |
1250 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 1265 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
1251 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
1252 | r600_vram_gtt_location(rdev, &rdev->mc); | 1266 | r600_vram_gtt_location(rdev, &rdev->mc); |
1253 | 1267 | ||
1254 | if (rdev->flags & RADEON_IS_IGP) { | 1268 | if (rdev->flags & RADEON_IS_IGP) { |
@@ -1284,6 +1298,9 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
1284 | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); | 1298 | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); |
1285 | u32 tmp; | 1299 | u32 tmp; |
1286 | 1300 | ||
1301 | if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) | ||
1302 | return 0; | ||
1303 | |||
1287 | dev_info(rdev->dev, "GPU softreset \n"); | 1304 | dev_info(rdev->dev, "GPU softreset \n"); |
1288 | dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", | 1305 | dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", |
1289 | RREG32(R_008010_GRBM_STATUS)); | 1306 | RREG32(R_008010_GRBM_STATUS)); |
@@ -1343,13 +1360,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev) | |||
1343 | u32 srbm_status; | 1360 | u32 srbm_status; |
1344 | u32 grbm_status; | 1361 | u32 grbm_status; |
1345 | u32 grbm_status2; | 1362 | u32 grbm_status2; |
1363 | struct r100_gpu_lockup *lockup; | ||
1346 | int r; | 1364 | int r; |
1347 | 1365 | ||
1366 | if (rdev->family >= CHIP_RV770) | ||
1367 | lockup = &rdev->config.rv770.lockup; | ||
1368 | else | ||
1369 | lockup = &rdev->config.r600.lockup; | ||
1370 | |||
1348 | srbm_status = RREG32(R_000E50_SRBM_STATUS); | 1371 | srbm_status = RREG32(R_000E50_SRBM_STATUS); |
1349 | grbm_status = RREG32(R_008010_GRBM_STATUS); | 1372 | grbm_status = RREG32(R_008010_GRBM_STATUS); |
1350 | grbm_status2 = RREG32(R_008014_GRBM_STATUS2); | 1373 | grbm_status2 = RREG32(R_008014_GRBM_STATUS2); |
1351 | if (!G_008010_GUI_ACTIVE(grbm_status)) { | 1374 | if (!G_008010_GUI_ACTIVE(grbm_status)) { |
1352 | r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp); | 1375 | r100_gpu_lockup_update(lockup, &rdev->cp); |
1353 | return false; | 1376 | return false; |
1354 | } | 1377 | } |
1355 | /* force CP activities */ | 1378 | /* force CP activities */ |
@@ -1361,7 +1384,7 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev) | |||
1361 | radeon_ring_unlock_commit(rdev); | 1384 | radeon_ring_unlock_commit(rdev); |
1362 | } | 1385 | } |
1363 | rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); | 1386 | rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); |
1364 | return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp); | 1387 | return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp); |
1365 | } | 1388 | } |
1366 | 1389 | ||
1367 | int r600_asic_reset(struct radeon_device *rdev) | 1390 | int r600_asic_reset(struct radeon_device *rdev) |
@@ -1608,8 +1631,11 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
1608 | rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes; | 1631 | rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes; |
1609 | rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); | 1632 | rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); |
1610 | tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); | 1633 | tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); |
1611 | tiling_config |= GROUP_SIZE(0); | 1634 | tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); |
1612 | rdev->config.r600.tiling_group_size = 256; | 1635 | if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) |
1636 | rdev->config.r600.tiling_group_size = 512; | ||
1637 | else | ||
1638 | rdev->config.r600.tiling_group_size = 256; | ||
1613 | tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; | 1639 | tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; |
1614 | if (tmp > 3) { | 1640 | if (tmp > 3) { |
1615 | tiling_config |= ROW_TILING(3); | 1641 | tiling_config |= ROW_TILING(3); |
@@ -1918,8 +1944,9 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) | |||
1918 | */ | 1944 | */ |
1919 | void r600_cp_stop(struct radeon_device *rdev) | 1945 | void r600_cp_stop(struct radeon_device *rdev) |
1920 | { | 1946 | { |
1921 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 1947 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
1922 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); | 1948 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
1949 | WREG32(SCRATCH_UMSK, 0); | ||
1923 | } | 1950 | } |
1924 | 1951 | ||
1925 | int r600_init_microcode(struct radeon_device *rdev) | 1952 | int r600_init_microcode(struct radeon_device *rdev) |
@@ -2000,6 +2027,18 @@ int r600_init_microcode(struct radeon_device *rdev) | |||
2000 | chip_name = "CYPRESS"; | 2027 | chip_name = "CYPRESS"; |
2001 | rlc_chip_name = "CYPRESS"; | 2028 | rlc_chip_name = "CYPRESS"; |
2002 | break; | 2029 | break; |
2030 | case CHIP_PALM: | ||
2031 | chip_name = "PALM"; | ||
2032 | rlc_chip_name = "SUMO"; | ||
2033 | break; | ||
2034 | case CHIP_SUMO: | ||
2035 | chip_name = "SUMO"; | ||
2036 | rlc_chip_name = "SUMO"; | ||
2037 | break; | ||
2038 | case CHIP_SUMO2: | ||
2039 | chip_name = "SUMO2"; | ||
2040 | rlc_chip_name = "SUMO"; | ||
2041 | break; | ||
2003 | default: BUG(); | 2042 | default: BUG(); |
2004 | } | 2043 | } |
2005 | 2044 | ||
@@ -2081,7 +2120,11 @@ static int r600_cp_load_microcode(struct radeon_device *rdev) | |||
2081 | 2120 | ||
2082 | r600_cp_stop(rdev); | 2121 | r600_cp_stop(rdev); |
2083 | 2122 | ||
2084 | WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); | 2123 | WREG32(CP_RB_CNTL, |
2124 | #ifdef __BIG_ENDIAN | ||
2125 | BUF_SWAP_32BIT | | ||
2126 | #endif | ||
2127 | RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); | ||
2085 | 2128 | ||
2086 | /* Reset cp */ | 2129 | /* Reset cp */ |
2087 | WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); | 2130 | WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); |
@@ -2152,7 +2195,7 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2152 | 2195 | ||
2153 | /* Set ring buffer size */ | 2196 | /* Set ring buffer size */ |
2154 | rb_bufsz = drm_order(rdev->cp.ring_size / 8); | 2197 | rb_bufsz = drm_order(rdev->cp.ring_size / 8); |
2155 | tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; | 2198 | tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; |
2156 | #ifdef __BIG_ENDIAN | 2199 | #ifdef __BIG_ENDIAN |
2157 | tmp |= BUF_SWAP_32BIT; | 2200 | tmp |= BUF_SWAP_32BIT; |
2158 | #endif | 2201 | #endif |
@@ -2166,8 +2209,23 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2166 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); | 2209 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); |
2167 | WREG32(CP_RB_RPTR_WR, 0); | 2210 | WREG32(CP_RB_RPTR_WR, 0); |
2168 | WREG32(CP_RB_WPTR, 0); | 2211 | WREG32(CP_RB_WPTR, 0); |
2169 | WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF); | 2212 | |
2170 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); | 2213 | /* set the wb address whether it's enabled or not */ |
2214 | WREG32(CP_RB_RPTR_ADDR, | ||
2215 | #ifdef __BIG_ENDIAN | ||
2216 | RB_RPTR_SWAP(2) | | ||
2217 | #endif | ||
2218 | ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); | ||
2219 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); | ||
2220 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); | ||
2221 | |||
2222 | if (rdev->wb.enabled) | ||
2223 | WREG32(SCRATCH_UMSK, 0xff); | ||
2224 | else { | ||
2225 | tmp |= RB_NO_UPDATE; | ||
2226 | WREG32(SCRATCH_UMSK, 0); | ||
2227 | } | ||
2228 | |||
2171 | mdelay(1); | 2229 | mdelay(1); |
2172 | WREG32(CP_RB_CNTL, tmp); | 2230 | WREG32(CP_RB_CNTL, tmp); |
2173 | 2231 | ||
@@ -2219,9 +2277,10 @@ void r600_scratch_init(struct radeon_device *rdev) | |||
2219 | int i; | 2277 | int i; |
2220 | 2278 | ||
2221 | rdev->scratch.num_reg = 7; | 2279 | rdev->scratch.num_reg = 7; |
2280 | rdev->scratch.reg_base = SCRATCH_REG0; | ||
2222 | for (i = 0; i < rdev->scratch.num_reg; i++) { | 2281 | for (i = 0; i < rdev->scratch.num_reg; i++) { |
2223 | rdev->scratch.free[i] = true; | 2282 | rdev->scratch.free[i] = true; |
2224 | rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4); | 2283 | rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); |
2225 | } | 2284 | } |
2226 | } | 2285 | } |
2227 | 2286 | ||
@@ -2265,88 +2324,34 @@ int r600_ring_test(struct radeon_device *rdev) | |||
2265 | return r; | 2324 | return r; |
2266 | } | 2325 | } |
2267 | 2326 | ||
2268 | void r600_wb_disable(struct radeon_device *rdev) | ||
2269 | { | ||
2270 | int r; | ||
2271 | |||
2272 | WREG32(SCRATCH_UMSK, 0); | ||
2273 | if (rdev->wb.wb_obj) { | ||
2274 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | ||
2275 | if (unlikely(r != 0)) | ||
2276 | return; | ||
2277 | radeon_bo_kunmap(rdev->wb.wb_obj); | ||
2278 | radeon_bo_unpin(rdev->wb.wb_obj); | ||
2279 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
2280 | } | ||
2281 | } | ||
2282 | |||
2283 | void r600_wb_fini(struct radeon_device *rdev) | ||
2284 | { | ||
2285 | r600_wb_disable(rdev); | ||
2286 | if (rdev->wb.wb_obj) { | ||
2287 | radeon_bo_unref(&rdev->wb.wb_obj); | ||
2288 | rdev->wb.wb = NULL; | ||
2289 | rdev->wb.wb_obj = NULL; | ||
2290 | } | ||
2291 | } | ||
2292 | |||
2293 | int r600_wb_enable(struct radeon_device *rdev) | ||
2294 | { | ||
2295 | int r; | ||
2296 | |||
2297 | if (rdev->wb.wb_obj == NULL) { | ||
2298 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, | ||
2299 | RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); | ||
2300 | if (r) { | ||
2301 | dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); | ||
2302 | return r; | ||
2303 | } | ||
2304 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | ||
2305 | if (unlikely(r != 0)) { | ||
2306 | r600_wb_fini(rdev); | ||
2307 | return r; | ||
2308 | } | ||
2309 | r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, | ||
2310 | &rdev->wb.gpu_addr); | ||
2311 | if (r) { | ||
2312 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
2313 | dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); | ||
2314 | r600_wb_fini(rdev); | ||
2315 | return r; | ||
2316 | } | ||
2317 | r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | ||
2318 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
2319 | if (r) { | ||
2320 | dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); | ||
2321 | r600_wb_fini(rdev); | ||
2322 | return r; | ||
2323 | } | ||
2324 | } | ||
2325 | WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF); | ||
2326 | WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC); | ||
2327 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF); | ||
2328 | WREG32(SCRATCH_UMSK, 0xff); | ||
2329 | return 0; | ||
2330 | } | ||
2331 | |||
2332 | void r600_fence_ring_emit(struct radeon_device *rdev, | 2327 | void r600_fence_ring_emit(struct radeon_device *rdev, |
2333 | struct radeon_fence *fence) | 2328 | struct radeon_fence *fence) |
2334 | { | 2329 | { |
2335 | /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */ | 2330 | if (rdev->wb.use_event) { |
2336 | 2331 | u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET + | |
2337 | radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); | 2332 | (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base); |
2338 | radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT); | 2333 | /* EVENT_WRITE_EOP - flush caches, send int */ |
2339 | /* wait for 3D idle clean */ | 2334 | radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); |
2340 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | 2335 | radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); |
2341 | radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | 2336 | radeon_ring_write(rdev, addr & 0xffffffff); |
2342 | radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); | 2337 | radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); |
2343 | /* Emit fence sequence & fire IRQ */ | 2338 | radeon_ring_write(rdev, fence->seq); |
2344 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | 2339 | radeon_ring_write(rdev, 0); |
2345 | radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); | 2340 | } else { |
2346 | radeon_ring_write(rdev, fence->seq); | 2341 | radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); |
2347 | /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ | 2342 | radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); |
2348 | radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); | 2343 | /* wait for 3D idle clean */ |
2349 | radeon_ring_write(rdev, RB_INT_STAT); | 2344 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
2345 | radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | ||
2346 | radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); | ||
2347 | /* Emit fence sequence & fire IRQ */ | ||
2348 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
2349 | radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); | ||
2350 | radeon_ring_write(rdev, fence->seq); | ||
2351 | /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ | ||
2352 | radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); | ||
2353 | radeon_ring_write(rdev, RB_INT_STAT); | ||
2354 | } | ||
2350 | } | 2355 | } |
2351 | 2356 | ||
2352 | int r600_copy_blit(struct radeon_device *rdev, | 2357 | int r600_copy_blit(struct radeon_device *rdev, |
@@ -2383,28 +2388,13 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg) | |||
2383 | /* FIXME: implement */ | 2388 | /* FIXME: implement */ |
2384 | } | 2389 | } |
2385 | 2390 | ||
2386 | |||
2387 | bool r600_card_posted(struct radeon_device *rdev) | ||
2388 | { | ||
2389 | uint32_t reg; | ||
2390 | |||
2391 | /* first check CRTCs */ | ||
2392 | reg = RREG32(D1CRTC_CONTROL) | | ||
2393 | RREG32(D2CRTC_CONTROL); | ||
2394 | if (reg & CRTC_EN) | ||
2395 | return true; | ||
2396 | |||
2397 | /* then check MEM_SIZE, in case the crtcs are off */ | ||
2398 | if (RREG32(CONFIG_MEMSIZE)) | ||
2399 | return true; | ||
2400 | |||
2401 | return false; | ||
2402 | } | ||
2403 | |||
2404 | int r600_startup(struct radeon_device *rdev) | 2391 | int r600_startup(struct radeon_device *rdev) |
2405 | { | 2392 | { |
2406 | int r; | 2393 | int r; |
2407 | 2394 | ||
2395 | /* enable pcie gen2 link */ | ||
2396 | r600_pcie_gen2_enable(rdev); | ||
2397 | |||
2408 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 2398 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
2409 | r = r600_init_microcode(rdev); | 2399 | r = r600_init_microcode(rdev); |
2410 | if (r) { | 2400 | if (r) { |
@@ -2428,19 +2418,12 @@ int r600_startup(struct radeon_device *rdev) | |||
2428 | rdev->asic->copy = NULL; | 2418 | rdev->asic->copy = NULL; |
2429 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | 2419 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); |
2430 | } | 2420 | } |
2431 | /* pin copy shader into vram */ | 2421 | |
2432 | if (rdev->r600_blit.shader_obj) { | 2422 | /* allocate wb buffer */ |
2433 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 2423 | r = radeon_wb_init(rdev); |
2434 | if (unlikely(r != 0)) | 2424 | if (r) |
2435 | return r; | 2425 | return r; |
2436 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | 2426 | |
2437 | &rdev->r600_blit.shader_gpu_addr); | ||
2438 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
2439 | if (r) { | ||
2440 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); | ||
2441 | return r; | ||
2442 | } | ||
2443 | } | ||
2444 | /* Enable IRQ */ | 2427 | /* Enable IRQ */ |
2445 | r = r600_irq_init(rdev); | 2428 | r = r600_irq_init(rdev); |
2446 | if (r) { | 2429 | if (r) { |
@@ -2459,8 +2442,7 @@ int r600_startup(struct radeon_device *rdev) | |||
2459 | r = r600_cp_resume(rdev); | 2442 | r = r600_cp_resume(rdev); |
2460 | if (r) | 2443 | if (r) |
2461 | return r; | 2444 | return r; |
2462 | /* write back buffer are not vital so don't worry about failure */ | 2445 | |
2463 | r600_wb_enable(rdev); | ||
2464 | return 0; | 2446 | return 0; |
2465 | } | 2447 | } |
2466 | 2448 | ||
@@ -2497,7 +2479,7 @@ int r600_resume(struct radeon_device *rdev) | |||
2497 | 2479 | ||
2498 | r = r600_ib_test(rdev); | 2480 | r = r600_ib_test(rdev); |
2499 | if (r) { | 2481 | if (r) { |
2500 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 2482 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); |
2501 | return r; | 2483 | return r; |
2502 | } | 2484 | } |
2503 | 2485 | ||
@@ -2519,7 +2501,7 @@ int r600_suspend(struct radeon_device *rdev) | |||
2519 | r600_cp_stop(rdev); | 2501 | r600_cp_stop(rdev); |
2520 | rdev->cp.ready = false; | 2502 | rdev->cp.ready = false; |
2521 | r600_irq_suspend(rdev); | 2503 | r600_irq_suspend(rdev); |
2522 | r600_wb_disable(rdev); | 2504 | radeon_wb_disable(rdev); |
2523 | r600_pcie_gart_disable(rdev); | 2505 | r600_pcie_gart_disable(rdev); |
2524 | /* unpin shaders bo */ | 2506 | /* unpin shaders bo */ |
2525 | if (rdev->r600_blit.shader_obj) { | 2507 | if (rdev->r600_blit.shader_obj) { |
@@ -2542,9 +2524,6 @@ int r600_init(struct radeon_device *rdev) | |||
2542 | { | 2524 | { |
2543 | int r; | 2525 | int r; |
2544 | 2526 | ||
2545 | r = radeon_dummy_page_init(rdev); | ||
2546 | if (r) | ||
2547 | return r; | ||
2548 | if (r600_debugfs_mc_info_init(rdev)) { | 2527 | if (r600_debugfs_mc_info_init(rdev)) { |
2549 | DRM_ERROR("Failed to register debugfs file for mc !\n"); | 2528 | DRM_ERROR("Failed to register debugfs file for mc !\n"); |
2550 | } | 2529 | } |
@@ -2566,7 +2545,7 @@ int r600_init(struct radeon_device *rdev) | |||
2566 | if (r) | 2545 | if (r) |
2567 | return r; | 2546 | return r; |
2568 | /* Post card if necessary */ | 2547 | /* Post card if necessary */ |
2569 | if (!r600_card_posted(rdev)) { | 2548 | if (!radeon_card_posted(rdev)) { |
2570 | if (!rdev->bios) { | 2549 | if (!rdev->bios) { |
2571 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | 2550 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); |
2572 | return -EINVAL; | 2551 | return -EINVAL; |
@@ -2616,8 +2595,8 @@ int r600_init(struct radeon_device *rdev) | |||
2616 | if (r) { | 2595 | if (r) { |
2617 | dev_err(rdev->dev, "disabling GPU acceleration\n"); | 2596 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
2618 | r600_cp_fini(rdev); | 2597 | r600_cp_fini(rdev); |
2619 | r600_wb_fini(rdev); | ||
2620 | r600_irq_fini(rdev); | 2598 | r600_irq_fini(rdev); |
2599 | radeon_wb_fini(rdev); | ||
2621 | radeon_irq_kms_fini(rdev); | 2600 | radeon_irq_kms_fini(rdev); |
2622 | r600_pcie_gart_fini(rdev); | 2601 | r600_pcie_gart_fini(rdev); |
2623 | rdev->accel_working = false; | 2602 | rdev->accel_working = false; |
@@ -2647,8 +2626,9 @@ void r600_fini(struct radeon_device *rdev) | |||
2647 | r600_audio_fini(rdev); | 2626 | r600_audio_fini(rdev); |
2648 | r600_blit_fini(rdev); | 2627 | r600_blit_fini(rdev); |
2649 | r600_cp_fini(rdev); | 2628 | r600_cp_fini(rdev); |
2650 | r600_wb_fini(rdev); | ||
2651 | r600_irq_fini(rdev); | 2629 | r600_irq_fini(rdev); |
2630 | radeon_wb_fini(rdev); | ||
2631 | radeon_ib_pool_fini(rdev); | ||
2652 | radeon_irq_kms_fini(rdev); | 2632 | radeon_irq_kms_fini(rdev); |
2653 | r600_pcie_gart_fini(rdev); | 2633 | r600_pcie_gart_fini(rdev); |
2654 | radeon_agp_fini(rdev); | 2634 | radeon_agp_fini(rdev); |
@@ -2658,7 +2638,6 @@ void r600_fini(struct radeon_device *rdev) | |||
2658 | radeon_atombios_fini(rdev); | 2638 | radeon_atombios_fini(rdev); |
2659 | kfree(rdev->bios); | 2639 | kfree(rdev->bios); |
2660 | rdev->bios = NULL; | 2640 | rdev->bios = NULL; |
2661 | radeon_dummy_page_fini(rdev); | ||
2662 | } | 2641 | } |
2663 | 2642 | ||
2664 | 2643 | ||
@@ -2669,7 +2648,11 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | |||
2669 | { | 2648 | { |
2670 | /* FIXME: implement */ | 2649 | /* FIXME: implement */ |
2671 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | 2650 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); |
2672 | radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC); | 2651 | radeon_ring_write(rdev, |
2652 | #ifdef __BIG_ENDIAN | ||
2653 | (2 << 0) | | ||
2654 | #endif | ||
2655 | (ib->gpu_addr & 0xFFFFFFFC)); | ||
2673 | radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); | 2656 | radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); |
2674 | radeon_ring_write(rdev, ib->length_dw); | 2657 | radeon_ring_write(rdev, ib->length_dw); |
2675 | } | 2658 | } |
@@ -2769,8 +2752,8 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev) | |||
2769 | 2752 | ||
2770 | /* Allocate ring buffer */ | 2753 | /* Allocate ring buffer */ |
2771 | if (rdev->ih.ring_obj == NULL) { | 2754 | if (rdev->ih.ring_obj == NULL) { |
2772 | r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, | 2755 | r = radeon_bo_create(rdev, rdev->ih.ring_size, |
2773 | true, | 2756 | PAGE_SIZE, true, |
2774 | RADEON_GEM_DOMAIN_GTT, | 2757 | RADEON_GEM_DOMAIN_GTT, |
2775 | &rdev->ih.ring_obj); | 2758 | &rdev->ih.ring_obj); |
2776 | if (r) { | 2759 | if (r) { |
@@ -2850,13 +2833,20 @@ static int r600_rlc_init(struct radeon_device *rdev) | |||
2850 | WREG32(RLC_HB_CNTL, 0); | 2833 | WREG32(RLC_HB_CNTL, 0); |
2851 | WREG32(RLC_HB_RPTR, 0); | 2834 | WREG32(RLC_HB_RPTR, 0); |
2852 | WREG32(RLC_HB_WPTR, 0); | 2835 | WREG32(RLC_HB_WPTR, 0); |
2853 | WREG32(RLC_HB_WPTR_LSB_ADDR, 0); | 2836 | if (rdev->family <= CHIP_CAICOS) { |
2854 | WREG32(RLC_HB_WPTR_MSB_ADDR, 0); | 2837 | WREG32(RLC_HB_WPTR_LSB_ADDR, 0); |
2838 | WREG32(RLC_HB_WPTR_MSB_ADDR, 0); | ||
2839 | } | ||
2855 | WREG32(RLC_MC_CNTL, 0); | 2840 | WREG32(RLC_MC_CNTL, 0); |
2856 | WREG32(RLC_UCODE_CNTL, 0); | 2841 | WREG32(RLC_UCODE_CNTL, 0); |
2857 | 2842 | ||
2858 | fw_data = (const __be32 *)rdev->rlc_fw->data; | 2843 | fw_data = (const __be32 *)rdev->rlc_fw->data; |
2859 | if (rdev->family >= CHIP_CEDAR) { | 2844 | if (rdev->family >= CHIP_CAYMAN) { |
2845 | for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) { | ||
2846 | WREG32(RLC_UCODE_ADDR, i); | ||
2847 | WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); | ||
2848 | } | ||
2849 | } else if (rdev->family >= CHIP_CEDAR) { | ||
2860 | for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) { | 2850 | for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) { |
2861 | WREG32(RLC_UCODE_ADDR, i); | 2851 | WREG32(RLC_UCODE_ADDR, i); |
2862 | WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); | 2852 | WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); |
@@ -2915,6 +2905,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev) | |||
2915 | WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); | 2905 | WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
2916 | WREG32(GRBM_INT_CNTL, 0); | 2906 | WREG32(GRBM_INT_CNTL, 0); |
2917 | WREG32(DxMODE_INT_MASK, 0); | 2907 | WREG32(DxMODE_INT_MASK, 0); |
2908 | WREG32(D1GRPH_INTERRUPT_CONTROL, 0); | ||
2909 | WREG32(D2GRPH_INTERRUPT_CONTROL, 0); | ||
2918 | if (ASIC_IS_DCE3(rdev)) { | 2910 | if (ASIC_IS_DCE3(rdev)) { |
2919 | WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0); | 2911 | WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0); |
2920 | WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0); | 2912 | WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0); |
@@ -2983,10 +2975,13 @@ int r600_irq_init(struct radeon_device *rdev) | |||
2983 | ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | | 2975 | ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | |
2984 | IH_WPTR_OVERFLOW_CLEAR | | 2976 | IH_WPTR_OVERFLOW_CLEAR | |
2985 | (rb_bufsz << 1)); | 2977 | (rb_bufsz << 1)); |
2986 | /* WPTR writeback, not yet */ | 2978 | |
2987 | /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/ | 2979 | if (rdev->wb.enabled) |
2988 | WREG32(IH_RB_WPTR_ADDR_LO, 0); | 2980 | ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE; |
2989 | WREG32(IH_RB_WPTR_ADDR_HI, 0); | 2981 | |
2982 | /* set the writeback address whether it's enabled or not */ | ||
2983 | WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC); | ||
2984 | WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF); | ||
2990 | 2985 | ||
2991 | WREG32(IH_RB_CNTL, ih_rb_cntl); | 2986 | WREG32(IH_RB_CNTL, ih_rb_cntl); |
2992 | 2987 | ||
@@ -3036,9 +3031,10 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3036 | u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; | 3031 | u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; |
3037 | u32 grbm_int_cntl = 0; | 3032 | u32 grbm_int_cntl = 0; |
3038 | u32 hdmi1, hdmi2; | 3033 | u32 hdmi1, hdmi2; |
3034 | u32 d1grph = 0, d2grph = 0; | ||
3039 | 3035 | ||
3040 | if (!rdev->irq.installed) { | 3036 | if (!rdev->irq.installed) { |
3041 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | 3037 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
3042 | return -EINVAL; | 3038 | return -EINVAL; |
3043 | } | 3039 | } |
3044 | /* don't enable anything if the ih is disabled */ | 3040 | /* don't enable anything if the ih is disabled */ |
@@ -3070,12 +3066,15 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3070 | if (rdev->irq.sw_int) { | 3066 | if (rdev->irq.sw_int) { |
3071 | DRM_DEBUG("r600_irq_set: sw int\n"); | 3067 | DRM_DEBUG("r600_irq_set: sw int\n"); |
3072 | cp_int_cntl |= RB_INT_ENABLE; | 3068 | cp_int_cntl |= RB_INT_ENABLE; |
3069 | cp_int_cntl |= TIME_STAMP_INT_ENABLE; | ||
3073 | } | 3070 | } |
3074 | if (rdev->irq.crtc_vblank_int[0]) { | 3071 | if (rdev->irq.crtc_vblank_int[0] || |
3072 | rdev->irq.pflip[0]) { | ||
3075 | DRM_DEBUG("r600_irq_set: vblank 0\n"); | 3073 | DRM_DEBUG("r600_irq_set: vblank 0\n"); |
3076 | mode_int |= D1MODE_VBLANK_INT_MASK; | 3074 | mode_int |= D1MODE_VBLANK_INT_MASK; |
3077 | } | 3075 | } |
3078 | if (rdev->irq.crtc_vblank_int[1]) { | 3076 | if (rdev->irq.crtc_vblank_int[1] || |
3077 | rdev->irq.pflip[1]) { | ||
3079 | DRM_DEBUG("r600_irq_set: vblank 1\n"); | 3078 | DRM_DEBUG("r600_irq_set: vblank 1\n"); |
3080 | mode_int |= D2MODE_VBLANK_INT_MASK; | 3079 | mode_int |= D2MODE_VBLANK_INT_MASK; |
3081 | } | 3080 | } |
@@ -3118,6 +3117,8 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3118 | 3117 | ||
3119 | WREG32(CP_INT_CNTL, cp_int_cntl); | 3118 | WREG32(CP_INT_CNTL, cp_int_cntl); |
3120 | WREG32(DxMODE_INT_MASK, mode_int); | 3119 | WREG32(DxMODE_INT_MASK, mode_int); |
3120 | WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); | ||
3121 | WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); | ||
3121 | WREG32(GRBM_INT_CNTL, grbm_int_cntl); | 3122 | WREG32(GRBM_INT_CNTL, grbm_int_cntl); |
3122 | WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1); | 3123 | WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1); |
3123 | if (ASIC_IS_DCE3(rdev)) { | 3124 | if (ASIC_IS_DCE3(rdev)) { |
@@ -3140,32 +3141,35 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3140 | return 0; | 3141 | return 0; |
3141 | } | 3142 | } |
3142 | 3143 | ||
3143 | static inline void r600_irq_ack(struct radeon_device *rdev, | 3144 | static inline void r600_irq_ack(struct radeon_device *rdev) |
3144 | u32 *disp_int, | ||
3145 | u32 *disp_int_cont, | ||
3146 | u32 *disp_int_cont2) | ||
3147 | { | 3145 | { |
3148 | u32 tmp; | 3146 | u32 tmp; |
3149 | 3147 | ||
3150 | if (ASIC_IS_DCE3(rdev)) { | 3148 | if (ASIC_IS_DCE3(rdev)) { |
3151 | *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); | 3149 | rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); |
3152 | *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); | 3150 | rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); |
3153 | *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); | 3151 | rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); |
3154 | } else { | 3152 | } else { |
3155 | *disp_int = RREG32(DISP_INTERRUPT_STATUS); | 3153 | rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS); |
3156 | *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); | 3154 | rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); |
3157 | *disp_int_cont2 = 0; | 3155 | rdev->irq.stat_regs.r600.disp_int_cont2 = 0; |
3158 | } | 3156 | } |
3159 | 3157 | rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS); | |
3160 | if (*disp_int & LB_D1_VBLANK_INTERRUPT) | 3158 | rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS); |
3159 | |||
3160 | if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED) | ||
3161 | WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); | ||
3162 | if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED) | ||
3163 | WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); | ||
3164 | if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) | ||
3161 | WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); | 3165 | WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); |
3162 | if (*disp_int & LB_D1_VLINE_INTERRUPT) | 3166 | if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) |
3163 | WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK); | 3167 | WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK); |
3164 | if (*disp_int & LB_D2_VBLANK_INTERRUPT) | 3168 | if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) |
3165 | WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); | 3169 | WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); |
3166 | if (*disp_int & LB_D2_VLINE_INTERRUPT) | 3170 | if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) |
3167 | WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK); | 3171 | WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK); |
3168 | if (*disp_int & DC_HPD1_INTERRUPT) { | 3172 | if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { |
3169 | if (ASIC_IS_DCE3(rdev)) { | 3173 | if (ASIC_IS_DCE3(rdev)) { |
3170 | tmp = RREG32(DC_HPD1_INT_CONTROL); | 3174 | tmp = RREG32(DC_HPD1_INT_CONTROL); |
3171 | tmp |= DC_HPDx_INT_ACK; | 3175 | tmp |= DC_HPDx_INT_ACK; |
@@ -3176,7 +3180,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev, | |||
3176 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); | 3180 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); |
3177 | } | 3181 | } |
3178 | } | 3182 | } |
3179 | if (*disp_int & DC_HPD2_INTERRUPT) { | 3183 | if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { |
3180 | if (ASIC_IS_DCE3(rdev)) { | 3184 | if (ASIC_IS_DCE3(rdev)) { |
3181 | tmp = RREG32(DC_HPD2_INT_CONTROL); | 3185 | tmp = RREG32(DC_HPD2_INT_CONTROL); |
3182 | tmp |= DC_HPDx_INT_ACK; | 3186 | tmp |= DC_HPDx_INT_ACK; |
@@ -3187,7 +3191,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev, | |||
3187 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); | 3191 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); |
3188 | } | 3192 | } |
3189 | } | 3193 | } |
3190 | if (*disp_int_cont & DC_HPD3_INTERRUPT) { | 3194 | if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) { |
3191 | if (ASIC_IS_DCE3(rdev)) { | 3195 | if (ASIC_IS_DCE3(rdev)) { |
3192 | tmp = RREG32(DC_HPD3_INT_CONTROL); | 3196 | tmp = RREG32(DC_HPD3_INT_CONTROL); |
3193 | tmp |= DC_HPDx_INT_ACK; | 3197 | tmp |= DC_HPDx_INT_ACK; |
@@ -3198,18 +3202,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev, | |||
3198 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); | 3202 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); |
3199 | } | 3203 | } |
3200 | } | 3204 | } |
3201 | if (*disp_int_cont & DC_HPD4_INTERRUPT) { | 3205 | if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { |
3202 | tmp = RREG32(DC_HPD4_INT_CONTROL); | 3206 | tmp = RREG32(DC_HPD4_INT_CONTROL); |
3203 | tmp |= DC_HPDx_INT_ACK; | 3207 | tmp |= DC_HPDx_INT_ACK; |
3204 | WREG32(DC_HPD4_INT_CONTROL, tmp); | 3208 | WREG32(DC_HPD4_INT_CONTROL, tmp); |
3205 | } | 3209 | } |
3206 | if (ASIC_IS_DCE32(rdev)) { | 3210 | if (ASIC_IS_DCE32(rdev)) { |
3207 | if (*disp_int_cont2 & DC_HPD5_INTERRUPT) { | 3211 | if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { |
3208 | tmp = RREG32(DC_HPD5_INT_CONTROL); | 3212 | tmp = RREG32(DC_HPD5_INT_CONTROL); |
3209 | tmp |= DC_HPDx_INT_ACK; | 3213 | tmp |= DC_HPDx_INT_ACK; |
3210 | WREG32(DC_HPD5_INT_CONTROL, tmp); | 3214 | WREG32(DC_HPD5_INT_CONTROL, tmp); |
3211 | } | 3215 | } |
3212 | if (*disp_int_cont2 & DC_HPD6_INTERRUPT) { | 3216 | if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { |
3213 | tmp = RREG32(DC_HPD5_INT_CONTROL); | 3217 | tmp = RREG32(DC_HPD5_INT_CONTROL); |
3214 | tmp |= DC_HPDx_INT_ACK; | 3218 | tmp |= DC_HPDx_INT_ACK; |
3215 | WREG32(DC_HPD6_INT_CONTROL, tmp); | 3219 | WREG32(DC_HPD6_INT_CONTROL, tmp); |
@@ -3231,12 +3235,10 @@ static inline void r600_irq_ack(struct radeon_device *rdev, | |||
3231 | 3235 | ||
3232 | void r600_irq_disable(struct radeon_device *rdev) | 3236 | void r600_irq_disable(struct radeon_device *rdev) |
3233 | { | 3237 | { |
3234 | u32 disp_int, disp_int_cont, disp_int_cont2; | ||
3235 | |||
3236 | r600_disable_interrupts(rdev); | 3238 | r600_disable_interrupts(rdev); |
3237 | /* Wait and acknowledge irq */ | 3239 | /* Wait and acknowledge irq */ |
3238 | mdelay(1); | 3240 | mdelay(1); |
3239 | r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2); | 3241 | r600_irq_ack(rdev); |
3240 | r600_disable_interrupt_state(rdev); | 3242 | r600_disable_interrupt_state(rdev); |
3241 | } | 3243 | } |
3242 | 3244 | ||
@@ -3244,8 +3246,10 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) | |||
3244 | { | 3246 | { |
3245 | u32 wptr, tmp; | 3247 | u32 wptr, tmp; |
3246 | 3248 | ||
3247 | /* XXX use writeback */ | 3249 | if (rdev->wb.enabled) |
3248 | wptr = RREG32(IH_RB_WPTR); | 3250 | wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); |
3251 | else | ||
3252 | wptr = RREG32(IH_RB_WPTR); | ||
3249 | 3253 | ||
3250 | if (wptr & RB_OVERFLOW) { | 3254 | if (wptr & RB_OVERFLOW) { |
3251 | /* When a ring buffer overflow happen start parsing interrupt | 3255 | /* When a ring buffer overflow happen start parsing interrupt |
@@ -3294,54 +3298,57 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) | |||
3294 | 3298 | ||
3295 | int r600_irq_process(struct radeon_device *rdev) | 3299 | int r600_irq_process(struct radeon_device *rdev) |
3296 | { | 3300 | { |
3297 | u32 wptr = r600_get_ih_wptr(rdev); | 3301 | u32 wptr; |
3298 | u32 rptr = rdev->ih.rptr; | 3302 | u32 rptr; |
3299 | u32 src_id, src_data; | 3303 | u32 src_id, src_data; |
3300 | u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; | 3304 | u32 ring_index; |
3301 | unsigned long flags; | 3305 | unsigned long flags; |
3302 | bool queue_hotplug = false; | 3306 | bool queue_hotplug = false; |
3303 | 3307 | ||
3304 | DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); | 3308 | if (!rdev->ih.enabled || rdev->shutdown) |
3305 | if (!rdev->ih.enabled) | ||
3306 | return IRQ_NONE; | 3309 | return IRQ_NONE; |
3307 | 3310 | ||
3311 | wptr = r600_get_ih_wptr(rdev); | ||
3312 | rptr = rdev->ih.rptr; | ||
3313 | DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); | ||
3314 | |||
3308 | spin_lock_irqsave(&rdev->ih.lock, flags); | 3315 | spin_lock_irqsave(&rdev->ih.lock, flags); |
3309 | 3316 | ||
3310 | if (rptr == wptr) { | 3317 | if (rptr == wptr) { |
3311 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | 3318 | spin_unlock_irqrestore(&rdev->ih.lock, flags); |
3312 | return IRQ_NONE; | 3319 | return IRQ_NONE; |
3313 | } | 3320 | } |
3314 | if (rdev->shutdown) { | ||
3315 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | ||
3316 | return IRQ_NONE; | ||
3317 | } | ||
3318 | 3321 | ||
3319 | restart_ih: | 3322 | restart_ih: |
3320 | /* display interrupts */ | 3323 | /* display interrupts */ |
3321 | r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2); | 3324 | r600_irq_ack(rdev); |
3322 | 3325 | ||
3323 | rdev->ih.wptr = wptr; | 3326 | rdev->ih.wptr = wptr; |
3324 | while (rptr != wptr) { | 3327 | while (rptr != wptr) { |
3325 | /* wptr/rptr are in bytes! */ | 3328 | /* wptr/rptr are in bytes! */ |
3326 | ring_index = rptr / 4; | 3329 | ring_index = rptr / 4; |
3327 | src_id = rdev->ih.ring[ring_index] & 0xff; | 3330 | src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; |
3328 | src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; | 3331 | src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; |
3329 | 3332 | ||
3330 | switch (src_id) { | 3333 | switch (src_id) { |
3331 | case 1: /* D1 vblank/vline */ | 3334 | case 1: /* D1 vblank/vline */ |
3332 | switch (src_data) { | 3335 | switch (src_data) { |
3333 | case 0: /* D1 vblank */ | 3336 | case 0: /* D1 vblank */ |
3334 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { | 3337 | if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) { |
3335 | drm_handle_vblank(rdev->ddev, 0); | 3338 | if (rdev->irq.crtc_vblank_int[0]) { |
3336 | rdev->pm.vblank_sync = true; | 3339 | drm_handle_vblank(rdev->ddev, 0); |
3337 | wake_up(&rdev->irq.vblank_queue); | 3340 | rdev->pm.vblank_sync = true; |
3338 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; | 3341 | wake_up(&rdev->irq.vblank_queue); |
3342 | } | ||
3343 | if (rdev->irq.pflip[0]) | ||
3344 | radeon_crtc_handle_flip(rdev, 0); | ||
3345 | rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; | ||
3339 | DRM_DEBUG("IH: D1 vblank\n"); | 3346 | DRM_DEBUG("IH: D1 vblank\n"); |
3340 | } | 3347 | } |
3341 | break; | 3348 | break; |
3342 | case 1: /* D1 vline */ | 3349 | case 1: /* D1 vline */ |
3343 | if (disp_int & LB_D1_VLINE_INTERRUPT) { | 3350 | if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) { |
3344 | disp_int &= ~LB_D1_VLINE_INTERRUPT; | 3351 | rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT; |
3345 | DRM_DEBUG("IH: D1 vline\n"); | 3352 | DRM_DEBUG("IH: D1 vline\n"); |
3346 | } | 3353 | } |
3347 | break; | 3354 | break; |
@@ -3353,17 +3360,21 @@ restart_ih: | |||
3353 | case 5: /* D2 vblank/vline */ | 3360 | case 5: /* D2 vblank/vline */ |
3354 | switch (src_data) { | 3361 | switch (src_data) { |
3355 | case 0: /* D2 vblank */ | 3362 | case 0: /* D2 vblank */ |
3356 | if (disp_int & LB_D2_VBLANK_INTERRUPT) { | 3363 | if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) { |
3357 | drm_handle_vblank(rdev->ddev, 1); | 3364 | if (rdev->irq.crtc_vblank_int[1]) { |
3358 | rdev->pm.vblank_sync = true; | 3365 | drm_handle_vblank(rdev->ddev, 1); |
3359 | wake_up(&rdev->irq.vblank_queue); | 3366 | rdev->pm.vblank_sync = true; |
3360 | disp_int &= ~LB_D2_VBLANK_INTERRUPT; | 3367 | wake_up(&rdev->irq.vblank_queue); |
3368 | } | ||
3369 | if (rdev->irq.pflip[1]) | ||
3370 | radeon_crtc_handle_flip(rdev, 1); | ||
3371 | rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; | ||
3361 | DRM_DEBUG("IH: D2 vblank\n"); | 3372 | DRM_DEBUG("IH: D2 vblank\n"); |
3362 | } | 3373 | } |
3363 | break; | 3374 | break; |
3364 | case 1: /* D1 vline */ | 3375 | case 1: /* D1 vline */ |
3365 | if (disp_int & LB_D2_VLINE_INTERRUPT) { | 3376 | if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) { |
3366 | disp_int &= ~LB_D2_VLINE_INTERRUPT; | 3377 | rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT; |
3367 | DRM_DEBUG("IH: D2 vline\n"); | 3378 | DRM_DEBUG("IH: D2 vline\n"); |
3368 | } | 3379 | } |
3369 | break; | 3380 | break; |
@@ -3375,43 +3386,43 @@ restart_ih: | |||
3375 | case 19: /* HPD/DAC hotplug */ | 3386 | case 19: /* HPD/DAC hotplug */ |
3376 | switch (src_data) { | 3387 | switch (src_data) { |
3377 | case 0: | 3388 | case 0: |
3378 | if (disp_int & DC_HPD1_INTERRUPT) { | 3389 | if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { |
3379 | disp_int &= ~DC_HPD1_INTERRUPT; | 3390 | rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT; |
3380 | queue_hotplug = true; | 3391 | queue_hotplug = true; |
3381 | DRM_DEBUG("IH: HPD1\n"); | 3392 | DRM_DEBUG("IH: HPD1\n"); |
3382 | } | 3393 | } |
3383 | break; | 3394 | break; |
3384 | case 1: | 3395 | case 1: |
3385 | if (disp_int & DC_HPD2_INTERRUPT) { | 3396 | if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { |
3386 | disp_int &= ~DC_HPD2_INTERRUPT; | 3397 | rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT; |
3387 | queue_hotplug = true; | 3398 | queue_hotplug = true; |
3388 | DRM_DEBUG("IH: HPD2\n"); | 3399 | DRM_DEBUG("IH: HPD2\n"); |
3389 | } | 3400 | } |
3390 | break; | 3401 | break; |
3391 | case 4: | 3402 | case 4: |
3392 | if (disp_int_cont & DC_HPD3_INTERRUPT) { | 3403 | if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) { |
3393 | disp_int_cont &= ~DC_HPD3_INTERRUPT; | 3404 | rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT; |
3394 | queue_hotplug = true; | 3405 | queue_hotplug = true; |
3395 | DRM_DEBUG("IH: HPD3\n"); | 3406 | DRM_DEBUG("IH: HPD3\n"); |
3396 | } | 3407 | } |
3397 | break; | 3408 | break; |
3398 | case 5: | 3409 | case 5: |
3399 | if (disp_int_cont & DC_HPD4_INTERRUPT) { | 3410 | if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { |
3400 | disp_int_cont &= ~DC_HPD4_INTERRUPT; | 3411 | rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT; |
3401 | queue_hotplug = true; | 3412 | queue_hotplug = true; |
3402 | DRM_DEBUG("IH: HPD4\n"); | 3413 | DRM_DEBUG("IH: HPD4\n"); |
3403 | } | 3414 | } |
3404 | break; | 3415 | break; |
3405 | case 10: | 3416 | case 10: |
3406 | if (disp_int_cont2 & DC_HPD5_INTERRUPT) { | 3417 | if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { |
3407 | disp_int_cont2 &= ~DC_HPD5_INTERRUPT; | 3418 | rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT; |
3408 | queue_hotplug = true; | 3419 | queue_hotplug = true; |
3409 | DRM_DEBUG("IH: HPD5\n"); | 3420 | DRM_DEBUG("IH: HPD5\n"); |
3410 | } | 3421 | } |
3411 | break; | 3422 | break; |
3412 | case 12: | 3423 | case 12: |
3413 | if (disp_int_cont2 & DC_HPD6_INTERRUPT) { | 3424 | if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { |
3414 | disp_int_cont2 &= ~DC_HPD6_INTERRUPT; | 3425 | rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT; |
3415 | queue_hotplug = true; | 3426 | queue_hotplug = true; |
3416 | DRM_DEBUG("IH: HPD6\n"); | 3427 | DRM_DEBUG("IH: HPD6\n"); |
3417 | } | 3428 | } |
@@ -3433,9 +3444,10 @@ restart_ih: | |||
3433 | break; | 3444 | break; |
3434 | case 181: /* CP EOP event */ | 3445 | case 181: /* CP EOP event */ |
3435 | DRM_DEBUG("IH: CP EOP\n"); | 3446 | DRM_DEBUG("IH: CP EOP\n"); |
3447 | radeon_fence_process(rdev); | ||
3436 | break; | 3448 | break; |
3437 | case 233: /* GUI IDLE */ | 3449 | case 233: /* GUI IDLE */ |
3438 | DRM_DEBUG("IH: CP EOP\n"); | 3450 | DRM_DEBUG("IH: GUI idle\n"); |
3439 | rdev->pm.gui_idle = true; | 3451 | rdev->pm.gui_idle = true; |
3440 | wake_up(&rdev->irq.idle_queue); | 3452 | wake_up(&rdev->irq.idle_queue); |
3441 | break; | 3453 | break; |
@@ -3453,7 +3465,7 @@ restart_ih: | |||
3453 | if (wptr != rdev->ih.wptr) | 3465 | if (wptr != rdev->ih.wptr) |
3454 | goto restart_ih; | 3466 | goto restart_ih; |
3455 | if (queue_hotplug) | 3467 | if (queue_hotplug) |
3456 | queue_work(rdev->wq, &rdev->hotplug_work); | 3468 | schedule_work(&rdev->hotplug_work); |
3457 | rdev->ih.rptr = rptr; | 3469 | rdev->ih.rptr = rptr; |
3458 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | 3470 | WREG32(IH_RB_RPTR, rdev->ih.rptr); |
3459 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | 3471 | spin_unlock_irqrestore(&rdev->ih.lock, flags); |
@@ -3528,10 +3540,12 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev) | |||
3528 | void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) | 3540 | void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) |
3529 | { | 3541 | { |
3530 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read | 3542 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read |
3531 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL | 3543 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL. |
3544 | * This seems to cause problems on some AGP cards. Just use the old | ||
3545 | * method for them. | ||
3532 | */ | 3546 | */ |
3533 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && | 3547 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && |
3534 | rdev->vram_scratch.ptr) { | 3548 | rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) { |
3535 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; | 3549 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
3536 | u32 tmp; | 3550 | u32 tmp; |
3537 | 3551 | ||
@@ -3540,3 +3554,222 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) | |||
3540 | } else | 3554 | } else |
3541 | WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); | 3555 | WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); |
3542 | } | 3556 | } |
3557 | |||
3558 | void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes) | ||
3559 | { | ||
3560 | u32 link_width_cntl, mask, target_reg; | ||
3561 | |||
3562 | if (rdev->flags & RADEON_IS_IGP) | ||
3563 | return; | ||
3564 | |||
3565 | if (!(rdev->flags & RADEON_IS_PCIE)) | ||
3566 | return; | ||
3567 | |||
3568 | /* x2 cards have a special sequence */ | ||
3569 | if (ASIC_IS_X2(rdev)) | ||
3570 | return; | ||
3571 | |||
3572 | /* FIXME wait for idle */ | ||
3573 | |||
3574 | switch (lanes) { | ||
3575 | case 0: | ||
3576 | mask = RADEON_PCIE_LC_LINK_WIDTH_X0; | ||
3577 | break; | ||
3578 | case 1: | ||
3579 | mask = RADEON_PCIE_LC_LINK_WIDTH_X1; | ||
3580 | break; | ||
3581 | case 2: | ||
3582 | mask = RADEON_PCIE_LC_LINK_WIDTH_X2; | ||
3583 | break; | ||
3584 | case 4: | ||
3585 | mask = RADEON_PCIE_LC_LINK_WIDTH_X4; | ||
3586 | break; | ||
3587 | case 8: | ||
3588 | mask = RADEON_PCIE_LC_LINK_WIDTH_X8; | ||
3589 | break; | ||
3590 | case 12: | ||
3591 | mask = RADEON_PCIE_LC_LINK_WIDTH_X12; | ||
3592 | break; | ||
3593 | case 16: | ||
3594 | default: | ||
3595 | mask = RADEON_PCIE_LC_LINK_WIDTH_X16; | ||
3596 | break; | ||
3597 | } | ||
3598 | |||
3599 | link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); | ||
3600 | |||
3601 | if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) == | ||
3602 | (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT)) | ||
3603 | return; | ||
3604 | |||
3605 | if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS) | ||
3606 | return; | ||
3607 | |||
3608 | link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK | | ||
3609 | RADEON_PCIE_LC_RECONFIG_NOW | | ||
3610 | R600_PCIE_LC_RENEGOTIATE_EN | | ||
3611 | R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE); | ||
3612 | link_width_cntl |= mask; | ||
3613 | |||
3614 | WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
3615 | |||
3616 | /* some northbridges can renegotiate the link rather than requiring | ||
3617 | * a complete re-config. | ||
3618 | * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.) | ||
3619 | */ | ||
3620 | if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT) | ||
3621 | link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT; | ||
3622 | else | ||
3623 | link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE; | ||
3624 | |||
3625 | WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl | | ||
3626 | RADEON_PCIE_LC_RECONFIG_NOW)); | ||
3627 | |||
3628 | if (rdev->family >= CHIP_RV770) | ||
3629 | target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX; | ||
3630 | else | ||
3631 | target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX; | ||
3632 | |||
3633 | /* wait for lane set to complete */ | ||
3634 | link_width_cntl = RREG32(target_reg); | ||
3635 | while (link_width_cntl == 0xffffffff) | ||
3636 | link_width_cntl = RREG32(target_reg); | ||
3637 | |||
3638 | } | ||
3639 | |||
3640 | int r600_get_pcie_lanes(struct radeon_device *rdev) | ||
3641 | { | ||
3642 | u32 link_width_cntl; | ||
3643 | |||
3644 | if (rdev->flags & RADEON_IS_IGP) | ||
3645 | return 0; | ||
3646 | |||
3647 | if (!(rdev->flags & RADEON_IS_PCIE)) | ||
3648 | return 0; | ||
3649 | |||
3650 | /* x2 cards have a special sequence */ | ||
3651 | if (ASIC_IS_X2(rdev)) | ||
3652 | return 0; | ||
3653 | |||
3654 | /* FIXME wait for idle */ | ||
3655 | |||
3656 | link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); | ||
3657 | |||
3658 | switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { | ||
3659 | case RADEON_PCIE_LC_LINK_WIDTH_X0: | ||
3660 | return 0; | ||
3661 | case RADEON_PCIE_LC_LINK_WIDTH_X1: | ||
3662 | return 1; | ||
3663 | case RADEON_PCIE_LC_LINK_WIDTH_X2: | ||
3664 | return 2; | ||
3665 | case RADEON_PCIE_LC_LINK_WIDTH_X4: | ||
3666 | return 4; | ||
3667 | case RADEON_PCIE_LC_LINK_WIDTH_X8: | ||
3668 | return 8; | ||
3669 | case RADEON_PCIE_LC_LINK_WIDTH_X16: | ||
3670 | default: | ||
3671 | return 16; | ||
3672 | } | ||
3673 | } | ||
3674 | |||
3675 | static void r600_pcie_gen2_enable(struct radeon_device *rdev) | ||
3676 | { | ||
3677 | u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; | ||
3678 | u16 link_cntl2; | ||
3679 | |||
3680 | if (radeon_pcie_gen2 == 0) | ||
3681 | return; | ||
3682 | |||
3683 | if (rdev->flags & RADEON_IS_IGP) | ||
3684 | return; | ||
3685 | |||
3686 | if (!(rdev->flags & RADEON_IS_PCIE)) | ||
3687 | return; | ||
3688 | |||
3689 | /* x2 cards have a special sequence */ | ||
3690 | if (ASIC_IS_X2(rdev)) | ||
3691 | return; | ||
3692 | |||
3693 | /* only RV6xx+ chips are supported */ | ||
3694 | if (rdev->family <= CHIP_R600) | ||
3695 | return; | ||
3696 | |||
3697 | /* 55 nm r6xx asics */ | ||
3698 | if ((rdev->family == CHIP_RV670) || | ||
3699 | (rdev->family == CHIP_RV620) || | ||
3700 | (rdev->family == CHIP_RV635)) { | ||
3701 | /* advertise upconfig capability */ | ||
3702 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
3703 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; | ||
3704 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
3705 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
3706 | if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { | ||
3707 | lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; | ||
3708 | link_width_cntl &= ~(LC_LINK_WIDTH_MASK | | ||
3709 | LC_RECONFIG_ARC_MISSING_ESCAPE); | ||
3710 | link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN; | ||
3711 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
3712 | } else { | ||
3713 | link_width_cntl |= LC_UPCONFIGURE_DIS; | ||
3714 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
3715 | } | ||
3716 | } | ||
3717 | |||
3718 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3719 | if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && | ||
3720 | (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { | ||
3721 | |||
3722 | /* 55 nm r6xx asics */ | ||
3723 | if ((rdev->family == CHIP_RV670) || | ||
3724 | (rdev->family == CHIP_RV620) || | ||
3725 | (rdev->family == CHIP_RV635)) { | ||
3726 | WREG32(MM_CFGREGS_CNTL, 0x8); | ||
3727 | link_cntl2 = RREG32(0x4088); | ||
3728 | WREG32(MM_CFGREGS_CNTL, 0); | ||
3729 | /* not supported yet */ | ||
3730 | if (link_cntl2 & SELECTABLE_DEEMPHASIS) | ||
3731 | return; | ||
3732 | } | ||
3733 | |||
3734 | speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK; | ||
3735 | speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT); | ||
3736 | speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK; | ||
3737 | speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE; | ||
3738 | speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE; | ||
3739 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
3740 | |||
3741 | tmp = RREG32(0x541c); | ||
3742 | WREG32(0x541c, tmp | 0x8); | ||
3743 | WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN); | ||
3744 | link_cntl2 = RREG16(0x4088); | ||
3745 | link_cntl2 &= ~TARGET_LINK_SPEED_MASK; | ||
3746 | link_cntl2 |= 0x2; | ||
3747 | WREG16(0x4088, link_cntl2); | ||
3748 | WREG32(MM_CFGREGS_CNTL, 0); | ||
3749 | |||
3750 | if ((rdev->family == CHIP_RV670) || | ||
3751 | (rdev->family == CHIP_RV620) || | ||
3752 | (rdev->family == CHIP_RV635)) { | ||
3753 | training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL); | ||
3754 | training_cntl &= ~LC_POINT_7_PLUS_EN; | ||
3755 | WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl); | ||
3756 | } else { | ||
3757 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3758 | speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; | ||
3759 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
3760 | } | ||
3761 | |||
3762 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3763 | speed_cntl |= LC_GEN2_EN_STRAP; | ||
3764 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
3765 | |||
3766 | } else { | ||
3767 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
3768 | /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ | ||
3769 | if (1) | ||
3770 | link_width_cntl |= LC_UPCONFIGURE_DIS; | ||
3771 | else | ||
3772 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; | ||
3773 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
3774 | } | ||
3775 | } | ||
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index b5443fe1c1d1..846fae576399 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include "drmP.h" | 26 | #include "drmP.h" |
27 | #include "radeon.h" | 27 | #include "radeon.h" |
28 | #include "radeon_reg.h" | 28 | #include "radeon_reg.h" |
29 | #include "radeon_asic.h" | ||
29 | #include "atom.h" | 30 | #include "atom.h" |
30 | 31 | ||
31 | #define AUDIO_TIMER_INTERVALL 100 /* 1/10 sekund should be enough */ | 32 | #define AUDIO_TIMER_INTERVALL 100 /* 1/10 sekund should be enough */ |
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index ca5c29f70779..7f1043448d25 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c | |||
@@ -137,9 +137,9 @@ set_shaders(struct drm_device *dev) | |||
137 | ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); | 137 | ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); |
138 | 138 | ||
139 | for (i = 0; i < r6xx_vs_size; i++) | 139 | for (i = 0; i < r6xx_vs_size; i++) |
140 | vs[i] = r6xx_vs[i]; | 140 | vs[i] = cpu_to_le32(r6xx_vs[i]); |
141 | for (i = 0; i < r6xx_ps_size; i++) | 141 | for (i = 0; i < r6xx_ps_size; i++) |
142 | ps[i] = r6xx_ps[i]; | 142 | ps[i] = cpu_to_le32(r6xx_ps[i]); |
143 | 143 | ||
144 | dev_priv->blit_vb->used = 512; | 144 | dev_priv->blit_vb->used = 512; |
145 | 145 | ||
@@ -192,6 +192,9 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr) | |||
192 | DRM_DEBUG("\n"); | 192 | DRM_DEBUG("\n"); |
193 | 193 | ||
194 | sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8)); | 194 | sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8)); |
195 | #ifdef __BIG_ENDIAN | ||
196 | sq_vtx_constant_word2 |= (2 << 30); | ||
197 | #endif | ||
195 | 198 | ||
196 | BEGIN_RING(9); | 199 | BEGIN_RING(9); |
197 | OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); | 200 | OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); |
@@ -291,7 +294,11 @@ draw_auto(drm_radeon_private_t *dev_priv) | |||
291 | OUT_RING(DI_PT_RECTLIST); | 294 | OUT_RING(DI_PT_RECTLIST); |
292 | 295 | ||
293 | OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0)); | 296 | OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0)); |
297 | #ifdef __BIG_ENDIAN | ||
298 | OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT); | ||
299 | #else | ||
294 | OUT_RING(DI_INDEX_SIZE_16_BIT); | 300 | OUT_RING(DI_INDEX_SIZE_16_BIT); |
301 | #endif | ||
295 | 302 | ||
296 | OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0)); | 303 | OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0)); |
297 | OUT_RING(1); | 304 | OUT_RING(1); |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 3473c00781ff..9aa74c3f8cb6 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -54,7 +54,7 @@ set_render_target(struct radeon_device *rdev, int format, | |||
54 | if (h < 8) | 54 | if (h < 8) |
55 | h = 8; | 55 | h = 8; |
56 | 56 | ||
57 | cb_color_info = ((format << 2) | (1 << 27)); | 57 | cb_color_info = ((format << 2) | (1 << 27) | (1 << 8)); |
58 | pitch = (w / 8) - 1; | 58 | pitch = (w / 8) - 1; |
59 | slice = ((w * h) / 64) - 1; | 59 | slice = ((w * h) / 64) - 1; |
60 | 60 | ||
@@ -165,6 +165,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | |||
165 | u32 sq_vtx_constant_word2; | 165 | u32 sq_vtx_constant_word2; |
166 | 166 | ||
167 | sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); | 167 | sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); |
168 | #ifdef __BIG_ENDIAN | ||
169 | sq_vtx_constant_word2 |= (2 << 30); | ||
170 | #endif | ||
168 | 171 | ||
169 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); | 172 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); |
170 | radeon_ring_write(rdev, 0x460); | 173 | radeon_ring_write(rdev, 0x460); |
@@ -199,7 +202,7 @@ set_tex_resource(struct radeon_device *rdev, | |||
199 | if (h < 1) | 202 | if (h < 1) |
200 | h = 1; | 203 | h = 1; |
201 | 204 | ||
202 | sq_tex_resource_word0 = (1 << 0); | 205 | sq_tex_resource_word0 = (1 << 0) | (1 << 3); |
203 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | | 206 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | |
204 | ((w - 1) << 19)); | 207 | ((w - 1) << 19)); |
205 | 208 | ||
@@ -253,7 +256,11 @@ draw_auto(struct radeon_device *rdev) | |||
253 | radeon_ring_write(rdev, DI_PT_RECTLIST); | 256 | radeon_ring_write(rdev, DI_PT_RECTLIST); |
254 | 257 | ||
255 | radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); | 258 | radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); |
256 | radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); | 259 | radeon_ring_write(rdev, |
260 | #ifdef __BIG_ENDIAN | ||
261 | (2 << 2) | | ||
262 | #endif | ||
263 | DI_INDEX_SIZE_16_BIT); | ||
257 | 264 | ||
258 | radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); | 265 | radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); |
259 | radeon_ring_write(rdev, 1); | 266 | radeon_ring_write(rdev, 1); |
@@ -424,7 +431,11 @@ set_default_state(struct radeon_device *rdev) | |||
424 | dwords = ALIGN(rdev->r600_blit.state_len, 0x10); | 431 | dwords = ALIGN(rdev->r600_blit.state_len, 0x10); |
425 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; | 432 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; |
426 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | 433 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); |
427 | radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); | 434 | radeon_ring_write(rdev, |
435 | #ifdef __BIG_ENDIAN | ||
436 | (2 << 0) | | ||
437 | #endif | ||
438 | (gpu_addr & 0xFFFFFFFC)); | ||
428 | radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); | 439 | radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); |
429 | radeon_ring_write(rdev, dwords); | 440 | radeon_ring_write(rdev, dwords); |
430 | 441 | ||
@@ -467,14 +478,15 @@ static inline uint32_t i2f(uint32_t input) | |||
467 | int r600_blit_init(struct radeon_device *rdev) | 478 | int r600_blit_init(struct radeon_device *rdev) |
468 | { | 479 | { |
469 | u32 obj_size; | 480 | u32 obj_size; |
470 | int r, dwords; | 481 | int i, r, dwords; |
471 | void *ptr; | 482 | void *ptr; |
472 | u32 packet2s[16]; | 483 | u32 packet2s[16]; |
473 | int num_packet2s = 0; | 484 | int num_packet2s = 0; |
474 | 485 | ||
475 | /* don't reinitialize blit */ | 486 | /* pin copy shader into vram if already initialized */ |
476 | if (rdev->r600_blit.shader_obj) | 487 | if (rdev->r600_blit.shader_obj) |
477 | return 0; | 488 | goto done; |
489 | |||
478 | mutex_init(&rdev->r600_blit.mutex); | 490 | mutex_init(&rdev->r600_blit.mutex); |
479 | rdev->r600_blit.state_offset = 0; | 491 | rdev->r600_blit.state_offset = 0; |
480 | 492 | ||
@@ -485,7 +497,7 @@ int r600_blit_init(struct radeon_device *rdev) | |||
485 | 497 | ||
486 | dwords = rdev->r600_blit.state_len; | 498 | dwords = rdev->r600_blit.state_len; |
487 | while (dwords & 0xf) { | 499 | while (dwords & 0xf) { |
488 | packet2s[num_packet2s++] = PACKET2(0); | 500 | packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0)); |
489 | dwords++; | 501 | dwords++; |
490 | } | 502 | } |
491 | 503 | ||
@@ -500,7 +512,7 @@ int r600_blit_init(struct radeon_device *rdev) | |||
500 | obj_size += r6xx_ps_size * 4; | 512 | obj_size += r6xx_ps_size * 4; |
501 | obj_size = ALIGN(obj_size, 256); | 513 | obj_size = ALIGN(obj_size, 256); |
502 | 514 | ||
503 | r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, | 515 | r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
504 | &rdev->r600_blit.shader_obj); | 516 | &rdev->r600_blit.shader_obj); |
505 | if (r) { | 517 | if (r) { |
506 | DRM_ERROR("r600 failed to allocate shader\n"); | 518 | DRM_ERROR("r600 failed to allocate shader\n"); |
@@ -528,11 +540,25 @@ int r600_blit_init(struct radeon_device *rdev) | |||
528 | if (num_packet2s) | 540 | if (num_packet2s) |
529 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), | 541 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), |
530 | packet2s, num_packet2s * 4); | 542 | packet2s, num_packet2s * 4); |
531 | memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4); | 543 | for (i = 0; i < r6xx_vs_size; i++) |
532 | memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); | 544 | *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]); |
545 | for (i = 0; i < r6xx_ps_size; i++) | ||
546 | *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]); | ||
533 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); | 547 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); |
534 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | 548 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
535 | rdev->mc.active_vram_size = rdev->mc.real_vram_size; | 549 | |
550 | done: | ||
551 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
552 | if (unlikely(r != 0)) | ||
553 | return r; | ||
554 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | ||
555 | &rdev->r600_blit.shader_gpu_addr); | ||
556 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
557 | if (r) { | ||
558 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); | ||
559 | return r; | ||
560 | } | ||
561 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); | ||
536 | return 0; | 562 | return 0; |
537 | } | 563 | } |
538 | 564 | ||
@@ -540,7 +566,7 @@ void r600_blit_fini(struct radeon_device *rdev) | |||
540 | { | 566 | { |
541 | int r; | 567 | int r; |
542 | 568 | ||
543 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 569 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
544 | if (rdev->r600_blit.shader_obj == NULL) | 570 | if (rdev->r600_blit.shader_obj == NULL) |
545 | return; | 571 | return; |
546 | /* If we can't reserve the bo, unref should be enough to destroy | 572 | /* If we can't reserve the bo, unref should be enough to destroy |
@@ -554,7 +580,7 @@ void r600_blit_fini(struct radeon_device *rdev) | |||
554 | radeon_bo_unref(&rdev->r600_blit.shader_obj); | 580 | radeon_bo_unref(&rdev->r600_blit.shader_obj); |
555 | } | 581 | } |
556 | 582 | ||
557 | int r600_vb_ib_get(struct radeon_device *rdev) | 583 | static int r600_vb_ib_get(struct radeon_device *rdev) |
558 | { | 584 | { |
559 | int r; | 585 | int r; |
560 | r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib); | 586 | r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib); |
@@ -568,7 +594,7 @@ int r600_vb_ib_get(struct radeon_device *rdev) | |||
568 | return 0; | 594 | return 0; |
569 | } | 595 | } |
570 | 596 | ||
571 | void r600_vb_ib_put(struct radeon_device *rdev) | 597 | static void r600_vb_ib_put(struct radeon_device *rdev) |
572 | { | 598 | { |
573 | radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); | 599 | radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); |
574 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | 600 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); |
@@ -650,8 +676,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
650 | int src_x = src_gpu_addr & 255; | 676 | int src_x = src_gpu_addr & 255; |
651 | int dst_x = dst_gpu_addr & 255; | 677 | int dst_x = dst_gpu_addr & 255; |
652 | int h = 1; | 678 | int h = 1; |
653 | src_gpu_addr = src_gpu_addr & ~255; | 679 | src_gpu_addr = src_gpu_addr & ~255ULL; |
654 | dst_gpu_addr = dst_gpu_addr & ~255; | 680 | dst_gpu_addr = dst_gpu_addr & ~255ULL; |
655 | 681 | ||
656 | if (!src_x && !dst_x) { | 682 | if (!src_x && !dst_x) { |
657 | h = (cur_size / max_bytes); | 683 | h = (cur_size / max_bytes); |
@@ -672,17 +698,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
672 | 698 | ||
673 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { | 699 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { |
674 | WARN_ON(1); | 700 | WARN_ON(1); |
675 | |||
676 | #if 0 | ||
677 | r600_vb_ib_put(rdev); | ||
678 | |||
679 | r600_nomm_put_vb(dev); | ||
680 | r600_nomm_get_vb(dev); | ||
681 | if (!dev_priv->blit_vb) | ||
682 | return; | ||
683 | set_shaders(dev); | ||
684 | vb = r600_nomm_get_vb_ptr(dev); | ||
685 | #endif | ||
686 | } | 701 | } |
687 | 702 | ||
688 | vb[0] = i2f(dst_x); | 703 | vb[0] = i2f(dst_x); |
@@ -744,8 +759,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
744 | int src_x = (src_gpu_addr & 255); | 759 | int src_x = (src_gpu_addr & 255); |
745 | int dst_x = (dst_gpu_addr & 255); | 760 | int dst_x = (dst_gpu_addr & 255); |
746 | int h = 1; | 761 | int h = 1; |
747 | src_gpu_addr = src_gpu_addr & ~255; | 762 | src_gpu_addr = src_gpu_addr & ~255ULL; |
748 | dst_gpu_addr = dst_gpu_addr & ~255; | 763 | dst_gpu_addr = dst_gpu_addr & ~255ULL; |
749 | 764 | ||
750 | if (!src_x && !dst_x) { | 765 | if (!src_x && !dst_x) { |
751 | h = (cur_size / max_bytes); | 766 | h = (cur_size / max_bytes); |
@@ -767,17 +782,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
767 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { | 782 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { |
768 | WARN_ON(1); | 783 | WARN_ON(1); |
769 | } | 784 | } |
770 | #if 0 | ||
771 | if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) { | ||
772 | r600_nomm_put_vb(dev); | ||
773 | r600_nomm_get_vb(dev); | ||
774 | if (!rdev->blit_vb) | ||
775 | return; | ||
776 | |||
777 | set_shaders(dev); | ||
778 | vb = r600_nomm_get_vb_ptr(dev); | ||
779 | } | ||
780 | #endif | ||
781 | 785 | ||
782 | vb[0] = i2f(dst_x / 4); | 786 | vb[0] = i2f(dst_x / 4); |
783 | vb[1] = 0; | 787 | vb[1] = 0; |
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c index e8151c1d55b2..2d1f6c5ee2a7 100644 --- a/drivers/gpu/drm/radeon/r600_blit_shaders.c +++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c | |||
@@ -684,7 +684,11 @@ const u32 r6xx_vs[] = | |||
684 | 0x00000000, | 684 | 0x00000000, |
685 | 0x3c000000, | 685 | 0x3c000000, |
686 | 0x68cd1000, | 686 | 0x68cd1000, |
687 | #ifdef __BIG_ENDIAN | ||
688 | 0x000a0000, | ||
689 | #else | ||
687 | 0x00080000, | 690 | 0x00080000, |
691 | #endif | ||
688 | 0x00000000, | 692 | 0x00000000, |
689 | }; | 693 | }; |
690 | 694 | ||
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 4f4cd8b286d5..c3ab959bdc7c 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c | |||
@@ -396,6 +396,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv) | |||
396 | r600_do_cp_stop(dev_priv); | 396 | r600_do_cp_stop(dev_priv); |
397 | 397 | ||
398 | RADEON_WRITE(R600_CP_RB_CNTL, | 398 | RADEON_WRITE(R600_CP_RB_CNTL, |
399 | #ifdef __BIG_ENDIAN | ||
400 | R600_BUF_SWAP_32BIT | | ||
401 | #endif | ||
399 | R600_RB_NO_UPDATE | | 402 | R600_RB_NO_UPDATE | |
400 | R600_RB_BLKSZ(15) | | 403 | R600_RB_BLKSZ(15) | |
401 | R600_RB_BUFSZ(3)); | 404 | R600_RB_BUFSZ(3)); |
@@ -486,9 +489,12 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv) | |||
486 | r600_do_cp_stop(dev_priv); | 489 | r600_do_cp_stop(dev_priv); |
487 | 490 | ||
488 | RADEON_WRITE(R600_CP_RB_CNTL, | 491 | RADEON_WRITE(R600_CP_RB_CNTL, |
492 | #ifdef __BIG_ENDIAN | ||
493 | R600_BUF_SWAP_32BIT | | ||
494 | #endif | ||
489 | R600_RB_NO_UPDATE | | 495 | R600_RB_NO_UPDATE | |
490 | (15 << 8) | | 496 | R600_RB_BLKSZ(15) | |
491 | (3 << 0)); | 497 | R600_RB_BUFSZ(3)); |
492 | 498 | ||
493 | RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); | 499 | RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); |
494 | RADEON_READ(R600_GRBM_SOFT_RESET); | 500 | RADEON_READ(R600_GRBM_SOFT_RESET); |
@@ -550,8 +556,12 @@ static void r600_test_writeback(drm_radeon_private_t *dev_priv) | |||
550 | 556 | ||
551 | if (!dev_priv->writeback_works) { | 557 | if (!dev_priv->writeback_works) { |
552 | /* Disable writeback to avoid unnecessary bus master transfer */ | 558 | /* Disable writeback to avoid unnecessary bus master transfer */ |
553 | RADEON_WRITE(R600_CP_RB_CNTL, RADEON_READ(R600_CP_RB_CNTL) | | 559 | RADEON_WRITE(R600_CP_RB_CNTL, |
554 | RADEON_RB_NO_UPDATE); | 560 | #ifdef __BIG_ENDIAN |
561 | R600_BUF_SWAP_32BIT | | ||
562 | #endif | ||
563 | RADEON_READ(R600_CP_RB_CNTL) | | ||
564 | R600_RB_NO_UPDATE); | ||
555 | RADEON_WRITE(R600_SCRATCH_UMSK, 0); | 565 | RADEON_WRITE(R600_SCRATCH_UMSK, 0); |
556 | } | 566 | } |
557 | } | 567 | } |
@@ -575,7 +585,11 @@ int r600_do_engine_reset(struct drm_device *dev) | |||
575 | 585 | ||
576 | RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0); | 586 | RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0); |
577 | cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL); | 587 | cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL); |
578 | RADEON_WRITE(R600_CP_RB_CNTL, R600_RB_RPTR_WR_ENA); | 588 | RADEON_WRITE(R600_CP_RB_CNTL, |
589 | #ifdef __BIG_ENDIAN | ||
590 | R600_BUF_SWAP_32BIT | | ||
591 | #endif | ||
592 | R600_RB_RPTR_WR_ENA); | ||
579 | 593 | ||
580 | RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr); | 594 | RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr); |
581 | RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr); | 595 | RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr); |
@@ -1838,7 +1852,10 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev, | |||
1838 | + dev_priv->gart_vm_start; | 1852 | + dev_priv->gart_vm_start; |
1839 | } | 1853 | } |
1840 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR, | 1854 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR, |
1841 | rptr_addr & 0xffffffff); | 1855 | #ifdef __BIG_ENDIAN |
1856 | (2 << 0) | | ||
1857 | #endif | ||
1858 | (rptr_addr & 0xfffffffc)); | ||
1842 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, | 1859 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, |
1843 | upper_32_bits(rptr_addr)); | 1860 | upper_32_bits(rptr_addr)); |
1844 | 1861 | ||
@@ -1889,7 +1906,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev, | |||
1889 | { | 1906 | { |
1890 | u64 scratch_addr; | 1907 | u64 scratch_addr; |
1891 | 1908 | ||
1892 | scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR); | 1909 | scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC; |
1893 | scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32; | 1910 | scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32; |
1894 | scratch_addr += R600_SCRATCH_REG_OFFSET; | 1911 | scratch_addr += R600_SCRATCH_REG_OFFSET; |
1895 | scratch_addr >>= 8; | 1912 | scratch_addr >>= 8; |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 250a3a918193..909bda8dd550 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -50,6 +50,7 @@ struct r600_cs_track { | |||
50 | u32 nsamples; | 50 | u32 nsamples; |
51 | u32 cb_color_base_last[8]; | 51 | u32 cb_color_base_last[8]; |
52 | struct radeon_bo *cb_color_bo[8]; | 52 | struct radeon_bo *cb_color_bo[8]; |
53 | u64 cb_color_bo_mc[8]; | ||
53 | u32 cb_color_bo_offset[8]; | 54 | u32 cb_color_bo_offset[8]; |
54 | struct radeon_bo *cb_color_frag_bo[8]; | 55 | struct radeon_bo *cb_color_frag_bo[8]; |
55 | struct radeon_bo *cb_color_tile_bo[8]; | 56 | struct radeon_bo *cb_color_tile_bo[8]; |
@@ -67,76 +68,239 @@ struct r600_cs_track { | |||
67 | u32 db_depth_size; | 68 | u32 db_depth_size; |
68 | u32 db_offset; | 69 | u32 db_offset; |
69 | struct radeon_bo *db_bo; | 70 | struct radeon_bo *db_bo; |
71 | u64 db_bo_mc; | ||
70 | }; | 72 | }; |
71 | 73 | ||
74 | #define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 } | ||
75 | #define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 } | ||
76 | #define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0, CHIP_R600 } | ||
77 | #define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 } | ||
78 | #define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0, CHIP_R600 } | ||
79 | #define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 } | ||
80 | #define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 } | ||
81 | #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 } | ||
82 | |||
83 | struct gpu_formats { | ||
84 | unsigned blockwidth; | ||
85 | unsigned blockheight; | ||
86 | unsigned blocksize; | ||
87 | unsigned valid_color; | ||
88 | enum radeon_family min_family; | ||
89 | }; | ||
90 | |||
91 | static const struct gpu_formats color_formats_table[] = { | ||
92 | /* 8 bit */ | ||
93 | FMT_8_BIT(V_038004_COLOR_8, 1), | ||
94 | FMT_8_BIT(V_038004_COLOR_4_4, 1), | ||
95 | FMT_8_BIT(V_038004_COLOR_3_3_2, 1), | ||
96 | FMT_8_BIT(V_038004_FMT_1, 0), | ||
97 | |||
98 | /* 16-bit */ | ||
99 | FMT_16_BIT(V_038004_COLOR_16, 1), | ||
100 | FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1), | ||
101 | FMT_16_BIT(V_038004_COLOR_8_8, 1), | ||
102 | FMT_16_BIT(V_038004_COLOR_5_6_5, 1), | ||
103 | FMT_16_BIT(V_038004_COLOR_6_5_5, 1), | ||
104 | FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1), | ||
105 | FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1), | ||
106 | FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1), | ||
107 | |||
108 | /* 24-bit */ | ||
109 | FMT_24_BIT(V_038004_FMT_8_8_8), | ||
110 | |||
111 | /* 32-bit */ | ||
112 | FMT_32_BIT(V_038004_COLOR_32, 1), | ||
113 | FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1), | ||
114 | FMT_32_BIT(V_038004_COLOR_16_16, 1), | ||
115 | FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1), | ||
116 | FMT_32_BIT(V_038004_COLOR_8_24, 1), | ||
117 | FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1), | ||
118 | FMT_32_BIT(V_038004_COLOR_24_8, 1), | ||
119 | FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1), | ||
120 | FMT_32_BIT(V_038004_COLOR_10_11_11, 1), | ||
121 | FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1), | ||
122 | FMT_32_BIT(V_038004_COLOR_11_11_10, 1), | ||
123 | FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1), | ||
124 | FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1), | ||
125 | FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1), | ||
126 | FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1), | ||
127 | FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0), | ||
128 | FMT_32_BIT(V_038004_FMT_32_AS_8, 0), | ||
129 | FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0), | ||
130 | |||
131 | /* 48-bit */ | ||
132 | FMT_48_BIT(V_038004_FMT_16_16_16), | ||
133 | FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT), | ||
134 | |||
135 | /* 64-bit */ | ||
136 | FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1), | ||
137 | FMT_64_BIT(V_038004_COLOR_32_32, 1), | ||
138 | FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1), | ||
139 | FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1), | ||
140 | FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1), | ||
141 | |||
142 | FMT_96_BIT(V_038004_FMT_32_32_32), | ||
143 | FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT), | ||
144 | |||
145 | /* 128-bit */ | ||
146 | FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1), | ||
147 | FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1), | ||
148 | |||
149 | [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 }, | ||
150 | [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 }, | ||
151 | |||
152 | /* block compressed formats */ | ||
153 | [V_038004_FMT_BC1] = { 4, 4, 8, 0 }, | ||
154 | [V_038004_FMT_BC2] = { 4, 4, 16, 0 }, | ||
155 | [V_038004_FMT_BC3] = { 4, 4, 16, 0 }, | ||
156 | [V_038004_FMT_BC4] = { 4, 4, 8, 0 }, | ||
157 | [V_038004_FMT_BC5] = { 4, 4, 16, 0}, | ||
158 | [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */ | ||
159 | [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */ | ||
160 | |||
161 | /* The other Evergreen formats */ | ||
162 | [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR}, | ||
163 | }; | ||
164 | |||
165 | static inline bool fmt_is_valid_color(u32 format) | ||
166 | { | ||
167 | if (format >= ARRAY_SIZE(color_formats_table)) | ||
168 | return false; | ||
169 | |||
170 | if (color_formats_table[format].valid_color) | ||
171 | return true; | ||
172 | |||
173 | return false; | ||
174 | } | ||
175 | |||
176 | static inline bool fmt_is_valid_texture(u32 format, enum radeon_family family) | ||
177 | { | ||
178 | if (format >= ARRAY_SIZE(color_formats_table)) | ||
179 | return false; | ||
180 | |||
181 | if (family < color_formats_table[format].min_family) | ||
182 | return false; | ||
183 | |||
184 | if (color_formats_table[format].blockwidth > 0) | ||
185 | return true; | ||
186 | |||
187 | return false; | ||
188 | } | ||
189 | |||
190 | static inline int fmt_get_blocksize(u32 format) | ||
191 | { | ||
192 | if (format >= ARRAY_SIZE(color_formats_table)) | ||
193 | return 0; | ||
194 | |||
195 | return color_formats_table[format].blocksize; | ||
196 | } | ||
197 | |||
198 | static inline int fmt_get_nblocksx(u32 format, u32 w) | ||
199 | { | ||
200 | unsigned bw; | ||
201 | |||
202 | if (format >= ARRAY_SIZE(color_formats_table)) | ||
203 | return 0; | ||
204 | |||
205 | bw = color_formats_table[format].blockwidth; | ||
206 | if (bw == 0) | ||
207 | return 0; | ||
208 | |||
209 | return (w + bw - 1) / bw; | ||
210 | } | ||
211 | |||
212 | static inline int fmt_get_nblocksy(u32 format, u32 h) | ||
213 | { | ||
214 | unsigned bh; | ||
215 | |||
216 | if (format >= ARRAY_SIZE(color_formats_table)) | ||
217 | return 0; | ||
218 | |||
219 | bh = color_formats_table[format].blockheight; | ||
220 | if (bh == 0) | ||
221 | return 0; | ||
222 | |||
223 | return (h + bh - 1) / bh; | ||
224 | } | ||
225 | |||
72 | static inline int r600_bpe_from_format(u32 *bpe, u32 format) | 226 | static inline int r600_bpe_from_format(u32 *bpe, u32 format) |
73 | { | 227 | { |
74 | switch (format) { | 228 | unsigned res; |
75 | case V_038004_COLOR_8: | 229 | |
76 | case V_038004_COLOR_4_4: | 230 | if (format >= ARRAY_SIZE(color_formats_table)) |
77 | case V_038004_COLOR_3_3_2: | 231 | goto fail; |
78 | case V_038004_FMT_1: | 232 | |
79 | *bpe = 1; | 233 | res = color_formats_table[format].blocksize; |
80 | break; | 234 | if (res == 0) |
81 | case V_038004_COLOR_16: | 235 | goto fail; |
82 | case V_038004_COLOR_16_FLOAT: | 236 | |
83 | case V_038004_COLOR_8_8: | 237 | *bpe = res; |
84 | case V_038004_COLOR_5_6_5: | 238 | return 0; |
85 | case V_038004_COLOR_6_5_5: | 239 | |
86 | case V_038004_COLOR_1_5_5_5: | 240 | fail: |
87 | case V_038004_COLOR_4_4_4_4: | 241 | *bpe = 16; |
88 | case V_038004_COLOR_5_5_5_1: | 242 | return -EINVAL; |
89 | *bpe = 2; | 243 | } |
90 | break; | 244 | |
91 | case V_038004_FMT_8_8_8: | 245 | struct array_mode_checker { |
92 | *bpe = 3; | 246 | int array_mode; |
93 | break; | 247 | u32 group_size; |
94 | case V_038004_COLOR_32: | 248 | u32 nbanks; |
95 | case V_038004_COLOR_32_FLOAT: | 249 | u32 npipes; |
96 | case V_038004_COLOR_16_16: | 250 | u32 nsamples; |
97 | case V_038004_COLOR_16_16_FLOAT: | 251 | u32 blocksize; |
98 | case V_038004_COLOR_8_24: | 252 | }; |
99 | case V_038004_COLOR_8_24_FLOAT: | 253 | |
100 | case V_038004_COLOR_24_8: | 254 | /* returns alignment in pixels for pitch/height/depth and bytes for base */ |
101 | case V_038004_COLOR_24_8_FLOAT: | 255 | static inline int r600_get_array_mode_alignment(struct array_mode_checker *values, |
102 | case V_038004_COLOR_10_11_11: | 256 | u32 *pitch_align, |
103 | case V_038004_COLOR_10_11_11_FLOAT: | 257 | u32 *height_align, |
104 | case V_038004_COLOR_11_11_10: | 258 | u32 *depth_align, |
105 | case V_038004_COLOR_11_11_10_FLOAT: | 259 | u64 *base_align) |
106 | case V_038004_COLOR_2_10_10_10: | 260 | { |
107 | case V_038004_COLOR_8_8_8_8: | 261 | u32 tile_width = 8; |
108 | case V_038004_COLOR_10_10_10_2: | 262 | u32 tile_height = 8; |
109 | case V_038004_FMT_5_9_9_9_SHAREDEXP: | 263 | u32 macro_tile_width = values->nbanks; |
110 | case V_038004_FMT_32_AS_8: | 264 | u32 macro_tile_height = values->npipes; |
111 | case V_038004_FMT_32_AS_8_8: | 265 | u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples; |
112 | *bpe = 4; | 266 | u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes; |
113 | break; | 267 | |
114 | case V_038004_COLOR_X24_8_32_FLOAT: | 268 | switch (values->array_mode) { |
115 | case V_038004_COLOR_32_32: | 269 | case ARRAY_LINEAR_GENERAL: |
116 | case V_038004_COLOR_32_32_FLOAT: | 270 | /* technically tile_width/_height for pitch/height */ |
117 | case V_038004_COLOR_16_16_16_16: | 271 | *pitch_align = 1; /* tile_width */ |
118 | case V_038004_COLOR_16_16_16_16_FLOAT: | 272 | *height_align = 1; /* tile_height */ |
119 | *bpe = 8; | 273 | *depth_align = 1; |
274 | *base_align = 1; | ||
120 | break; | 275 | break; |
121 | case V_038004_FMT_16_16_16: | 276 | case ARRAY_LINEAR_ALIGNED: |
122 | case V_038004_FMT_16_16_16_FLOAT: | 277 | *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize)); |
123 | *bpe = 6; | 278 | *height_align = tile_height; |
279 | *depth_align = 1; | ||
280 | *base_align = values->group_size; | ||
124 | break; | 281 | break; |
125 | case V_038004_FMT_32_32_32: | 282 | case ARRAY_1D_TILED_THIN1: |
126 | case V_038004_FMT_32_32_32_FLOAT: | 283 | *pitch_align = max((u32)tile_width, |
127 | *bpe = 12; | 284 | (u32)(values->group_size / |
285 | (tile_height * values->blocksize * values->nsamples))); | ||
286 | *height_align = tile_height; | ||
287 | *depth_align = 1; | ||
288 | *base_align = values->group_size; | ||
128 | break; | 289 | break; |
129 | case V_038004_COLOR_32_32_32_32: | 290 | case ARRAY_2D_TILED_THIN1: |
130 | case V_038004_COLOR_32_32_32_32_FLOAT: | 291 | *pitch_align = max((u32)macro_tile_width, |
131 | *bpe = 16; | 292 | (u32)(((values->group_size / tile_height) / |
293 | (values->blocksize * values->nsamples)) * | ||
294 | values->nbanks)) * tile_width; | ||
295 | *height_align = macro_tile_height * tile_height; | ||
296 | *depth_align = 1; | ||
297 | *base_align = max(macro_tile_bytes, | ||
298 | (*pitch_align) * values->blocksize * (*height_align) * values->nsamples); | ||
132 | break; | 299 | break; |
133 | case V_038004_FMT_GB_GR: | ||
134 | case V_038004_FMT_BG_RG: | ||
135 | case V_038004_COLOR_INVALID: | ||
136 | default: | 300 | default: |
137 | *bpe = 16; | ||
138 | return -EINVAL; | 301 | return -EINVAL; |
139 | } | 302 | } |
303 | |||
140 | return 0; | 304 | return 0; |
141 | } | 305 | } |
142 | 306 | ||
@@ -153,10 +317,12 @@ static void r600_cs_track_init(struct r600_cs_track *track) | |||
153 | track->cb_color_info[i] = 0; | 317 | track->cb_color_info[i] = 0; |
154 | track->cb_color_bo[i] = NULL; | 318 | track->cb_color_bo[i] = NULL; |
155 | track->cb_color_bo_offset[i] = 0xFFFFFFFF; | 319 | track->cb_color_bo_offset[i] = 0xFFFFFFFF; |
320 | track->cb_color_bo_mc[i] = 0xFFFFFFFF; | ||
156 | } | 321 | } |
157 | track->cb_target_mask = 0xFFFFFFFF; | 322 | track->cb_target_mask = 0xFFFFFFFF; |
158 | track->cb_shader_mask = 0xFFFFFFFF; | 323 | track->cb_shader_mask = 0xFFFFFFFF; |
159 | track->db_bo = NULL; | 324 | track->db_bo = NULL; |
325 | track->db_bo_mc = 0xFFFFFFFF; | ||
160 | /* assume the biggest format and that htile is enabled */ | 326 | /* assume the biggest format and that htile is enabled */ |
161 | track->db_depth_info = 7 | (1 << 25); | 327 | track->db_depth_info = 7 | (1 << 25); |
162 | track->db_depth_view = 0xFFFFC000; | 328 | track->db_depth_view = 0xFFFFC000; |
@@ -168,71 +334,59 @@ static void r600_cs_track_init(struct r600_cs_track *track) | |||
168 | static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | 334 | static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) |
169 | { | 335 | { |
170 | struct r600_cs_track *track = p->track; | 336 | struct r600_cs_track *track = p->track; |
171 | u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align; | 337 | u32 slice_tile_max, size, tmp; |
338 | u32 height, height_align, pitch, pitch_align, depth_align; | ||
339 | u64 base_offset, base_align; | ||
340 | struct array_mode_checker array_check; | ||
172 | volatile u32 *ib = p->ib->ptr; | 341 | volatile u32 *ib = p->ib->ptr; |
173 | 342 | unsigned array_mode; | |
343 | u32 format; | ||
174 | if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { | 344 | if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { |
175 | dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n"); | 345 | dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n"); |
176 | return -EINVAL; | 346 | return -EINVAL; |
177 | } | 347 | } |
178 | size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; | 348 | size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; |
179 | if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) { | 349 | format = G_0280A0_FORMAT(track->cb_color_info[i]); |
350 | if (!fmt_is_valid_color(format)) { | ||
180 | dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", | 351 | dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", |
181 | __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]), | 352 | __func__, __LINE__, format, |
182 | i, track->cb_color_info[i]); | 353 | i, track->cb_color_info[i]); |
183 | return -EINVAL; | 354 | return -EINVAL; |
184 | } | 355 | } |
185 | /* pitch is the number of 8x8 tiles per row */ | 356 | /* pitch in pixels */ |
186 | pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1; | 357 | pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8; |
187 | slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; | 358 | slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; |
188 | height = size / (pitch * 8 * bpe); | 359 | slice_tile_max *= 64; |
360 | height = slice_tile_max / pitch; | ||
189 | if (height > 8192) | 361 | if (height > 8192) |
190 | height = 8192; | 362 | height = 8192; |
191 | if (height > 7) | 363 | array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); |
192 | height &= ~0x7; | 364 | |
193 | switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) { | 365 | base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i]; |
366 | array_check.array_mode = array_mode; | ||
367 | array_check.group_size = track->group_size; | ||
368 | array_check.nbanks = track->nbanks; | ||
369 | array_check.npipes = track->npipes; | ||
370 | array_check.nsamples = track->nsamples; | ||
371 | array_check.blocksize = fmt_get_blocksize(format); | ||
372 | if (r600_get_array_mode_alignment(&array_check, | ||
373 | &pitch_align, &height_align, &depth_align, &base_align)) { | ||
374 | dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, | ||
375 | G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, | ||
376 | track->cb_color_info[i]); | ||
377 | return -EINVAL; | ||
378 | } | ||
379 | switch (array_mode) { | ||
194 | case V_0280A0_ARRAY_LINEAR_GENERAL: | 380 | case V_0280A0_ARRAY_LINEAR_GENERAL: |
195 | /* technically height & 0x7 */ | ||
196 | break; | 381 | break; |
197 | case V_0280A0_ARRAY_LINEAR_ALIGNED: | 382 | case V_0280A0_ARRAY_LINEAR_ALIGNED: |
198 | pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8; | ||
199 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
200 | dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", | ||
201 | __func__, __LINE__, pitch); | ||
202 | return -EINVAL; | ||
203 | } | ||
204 | if (!IS_ALIGNED(height, 8)) { | ||
205 | dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", | ||
206 | __func__, __LINE__, height); | ||
207 | return -EINVAL; | ||
208 | } | ||
209 | break; | 383 | break; |
210 | case V_0280A0_ARRAY_1D_TILED_THIN1: | 384 | case V_0280A0_ARRAY_1D_TILED_THIN1: |
211 | pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8; | 385 | /* avoid breaking userspace */ |
212 | if (!IS_ALIGNED(pitch, pitch_align)) { | 386 | if (height > 7) |
213 | dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", | 387 | height &= ~0x7; |
214 | __func__, __LINE__, pitch); | ||
215 | return -EINVAL; | ||
216 | } | ||
217 | if (!IS_ALIGNED(height, 8)) { | ||
218 | dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", | ||
219 | __func__, __LINE__, height); | ||
220 | return -EINVAL; | ||
221 | } | ||
222 | break; | 388 | break; |
223 | case V_0280A0_ARRAY_2D_TILED_THIN1: | 389 | case V_0280A0_ARRAY_2D_TILED_THIN1: |
224 | pitch_align = max((u32)track->nbanks, | ||
225 | (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)); | ||
226 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
227 | dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", | ||
228 | __func__, __LINE__, pitch); | ||
229 | return -EINVAL; | ||
230 | } | ||
231 | if (!IS_ALIGNED((height / 8), track->nbanks)) { | ||
232 | dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", | ||
233 | __func__, __LINE__, height); | ||
234 | return -EINVAL; | ||
235 | } | ||
236 | break; | 390 | break; |
237 | default: | 391 | default: |
238 | dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, | 392 | dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, |
@@ -240,21 +394,46 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
240 | track->cb_color_info[i]); | 394 | track->cb_color_info[i]); |
241 | return -EINVAL; | 395 | return -EINVAL; |
242 | } | 396 | } |
243 | /* check offset */ | 397 | |
244 | tmp = height * pitch * 8 * bpe; | 398 | if (!IS_ALIGNED(pitch, pitch_align)) { |
245 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { | 399 | dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n", |
246 | dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]); | 400 | __func__, __LINE__, pitch, pitch_align, array_mode); |
247 | return -EINVAL; | 401 | return -EINVAL; |
248 | } | 402 | } |
249 | if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) { | 403 | if (!IS_ALIGNED(height, height_align)) { |
250 | dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]); | 404 | dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n", |
405 | __func__, __LINE__, height, height_align, array_mode); | ||
251 | return -EINVAL; | 406 | return -EINVAL; |
252 | } | 407 | } |
408 | if (!IS_ALIGNED(base_offset, base_align)) { | ||
409 | dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i, | ||
410 | base_offset, base_align, array_mode); | ||
411 | return -EINVAL; | ||
412 | } | ||
413 | |||
414 | /* check offset */ | ||
415 | tmp = fmt_get_nblocksy(format, height) * fmt_get_nblocksx(format, pitch) * fmt_get_blocksize(format); | ||
416 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { | ||
417 | if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { | ||
418 | /* the initial DDX does bad things with the CB size occasionally */ | ||
419 | /* it rounds up height too far for slice tile max but the BO is smaller */ | ||
420 | /* r600c,g also seem to flush at bad times in some apps resulting in | ||
421 | * bogus values here. So for linear just allow anything to avoid breaking | ||
422 | * broken userspace. | ||
423 | */ | ||
424 | } else { | ||
425 | dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i, | ||
426 | array_mode, | ||
427 | track->cb_color_bo_offset[i], tmp, | ||
428 | radeon_bo_size(track->cb_color_bo[i])); | ||
429 | return -EINVAL; | ||
430 | } | ||
431 | } | ||
253 | /* limit max tile */ | 432 | /* limit max tile */ |
254 | tmp = (height * pitch * 8) >> 6; | 433 | tmp = (height * pitch) >> 6; |
255 | if (tmp < slice_tile_max) | 434 | if (tmp < slice_tile_max) |
256 | slice_tile_max = tmp; | 435 | slice_tile_max = tmp; |
257 | tmp = S_028060_PITCH_TILE_MAX(pitch - 1) | | 436 | tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | |
258 | S_028060_SLICE_TILE_MAX(slice_tile_max - 1); | 437 | S_028060_SLICE_TILE_MAX(slice_tile_max - 1); |
259 | ib[track->cb_color_size_idx[i]] = tmp; | 438 | ib[track->cb_color_size_idx[i]] = tmp; |
260 | return 0; | 439 | return 0; |
@@ -296,7 +475,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
296 | /* Check depth buffer */ | 475 | /* Check depth buffer */ |
297 | if (G_028800_STENCIL_ENABLE(track->db_depth_control) || | 476 | if (G_028800_STENCIL_ENABLE(track->db_depth_control) || |
298 | G_028800_Z_ENABLE(track->db_depth_control)) { | 477 | G_028800_Z_ENABLE(track->db_depth_control)) { |
299 | u32 nviews, bpe, ntiles, pitch, pitch_align, height, size; | 478 | u32 nviews, bpe, ntiles, size, slice_tile_max; |
479 | u32 height, height_align, pitch, pitch_align, depth_align; | ||
480 | u64 base_offset, base_align; | ||
481 | struct array_mode_checker array_check; | ||
482 | int array_mode; | ||
483 | |||
300 | if (track->db_bo == NULL) { | 484 | if (track->db_bo == NULL) { |
301 | dev_warn(p->dev, "z/stencil with no depth buffer\n"); | 485 | dev_warn(p->dev, "z/stencil with no depth buffer\n"); |
302 | return -EINVAL; | 486 | return -EINVAL; |
@@ -339,39 +523,34 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
339 | ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); | 523 | ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); |
340 | } else { | 524 | } else { |
341 | size = radeon_bo_size(track->db_bo); | 525 | size = radeon_bo_size(track->db_bo); |
342 | pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1; | 526 | /* pitch in pixels */ |
343 | height = size / (pitch * 8 * bpe); | 527 | pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; |
344 | height &= ~0x7; | 528 | slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; |
345 | if (!height) | 529 | slice_tile_max *= 64; |
346 | height = 8; | 530 | height = slice_tile_max / pitch; |
347 | 531 | if (height > 8192) | |
348 | switch (G_028010_ARRAY_MODE(track->db_depth_info)) { | 532 | height = 8192; |
533 | base_offset = track->db_bo_mc + track->db_offset; | ||
534 | array_mode = G_028010_ARRAY_MODE(track->db_depth_info); | ||
535 | array_check.array_mode = array_mode; | ||
536 | array_check.group_size = track->group_size; | ||
537 | array_check.nbanks = track->nbanks; | ||
538 | array_check.npipes = track->npipes; | ||
539 | array_check.nsamples = track->nsamples; | ||
540 | array_check.blocksize = bpe; | ||
541 | if (r600_get_array_mode_alignment(&array_check, | ||
542 | &pitch_align, &height_align, &depth_align, &base_align)) { | ||
543 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | ||
544 | G_028010_ARRAY_MODE(track->db_depth_info), | ||
545 | track->db_depth_info); | ||
546 | return -EINVAL; | ||
547 | } | ||
548 | switch (array_mode) { | ||
349 | case V_028010_ARRAY_1D_TILED_THIN1: | 549 | case V_028010_ARRAY_1D_TILED_THIN1: |
350 | pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8); | 550 | /* don't break userspace */ |
351 | if (!IS_ALIGNED(pitch, pitch_align)) { | 551 | height &= ~0x7; |
352 | dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", | ||
353 | __func__, __LINE__, pitch); | ||
354 | return -EINVAL; | ||
355 | } | ||
356 | if (!IS_ALIGNED(height, 8)) { | ||
357 | dev_warn(p->dev, "%s:%d db height (%d) invalid\n", | ||
358 | __func__, __LINE__, height); | ||
359 | return -EINVAL; | ||
360 | } | ||
361 | break; | 552 | break; |
362 | case V_028010_ARRAY_2D_TILED_THIN1: | 553 | case V_028010_ARRAY_2D_TILED_THIN1: |
363 | pitch_align = max((u32)track->nbanks, | ||
364 | (u32)(((track->group_size / 8) / bpe) * track->nbanks)); | ||
365 | if (!IS_ALIGNED(pitch, pitch_align)) { | ||
366 | dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", | ||
367 | __func__, __LINE__, pitch); | ||
368 | return -EINVAL; | ||
369 | } | ||
370 | if ((height / 8) & (track->nbanks - 1)) { | ||
371 | dev_warn(p->dev, "%s:%d db height (%d) invalid\n", | ||
372 | __func__, __LINE__, height); | ||
373 | return -EINVAL; | ||
374 | } | ||
375 | break; | 554 | break; |
376 | default: | 555 | default: |
377 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | 556 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, |
@@ -379,17 +558,31 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
379 | track->db_depth_info); | 558 | track->db_depth_info); |
380 | return -EINVAL; | 559 | return -EINVAL; |
381 | } | 560 | } |
382 | if (!IS_ALIGNED(track->db_offset, track->group_size)) { | 561 | |
383 | dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset); | 562 | if (!IS_ALIGNED(pitch, pitch_align)) { |
563 | dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", | ||
564 | __func__, __LINE__, pitch, pitch_align, array_mode); | ||
384 | return -EINVAL; | 565 | return -EINVAL; |
385 | } | 566 | } |
567 | if (!IS_ALIGNED(height, height_align)) { | ||
568 | dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", | ||
569 | __func__, __LINE__, height, height_align, array_mode); | ||
570 | return -EINVAL; | ||
571 | } | ||
572 | if (!IS_ALIGNED(base_offset, base_align)) { | ||
573 | dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i, | ||
574 | base_offset, base_align, array_mode); | ||
575 | return -EINVAL; | ||
576 | } | ||
577 | |||
386 | ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; | 578 | ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; |
387 | nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; | 579 | nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; |
388 | tmp = ntiles * bpe * 64 * nviews; | 580 | tmp = ntiles * bpe * 64 * nviews; |
389 | if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { | 581 | if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { |
390 | dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n", | 582 | dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", |
391 | track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, | 583 | array_mode, |
392 | radeon_bo_size(track->db_bo)); | 584 | track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, |
585 | radeon_bo_size(track->db_bo)); | ||
393 | return -EINVAL; | 586 | return -EINVAL; |
394 | } | 587 | } |
395 | } | 588 | } |
@@ -595,33 +788,28 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
595 | if (wait_reg_mem.type != PACKET_TYPE3 || | 788 | if (wait_reg_mem.type != PACKET_TYPE3 || |
596 | wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { | 789 | wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { |
597 | DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); | 790 | DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); |
598 | r = -EINVAL; | 791 | return -EINVAL; |
599 | return r; | ||
600 | } | 792 | } |
601 | 793 | ||
602 | wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); | 794 | wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); |
603 | /* bit 4 is reg (0) or mem (1) */ | 795 | /* bit 4 is reg (0) or mem (1) */ |
604 | if (wait_reg_mem_info & 0x10) { | 796 | if (wait_reg_mem_info & 0x10) { |
605 | DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); | 797 | DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); |
606 | r = -EINVAL; | 798 | return -EINVAL; |
607 | return r; | ||
608 | } | 799 | } |
609 | /* waiting for value to be equal */ | 800 | /* waiting for value to be equal */ |
610 | if ((wait_reg_mem_info & 0x7) != 0x3) { | 801 | if ((wait_reg_mem_info & 0x7) != 0x3) { |
611 | DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); | 802 | DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); |
612 | r = -EINVAL; | 803 | return -EINVAL; |
613 | return r; | ||
614 | } | 804 | } |
615 | if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) { | 805 | if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) { |
616 | DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); | 806 | DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); |
617 | r = -EINVAL; | 807 | return -EINVAL; |
618 | return r; | ||
619 | } | 808 | } |
620 | 809 | ||
621 | if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) { | 810 | if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) { |
622 | DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); | 811 | DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); |
623 | r = -EINVAL; | 812 | return -EINVAL; |
624 | return r; | ||
625 | } | 813 | } |
626 | 814 | ||
627 | /* jump over the NOP */ | 815 | /* jump over the NOP */ |
@@ -640,8 +828,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
640 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 828 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
641 | if (!obj) { | 829 | if (!obj) { |
642 | DRM_ERROR("cannot find crtc %d\n", crtc_id); | 830 | DRM_ERROR("cannot find crtc %d\n", crtc_id); |
643 | r = -EINVAL; | 831 | return -EINVAL; |
644 | goto out; | ||
645 | } | 832 | } |
646 | crtc = obj_to_crtc(obj); | 833 | crtc = obj_to_crtc(obj); |
647 | radeon_crtc = to_radeon_crtc(crtc); | 834 | radeon_crtc = to_radeon_crtc(crtc); |
@@ -664,14 +851,13 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
664 | break; | 851 | break; |
665 | default: | 852 | default: |
666 | DRM_ERROR("unknown crtc reloc\n"); | 853 | DRM_ERROR("unknown crtc reloc\n"); |
667 | r = -EINVAL; | 854 | return -EINVAL; |
668 | goto out; | ||
669 | } | 855 | } |
670 | ib[h_idx] = header; | 856 | ib[h_idx] = header; |
671 | ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; | 857 | ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; |
672 | } | 858 | } |
673 | out: | 859 | |
674 | return r; | 860 | return 0; |
675 | } | 861 | } |
676 | 862 | ||
677 | static int r600_packet0_check(struct radeon_cs_parser *p, | 863 | static int r600_packet0_check(struct radeon_cs_parser *p, |
@@ -743,7 +929,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
743 | return 0; | 929 | return 0; |
744 | ib = p->ib->ptr; | 930 | ib = p->ib->ptr; |
745 | switch (reg) { | 931 | switch (reg) { |
746 | /* force following reg to 0 in an attemp to disable out buffer | 932 | /* force following reg to 0 in an attempt to disable out buffer |
747 | * which will need us to better understand how it works to perform | 933 | * which will need us to better understand how it works to perform |
748 | * security check on it (Jerome) | 934 | * security check on it (Jerome) |
749 | */ | 935 | */ |
@@ -938,6 +1124,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
938 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1124 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
939 | track->cb_color_base_last[tmp] = ib[idx]; | 1125 | track->cb_color_base_last[tmp] = ib[idx]; |
940 | track->cb_color_bo[tmp] = reloc->robj; | 1126 | track->cb_color_bo[tmp] = reloc->robj; |
1127 | track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; | ||
941 | break; | 1128 | break; |
942 | case DB_DEPTH_BASE: | 1129 | case DB_DEPTH_BASE: |
943 | r = r600_cs_packet_next_reloc(p, &reloc); | 1130 | r = r600_cs_packet_next_reloc(p, &reloc); |
@@ -949,6 +1136,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
949 | track->db_offset = radeon_get_ib_value(p, idx) << 8; | 1136 | track->db_offset = radeon_get_ib_value(p, idx) << 8; |
950 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1137 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
951 | track->db_bo = reloc->robj; | 1138 | track->db_bo = reloc->robj; |
1139 | track->db_bo_mc = reloc->lobj.gpu_offset; | ||
952 | break; | 1140 | break; |
953 | case DB_HTILE_DATA_BASE: | 1141 | case DB_HTILE_DATA_BASE: |
954 | case SQ_PGM_START_FS: | 1142 | case SQ_PGM_START_FS: |
@@ -1019,39 +1207,61 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
1019 | return 0; | 1207 | return 0; |
1020 | } | 1208 | } |
1021 | 1209 | ||
1022 | static inline unsigned minify(unsigned size, unsigned levels) | 1210 | static inline unsigned mip_minify(unsigned size, unsigned level) |
1023 | { | 1211 | { |
1024 | size = size >> levels; | 1212 | unsigned val; |
1025 | if (size < 1) | 1213 | |
1026 | size = 1; | 1214 | val = max(1U, size >> level); |
1027 | return size; | 1215 | if (level > 0) |
1216 | val = roundup_pow_of_two(val); | ||
1217 | return val; | ||
1028 | } | 1218 | } |
1029 | 1219 | ||
1030 | static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels, | 1220 | static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, |
1031 | unsigned w0, unsigned h0, unsigned d0, unsigned bpe, | 1221 | unsigned w0, unsigned h0, unsigned d0, unsigned format, |
1032 | unsigned pitch_align, | 1222 | unsigned block_align, unsigned height_align, unsigned base_align, |
1033 | unsigned *l0_size, unsigned *mipmap_size) | 1223 | unsigned *l0_size, unsigned *mipmap_size) |
1034 | { | 1224 | { |
1035 | unsigned offset, i, level, face; | 1225 | unsigned offset, i, level; |
1036 | unsigned width, height, depth, rowstride, size; | 1226 | unsigned width, height, depth, size; |
1227 | unsigned blocksize; | ||
1228 | unsigned nbx, nby; | ||
1229 | unsigned nlevels = llevel - blevel + 1; | ||
1037 | 1230 | ||
1038 | w0 = minify(w0, 0); | 1231 | *l0_size = -1; |
1039 | h0 = minify(h0, 0); | 1232 | blocksize = fmt_get_blocksize(format); |
1040 | d0 = minify(d0, 0); | 1233 | |
1234 | w0 = mip_minify(w0, 0); | ||
1235 | h0 = mip_minify(h0, 0); | ||
1236 | d0 = mip_minify(d0, 0); | ||
1041 | for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) { | 1237 | for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) { |
1042 | width = minify(w0, i); | 1238 | width = mip_minify(w0, i); |
1043 | height = minify(h0, i); | 1239 | nbx = fmt_get_nblocksx(format, width); |
1044 | depth = minify(d0, i); | 1240 | |
1045 | for(face = 0; face < nfaces; face++) { | 1241 | nbx = round_up(nbx, block_align); |
1046 | rowstride = ALIGN((width * bpe), pitch_align); | 1242 | |
1047 | size = height * rowstride * depth; | 1243 | height = mip_minify(h0, i); |
1048 | offset += size; | 1244 | nby = fmt_get_nblocksy(format, height); |
1049 | offset = (offset + 0x1f) & ~0x1f; | 1245 | nby = round_up(nby, height_align); |
1050 | } | 1246 | |
1247 | depth = mip_minify(d0, i); | ||
1248 | |||
1249 | size = nbx * nby * blocksize; | ||
1250 | if (nfaces) | ||
1251 | size *= nfaces; | ||
1252 | else | ||
1253 | size *= depth; | ||
1254 | |||
1255 | if (i == 0) | ||
1256 | *l0_size = size; | ||
1257 | |||
1258 | if (i == 0 || i == 1) | ||
1259 | offset = round_up(offset, base_align); | ||
1260 | |||
1261 | offset += size; | ||
1051 | } | 1262 | } |
1052 | *l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0; | ||
1053 | *mipmap_size = offset; | 1263 | *mipmap_size = offset; |
1054 | if (!nlevels) | 1264 | if (llevel == 0) |
1055 | *mipmap_size = *l0_size; | 1265 | *mipmap_size = *l0_size; |
1056 | if (!blevel) | 1266 | if (!blevel) |
1057 | *mipmap_size -= *l0_size; | 1267 | *mipmap_size -= *l0_size; |
@@ -1070,16 +1280,27 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels | |||
1070 | static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, | 1280 | static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, |
1071 | struct radeon_bo *texture, | 1281 | struct radeon_bo *texture, |
1072 | struct radeon_bo *mipmap, | 1282 | struct radeon_bo *mipmap, |
1283 | u64 base_offset, | ||
1284 | u64 mip_offset, | ||
1073 | u32 tiling_flags) | 1285 | u32 tiling_flags) |
1074 | { | 1286 | { |
1075 | struct r600_cs_track *track = p->track; | 1287 | struct r600_cs_track *track = p->track; |
1076 | u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0; | 1288 | u32 nfaces, llevel, blevel, w0, h0, d0; |
1077 | u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align; | 1289 | u32 word0, word1, l0_size, mipmap_size, word2, word3; |
1290 | u32 height_align, pitch, pitch_align, depth_align; | ||
1291 | u32 array, barray, larray; | ||
1292 | u64 base_align; | ||
1293 | struct array_mode_checker array_check; | ||
1294 | u32 format; | ||
1078 | 1295 | ||
1079 | /* on legacy kernel we don't perform advanced check */ | 1296 | /* on legacy kernel we don't perform advanced check */ |
1080 | if (p->rdev == NULL) | 1297 | if (p->rdev == NULL) |
1081 | return 0; | 1298 | return 0; |
1082 | 1299 | ||
1300 | /* convert to bytes */ | ||
1301 | base_offset <<= 8; | ||
1302 | mip_offset <<= 8; | ||
1303 | |||
1083 | word0 = radeon_get_ib_value(p, idx + 0); | 1304 | word0 = radeon_get_ib_value(p, idx + 0); |
1084 | if (tiling_flags & RADEON_TILING_MACRO) | 1305 | if (tiling_flags & RADEON_TILING_MACRO) |
1085 | word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); | 1306 | word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); |
@@ -1096,82 +1317,89 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i | |||
1096 | case V_038000_SQ_TEX_DIM_3D: | 1317 | case V_038000_SQ_TEX_DIM_3D: |
1097 | break; | 1318 | break; |
1098 | case V_038000_SQ_TEX_DIM_CUBEMAP: | 1319 | case V_038000_SQ_TEX_DIM_CUBEMAP: |
1099 | nfaces = 6; | 1320 | if (p->family >= CHIP_RV770) |
1321 | nfaces = 8; | ||
1322 | else | ||
1323 | nfaces = 6; | ||
1100 | break; | 1324 | break; |
1101 | case V_038000_SQ_TEX_DIM_1D_ARRAY: | 1325 | case V_038000_SQ_TEX_DIM_1D_ARRAY: |
1102 | case V_038000_SQ_TEX_DIM_2D_ARRAY: | 1326 | case V_038000_SQ_TEX_DIM_2D_ARRAY: |
1327 | array = 1; | ||
1328 | break; | ||
1103 | case V_038000_SQ_TEX_DIM_2D_MSAA: | 1329 | case V_038000_SQ_TEX_DIM_2D_MSAA: |
1104 | case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: | 1330 | case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: |
1105 | default: | 1331 | default: |
1106 | dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); | 1332 | dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); |
1107 | return -EINVAL; | 1333 | return -EINVAL; |
1108 | } | 1334 | } |
1109 | if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) { | 1335 | format = G_038004_DATA_FORMAT(word1); |
1336 | if (!fmt_is_valid_texture(format, p->family)) { | ||
1110 | dev_warn(p->dev, "%s:%d texture invalid format %d\n", | 1337 | dev_warn(p->dev, "%s:%d texture invalid format %d\n", |
1111 | __func__, __LINE__, G_038004_DATA_FORMAT(word1)); | 1338 | __func__, __LINE__, format); |
1112 | return -EINVAL; | 1339 | return -EINVAL; |
1113 | } | 1340 | } |
1114 | 1341 | ||
1115 | pitch = G_038000_PITCH(word0) + 1; | 1342 | /* pitch in texels */ |
1116 | switch (G_038000_TILE_MODE(word0)) { | 1343 | pitch = (G_038000_PITCH(word0) + 1) * 8; |
1117 | case V_038000_ARRAY_LINEAR_GENERAL: | 1344 | array_check.array_mode = G_038000_TILE_MODE(word0); |
1118 | pitch_align = 1; | 1345 | array_check.group_size = track->group_size; |
1119 | /* XXX check height align */ | 1346 | array_check.nbanks = track->nbanks; |
1120 | break; | 1347 | array_check.npipes = track->npipes; |
1121 | case V_038000_ARRAY_LINEAR_ALIGNED: | 1348 | array_check.nsamples = 1; |
1122 | pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8; | 1349 | array_check.blocksize = fmt_get_blocksize(format); |
1123 | if (!IS_ALIGNED(pitch, pitch_align)) { | 1350 | if (r600_get_array_mode_alignment(&array_check, |
1124 | dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", | 1351 | &pitch_align, &height_align, &depth_align, &base_align)) { |
1125 | __func__, __LINE__, pitch); | 1352 | dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", |
1126 | return -EINVAL; | 1353 | __func__, __LINE__, G_038000_TILE_MODE(word0)); |
1127 | } | 1354 | return -EINVAL; |
1128 | /* XXX check height align */ | 1355 | } |
1129 | break; | 1356 | |
1130 | case V_038000_ARRAY_1D_TILED_THIN1: | 1357 | /* XXX check height as well... */ |
1131 | pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8; | 1358 | |
1132 | if (!IS_ALIGNED(pitch, pitch_align)) { | 1359 | if (!IS_ALIGNED(pitch, pitch_align)) { |
1133 | dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", | 1360 | dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n", |
1134 | __func__, __LINE__, pitch); | 1361 | __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0)); |
1135 | return -EINVAL; | 1362 | return -EINVAL; |
1136 | } | 1363 | } |
1137 | /* XXX check height align */ | 1364 | if (!IS_ALIGNED(base_offset, base_align)) { |
1138 | break; | 1365 | dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n", |
1139 | case V_038000_ARRAY_2D_TILED_THIN1: | 1366 | __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0)); |
1140 | pitch_align = max((u32)track->nbanks, | 1367 | return -EINVAL; |
1141 | (u32)(((track->group_size / 8) / bpe) * track->nbanks)); | 1368 | } |
1142 | if (!IS_ALIGNED(pitch, pitch_align)) { | 1369 | if (!IS_ALIGNED(mip_offset, base_align)) { |
1143 | dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", | 1370 | dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n", |
1144 | __func__, __LINE__, pitch); | 1371 | __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0)); |
1145 | return -EINVAL; | ||
1146 | } | ||
1147 | /* XXX check height align */ | ||
1148 | break; | ||
1149 | default: | ||
1150 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | ||
1151 | G_038000_TILE_MODE(word0), word0); | ||
1152 | return -EINVAL; | 1372 | return -EINVAL; |
1153 | } | 1373 | } |
1154 | /* XXX check offset align */ | 1374 | |
1375 | word2 = radeon_get_ib_value(p, idx + 2) << 8; | ||
1376 | word3 = radeon_get_ib_value(p, idx + 3) << 8; | ||
1155 | 1377 | ||
1156 | word0 = radeon_get_ib_value(p, idx + 4); | 1378 | word0 = radeon_get_ib_value(p, idx + 4); |
1157 | word1 = radeon_get_ib_value(p, idx + 5); | 1379 | word1 = radeon_get_ib_value(p, idx + 5); |
1158 | blevel = G_038010_BASE_LEVEL(word0); | 1380 | blevel = G_038010_BASE_LEVEL(word0); |
1159 | nlevels = G_038014_LAST_LEVEL(word1); | 1381 | llevel = G_038014_LAST_LEVEL(word1); |
1160 | r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, | 1382 | if (array == 1) { |
1161 | (pitch_align * bpe), | 1383 | barray = G_038014_BASE_ARRAY(word1); |
1384 | larray = G_038014_LAST_ARRAY(word1); | ||
1385 | |||
1386 | nfaces = larray - barray + 1; | ||
1387 | } | ||
1388 | r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format, | ||
1389 | pitch_align, height_align, base_align, | ||
1162 | &l0_size, &mipmap_size); | 1390 | &l0_size, &mipmap_size); |
1163 | /* using get ib will give us the offset into the texture bo */ | 1391 | /* using get ib will give us the offset into the texture bo */ |
1164 | word0 = radeon_get_ib_value(p, idx + 2) << 8; | 1392 | if ((l0_size + word2) > radeon_bo_size(texture)) { |
1165 | if ((l0_size + word0) > radeon_bo_size(texture)) { | ||
1166 | dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n", | 1393 | dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n", |
1167 | w0, h0, bpe, word0, l0_size, radeon_bo_size(texture)); | 1394 | w0, h0, format, word2, l0_size, radeon_bo_size(texture)); |
1395 | dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align); | ||
1168 | return -EINVAL; | 1396 | return -EINVAL; |
1169 | } | 1397 | } |
1170 | /* using get ib will give us the offset into the mipmap bo */ | 1398 | /* using get ib will give us the offset into the mipmap bo */ |
1171 | word0 = radeon_get_ib_value(p, idx + 3) << 8; | 1399 | word3 = radeon_get_ib_value(p, idx + 3) << 8; |
1172 | if ((mipmap_size + word0) > radeon_bo_size(mipmap)) { | 1400 | if ((mipmap_size + word3) > radeon_bo_size(mipmap)) { |
1173 | /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", | 1401 | /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", |
1174 | w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/ | 1402 | w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/ |
1175 | } | 1403 | } |
1176 | return 0; | 1404 | return 0; |
1177 | } | 1405 | } |
@@ -1194,6 +1422,38 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1194 | idx_value = radeon_get_ib_value(p, idx); | 1422 | idx_value = radeon_get_ib_value(p, idx); |
1195 | 1423 | ||
1196 | switch (pkt->opcode) { | 1424 | switch (pkt->opcode) { |
1425 | case PACKET3_SET_PREDICATION: | ||
1426 | { | ||
1427 | int pred_op; | ||
1428 | int tmp; | ||
1429 | if (pkt->count != 1) { | ||
1430 | DRM_ERROR("bad SET PREDICATION\n"); | ||
1431 | return -EINVAL; | ||
1432 | } | ||
1433 | |||
1434 | tmp = radeon_get_ib_value(p, idx + 1); | ||
1435 | pred_op = (tmp >> 16) & 0x7; | ||
1436 | |||
1437 | /* for the clear predicate operation */ | ||
1438 | if (pred_op == 0) | ||
1439 | return 0; | ||
1440 | |||
1441 | if (pred_op > 2) { | ||
1442 | DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op); | ||
1443 | return -EINVAL; | ||
1444 | } | ||
1445 | |||
1446 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
1447 | if (r) { | ||
1448 | DRM_ERROR("bad SET PREDICATION\n"); | ||
1449 | return -EINVAL; | ||
1450 | } | ||
1451 | |||
1452 | ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); | ||
1453 | ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff); | ||
1454 | } | ||
1455 | break; | ||
1456 | |||
1197 | case PACKET3_START_3D_CMDBUF: | 1457 | case PACKET3_START_3D_CMDBUF: |
1198 | if (p->family >= CHIP_RV770 || pkt->count) { | 1458 | if (p->family >= CHIP_RV770 || pkt->count) { |
1199 | DRM_ERROR("bad START_3D\n"); | 1459 | DRM_ERROR("bad START_3D\n"); |
@@ -1386,7 +1646,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1386 | mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1646 | mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
1387 | mipmap = reloc->robj; | 1647 | mipmap = reloc->robj; |
1388 | r = r600_check_texture_resource(p, idx+(i*7)+1, | 1648 | r = r600_check_texture_resource(p, idx+(i*7)+1, |
1389 | texture, mipmap, reloc->lobj.tiling_flags); | 1649 | texture, mipmap, |
1650 | base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2), | ||
1651 | mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3), | ||
1652 | reloc->lobj.tiling_flags); | ||
1390 | if (r) | 1653 | if (r) |
1391 | return r; | 1654 | return r; |
1392 | ib[idx+1+(i*7)+2] += base_offset; | 1655 | ib[idx+1+(i*7)+2] += base_offset; |
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index e6a58ed48dcf..f5ac7e788d81 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include "drmP.h" | 26 | #include "drmP.h" |
27 | #include "radeon_drm.h" | 27 | #include "radeon_drm.h" |
28 | #include "radeon.h" | 28 | #include "radeon.h" |
29 | #include "radeon_asic.h" | ||
29 | #include "atom.h" | 30 | #include "atom.h" |
30 | 31 | ||
31 | /* | 32 | /* |
@@ -333,7 +334,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod | |||
333 | r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0, | 334 | r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0, |
334 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); | 335 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); |
335 | 336 | ||
336 | /* it's unknown what these bits do excatly, but it's indeed quite usefull for debugging */ | 337 | /* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */ |
337 | WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF); | 338 | WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF); |
338 | WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF); | 339 | WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF); |
339 | WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001); | 340 | WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001); |
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h index d84612ae47e0..f869897c7456 100644 --- a/drivers/gpu/drm/radeon/r600_reg.h +++ b/drivers/gpu/drm/radeon/r600_reg.h | |||
@@ -81,11 +81,16 @@ | |||
81 | #define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720 | 81 | #define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720 |
82 | #define R600_LOW_VID_LOWER_GPIO_CNTL 0x724 | 82 | #define R600_LOW_VID_LOWER_GPIO_CNTL 0x724 |
83 | 83 | ||
84 | 84 | #define R600_D1GRPH_SWAP_CONTROL 0x610C | |
85 | # define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0) | ||
86 | # define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0) | ||
87 | # define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0) | ||
88 | # define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0) | ||
85 | 89 | ||
86 | #define R600_HDP_NONSURFACE_BASE 0x2c04 | 90 | #define R600_HDP_NONSURFACE_BASE 0x2c04 |
87 | 91 | ||
88 | #define R600_BUS_CNTL 0x5420 | 92 | #define R600_BUS_CNTL 0x5420 |
93 | # define R600_BIOS_ROM_DIS (1 << 1) | ||
89 | #define R600_CONFIG_CNTL 0x5424 | 94 | #define R600_CONFIG_CNTL 0x5424 |
90 | #define R600_CONFIG_MEMSIZE 0x5428 | 95 | #define R600_CONFIG_MEMSIZE 0x5428 |
91 | #define R600_CONFIG_F0_BASE 0x542C | 96 | #define R600_CONFIG_F0_BASE 0x542C |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 858a1920c0d7..0245ae6c204e 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -51,6 +51,12 @@ | |||
51 | #define PTE_READABLE (1 << 5) | 51 | #define PTE_READABLE (1 << 5) |
52 | #define PTE_WRITEABLE (1 << 6) | 52 | #define PTE_WRITEABLE (1 << 6) |
53 | 53 | ||
54 | /* tiling bits */ | ||
55 | #define ARRAY_LINEAR_GENERAL 0x00000000 | ||
56 | #define ARRAY_LINEAR_ALIGNED 0x00000001 | ||
57 | #define ARRAY_1D_TILED_THIN1 0x00000002 | ||
58 | #define ARRAY_2D_TILED_THIN1 0x00000004 | ||
59 | |||
54 | /* Registers */ | 60 | /* Registers */ |
55 | #define ARB_POP 0x2418 | 61 | #define ARB_POP 0x2418 |
56 | #define ENABLE_TC128 (1 << 30) | 62 | #define ENABLE_TC128 (1 << 30) |
@@ -148,13 +154,14 @@ | |||
148 | #define ROQ_IB2_START(x) ((x) << 8) | 154 | #define ROQ_IB2_START(x) ((x) << 8) |
149 | #define CP_RB_BASE 0xC100 | 155 | #define CP_RB_BASE 0xC100 |
150 | #define CP_RB_CNTL 0xC104 | 156 | #define CP_RB_CNTL 0xC104 |
151 | #define RB_BUFSZ(x) ((x)<<0) | 157 | #define RB_BUFSZ(x) ((x) << 0) |
152 | #define RB_BLKSZ(x) ((x)<<8) | 158 | #define RB_BLKSZ(x) ((x) << 8) |
153 | #define RB_NO_UPDATE (1<<27) | 159 | #define RB_NO_UPDATE (1 << 27) |
154 | #define RB_RPTR_WR_ENA (1<<31) | 160 | #define RB_RPTR_WR_ENA (1 << 31) |
155 | #define BUF_SWAP_32BIT (2 << 16) | 161 | #define BUF_SWAP_32BIT (2 << 16) |
156 | #define CP_RB_RPTR 0x8700 | 162 | #define CP_RB_RPTR 0x8700 |
157 | #define CP_RB_RPTR_ADDR 0xC10C | 163 | #define CP_RB_RPTR_ADDR 0xC10C |
164 | #define RB_RPTR_SWAP(x) ((x) << 0) | ||
158 | #define CP_RB_RPTR_ADDR_HI 0xC110 | 165 | #define CP_RB_RPTR_ADDR_HI 0xC110 |
159 | #define CP_RB_RPTR_WR 0xC108 | 166 | #define CP_RB_RPTR_WR 0xC108 |
160 | #define CP_RB_WPTR 0xC114 | 167 | #define CP_RB_WPTR 0xC114 |
@@ -474,6 +481,7 @@ | |||
474 | #define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 | 481 | #define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 |
475 | #define VTX_REUSE_DEPTH_MASK 0x000000FF | 482 | #define VTX_REUSE_DEPTH_MASK 0x000000FF |
476 | #define VGT_EVENT_INITIATOR 0x28a90 | 483 | #define VGT_EVENT_INITIATOR 0x28a90 |
484 | # define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0) | ||
477 | # define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0) | 485 | # define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0) |
478 | 486 | ||
479 | #define VM_CONTEXT0_CNTL 0x1410 | 487 | #define VM_CONTEXT0_CNTL 0x1410 |
@@ -528,7 +536,7 @@ | |||
528 | #define IH_RB_WPTR_ADDR_LO 0x3e14 | 536 | #define IH_RB_WPTR_ADDR_LO 0x3e14 |
529 | #define IH_CNTL 0x3e18 | 537 | #define IH_CNTL 0x3e18 |
530 | # define ENABLE_INTR (1 << 0) | 538 | # define ENABLE_INTR (1 << 0) |
531 | # define IH_MC_SWAP(x) ((x) << 2) | 539 | # define IH_MC_SWAP(x) ((x) << 1) |
532 | # define IH_MC_SWAP_NONE 0 | 540 | # define IH_MC_SWAP_NONE 0 |
533 | # define IH_MC_SWAP_16BIT 1 | 541 | # define IH_MC_SWAP_16BIT 1 |
534 | # define IH_MC_SWAP_32BIT 2 | 542 | # define IH_MC_SWAP_32BIT 2 |
@@ -721,6 +729,54 @@ | |||
721 | /* DCE 3.2 */ | 729 | /* DCE 3.2 */ |
722 | # define DC_HPDx_EN (1 << 28) | 730 | # define DC_HPDx_EN (1 << 28) |
723 | 731 | ||
732 | #define D1GRPH_INTERRUPT_STATUS 0x6158 | ||
733 | #define D2GRPH_INTERRUPT_STATUS 0x6958 | ||
734 | # define DxGRPH_PFLIP_INT_OCCURRED (1 << 0) | ||
735 | # define DxGRPH_PFLIP_INT_CLEAR (1 << 8) | ||
736 | #define D1GRPH_INTERRUPT_CONTROL 0x615c | ||
737 | #define D2GRPH_INTERRUPT_CONTROL 0x695c | ||
738 | # define DxGRPH_PFLIP_INT_MASK (1 << 0) | ||
739 | # define DxGRPH_PFLIP_INT_TYPE (1 << 8) | ||
740 | |||
741 | /* PCIE link stuff */ | ||
742 | #define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ | ||
743 | # define LC_POINT_7_PLUS_EN (1 << 6) | ||
744 | #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ | ||
745 | # define LC_LINK_WIDTH_SHIFT 0 | ||
746 | # define LC_LINK_WIDTH_MASK 0x7 | ||
747 | # define LC_LINK_WIDTH_X0 0 | ||
748 | # define LC_LINK_WIDTH_X1 1 | ||
749 | # define LC_LINK_WIDTH_X2 2 | ||
750 | # define LC_LINK_WIDTH_X4 3 | ||
751 | # define LC_LINK_WIDTH_X8 4 | ||
752 | # define LC_LINK_WIDTH_X16 6 | ||
753 | # define LC_LINK_WIDTH_RD_SHIFT 4 | ||
754 | # define LC_LINK_WIDTH_RD_MASK 0x70 | ||
755 | # define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) | ||
756 | # define LC_RECONFIG_NOW (1 << 8) | ||
757 | # define LC_RENEGOTIATION_SUPPORT (1 << 9) | ||
758 | # define LC_RENEGOTIATE_EN (1 << 10) | ||
759 | # define LC_SHORT_RECONFIG_EN (1 << 11) | ||
760 | # define LC_UPCONFIGURE_SUPPORT (1 << 12) | ||
761 | # define LC_UPCONFIGURE_DIS (1 << 13) | ||
762 | #define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */ | ||
763 | # define LC_GEN2_EN_STRAP (1 << 0) | ||
764 | # define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1) | ||
765 | # define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5) | ||
766 | # define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6) | ||
767 | # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) | ||
768 | # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 | ||
769 | # define LC_CURRENT_DATA_RATE (1 << 11) | ||
770 | # define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) | ||
771 | # define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) | ||
772 | # define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) | ||
773 | # define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24) | ||
774 | #define MM_CFGREGS_CNTL 0x544c | ||
775 | # define MM_WR_TO_CFG_EN (1 << 3) | ||
776 | #define LINK_CNTL2 0x88 /* F0 */ | ||
777 | # define TARGET_LINK_SPEED_MASK (0xf << 0) | ||
778 | # define SELECTABLE_DEEMPHASIS (1 << 6) | ||
779 | |||
724 | /* | 780 | /* |
725 | * PM4 | 781 | * PM4 |
726 | */ | 782 | */ |
@@ -775,7 +831,27 @@ | |||
775 | #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) | 831 | #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) |
776 | #define PACKET3_COND_WRITE 0x45 | 832 | #define PACKET3_COND_WRITE 0x45 |
777 | #define PACKET3_EVENT_WRITE 0x46 | 833 | #define PACKET3_EVENT_WRITE 0x46 |
834 | #define EVENT_TYPE(x) ((x) << 0) | ||
835 | #define EVENT_INDEX(x) ((x) << 8) | ||
836 | /* 0 - any non-TS event | ||
837 | * 1 - ZPASS_DONE | ||
838 | * 2 - SAMPLE_PIPELINESTAT | ||
839 | * 3 - SAMPLE_STREAMOUTSTAT* | ||
840 | * 4 - *S_PARTIAL_FLUSH | ||
841 | * 5 - TS events | ||
842 | */ | ||
778 | #define PACKET3_EVENT_WRITE_EOP 0x47 | 843 | #define PACKET3_EVENT_WRITE_EOP 0x47 |
844 | #define DATA_SEL(x) ((x) << 29) | ||
845 | /* 0 - discard | ||
846 | * 1 - send low 32bit data | ||
847 | * 2 - send 64bit data | ||
848 | * 3 - send 64bit counter value | ||
849 | */ | ||
850 | #define INT_SEL(x) ((x) << 24) | ||
851 | /* 0 - none | ||
852 | * 1 - interrupt only (DATA_SEL = 0) | ||
853 | * 2 - interrupt when data write is confirmed | ||
854 | */ | ||
779 | #define PACKET3_ONE_REG_WRITE 0x57 | 855 | #define PACKET3_ONE_REG_WRITE 0x57 |
780 | #define PACKET3_SET_CONFIG_REG 0x68 | 856 | #define PACKET3_SET_CONFIG_REG 0x68 |
781 | #define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000 | 857 | #define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000 |
@@ -1228,6 +1304,14 @@ | |||
1228 | #define V_038004_FMT_16_16_16_FLOAT 0x0000002E | 1304 | #define V_038004_FMT_16_16_16_FLOAT 0x0000002E |
1229 | #define V_038004_FMT_32_32_32 0x0000002F | 1305 | #define V_038004_FMT_32_32_32 0x0000002F |
1230 | #define V_038004_FMT_32_32_32_FLOAT 0x00000030 | 1306 | #define V_038004_FMT_32_32_32_FLOAT 0x00000030 |
1307 | #define V_038004_FMT_BC1 0x00000031 | ||
1308 | #define V_038004_FMT_BC2 0x00000032 | ||
1309 | #define V_038004_FMT_BC3 0x00000033 | ||
1310 | #define V_038004_FMT_BC4 0x00000034 | ||
1311 | #define V_038004_FMT_BC5 0x00000035 | ||
1312 | #define V_038004_FMT_BC6 0x00000036 | ||
1313 | #define V_038004_FMT_BC7 0x00000037 | ||
1314 | #define V_038004_FMT_32_AS_32_32_32_32 0x00000038 | ||
1231 | #define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010 | 1315 | #define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010 |
1232 | #define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0) | 1316 | #define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0) |
1233 | #define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3) | 1317 | #define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3) |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 9ff38c99a6ea..ef0e0e016914 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -69,6 +69,7 @@ | |||
69 | #include <ttm/ttm_bo_driver.h> | 69 | #include <ttm/ttm_bo_driver.h> |
70 | #include <ttm/ttm_placement.h> | 70 | #include <ttm/ttm_placement.h> |
71 | #include <ttm/ttm_module.h> | 71 | #include <ttm/ttm_module.h> |
72 | #include <ttm/ttm_execbuf_util.h> | ||
72 | 73 | ||
73 | #include "radeon_family.h" | 74 | #include "radeon_family.h" |
74 | #include "radeon_mode.h" | 75 | #include "radeon_mode.h" |
@@ -88,10 +89,10 @@ extern int radeon_benchmarking; | |||
88 | extern int radeon_testing; | 89 | extern int radeon_testing; |
89 | extern int radeon_connector_table; | 90 | extern int radeon_connector_table; |
90 | extern int radeon_tv; | 91 | extern int radeon_tv; |
91 | extern int radeon_new_pll; | ||
92 | extern int radeon_audio; | 92 | extern int radeon_audio; |
93 | extern int radeon_disp_priority; | 93 | extern int radeon_disp_priority; |
94 | extern int radeon_hw_i2c; | 94 | extern int radeon_hw_i2c; |
95 | extern int radeon_pcie_gen2; | ||
95 | 96 | ||
96 | /* | 97 | /* |
97 | * Copy from radeon_drv.h so we don't have to include both and have conflicting | 98 | * Copy from radeon_drv.h so we don't have to include both and have conflicting |
@@ -164,6 +165,7 @@ struct radeon_clock { | |||
164 | uint32_t default_sclk; | 165 | uint32_t default_sclk; |
165 | uint32_t default_dispclk; | 166 | uint32_t default_dispclk; |
166 | uint32_t dp_extclk; | 167 | uint32_t dp_extclk; |
168 | uint32_t max_pixel_clock; | ||
167 | }; | 169 | }; |
168 | 170 | ||
169 | /* | 171 | /* |
@@ -176,11 +178,13 @@ void radeon_pm_suspend(struct radeon_device *rdev); | |||
176 | void radeon_pm_resume(struct radeon_device *rdev); | 178 | void radeon_pm_resume(struct radeon_device *rdev); |
177 | void radeon_combios_get_power_modes(struct radeon_device *rdev); | 179 | void radeon_combios_get_power_modes(struct radeon_device *rdev); |
178 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); | 180 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); |
179 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); | 181 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); |
182 | int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage); | ||
180 | void rs690_pm_info(struct radeon_device *rdev); | 183 | void rs690_pm_info(struct radeon_device *rdev); |
181 | extern u32 rv6xx_get_temp(struct radeon_device *rdev); | 184 | extern int rv6xx_get_temp(struct radeon_device *rdev); |
182 | extern u32 rv770_get_temp(struct radeon_device *rdev); | 185 | extern int rv770_get_temp(struct radeon_device *rdev); |
183 | extern u32 evergreen_get_temp(struct radeon_device *rdev); | 186 | extern int evergreen_get_temp(struct radeon_device *rdev); |
187 | extern int sumo_get_temp(struct radeon_device *rdev); | ||
184 | 188 | ||
185 | /* | 189 | /* |
186 | * Fences. | 190 | * Fences. |
@@ -256,17 +260,17 @@ struct radeon_bo { | |||
256 | int surface_reg; | 260 | int surface_reg; |
257 | /* Constant after initialization */ | 261 | /* Constant after initialization */ |
258 | struct radeon_device *rdev; | 262 | struct radeon_device *rdev; |
259 | struct drm_gem_object *gobj; | 263 | struct drm_gem_object gem_base; |
260 | }; | 264 | }; |
265 | #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) | ||
261 | 266 | ||
262 | struct radeon_bo_list { | 267 | struct radeon_bo_list { |
263 | struct list_head list; | 268 | struct ttm_validate_buffer tv; |
264 | struct radeon_bo *bo; | 269 | struct radeon_bo *bo; |
265 | uint64_t gpu_offset; | 270 | uint64_t gpu_offset; |
266 | unsigned rdomain; | 271 | unsigned rdomain; |
267 | unsigned wdomain; | 272 | unsigned wdomain; |
268 | u32 tiling_flags; | 273 | u32 tiling_flags; |
269 | bool reserved; | ||
270 | }; | 274 | }; |
271 | 275 | ||
272 | /* | 276 | /* |
@@ -287,6 +291,15 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, | |||
287 | uint64_t *gpu_addr); | 291 | uint64_t *gpu_addr); |
288 | void radeon_gem_object_unpin(struct drm_gem_object *obj); | 292 | void radeon_gem_object_unpin(struct drm_gem_object *obj); |
289 | 293 | ||
294 | int radeon_mode_dumb_create(struct drm_file *file_priv, | ||
295 | struct drm_device *dev, | ||
296 | struct drm_mode_create_dumb *args); | ||
297 | int radeon_mode_dumb_mmap(struct drm_file *filp, | ||
298 | struct drm_device *dev, | ||
299 | uint32_t handle, uint64_t *offset_p); | ||
300 | int radeon_mode_dumb_destroy(struct drm_file *file_priv, | ||
301 | struct drm_device *dev, | ||
302 | uint32_t handle); | ||
290 | 303 | ||
291 | /* | 304 | /* |
292 | * GART structures, functions & helpers | 305 | * GART structures, functions & helpers |
@@ -318,6 +331,7 @@ struct radeon_gart { | |||
318 | union radeon_gart_table table; | 331 | union radeon_gart_table table; |
319 | struct page **pages; | 332 | struct page **pages; |
320 | dma_addr_t *pages_addr; | 333 | dma_addr_t *pages_addr; |
334 | bool *ttm_alloced; | ||
321 | bool ready; | 335 | bool ready; |
322 | }; | 336 | }; |
323 | 337 | ||
@@ -330,7 +344,8 @@ void radeon_gart_fini(struct radeon_device *rdev); | |||
330 | void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | 344 | void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, |
331 | int pages); | 345 | int pages); |
332 | int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | 346 | int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, |
333 | int pages, struct page **pagelist); | 347 | int pages, struct page **pagelist, |
348 | dma_addr_t *dma_addr); | ||
334 | 349 | ||
335 | 350 | ||
336 | /* | 351 | /* |
@@ -344,7 +359,6 @@ struct radeon_mc { | |||
344 | * about vram size near mc fb location */ | 359 | * about vram size near mc fb location */ |
345 | u64 mc_vram_size; | 360 | u64 mc_vram_size; |
346 | u64 visible_vram_size; | 361 | u64 visible_vram_size; |
347 | u64 active_vram_size; | ||
348 | u64 gtt_size; | 362 | u64 gtt_size; |
349 | u64 gtt_start; | 363 | u64 gtt_start; |
350 | u64 gtt_end; | 364 | u64 gtt_end; |
@@ -366,6 +380,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev); | |||
366 | */ | 380 | */ |
367 | struct radeon_scratch { | 381 | struct radeon_scratch { |
368 | unsigned num_reg; | 382 | unsigned num_reg; |
383 | uint32_t reg_base; | ||
369 | bool free[32]; | 384 | bool free[32]; |
370 | uint32_t reg[32]; | 385 | uint32_t reg[32]; |
371 | }; | 386 | }; |
@@ -377,11 +392,56 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg); | |||
377 | /* | 392 | /* |
378 | * IRQS. | 393 | * IRQS. |
379 | */ | 394 | */ |
395 | |||
396 | struct radeon_unpin_work { | ||
397 | struct work_struct work; | ||
398 | struct radeon_device *rdev; | ||
399 | int crtc_id; | ||
400 | struct radeon_fence *fence; | ||
401 | struct drm_pending_vblank_event *event; | ||
402 | struct radeon_bo *old_rbo; | ||
403 | u64 new_crtc_base; | ||
404 | }; | ||
405 | |||
406 | struct r500_irq_stat_regs { | ||
407 | u32 disp_int; | ||
408 | }; | ||
409 | |||
410 | struct r600_irq_stat_regs { | ||
411 | u32 disp_int; | ||
412 | u32 disp_int_cont; | ||
413 | u32 disp_int_cont2; | ||
414 | u32 d1grph_int; | ||
415 | u32 d2grph_int; | ||
416 | }; | ||
417 | |||
418 | struct evergreen_irq_stat_regs { | ||
419 | u32 disp_int; | ||
420 | u32 disp_int_cont; | ||
421 | u32 disp_int_cont2; | ||
422 | u32 disp_int_cont3; | ||
423 | u32 disp_int_cont4; | ||
424 | u32 disp_int_cont5; | ||
425 | u32 d1grph_int; | ||
426 | u32 d2grph_int; | ||
427 | u32 d3grph_int; | ||
428 | u32 d4grph_int; | ||
429 | u32 d5grph_int; | ||
430 | u32 d6grph_int; | ||
431 | }; | ||
432 | |||
433 | union radeon_irq_stat_regs { | ||
434 | struct r500_irq_stat_regs r500; | ||
435 | struct r600_irq_stat_regs r600; | ||
436 | struct evergreen_irq_stat_regs evergreen; | ||
437 | }; | ||
438 | |||
380 | struct radeon_irq { | 439 | struct radeon_irq { |
381 | bool installed; | 440 | bool installed; |
382 | bool sw_int; | 441 | bool sw_int; |
383 | /* FIXME: use a define max crtc rather than hardcode it */ | 442 | /* FIXME: use a define max crtc rather than hardcode it */ |
384 | bool crtc_vblank_int[6]; | 443 | bool crtc_vblank_int[6]; |
444 | bool pflip[6]; | ||
385 | wait_queue_head_t vblank_queue; | 445 | wait_queue_head_t vblank_queue; |
386 | /* FIXME: use defines for max hpd/dacs */ | 446 | /* FIXME: use defines for max hpd/dacs */ |
387 | bool hpd[6]; | 447 | bool hpd[6]; |
@@ -392,12 +452,17 @@ struct radeon_irq { | |||
392 | bool hdmi[2]; | 452 | bool hdmi[2]; |
393 | spinlock_t sw_lock; | 453 | spinlock_t sw_lock; |
394 | int sw_refcount; | 454 | int sw_refcount; |
455 | union radeon_irq_stat_regs stat_regs; | ||
456 | spinlock_t pflip_lock[6]; | ||
457 | int pflip_refcount[6]; | ||
395 | }; | 458 | }; |
396 | 459 | ||
397 | int radeon_irq_kms_init(struct radeon_device *rdev); | 460 | int radeon_irq_kms_init(struct radeon_device *rdev); |
398 | void radeon_irq_kms_fini(struct radeon_device *rdev); | 461 | void radeon_irq_kms_fini(struct radeon_device *rdev); |
399 | void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev); | 462 | void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev); |
400 | void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); | 463 | void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); |
464 | void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); | ||
465 | void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); | ||
401 | 466 | ||
402 | /* | 467 | /* |
403 | * CP & ring. | 468 | * CP & ring. |
@@ -594,8 +659,17 @@ struct radeon_wb { | |||
594 | struct radeon_bo *wb_obj; | 659 | struct radeon_bo *wb_obj; |
595 | volatile uint32_t *wb; | 660 | volatile uint32_t *wb; |
596 | uint64_t gpu_addr; | 661 | uint64_t gpu_addr; |
662 | bool enabled; | ||
663 | bool use_event; | ||
597 | }; | 664 | }; |
598 | 665 | ||
666 | #define RADEON_WB_SCRATCH_OFFSET 0 | ||
667 | #define RADEON_WB_CP_RPTR_OFFSET 1024 | ||
668 | #define RADEON_WB_CP1_RPTR_OFFSET 1280 | ||
669 | #define RADEON_WB_CP2_RPTR_OFFSET 1536 | ||
670 | #define R600_WB_IH_WPTR_OFFSET 2048 | ||
671 | #define R600_WB_EVENT_OFFSET 3072 | ||
672 | |||
599 | /** | 673 | /** |
600 | * struct radeon_pm - power management datas | 674 | * struct radeon_pm - power management datas |
601 | * @max_bandwidth: maximum bandwidth the gpu has (MByte/s) | 675 | * @max_bandwidth: maximum bandwidth the gpu has (MByte/s) |
@@ -607,11 +681,11 @@ struct radeon_wb { | |||
607 | * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP) | 681 | * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP) |
608 | * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP) | 682 | * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP) |
609 | * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP) | 683 | * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP) |
610 | * @sclk: GPU clock Mhz (core bandwith depends of this clock) | 684 | * @sclk: GPU clock Mhz (core bandwidth depends of this clock) |
611 | * @needed_bandwidth: current bandwidth needs | 685 | * @needed_bandwidth: current bandwidth needs |
612 | * | 686 | * |
613 | * It keeps track of various data needed to take powermanagement decision. | 687 | * It keeps track of various data needed to take powermanagement decision. |
614 | * Bandwith need is used to determine minimun clock of the GPU and memory. | 688 | * Bandwidth need is used to determine minimun clock of the GPU and memory. |
615 | * Equation between gpu/memory clock and available bandwidth is hw dependent | 689 | * Equation between gpu/memory clock and available bandwidth is hw dependent |
616 | * (type of memory, bus size, efficiency, ...) | 690 | * (type of memory, bus size, efficiency, ...) |
617 | */ | 691 | */ |
@@ -680,6 +754,8 @@ enum radeon_int_thermal_type { | |||
680 | THERMAL_TYPE_RV6XX, | 754 | THERMAL_TYPE_RV6XX, |
681 | THERMAL_TYPE_RV770, | 755 | THERMAL_TYPE_RV770, |
682 | THERMAL_TYPE_EVERGREEN, | 756 | THERMAL_TYPE_EVERGREEN, |
757 | THERMAL_TYPE_SUMO, | ||
758 | THERMAL_TYPE_NI, | ||
683 | }; | 759 | }; |
684 | 760 | ||
685 | struct radeon_voltage { | 761 | struct radeon_voltage { |
@@ -693,7 +769,9 @@ struct radeon_voltage { | |||
693 | u8 vddci_id; /* index into vddci voltage table */ | 769 | u8 vddci_id; /* index into vddci voltage table */ |
694 | bool vddci_enabled; | 770 | bool vddci_enabled; |
695 | /* r6xx+ sw */ | 771 | /* r6xx+ sw */ |
696 | u32 voltage; | 772 | u16 voltage; |
773 | /* evergreen+ vddci */ | ||
774 | u16 vddci; | ||
697 | }; | 775 | }; |
698 | 776 | ||
699 | /* clock mode flags */ | 777 | /* clock mode flags */ |
@@ -751,8 +829,7 @@ struct radeon_pm { | |||
751 | fixed20_12 sclk; | 829 | fixed20_12 sclk; |
752 | fixed20_12 mclk; | 830 | fixed20_12 mclk; |
753 | fixed20_12 needed_bandwidth; | 831 | fixed20_12 needed_bandwidth; |
754 | /* XXX: use a define for num power modes */ | 832 | struct radeon_power_state *power_state; |
755 | struct radeon_power_state power_state[8]; | ||
756 | /* number of valid power states */ | 833 | /* number of valid power states */ |
757 | int num_power_states; | 834 | int num_power_states; |
758 | int current_power_state_index; | 835 | int current_power_state_index; |
@@ -762,7 +839,12 @@ struct radeon_pm { | |||
762 | int default_power_state_index; | 839 | int default_power_state_index; |
763 | u32 current_sclk; | 840 | u32 current_sclk; |
764 | u32 current_mclk; | 841 | u32 current_mclk; |
765 | u32 current_vddc; | 842 | u16 current_vddc; |
843 | u16 current_vddci; | ||
844 | u32 default_sclk; | ||
845 | u32 default_mclk; | ||
846 | u16 default_vddc; | ||
847 | u16 default_vddci; | ||
766 | struct radeon_i2c_chan *i2c_bus; | 848 | struct radeon_i2c_chan *i2c_bus; |
767 | /* selected pm method */ | 849 | /* selected pm method */ |
768 | enum radeon_pm_method pm_method; | 850 | enum radeon_pm_method pm_method; |
@@ -874,6 +956,10 @@ struct radeon_asic { | |||
874 | void (*pm_finish)(struct radeon_device *rdev); | 956 | void (*pm_finish)(struct radeon_device *rdev); |
875 | void (*pm_init_profile)(struct radeon_device *rdev); | 957 | void (*pm_init_profile)(struct radeon_device *rdev); |
876 | void (*pm_get_dynpm_state)(struct radeon_device *rdev); | 958 | void (*pm_get_dynpm_state)(struct radeon_device *rdev); |
959 | /* pageflipping */ | ||
960 | void (*pre_page_flip)(struct radeon_device *rdev, int crtc); | ||
961 | u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); | ||
962 | void (*post_page_flip)(struct radeon_device *rdev, int crtc); | ||
877 | }; | 963 | }; |
878 | 964 | ||
879 | /* | 965 | /* |
@@ -968,6 +1054,46 @@ struct evergreen_asic { | |||
968 | unsigned tiling_npipes; | 1054 | unsigned tiling_npipes; |
969 | unsigned tiling_group_size; | 1055 | unsigned tiling_group_size; |
970 | unsigned tile_config; | 1056 | unsigned tile_config; |
1057 | struct r100_gpu_lockup lockup; | ||
1058 | }; | ||
1059 | |||
1060 | struct cayman_asic { | ||
1061 | unsigned max_shader_engines; | ||
1062 | unsigned max_pipes_per_simd; | ||
1063 | unsigned max_tile_pipes; | ||
1064 | unsigned max_simds_per_se; | ||
1065 | unsigned max_backends_per_se; | ||
1066 | unsigned max_texture_channel_caches; | ||
1067 | unsigned max_gprs; | ||
1068 | unsigned max_threads; | ||
1069 | unsigned max_gs_threads; | ||
1070 | unsigned max_stack_entries; | ||
1071 | unsigned sx_num_of_sets; | ||
1072 | unsigned sx_max_export_size; | ||
1073 | unsigned sx_max_export_pos_size; | ||
1074 | unsigned sx_max_export_smx_size; | ||
1075 | unsigned max_hw_contexts; | ||
1076 | unsigned sq_num_cf_insts; | ||
1077 | unsigned sc_prim_fifo_size; | ||
1078 | unsigned sc_hiz_tile_fifo_size; | ||
1079 | unsigned sc_earlyz_tile_fifo_size; | ||
1080 | |||
1081 | unsigned num_shader_engines; | ||
1082 | unsigned num_shader_pipes_per_simd; | ||
1083 | unsigned num_tile_pipes; | ||
1084 | unsigned num_simds_per_se; | ||
1085 | unsigned num_backends_per_se; | ||
1086 | unsigned backend_disable_mask_per_asic; | ||
1087 | unsigned backend_map; | ||
1088 | unsigned num_texture_channel_caches; | ||
1089 | unsigned mem_max_burst_length_bytes; | ||
1090 | unsigned mem_row_size_in_kb; | ||
1091 | unsigned shader_engine_tile_size; | ||
1092 | unsigned num_gpus; | ||
1093 | unsigned multi_gpu_tile_size; | ||
1094 | |||
1095 | unsigned tile_config; | ||
1096 | struct r100_gpu_lockup lockup; | ||
971 | }; | 1097 | }; |
972 | 1098 | ||
973 | union radeon_asic_config { | 1099 | union radeon_asic_config { |
@@ -976,6 +1102,7 @@ union radeon_asic_config { | |||
976 | struct r600_asic r600; | 1102 | struct r600_asic r600; |
977 | struct rv770_asic rv770; | 1103 | struct rv770_asic rv770; |
978 | struct evergreen_asic evergreen; | 1104 | struct evergreen_asic evergreen; |
1105 | struct cayman_asic cayman; | ||
979 | }; | 1106 | }; |
980 | 1107 | ||
981 | /* | 1108 | /* |
@@ -1066,6 +1193,9 @@ struct radeon_device { | |||
1066 | struct radeon_mman mman; | 1193 | struct radeon_mman mman; |
1067 | struct radeon_fence_driver fence_drv; | 1194 | struct radeon_fence_driver fence_drv; |
1068 | struct radeon_cp cp; | 1195 | struct radeon_cp cp; |
1196 | /* cayman compute rings */ | ||
1197 | struct radeon_cp cp1; | ||
1198 | struct radeon_cp cp2; | ||
1069 | struct radeon_ib_pool ib_pool; | 1199 | struct radeon_ib_pool ib_pool; |
1070 | struct radeon_irq irq; | 1200 | struct radeon_irq irq; |
1071 | struct radeon_asic *asic; | 1201 | struct radeon_asic *asic; |
@@ -1084,11 +1214,11 @@ struct radeon_device { | |||
1084 | const struct firmware *me_fw; /* all family ME firmware */ | 1214 | const struct firmware *me_fw; /* all family ME firmware */ |
1085 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ | 1215 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ |
1086 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ | 1216 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ |
1217 | const struct firmware *mc_fw; /* NI MC firmware */ | ||
1087 | struct r600_blit r600_blit; | 1218 | struct r600_blit r600_blit; |
1088 | struct r700_vram_scratch vram_scratch; | 1219 | struct r700_vram_scratch vram_scratch; |
1089 | int msi_enabled; /* msi enabled */ | 1220 | int msi_enabled; /* msi enabled */ |
1090 | struct r600_ih ih; /* r6/700 interrupt ring */ | 1221 | struct r600_ih ih; /* r6/700 interrupt ring */ |
1091 | struct workqueue_struct *wq; | ||
1092 | struct work_struct hotplug_work; | 1222 | struct work_struct hotplug_work; |
1093 | int num_crtc; /* number of crtcs */ | 1223 | int num_crtc; /* number of crtcs */ |
1094 | struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ | 1224 | struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ |
@@ -1103,10 +1233,10 @@ struct radeon_device { | |||
1103 | uint8_t audio_status_bits; | 1233 | uint8_t audio_status_bits; |
1104 | uint8_t audio_category_code; | 1234 | uint8_t audio_category_code; |
1105 | 1235 | ||
1106 | bool powered_down; | ||
1107 | struct notifier_block acpi_nb; | 1236 | struct notifier_block acpi_nb; |
1108 | /* only one userspace can use Hyperz features at a time */ | 1237 | /* only one userspace can use Hyperz features or CMASK at a time */ |
1109 | struct drm_file *hyperz_filp; | 1238 | struct drm_file *hyperz_filp; |
1239 | struct drm_file *cmask_filp; | ||
1110 | /* i2c buses */ | 1240 | /* i2c buses */ |
1111 | struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS]; | 1241 | struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS]; |
1112 | }; | 1242 | }; |
@@ -1118,13 +1248,6 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1118 | void radeon_device_fini(struct radeon_device *rdev); | 1248 | void radeon_device_fini(struct radeon_device *rdev); |
1119 | int radeon_gpu_wait_for_idle(struct radeon_device *rdev); | 1249 | int radeon_gpu_wait_for_idle(struct radeon_device *rdev); |
1120 | 1250 | ||
1121 | /* r600 blit */ | ||
1122 | int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); | ||
1123 | void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); | ||
1124 | void r600_kms_blit_copy(struct radeon_device *rdev, | ||
1125 | u64 src_gpu_addr, u64 dst_gpu_addr, | ||
1126 | int size_bytes); | ||
1127 | |||
1128 | static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) | 1251 | static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) |
1129 | { | 1252 | { |
1130 | if (reg < rdev->rmmio_size) | 1253 | if (reg < rdev->rmmio_size) |
@@ -1175,6 +1298,8 @@ static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v) | |||
1175 | */ | 1298 | */ |
1176 | #define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) | 1299 | #define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) |
1177 | #define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) | 1300 | #define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) |
1301 | #define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg)) | ||
1302 | #define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg)) | ||
1178 | #define RREG32(reg) r100_mm_rreg(rdev, (reg)) | 1303 | #define RREG32(reg) r100_mm_rreg(rdev, (reg)) |
1179 | #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) | 1304 | #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) |
1180 | #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) | 1305 | #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) |
@@ -1248,10 +1373,25 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); | |||
1248 | (rdev->family == CHIP_RV410) || \ | 1373 | (rdev->family == CHIP_RV410) || \ |
1249 | (rdev->family == CHIP_RS400) || \ | 1374 | (rdev->family == CHIP_RS400) || \ |
1250 | (rdev->family == CHIP_RS480)) | 1375 | (rdev->family == CHIP_RS480)) |
1376 | #define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \ | ||
1377 | (rdev->ddev->pdev->device == 0x9443) || \ | ||
1378 | (rdev->ddev->pdev->device == 0x944B) || \ | ||
1379 | (rdev->ddev->pdev->device == 0x9506) || \ | ||
1380 | (rdev->ddev->pdev->device == 0x9509) || \ | ||
1381 | (rdev->ddev->pdev->device == 0x950F) || \ | ||
1382 | (rdev->ddev->pdev->device == 0x689C) || \ | ||
1383 | (rdev->ddev->pdev->device == 0x689D)) | ||
1251 | #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) | 1384 | #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) |
1385 | #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \ | ||
1386 | (rdev->family == CHIP_RS690) || \ | ||
1387 | (rdev->family == CHIP_RS740) || \ | ||
1388 | (rdev->family >= CHIP_R600)) | ||
1252 | #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) | 1389 | #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) |
1253 | #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) | 1390 | #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) |
1254 | #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) | 1391 | #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) |
1392 | #define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \ | ||
1393 | (rdev->flags & RADEON_IS_IGP)) | ||
1394 | #define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS)) | ||
1255 | 1395 | ||
1256 | /* | 1396 | /* |
1257 | * BIOS helpers. | 1397 | * BIOS helpers. |
@@ -1327,6 +1467,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
1327 | #define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) | 1467 | #define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) |
1328 | #define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) | 1468 | #define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) |
1329 | #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) | 1469 | #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) |
1470 | #define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc)) | ||
1471 | #define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base)) | ||
1472 | #define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc)) | ||
1330 | 1473 | ||
1331 | /* Common functions */ | 1474 | /* Common functions */ |
1332 | /* AGP */ | 1475 | /* AGP */ |
@@ -1341,6 +1484,9 @@ extern void radeon_update_bandwidth_info(struct radeon_device *rdev); | |||
1341 | extern void radeon_update_display_priority(struct radeon_device *rdev); | 1484 | extern void radeon_update_display_priority(struct radeon_device *rdev); |
1342 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); | 1485 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); |
1343 | extern void radeon_scratch_init(struct radeon_device *rdev); | 1486 | extern void radeon_scratch_init(struct radeon_device *rdev); |
1487 | extern void radeon_wb_fini(struct radeon_device *rdev); | ||
1488 | extern int radeon_wb_init(struct radeon_device *rdev); | ||
1489 | extern void radeon_wb_disable(struct radeon_device *rdev); | ||
1344 | extern void radeon_surface_init(struct radeon_device *rdev); | 1490 | extern void radeon_surface_init(struct radeon_device *rdev); |
1345 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); | 1491 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); |
1346 | extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); | 1492 | extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); |
@@ -1351,120 +1497,17 @@ extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *m | |||
1351 | extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); | 1497 | extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); |
1352 | extern int radeon_resume_kms(struct drm_device *dev); | 1498 | extern int radeon_resume_kms(struct drm_device *dev); |
1353 | extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); | 1499 | extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); |
1500 | extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); | ||
1354 | 1501 | ||
1355 | /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ | 1502 | /* |
1356 | extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp); | 1503 | * r600 functions used by radeon_encoder.c |
1357 | extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp); | 1504 | */ |
1358 | |||
1359 | /* rv200,rv250,rv280 */ | ||
1360 | extern void r200_set_safe_registers(struct radeon_device *rdev); | ||
1361 | |||
1362 | /* r300,r350,rv350,rv370,rv380 */ | ||
1363 | extern void r300_set_reg_safe(struct radeon_device *rdev); | ||
1364 | extern void r300_mc_program(struct radeon_device *rdev); | ||
1365 | extern void r300_mc_init(struct radeon_device *rdev); | ||
1366 | extern void r300_clock_startup(struct radeon_device *rdev); | ||
1367 | extern int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
1368 | extern int rv370_pcie_gart_init(struct radeon_device *rdev); | ||
1369 | extern void rv370_pcie_gart_fini(struct radeon_device *rdev); | ||
1370 | extern int rv370_pcie_gart_enable(struct radeon_device *rdev); | ||
1371 | extern void rv370_pcie_gart_disable(struct radeon_device *rdev); | ||
1372 | |||
1373 | /* r420,r423,rv410 */ | ||
1374 | extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); | ||
1375 | extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); | ||
1376 | extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); | ||
1377 | extern void r420_pipes_init(struct radeon_device *rdev); | ||
1378 | |||
1379 | /* rv515 */ | ||
1380 | struct rv515_mc_save { | ||
1381 | u32 d1vga_control; | ||
1382 | u32 d2vga_control; | ||
1383 | u32 vga_render_control; | ||
1384 | u32 vga_hdp_control; | ||
1385 | u32 d1crtc_control; | ||
1386 | u32 d2crtc_control; | ||
1387 | }; | ||
1388 | extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev); | ||
1389 | extern void rv515_vga_render_disable(struct radeon_device *rdev); | ||
1390 | extern void rv515_set_safe_registers(struct radeon_device *rdev); | ||
1391 | extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save); | ||
1392 | extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save); | ||
1393 | extern void rv515_clock_startup(struct radeon_device *rdev); | ||
1394 | extern void rv515_debugfs(struct radeon_device *rdev); | ||
1395 | extern int rv515_suspend(struct radeon_device *rdev); | ||
1396 | |||
1397 | /* rs400 */ | ||
1398 | extern int rs400_gart_init(struct radeon_device *rdev); | ||
1399 | extern int rs400_gart_enable(struct radeon_device *rdev); | ||
1400 | extern void rs400_gart_adjust_size(struct radeon_device *rdev); | ||
1401 | extern void rs400_gart_disable(struct radeon_device *rdev); | ||
1402 | extern void rs400_gart_fini(struct radeon_device *rdev); | ||
1403 | |||
1404 | /* rs600 */ | ||
1405 | extern void rs600_set_safe_registers(struct radeon_device *rdev); | ||
1406 | extern int rs600_irq_set(struct radeon_device *rdev); | ||
1407 | extern void rs600_irq_disable(struct radeon_device *rdev); | ||
1408 | |||
1409 | /* rs690, rs740 */ | ||
1410 | extern void rs690_line_buffer_adjust(struct radeon_device *rdev, | ||
1411 | struct drm_display_mode *mode1, | ||
1412 | struct drm_display_mode *mode2); | ||
1413 | |||
1414 | /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ | ||
1415 | extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); | ||
1416 | extern bool r600_card_posted(struct radeon_device *rdev); | ||
1417 | extern void r600_cp_stop(struct radeon_device *rdev); | ||
1418 | extern int r600_cp_start(struct radeon_device *rdev); | ||
1419 | extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); | ||
1420 | extern int r600_cp_resume(struct radeon_device *rdev); | ||
1421 | extern void r600_cp_fini(struct radeon_device *rdev); | ||
1422 | extern int r600_count_pipe_bits(uint32_t val); | ||
1423 | extern int r600_mc_wait_for_idle(struct radeon_device *rdev); | ||
1424 | extern int r600_pcie_gart_init(struct radeon_device *rdev); | ||
1425 | extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); | ||
1426 | extern int r600_ib_test(struct radeon_device *rdev); | ||
1427 | extern int r600_ring_test(struct radeon_device *rdev); | ||
1428 | extern void r600_wb_fini(struct radeon_device *rdev); | ||
1429 | extern int r600_wb_enable(struct radeon_device *rdev); | ||
1430 | extern void r600_wb_disable(struct radeon_device *rdev); | ||
1431 | extern void r600_scratch_init(struct radeon_device *rdev); | ||
1432 | extern int r600_blit_init(struct radeon_device *rdev); | ||
1433 | extern void r600_blit_fini(struct radeon_device *rdev); | ||
1434 | extern int r600_init_microcode(struct radeon_device *rdev); | ||
1435 | extern int r600_asic_reset(struct radeon_device *rdev); | ||
1436 | /* r600 irq */ | ||
1437 | extern int r600_irq_init(struct radeon_device *rdev); | ||
1438 | extern void r600_irq_fini(struct radeon_device *rdev); | ||
1439 | extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); | ||
1440 | extern int r600_irq_set(struct radeon_device *rdev); | ||
1441 | extern void r600_irq_suspend(struct radeon_device *rdev); | ||
1442 | extern void r600_disable_interrupts(struct radeon_device *rdev); | ||
1443 | extern void r600_rlc_stop(struct radeon_device *rdev); | ||
1444 | /* r600 audio */ | ||
1445 | extern int r600_audio_init(struct radeon_device *rdev); | ||
1446 | extern int r600_audio_tmds_index(struct drm_encoder *encoder); | ||
1447 | extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); | ||
1448 | extern int r600_audio_channels(struct radeon_device *rdev); | ||
1449 | extern int r600_audio_bits_per_sample(struct radeon_device *rdev); | ||
1450 | extern int r600_audio_rate(struct radeon_device *rdev); | ||
1451 | extern uint8_t r600_audio_status_bits(struct radeon_device *rdev); | ||
1452 | extern uint8_t r600_audio_category_code(struct radeon_device *rdev); | ||
1453 | extern void r600_audio_schedule_polling(struct radeon_device *rdev); | ||
1454 | extern void r600_audio_enable_polling(struct drm_encoder *encoder); | ||
1455 | extern void r600_audio_disable_polling(struct drm_encoder *encoder); | ||
1456 | extern void r600_audio_fini(struct radeon_device *rdev); | ||
1457 | extern void r600_hdmi_init(struct drm_encoder *encoder); | ||
1458 | extern void r600_hdmi_enable(struct drm_encoder *encoder); | 1505 | extern void r600_hdmi_enable(struct drm_encoder *encoder); |
1459 | extern void r600_hdmi_disable(struct drm_encoder *encoder); | 1506 | extern void r600_hdmi_disable(struct drm_encoder *encoder); |
1460 | extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); | 1507 | extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); |
1461 | extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); | ||
1462 | extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); | ||
1463 | 1508 | ||
1464 | extern void r700_cp_stop(struct radeon_device *rdev); | 1509 | extern int ni_init_microcode(struct radeon_device *rdev); |
1465 | extern void r700_cp_fini(struct radeon_device *rdev); | 1510 | extern int ni_mc_load_microcode(struct radeon_device *rdev); |
1466 | extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); | ||
1467 | extern int evergreen_irq_set(struct radeon_device *rdev); | ||
1468 | 1511 | ||
1469 | /* radeon_acpi.c */ | 1512 | /* radeon_acpi.c */ |
1470 | #if defined(CONFIG_ACPI) | 1513 | #if defined(CONFIG_ACPI) |
@@ -1473,14 +1516,6 @@ extern int radeon_acpi_init(struct radeon_device *rdev); | |||
1473 | static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; } | 1516 | static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; } |
1474 | #endif | 1517 | #endif |
1475 | 1518 | ||
1476 | /* evergreen */ | ||
1477 | struct evergreen_mc_save { | ||
1478 | u32 vga_control[6]; | ||
1479 | u32 vga_render_control; | ||
1480 | u32 vga_hdp_control; | ||
1481 | u32 crtc_control[6]; | ||
1482 | }; | ||
1483 | |||
1484 | #include "radeon_object.h" | 1519 | #include "radeon_object.h" |
1485 | 1520 | ||
1486 | #endif | 1521 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 25e1dd197791..b2449629537d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev) | |||
94 | rdev->mc_rreg = &rs600_mc_rreg; | 94 | rdev->mc_rreg = &rs600_mc_rreg; |
95 | rdev->mc_wreg = &rs600_mc_wreg; | 95 | rdev->mc_wreg = &rs600_mc_wreg; |
96 | } | 96 | } |
97 | if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) { | 97 | if (rdev->family >= CHIP_R600) { |
98 | rdev->pciep_rreg = &r600_pciep_rreg; | 98 | rdev->pciep_rreg = &r600_pciep_rreg; |
99 | rdev->pciep_wreg = &r600_pciep_wreg; | 99 | rdev->pciep_wreg = &r600_pciep_wreg; |
100 | } | 100 | } |
@@ -171,6 +171,9 @@ static struct radeon_asic r100_asic = { | |||
171 | .pm_finish = &r100_pm_finish, | 171 | .pm_finish = &r100_pm_finish, |
172 | .pm_init_profile = &r100_pm_init_profile, | 172 | .pm_init_profile = &r100_pm_init_profile, |
173 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 173 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
174 | .pre_page_flip = &r100_pre_page_flip, | ||
175 | .page_flip = &r100_page_flip, | ||
176 | .post_page_flip = &r100_post_page_flip, | ||
174 | }; | 177 | }; |
175 | 178 | ||
176 | static struct radeon_asic r200_asic = { | 179 | static struct radeon_asic r200_asic = { |
@@ -215,6 +218,9 @@ static struct radeon_asic r200_asic = { | |||
215 | .pm_finish = &r100_pm_finish, | 218 | .pm_finish = &r100_pm_finish, |
216 | .pm_init_profile = &r100_pm_init_profile, | 219 | .pm_init_profile = &r100_pm_init_profile, |
217 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 220 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
221 | .pre_page_flip = &r100_pre_page_flip, | ||
222 | .page_flip = &r100_page_flip, | ||
223 | .post_page_flip = &r100_post_page_flip, | ||
218 | }; | 224 | }; |
219 | 225 | ||
220 | static struct radeon_asic r300_asic = { | 226 | static struct radeon_asic r300_asic = { |
@@ -260,6 +266,9 @@ static struct radeon_asic r300_asic = { | |||
260 | .pm_finish = &r100_pm_finish, | 266 | .pm_finish = &r100_pm_finish, |
261 | .pm_init_profile = &r100_pm_init_profile, | 267 | .pm_init_profile = &r100_pm_init_profile, |
262 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 268 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
269 | .pre_page_flip = &r100_pre_page_flip, | ||
270 | .page_flip = &r100_page_flip, | ||
271 | .post_page_flip = &r100_post_page_flip, | ||
263 | }; | 272 | }; |
264 | 273 | ||
265 | static struct radeon_asic r300_asic_pcie = { | 274 | static struct radeon_asic r300_asic_pcie = { |
@@ -304,6 +313,9 @@ static struct radeon_asic r300_asic_pcie = { | |||
304 | .pm_finish = &r100_pm_finish, | 313 | .pm_finish = &r100_pm_finish, |
305 | .pm_init_profile = &r100_pm_init_profile, | 314 | .pm_init_profile = &r100_pm_init_profile, |
306 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 315 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
316 | .pre_page_flip = &r100_pre_page_flip, | ||
317 | .page_flip = &r100_page_flip, | ||
318 | .post_page_flip = &r100_post_page_flip, | ||
307 | }; | 319 | }; |
308 | 320 | ||
309 | static struct radeon_asic r420_asic = { | 321 | static struct radeon_asic r420_asic = { |
@@ -349,6 +361,9 @@ static struct radeon_asic r420_asic = { | |||
349 | .pm_finish = &r100_pm_finish, | 361 | .pm_finish = &r100_pm_finish, |
350 | .pm_init_profile = &r420_pm_init_profile, | 362 | .pm_init_profile = &r420_pm_init_profile, |
351 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 363 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
364 | .pre_page_flip = &r100_pre_page_flip, | ||
365 | .page_flip = &r100_page_flip, | ||
366 | .post_page_flip = &r100_post_page_flip, | ||
352 | }; | 367 | }; |
353 | 368 | ||
354 | static struct radeon_asic rs400_asic = { | 369 | static struct radeon_asic rs400_asic = { |
@@ -394,6 +409,9 @@ static struct radeon_asic rs400_asic = { | |||
394 | .pm_finish = &r100_pm_finish, | 409 | .pm_finish = &r100_pm_finish, |
395 | .pm_init_profile = &r100_pm_init_profile, | 410 | .pm_init_profile = &r100_pm_init_profile, |
396 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 411 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
412 | .pre_page_flip = &r100_pre_page_flip, | ||
413 | .page_flip = &r100_page_flip, | ||
414 | .post_page_flip = &r100_post_page_flip, | ||
397 | }; | 415 | }; |
398 | 416 | ||
399 | static struct radeon_asic rs600_asic = { | 417 | static struct radeon_asic rs600_asic = { |
@@ -439,6 +457,9 @@ static struct radeon_asic rs600_asic = { | |||
439 | .pm_finish = &rs600_pm_finish, | 457 | .pm_finish = &rs600_pm_finish, |
440 | .pm_init_profile = &r420_pm_init_profile, | 458 | .pm_init_profile = &r420_pm_init_profile, |
441 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 459 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
460 | .pre_page_flip = &rs600_pre_page_flip, | ||
461 | .page_flip = &rs600_page_flip, | ||
462 | .post_page_flip = &rs600_post_page_flip, | ||
442 | }; | 463 | }; |
443 | 464 | ||
444 | static struct radeon_asic rs690_asic = { | 465 | static struct radeon_asic rs690_asic = { |
@@ -484,6 +505,9 @@ static struct radeon_asic rs690_asic = { | |||
484 | .pm_finish = &rs600_pm_finish, | 505 | .pm_finish = &rs600_pm_finish, |
485 | .pm_init_profile = &r420_pm_init_profile, | 506 | .pm_init_profile = &r420_pm_init_profile, |
486 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 507 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
508 | .pre_page_flip = &rs600_pre_page_flip, | ||
509 | .page_flip = &rs600_page_flip, | ||
510 | .post_page_flip = &rs600_post_page_flip, | ||
487 | }; | 511 | }; |
488 | 512 | ||
489 | static struct radeon_asic rv515_asic = { | 513 | static struct radeon_asic rv515_asic = { |
@@ -529,6 +553,9 @@ static struct radeon_asic rv515_asic = { | |||
529 | .pm_finish = &rs600_pm_finish, | 553 | .pm_finish = &rs600_pm_finish, |
530 | .pm_init_profile = &r420_pm_init_profile, | 554 | .pm_init_profile = &r420_pm_init_profile, |
531 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 555 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
556 | .pre_page_flip = &rs600_pre_page_flip, | ||
557 | .page_flip = &rs600_page_flip, | ||
558 | .post_page_flip = &rs600_post_page_flip, | ||
532 | }; | 559 | }; |
533 | 560 | ||
534 | static struct radeon_asic r520_asic = { | 561 | static struct radeon_asic r520_asic = { |
@@ -574,6 +601,9 @@ static struct radeon_asic r520_asic = { | |||
574 | .pm_finish = &rs600_pm_finish, | 601 | .pm_finish = &rs600_pm_finish, |
575 | .pm_init_profile = &r420_pm_init_profile, | 602 | .pm_init_profile = &r420_pm_init_profile, |
576 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 603 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, |
604 | .pre_page_flip = &rs600_pre_page_flip, | ||
605 | .page_flip = &rs600_page_flip, | ||
606 | .post_page_flip = &rs600_post_page_flip, | ||
577 | }; | 607 | }; |
578 | 608 | ||
579 | static struct radeon_asic r600_asic = { | 609 | static struct radeon_asic r600_asic = { |
@@ -601,8 +631,8 @@ static struct radeon_asic r600_asic = { | |||
601 | .set_engine_clock = &radeon_atom_set_engine_clock, | 631 | .set_engine_clock = &radeon_atom_set_engine_clock, |
602 | .get_memory_clock = &radeon_atom_get_memory_clock, | 632 | .get_memory_clock = &radeon_atom_get_memory_clock, |
603 | .set_memory_clock = &radeon_atom_set_memory_clock, | 633 | .set_memory_clock = &radeon_atom_set_memory_clock, |
604 | .get_pcie_lanes = &rv370_get_pcie_lanes, | 634 | .get_pcie_lanes = &r600_get_pcie_lanes, |
605 | .set_pcie_lanes = NULL, | 635 | .set_pcie_lanes = &r600_set_pcie_lanes, |
606 | .set_clock_gating = NULL, | 636 | .set_clock_gating = NULL, |
607 | .set_surface_reg = r600_set_surface_reg, | 637 | .set_surface_reg = r600_set_surface_reg, |
608 | .clear_surface_reg = r600_clear_surface_reg, | 638 | .clear_surface_reg = r600_clear_surface_reg, |
@@ -618,6 +648,9 @@ static struct radeon_asic r600_asic = { | |||
618 | .pm_finish = &rs600_pm_finish, | 648 | .pm_finish = &rs600_pm_finish, |
619 | .pm_init_profile = &r600_pm_init_profile, | 649 | .pm_init_profile = &r600_pm_init_profile, |
620 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 650 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, |
651 | .pre_page_flip = &rs600_pre_page_flip, | ||
652 | .page_flip = &rs600_page_flip, | ||
653 | .post_page_flip = &rs600_post_page_flip, | ||
621 | }; | 654 | }; |
622 | 655 | ||
623 | static struct radeon_asic rs780_asic = { | 656 | static struct radeon_asic rs780_asic = { |
@@ -662,6 +695,9 @@ static struct radeon_asic rs780_asic = { | |||
662 | .pm_finish = &rs600_pm_finish, | 695 | .pm_finish = &rs600_pm_finish, |
663 | .pm_init_profile = &rs780_pm_init_profile, | 696 | .pm_init_profile = &rs780_pm_init_profile, |
664 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 697 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, |
698 | .pre_page_flip = &rs600_pre_page_flip, | ||
699 | .page_flip = &rs600_page_flip, | ||
700 | .post_page_flip = &rs600_post_page_flip, | ||
665 | }; | 701 | }; |
666 | 702 | ||
667 | static struct radeon_asic rv770_asic = { | 703 | static struct radeon_asic rv770_asic = { |
@@ -689,8 +725,8 @@ static struct radeon_asic rv770_asic = { | |||
689 | .set_engine_clock = &radeon_atom_set_engine_clock, | 725 | .set_engine_clock = &radeon_atom_set_engine_clock, |
690 | .get_memory_clock = &radeon_atom_get_memory_clock, | 726 | .get_memory_clock = &radeon_atom_get_memory_clock, |
691 | .set_memory_clock = &radeon_atom_set_memory_clock, | 727 | .set_memory_clock = &radeon_atom_set_memory_clock, |
692 | .get_pcie_lanes = &rv370_get_pcie_lanes, | 728 | .get_pcie_lanes = &r600_get_pcie_lanes, |
693 | .set_pcie_lanes = NULL, | 729 | .set_pcie_lanes = &r600_set_pcie_lanes, |
694 | .set_clock_gating = &radeon_atom_set_clock_gating, | 730 | .set_clock_gating = &radeon_atom_set_clock_gating, |
695 | .set_surface_reg = r600_set_surface_reg, | 731 | .set_surface_reg = r600_set_surface_reg, |
696 | .clear_surface_reg = r600_clear_surface_reg, | 732 | .clear_surface_reg = r600_clear_surface_reg, |
@@ -706,6 +742,9 @@ static struct radeon_asic rv770_asic = { | |||
706 | .pm_finish = &rs600_pm_finish, | 742 | .pm_finish = &rs600_pm_finish, |
707 | .pm_init_profile = &r600_pm_init_profile, | 743 | .pm_init_profile = &r600_pm_init_profile, |
708 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 744 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, |
745 | .pre_page_flip = &rs600_pre_page_flip, | ||
746 | .page_flip = &rv770_page_flip, | ||
747 | .post_page_flip = &rs600_post_page_flip, | ||
709 | }; | 748 | }; |
710 | 749 | ||
711 | static struct radeon_asic evergreen_asic = { | 750 | static struct radeon_asic evergreen_asic = { |
@@ -720,19 +759,66 @@ static struct radeon_asic evergreen_asic = { | |||
720 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, | 759 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, |
721 | .gart_set_page = &rs600_gart_set_page, | 760 | .gart_set_page = &rs600_gart_set_page, |
722 | .ring_test = &r600_ring_test, | 761 | .ring_test = &r600_ring_test, |
723 | .ring_ib_execute = &r600_ring_ib_execute, | 762 | .ring_ib_execute = &evergreen_ring_ib_execute, |
724 | .irq_set = &evergreen_irq_set, | 763 | .irq_set = &evergreen_irq_set, |
725 | .irq_process = &evergreen_irq_process, | 764 | .irq_process = &evergreen_irq_process, |
726 | .get_vblank_counter = &evergreen_get_vblank_counter, | 765 | .get_vblank_counter = &evergreen_get_vblank_counter, |
727 | .fence_ring_emit = &r600_fence_ring_emit, | 766 | .fence_ring_emit = &r600_fence_ring_emit, |
728 | .cs_parse = &evergreen_cs_parse, | 767 | .cs_parse = &evergreen_cs_parse, |
729 | .copy_blit = NULL, | 768 | .copy_blit = &evergreen_copy_blit, |
730 | .copy_dma = NULL, | 769 | .copy_dma = &evergreen_copy_blit, |
731 | .copy = NULL, | 770 | .copy = &evergreen_copy_blit, |
732 | .get_engine_clock = &radeon_atom_get_engine_clock, | 771 | .get_engine_clock = &radeon_atom_get_engine_clock, |
733 | .set_engine_clock = &radeon_atom_set_engine_clock, | 772 | .set_engine_clock = &radeon_atom_set_engine_clock, |
734 | .get_memory_clock = &radeon_atom_get_memory_clock, | 773 | .get_memory_clock = &radeon_atom_get_memory_clock, |
735 | .set_memory_clock = &radeon_atom_set_memory_clock, | 774 | .set_memory_clock = &radeon_atom_set_memory_clock, |
775 | .get_pcie_lanes = &r600_get_pcie_lanes, | ||
776 | .set_pcie_lanes = &r600_set_pcie_lanes, | ||
777 | .set_clock_gating = NULL, | ||
778 | .set_surface_reg = r600_set_surface_reg, | ||
779 | .clear_surface_reg = r600_clear_surface_reg, | ||
780 | .bandwidth_update = &evergreen_bandwidth_update, | ||
781 | .hpd_init = &evergreen_hpd_init, | ||
782 | .hpd_fini = &evergreen_hpd_fini, | ||
783 | .hpd_sense = &evergreen_hpd_sense, | ||
784 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | ||
785 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
786 | .gui_idle = &r600_gui_idle, | ||
787 | .pm_misc = &evergreen_pm_misc, | ||
788 | .pm_prepare = &evergreen_pm_prepare, | ||
789 | .pm_finish = &evergreen_pm_finish, | ||
790 | .pm_init_profile = &r600_pm_init_profile, | ||
791 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | ||
792 | .pre_page_flip = &evergreen_pre_page_flip, | ||
793 | .page_flip = &evergreen_page_flip, | ||
794 | .post_page_flip = &evergreen_post_page_flip, | ||
795 | }; | ||
796 | |||
797 | static struct radeon_asic sumo_asic = { | ||
798 | .init = &evergreen_init, | ||
799 | .fini = &evergreen_fini, | ||
800 | .suspend = &evergreen_suspend, | ||
801 | .resume = &evergreen_resume, | ||
802 | .cp_commit = &r600_cp_commit, | ||
803 | .gpu_is_lockup = &evergreen_gpu_is_lockup, | ||
804 | .asic_reset = &evergreen_asic_reset, | ||
805 | .vga_set_state = &r600_vga_set_state, | ||
806 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, | ||
807 | .gart_set_page = &rs600_gart_set_page, | ||
808 | .ring_test = &r600_ring_test, | ||
809 | .ring_ib_execute = &evergreen_ring_ib_execute, | ||
810 | .irq_set = &evergreen_irq_set, | ||
811 | .irq_process = &evergreen_irq_process, | ||
812 | .get_vblank_counter = &evergreen_get_vblank_counter, | ||
813 | .fence_ring_emit = &r600_fence_ring_emit, | ||
814 | .cs_parse = &evergreen_cs_parse, | ||
815 | .copy_blit = &evergreen_copy_blit, | ||
816 | .copy_dma = &evergreen_copy_blit, | ||
817 | .copy = &evergreen_copy_blit, | ||
818 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
819 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
820 | .get_memory_clock = NULL, | ||
821 | .set_memory_clock = NULL, | ||
736 | .get_pcie_lanes = NULL, | 822 | .get_pcie_lanes = NULL, |
737 | .set_pcie_lanes = NULL, | 823 | .set_pcie_lanes = NULL, |
738 | .set_clock_gating = NULL, | 824 | .set_clock_gating = NULL, |
@@ -743,17 +829,122 @@ static struct radeon_asic evergreen_asic = { | |||
743 | .hpd_fini = &evergreen_hpd_fini, | 829 | .hpd_fini = &evergreen_hpd_fini, |
744 | .hpd_sense = &evergreen_hpd_sense, | 830 | .hpd_sense = &evergreen_hpd_sense, |
745 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | 831 | .hpd_set_polarity = &evergreen_hpd_set_polarity, |
832 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
833 | .gui_idle = &r600_gui_idle, | ||
834 | .pm_misc = &evergreen_pm_misc, | ||
835 | .pm_prepare = &evergreen_pm_prepare, | ||
836 | .pm_finish = &evergreen_pm_finish, | ||
837 | .pm_init_profile = &rs780_pm_init_profile, | ||
838 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | ||
839 | .pre_page_flip = &evergreen_pre_page_flip, | ||
840 | .page_flip = &evergreen_page_flip, | ||
841 | .post_page_flip = &evergreen_post_page_flip, | ||
842 | }; | ||
843 | |||
844 | static struct radeon_asic btc_asic = { | ||
845 | .init = &evergreen_init, | ||
846 | .fini = &evergreen_fini, | ||
847 | .suspend = &evergreen_suspend, | ||
848 | .resume = &evergreen_resume, | ||
849 | .cp_commit = &r600_cp_commit, | ||
850 | .gpu_is_lockup = &evergreen_gpu_is_lockup, | ||
851 | .asic_reset = &evergreen_asic_reset, | ||
852 | .vga_set_state = &r600_vga_set_state, | ||
853 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, | ||
854 | .gart_set_page = &rs600_gart_set_page, | ||
855 | .ring_test = &r600_ring_test, | ||
856 | .ring_ib_execute = &evergreen_ring_ib_execute, | ||
857 | .irq_set = &evergreen_irq_set, | ||
858 | .irq_process = &evergreen_irq_process, | ||
859 | .get_vblank_counter = &evergreen_get_vblank_counter, | ||
860 | .fence_ring_emit = &r600_fence_ring_emit, | ||
861 | .cs_parse = &evergreen_cs_parse, | ||
862 | .copy_blit = &evergreen_copy_blit, | ||
863 | .copy_dma = &evergreen_copy_blit, | ||
864 | .copy = &evergreen_copy_blit, | ||
865 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
866 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
867 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
868 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
869 | .get_pcie_lanes = NULL, | ||
870 | .set_pcie_lanes = NULL, | ||
871 | .set_clock_gating = NULL, | ||
872 | .set_surface_reg = r600_set_surface_reg, | ||
873 | .clear_surface_reg = r600_clear_surface_reg, | ||
874 | .bandwidth_update = &evergreen_bandwidth_update, | ||
875 | .hpd_init = &evergreen_hpd_init, | ||
876 | .hpd_fini = &evergreen_hpd_fini, | ||
877 | .hpd_sense = &evergreen_hpd_sense, | ||
878 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | ||
879 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
746 | .gui_idle = &r600_gui_idle, | 880 | .gui_idle = &r600_gui_idle, |
747 | .pm_misc = &evergreen_pm_misc, | 881 | .pm_misc = &evergreen_pm_misc, |
748 | .pm_prepare = &evergreen_pm_prepare, | 882 | .pm_prepare = &evergreen_pm_prepare, |
749 | .pm_finish = &evergreen_pm_finish, | 883 | .pm_finish = &evergreen_pm_finish, |
750 | .pm_init_profile = &r600_pm_init_profile, | 884 | .pm_init_profile = &r600_pm_init_profile, |
751 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 885 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, |
886 | .pre_page_flip = &evergreen_pre_page_flip, | ||
887 | .page_flip = &evergreen_page_flip, | ||
888 | .post_page_flip = &evergreen_post_page_flip, | ||
889 | }; | ||
890 | |||
891 | static struct radeon_asic cayman_asic = { | ||
892 | .init = &cayman_init, | ||
893 | .fini = &cayman_fini, | ||
894 | .suspend = &cayman_suspend, | ||
895 | .resume = &cayman_resume, | ||
896 | .cp_commit = &r600_cp_commit, | ||
897 | .gpu_is_lockup = &cayman_gpu_is_lockup, | ||
898 | .asic_reset = &cayman_asic_reset, | ||
899 | .vga_set_state = &r600_vga_set_state, | ||
900 | .gart_tlb_flush = &cayman_pcie_gart_tlb_flush, | ||
901 | .gart_set_page = &rs600_gart_set_page, | ||
902 | .ring_test = &r600_ring_test, | ||
903 | .ring_ib_execute = &evergreen_ring_ib_execute, | ||
904 | .irq_set = &evergreen_irq_set, | ||
905 | .irq_process = &evergreen_irq_process, | ||
906 | .get_vblank_counter = &evergreen_get_vblank_counter, | ||
907 | .fence_ring_emit = &r600_fence_ring_emit, | ||
908 | .cs_parse = &evergreen_cs_parse, | ||
909 | .copy_blit = &evergreen_copy_blit, | ||
910 | .copy_dma = &evergreen_copy_blit, | ||
911 | .copy = &evergreen_copy_blit, | ||
912 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
913 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
914 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
915 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
916 | .get_pcie_lanes = NULL, | ||
917 | .set_pcie_lanes = NULL, | ||
918 | .set_clock_gating = NULL, | ||
919 | .set_surface_reg = r600_set_surface_reg, | ||
920 | .clear_surface_reg = r600_clear_surface_reg, | ||
921 | .bandwidth_update = &evergreen_bandwidth_update, | ||
922 | .hpd_init = &evergreen_hpd_init, | ||
923 | .hpd_fini = &evergreen_hpd_fini, | ||
924 | .hpd_sense = &evergreen_hpd_sense, | ||
925 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | ||
926 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
927 | .gui_idle = &r600_gui_idle, | ||
928 | .pm_misc = &evergreen_pm_misc, | ||
929 | .pm_prepare = &evergreen_pm_prepare, | ||
930 | .pm_finish = &evergreen_pm_finish, | ||
931 | .pm_init_profile = &r600_pm_init_profile, | ||
932 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | ||
933 | .pre_page_flip = &evergreen_pre_page_flip, | ||
934 | .page_flip = &evergreen_page_flip, | ||
935 | .post_page_flip = &evergreen_post_page_flip, | ||
752 | }; | 936 | }; |
753 | 937 | ||
754 | int radeon_asic_init(struct radeon_device *rdev) | 938 | int radeon_asic_init(struct radeon_device *rdev) |
755 | { | 939 | { |
756 | radeon_register_accessor_init(rdev); | 940 | radeon_register_accessor_init(rdev); |
941 | |||
942 | /* set the number of crtcs */ | ||
943 | if (rdev->flags & RADEON_SINGLE_CRTC) | ||
944 | rdev->num_crtc = 1; | ||
945 | else | ||
946 | rdev->num_crtc = 2; | ||
947 | |||
757 | switch (rdev->family) { | 948 | switch (rdev->family) { |
758 | case CHIP_R100: | 949 | case CHIP_R100: |
759 | case CHIP_RV100: | 950 | case CHIP_RV100: |
@@ -833,8 +1024,33 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
833 | case CHIP_JUNIPER: | 1024 | case CHIP_JUNIPER: |
834 | case CHIP_CYPRESS: | 1025 | case CHIP_CYPRESS: |
835 | case CHIP_HEMLOCK: | 1026 | case CHIP_HEMLOCK: |
1027 | /* set num crtcs */ | ||
1028 | if (rdev->family == CHIP_CEDAR) | ||
1029 | rdev->num_crtc = 4; | ||
1030 | else | ||
1031 | rdev->num_crtc = 6; | ||
836 | rdev->asic = &evergreen_asic; | 1032 | rdev->asic = &evergreen_asic; |
837 | break; | 1033 | break; |
1034 | case CHIP_PALM: | ||
1035 | case CHIP_SUMO: | ||
1036 | case CHIP_SUMO2: | ||
1037 | rdev->asic = &sumo_asic; | ||
1038 | break; | ||
1039 | case CHIP_BARTS: | ||
1040 | case CHIP_TURKS: | ||
1041 | case CHIP_CAICOS: | ||
1042 | /* set num crtcs */ | ||
1043 | if (rdev->family == CHIP_CAICOS) | ||
1044 | rdev->num_crtc = 4; | ||
1045 | else | ||
1046 | rdev->num_crtc = 6; | ||
1047 | rdev->asic = &btc_asic; | ||
1048 | break; | ||
1049 | case CHIP_CAYMAN: | ||
1050 | rdev->asic = &cayman_asic; | ||
1051 | /* set num crtcs */ | ||
1052 | rdev->num_crtc = 6; | ||
1053 | break; | ||
838 | default: | 1054 | default: |
839 | /* FIXME: not supported yet */ | 1055 | /* FIXME: not supported yet */ |
840 | return -EINVAL; | 1056 | return -EINVAL; |
@@ -845,16 +1061,6 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
845 | rdev->asic->set_memory_clock = NULL; | 1061 | rdev->asic->set_memory_clock = NULL; |
846 | } | 1062 | } |
847 | 1063 | ||
848 | /* set the number of crtcs */ | ||
849 | if (rdev->flags & RADEON_SINGLE_CRTC) | ||
850 | rdev->num_crtc = 1; | ||
851 | else { | ||
852 | if (ASIC_IS_DCE4(rdev)) | ||
853 | rdev->num_crtc = 6; | ||
854 | else | ||
855 | rdev->num_crtc = 2; | ||
856 | } | ||
857 | |||
858 | return 0; | 1064 | return 0; |
859 | } | 1065 | } |
860 | 1066 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index a5aff755f0d2..3d7a0d7c6a9a 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -57,8 +57,6 @@ int r100_init(struct radeon_device *rdev); | |||
57 | void r100_fini(struct radeon_device *rdev); | 57 | void r100_fini(struct radeon_device *rdev); |
58 | int r100_suspend(struct radeon_device *rdev); | 58 | int r100_suspend(struct radeon_device *rdev); |
59 | int r100_resume(struct radeon_device *rdev); | 59 | int r100_resume(struct radeon_device *rdev); |
60 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); | ||
61 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | ||
62 | void r100_vga_set_state(struct radeon_device *rdev, bool state); | 60 | void r100_vga_set_state(struct radeon_device *rdev, bool state); |
63 | bool r100_gpu_is_lockup(struct radeon_device *rdev); | 61 | bool r100_gpu_is_lockup(struct radeon_device *rdev); |
64 | int r100_asic_reset(struct radeon_device *rdev); | 62 | int r100_asic_reset(struct radeon_device *rdev); |
@@ -102,15 +100,17 @@ int r100_pci_gart_enable(struct radeon_device *rdev); | |||
102 | void r100_pci_gart_disable(struct radeon_device *rdev); | 100 | void r100_pci_gart_disable(struct radeon_device *rdev); |
103 | int r100_debugfs_mc_info_init(struct radeon_device *rdev); | 101 | int r100_debugfs_mc_info_init(struct radeon_device *rdev); |
104 | int r100_gui_wait_for_idle(struct radeon_device *rdev); | 102 | int r100_gui_wait_for_idle(struct radeon_device *rdev); |
103 | void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, | ||
104 | struct radeon_cp *cp); | ||
105 | bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, | ||
106 | struct r100_gpu_lockup *lockup, | ||
107 | struct radeon_cp *cp); | ||
105 | void r100_ib_fini(struct radeon_device *rdev); | 108 | void r100_ib_fini(struct radeon_device *rdev); |
106 | int r100_ib_init(struct radeon_device *rdev); | 109 | int r100_ib_init(struct radeon_device *rdev); |
107 | void r100_irq_disable(struct radeon_device *rdev); | 110 | void r100_irq_disable(struct radeon_device *rdev); |
108 | void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); | 111 | void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); |
109 | void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); | 112 | void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); |
110 | void r100_vram_init_sizes(struct radeon_device *rdev); | 113 | void r100_vram_init_sizes(struct radeon_device *rdev); |
111 | void r100_wb_disable(struct radeon_device *rdev); | ||
112 | void r100_wb_fini(struct radeon_device *rdev); | ||
113 | int r100_wb_init(struct radeon_device *rdev); | ||
114 | int r100_cp_reset(struct radeon_device *rdev); | 114 | int r100_cp_reset(struct radeon_device *rdev); |
115 | void r100_vga_render_disable(struct radeon_device *rdev); | 115 | void r100_vga_render_disable(struct radeon_device *rdev); |
116 | void r100_restore_sanity(struct radeon_device *rdev); | 116 | void r100_restore_sanity(struct radeon_device *rdev); |
@@ -133,15 +133,19 @@ extern void r100_pm_prepare(struct radeon_device *rdev); | |||
133 | extern void r100_pm_finish(struct radeon_device *rdev); | 133 | extern void r100_pm_finish(struct radeon_device *rdev); |
134 | extern void r100_pm_init_profile(struct radeon_device *rdev); | 134 | extern void r100_pm_init_profile(struct radeon_device *rdev); |
135 | extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); | 135 | extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); |
136 | extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc); | ||
137 | extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | ||
138 | extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); | ||
136 | 139 | ||
137 | /* | 140 | /* |
138 | * r200,rv250,rs300,rv280 | 141 | * r200,rv250,rs300,rv280 |
139 | */ | 142 | */ |
140 | extern int r200_copy_dma(struct radeon_device *rdev, | 143 | extern int r200_copy_dma(struct radeon_device *rdev, |
141 | uint64_t src_offset, | 144 | uint64_t src_offset, |
142 | uint64_t dst_offset, | 145 | uint64_t dst_offset, |
143 | unsigned num_pages, | 146 | unsigned num_pages, |
144 | struct radeon_fence *fence); | 147 | struct radeon_fence *fence); |
148 | void r200_set_safe_registers(struct radeon_device *rdev); | ||
145 | 149 | ||
146 | /* | 150 | /* |
147 | * r300,r350,rv350,rv380 | 151 | * r300,r350,rv350,rv380 |
@@ -158,10 +162,17 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev, | |||
158 | extern int r300_cs_parse(struct radeon_cs_parser *p); | 162 | extern int r300_cs_parse(struct radeon_cs_parser *p); |
159 | extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); | 163 | extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); |
160 | extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 164 | extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
161 | extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); | ||
162 | extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | ||
163 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); | 165 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); |
164 | extern int rv370_get_pcie_lanes(struct radeon_device *rdev); | 166 | extern int rv370_get_pcie_lanes(struct radeon_device *rdev); |
167 | extern void r300_set_reg_safe(struct radeon_device *rdev); | ||
168 | extern void r300_mc_program(struct radeon_device *rdev); | ||
169 | extern void r300_mc_init(struct radeon_device *rdev); | ||
170 | extern void r300_clock_startup(struct radeon_device *rdev); | ||
171 | extern int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
172 | extern int rv370_pcie_gart_init(struct radeon_device *rdev); | ||
173 | extern void rv370_pcie_gart_fini(struct radeon_device *rdev); | ||
174 | extern int rv370_pcie_gart_enable(struct radeon_device *rdev); | ||
175 | extern void rv370_pcie_gart_disable(struct radeon_device *rdev); | ||
165 | 176 | ||
166 | /* | 177 | /* |
167 | * r420,r423,rv410 | 178 | * r420,r423,rv410 |
@@ -171,6 +182,10 @@ extern void r420_fini(struct radeon_device *rdev); | |||
171 | extern int r420_suspend(struct radeon_device *rdev); | 182 | extern int r420_suspend(struct radeon_device *rdev); |
172 | extern int r420_resume(struct radeon_device *rdev); | 183 | extern int r420_resume(struct radeon_device *rdev); |
173 | extern void r420_pm_init_profile(struct radeon_device *rdev); | 184 | extern void r420_pm_init_profile(struct radeon_device *rdev); |
185 | extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); | ||
186 | extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); | ||
187 | extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); | ||
188 | extern void r420_pipes_init(struct radeon_device *rdev); | ||
174 | 189 | ||
175 | /* | 190 | /* |
176 | * rs400,rs480 | 191 | * rs400,rs480 |
@@ -183,6 +198,11 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev); | |||
183 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 198 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
184 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 199 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
185 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 200 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
201 | int rs400_gart_init(struct radeon_device *rdev); | ||
202 | int rs400_gart_enable(struct radeon_device *rdev); | ||
203 | void rs400_gart_adjust_size(struct radeon_device *rdev); | ||
204 | void rs400_gart_disable(struct radeon_device *rdev); | ||
205 | void rs400_gart_fini(struct radeon_device *rdev); | ||
186 | 206 | ||
187 | /* | 207 | /* |
188 | * rs600. | 208 | * rs600. |
@@ -194,6 +214,7 @@ extern int rs600_suspend(struct radeon_device *rdev); | |||
194 | extern int rs600_resume(struct radeon_device *rdev); | 214 | extern int rs600_resume(struct radeon_device *rdev); |
195 | int rs600_irq_set(struct radeon_device *rdev); | 215 | int rs600_irq_set(struct radeon_device *rdev); |
196 | int rs600_irq_process(struct radeon_device *rdev); | 216 | int rs600_irq_process(struct radeon_device *rdev); |
217 | void rs600_irq_disable(struct radeon_device *rdev); | ||
197 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); | 218 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); |
198 | void rs600_gart_tlb_flush(struct radeon_device *rdev); | 219 | void rs600_gart_tlb_flush(struct radeon_device *rdev); |
199 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 220 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
@@ -208,6 +229,11 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev, | |||
208 | extern void rs600_pm_misc(struct radeon_device *rdev); | 229 | extern void rs600_pm_misc(struct radeon_device *rdev); |
209 | extern void rs600_pm_prepare(struct radeon_device *rdev); | 230 | extern void rs600_pm_prepare(struct radeon_device *rdev); |
210 | extern void rs600_pm_finish(struct radeon_device *rdev); | 231 | extern void rs600_pm_finish(struct radeon_device *rdev); |
232 | extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc); | ||
233 | extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | ||
234 | extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc); | ||
235 | void rs600_set_safe_registers(struct radeon_device *rdev); | ||
236 | |||
211 | 237 | ||
212 | /* | 238 | /* |
213 | * rs690,rs740 | 239 | * rs690,rs740 |
@@ -219,20 +245,37 @@ int rs690_suspend(struct radeon_device *rdev); | |||
219 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 245 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
220 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 246 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
221 | void rs690_bandwidth_update(struct radeon_device *rdev); | 247 | void rs690_bandwidth_update(struct radeon_device *rdev); |
248 | void rs690_line_buffer_adjust(struct radeon_device *rdev, | ||
249 | struct drm_display_mode *mode1, | ||
250 | struct drm_display_mode *mode2); | ||
222 | 251 | ||
223 | /* | 252 | /* |
224 | * rv515 | 253 | * rv515 |
225 | */ | 254 | */ |
255 | struct rv515_mc_save { | ||
256 | u32 d1vga_control; | ||
257 | u32 d2vga_control; | ||
258 | u32 vga_render_control; | ||
259 | u32 vga_hdp_control; | ||
260 | u32 d1crtc_control; | ||
261 | u32 d2crtc_control; | ||
262 | }; | ||
226 | int rv515_init(struct radeon_device *rdev); | 263 | int rv515_init(struct radeon_device *rdev); |
227 | void rv515_fini(struct radeon_device *rdev); | 264 | void rv515_fini(struct radeon_device *rdev); |
228 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 265 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
229 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 266 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
230 | void rv515_ring_start(struct radeon_device *rdev); | 267 | void rv515_ring_start(struct radeon_device *rdev); |
231 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); | ||
232 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | ||
233 | void rv515_bandwidth_update(struct radeon_device *rdev); | 268 | void rv515_bandwidth_update(struct radeon_device *rdev); |
234 | int rv515_resume(struct radeon_device *rdev); | 269 | int rv515_resume(struct radeon_device *rdev); |
235 | int rv515_suspend(struct radeon_device *rdev); | 270 | int rv515_suspend(struct radeon_device *rdev); |
271 | void rv515_bandwidth_avivo_update(struct radeon_device *rdev); | ||
272 | void rv515_vga_render_disable(struct radeon_device *rdev); | ||
273 | void rv515_set_safe_registers(struct radeon_device *rdev); | ||
274 | void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save); | ||
275 | void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save); | ||
276 | void rv515_clock_startup(struct radeon_device *rdev); | ||
277 | void rv515_debugfs(struct radeon_device *rdev); | ||
278 | |||
236 | 279 | ||
237 | /* | 280 | /* |
238 | * r520,rv530,rv560,rv570,r580 | 281 | * r520,rv530,rv560,rv570,r580 |
@@ -257,19 +300,13 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | |||
257 | int r600_cs_parse(struct radeon_cs_parser *p); | 300 | int r600_cs_parse(struct radeon_cs_parser *p); |
258 | void r600_fence_ring_emit(struct radeon_device *rdev, | 301 | void r600_fence_ring_emit(struct radeon_device *rdev, |
259 | struct radeon_fence *fence); | 302 | struct radeon_fence *fence); |
260 | int r600_copy_dma(struct radeon_device *rdev, | ||
261 | uint64_t src_offset, | ||
262 | uint64_t dst_offset, | ||
263 | unsigned num_pages, | ||
264 | struct radeon_fence *fence); | ||
265 | int r600_irq_process(struct radeon_device *rdev); | ||
266 | int r600_irq_set(struct radeon_device *rdev); | ||
267 | bool r600_gpu_is_lockup(struct radeon_device *rdev); | 303 | bool r600_gpu_is_lockup(struct radeon_device *rdev); |
268 | int r600_asic_reset(struct radeon_device *rdev); | 304 | int r600_asic_reset(struct radeon_device *rdev); |
269 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, | 305 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, |
270 | uint32_t tiling_flags, uint32_t pitch, | 306 | uint32_t tiling_flags, uint32_t pitch, |
271 | uint32_t offset, uint32_t obj_size); | 307 | uint32_t offset, uint32_t obj_size); |
272 | void r600_clear_surface_reg(struct radeon_device *rdev, int reg); | 308 | void r600_clear_surface_reg(struct radeon_device *rdev, int reg); |
309 | int r600_ib_test(struct radeon_device *rdev); | ||
273 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 310 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
274 | int r600_ring_test(struct radeon_device *rdev); | 311 | int r600_ring_test(struct radeon_device *rdev); |
275 | int r600_copy_blit(struct radeon_device *rdev, | 312 | int r600_copy_blit(struct radeon_device *rdev, |
@@ -286,6 +323,52 @@ extern void r600_pm_misc(struct radeon_device *rdev); | |||
286 | extern void r600_pm_init_profile(struct radeon_device *rdev); | 323 | extern void r600_pm_init_profile(struct radeon_device *rdev); |
287 | extern void rs780_pm_init_profile(struct radeon_device *rdev); | 324 | extern void rs780_pm_init_profile(struct radeon_device *rdev); |
288 | extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); | 325 | extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); |
326 | extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes); | ||
327 | extern int r600_get_pcie_lanes(struct radeon_device *rdev); | ||
328 | bool r600_card_posted(struct radeon_device *rdev); | ||
329 | void r600_cp_stop(struct radeon_device *rdev); | ||
330 | int r600_cp_start(struct radeon_device *rdev); | ||
331 | void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); | ||
332 | int r600_cp_resume(struct radeon_device *rdev); | ||
333 | void r600_cp_fini(struct radeon_device *rdev); | ||
334 | int r600_count_pipe_bits(uint32_t val); | ||
335 | int r600_mc_wait_for_idle(struct radeon_device *rdev); | ||
336 | int r600_pcie_gart_init(struct radeon_device *rdev); | ||
337 | void r600_scratch_init(struct radeon_device *rdev); | ||
338 | int r600_blit_init(struct radeon_device *rdev); | ||
339 | void r600_blit_fini(struct radeon_device *rdev); | ||
340 | int r600_init_microcode(struct radeon_device *rdev); | ||
341 | /* r600 irq */ | ||
342 | int r600_irq_process(struct radeon_device *rdev); | ||
343 | int r600_irq_init(struct radeon_device *rdev); | ||
344 | void r600_irq_fini(struct radeon_device *rdev); | ||
345 | void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); | ||
346 | int r600_irq_set(struct radeon_device *rdev); | ||
347 | void r600_irq_suspend(struct radeon_device *rdev); | ||
348 | void r600_disable_interrupts(struct radeon_device *rdev); | ||
349 | void r600_rlc_stop(struct radeon_device *rdev); | ||
350 | /* r600 audio */ | ||
351 | int r600_audio_init(struct radeon_device *rdev); | ||
352 | int r600_audio_tmds_index(struct drm_encoder *encoder); | ||
353 | void r600_audio_set_clock(struct drm_encoder *encoder, int clock); | ||
354 | int r600_audio_channels(struct radeon_device *rdev); | ||
355 | int r600_audio_bits_per_sample(struct radeon_device *rdev); | ||
356 | int r600_audio_rate(struct radeon_device *rdev); | ||
357 | uint8_t r600_audio_status_bits(struct radeon_device *rdev); | ||
358 | uint8_t r600_audio_category_code(struct radeon_device *rdev); | ||
359 | void r600_audio_schedule_polling(struct radeon_device *rdev); | ||
360 | void r600_audio_enable_polling(struct drm_encoder *encoder); | ||
361 | void r600_audio_disable_polling(struct drm_encoder *encoder); | ||
362 | void r600_audio_fini(struct radeon_device *rdev); | ||
363 | void r600_hdmi_init(struct drm_encoder *encoder); | ||
364 | int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); | ||
365 | void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); | ||
366 | /* r600 blit */ | ||
367 | int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); | ||
368 | void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); | ||
369 | void r600_kms_blit_copy(struct radeon_device *rdev, | ||
370 | u64 src_gpu_addr, u64 dst_gpu_addr, | ||
371 | int size_bytes); | ||
289 | 372 | ||
290 | /* | 373 | /* |
291 | * rv770,rv730,rv710,rv740 | 374 | * rv770,rv730,rv710,rv740 |
@@ -294,11 +377,21 @@ int rv770_init(struct radeon_device *rdev); | |||
294 | void rv770_fini(struct radeon_device *rdev); | 377 | void rv770_fini(struct radeon_device *rdev); |
295 | int rv770_suspend(struct radeon_device *rdev); | 378 | int rv770_suspend(struct radeon_device *rdev); |
296 | int rv770_resume(struct radeon_device *rdev); | 379 | int rv770_resume(struct radeon_device *rdev); |
297 | extern void rv770_pm_misc(struct radeon_device *rdev); | 380 | void rv770_pm_misc(struct radeon_device *rdev); |
381 | u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | ||
382 | void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); | ||
383 | void r700_cp_stop(struct radeon_device *rdev); | ||
384 | void r700_cp_fini(struct radeon_device *rdev); | ||
298 | 385 | ||
299 | /* | 386 | /* |
300 | * evergreen | 387 | * evergreen |
301 | */ | 388 | */ |
389 | struct evergreen_mc_save { | ||
390 | u32 vga_control[6]; | ||
391 | u32 vga_render_control; | ||
392 | u32 vga_hdp_control; | ||
393 | u32 crtc_control[6]; | ||
394 | }; | ||
302 | void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); | 395 | void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); |
303 | int evergreen_init(struct radeon_device *rdev); | 396 | int evergreen_init(struct radeon_device *rdev); |
304 | void evergreen_fini(struct radeon_device *rdev); | 397 | void evergreen_fini(struct radeon_device *rdev); |
@@ -307,6 +400,10 @@ int evergreen_resume(struct radeon_device *rdev); | |||
307 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev); | 400 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev); |
308 | int evergreen_asic_reset(struct radeon_device *rdev); | 401 | int evergreen_asic_reset(struct radeon_device *rdev); |
309 | void evergreen_bandwidth_update(struct radeon_device *rdev); | 402 | void evergreen_bandwidth_update(struct radeon_device *rdev); |
403 | void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | ||
404 | int evergreen_copy_blit(struct radeon_device *rdev, | ||
405 | uint64_t src_offset, uint64_t dst_offset, | ||
406 | unsigned num_pages, struct radeon_fence *fence); | ||
310 | void evergreen_hpd_init(struct radeon_device *rdev); | 407 | void evergreen_hpd_init(struct radeon_device *rdev); |
311 | void evergreen_hpd_fini(struct radeon_device *rdev); | 408 | void evergreen_hpd_fini(struct radeon_device *rdev); |
312 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 409 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
@@ -319,5 +416,28 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p); | |||
319 | extern void evergreen_pm_misc(struct radeon_device *rdev); | 416 | extern void evergreen_pm_misc(struct radeon_device *rdev); |
320 | extern void evergreen_pm_prepare(struct radeon_device *rdev); | 417 | extern void evergreen_pm_prepare(struct radeon_device *rdev); |
321 | extern void evergreen_pm_finish(struct radeon_device *rdev); | 418 | extern void evergreen_pm_finish(struct radeon_device *rdev); |
419 | extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); | ||
420 | extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | ||
421 | extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); | ||
422 | void evergreen_disable_interrupt_state(struct radeon_device *rdev); | ||
423 | int evergreen_blit_init(struct radeon_device *rdev); | ||
424 | void evergreen_blit_fini(struct radeon_device *rdev); | ||
425 | /* evergreen blit */ | ||
426 | int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); | ||
427 | void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); | ||
428 | void evergreen_kms_blit_copy(struct radeon_device *rdev, | ||
429 | u64 src_gpu_addr, u64 dst_gpu_addr, | ||
430 | int size_bytes); | ||
431 | |||
432 | /* | ||
433 | * cayman | ||
434 | */ | ||
435 | void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev); | ||
436 | int cayman_init(struct radeon_device *rdev); | ||
437 | void cayman_fini(struct radeon_device *rdev); | ||
438 | int cayman_suspend(struct radeon_device *rdev); | ||
439 | int cayman_resume(struct radeon_device *rdev); | ||
440 | bool cayman_gpu_is_lockup(struct radeon_device *rdev); | ||
441 | int cayman_asic_reset(struct radeon_device *rdev); | ||
322 | 442 | ||
323 | #endif | 443 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 8e43ddae70cc..bf2b61584cdb 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -37,7 +37,7 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, | |||
37 | extern void radeon_link_encoder_connector(struct drm_device *dev); | 37 | extern void radeon_link_encoder_connector(struct drm_device *dev); |
38 | extern void | 38 | extern void |
39 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, | 39 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, |
40 | uint32_t supported_device); | 40 | uint32_t supported_device, u16 caps); |
41 | 41 | ||
42 | /* from radeon_connector.c */ | 42 | /* from radeon_connector.c */ |
43 | extern void | 43 | extern void |
@@ -88,7 +88,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
88 | /* some evergreen boards have bad data for this entry */ | 88 | /* some evergreen boards have bad data for this entry */ |
89 | if (ASIC_IS_DCE4(rdev)) { | 89 | if (ASIC_IS_DCE4(rdev)) { |
90 | if ((i == 7) && | 90 | if ((i == 7) && |
91 | (gpio->usClkMaskRegisterIndex == 0x1936) && | 91 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && |
92 | (gpio->sucI2cId.ucAccess == 0)) { | 92 | (gpio->sucI2cId.ucAccess == 0)) { |
93 | gpio->sucI2cId.ucAccess = 0x97; | 93 | gpio->sucI2cId.ucAccess = 0x97; |
94 | gpio->ucDataMaskShift = 8; | 94 | gpio->ucDataMaskShift = 8; |
@@ -98,6 +98,14 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
98 | } | 98 | } |
99 | } | 99 | } |
100 | 100 | ||
101 | /* some DCE3 boards have bad data for this entry */ | ||
102 | if (ASIC_IS_DCE3(rdev)) { | ||
103 | if ((i == 4) && | ||
104 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && | ||
105 | (gpio->sucI2cId.ucAccess == 0x94)) | ||
106 | gpio->sucI2cId.ucAccess = 0x14; | ||
107 | } | ||
108 | |||
101 | if (gpio->sucI2cId.ucAccess == id) { | 109 | if (gpio->sucI2cId.ucAccess == id) { |
102 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | 110 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; |
103 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | 111 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; |
@@ -164,7 +172,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) | |||
164 | /* some evergreen boards have bad data for this entry */ | 172 | /* some evergreen boards have bad data for this entry */ |
165 | if (ASIC_IS_DCE4(rdev)) { | 173 | if (ASIC_IS_DCE4(rdev)) { |
166 | if ((i == 7) && | 174 | if ((i == 7) && |
167 | (gpio->usClkMaskRegisterIndex == 0x1936) && | 175 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && |
168 | (gpio->sucI2cId.ucAccess == 0)) { | 176 | (gpio->sucI2cId.ucAccess == 0)) { |
169 | gpio->sucI2cId.ucAccess = 0x97; | 177 | gpio->sucI2cId.ucAccess = 0x97; |
170 | gpio->ucDataMaskShift = 8; | 178 | gpio->ucDataMaskShift = 8; |
@@ -174,6 +182,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) | |||
174 | } | 182 | } |
175 | } | 183 | } |
176 | 184 | ||
185 | /* some DCE3 boards have bad data for this entry */ | ||
186 | if (ASIC_IS_DCE3(rdev)) { | ||
187 | if ((i == 4) && | ||
188 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && | ||
189 | (gpio->sucI2cId.ucAccess == 0x94)) | ||
190 | gpio->sucI2cId.ucAccess = 0x14; | ||
191 | } | ||
192 | |||
177 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | 193 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; |
178 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | 194 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; |
179 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; | 195 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; |
@@ -236,7 +252,7 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd | |||
236 | pin = &gpio_info->asGPIO_Pin[i]; | 252 | pin = &gpio_info->asGPIO_Pin[i]; |
237 | if (id == pin->ucGPIO_ID) { | 253 | if (id == pin->ucGPIO_ID) { |
238 | gpio.id = pin->ucGPIO_ID; | 254 | gpio.id = pin->ucGPIO_ID; |
239 | gpio.reg = pin->usGpioPin_AIndex * 4; | 255 | gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4; |
240 | gpio.mask = (1 << pin->ucGpioPinBitShift); | 256 | gpio.mask = (1 << pin->ucGpioPinBitShift); |
241 | gpio.valid = true; | 257 | gpio.valid = true; |
242 | break; | 258 | break; |
@@ -297,7 +313,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
297 | uint16_t *line_mux, | 313 | uint16_t *line_mux, |
298 | struct radeon_hpd *hpd) | 314 | struct radeon_hpd *hpd) |
299 | { | 315 | { |
300 | struct radeon_device *rdev = dev->dev_private; | ||
301 | 316 | ||
302 | /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ | 317 | /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ |
303 | if ((dev->pdev->device == 0x791e) && | 318 | if ((dev->pdev->device == 0x791e) && |
@@ -372,6 +387,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
372 | *line_mux = 0x90; | 387 | *line_mux = 0x90; |
373 | } | 388 | } |
374 | 389 | ||
390 | /* mac rv630, rv730, others */ | ||
391 | if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) && | ||
392 | (*connector_type == DRM_MODE_CONNECTOR_DVII)) { | ||
393 | *connector_type = DRM_MODE_CONNECTOR_9PinDIN; | ||
394 | *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1; | ||
395 | } | ||
396 | |||
375 | /* ASUS HD 3600 XT board lists the DVI port as HDMI */ | 397 | /* ASUS HD 3600 XT board lists the DVI port as HDMI */ |
376 | if ((dev->pdev->device == 0x9598) && | 398 | if ((dev->pdev->device == 0x9598) && |
377 | (dev->pdev->subsystem_vendor == 0x1043) && | 399 | (dev->pdev->subsystem_vendor == 0x1043) && |
@@ -409,21 +431,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
409 | } | 431 | } |
410 | } | 432 | } |
411 | 433 | ||
412 | /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */ | 434 | /* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port |
413 | if ((dev->pdev->device == 0x95c4) && | 435 | * on the laptop and a DVI port on the docking station and |
436 | * both share the same encoder, hpd pin, and ddc line. | ||
437 | * So while the bios table is technically correct, | ||
438 | * we drop the DVI port here since xrandr has no concept of | ||
439 | * encoders and will try and drive both connectors | ||
440 | * with different crtcs which isn't possible on the hardware | ||
441 | * side and leaves no crtcs for LVDS or VGA. | ||
442 | */ | ||
443 | if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) && | ||
414 | (dev->pdev->subsystem_vendor == 0x1025) && | 444 | (dev->pdev->subsystem_vendor == 0x1025) && |
415 | (dev->pdev->subsystem_device == 0x013c)) { | 445 | (dev->pdev->subsystem_device == 0x013c)) { |
416 | struct radeon_gpio_rec gpio; | ||
417 | |||
418 | if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && | 446 | if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && |
419 | (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { | 447 | (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { |
420 | gpio = radeon_lookup_gpio(rdev, 6); | 448 | /* actually it's a DVI-D port not DVI-I */ |
421 | *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); | ||
422 | *connector_type = DRM_MODE_CONNECTOR_DVID; | 449 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
423 | } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && | 450 | return false; |
424 | (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { | ||
425 | gpio = radeon_lookup_gpio(rdev, 7); | ||
426 | *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); | ||
427 | } | 451 | } |
428 | } | 452 | } |
429 | 453 | ||
@@ -509,6 +533,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
509 | u16 size, data_offset; | 533 | u16 size, data_offset; |
510 | u8 frev, crev; | 534 | u8 frev, crev; |
511 | ATOM_CONNECTOR_OBJECT_TABLE *con_obj; | 535 | ATOM_CONNECTOR_OBJECT_TABLE *con_obj; |
536 | ATOM_ENCODER_OBJECT_TABLE *enc_obj; | ||
512 | ATOM_OBJECT_TABLE *router_obj; | 537 | ATOM_OBJECT_TABLE *router_obj; |
513 | ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; | 538 | ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; |
514 | ATOM_OBJECT_HEADER *obj_header; | 539 | ATOM_OBJECT_HEADER *obj_header; |
@@ -526,8 +551,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
526 | if (crev < 2) | 551 | if (crev < 2) |
527 | return false; | 552 | return false; |
528 | 553 | ||
529 | router.valid = false; | ||
530 | |||
531 | obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); | 554 | obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); |
532 | path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) | 555 | path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) |
533 | (ctx->bios + data_offset + | 556 | (ctx->bios + data_offset + |
@@ -535,6 +558,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
535 | con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *) | 558 | con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *) |
536 | (ctx->bios + data_offset + | 559 | (ctx->bios + data_offset + |
537 | le16_to_cpu(obj_header->usConnectorObjectTableOffset)); | 560 | le16_to_cpu(obj_header->usConnectorObjectTableOffset)); |
561 | enc_obj = (ATOM_ENCODER_OBJECT_TABLE *) | ||
562 | (ctx->bios + data_offset + | ||
563 | le16_to_cpu(obj_header->usEncoderObjectTableOffset)); | ||
538 | router_obj = (ATOM_OBJECT_TABLE *) | 564 | router_obj = (ATOM_OBJECT_TABLE *) |
539 | (ctx->bios + data_offset + | 565 | (ctx->bios + data_offset + |
540 | le16_to_cpu(obj_header->usRouterObjectTableOffset)); | 566 | le16_to_cpu(obj_header->usRouterObjectTableOffset)); |
@@ -624,6 +650,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
624 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | 650 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
625 | continue; | 651 | continue; |
626 | 652 | ||
653 | router.ddc_valid = false; | ||
654 | router.cd_valid = false; | ||
627 | for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { | 655 | for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { |
628 | uint8_t grph_obj_id, grph_obj_num, grph_obj_type; | 656 | uint8_t grph_obj_id, grph_obj_num, grph_obj_type; |
629 | 657 | ||
@@ -638,18 +666,39 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
638 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; | 666 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; |
639 | 667 | ||
640 | if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) { | 668 | if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) { |
641 | u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]); | 669 | for (k = 0; k < enc_obj->ucNumberOfObjects; k++) { |
642 | 670 | u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID); | |
643 | radeon_add_atom_encoder(dev, | 671 | if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) { |
644 | encoder_obj, | 672 | ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) |
645 | le16_to_cpu | 673 | (ctx->bios + data_offset + |
646 | (path-> | 674 | le16_to_cpu(enc_obj->asObjects[k].usRecordOffset)); |
647 | usDeviceTag)); | 675 | ATOM_ENCODER_CAP_RECORD *cap_record; |
676 | u16 caps = 0; | ||
648 | 677 | ||
678 | while (record->ucRecordSize > 0 && | ||
679 | record->ucRecordType > 0 && | ||
680 | record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { | ||
681 | switch (record->ucRecordType) { | ||
682 | case ATOM_ENCODER_CAP_RECORD_TYPE: | ||
683 | cap_record =(ATOM_ENCODER_CAP_RECORD *) | ||
684 | record; | ||
685 | caps = le16_to_cpu(cap_record->usEncoderCap); | ||
686 | break; | ||
687 | } | ||
688 | record = (ATOM_COMMON_RECORD_HEADER *) | ||
689 | ((char *)record + record->ucRecordSize); | ||
690 | } | ||
691 | radeon_add_atom_encoder(dev, | ||
692 | encoder_obj, | ||
693 | le16_to_cpu | ||
694 | (path-> | ||
695 | usDeviceTag), | ||
696 | caps); | ||
697 | } | ||
698 | } | ||
649 | } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { | 699 | } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { |
650 | router.valid = false; | ||
651 | for (k = 0; k < router_obj->ucNumberOfObjects; k++) { | 700 | for (k = 0; k < router_obj->ucNumberOfObjects; k++) { |
652 | u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID); | 701 | u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID); |
653 | if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) { | 702 | if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) { |
654 | ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) | 703 | ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) |
655 | (ctx->bios + data_offset + | 704 | (ctx->bios + data_offset + |
@@ -657,6 +706,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
657 | ATOM_I2C_RECORD *i2c_record; | 706 | ATOM_I2C_RECORD *i2c_record; |
658 | ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; | 707 | ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; |
659 | ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path; | 708 | ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path; |
709 | ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path; | ||
660 | ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table = | 710 | ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table = |
661 | (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) | 711 | (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) |
662 | (ctx->bios + data_offset + | 712 | (ctx->bios + data_offset + |
@@ -671,7 +721,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
671 | break; | 721 | break; |
672 | } | 722 | } |
673 | 723 | ||
674 | while (record->ucRecordType > 0 && | 724 | while (record->ucRecordSize > 0 && |
725 | record->ucRecordType > 0 && | ||
675 | record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { | 726 | record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { |
676 | switch (record->ucRecordType) { | 727 | switch (record->ucRecordType) { |
677 | case ATOM_I2C_RECORD_TYPE: | 728 | case ATOM_I2C_RECORD_TYPE: |
@@ -690,10 +741,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
690 | case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE: | 741 | case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE: |
691 | ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *) | 742 | ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *) |
692 | record; | 743 | record; |
693 | router.valid = true; | 744 | router.ddc_valid = true; |
694 | router.mux_type = ddc_path->ucMuxType; | 745 | router.ddc_mux_type = ddc_path->ucMuxType; |
695 | router.mux_control_pin = ddc_path->ucMuxControlPin; | 746 | router.ddc_mux_control_pin = ddc_path->ucMuxControlPin; |
696 | router.mux_state = ddc_path->ucMuxState[enum_id]; | 747 | router.ddc_mux_state = ddc_path->ucMuxState[enum_id]; |
748 | break; | ||
749 | case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE: | ||
750 | cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *) | ||
751 | record; | ||
752 | router.cd_valid = true; | ||
753 | router.cd_mux_type = cd_path->ucMuxType; | ||
754 | router.cd_mux_control_pin = cd_path->ucMuxControlPin; | ||
755 | router.cd_mux_state = cd_path->ucMuxState[enum_id]; | ||
697 | break; | 756 | break; |
698 | } | 757 | } |
699 | record = (ATOM_COMMON_RECORD_HEADER *) | 758 | record = (ATOM_COMMON_RECORD_HEADER *) |
@@ -725,10 +784,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
725 | ATOM_HPD_INT_RECORD *hpd_record; | 784 | ATOM_HPD_INT_RECORD *hpd_record; |
726 | ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; | 785 | ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; |
727 | 786 | ||
728 | while (record->ucRecordType > 0 | 787 | while (record->ucRecordSize > 0 && |
729 | && record-> | 788 | record->ucRecordType > 0 && |
730 | ucRecordType <= | 789 | record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { |
731 | ATOM_MAX_OBJECT_RECORD_NUMBER) { | ||
732 | switch (record->ucRecordType) { | 790 | switch (record->ucRecordType) { |
733 | case ATOM_I2C_RECORD_TYPE: | 791 | case ATOM_I2C_RECORD_TYPE: |
734 | i2c_record = | 792 | i2c_record = |
@@ -860,7 +918,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
860 | size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; | 918 | size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; |
861 | struct radeon_router router; | 919 | struct radeon_router router; |
862 | 920 | ||
863 | router.valid = false; | 921 | router.ddc_valid = false; |
922 | router.cd_valid = false; | ||
864 | 923 | ||
865 | bios_connectors = kzalloc(bc_size, GFP_KERNEL); | 924 | bios_connectors = kzalloc(bc_size, GFP_KERNEL); |
866 | if (!bios_connectors) | 925 | if (!bios_connectors) |
@@ -970,7 +1029,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
970 | radeon_get_encoder_enum(dev, | 1029 | radeon_get_encoder_enum(dev, |
971 | (1 << i), | 1030 | (1 << i), |
972 | dac), | 1031 | dac), |
973 | (1 << i)); | 1032 | (1 << i), |
1033 | 0); | ||
974 | else | 1034 | else |
975 | radeon_add_legacy_encoder(dev, | 1035 | radeon_add_legacy_encoder(dev, |
976 | radeon_get_encoder_enum(dev, | 1036 | radeon_get_encoder_enum(dev, |
@@ -1049,6 +1109,7 @@ union firmware_info { | |||
1049 | ATOM_FIRMWARE_INFO_V1_3 info_13; | 1109 | ATOM_FIRMWARE_INFO_V1_3 info_13; |
1050 | ATOM_FIRMWARE_INFO_V1_4 info_14; | 1110 | ATOM_FIRMWARE_INFO_V1_4 info_14; |
1051 | ATOM_FIRMWARE_INFO_V2_1 info_21; | 1111 | ATOM_FIRMWARE_INFO_V2_1 info_21; |
1112 | ATOM_FIRMWARE_INFO_V2_2 info_22; | ||
1052 | }; | 1113 | }; |
1053 | 1114 | ||
1054 | bool radeon_atom_get_clock_info(struct drm_device *dev) | 1115 | bool radeon_atom_get_clock_info(struct drm_device *dev) |
@@ -1103,17 +1164,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
1103 | p1pll->pll_out_min = 64800; | 1164 | p1pll->pll_out_min = 64800; |
1104 | else | 1165 | else |
1105 | p1pll->pll_out_min = 20000; | 1166 | p1pll->pll_out_min = 20000; |
1106 | } else if (p1pll->pll_out_min > 64800) { | ||
1107 | /* Limiting the pll output range is a good thing generally as | ||
1108 | * it limits the number of possible pll combinations for a given | ||
1109 | * frequency presumably to the ones that work best on each card. | ||
1110 | * However, certain duallink DVI monitors seem to like | ||
1111 | * pll combinations that would be limited by this at least on | ||
1112 | * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per | ||
1113 | * family. | ||
1114 | */ | ||
1115 | if (!radeon_new_pll) | ||
1116 | p1pll->pll_out_min = 64800; | ||
1117 | } | 1167 | } |
1118 | 1168 | ||
1119 | p1pll->pll_in_min = | 1169 | p1pll->pll_in_min = |
@@ -1124,8 +1174,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
1124 | *p2pll = *p1pll; | 1174 | *p2pll = *p1pll; |
1125 | 1175 | ||
1126 | /* system clock */ | 1176 | /* system clock */ |
1127 | spll->reference_freq = | 1177 | if (ASIC_IS_DCE4(rdev)) |
1128 | le16_to_cpu(firmware_info->info.usReferenceClock); | 1178 | spll->reference_freq = |
1179 | le16_to_cpu(firmware_info->info_21.usCoreReferenceClock); | ||
1180 | else | ||
1181 | spll->reference_freq = | ||
1182 | le16_to_cpu(firmware_info->info.usReferenceClock); | ||
1129 | spll->reference_div = 0; | 1183 | spll->reference_div = 0; |
1130 | 1184 | ||
1131 | spll->pll_out_min = | 1185 | spll->pll_out_min = |
@@ -1147,8 +1201,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
1147 | le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input); | 1201 | le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input); |
1148 | 1202 | ||
1149 | /* memory clock */ | 1203 | /* memory clock */ |
1150 | mpll->reference_freq = | 1204 | if (ASIC_IS_DCE4(rdev)) |
1151 | le16_to_cpu(firmware_info->info.usReferenceClock); | 1205 | mpll->reference_freq = |
1206 | le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock); | ||
1207 | else | ||
1208 | mpll->reference_freq = | ||
1209 | le16_to_cpu(firmware_info->info.usReferenceClock); | ||
1152 | mpll->reference_div = 0; | 1210 | mpll->reference_div = 0; |
1153 | 1211 | ||
1154 | mpll->pll_out_min = | 1212 | mpll->pll_out_min = |
@@ -1177,13 +1235,21 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
1177 | if (ASIC_IS_DCE4(rdev)) { | 1235 | if (ASIC_IS_DCE4(rdev)) { |
1178 | rdev->clock.default_dispclk = | 1236 | rdev->clock.default_dispclk = |
1179 | le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); | 1237 | le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); |
1180 | if (rdev->clock.default_dispclk == 0) | 1238 | if (rdev->clock.default_dispclk == 0) { |
1181 | rdev->clock.default_dispclk = 60000; /* 600 Mhz */ | 1239 | if (ASIC_IS_DCE5(rdev)) |
1240 | rdev->clock.default_dispclk = 54000; /* 540 Mhz */ | ||
1241 | else | ||
1242 | rdev->clock.default_dispclk = 60000; /* 600 Mhz */ | ||
1243 | } | ||
1182 | rdev->clock.dp_extclk = | 1244 | rdev->clock.dp_extclk = |
1183 | le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); | 1245 | le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); |
1184 | } | 1246 | } |
1185 | *dcpll = *p1pll; | 1247 | *dcpll = *p1pll; |
1186 | 1248 | ||
1249 | rdev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock); | ||
1250 | if (rdev->clock.max_pixel_clock == 0) | ||
1251 | rdev->clock.max_pixel_clock = 40000; | ||
1252 | |||
1187 | return true; | 1253 | return true; |
1188 | } | 1254 | } |
1189 | 1255 | ||
@@ -1213,11 +1279,11 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) | |||
1213 | data_offset); | 1279 | data_offset); |
1214 | switch (crev) { | 1280 | switch (crev) { |
1215 | case 1: | 1281 | case 1: |
1216 | if (igp_info->info.ulBootUpMemoryClock) | 1282 | if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock)) |
1217 | return true; | 1283 | return true; |
1218 | break; | 1284 | break; |
1219 | case 2: | 1285 | case 2: |
1220 | if (igp_info->info_2.ulBootUpSidePortClock) | 1286 | if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock)) |
1221 | return true; | 1287 | return true; |
1222 | break; | 1288 | break; |
1223 | default: | 1289 | default: |
@@ -1277,36 +1343,27 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, | |||
1277 | return false; | 1343 | return false; |
1278 | } | 1344 | } |
1279 | 1345 | ||
1280 | static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct | 1346 | bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, |
1281 | radeon_encoder | 1347 | struct radeon_atom_ss *ss, |
1282 | *encoder, | 1348 | int id) |
1283 | int id) | ||
1284 | { | 1349 | { |
1285 | struct drm_device *dev = encoder->base.dev; | ||
1286 | struct radeon_device *rdev = dev->dev_private; | ||
1287 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 1350 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
1288 | int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); | 1351 | int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); |
1289 | uint16_t data_offset; | 1352 | uint16_t data_offset, size; |
1290 | struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; | 1353 | struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; |
1291 | uint8_t frev, crev; | 1354 | uint8_t frev, crev; |
1292 | struct radeon_atom_ss *ss = NULL; | 1355 | int i, num_indices; |
1293 | int i; | ||
1294 | |||
1295 | if (id > ATOM_MAX_SS_ENTRY) | ||
1296 | return NULL; | ||
1297 | 1356 | ||
1298 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, | 1357 | memset(ss, 0, sizeof(struct radeon_atom_ss)); |
1358 | if (atom_parse_data_header(mode_info->atom_context, index, &size, | ||
1299 | &frev, &crev, &data_offset)) { | 1359 | &frev, &crev, &data_offset)) { |
1300 | ss_info = | 1360 | ss_info = |
1301 | (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset); | 1361 | (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset); |
1302 | 1362 | ||
1303 | ss = | 1363 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / |
1304 | kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL); | 1364 | sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); |
1305 | |||
1306 | if (!ss) | ||
1307 | return NULL; | ||
1308 | 1365 | ||
1309 | for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) { | 1366 | for (i = 0; i < num_indices; i++) { |
1310 | if (ss_info->asSS_Info[i].ucSS_Id == id) { | 1367 | if (ss_info->asSS_Info[i].ucSS_Id == id) { |
1311 | ss->percentage = | 1368 | ss->percentage = |
1312 | le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); | 1369 | le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); |
@@ -1315,11 +1372,127 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct | |||
1315 | ss->delay = ss_info->asSS_Info[i].ucSS_Delay; | 1372 | ss->delay = ss_info->asSS_Info[i].ucSS_Delay; |
1316 | ss->range = ss_info->asSS_Info[i].ucSS_Range; | 1373 | ss->range = ss_info->asSS_Info[i].ucSS_Range; |
1317 | ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; | 1374 | ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; |
1318 | break; | 1375 | return true; |
1376 | } | ||
1377 | } | ||
1378 | } | ||
1379 | return false; | ||
1380 | } | ||
1381 | |||
1382 | static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev, | ||
1383 | struct radeon_atom_ss *ss, | ||
1384 | int id) | ||
1385 | { | ||
1386 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
1387 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | ||
1388 | u16 data_offset, size; | ||
1389 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info; | ||
1390 | u8 frev, crev; | ||
1391 | u16 percentage = 0, rate = 0; | ||
1392 | |||
1393 | /* get any igp specific overrides */ | ||
1394 | if (atom_parse_data_header(mode_info->atom_context, index, &size, | ||
1395 | &frev, &crev, &data_offset)) { | ||
1396 | igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *) | ||
1397 | (mode_info->atom_context->bios + data_offset); | ||
1398 | switch (id) { | ||
1399 | case ASIC_INTERNAL_SS_ON_TMDS: | ||
1400 | percentage = le16_to_cpu(igp_info->usDVISSPercentage); | ||
1401 | rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz); | ||
1402 | break; | ||
1403 | case ASIC_INTERNAL_SS_ON_HDMI: | ||
1404 | percentage = le16_to_cpu(igp_info->usHDMISSPercentage); | ||
1405 | rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz); | ||
1406 | break; | ||
1407 | case ASIC_INTERNAL_SS_ON_LVDS: | ||
1408 | percentage = le16_to_cpu(igp_info->usLvdsSSPercentage); | ||
1409 | rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz); | ||
1410 | break; | ||
1411 | } | ||
1412 | if (percentage) | ||
1413 | ss->percentage = percentage; | ||
1414 | if (rate) | ||
1415 | ss->rate = rate; | ||
1416 | } | ||
1417 | } | ||
1418 | |||
1419 | union asic_ss_info { | ||
1420 | struct _ATOM_ASIC_INTERNAL_SS_INFO info; | ||
1421 | struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2; | ||
1422 | struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3; | ||
1423 | }; | ||
1424 | |||
1425 | bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | ||
1426 | struct radeon_atom_ss *ss, | ||
1427 | int id, u32 clock) | ||
1428 | { | ||
1429 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
1430 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
1431 | uint16_t data_offset, size; | ||
1432 | union asic_ss_info *ss_info; | ||
1433 | uint8_t frev, crev; | ||
1434 | int i, num_indices; | ||
1435 | |||
1436 | memset(ss, 0, sizeof(struct radeon_atom_ss)); | ||
1437 | if (atom_parse_data_header(mode_info->atom_context, index, &size, | ||
1438 | &frev, &crev, &data_offset)) { | ||
1439 | |||
1440 | ss_info = | ||
1441 | (union asic_ss_info *)(mode_info->atom_context->bios + data_offset); | ||
1442 | |||
1443 | switch (frev) { | ||
1444 | case 1: | ||
1445 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | ||
1446 | sizeof(ATOM_ASIC_SS_ASSIGNMENT); | ||
1447 | |||
1448 | for (i = 0; i < num_indices; i++) { | ||
1449 | if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && | ||
1450 | (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) { | ||
1451 | ss->percentage = | ||
1452 | le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | ||
1453 | ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; | ||
1454 | ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz); | ||
1455 | return true; | ||
1456 | } | ||
1457 | } | ||
1458 | break; | ||
1459 | case 2: | ||
1460 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | ||
1461 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); | ||
1462 | for (i = 0; i < num_indices; i++) { | ||
1463 | if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && | ||
1464 | (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) { | ||
1465 | ss->percentage = | ||
1466 | le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | ||
1467 | ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; | ||
1468 | ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); | ||
1469 | return true; | ||
1470 | } | ||
1471 | } | ||
1472 | break; | ||
1473 | case 3: | ||
1474 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | ||
1475 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); | ||
1476 | for (i = 0; i < num_indices; i++) { | ||
1477 | if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && | ||
1478 | (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) { | ||
1479 | ss->percentage = | ||
1480 | le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | ||
1481 | ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; | ||
1482 | ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); | ||
1483 | if (rdev->flags & RADEON_IS_IGP) | ||
1484 | radeon_atombios_get_igp_ss_overrides(rdev, ss, id); | ||
1485 | return true; | ||
1486 | } | ||
1319 | } | 1487 | } |
1488 | break; | ||
1489 | default: | ||
1490 | DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev); | ||
1491 | break; | ||
1320 | } | 1492 | } |
1493 | |||
1321 | } | 1494 | } |
1322 | return ss; | 1495 | return false; |
1323 | } | 1496 | } |
1324 | 1497 | ||
1325 | union lvds_info { | 1498 | union lvds_info { |
@@ -1371,7 +1544,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1371 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); | 1544 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); |
1372 | lvds->panel_pwr_delay = | 1545 | lvds->panel_pwr_delay = |
1373 | le16_to_cpu(lvds_info->info.usOffDelayInMs); | 1546 | le16_to_cpu(lvds_info->info.usOffDelayInMs); |
1374 | lvds->lvds_misc = lvds_info->info.ucLVDS_Misc; | 1547 | lvds->lcd_misc = lvds_info->info.ucLVDS_Misc; |
1375 | 1548 | ||
1376 | misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess); | 1549 | misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess); |
1377 | if (misc & ATOM_VSYNC_POLARITY) | 1550 | if (misc & ATOM_VSYNC_POLARITY) |
@@ -1385,22 +1558,13 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1385 | if (misc & ATOM_DOUBLE_CLOCK_MODE) | 1558 | if (misc & ATOM_DOUBLE_CLOCK_MODE) |
1386 | lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; | 1559 | lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; |
1387 | 1560 | ||
1561 | lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize); | ||
1562 | lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize); | ||
1563 | |||
1388 | /* set crtc values */ | 1564 | /* set crtc values */ |
1389 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); | 1565 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); |
1390 | 1566 | ||
1391 | lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id); | 1567 | lvds->lcd_ss_id = lvds_info->info.ucSS_Id; |
1392 | |||
1393 | if (ASIC_IS_AVIVO(rdev)) { | ||
1394 | if (radeon_new_pll == 0) | ||
1395 | lvds->pll_algo = PLL_ALGO_LEGACY; | ||
1396 | else | ||
1397 | lvds->pll_algo = PLL_ALGO_NEW; | ||
1398 | } else { | ||
1399 | if (radeon_new_pll == 1) | ||
1400 | lvds->pll_algo = PLL_ALGO_NEW; | ||
1401 | else | ||
1402 | lvds->pll_algo = PLL_ALGO_LEGACY; | ||
1403 | } | ||
1404 | 1568 | ||
1405 | encoder->native_mode = lvds->native_mode; | 1569 | encoder->native_mode = lvds->native_mode; |
1406 | 1570 | ||
@@ -1409,6 +1573,68 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1409 | else | 1573 | else |
1410 | lvds->linkb = false; | 1574 | lvds->linkb = false; |
1411 | 1575 | ||
1576 | /* parse the lcd record table */ | ||
1577 | if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) { | ||
1578 | ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; | ||
1579 | ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; | ||
1580 | bool bad_record = false; | ||
1581 | u8 *record; | ||
1582 | |||
1583 | if ((frev == 1) && (crev < 2)) | ||
1584 | /* absolute */ | ||
1585 | record = (u8 *)(mode_info->atom_context->bios + | ||
1586 | le16_to_cpu(lvds_info->info.usModePatchTableOffset)); | ||
1587 | else | ||
1588 | /* relative */ | ||
1589 | record = (u8 *)(mode_info->atom_context->bios + | ||
1590 | data_offset + | ||
1591 | le16_to_cpu(lvds_info->info.usModePatchTableOffset)); | ||
1592 | while (*record != ATOM_RECORD_END_TYPE) { | ||
1593 | switch (*record) { | ||
1594 | case LCD_MODE_PATCH_RECORD_MODE_TYPE: | ||
1595 | record += sizeof(ATOM_PATCH_RECORD_MODE); | ||
1596 | break; | ||
1597 | case LCD_RTS_RECORD_TYPE: | ||
1598 | record += sizeof(ATOM_LCD_RTS_RECORD); | ||
1599 | break; | ||
1600 | case LCD_CAP_RECORD_TYPE: | ||
1601 | record += sizeof(ATOM_LCD_MODE_CONTROL_CAP); | ||
1602 | break; | ||
1603 | case LCD_FAKE_EDID_PATCH_RECORD_TYPE: | ||
1604 | fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; | ||
1605 | if (fake_edid_record->ucFakeEDIDLength) { | ||
1606 | struct edid *edid; | ||
1607 | int edid_size = | ||
1608 | max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength); | ||
1609 | edid = kmalloc(edid_size, GFP_KERNEL); | ||
1610 | if (edid) { | ||
1611 | memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], | ||
1612 | fake_edid_record->ucFakeEDIDLength); | ||
1613 | |||
1614 | if (drm_edid_is_valid(edid)) { | ||
1615 | rdev->mode_info.bios_hardcoded_edid = edid; | ||
1616 | rdev->mode_info.bios_hardcoded_edid_size = edid_size; | ||
1617 | } else | ||
1618 | kfree(edid); | ||
1619 | } | ||
1620 | } | ||
1621 | record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD); | ||
1622 | break; | ||
1623 | case LCD_PANEL_RESOLUTION_RECORD_TYPE: | ||
1624 | panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; | ||
1625 | lvds->native_mode.width_mm = panel_res_record->usHSize; | ||
1626 | lvds->native_mode.height_mm = panel_res_record->usVSize; | ||
1627 | record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD); | ||
1628 | break; | ||
1629 | default: | ||
1630 | DRM_ERROR("Bad LCD record %d\n", *record); | ||
1631 | bad_record = true; | ||
1632 | break; | ||
1633 | } | ||
1634 | if (bad_record) | ||
1635 | break; | ||
1636 | } | ||
1637 | } | ||
1412 | } | 1638 | } |
1413 | return lvds; | 1639 | return lvds; |
1414 | } | 1640 | } |
@@ -1660,510 +1886,658 @@ static const char *pp_lib_thermal_controller_names[] = { | |||
1660 | "RV6xx", | 1886 | "RV6xx", |
1661 | "RV770", | 1887 | "RV770", |
1662 | "adt7473", | 1888 | "adt7473", |
1889 | "NONE", | ||
1663 | "External GPIO", | 1890 | "External GPIO", |
1664 | "Evergreen", | 1891 | "Evergreen", |
1665 | "adt7473 with internal", | 1892 | "emc2103", |
1666 | 1893 | "Sumo", | |
1894 | "Northern Islands", | ||
1667 | }; | 1895 | }; |
1668 | 1896 | ||
1669 | union power_info { | 1897 | union power_info { |
1670 | struct _ATOM_POWERPLAY_INFO info; | 1898 | struct _ATOM_POWERPLAY_INFO info; |
1671 | struct _ATOM_POWERPLAY_INFO_V2 info_2; | 1899 | struct _ATOM_POWERPLAY_INFO_V2 info_2; |
1672 | struct _ATOM_POWERPLAY_INFO_V3 info_3; | 1900 | struct _ATOM_POWERPLAY_INFO_V3 info_3; |
1673 | struct _ATOM_PPLIB_POWERPLAYTABLE info_4; | 1901 | struct _ATOM_PPLIB_POWERPLAYTABLE pplib; |
1902 | struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; | ||
1903 | struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; | ||
1674 | }; | 1904 | }; |
1675 | 1905 | ||
1676 | void radeon_atombios_get_power_modes(struct radeon_device *rdev) | 1906 | union pplib_clock_info { |
1907 | struct _ATOM_PPLIB_R600_CLOCK_INFO r600; | ||
1908 | struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; | ||
1909 | struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; | ||
1910 | struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; | ||
1911 | }; | ||
1912 | |||
1913 | union pplib_power_state { | ||
1914 | struct _ATOM_PPLIB_STATE v1; | ||
1915 | struct _ATOM_PPLIB_STATE_V2 v2; | ||
1916 | }; | ||
1917 | |||
1918 | static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev, | ||
1919 | int state_index, | ||
1920 | u32 misc, u32 misc2) | ||
1921 | { | ||
1922 | rdev->pm.power_state[state_index].misc = misc; | ||
1923 | rdev->pm.power_state[state_index].misc2 = misc2; | ||
1924 | /* order matters! */ | ||
1925 | if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) | ||
1926 | rdev->pm.power_state[state_index].type = | ||
1927 | POWER_STATE_TYPE_POWERSAVE; | ||
1928 | if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) | ||
1929 | rdev->pm.power_state[state_index].type = | ||
1930 | POWER_STATE_TYPE_BATTERY; | ||
1931 | if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) | ||
1932 | rdev->pm.power_state[state_index].type = | ||
1933 | POWER_STATE_TYPE_BATTERY; | ||
1934 | if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) | ||
1935 | rdev->pm.power_state[state_index].type = | ||
1936 | POWER_STATE_TYPE_BALANCED; | ||
1937 | if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { | ||
1938 | rdev->pm.power_state[state_index].type = | ||
1939 | POWER_STATE_TYPE_PERFORMANCE; | ||
1940 | rdev->pm.power_state[state_index].flags &= | ||
1941 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1942 | } | ||
1943 | if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) | ||
1944 | rdev->pm.power_state[state_index].type = | ||
1945 | POWER_STATE_TYPE_BALANCED; | ||
1946 | if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { | ||
1947 | rdev->pm.power_state[state_index].type = | ||
1948 | POWER_STATE_TYPE_DEFAULT; | ||
1949 | rdev->pm.default_power_state_index = state_index; | ||
1950 | rdev->pm.power_state[state_index].default_clock_mode = | ||
1951 | &rdev->pm.power_state[state_index].clock_info[0]; | ||
1952 | } else if (state_index == 0) { | ||
1953 | rdev->pm.power_state[state_index].clock_info[0].flags |= | ||
1954 | RADEON_PM_MODE_NO_DISPLAY; | ||
1955 | } | ||
1956 | } | ||
1957 | |||
1958 | static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) | ||
1677 | { | 1959 | { |
1678 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 1960 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
1961 | u32 misc, misc2 = 0; | ||
1962 | int num_modes = 0, i; | ||
1963 | int state_index = 0; | ||
1964 | struct radeon_i2c_bus_rec i2c_bus; | ||
1965 | union power_info *power_info; | ||
1679 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | 1966 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
1680 | u16 data_offset; | 1967 | u16 data_offset; |
1681 | u8 frev, crev; | 1968 | u8 frev, crev; |
1682 | u32 misc, misc2 = 0, sclk, mclk; | ||
1683 | union power_info *power_info; | ||
1684 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; | ||
1685 | struct _ATOM_PPLIB_STATE *power_state; | ||
1686 | int num_modes = 0, i, j; | ||
1687 | int state_index = 0, mode_index = 0; | ||
1688 | struct radeon_i2c_bus_rec i2c_bus; | ||
1689 | 1969 | ||
1690 | rdev->pm.default_power_state_index = -1; | 1970 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, |
1691 | 1971 | &frev, &crev, &data_offset)) | |
1692 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, | 1972 | return state_index; |
1693 | &frev, &crev, &data_offset)) { | 1973 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); |
1694 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | 1974 | |
1695 | if (frev < 4) { | 1975 | /* add the i2c bus for thermal/fan chip */ |
1696 | /* add the i2c bus for thermal/fan chip */ | 1976 | if (power_info->info.ucOverdriveThermalController > 0) { |
1697 | if (power_info->info.ucOverdriveThermalController > 0) { | 1977 | DRM_INFO("Possible %s thermal controller at 0x%02x\n", |
1698 | DRM_INFO("Possible %s thermal controller at 0x%02x\n", | 1978 | thermal_controller_names[power_info->info.ucOverdriveThermalController], |
1699 | thermal_controller_names[power_info->info.ucOverdriveThermalController], | 1979 | power_info->info.ucOverdriveControllerAddress >> 1); |
1700 | power_info->info.ucOverdriveControllerAddress >> 1); | 1980 | i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); |
1701 | i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); | 1981 | rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); |
1702 | rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); | 1982 | if (rdev->pm.i2c_bus) { |
1703 | if (rdev->pm.i2c_bus) { | 1983 | struct i2c_board_info info = { }; |
1704 | struct i2c_board_info info = { }; | 1984 | const char *name = thermal_controller_names[power_info->info. |
1705 | const char *name = thermal_controller_names[power_info->info. | 1985 | ucOverdriveThermalController]; |
1706 | ucOverdriveThermalController]; | 1986 | info.addr = power_info->info.ucOverdriveControllerAddress >> 1; |
1707 | info.addr = power_info->info.ucOverdriveControllerAddress >> 1; | 1987 | strlcpy(info.type, name, sizeof(info.type)); |
1708 | strlcpy(info.type, name, sizeof(info.type)); | 1988 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); |
1709 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); | 1989 | } |
1710 | } | 1990 | } |
1991 | num_modes = power_info->info.ucNumOfPowerModeEntries; | ||
1992 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) | ||
1993 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; | ||
1994 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL); | ||
1995 | if (!rdev->pm.power_state) | ||
1996 | return state_index; | ||
1997 | /* last mode is usually default, array is low to high */ | ||
1998 | for (i = 0; i < num_modes; i++) { | ||
1999 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | ||
2000 | switch (frev) { | ||
2001 | case 1: | ||
2002 | rdev->pm.power_state[state_index].num_clock_modes = 1; | ||
2003 | rdev->pm.power_state[state_index].clock_info[0].mclk = | ||
2004 | le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); | ||
2005 | rdev->pm.power_state[state_index].clock_info[0].sclk = | ||
2006 | le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock); | ||
2007 | /* skip invalid modes */ | ||
2008 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || | ||
2009 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) | ||
2010 | continue; | ||
2011 | rdev->pm.power_state[state_index].pcie_lanes = | ||
2012 | power_info->info.asPowerPlayInfo[i].ucNumPciELanes; | ||
2013 | misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); | ||
2014 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || | ||
2015 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | ||
2016 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
2017 | VOLTAGE_GPIO; | ||
2018 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | ||
2019 | radeon_lookup_gpio(rdev, | ||
2020 | power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex); | ||
2021 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) | ||
2022 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
2023 | true; | ||
2024 | else | ||
2025 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
2026 | false; | ||
2027 | } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { | ||
2028 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
2029 | VOLTAGE_VDDC; | ||
2030 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = | ||
2031 | power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; | ||
1711 | } | 2032 | } |
1712 | num_modes = power_info->info.ucNumOfPowerModeEntries; | 2033 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; |
1713 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) | 2034 | radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0); |
1714 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; | 2035 | state_index++; |
1715 | /* last mode is usually default, array is low to high */ | 2036 | break; |
1716 | for (i = 0; i < num_modes; i++) { | 2037 | case 2: |
1717 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 2038 | rdev->pm.power_state[state_index].num_clock_modes = 1; |
1718 | switch (frev) { | 2039 | rdev->pm.power_state[state_index].clock_info[0].mclk = |
1719 | case 1: | 2040 | le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); |
1720 | rdev->pm.power_state[state_index].num_clock_modes = 1; | 2041 | rdev->pm.power_state[state_index].clock_info[0].sclk = |
1721 | rdev->pm.power_state[state_index].clock_info[0].mclk = | 2042 | le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock); |
1722 | le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); | 2043 | /* skip invalid modes */ |
1723 | rdev->pm.power_state[state_index].clock_info[0].sclk = | 2044 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || |
1724 | le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock); | 2045 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) |
1725 | /* skip invalid modes */ | 2046 | continue; |
1726 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || | 2047 | rdev->pm.power_state[state_index].pcie_lanes = |
1727 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) | 2048 | power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; |
1728 | continue; | 2049 | misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); |
1729 | rdev->pm.power_state[state_index].pcie_lanes = | 2050 | misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); |
1730 | power_info->info.asPowerPlayInfo[i].ucNumPciELanes; | 2051 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || |
1731 | misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); | 2052 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { |
1732 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || | 2053 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = |
1733 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | 2054 | VOLTAGE_GPIO; |
1734 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | 2055 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = |
1735 | VOLTAGE_GPIO; | 2056 | radeon_lookup_gpio(rdev, |
1736 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | 2057 | power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex); |
1737 | radeon_lookup_gpio(rdev, | 2058 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) |
1738 | power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex); | 2059 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = |
1739 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) | 2060 | true; |
1740 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | 2061 | else |
1741 | true; | 2062 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = |
1742 | else | 2063 | false; |
1743 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | 2064 | } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { |
1744 | false; | 2065 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = |
1745 | } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { | 2066 | VOLTAGE_VDDC; |
1746 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | 2067 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = |
1747 | VOLTAGE_VDDC; | 2068 | power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; |
1748 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = | ||
1749 | power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; | ||
1750 | } | ||
1751 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1752 | rdev->pm.power_state[state_index].misc = misc; | ||
1753 | /* order matters! */ | ||
1754 | if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) | ||
1755 | rdev->pm.power_state[state_index].type = | ||
1756 | POWER_STATE_TYPE_POWERSAVE; | ||
1757 | if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) | ||
1758 | rdev->pm.power_state[state_index].type = | ||
1759 | POWER_STATE_TYPE_BATTERY; | ||
1760 | if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) | ||
1761 | rdev->pm.power_state[state_index].type = | ||
1762 | POWER_STATE_TYPE_BATTERY; | ||
1763 | if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) | ||
1764 | rdev->pm.power_state[state_index].type = | ||
1765 | POWER_STATE_TYPE_BALANCED; | ||
1766 | if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { | ||
1767 | rdev->pm.power_state[state_index].type = | ||
1768 | POWER_STATE_TYPE_PERFORMANCE; | ||
1769 | rdev->pm.power_state[state_index].flags &= | ||
1770 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1771 | } | ||
1772 | if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { | ||
1773 | rdev->pm.power_state[state_index].type = | ||
1774 | POWER_STATE_TYPE_DEFAULT; | ||
1775 | rdev->pm.default_power_state_index = state_index; | ||
1776 | rdev->pm.power_state[state_index].default_clock_mode = | ||
1777 | &rdev->pm.power_state[state_index].clock_info[0]; | ||
1778 | rdev->pm.power_state[state_index].flags &= | ||
1779 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1780 | } else if (state_index == 0) { | ||
1781 | rdev->pm.power_state[state_index].clock_info[0].flags |= | ||
1782 | RADEON_PM_MODE_NO_DISPLAY; | ||
1783 | } | ||
1784 | state_index++; | ||
1785 | break; | ||
1786 | case 2: | ||
1787 | rdev->pm.power_state[state_index].num_clock_modes = 1; | ||
1788 | rdev->pm.power_state[state_index].clock_info[0].mclk = | ||
1789 | le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); | ||
1790 | rdev->pm.power_state[state_index].clock_info[0].sclk = | ||
1791 | le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock); | ||
1792 | /* skip invalid modes */ | ||
1793 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || | ||
1794 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) | ||
1795 | continue; | ||
1796 | rdev->pm.power_state[state_index].pcie_lanes = | ||
1797 | power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; | ||
1798 | misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); | ||
1799 | misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); | ||
1800 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || | ||
1801 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | ||
1802 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
1803 | VOLTAGE_GPIO; | ||
1804 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | ||
1805 | radeon_lookup_gpio(rdev, | ||
1806 | power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex); | ||
1807 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) | ||
1808 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
1809 | true; | ||
1810 | else | ||
1811 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
1812 | false; | ||
1813 | } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { | ||
1814 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
1815 | VOLTAGE_VDDC; | ||
1816 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = | ||
1817 | power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; | ||
1818 | } | ||
1819 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1820 | rdev->pm.power_state[state_index].misc = misc; | ||
1821 | rdev->pm.power_state[state_index].misc2 = misc2; | ||
1822 | /* order matters! */ | ||
1823 | if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) | ||
1824 | rdev->pm.power_state[state_index].type = | ||
1825 | POWER_STATE_TYPE_POWERSAVE; | ||
1826 | if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) | ||
1827 | rdev->pm.power_state[state_index].type = | ||
1828 | POWER_STATE_TYPE_BATTERY; | ||
1829 | if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) | ||
1830 | rdev->pm.power_state[state_index].type = | ||
1831 | POWER_STATE_TYPE_BATTERY; | ||
1832 | if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) | ||
1833 | rdev->pm.power_state[state_index].type = | ||
1834 | POWER_STATE_TYPE_BALANCED; | ||
1835 | if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { | ||
1836 | rdev->pm.power_state[state_index].type = | ||
1837 | POWER_STATE_TYPE_PERFORMANCE; | ||
1838 | rdev->pm.power_state[state_index].flags &= | ||
1839 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1840 | } | ||
1841 | if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) | ||
1842 | rdev->pm.power_state[state_index].type = | ||
1843 | POWER_STATE_TYPE_BALANCED; | ||
1844 | if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT) | ||
1845 | rdev->pm.power_state[state_index].flags &= | ||
1846 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1847 | if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { | ||
1848 | rdev->pm.power_state[state_index].type = | ||
1849 | POWER_STATE_TYPE_DEFAULT; | ||
1850 | rdev->pm.default_power_state_index = state_index; | ||
1851 | rdev->pm.power_state[state_index].default_clock_mode = | ||
1852 | &rdev->pm.power_state[state_index].clock_info[0]; | ||
1853 | rdev->pm.power_state[state_index].flags &= | ||
1854 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1855 | } else if (state_index == 0) { | ||
1856 | rdev->pm.power_state[state_index].clock_info[0].flags |= | ||
1857 | RADEON_PM_MODE_NO_DISPLAY; | ||
1858 | } | ||
1859 | state_index++; | ||
1860 | break; | ||
1861 | case 3: | ||
1862 | rdev->pm.power_state[state_index].num_clock_modes = 1; | ||
1863 | rdev->pm.power_state[state_index].clock_info[0].mclk = | ||
1864 | le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); | ||
1865 | rdev->pm.power_state[state_index].clock_info[0].sclk = | ||
1866 | le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock); | ||
1867 | /* skip invalid modes */ | ||
1868 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || | ||
1869 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) | ||
1870 | continue; | ||
1871 | rdev->pm.power_state[state_index].pcie_lanes = | ||
1872 | power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; | ||
1873 | misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); | ||
1874 | misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); | ||
1875 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || | ||
1876 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | ||
1877 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
1878 | VOLTAGE_GPIO; | ||
1879 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | ||
1880 | radeon_lookup_gpio(rdev, | ||
1881 | power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex); | ||
1882 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) | ||
1883 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
1884 | true; | ||
1885 | else | ||
1886 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
1887 | false; | ||
1888 | } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { | ||
1889 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
1890 | VOLTAGE_VDDC; | ||
1891 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = | ||
1892 | power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex; | ||
1893 | if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) { | ||
1894 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled = | ||
1895 | true; | ||
1896 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id = | ||
1897 | power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; | ||
1898 | } | ||
1899 | } | ||
1900 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1901 | rdev->pm.power_state[state_index].misc = misc; | ||
1902 | rdev->pm.power_state[state_index].misc2 = misc2; | ||
1903 | /* order matters! */ | ||
1904 | if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) | ||
1905 | rdev->pm.power_state[state_index].type = | ||
1906 | POWER_STATE_TYPE_POWERSAVE; | ||
1907 | if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) | ||
1908 | rdev->pm.power_state[state_index].type = | ||
1909 | POWER_STATE_TYPE_BATTERY; | ||
1910 | if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) | ||
1911 | rdev->pm.power_state[state_index].type = | ||
1912 | POWER_STATE_TYPE_BATTERY; | ||
1913 | if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) | ||
1914 | rdev->pm.power_state[state_index].type = | ||
1915 | POWER_STATE_TYPE_BALANCED; | ||
1916 | if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { | ||
1917 | rdev->pm.power_state[state_index].type = | ||
1918 | POWER_STATE_TYPE_PERFORMANCE; | ||
1919 | rdev->pm.power_state[state_index].flags &= | ||
1920 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
1921 | } | ||
1922 | if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) | ||
1923 | rdev->pm.power_state[state_index].type = | ||
1924 | POWER_STATE_TYPE_BALANCED; | ||
1925 | if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { | ||
1926 | rdev->pm.power_state[state_index].type = | ||
1927 | POWER_STATE_TYPE_DEFAULT; | ||
1928 | rdev->pm.default_power_state_index = state_index; | ||
1929 | rdev->pm.power_state[state_index].default_clock_mode = | ||
1930 | &rdev->pm.power_state[state_index].clock_info[0]; | ||
1931 | } else if (state_index == 0) { | ||
1932 | rdev->pm.power_state[state_index].clock_info[0].flags |= | ||
1933 | RADEON_PM_MODE_NO_DISPLAY; | ||
1934 | } | ||
1935 | state_index++; | ||
1936 | break; | ||
1937 | } | ||
1938 | } | 2069 | } |
1939 | /* last mode is usually default */ | 2070 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; |
1940 | if (rdev->pm.default_power_state_index == -1) { | 2071 | radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2); |
1941 | rdev->pm.power_state[state_index - 1].type = | 2072 | state_index++; |
1942 | POWER_STATE_TYPE_DEFAULT; | 2073 | break; |
1943 | rdev->pm.default_power_state_index = state_index - 1; | 2074 | case 3: |
1944 | rdev->pm.power_state[state_index - 1].default_clock_mode = | 2075 | rdev->pm.power_state[state_index].num_clock_modes = 1; |
1945 | &rdev->pm.power_state[state_index - 1].clock_info[0]; | 2076 | rdev->pm.power_state[state_index].clock_info[0].mclk = |
1946 | rdev->pm.power_state[state_index].flags &= | 2077 | le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); |
1947 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | 2078 | rdev->pm.power_state[state_index].clock_info[0].sclk = |
1948 | rdev->pm.power_state[state_index].misc = 0; | 2079 | le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock); |
1949 | rdev->pm.power_state[state_index].misc2 = 0; | 2080 | /* skip invalid modes */ |
2081 | if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || | ||
2082 | (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) | ||
2083 | continue; | ||
2084 | rdev->pm.power_state[state_index].pcie_lanes = | ||
2085 | power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; | ||
2086 | misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); | ||
2087 | misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); | ||
2088 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || | ||
2089 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | ||
2090 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
2091 | VOLTAGE_GPIO; | ||
2092 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | ||
2093 | radeon_lookup_gpio(rdev, | ||
2094 | power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex); | ||
2095 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) | ||
2096 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
2097 | true; | ||
2098 | else | ||
2099 | rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = | ||
2100 | false; | ||
2101 | } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { | ||
2102 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | ||
2103 | VOLTAGE_VDDC; | ||
2104 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = | ||
2105 | power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex; | ||
2106 | if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) { | ||
2107 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled = | ||
2108 | true; | ||
2109 | rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id = | ||
2110 | power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; | ||
2111 | } | ||
1950 | } | 2112 | } |
2113 | rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
2114 | radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2); | ||
2115 | state_index++; | ||
2116 | break; | ||
2117 | } | ||
2118 | } | ||
2119 | /* last mode is usually default */ | ||
2120 | if (rdev->pm.default_power_state_index == -1) { | ||
2121 | rdev->pm.power_state[state_index - 1].type = | ||
2122 | POWER_STATE_TYPE_DEFAULT; | ||
2123 | rdev->pm.default_power_state_index = state_index - 1; | ||
2124 | rdev->pm.power_state[state_index - 1].default_clock_mode = | ||
2125 | &rdev->pm.power_state[state_index - 1].clock_info[0]; | ||
2126 | rdev->pm.power_state[state_index].flags &= | ||
2127 | ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
2128 | rdev->pm.power_state[state_index].misc = 0; | ||
2129 | rdev->pm.power_state[state_index].misc2 = 0; | ||
2130 | } | ||
2131 | return state_index; | ||
2132 | } | ||
2133 | |||
2134 | static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev, | ||
2135 | ATOM_PPLIB_THERMALCONTROLLER *controller) | ||
2136 | { | ||
2137 | struct radeon_i2c_bus_rec i2c_bus; | ||
2138 | |||
2139 | /* add the i2c bus for thermal/fan chip */ | ||
2140 | if (controller->ucType > 0) { | ||
2141 | if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { | ||
2142 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
2143 | (controller->ucFanParameters & | ||
2144 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2145 | rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; | ||
2146 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { | ||
2147 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
2148 | (controller->ucFanParameters & | ||
2149 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2150 | rdev->pm.int_thermal_type = THERMAL_TYPE_RV770; | ||
2151 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { | ||
2152 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
2153 | (controller->ucFanParameters & | ||
2154 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2155 | rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; | ||
2156 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { | ||
2157 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
2158 | (controller->ucFanParameters & | ||
2159 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2160 | rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO; | ||
2161 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { | ||
2162 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
2163 | (controller->ucFanParameters & | ||
2164 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2165 | rdev->pm.int_thermal_type = THERMAL_TYPE_NI; | ||
2166 | } else if ((controller->ucType == | ||
2167 | ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || | ||
2168 | (controller->ucType == | ||
2169 | ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) || | ||
2170 | (controller->ucType == | ||
2171 | ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) { | ||
2172 | DRM_INFO("Special thermal controller config\n"); | ||
1951 | } else { | 2173 | } else { |
1952 | int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo); | 2174 | DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", |
1953 | uint8_t fw_frev, fw_crev; | 2175 | pp_lib_thermal_controller_names[controller->ucType], |
1954 | uint16_t fw_data_offset, vddc = 0; | 2176 | controller->ucI2cAddress >> 1, |
1955 | union firmware_info *firmware_info; | 2177 | (controller->ucFanParameters & |
1956 | ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController; | 2178 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); |
1957 | 2179 | i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); | |
1958 | if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL, | 2180 | rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); |
1959 | &fw_frev, &fw_crev, &fw_data_offset)) { | 2181 | if (rdev->pm.i2c_bus) { |
1960 | firmware_info = | 2182 | struct i2c_board_info info = { }; |
1961 | (union firmware_info *)(mode_info->atom_context->bios + | 2183 | const char *name = pp_lib_thermal_controller_names[controller->ucType]; |
1962 | fw_data_offset); | 2184 | info.addr = controller->ucI2cAddress >> 1; |
1963 | vddc = firmware_info->info_14.usBootUpVDDCVoltage; | 2185 | strlcpy(info.type, name, sizeof(info.type)); |
2186 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); | ||
1964 | } | 2187 | } |
2188 | } | ||
2189 | } | ||
2190 | } | ||
1965 | 2191 | ||
1966 | /* add the i2c bus for thermal/fan chip */ | 2192 | static void radeon_atombios_get_default_voltages(struct radeon_device *rdev, |
1967 | if (controller->ucType > 0) { | 2193 | u16 *vddc, u16 *vddci) |
1968 | if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { | 2194 | { |
1969 | DRM_INFO("Internal thermal controller %s fan control\n", | 2195 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
1970 | (controller->ucFanParameters & | 2196 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); |
1971 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | 2197 | u8 frev, crev; |
1972 | rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; | 2198 | u16 data_offset; |
1973 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { | 2199 | union firmware_info *firmware_info; |
1974 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
1975 | (controller->ucFanParameters & | ||
1976 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
1977 | rdev->pm.int_thermal_type = THERMAL_TYPE_RV770; | ||
1978 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { | ||
1979 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
1980 | (controller->ucFanParameters & | ||
1981 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
1982 | rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; | ||
1983 | } else if ((controller->ucType == | ||
1984 | ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || | ||
1985 | (controller->ucType == | ||
1986 | ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) { | ||
1987 | DRM_INFO("Special thermal controller config\n"); | ||
1988 | } else { | ||
1989 | DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", | ||
1990 | pp_lib_thermal_controller_names[controller->ucType], | ||
1991 | controller->ucI2cAddress >> 1, | ||
1992 | (controller->ucFanParameters & | ||
1993 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
1994 | i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); | ||
1995 | rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); | ||
1996 | if (rdev->pm.i2c_bus) { | ||
1997 | struct i2c_board_info info = { }; | ||
1998 | const char *name = pp_lib_thermal_controller_names[controller->ucType]; | ||
1999 | info.addr = controller->ucI2cAddress >> 1; | ||
2000 | strlcpy(info.type, name, sizeof(info.type)); | ||
2001 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); | ||
2002 | } | ||
2003 | 2200 | ||
2004 | } | 2201 | *vddc = 0; |
2005 | } | 2202 | *vddci = 0; |
2006 | /* first mode is usually default, followed by low to high */ | 2203 | |
2007 | for (i = 0; i < power_info->info_4.ucNumStates; i++) { | 2204 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
2008 | mode_index = 0; | 2205 | &frev, &crev, &data_offset)) { |
2009 | power_state = (struct _ATOM_PPLIB_STATE *) | 2206 | firmware_info = |
2010 | (mode_info->atom_context->bios + | 2207 | (union firmware_info *)(mode_info->atom_context->bios + |
2011 | data_offset + | 2208 | data_offset); |
2012 | le16_to_cpu(power_info->info_4.usStateArrayOffset) + | 2209 | *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); |
2013 | i * power_info->info_4.ucStateEntrySize); | 2210 | if ((frev == 2) && (crev >= 2)) |
2014 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) | 2211 | *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage); |
2015 | (mode_info->atom_context->bios + | 2212 | } |
2016 | data_offset + | 2213 | } |
2017 | le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) + | 2214 | |
2018 | (power_state->ucNonClockStateIndex * | 2215 | static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev, |
2019 | power_info->info_4.ucNonClockSize)); | 2216 | int state_index, int mode_index, |
2020 | for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) { | 2217 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info) |
2021 | if (rdev->flags & RADEON_IS_IGP) { | 2218 | { |
2022 | struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info = | 2219 | int j; |
2023 | (struct _ATOM_PPLIB_RS780_CLOCK_INFO *) | 2220 | u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); |
2024 | (mode_info->atom_context->bios + | 2221 | u32 misc2 = le16_to_cpu(non_clock_info->usClassification); |
2025 | data_offset + | 2222 | u16 vddc, vddci; |
2026 | le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + | 2223 | |
2027 | (power_state->ucClockStateIndices[j] * | 2224 | radeon_atombios_get_default_voltages(rdev, &vddc, &vddci); |
2028 | power_info->info_4.ucClockInfoSize)); | 2225 | |
2029 | sclk = le16_to_cpu(clock_info->usLowEngineClockLow); | 2226 | rdev->pm.power_state[state_index].misc = misc; |
2030 | sclk |= clock_info->ucLowEngineClockHigh << 16; | 2227 | rdev->pm.power_state[state_index].misc2 = misc2; |
2031 | rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; | 2228 | rdev->pm.power_state[state_index].pcie_lanes = |
2032 | /* skip invalid modes */ | 2229 | ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> |
2033 | if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) | 2230 | ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; |
2034 | continue; | 2231 | switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { |
2035 | /* voltage works differently on IGPs */ | 2232 | case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: |
2036 | mode_index++; | 2233 | rdev->pm.power_state[state_index].type = |
2037 | } else if (ASIC_IS_DCE4(rdev)) { | 2234 | POWER_STATE_TYPE_BATTERY; |
2038 | struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info = | 2235 | break; |
2039 | (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *) | 2236 | case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: |
2040 | (mode_info->atom_context->bios + | 2237 | rdev->pm.power_state[state_index].type = |
2041 | data_offset + | 2238 | POWER_STATE_TYPE_BALANCED; |
2042 | le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + | 2239 | break; |
2043 | (power_state->ucClockStateIndices[j] * | 2240 | case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: |
2044 | power_info->info_4.ucClockInfoSize)); | 2241 | rdev->pm.power_state[state_index].type = |
2045 | sclk = le16_to_cpu(clock_info->usEngineClockLow); | 2242 | POWER_STATE_TYPE_PERFORMANCE; |
2046 | sclk |= clock_info->ucEngineClockHigh << 16; | 2243 | break; |
2047 | mclk = le16_to_cpu(clock_info->usMemoryClockLow); | 2244 | case ATOM_PPLIB_CLASSIFICATION_UI_NONE: |
2048 | mclk |= clock_info->ucMemoryClockHigh << 16; | 2245 | if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) |
2049 | rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; | 2246 | rdev->pm.power_state[state_index].type = |
2050 | rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; | 2247 | POWER_STATE_TYPE_PERFORMANCE; |
2051 | /* skip invalid modes */ | 2248 | break; |
2052 | if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || | 2249 | } |
2053 | (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) | 2250 | rdev->pm.power_state[state_index].flags = 0; |
2054 | continue; | 2251 | if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) |
2055 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = | 2252 | rdev->pm.power_state[state_index].flags |= |
2056 | VOLTAGE_SW; | 2253 | RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; |
2057 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | 2254 | if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) { |
2058 | clock_info->usVDDC; | 2255 | rdev->pm.power_state[state_index].type = |
2059 | /* XXX usVDDCI */ | 2256 | POWER_STATE_TYPE_DEFAULT; |
2060 | mode_index++; | 2257 | rdev->pm.default_power_state_index = state_index; |
2061 | } else { | 2258 | rdev->pm.power_state[state_index].default_clock_mode = |
2062 | struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info = | 2259 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; |
2063 | (struct _ATOM_PPLIB_R600_CLOCK_INFO *) | 2260 | if (ASIC_IS_DCE5(rdev)) { |
2064 | (mode_info->atom_context->bios + | 2261 | /* NI chips post without MC ucode, so default clocks are strobe mode only */ |
2065 | data_offset + | 2262 | rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; |
2066 | le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + | 2263 | rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; |
2067 | (power_state->ucClockStateIndices[j] * | 2264 | rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage; |
2068 | power_info->info_4.ucClockInfoSize)); | 2265 | rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci; |
2069 | sclk = le16_to_cpu(clock_info->usEngineClockLow); | 2266 | } else { |
2070 | sclk |= clock_info->ucEngineClockHigh << 16; | 2267 | /* patch the table values with the default slck/mclk from firmware info */ |
2071 | mclk = le16_to_cpu(clock_info->usMemoryClockLow); | 2268 | for (j = 0; j < mode_index; j++) { |
2072 | mclk |= clock_info->ucMemoryClockHigh << 16; | 2269 | rdev->pm.power_state[state_index].clock_info[j].mclk = |
2073 | rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; | 2270 | rdev->clock.default_mclk; |
2074 | rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; | 2271 | rdev->pm.power_state[state_index].clock_info[j].sclk = |
2075 | /* skip invalid modes */ | 2272 | rdev->clock.default_sclk; |
2076 | if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || | 2273 | if (vddc) |
2077 | (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) | 2274 | rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = |
2078 | continue; | 2275 | vddc; |
2079 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = | ||
2080 | VOLTAGE_SW; | ||
2081 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | ||
2082 | clock_info->usVDDC; | ||
2083 | mode_index++; | ||
2084 | } | ||
2085 | } | ||
2086 | rdev->pm.power_state[state_index].num_clock_modes = mode_index; | ||
2087 | if (mode_index) { | ||
2088 | misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); | ||
2089 | misc2 = le16_to_cpu(non_clock_info->usClassification); | ||
2090 | rdev->pm.power_state[state_index].misc = misc; | ||
2091 | rdev->pm.power_state[state_index].misc2 = misc2; | ||
2092 | rdev->pm.power_state[state_index].pcie_lanes = | ||
2093 | ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> | ||
2094 | ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; | ||
2095 | switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { | ||
2096 | case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: | ||
2097 | rdev->pm.power_state[state_index].type = | ||
2098 | POWER_STATE_TYPE_BATTERY; | ||
2099 | break; | ||
2100 | case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: | ||
2101 | rdev->pm.power_state[state_index].type = | ||
2102 | POWER_STATE_TYPE_BALANCED; | ||
2103 | break; | ||
2104 | case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: | ||
2105 | rdev->pm.power_state[state_index].type = | ||
2106 | POWER_STATE_TYPE_PERFORMANCE; | ||
2107 | break; | ||
2108 | case ATOM_PPLIB_CLASSIFICATION_UI_NONE: | ||
2109 | if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) | ||
2110 | rdev->pm.power_state[state_index].type = | ||
2111 | POWER_STATE_TYPE_PERFORMANCE; | ||
2112 | break; | ||
2113 | } | ||
2114 | rdev->pm.power_state[state_index].flags = 0; | ||
2115 | if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) | ||
2116 | rdev->pm.power_state[state_index].flags |= | ||
2117 | RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; | ||
2118 | if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) { | ||
2119 | rdev->pm.power_state[state_index].type = | ||
2120 | POWER_STATE_TYPE_DEFAULT; | ||
2121 | rdev->pm.default_power_state_index = state_index; | ||
2122 | rdev->pm.power_state[state_index].default_clock_mode = | ||
2123 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; | ||
2124 | /* patch the table values with the default slck/mclk from firmware info */ | ||
2125 | for (j = 0; j < mode_index; j++) { | ||
2126 | rdev->pm.power_state[state_index].clock_info[j].mclk = | ||
2127 | rdev->clock.default_mclk; | ||
2128 | rdev->pm.power_state[state_index].clock_info[j].sclk = | ||
2129 | rdev->clock.default_sclk; | ||
2130 | if (vddc) | ||
2131 | rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = | ||
2132 | vddc; | ||
2133 | } | ||
2134 | } | ||
2135 | state_index++; | ||
2136 | } | ||
2137 | } | ||
2138 | /* if multiple clock modes, mark the lowest as no display */ | ||
2139 | for (i = 0; i < state_index; i++) { | ||
2140 | if (rdev->pm.power_state[i].num_clock_modes > 1) | ||
2141 | rdev->pm.power_state[i].clock_info[0].flags |= | ||
2142 | RADEON_PM_MODE_NO_DISPLAY; | ||
2143 | } | ||
2144 | /* first mode is usually default */ | ||
2145 | if (rdev->pm.default_power_state_index == -1) { | ||
2146 | rdev->pm.power_state[0].type = | ||
2147 | POWER_STATE_TYPE_DEFAULT; | ||
2148 | rdev->pm.default_power_state_index = 0; | ||
2149 | rdev->pm.power_state[0].default_clock_mode = | ||
2150 | &rdev->pm.power_state[0].clock_info[0]; | ||
2151 | } | 2276 | } |
2152 | } | 2277 | } |
2278 | } | ||
2279 | } | ||
2280 | |||
2281 | static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, | ||
2282 | int state_index, int mode_index, | ||
2283 | union pplib_clock_info *clock_info) | ||
2284 | { | ||
2285 | u32 sclk, mclk; | ||
2286 | |||
2287 | if (rdev->flags & RADEON_IS_IGP) { | ||
2288 | if (rdev->family >= CHIP_PALM) { | ||
2289 | sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); | ||
2290 | sclk |= clock_info->sumo.ucEngineClockHigh << 16; | ||
2291 | rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; | ||
2292 | } else { | ||
2293 | sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow); | ||
2294 | sclk |= clock_info->rs780.ucLowEngineClockHigh << 16; | ||
2295 | rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; | ||
2296 | } | ||
2297 | } else if (ASIC_IS_DCE4(rdev)) { | ||
2298 | sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow); | ||
2299 | sclk |= clock_info->evergreen.ucEngineClockHigh << 16; | ||
2300 | mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow); | ||
2301 | mclk |= clock_info->evergreen.ucMemoryClockHigh << 16; | ||
2302 | rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; | ||
2303 | rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; | ||
2304 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = | ||
2305 | VOLTAGE_SW; | ||
2306 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | ||
2307 | le16_to_cpu(clock_info->evergreen.usVDDC); | ||
2308 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci = | ||
2309 | le16_to_cpu(clock_info->evergreen.usVDDCI); | ||
2153 | } else { | 2310 | } else { |
2154 | /* add the default mode */ | 2311 | sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); |
2155 | rdev->pm.power_state[state_index].type = | 2312 | sclk |= clock_info->r600.ucEngineClockHigh << 16; |
2313 | mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow); | ||
2314 | mclk |= clock_info->r600.ucMemoryClockHigh << 16; | ||
2315 | rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; | ||
2316 | rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; | ||
2317 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = | ||
2318 | VOLTAGE_SW; | ||
2319 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | ||
2320 | le16_to_cpu(clock_info->r600.usVDDC); | ||
2321 | } | ||
2322 | |||
2323 | /* patch up vddc if necessary */ | ||
2324 | if (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) { | ||
2325 | u16 vddc; | ||
2326 | |||
2327 | if (radeon_atom_get_max_vddc(rdev, &vddc) == 0) | ||
2328 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc; | ||
2329 | } | ||
2330 | |||
2331 | if (rdev->flags & RADEON_IS_IGP) { | ||
2332 | /* skip invalid modes */ | ||
2333 | if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) | ||
2334 | return false; | ||
2335 | } else { | ||
2336 | /* skip invalid modes */ | ||
2337 | if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || | ||
2338 | (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) | ||
2339 | return false; | ||
2340 | } | ||
2341 | return true; | ||
2342 | } | ||
2343 | |||
2344 | static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) | ||
2345 | { | ||
2346 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
2347 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; | ||
2348 | union pplib_power_state *power_state; | ||
2349 | int i, j; | ||
2350 | int state_index = 0, mode_index = 0; | ||
2351 | union pplib_clock_info *clock_info; | ||
2352 | bool valid; | ||
2353 | union power_info *power_info; | ||
2354 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | ||
2355 | u16 data_offset; | ||
2356 | u8 frev, crev; | ||
2357 | |||
2358 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
2359 | &frev, &crev, &data_offset)) | ||
2360 | return state_index; | ||
2361 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | ||
2362 | |||
2363 | radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); | ||
2364 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * | ||
2365 | power_info->pplib.ucNumStates, GFP_KERNEL); | ||
2366 | if (!rdev->pm.power_state) | ||
2367 | return state_index; | ||
2368 | /* first mode is usually default, followed by low to high */ | ||
2369 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { | ||
2370 | mode_index = 0; | ||
2371 | power_state = (union pplib_power_state *) | ||
2372 | (mode_info->atom_context->bios + data_offset + | ||
2373 | le16_to_cpu(power_info->pplib.usStateArrayOffset) + | ||
2374 | i * power_info->pplib.ucStateEntrySize); | ||
2375 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) | ||
2376 | (mode_info->atom_context->bios + data_offset + | ||
2377 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + | ||
2378 | (power_state->v1.ucNonClockStateIndex * | ||
2379 | power_info->pplib.ucNonClockSize)); | ||
2380 | for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { | ||
2381 | clock_info = (union pplib_clock_info *) | ||
2382 | (mode_info->atom_context->bios + data_offset + | ||
2383 | le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + | ||
2384 | (power_state->v1.ucClockStateIndices[j] * | ||
2385 | power_info->pplib.ucClockInfoSize)); | ||
2386 | valid = radeon_atombios_parse_pplib_clock_info(rdev, | ||
2387 | state_index, mode_index, | ||
2388 | clock_info); | ||
2389 | if (valid) | ||
2390 | mode_index++; | ||
2391 | } | ||
2392 | rdev->pm.power_state[state_index].num_clock_modes = mode_index; | ||
2393 | if (mode_index) { | ||
2394 | radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index, | ||
2395 | non_clock_info); | ||
2396 | state_index++; | ||
2397 | } | ||
2398 | } | ||
2399 | /* if multiple clock modes, mark the lowest as no display */ | ||
2400 | for (i = 0; i < state_index; i++) { | ||
2401 | if (rdev->pm.power_state[i].num_clock_modes > 1) | ||
2402 | rdev->pm.power_state[i].clock_info[0].flags |= | ||
2403 | RADEON_PM_MODE_NO_DISPLAY; | ||
2404 | } | ||
2405 | /* first mode is usually default */ | ||
2406 | if (rdev->pm.default_power_state_index == -1) { | ||
2407 | rdev->pm.power_state[0].type = | ||
2156 | POWER_STATE_TYPE_DEFAULT; | 2408 | POWER_STATE_TYPE_DEFAULT; |
2157 | rdev->pm.power_state[state_index].num_clock_modes = 1; | 2409 | rdev->pm.default_power_state_index = 0; |
2158 | rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; | 2410 | rdev->pm.power_state[0].default_clock_mode = |
2159 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; | 2411 | &rdev->pm.power_state[0].clock_info[0]; |
2160 | rdev->pm.power_state[state_index].default_clock_mode = | 2412 | } |
2161 | &rdev->pm.power_state[state_index].clock_info[0]; | 2413 | return state_index; |
2162 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 2414 | } |
2163 | rdev->pm.power_state[state_index].pcie_lanes = 16; | 2415 | |
2164 | rdev->pm.default_power_state_index = state_index; | 2416 | static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) |
2165 | rdev->pm.power_state[state_index].flags = 0; | 2417 | { |
2166 | state_index++; | 2418 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
2419 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; | ||
2420 | union pplib_power_state *power_state; | ||
2421 | int i, j, non_clock_array_index, clock_array_index; | ||
2422 | int state_index = 0, mode_index = 0; | ||
2423 | union pplib_clock_info *clock_info; | ||
2424 | struct StateArray *state_array; | ||
2425 | struct ClockInfoArray *clock_info_array; | ||
2426 | struct NonClockInfoArray *non_clock_info_array; | ||
2427 | bool valid; | ||
2428 | union power_info *power_info; | ||
2429 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | ||
2430 | u16 data_offset; | ||
2431 | u8 frev, crev; | ||
2432 | |||
2433 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
2434 | &frev, &crev, &data_offset)) | ||
2435 | return state_index; | ||
2436 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | ||
2437 | |||
2438 | radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); | ||
2439 | state_array = (struct StateArray *) | ||
2440 | (mode_info->atom_context->bios + data_offset + | ||
2441 | le16_to_cpu(power_info->pplib.usStateArrayOffset)); | ||
2442 | clock_info_array = (struct ClockInfoArray *) | ||
2443 | (mode_info->atom_context->bios + data_offset + | ||
2444 | le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); | ||
2445 | non_clock_info_array = (struct NonClockInfoArray *) | ||
2446 | (mode_info->atom_context->bios + data_offset + | ||
2447 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); | ||
2448 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * | ||
2449 | state_array->ucNumEntries, GFP_KERNEL); | ||
2450 | if (!rdev->pm.power_state) | ||
2451 | return state_index; | ||
2452 | for (i = 0; i < state_array->ucNumEntries; i++) { | ||
2453 | mode_index = 0; | ||
2454 | power_state = (union pplib_power_state *)&state_array->states[i]; | ||
2455 | /* XXX this might be an inagua bug... */ | ||
2456 | non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ | ||
2457 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) | ||
2458 | &non_clock_info_array->nonClockInfo[non_clock_array_index]; | ||
2459 | for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { | ||
2460 | clock_array_index = power_state->v2.clockInfoIndex[j]; | ||
2461 | /* XXX this might be an inagua bug... */ | ||
2462 | if (clock_array_index >= clock_info_array->ucNumEntries) | ||
2463 | continue; | ||
2464 | clock_info = (union pplib_clock_info *) | ||
2465 | &clock_info_array->clockInfo[clock_array_index]; | ||
2466 | valid = radeon_atombios_parse_pplib_clock_info(rdev, | ||
2467 | state_index, mode_index, | ||
2468 | clock_info); | ||
2469 | if (valid) | ||
2470 | mode_index++; | ||
2471 | } | ||
2472 | rdev->pm.power_state[state_index].num_clock_modes = mode_index; | ||
2473 | if (mode_index) { | ||
2474 | radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index, | ||
2475 | non_clock_info); | ||
2476 | state_index++; | ||
2477 | } | ||
2478 | } | ||
2479 | /* if multiple clock modes, mark the lowest as no display */ | ||
2480 | for (i = 0; i < state_index; i++) { | ||
2481 | if (rdev->pm.power_state[i].num_clock_modes > 1) | ||
2482 | rdev->pm.power_state[i].clock_info[0].flags |= | ||
2483 | RADEON_PM_MODE_NO_DISPLAY; | ||
2484 | } | ||
2485 | /* first mode is usually default */ | ||
2486 | if (rdev->pm.default_power_state_index == -1) { | ||
2487 | rdev->pm.power_state[0].type = | ||
2488 | POWER_STATE_TYPE_DEFAULT; | ||
2489 | rdev->pm.default_power_state_index = 0; | ||
2490 | rdev->pm.power_state[0].default_clock_mode = | ||
2491 | &rdev->pm.power_state[0].clock_info[0]; | ||
2492 | } | ||
2493 | return state_index; | ||
2494 | } | ||
2495 | |||
2496 | void radeon_atombios_get_power_modes(struct radeon_device *rdev) | ||
2497 | { | ||
2498 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
2499 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | ||
2500 | u16 data_offset; | ||
2501 | u8 frev, crev; | ||
2502 | int state_index = 0; | ||
2503 | |||
2504 | rdev->pm.default_power_state_index = -1; | ||
2505 | |||
2506 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
2507 | &frev, &crev, &data_offset)) { | ||
2508 | switch (frev) { | ||
2509 | case 1: | ||
2510 | case 2: | ||
2511 | case 3: | ||
2512 | state_index = radeon_atombios_parse_power_table_1_3(rdev); | ||
2513 | break; | ||
2514 | case 4: | ||
2515 | case 5: | ||
2516 | state_index = radeon_atombios_parse_power_table_4_5(rdev); | ||
2517 | break; | ||
2518 | case 6: | ||
2519 | state_index = radeon_atombios_parse_power_table_6(rdev); | ||
2520 | break; | ||
2521 | default: | ||
2522 | break; | ||
2523 | } | ||
2524 | } else { | ||
2525 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); | ||
2526 | if (rdev->pm.power_state) { | ||
2527 | /* add the default mode */ | ||
2528 | rdev->pm.power_state[state_index].type = | ||
2529 | POWER_STATE_TYPE_DEFAULT; | ||
2530 | rdev->pm.power_state[state_index].num_clock_modes = 1; | ||
2531 | rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; | ||
2532 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; | ||
2533 | rdev->pm.power_state[state_index].default_clock_mode = | ||
2534 | &rdev->pm.power_state[state_index].clock_info[0]; | ||
2535 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | ||
2536 | rdev->pm.power_state[state_index].pcie_lanes = 16; | ||
2537 | rdev->pm.default_power_state_index = state_index; | ||
2538 | rdev->pm.power_state[state_index].flags = 0; | ||
2539 | state_index++; | ||
2540 | } | ||
2167 | } | 2541 | } |
2168 | 2542 | ||
2169 | rdev->pm.num_power_states = state_index; | 2543 | rdev->pm.num_power_states = state_index; |
@@ -2189,7 +2563,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev) | |||
2189 | int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); | 2563 | int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); |
2190 | 2564 | ||
2191 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2565 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2192 | return args.ulReturnEngineClock; | 2566 | return le32_to_cpu(args.ulReturnEngineClock); |
2193 | } | 2567 | } |
2194 | 2568 | ||
2195 | uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) | 2569 | uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) |
@@ -2198,7 +2572,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) | |||
2198 | int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); | 2572 | int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); |
2199 | 2573 | ||
2200 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2574 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2201 | return args.ulReturnMemoryClock; | 2575 | return le32_to_cpu(args.ulReturnMemoryClock); |
2202 | } | 2576 | } |
2203 | 2577 | ||
2204 | void radeon_atom_set_engine_clock(struct radeon_device *rdev, | 2578 | void radeon_atom_set_engine_clock(struct radeon_device *rdev, |
@@ -2207,7 +2581,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev, | |||
2207 | SET_ENGINE_CLOCK_PS_ALLOCATION args; | 2581 | SET_ENGINE_CLOCK_PS_ALLOCATION args; |
2208 | int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock); | 2582 | int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock); |
2209 | 2583 | ||
2210 | args.ulTargetEngineClock = eng_clock; /* 10 khz */ | 2584 | args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */ |
2211 | 2585 | ||
2212 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2586 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2213 | } | 2587 | } |
@@ -2221,7 +2595,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, | |||
2221 | if (rdev->flags & RADEON_IS_IGP) | 2595 | if (rdev->flags & RADEON_IS_IGP) |
2222 | return; | 2596 | return; |
2223 | 2597 | ||
2224 | args.ulTargetMemoryClock = mem_clock; /* 10 khz */ | 2598 | args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */ |
2225 | 2599 | ||
2226 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2600 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2227 | } | 2601 | } |
@@ -2232,25 +2606,29 @@ union set_voltage { | |||
2232 | struct _SET_VOLTAGE_PARAMETERS_V2 v2; | 2606 | struct _SET_VOLTAGE_PARAMETERS_V2 v2; |
2233 | }; | 2607 | }; |
2234 | 2608 | ||
2235 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level) | 2609 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type) |
2236 | { | 2610 | { |
2237 | union set_voltage args; | 2611 | union set_voltage args; |
2238 | int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); | 2612 | int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); |
2239 | u8 frev, crev, volt_index = level; | 2613 | u8 frev, crev, volt_index = voltage_level; |
2240 | 2614 | ||
2241 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | 2615 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
2242 | return; | 2616 | return; |
2243 | 2617 | ||
2618 | /* 0xff01 is a flag rather then an actual voltage */ | ||
2619 | if (voltage_level == 0xff01) | ||
2620 | return; | ||
2621 | |||
2244 | switch (crev) { | 2622 | switch (crev) { |
2245 | case 1: | 2623 | case 1: |
2246 | args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; | 2624 | args.v1.ucVoltageType = voltage_type; |
2247 | args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; | 2625 | args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; |
2248 | args.v1.ucVoltageIndex = volt_index; | 2626 | args.v1.ucVoltageIndex = volt_index; |
2249 | break; | 2627 | break; |
2250 | case 2: | 2628 | case 2: |
2251 | args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; | 2629 | args.v2.ucVoltageType = voltage_type; |
2252 | args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; | 2630 | args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; |
2253 | args.v2.usVoltageLevel = cpu_to_le16(level); | 2631 | args.v2.usVoltageLevel = cpu_to_le16(voltage_level); |
2254 | break; | 2632 | break; |
2255 | default: | 2633 | default: |
2256 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | 2634 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); |
@@ -2260,7 +2638,35 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level) | |||
2260 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2638 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2261 | } | 2639 | } |
2262 | 2640 | ||
2641 | int radeon_atom_get_max_vddc(struct radeon_device *rdev, | ||
2642 | u16 *voltage) | ||
2643 | { | ||
2644 | union set_voltage args; | ||
2645 | int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); | ||
2646 | u8 frev, crev; | ||
2647 | |||
2648 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
2649 | return -EINVAL; | ||
2650 | |||
2651 | switch (crev) { | ||
2652 | case 1: | ||
2653 | return -EINVAL; | ||
2654 | case 2: | ||
2655 | args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE; | ||
2656 | args.v2.ucVoltageMode = 0; | ||
2657 | args.v2.usVoltageLevel = 0; | ||
2263 | 2658 | ||
2659 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
2660 | |||
2661 | *voltage = le16_to_cpu(args.v2.usVoltageLevel); | ||
2662 | break; | ||
2663 | default: | ||
2664 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
2665 | return -EINVAL; | ||
2666 | } | ||
2667 | |||
2668 | return 0; | ||
2669 | } | ||
2264 | 2670 | ||
2265 | void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) | 2671 | void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) |
2266 | { | 2672 | { |
@@ -2279,7 +2685,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) | |||
2279 | bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; | 2685 | bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; |
2280 | 2686 | ||
2281 | /* tell the bios not to handle mode switching */ | 2687 | /* tell the bios not to handle mode switching */ |
2282 | bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE); | 2688 | bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH; |
2283 | 2689 | ||
2284 | if (rdev->family >= CHIP_R600) { | 2690 | if (rdev->family >= CHIP_R600) { |
2285 | WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); | 2691 | WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); |
@@ -2330,10 +2736,13 @@ void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock) | |||
2330 | else | 2736 | else |
2331 | bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); | 2737 | bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); |
2332 | 2738 | ||
2333 | if (lock) | 2739 | if (lock) { |
2334 | bios_6_scratch |= ATOM_S6_CRITICAL_STATE; | 2740 | bios_6_scratch |= ATOM_S6_CRITICAL_STATE; |
2335 | else | 2741 | bios_6_scratch &= ~ATOM_S6_ACC_MODE; |
2742 | } else { | ||
2336 | bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; | 2743 | bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; |
2744 | bios_6_scratch |= ATOM_S6_ACC_MODE; | ||
2745 | } | ||
2337 | 2746 | ||
2338 | if (rdev->family >= CHIP_R600) | 2747 | if (rdev->family >= CHIP_R600) |
2339 | WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); | 2748 | WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); |
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index ed5dfe58f29c..9d95792bea3e 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c | |||
@@ -15,6 +15,9 @@ | |||
15 | #define ATPX_VERSION 0 | 15 | #define ATPX_VERSION 0 |
16 | #define ATPX_GPU_PWR 2 | 16 | #define ATPX_GPU_PWR 2 |
17 | #define ATPX_MUX_SELECT 3 | 17 | #define ATPX_MUX_SELECT 3 |
18 | #define ATPX_I2C_MUX_SELECT 4 | ||
19 | #define ATPX_SWITCH_START 5 | ||
20 | #define ATPX_SWITCH_END 6 | ||
18 | 21 | ||
19 | #define ATPX_INTEGRATED 0 | 22 | #define ATPX_INTEGRATED 0 |
20 | #define ATPX_DISCRETE 1 | 23 | #define ATPX_DISCRETE 1 |
@@ -149,13 +152,35 @@ static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id) | |||
149 | return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id); | 152 | return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id); |
150 | } | 153 | } |
151 | 154 | ||
155 | static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id) | ||
156 | { | ||
157 | return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id); | ||
158 | } | ||
159 | |||
160 | static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id) | ||
161 | { | ||
162 | return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id); | ||
163 | } | ||
164 | |||
165 | static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id) | ||
166 | { | ||
167 | return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id); | ||
168 | } | ||
152 | 169 | ||
153 | static int radeon_atpx_switchto(enum vga_switcheroo_client_id id) | 170 | static int radeon_atpx_switchto(enum vga_switcheroo_client_id id) |
154 | { | 171 | { |
172 | int gpu_id; | ||
173 | |||
155 | if (id == VGA_SWITCHEROO_IGD) | 174 | if (id == VGA_SWITCHEROO_IGD) |
156 | radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 0); | 175 | gpu_id = ATPX_INTEGRATED; |
157 | else | 176 | else |
158 | radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 1); | 177 | gpu_id = ATPX_DISCRETE; |
178 | |||
179 | radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id); | ||
180 | radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id); | ||
181 | radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id); | ||
182 | radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id); | ||
183 | |||
159 | return 0; | 184 | return 0; |
160 | } | 185 | } |
161 | 186 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 7932dc4d6b90..10191d9372d8 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
41 | 41 | ||
42 | size = bsize; | 42 | size = bsize; |
43 | n = 1024; | 43 | n = 1024; |
44 | r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj); | 44 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); |
45 | if (r) { | 45 | if (r) { |
46 | goto out_cleanup; | 46 | goto out_cleanup; |
47 | } | 47 | } |
@@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
53 | if (r) { | 53 | if (r) { |
54 | goto out_cleanup; | 54 | goto out_cleanup; |
55 | } | 55 | } |
56 | r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj); | 56 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj); |
57 | if (r) { | 57 | if (r) { |
58 | goto out_cleanup; | 58 | goto out_cleanup; |
59 | } | 59 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 654787ec43f4..229a20f10e2b 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
@@ -104,7 +104,7 @@ static bool radeon_read_bios(struct radeon_device *rdev) | |||
104 | static bool radeon_atrm_get_bios(struct radeon_device *rdev) | 104 | static bool radeon_atrm_get_bios(struct radeon_device *rdev) |
105 | { | 105 | { |
106 | int ret; | 106 | int ret; |
107 | int size = 64 * 1024; | 107 | int size = 256 * 1024; |
108 | int i; | 108 | int i; |
109 | 109 | ||
110 | if (!radeon_atrm_supported(rdev->pdev)) | 110 | if (!radeon_atrm_supported(rdev->pdev)) |
@@ -130,6 +130,46 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) | |||
130 | } | 130 | } |
131 | return true; | 131 | return true; |
132 | } | 132 | } |
133 | |||
134 | static bool ni_read_disabled_bios(struct radeon_device *rdev) | ||
135 | { | ||
136 | u32 bus_cntl; | ||
137 | u32 d1vga_control; | ||
138 | u32 d2vga_control; | ||
139 | u32 vga_render_control; | ||
140 | u32 rom_cntl; | ||
141 | bool r; | ||
142 | |||
143 | bus_cntl = RREG32(R600_BUS_CNTL); | ||
144 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); | ||
145 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); | ||
146 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); | ||
147 | rom_cntl = RREG32(R600_ROM_CNTL); | ||
148 | |||
149 | /* enable the rom */ | ||
150 | WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); | ||
151 | /* Disable VGA mode */ | ||
152 | WREG32(AVIVO_D1VGA_CONTROL, | ||
153 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | ||
154 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); | ||
155 | WREG32(AVIVO_D2VGA_CONTROL, | ||
156 | (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | ||
157 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); | ||
158 | WREG32(AVIVO_VGA_RENDER_CONTROL, | ||
159 | (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); | ||
160 | WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE); | ||
161 | |||
162 | r = radeon_read_bios(rdev); | ||
163 | |||
164 | /* restore regs */ | ||
165 | WREG32(R600_BUS_CNTL, bus_cntl); | ||
166 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); | ||
167 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); | ||
168 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); | ||
169 | WREG32(R600_ROM_CNTL, rom_cntl); | ||
170 | return r; | ||
171 | } | ||
172 | |||
133 | static bool r700_read_disabled_bios(struct radeon_device *rdev) | 173 | static bool r700_read_disabled_bios(struct radeon_device *rdev) |
134 | { | 174 | { |
135 | uint32_t viph_control; | 175 | uint32_t viph_control; |
@@ -143,7 +183,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) | |||
143 | bool r; | 183 | bool r; |
144 | 184 | ||
145 | viph_control = RREG32(RADEON_VIPH_CONTROL); | 185 | viph_control = RREG32(RADEON_VIPH_CONTROL); |
146 | bus_cntl = RREG32(RADEON_BUS_CNTL); | 186 | bus_cntl = RREG32(R600_BUS_CNTL); |
147 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); | 187 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); |
148 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); | 188 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); |
149 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); | 189 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); |
@@ -152,7 +192,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) | |||
152 | /* disable VIP */ | 192 | /* disable VIP */ |
153 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); | 193 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); |
154 | /* enable the rom */ | 194 | /* enable the rom */ |
155 | WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); | 195 | WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); |
156 | /* Disable VGA mode */ | 196 | /* Disable VGA mode */ |
157 | WREG32(AVIVO_D1VGA_CONTROL, | 197 | WREG32(AVIVO_D1VGA_CONTROL, |
158 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | 198 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | |
@@ -191,7 +231,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) | |||
191 | cg_spll_status = RREG32(R600_CG_SPLL_STATUS); | 231 | cg_spll_status = RREG32(R600_CG_SPLL_STATUS); |
192 | } | 232 | } |
193 | WREG32(RADEON_VIPH_CONTROL, viph_control); | 233 | WREG32(RADEON_VIPH_CONTROL, viph_control); |
194 | WREG32(RADEON_BUS_CNTL, bus_cntl); | 234 | WREG32(R600_BUS_CNTL, bus_cntl); |
195 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); | 235 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); |
196 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); | 236 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); |
197 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); | 237 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); |
@@ -216,7 +256,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) | |||
216 | bool r; | 256 | bool r; |
217 | 257 | ||
218 | viph_control = RREG32(RADEON_VIPH_CONTROL); | 258 | viph_control = RREG32(RADEON_VIPH_CONTROL); |
219 | bus_cntl = RREG32(RADEON_BUS_CNTL); | 259 | bus_cntl = RREG32(R600_BUS_CNTL); |
220 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); | 260 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); |
221 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); | 261 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); |
222 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); | 262 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); |
@@ -231,7 +271,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) | |||
231 | /* disable VIP */ | 271 | /* disable VIP */ |
232 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); | 272 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); |
233 | /* enable the rom */ | 273 | /* enable the rom */ |
234 | WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); | 274 | WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); |
235 | /* Disable VGA mode */ | 275 | /* Disable VGA mode */ |
236 | WREG32(AVIVO_D1VGA_CONTROL, | 276 | WREG32(AVIVO_D1VGA_CONTROL, |
237 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | 277 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | |
@@ -262,7 +302,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) | |||
262 | 302 | ||
263 | /* restore regs */ | 303 | /* restore regs */ |
264 | WREG32(RADEON_VIPH_CONTROL, viph_control); | 304 | WREG32(RADEON_VIPH_CONTROL, viph_control); |
265 | WREG32(RADEON_BUS_CNTL, bus_cntl); | 305 | WREG32(R600_BUS_CNTL, bus_cntl); |
266 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); | 306 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); |
267 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); | 307 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); |
268 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); | 308 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); |
@@ -291,7 +331,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev) | |||
291 | 331 | ||
292 | seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1); | 332 | seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1); |
293 | viph_control = RREG32(RADEON_VIPH_CONTROL); | 333 | viph_control = RREG32(RADEON_VIPH_CONTROL); |
294 | bus_cntl = RREG32(RADEON_BUS_CNTL); | 334 | bus_cntl = RREG32(RV370_BUS_CNTL); |
295 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); | 335 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); |
296 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); | 336 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); |
297 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); | 337 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); |
@@ -310,7 +350,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev) | |||
310 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); | 350 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); |
311 | 351 | ||
312 | /* enable the rom */ | 352 | /* enable the rom */ |
313 | WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); | 353 | WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM)); |
314 | 354 | ||
315 | /* Disable VGA mode */ | 355 | /* Disable VGA mode */ |
316 | WREG32(AVIVO_D1VGA_CONTROL, | 356 | WREG32(AVIVO_D1VGA_CONTROL, |
@@ -327,7 +367,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev) | |||
327 | /* restore regs */ | 367 | /* restore regs */ |
328 | WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1); | 368 | WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1); |
329 | WREG32(RADEON_VIPH_CONTROL, viph_control); | 369 | WREG32(RADEON_VIPH_CONTROL, viph_control); |
330 | WREG32(RADEON_BUS_CNTL, bus_cntl); | 370 | WREG32(RV370_BUS_CNTL, bus_cntl); |
331 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); | 371 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); |
332 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); | 372 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); |
333 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); | 373 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); |
@@ -350,7 +390,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev) | |||
350 | 390 | ||
351 | seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1); | 391 | seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1); |
352 | viph_control = RREG32(RADEON_VIPH_CONTROL); | 392 | viph_control = RREG32(RADEON_VIPH_CONTROL); |
353 | bus_cntl = RREG32(RADEON_BUS_CNTL); | 393 | if (rdev->flags & RADEON_IS_PCIE) |
394 | bus_cntl = RREG32(RV370_BUS_CNTL); | ||
395 | else | ||
396 | bus_cntl = RREG32(RADEON_BUS_CNTL); | ||
354 | crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL); | 397 | crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL); |
355 | crtc2_gen_cntl = 0; | 398 | crtc2_gen_cntl = 0; |
356 | crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); | 399 | crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); |
@@ -372,7 +415,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev) | |||
372 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); | 415 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); |
373 | 416 | ||
374 | /* enable the rom */ | 417 | /* enable the rom */ |
375 | WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); | 418 | if (rdev->flags & RADEON_IS_PCIE) |
419 | WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM)); | ||
420 | else | ||
421 | WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); | ||
376 | 422 | ||
377 | /* Turn off mem requests and CRTC for both controllers */ | 423 | /* Turn off mem requests and CRTC for both controllers */ |
378 | WREG32(RADEON_CRTC_GEN_CNTL, | 424 | WREG32(RADEON_CRTC_GEN_CNTL, |
@@ -399,7 +445,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev) | |||
399 | /* restore regs */ | 445 | /* restore regs */ |
400 | WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1); | 446 | WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1); |
401 | WREG32(RADEON_VIPH_CONTROL, viph_control); | 447 | WREG32(RADEON_VIPH_CONTROL, viph_control); |
402 | WREG32(RADEON_BUS_CNTL, bus_cntl); | 448 | if (rdev->flags & RADEON_IS_PCIE) |
449 | WREG32(RV370_BUS_CNTL, bus_cntl); | ||
450 | else | ||
451 | WREG32(RADEON_BUS_CNTL, bus_cntl); | ||
403 | WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl); | 452 | WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl); |
404 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { | 453 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
405 | WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); | 454 | WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); |
@@ -415,6 +464,8 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev) | |||
415 | { | 464 | { |
416 | if (rdev->flags & RADEON_IS_IGP) | 465 | if (rdev->flags & RADEON_IS_IGP) |
417 | return igp_read_bios_from_vram(rdev); | 466 | return igp_read_bios_from_vram(rdev); |
467 | else if (rdev->family >= CHIP_BARTS) | ||
468 | return ni_read_disabled_bios(rdev); | ||
418 | else if (rdev->family >= CHIP_RV770) | 469 | else if (rdev->family >= CHIP_RV770) |
419 | return r700_read_disabled_bios(rdev); | 470 | return r700_read_disabled_bios(rdev); |
420 | else if (rdev->family >= CHIP_R600) | 471 | else if (rdev->family >= CHIP_R600) |
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 5249af8931e6..2d48e7a1474b 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
@@ -117,7 +117,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev) | |||
117 | p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff; | 117 | p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff; |
118 | if (p1pll->reference_div < 2) | 118 | if (p1pll->reference_div < 2) |
119 | p1pll->reference_div = 12; | 119 | p1pll->reference_div = 12; |
120 | p2pll->reference_div = p1pll->reference_div; | 120 | p2pll->reference_div = p1pll->reference_div; |
121 | 121 | ||
122 | /* These aren't in the device-tree */ | 122 | /* These aren't in the device-tree */ |
123 | if (rdev->family >= CHIP_R420) { | 123 | if (rdev->family >= CHIP_R420) { |
@@ -139,6 +139,8 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev) | |||
139 | p2pll->pll_out_min = 12500; | 139 | p2pll->pll_out_min = 12500; |
140 | p2pll->pll_out_max = 35000; | 140 | p2pll->pll_out_max = 35000; |
141 | } | 141 | } |
142 | /* not sure what the max should be in all cases */ | ||
143 | rdev->clock.max_pixel_clock = 35000; | ||
142 | 144 | ||
143 | spll->reference_freq = mpll->reference_freq = p1pll->reference_freq; | 145 | spll->reference_freq = mpll->reference_freq = p1pll->reference_freq; |
144 | spll->reference_div = mpll->reference_div = | 146 | spll->reference_div = mpll->reference_div = |
@@ -151,7 +153,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev) | |||
151 | else | 153 | else |
152 | rdev->clock.default_sclk = | 154 | rdev->clock.default_sclk = |
153 | radeon_legacy_get_engine_clock(rdev); | 155 | radeon_legacy_get_engine_clock(rdev); |
154 | 156 | ||
155 | val = of_get_property(dp, "ATY,MCLK", NULL); | 157 | val = of_get_property(dp, "ATY,MCLK", NULL); |
156 | if (val && *val) | 158 | if (val && *val) |
157 | rdev->clock.default_mclk = (*val) / 10; | 159 | rdev->clock.default_mclk = (*val) / 10; |
@@ -160,7 +162,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev) | |||
160 | radeon_legacy_get_memory_clock(rdev); | 162 | radeon_legacy_get_memory_clock(rdev); |
161 | 163 | ||
162 | DRM_INFO("Using device-tree clock info\n"); | 164 | DRM_INFO("Using device-tree clock info\n"); |
163 | 165 | ||
164 | return true; | 166 | return true; |
165 | } | 167 | } |
166 | #else | 168 | #else |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 7b7ea269549c..e4594676a07c 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -448,7 +448,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
448 | 448 | ||
449 | bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) | 449 | bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) |
450 | { | 450 | { |
451 | int edid_info; | 451 | int edid_info, size; |
452 | struct edid *edid; | 452 | struct edid *edid; |
453 | unsigned char *raw; | 453 | unsigned char *raw; |
454 | edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); | 454 | edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); |
@@ -456,11 +456,12 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) | |||
456 | return false; | 456 | return false; |
457 | 457 | ||
458 | raw = rdev->bios + edid_info; | 458 | raw = rdev->bios + edid_info; |
459 | edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL); | 459 | size = EDID_LENGTH * (raw[0x7e] + 1); |
460 | edid = kmalloc(size, GFP_KERNEL); | ||
460 | if (edid == NULL) | 461 | if (edid == NULL) |
461 | return false; | 462 | return false; |
462 | 463 | ||
463 | memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1)); | 464 | memcpy((unsigned char *)edid, raw, size); |
464 | 465 | ||
465 | if (!drm_edid_is_valid(edid)) { | 466 | if (!drm_edid_is_valid(edid)) { |
466 | kfree(edid); | 467 | kfree(edid); |
@@ -468,14 +469,25 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) | |||
468 | } | 469 | } |
469 | 470 | ||
470 | rdev->mode_info.bios_hardcoded_edid = edid; | 471 | rdev->mode_info.bios_hardcoded_edid = edid; |
472 | rdev->mode_info.bios_hardcoded_edid_size = size; | ||
471 | return true; | 473 | return true; |
472 | } | 474 | } |
473 | 475 | ||
476 | /* this is used for atom LCDs as well */ | ||
474 | struct edid * | 477 | struct edid * |
475 | radeon_combios_get_hardcoded_edid(struct radeon_device *rdev) | 478 | radeon_bios_get_hardcoded_edid(struct radeon_device *rdev) |
476 | { | 479 | { |
477 | if (rdev->mode_info.bios_hardcoded_edid) | 480 | struct edid *edid; |
478 | return rdev->mode_info.bios_hardcoded_edid; | 481 | |
482 | if (rdev->mode_info.bios_hardcoded_edid) { | ||
483 | edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL); | ||
484 | if (edid) { | ||
485 | memcpy((unsigned char *)edid, | ||
486 | (unsigned char *)rdev->mode_info.bios_hardcoded_edid, | ||
487 | rdev->mode_info.bios_hardcoded_edid_size); | ||
488 | return edid; | ||
489 | } | ||
490 | } | ||
479 | return NULL; | 491 | return NULL; |
480 | } | 492 | } |
481 | 493 | ||
@@ -493,12 +505,18 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde | |||
493 | * DDC_VGA = RADEON_GPIO_VGA_DDC | 505 | * DDC_VGA = RADEON_GPIO_VGA_DDC |
494 | * DDC_LCD = RADEON_GPIOPAD_MASK | 506 | * DDC_LCD = RADEON_GPIOPAD_MASK |
495 | * DDC_GPIO = RADEON_MDGPIO_MASK | 507 | * DDC_GPIO = RADEON_MDGPIO_MASK |
496 | * r1xx/r2xx | 508 | * r1xx |
497 | * DDC_MONID = RADEON_GPIO_MONID | 509 | * DDC_MONID = RADEON_GPIO_MONID |
498 | * DDC_CRT2 = RADEON_GPIO_CRT2_DDC | 510 | * DDC_CRT2 = RADEON_GPIO_CRT2_DDC |
499 | * r3xx | 511 | * r200 |
500 | * DDC_MONID = RADEON_GPIO_MONID | 512 | * DDC_MONID = RADEON_GPIO_MONID |
501 | * DDC_CRT2 = RADEON_GPIO_DVI_DDC | 513 | * DDC_CRT2 = RADEON_GPIO_DVI_DDC |
514 | * r300/r350 | ||
515 | * DDC_MONID = RADEON_GPIO_DVI_DDC | ||
516 | * DDC_CRT2 = RADEON_GPIO_DVI_DDC | ||
517 | * rv2xx/rv3xx | ||
518 | * DDC_MONID = RADEON_GPIO_MONID | ||
519 | * DDC_CRT2 = RADEON_GPIO_MONID | ||
502 | * rs3xx/rs4xx | 520 | * rs3xx/rs4xx |
503 | * DDC_MONID = RADEON_GPIOPAD_MASK | 521 | * DDC_MONID = RADEON_GPIOPAD_MASK |
504 | * DDC_CRT2 = RADEON_GPIO_MONID | 522 | * DDC_CRT2 = RADEON_GPIO_MONID |
@@ -525,17 +543,26 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde | |||
525 | rdev->family == CHIP_RS400 || | 543 | rdev->family == CHIP_RS400 || |
526 | rdev->family == CHIP_RS480) | 544 | rdev->family == CHIP_RS480) |
527 | ddc_line = RADEON_GPIOPAD_MASK; | 545 | ddc_line = RADEON_GPIOPAD_MASK; |
528 | else | 546 | else if (rdev->family == CHIP_R300 || |
547 | rdev->family == CHIP_R350) { | ||
548 | ddc_line = RADEON_GPIO_DVI_DDC; | ||
549 | ddc = DDC_DVI; | ||
550 | } else | ||
529 | ddc_line = RADEON_GPIO_MONID; | 551 | ddc_line = RADEON_GPIO_MONID; |
530 | break; | 552 | break; |
531 | case DDC_CRT2: | 553 | case DDC_CRT2: |
532 | if (rdev->family == CHIP_RS300 || | 554 | if (rdev->family == CHIP_R200 || |
533 | rdev->family == CHIP_RS400 || | 555 | rdev->family == CHIP_R300 || |
534 | rdev->family == CHIP_RS480) | 556 | rdev->family == CHIP_R350) { |
535 | ddc_line = RADEON_GPIO_MONID; | ||
536 | else if (rdev->family >= CHIP_R300) { | ||
537 | ddc_line = RADEON_GPIO_DVI_DDC; | 557 | ddc_line = RADEON_GPIO_DVI_DDC; |
538 | ddc = DDC_DVI; | 558 | ddc = DDC_DVI; |
559 | } else if (rdev->family == CHIP_RS300 || | ||
560 | rdev->family == CHIP_RS400 || | ||
561 | rdev->family == CHIP_RS480) | ||
562 | ddc_line = RADEON_GPIO_MONID; | ||
563 | else if (rdev->family >= CHIP_RV350) { | ||
564 | ddc_line = RADEON_GPIO_MONID; | ||
565 | ddc = DDC_MONID; | ||
539 | } else | 566 | } else |
540 | ddc_line = RADEON_GPIO_CRT2_DDC; | 567 | ddc_line = RADEON_GPIO_CRT2_DDC; |
541 | break; | 568 | break; |
@@ -571,6 +598,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde | |||
571 | } | 598 | } |
572 | 599 | ||
573 | if (clk_mask && data_mask) { | 600 | if (clk_mask && data_mask) { |
601 | /* system specific masks */ | ||
574 | i2c.mask_clk_mask = clk_mask; | 602 | i2c.mask_clk_mask = clk_mask; |
575 | i2c.mask_data_mask = data_mask; | 603 | i2c.mask_data_mask = data_mask; |
576 | i2c.a_clk_mask = clk_mask; | 604 | i2c.a_clk_mask = clk_mask; |
@@ -579,7 +607,19 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde | |||
579 | i2c.en_data_mask = data_mask; | 607 | i2c.en_data_mask = data_mask; |
580 | i2c.y_clk_mask = clk_mask; | 608 | i2c.y_clk_mask = clk_mask; |
581 | i2c.y_data_mask = data_mask; | 609 | i2c.y_data_mask = data_mask; |
610 | } else if ((ddc_line == RADEON_GPIOPAD_MASK) || | ||
611 | (ddc_line == RADEON_MDGPIO_MASK)) { | ||
612 | /* default gpiopad masks */ | ||
613 | i2c.mask_clk_mask = (0x20 << 8); | ||
614 | i2c.mask_data_mask = 0x80; | ||
615 | i2c.a_clk_mask = (0x20 << 8); | ||
616 | i2c.a_data_mask = 0x80; | ||
617 | i2c.en_clk_mask = (0x20 << 8); | ||
618 | i2c.en_data_mask = 0x80; | ||
619 | i2c.y_clk_mask = (0x20 << 8); | ||
620 | i2c.y_data_mask = 0x80; | ||
582 | } else { | 621 | } else { |
622 | /* default masks for ddc pads */ | ||
583 | i2c.mask_clk_mask = RADEON_GPIO_EN_1; | 623 | i2c.mask_clk_mask = RADEON_GPIO_EN_1; |
584 | i2c.mask_data_mask = RADEON_GPIO_EN_0; | 624 | i2c.mask_data_mask = RADEON_GPIO_EN_0; |
585 | i2c.a_clk_mask = RADEON_GPIO_A_1; | 625 | i2c.a_clk_mask = RADEON_GPIO_A_1; |
@@ -684,26 +724,42 @@ void radeon_combios_i2c_init(struct radeon_device *rdev) | |||
684 | struct drm_device *dev = rdev->ddev; | 724 | struct drm_device *dev = rdev->ddev; |
685 | struct radeon_i2c_bus_rec i2c; | 725 | struct radeon_i2c_bus_rec i2c; |
686 | 726 | ||
727 | /* actual hw pads | ||
728 | * r1xx/rs2xx/rs3xx | ||
729 | * 0x60, 0x64, 0x68, 0x6c, gpiopads, mm | ||
730 | * r200 | ||
731 | * 0x60, 0x64, 0x68, mm | ||
732 | * r300/r350 | ||
733 | * 0x60, 0x64, mm | ||
734 | * rv2xx/rv3xx/rs4xx | ||
735 | * 0x60, 0x64, 0x68, gpiopads, mm | ||
736 | */ | ||
687 | 737 | ||
738 | /* 0x60 */ | ||
688 | i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | 739 | i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); |
689 | rdev->i2c_bus[0] = radeon_i2c_create(dev, &i2c, "DVI_DDC"); | 740 | rdev->i2c_bus[0] = radeon_i2c_create(dev, &i2c, "DVI_DDC"); |
690 | 741 | /* 0x64 */ | |
691 | i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 742 | i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
692 | rdev->i2c_bus[1] = radeon_i2c_create(dev, &i2c, "VGA_DDC"); | 743 | rdev->i2c_bus[1] = radeon_i2c_create(dev, &i2c, "VGA_DDC"); |
693 | 744 | ||
745 | /* mm i2c */ | ||
694 | i2c.valid = true; | 746 | i2c.valid = true; |
695 | i2c.hw_capable = true; | 747 | i2c.hw_capable = true; |
696 | i2c.mm_i2c = true; | 748 | i2c.mm_i2c = true; |
697 | i2c.i2c_id = 0xa0; | 749 | i2c.i2c_id = 0xa0; |
698 | rdev->i2c_bus[2] = radeon_i2c_create(dev, &i2c, "MM_I2C"); | 750 | rdev->i2c_bus[2] = radeon_i2c_create(dev, &i2c, "MM_I2C"); |
699 | 751 | ||
700 | if (rdev->family == CHIP_RS300 || | 752 | if (rdev->family == CHIP_R300 || |
701 | rdev->family == CHIP_RS400 || | 753 | rdev->family == CHIP_R350) { |
702 | rdev->family == CHIP_RS480) { | 754 | /* only 2 sw i2c pads */ |
755 | } else if (rdev->family == CHIP_RS300 || | ||
756 | rdev->family == CHIP_RS400 || | ||
757 | rdev->family == CHIP_RS480) { | ||
703 | u16 offset; | 758 | u16 offset; |
704 | u8 id, blocks, clk, data; | 759 | u8 id, blocks, clk, data; |
705 | int i; | 760 | int i; |
706 | 761 | ||
762 | /* 0x68 */ | ||
707 | i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); | 763 | i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); |
708 | rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); | 764 | rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); |
709 | 765 | ||
@@ -715,21 +771,23 @@ void radeon_combios_i2c_init(struct radeon_device *rdev) | |||
715 | if (id == 136) { | 771 | if (id == 136) { |
716 | clk = RBIOS8(offset + 3 + (i * 5) + 3); | 772 | clk = RBIOS8(offset + 3 + (i * 5) + 3); |
717 | data = RBIOS8(offset + 3 + (i * 5) + 4); | 773 | data = RBIOS8(offset + 3 + (i * 5) + 4); |
774 | /* gpiopad */ | ||
718 | i2c = combios_setup_i2c_bus(rdev, DDC_MONID, | 775 | i2c = combios_setup_i2c_bus(rdev, DDC_MONID, |
719 | clk, data); | 776 | (1 << clk), (1 << data)); |
720 | rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); | 777 | rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); |
721 | break; | 778 | break; |
722 | } | 779 | } |
723 | } | 780 | } |
724 | } | 781 | } |
725 | 782 | } else if (rdev->family >= CHIP_R200) { | |
726 | } else if (rdev->family >= CHIP_R300) { | 783 | /* 0x68 */ |
727 | i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); | 784 | i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); |
728 | rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); | 785 | rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); |
729 | } else { | 786 | } else { |
787 | /* 0x68 */ | ||
730 | i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); | 788 | i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); |
731 | rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); | 789 | rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); |
732 | 790 | /* 0x6c */ | |
733 | i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); | 791 | i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); |
734 | rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "CRT2_DDC"); | 792 | rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "CRT2_DDC"); |
735 | } | 793 | } |
@@ -808,6 +866,11 @@ bool radeon_combios_get_clock_info(struct drm_device *dev) | |||
808 | rdev->clock.default_sclk = sclk; | 866 | rdev->clock.default_sclk = sclk; |
809 | rdev->clock.default_mclk = mclk; | 867 | rdev->clock.default_mclk = mclk; |
810 | 868 | ||
869 | if (RBIOS32(pll_info + 0x16)) | ||
870 | rdev->clock.max_pixel_clock = RBIOS32(pll_info + 0x16); | ||
871 | else | ||
872 | rdev->clock.max_pixel_clock = 35000; /* might need something asic specific */ | ||
873 | |||
811 | return true; | 874 | return true; |
812 | } | 875 | } |
813 | return false; | 876 | return false; |
@@ -1490,6 +1553,13 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1490 | (rdev->pdev->subsystem_device == 0x4a48)) { | 1553 | (rdev->pdev->subsystem_device == 0x4a48)) { |
1491 | /* Mac X800 */ | 1554 | /* Mac X800 */ |
1492 | rdev->mode_info.connector_table = CT_MAC_X800; | 1555 | rdev->mode_info.connector_table = CT_MAC_X800; |
1556 | } else if ((of_machine_is_compatible("PowerMac7,2") || | ||
1557 | of_machine_is_compatible("PowerMac7,3")) && | ||
1558 | (rdev->pdev->device == 0x4150) && | ||
1559 | (rdev->pdev->subsystem_vendor == 0x1002) && | ||
1560 | (rdev->pdev->subsystem_device == 0x4150)) { | ||
1561 | /* Mac G5 tower 9600 */ | ||
1562 | rdev->mode_info.connector_table = CT_MAC_G5_9600; | ||
1493 | } else | 1563 | } else |
1494 | #endif /* CONFIG_PPC_PMAC */ | 1564 | #endif /* CONFIG_PPC_PMAC */ |
1495 | #ifdef CONFIG_PPC64 | 1565 | #ifdef CONFIG_PPC64 |
@@ -2008,6 +2078,61 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
2008 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, | 2078 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, |
2009 | &hpd); | 2079 | &hpd); |
2010 | break; | 2080 | break; |
2081 | case CT_MAC_G5_9600: | ||
2082 | DRM_INFO("Connector Table: %d (mac g5 9600)\n", | ||
2083 | rdev->mode_info.connector_table); | ||
2084 | /* DVI - tv dac, dvo */ | ||
2085 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | ||
2086 | hpd.hpd = RADEON_HPD_1; /* ??? */ | ||
2087 | radeon_add_legacy_encoder(dev, | ||
2088 | radeon_get_encoder_enum(dev, | ||
2089 | ATOM_DEVICE_DFP2_SUPPORT, | ||
2090 | 0), | ||
2091 | ATOM_DEVICE_DFP2_SUPPORT); | ||
2092 | radeon_add_legacy_encoder(dev, | ||
2093 | radeon_get_encoder_enum(dev, | ||
2094 | ATOM_DEVICE_CRT2_SUPPORT, | ||
2095 | 2), | ||
2096 | ATOM_DEVICE_CRT2_SUPPORT); | ||
2097 | radeon_add_legacy_connector(dev, 0, | ||
2098 | ATOM_DEVICE_DFP2_SUPPORT | | ||
2099 | ATOM_DEVICE_CRT2_SUPPORT, | ||
2100 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, | ||
2101 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, | ||
2102 | &hpd); | ||
2103 | /* ADC - primary dac, internal tmds */ | ||
2104 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | ||
2105 | hpd.hpd = RADEON_HPD_2; /* ??? */ | ||
2106 | radeon_add_legacy_encoder(dev, | ||
2107 | radeon_get_encoder_enum(dev, | ||
2108 | ATOM_DEVICE_DFP1_SUPPORT, | ||
2109 | 0), | ||
2110 | ATOM_DEVICE_DFP1_SUPPORT); | ||
2111 | radeon_add_legacy_encoder(dev, | ||
2112 | radeon_get_encoder_enum(dev, | ||
2113 | ATOM_DEVICE_CRT1_SUPPORT, | ||
2114 | 1), | ||
2115 | ATOM_DEVICE_CRT1_SUPPORT); | ||
2116 | radeon_add_legacy_connector(dev, 1, | ||
2117 | ATOM_DEVICE_DFP1_SUPPORT | | ||
2118 | ATOM_DEVICE_CRT1_SUPPORT, | ||
2119 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, | ||
2120 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, | ||
2121 | &hpd); | ||
2122 | /* TV - TV DAC */ | ||
2123 | ddc_i2c.valid = false; | ||
2124 | hpd.hpd = RADEON_HPD_NONE; | ||
2125 | radeon_add_legacy_encoder(dev, | ||
2126 | radeon_get_encoder_enum(dev, | ||
2127 | ATOM_DEVICE_TV1_SUPPORT, | ||
2128 | 2), | ||
2129 | ATOM_DEVICE_TV1_SUPPORT); | ||
2130 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, | ||
2131 | DRM_MODE_CONNECTOR_SVIDEO, | ||
2132 | &ddc_i2c, | ||
2133 | CONNECTOR_OBJECT_ID_SVIDEO, | ||
2134 | &hpd); | ||
2135 | break; | ||
2011 | default: | 2136 | default: |
2012 | DRM_INFO("Connector table: %d (invalid)\n", | 2137 | DRM_INFO("Connector table: %d (invalid)\n", |
2013 | rdev->mode_info.connector_table); | 2138 | rdev->mode_info.connector_table); |
@@ -2419,6 +2544,12 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2419 | return true; | 2544 | return true; |
2420 | } | 2545 | } |
2421 | 2546 | ||
2547 | static const char *thermal_controller_names[] = { | ||
2548 | "NONE", | ||
2549 | "lm63", | ||
2550 | "adm1032", | ||
2551 | }; | ||
2552 | |||
2422 | void radeon_combios_get_power_modes(struct radeon_device *rdev) | 2553 | void radeon_combios_get_power_modes(struct radeon_device *rdev) |
2423 | { | 2554 | { |
2424 | struct drm_device *dev = rdev->ddev; | 2555 | struct drm_device *dev = rdev->ddev; |
@@ -2428,6 +2559,65 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) | |||
2428 | 2559 | ||
2429 | rdev->pm.default_power_state_index = -1; | 2560 | rdev->pm.default_power_state_index = -1; |
2430 | 2561 | ||
2562 | /* allocate 2 power states */ | ||
2563 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL); | ||
2564 | if (!rdev->pm.power_state) { | ||
2565 | rdev->pm.default_power_state_index = state_index; | ||
2566 | rdev->pm.num_power_states = 0; | ||
2567 | |||
2568 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; | ||
2569 | rdev->pm.current_clock_mode_index = 0; | ||
2570 | return; | ||
2571 | } | ||
2572 | |||
2573 | /* check for a thermal chip */ | ||
2574 | offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); | ||
2575 | if (offset) { | ||
2576 | u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0; | ||
2577 | struct radeon_i2c_bus_rec i2c_bus; | ||
2578 | |||
2579 | rev = RBIOS8(offset); | ||
2580 | |||
2581 | if (rev == 0) { | ||
2582 | thermal_controller = RBIOS8(offset + 3); | ||
2583 | gpio = RBIOS8(offset + 4) & 0x3f; | ||
2584 | i2c_addr = RBIOS8(offset + 5); | ||
2585 | } else if (rev == 1) { | ||
2586 | thermal_controller = RBIOS8(offset + 4); | ||
2587 | gpio = RBIOS8(offset + 5) & 0x3f; | ||
2588 | i2c_addr = RBIOS8(offset + 6); | ||
2589 | } else if (rev == 2) { | ||
2590 | thermal_controller = RBIOS8(offset + 4); | ||
2591 | gpio = RBIOS8(offset + 5) & 0x3f; | ||
2592 | i2c_addr = RBIOS8(offset + 6); | ||
2593 | clk_bit = RBIOS8(offset + 0xa); | ||
2594 | data_bit = RBIOS8(offset + 0xb); | ||
2595 | } | ||
2596 | if ((thermal_controller > 0) && (thermal_controller < 3)) { | ||
2597 | DRM_INFO("Possible %s thermal controller at 0x%02x\n", | ||
2598 | thermal_controller_names[thermal_controller], | ||
2599 | i2c_addr >> 1); | ||
2600 | if (gpio == DDC_LCD) { | ||
2601 | /* MM i2c */ | ||
2602 | i2c_bus.valid = true; | ||
2603 | i2c_bus.hw_capable = true; | ||
2604 | i2c_bus.mm_i2c = true; | ||
2605 | i2c_bus.i2c_id = 0xa0; | ||
2606 | } else if (gpio == DDC_GPIO) | ||
2607 | i2c_bus = combios_setup_i2c_bus(rdev, gpio, 1 << clk_bit, 1 << data_bit); | ||
2608 | else | ||
2609 | i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0); | ||
2610 | rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); | ||
2611 | if (rdev->pm.i2c_bus) { | ||
2612 | struct i2c_board_info info = { }; | ||
2613 | const char *name = thermal_controller_names[thermal_controller]; | ||
2614 | info.addr = i2c_addr >> 1; | ||
2615 | strlcpy(info.type, name, sizeof(info.type)); | ||
2616 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); | ||
2617 | } | ||
2618 | } | ||
2619 | } | ||
2620 | |||
2431 | if (rdev->flags & RADEON_IS_MOBILITY) { | 2621 | if (rdev->flags & RADEON_IS_MOBILITY) { |
2432 | offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); | 2622 | offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); |
2433 | if (offset) { | 2623 | if (offset) { |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index ecc1a8fafbfd..9792d4ffdc86 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -40,26 +40,39 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, | |||
40 | struct drm_encoder *encoder, | 40 | struct drm_encoder *encoder, |
41 | bool connected); | 41 | bool connected); |
42 | 42 | ||
43 | extern void | ||
44 | radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, | ||
45 | struct drm_connector *drm_connector); | ||
46 | |||
47 | bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector); | ||
48 | |||
43 | void radeon_connector_hotplug(struct drm_connector *connector) | 49 | void radeon_connector_hotplug(struct drm_connector *connector) |
44 | { | 50 | { |
45 | struct drm_device *dev = connector->dev; | 51 | struct drm_device *dev = connector->dev; |
46 | struct radeon_device *rdev = dev->dev_private; | 52 | struct radeon_device *rdev = dev->dev_private; |
47 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 53 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
48 | 54 | ||
49 | if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) | 55 | /* bail if the connector does not have hpd pin, e.g., |
50 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | 56 | * VGA, TV, etc. |
57 | */ | ||
58 | if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) | ||
59 | return; | ||
51 | 60 | ||
52 | if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || | 61 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); |
53 | (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { | 62 | |
54 | if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | 63 | /* powering up/down the eDP panel generates hpd events which |
55 | (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) { | 64 | * can interfere with modesetting. |
56 | if (radeon_dp_needs_link_train(radeon_connector)) { | 65 | */ |
57 | if (connector->encoder) | 66 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) |
58 | dp_link_train(connector->encoder, connector); | 67 | return; |
59 | } | ||
60 | } | ||
61 | } | ||
62 | 68 | ||
69 | /* pre-r600 did not always have the hpd pins mapped accurately to connectors */ | ||
70 | if (rdev->family >= CHIP_R600) { | ||
71 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) | ||
72 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
73 | else | ||
74 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | ||
75 | } | ||
63 | } | 76 | } |
64 | 77 | ||
65 | static void radeon_property_change_mode(struct drm_encoder *encoder) | 78 | static void radeon_property_change_mode(struct drm_encoder *encoder) |
@@ -183,13 +196,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, | |||
183 | continue; | 196 | continue; |
184 | 197 | ||
185 | if (priority == true) { | 198 | if (priority == true) { |
186 | DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); | 199 | DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); |
187 | DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); | 200 | DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector)); |
188 | conflict->status = connector_status_disconnected; | 201 | conflict->status = connector_status_disconnected; |
189 | radeon_connector_update_scratch_regs(conflict, connector_status_disconnected); | 202 | radeon_connector_update_scratch_regs(conflict, connector_status_disconnected); |
190 | } else { | 203 | } else { |
191 | DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); | 204 | DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); |
192 | DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict)); | 205 | DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict)); |
193 | current_status = connector_status_disconnected; | 206 | current_status = connector_status_disconnected; |
194 | } | 207 | } |
195 | break; | 208 | break; |
@@ -326,6 +339,34 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr | |||
326 | } | 339 | } |
327 | } | 340 | } |
328 | 341 | ||
342 | if (property == rdev->mode_info.underscan_hborder_property) { | ||
343 | /* need to find digital encoder on connector */ | ||
344 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | ||
345 | if (!encoder) | ||
346 | return 0; | ||
347 | |||
348 | radeon_encoder = to_radeon_encoder(encoder); | ||
349 | |||
350 | if (radeon_encoder->underscan_hborder != val) { | ||
351 | radeon_encoder->underscan_hborder = val; | ||
352 | radeon_property_change_mode(&radeon_encoder->base); | ||
353 | } | ||
354 | } | ||
355 | |||
356 | if (property == rdev->mode_info.underscan_vborder_property) { | ||
357 | /* need to find digital encoder on connector */ | ||
358 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | ||
359 | if (!encoder) | ||
360 | return 0; | ||
361 | |||
362 | radeon_encoder = to_radeon_encoder(encoder); | ||
363 | |||
364 | if (radeon_encoder->underscan_vborder != val) { | ||
365 | radeon_encoder->underscan_vborder = val; | ||
366 | radeon_property_change_mode(&radeon_encoder->base); | ||
367 | } | ||
368 | } | ||
369 | |||
329 | if (property == rdev->mode_info.tv_std_property) { | 370 | if (property == rdev->mode_info.tv_std_property) { |
330 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC); | 371 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC); |
331 | if (!encoder) { | 372 | if (!encoder) { |
@@ -404,13 +445,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, | |||
404 | mode->vdisplay == native_mode->vdisplay) { | 445 | mode->vdisplay == native_mode->vdisplay) { |
405 | *native_mode = *mode; | 446 | *native_mode = *mode; |
406 | drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V); | 447 | drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V); |
407 | DRM_INFO("Determined LVDS native mode details from EDID\n"); | 448 | DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n"); |
408 | break; | 449 | break; |
409 | } | 450 | } |
410 | } | 451 | } |
411 | } | 452 | } |
412 | if (!native_mode->clock) { | 453 | if (!native_mode->clock) { |
413 | DRM_INFO("No LVDS native mode details, disabling RMX\n"); | 454 | DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); |
414 | radeon_encoder->rmx_type = RMX_OFF; | 455 | radeon_encoder->rmx_type = RMX_OFF; |
415 | } | 456 | } |
416 | } | 457 | } |
@@ -444,6 +485,9 @@ static int radeon_lvds_get_modes(struct drm_connector *connector) | |||
444 | if (mode) { | 485 | if (mode) { |
445 | ret = 1; | 486 | ret = 1; |
446 | drm_mode_probed_add(connector, mode); | 487 | drm_mode_probed_add(connector, mode); |
488 | /* add the width/height from vbios tables if available */ | ||
489 | connector->display_info.width_mm = mode->width_mm; | ||
490 | connector->display_info.height_mm = mode->height_mm; | ||
447 | /* add scaled modes */ | 491 | /* add scaled modes */ |
448 | radeon_add_common_modes(encoder, connector); | 492 | radeon_add_common_modes(encoder, connector); |
449 | } | 493 | } |
@@ -590,14 +634,22 @@ static int radeon_vga_get_modes(struct drm_connector *connector) | |||
590 | static int radeon_vga_mode_valid(struct drm_connector *connector, | 634 | static int radeon_vga_mode_valid(struct drm_connector *connector, |
591 | struct drm_display_mode *mode) | 635 | struct drm_display_mode *mode) |
592 | { | 636 | { |
637 | struct drm_device *dev = connector->dev; | ||
638 | struct radeon_device *rdev = dev->dev_private; | ||
639 | |||
593 | /* XXX check mode bandwidth */ | 640 | /* XXX check mode bandwidth */ |
594 | /* XXX verify against max DAC output frequency */ | 641 | |
642 | if ((mode->clock / 10) > rdev->clock.max_pixel_clock) | ||
643 | return MODE_CLOCK_HIGH; | ||
644 | |||
595 | return MODE_OK; | 645 | return MODE_OK; |
596 | } | 646 | } |
597 | 647 | ||
598 | static enum drm_connector_status | 648 | static enum drm_connector_status |
599 | radeon_vga_detect(struct drm_connector *connector, bool force) | 649 | radeon_vga_detect(struct drm_connector *connector, bool force) |
600 | { | 650 | { |
651 | struct drm_device *dev = connector->dev; | ||
652 | struct radeon_device *rdev = dev->dev_private; | ||
601 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 653 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
602 | struct drm_encoder *encoder; | 654 | struct drm_encoder *encoder; |
603 | struct drm_encoder_helper_funcs *encoder_funcs; | 655 | struct drm_encoder_helper_funcs *encoder_funcs; |
@@ -635,6 +687,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force) | |||
635 | ret = connector_status_connected; | 687 | ret = connector_status_connected; |
636 | } | 688 | } |
637 | } else { | 689 | } else { |
690 | |||
691 | /* if we aren't forcing don't do destructive polling */ | ||
692 | if (!force) | ||
693 | return connector->status; | ||
694 | |||
638 | if (radeon_connector->dac_load_detect && encoder) { | 695 | if (radeon_connector->dac_load_detect && encoder) { |
639 | encoder_funcs = encoder->helper_private; | 696 | encoder_funcs = encoder->helper_private; |
640 | ret = encoder_funcs->detect(encoder, connector); | 697 | ret = encoder_funcs->detect(encoder, connector); |
@@ -643,6 +700,17 @@ radeon_vga_detect(struct drm_connector *connector, bool force) | |||
643 | 700 | ||
644 | if (ret == connector_status_connected) | 701 | if (ret == connector_status_connected) |
645 | ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); | 702 | ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); |
703 | |||
704 | /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the | ||
705 | * vbios to deal with KVMs. If we have one and are not able to detect a monitor | ||
706 | * by other means, assume the CRT is connected and use that EDID. | ||
707 | */ | ||
708 | if ((!rdev->is_atom_bios) && | ||
709 | (ret == connector_status_disconnected) && | ||
710 | rdev->mode_info.bios_hardcoded_edid_size) { | ||
711 | ret = connector_status_connected; | ||
712 | } | ||
713 | |||
646 | radeon_connector_update_scratch_regs(connector, ret); | 714 | radeon_connector_update_scratch_regs(connector, ret); |
647 | return ret; | 715 | return ret; |
648 | } | 716 | } |
@@ -754,6 +822,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector) | |||
754 | static enum drm_connector_status | 822 | static enum drm_connector_status |
755 | radeon_dvi_detect(struct drm_connector *connector, bool force) | 823 | radeon_dvi_detect(struct drm_connector *connector, bool force) |
756 | { | 824 | { |
825 | struct drm_device *dev = connector->dev; | ||
826 | struct radeon_device *rdev = dev->dev_private; | ||
757 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 827 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
758 | struct drm_encoder *encoder = NULL; | 828 | struct drm_encoder *encoder = NULL; |
759 | struct drm_encoder_helper_funcs *encoder_funcs; | 829 | struct drm_encoder_helper_funcs *encoder_funcs; |
@@ -774,6 +844,13 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
774 | if (!radeon_connector->edid) { | 844 | if (!radeon_connector->edid) { |
775 | DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", | 845 | DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", |
776 | drm_get_connector_name(connector)); | 846 | drm_get_connector_name(connector)); |
847 | /* rs690 seems to have a problem with connectors not existing and always | ||
848 | * return a block of 0's. If we see this just stop polling on this output */ | ||
849 | if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) { | ||
850 | ret = connector_status_disconnected; | ||
851 | DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector)); | ||
852 | radeon_connector->ddc_bus = NULL; | ||
853 | } | ||
777 | } else { | 854 | } else { |
778 | radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); | 855 | radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); |
779 | 856 | ||
@@ -793,8 +870,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
793 | * you don't really know what's connected to which port as both are digital. | 870 | * you don't really know what's connected to which port as both are digital. |
794 | */ | 871 | */ |
795 | if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { | 872 | if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { |
796 | struct drm_device *dev = connector->dev; | ||
797 | struct radeon_device *rdev = dev->dev_private; | ||
798 | struct drm_connector *list_connector; | 873 | struct drm_connector *list_connector; |
799 | struct radeon_connector *list_radeon_connector; | 874 | struct radeon_connector *list_radeon_connector; |
800 | list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { | 875 | list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { |
@@ -822,6 +897,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
822 | if ((ret == connector_status_connected) && (radeon_connector->use_digital == true)) | 897 | if ((ret == connector_status_connected) && (radeon_connector->use_digital == true)) |
823 | goto out; | 898 | goto out; |
824 | 899 | ||
900 | if (!force) { | ||
901 | ret = connector->status; | ||
902 | goto out; | ||
903 | } | ||
904 | |||
825 | /* find analog encoder */ | 905 | /* find analog encoder */ |
826 | if (radeon_connector->dac_load_detect) { | 906 | if (radeon_connector->dac_load_detect) { |
827 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | 907 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
@@ -854,6 +934,19 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
854 | ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); | 934 | ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); |
855 | } | 935 | } |
856 | 936 | ||
937 | /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the | ||
938 | * vbios to deal with KVMs. If we have one and are not able to detect a monitor | ||
939 | * by other means, assume the DFP is connected and use that EDID. In most | ||
940 | * cases the DVI port is actually a virtual KVM port connected to the service | ||
941 | * processor. | ||
942 | */ | ||
943 | if ((!rdev->is_atom_bios) && | ||
944 | (ret == connector_status_disconnected) && | ||
945 | rdev->mode_info.bios_hardcoded_edid_size) { | ||
946 | radeon_connector->use_digital = true; | ||
947 | ret = connector_status_connected; | ||
948 | } | ||
949 | |||
857 | out: | 950 | out: |
858 | /* updated in get modes as well since we need to know if it's analog or digital */ | 951 | /* updated in get modes as well since we need to know if it's analog or digital */ |
859 | radeon_connector_update_scratch_regs(connector, ret); | 952 | radeon_connector_update_scratch_regs(connector, ret); |
@@ -931,9 +1024,23 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector, | |||
931 | (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || | 1024 | (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || |
932 | (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) | 1025 | (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) |
933 | return MODE_OK; | 1026 | return MODE_OK; |
934 | else | 1027 | else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) { |
1028 | if (ASIC_IS_DCE3(rdev)) { | ||
1029 | /* HDMI 1.3+ supports max clock of 340 Mhz */ | ||
1030 | if (mode->clock > 340000) | ||
1031 | return MODE_CLOCK_HIGH; | ||
1032 | else | ||
1033 | return MODE_OK; | ||
1034 | } else | ||
1035 | return MODE_CLOCK_HIGH; | ||
1036 | } else | ||
935 | return MODE_CLOCK_HIGH; | 1037 | return MODE_CLOCK_HIGH; |
936 | } | 1038 | } |
1039 | |||
1040 | /* check against the max pixel clock */ | ||
1041 | if ((mode->clock / 10) > rdev->clock.max_pixel_clock) | ||
1042 | return MODE_CLOCK_HIGH; | ||
1043 | |||
937 | return MODE_OK; | 1044 | return MODE_OK; |
938 | } | 1045 | } |
939 | 1046 | ||
@@ -970,37 +1077,193 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector) | |||
970 | static int radeon_dp_get_modes(struct drm_connector *connector) | 1077 | static int radeon_dp_get_modes(struct drm_connector *connector) |
971 | { | 1078 | { |
972 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 1079 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
1080 | struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; | ||
1081 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); | ||
973 | int ret; | 1082 | int ret; |
974 | 1083 | ||
975 | ret = radeon_ddc_get_modes(radeon_connector); | 1084 | if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || |
1085 | (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { | ||
1086 | struct drm_display_mode *mode; | ||
1087 | |||
1088 | if (!radeon_dig_connector->edp_on) | ||
1089 | atombios_set_edp_panel_power(connector, | ||
1090 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
1091 | ret = radeon_ddc_get_modes(radeon_connector); | ||
1092 | if (!radeon_dig_connector->edp_on) | ||
1093 | atombios_set_edp_panel_power(connector, | ||
1094 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | ||
1095 | |||
1096 | if (ret > 0) { | ||
1097 | if (encoder) { | ||
1098 | radeon_fixup_lvds_native_mode(encoder, connector); | ||
1099 | /* add scaled modes */ | ||
1100 | radeon_add_common_modes(encoder, connector); | ||
1101 | } | ||
1102 | return ret; | ||
1103 | } | ||
1104 | |||
1105 | encoder = radeon_best_single_encoder(connector); | ||
1106 | if (!encoder) | ||
1107 | return 0; | ||
1108 | |||
1109 | /* we have no EDID modes */ | ||
1110 | mode = radeon_fp_native_mode(encoder); | ||
1111 | if (mode) { | ||
1112 | ret = 1; | ||
1113 | drm_mode_probed_add(connector, mode); | ||
1114 | /* add the width/height from vbios tables if available */ | ||
1115 | connector->display_info.width_mm = mode->width_mm; | ||
1116 | connector->display_info.height_mm = mode->height_mm; | ||
1117 | /* add scaled modes */ | ||
1118 | radeon_add_common_modes(encoder, connector); | ||
1119 | } | ||
1120 | } else { | ||
1121 | /* need to setup ddc on the bridge */ | ||
1122 | if (radeon_connector_encoder_is_dp_bridge(connector)) { | ||
1123 | if (encoder) | ||
1124 | radeon_atom_ext_encoder_setup_ddc(encoder); | ||
1125 | } | ||
1126 | ret = radeon_ddc_get_modes(radeon_connector); | ||
1127 | } | ||
1128 | |||
976 | return ret; | 1129 | return ret; |
977 | } | 1130 | } |
978 | 1131 | ||
1132 | bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector) | ||
1133 | { | ||
1134 | struct drm_mode_object *obj; | ||
1135 | struct drm_encoder *encoder; | ||
1136 | struct radeon_encoder *radeon_encoder; | ||
1137 | int i; | ||
1138 | bool found = false; | ||
1139 | |||
1140 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
1141 | if (connector->encoder_ids[i] == 0) | ||
1142 | break; | ||
1143 | |||
1144 | obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); | ||
1145 | if (!obj) | ||
1146 | continue; | ||
1147 | |||
1148 | encoder = obj_to_encoder(obj); | ||
1149 | radeon_encoder = to_radeon_encoder(encoder); | ||
1150 | |||
1151 | switch (radeon_encoder->encoder_id) { | ||
1152 | case ENCODER_OBJECT_ID_TRAVIS: | ||
1153 | case ENCODER_OBJECT_ID_NUTMEG: | ||
1154 | found = true; | ||
1155 | break; | ||
1156 | default: | ||
1157 | break; | ||
1158 | } | ||
1159 | } | ||
1160 | |||
1161 | return found; | ||
1162 | } | ||
1163 | |||
1164 | bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) | ||
1165 | { | ||
1166 | struct drm_mode_object *obj; | ||
1167 | struct drm_encoder *encoder; | ||
1168 | struct radeon_encoder *radeon_encoder; | ||
1169 | int i; | ||
1170 | bool found = false; | ||
1171 | |||
1172 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
1173 | if (connector->encoder_ids[i] == 0) | ||
1174 | break; | ||
1175 | |||
1176 | obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); | ||
1177 | if (!obj) | ||
1178 | continue; | ||
1179 | |||
1180 | encoder = obj_to_encoder(obj); | ||
1181 | radeon_encoder = to_radeon_encoder(encoder); | ||
1182 | if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2) | ||
1183 | found = true; | ||
1184 | } | ||
1185 | |||
1186 | return found; | ||
1187 | } | ||
1188 | |||
1189 | bool radeon_connector_is_dp12_capable(struct drm_connector *connector) | ||
1190 | { | ||
1191 | struct drm_device *dev = connector->dev; | ||
1192 | struct radeon_device *rdev = dev->dev_private; | ||
1193 | |||
1194 | if (ASIC_IS_DCE5(rdev) && | ||
1195 | (rdev->clock.dp_extclk >= 53900) && | ||
1196 | radeon_connector_encoder_is_hbr2(connector)) { | ||
1197 | return true; | ||
1198 | } | ||
1199 | |||
1200 | return false; | ||
1201 | } | ||
1202 | |||
979 | static enum drm_connector_status | 1203 | static enum drm_connector_status |
980 | radeon_dp_detect(struct drm_connector *connector, bool force) | 1204 | radeon_dp_detect(struct drm_connector *connector, bool force) |
981 | { | 1205 | { |
1206 | struct drm_device *dev = connector->dev; | ||
1207 | struct radeon_device *rdev = dev->dev_private; | ||
982 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 1208 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
983 | enum drm_connector_status ret = connector_status_disconnected; | 1209 | enum drm_connector_status ret = connector_status_disconnected; |
984 | struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; | 1210 | struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; |
1211 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); | ||
985 | 1212 | ||
986 | if (radeon_connector->edid) { | 1213 | if (radeon_connector->edid) { |
987 | kfree(radeon_connector->edid); | 1214 | kfree(radeon_connector->edid); |
988 | radeon_connector->edid = NULL; | 1215 | radeon_connector->edid = NULL; |
989 | } | 1216 | } |
990 | 1217 | ||
991 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | 1218 | if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || |
1219 | (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { | ||
1220 | if (encoder) { | ||
1221 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1222 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; | ||
1223 | |||
1224 | /* check if panel is valid */ | ||
1225 | if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) | ||
1226 | ret = connector_status_connected; | ||
1227 | } | ||
992 | /* eDP is always DP */ | 1228 | /* eDP is always DP */ |
993 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; | 1229 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; |
1230 | if (!radeon_dig_connector->edp_on) | ||
1231 | atombios_set_edp_panel_power(connector, | ||
1232 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
994 | if (radeon_dp_getdpcd(radeon_connector)) | 1233 | if (radeon_dp_getdpcd(radeon_connector)) |
995 | ret = connector_status_connected; | 1234 | ret = connector_status_connected; |
1235 | if (!radeon_dig_connector->edp_on) | ||
1236 | atombios_set_edp_panel_power(connector, | ||
1237 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | ||
996 | } else { | 1238 | } else { |
1239 | /* need to setup ddc on the bridge */ | ||
1240 | if (radeon_connector_encoder_is_dp_bridge(connector)) { | ||
1241 | if (encoder) | ||
1242 | radeon_atom_ext_encoder_setup_ddc(encoder); | ||
1243 | } | ||
997 | radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); | 1244 | radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); |
998 | if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { | 1245 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { |
999 | if (radeon_dp_getdpcd(radeon_connector)) | 1246 | ret = connector_status_connected; |
1000 | ret = connector_status_connected; | 1247 | if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) |
1248 | radeon_dp_getdpcd(radeon_connector); | ||
1001 | } else { | 1249 | } else { |
1002 | if (radeon_ddc_probe(radeon_connector)) | 1250 | if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { |
1003 | ret = connector_status_connected; | 1251 | if (radeon_dp_getdpcd(radeon_connector)) |
1252 | ret = connector_status_connected; | ||
1253 | } else { | ||
1254 | if (radeon_ddc_probe(radeon_connector)) | ||
1255 | ret = connector_status_connected; | ||
1256 | } | ||
1257 | } | ||
1258 | |||
1259 | if ((ret == connector_status_disconnected) && | ||
1260 | radeon_connector->dac_load_detect) { | ||
1261 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); | ||
1262 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
1263 | if (encoder) { | ||
1264 | encoder_funcs = encoder->helper_private; | ||
1265 | ret = encoder_funcs->detect(encoder, connector); | ||
1266 | } | ||
1004 | } | 1267 | } |
1005 | } | 1268 | } |
1006 | 1269 | ||
@@ -1016,11 +1279,39 @@ static int radeon_dp_mode_valid(struct drm_connector *connector, | |||
1016 | 1279 | ||
1017 | /* XXX check mode bandwidth */ | 1280 | /* XXX check mode bandwidth */ |
1018 | 1281 | ||
1019 | if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | 1282 | if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || |
1020 | (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) | 1283 | (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { |
1021 | return radeon_dp_mode_valid_helper(radeon_connector, mode); | 1284 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); |
1022 | else | 1285 | |
1286 | if ((mode->hdisplay < 320) || (mode->vdisplay < 240)) | ||
1287 | return MODE_PANEL; | ||
1288 | |||
1289 | if (encoder) { | ||
1290 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1291 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; | ||
1292 | |||
1293 | /* AVIVO hardware supports downscaling modes larger than the panel | ||
1294 | * to the panel size, but I'm not sure this is desirable. | ||
1295 | */ | ||
1296 | if ((mode->hdisplay > native_mode->hdisplay) || | ||
1297 | (mode->vdisplay > native_mode->vdisplay)) | ||
1298 | return MODE_PANEL; | ||
1299 | |||
1300 | /* if scaling is disabled, block non-native modes */ | ||
1301 | if (radeon_encoder->rmx_type == RMX_OFF) { | ||
1302 | if ((mode->hdisplay != native_mode->hdisplay) || | ||
1303 | (mode->vdisplay != native_mode->vdisplay)) | ||
1304 | return MODE_PANEL; | ||
1305 | } | ||
1306 | } | ||
1023 | return MODE_OK; | 1307 | return MODE_OK; |
1308 | } else { | ||
1309 | if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | ||
1310 | (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) | ||
1311 | return radeon_dp_mode_valid_helper(connector, mode); | ||
1312 | else | ||
1313 | return MODE_OK; | ||
1314 | } | ||
1024 | } | 1315 | } |
1025 | 1316 | ||
1026 | struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { | 1317 | struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { |
@@ -1053,8 +1344,11 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1053 | struct drm_connector *connector; | 1344 | struct drm_connector *connector; |
1054 | struct radeon_connector *radeon_connector; | 1345 | struct radeon_connector *radeon_connector; |
1055 | struct radeon_connector_atom_dig *radeon_dig_connector; | 1346 | struct radeon_connector_atom_dig *radeon_dig_connector; |
1347 | struct drm_encoder *encoder; | ||
1348 | struct radeon_encoder *radeon_encoder; | ||
1056 | uint32_t subpixel_order = SubPixelNone; | 1349 | uint32_t subpixel_order = SubPixelNone; |
1057 | bool shared_ddc = false; | 1350 | bool shared_ddc = false; |
1351 | bool is_dp_bridge = false; | ||
1058 | 1352 | ||
1059 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | 1353 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
1060 | return; | 1354 | return; |
@@ -1078,7 +1372,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1078 | radeon_connector->shared_ddc = true; | 1372 | radeon_connector->shared_ddc = true; |
1079 | shared_ddc = true; | 1373 | shared_ddc = true; |
1080 | } | 1374 | } |
1081 | if (radeon_connector->router_bus && router->valid && | 1375 | if (radeon_connector->router_bus && router->ddc_valid && |
1082 | (radeon_connector->router.router_id == router->router_id)) { | 1376 | (radeon_connector->router.router_id == router->router_id)) { |
1083 | radeon_connector->shared_ddc = false; | 1377 | radeon_connector->shared_ddc = false; |
1084 | shared_ddc = false; | 1378 | shared_ddc = false; |
@@ -1086,6 +1380,21 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1086 | } | 1380 | } |
1087 | } | 1381 | } |
1088 | 1382 | ||
1383 | /* check if it's a dp bridge */ | ||
1384 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
1385 | radeon_encoder = to_radeon_encoder(encoder); | ||
1386 | if (radeon_encoder->devices & supported_device) { | ||
1387 | switch (radeon_encoder->encoder_id) { | ||
1388 | case ENCODER_OBJECT_ID_TRAVIS: | ||
1389 | case ENCODER_OBJECT_ID_NUTMEG: | ||
1390 | is_dp_bridge = true; | ||
1391 | break; | ||
1392 | default: | ||
1393 | break; | ||
1394 | } | ||
1395 | } | ||
1396 | } | ||
1397 | |||
1089 | radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL); | 1398 | radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL); |
1090 | if (!radeon_connector) | 1399 | if (!radeon_connector) |
1091 | return; | 1400 | return; |
@@ -1098,158 +1407,291 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1098 | radeon_connector->connector_object_id = connector_object_id; | 1407 | radeon_connector->connector_object_id = connector_object_id; |
1099 | radeon_connector->hpd = *hpd; | 1408 | radeon_connector->hpd = *hpd; |
1100 | radeon_connector->router = *router; | 1409 | radeon_connector->router = *router; |
1101 | if (router->valid) { | 1410 | if (router->ddc_valid || router->cd_valid) { |
1102 | radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); | 1411 | radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); |
1103 | if (!radeon_connector->router_bus) | 1412 | if (!radeon_connector->router_bus) |
1104 | goto failed; | 1413 | DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n"); |
1105 | } | 1414 | } |
1106 | switch (connector_type) { | 1415 | |
1107 | case DRM_MODE_CONNECTOR_VGA: | 1416 | if (is_dp_bridge) { |
1108 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | ||
1109 | drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); | ||
1110 | if (i2c_bus->valid) { | ||
1111 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | ||
1112 | if (!radeon_connector->ddc_bus) | ||
1113 | goto failed; | ||
1114 | } | ||
1115 | radeon_connector->dac_load_detect = true; | ||
1116 | drm_connector_attach_property(&radeon_connector->base, | ||
1117 | rdev->mode_info.load_detect_property, | ||
1118 | 1); | ||
1119 | /* no HPD on analog connectors */ | ||
1120 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | ||
1121 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
1122 | break; | ||
1123 | case DRM_MODE_CONNECTOR_DVIA: | ||
1124 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | ||
1125 | drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); | ||
1126 | if (i2c_bus->valid) { | ||
1127 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | ||
1128 | if (!radeon_connector->ddc_bus) | ||
1129 | goto failed; | ||
1130 | } | ||
1131 | radeon_connector->dac_load_detect = true; | ||
1132 | drm_connector_attach_property(&radeon_connector->base, | ||
1133 | rdev->mode_info.load_detect_property, | ||
1134 | 1); | ||
1135 | /* no HPD on analog connectors */ | ||
1136 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | ||
1137 | break; | ||
1138 | case DRM_MODE_CONNECTOR_DVII: | ||
1139 | case DRM_MODE_CONNECTOR_DVID: | ||
1140 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1417 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
1141 | if (!radeon_dig_connector) | 1418 | if (!radeon_dig_connector) |
1142 | goto failed; | 1419 | goto failed; |
1143 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 1420 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
1144 | radeon_connector->con_priv = radeon_dig_connector; | 1421 | radeon_connector->con_priv = radeon_dig_connector; |
1145 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); | 1422 | drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); |
1146 | drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); | 1423 | drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); |
1147 | if (i2c_bus->valid) { | 1424 | if (i2c_bus->valid) { |
1425 | /* add DP i2c bus */ | ||
1426 | if (connector_type == DRM_MODE_CONNECTOR_eDP) | ||
1427 | radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch"); | ||
1428 | else | ||
1429 | radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); | ||
1430 | if (!radeon_dig_connector->dp_i2c_bus) | ||
1431 | DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n"); | ||
1148 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1432 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
1149 | if (!radeon_connector->ddc_bus) | 1433 | if (!radeon_connector->ddc_bus) |
1150 | goto failed; | 1434 | DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
1151 | } | 1435 | } |
1152 | subpixel_order = SubPixelHorizontalRGB; | 1436 | switch (connector_type) { |
1153 | drm_connector_attach_property(&radeon_connector->base, | 1437 | case DRM_MODE_CONNECTOR_VGA: |
1154 | rdev->mode_info.coherent_mode_property, | 1438 | case DRM_MODE_CONNECTOR_DVIA: |
1155 | 1); | 1439 | default: |
1156 | if (ASIC_IS_AVIVO(rdev)) | 1440 | connector->interlace_allowed = true; |
1441 | connector->doublescan_allowed = true; | ||
1442 | radeon_connector->dac_load_detect = true; | ||
1443 | drm_connector_attach_property(&radeon_connector->base, | ||
1444 | rdev->mode_info.load_detect_property, | ||
1445 | 1); | ||
1446 | break; | ||
1447 | case DRM_MODE_CONNECTOR_DVII: | ||
1448 | case DRM_MODE_CONNECTOR_DVID: | ||
1449 | case DRM_MODE_CONNECTOR_HDMIA: | ||
1450 | case DRM_MODE_CONNECTOR_HDMIB: | ||
1451 | case DRM_MODE_CONNECTOR_DisplayPort: | ||
1157 | drm_connector_attach_property(&radeon_connector->base, | 1452 | drm_connector_attach_property(&radeon_connector->base, |
1158 | rdev->mode_info.underscan_property, | 1453 | rdev->mode_info.underscan_property, |
1159 | UNDERSCAN_AUTO); | 1454 | UNDERSCAN_OFF); |
1160 | if (connector_type == DRM_MODE_CONNECTOR_DVII) { | 1455 | drm_connector_attach_property(&radeon_connector->base, |
1456 | rdev->mode_info.underscan_hborder_property, | ||
1457 | 0); | ||
1458 | drm_connector_attach_property(&radeon_connector->base, | ||
1459 | rdev->mode_info.underscan_vborder_property, | ||
1460 | 0); | ||
1461 | subpixel_order = SubPixelHorizontalRGB; | ||
1462 | connector->interlace_allowed = true; | ||
1463 | if (connector_type == DRM_MODE_CONNECTOR_HDMIB) | ||
1464 | connector->doublescan_allowed = true; | ||
1465 | else | ||
1466 | connector->doublescan_allowed = false; | ||
1467 | if (connector_type == DRM_MODE_CONNECTOR_DVII) { | ||
1468 | radeon_connector->dac_load_detect = true; | ||
1469 | drm_connector_attach_property(&radeon_connector->base, | ||
1470 | rdev->mode_info.load_detect_property, | ||
1471 | 1); | ||
1472 | } | ||
1473 | break; | ||
1474 | case DRM_MODE_CONNECTOR_LVDS: | ||
1475 | case DRM_MODE_CONNECTOR_eDP: | ||
1476 | drm_connector_attach_property(&radeon_connector->base, | ||
1477 | dev->mode_config.scaling_mode_property, | ||
1478 | DRM_MODE_SCALE_FULLSCREEN); | ||
1479 | subpixel_order = SubPixelHorizontalRGB; | ||
1480 | connector->interlace_allowed = false; | ||
1481 | connector->doublescan_allowed = false; | ||
1482 | break; | ||
1483 | } | ||
1484 | } else { | ||
1485 | switch (connector_type) { | ||
1486 | case DRM_MODE_CONNECTOR_VGA: | ||
1487 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | ||
1488 | drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); | ||
1489 | if (i2c_bus->valid) { | ||
1490 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | ||
1491 | if (!radeon_connector->ddc_bus) | ||
1492 | DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); | ||
1493 | } | ||
1161 | radeon_connector->dac_load_detect = true; | 1494 | radeon_connector->dac_load_detect = true; |
1162 | drm_connector_attach_property(&radeon_connector->base, | 1495 | drm_connector_attach_property(&radeon_connector->base, |
1163 | rdev->mode_info.load_detect_property, | 1496 | rdev->mode_info.load_detect_property, |
1164 | 1); | 1497 | 1); |
1165 | } | 1498 | /* no HPD on analog connectors */ |
1166 | break; | 1499 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1167 | case DRM_MODE_CONNECTOR_HDMIA: | 1500 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
1168 | case DRM_MODE_CONNECTOR_HDMIB: | 1501 | connector->interlace_allowed = true; |
1169 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1502 | connector->doublescan_allowed = true; |
1170 | if (!radeon_dig_connector) | 1503 | break; |
1171 | goto failed; | 1504 | case DRM_MODE_CONNECTOR_DVIA: |
1172 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 1505 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
1173 | radeon_connector->con_priv = radeon_dig_connector; | 1506 | drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); |
1174 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); | 1507 | if (i2c_bus->valid) { |
1175 | drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); | 1508 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
1176 | if (i2c_bus->valid) { | 1509 | if (!radeon_connector->ddc_bus) |
1177 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1510 | DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
1178 | if (!radeon_connector->ddc_bus) | 1511 | } |
1512 | radeon_connector->dac_load_detect = true; | ||
1513 | drm_connector_attach_property(&radeon_connector->base, | ||
1514 | rdev->mode_info.load_detect_property, | ||
1515 | 1); | ||
1516 | /* no HPD on analog connectors */ | ||
1517 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | ||
1518 | connector->interlace_allowed = true; | ||
1519 | connector->doublescan_allowed = true; | ||
1520 | break; | ||
1521 | case DRM_MODE_CONNECTOR_DVII: | ||
1522 | case DRM_MODE_CONNECTOR_DVID: | ||
1523 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | ||
1524 | if (!radeon_dig_connector) | ||
1179 | goto failed; | 1525 | goto failed; |
1180 | } | 1526 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
1181 | drm_connector_attach_property(&radeon_connector->base, | 1527 | radeon_connector->con_priv = radeon_dig_connector; |
1182 | rdev->mode_info.coherent_mode_property, | 1528 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); |
1183 | 1); | 1529 | drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); |
1184 | if (ASIC_IS_AVIVO(rdev)) | 1530 | if (i2c_bus->valid) { |
1531 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | ||
1532 | if (!radeon_connector->ddc_bus) | ||
1533 | DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); | ||
1534 | } | ||
1535 | subpixel_order = SubPixelHorizontalRGB; | ||
1185 | drm_connector_attach_property(&radeon_connector->base, | 1536 | drm_connector_attach_property(&radeon_connector->base, |
1186 | rdev->mode_info.underscan_property, | 1537 | rdev->mode_info.coherent_mode_property, |
1187 | UNDERSCAN_AUTO); | 1538 | 1); |
1188 | subpixel_order = SubPixelHorizontalRGB; | 1539 | if (ASIC_IS_AVIVO(rdev)) { |
1189 | break; | 1540 | drm_connector_attach_property(&radeon_connector->base, |
1190 | case DRM_MODE_CONNECTOR_DisplayPort: | 1541 | rdev->mode_info.underscan_property, |
1191 | case DRM_MODE_CONNECTOR_eDP: | 1542 | UNDERSCAN_OFF); |
1192 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1543 | drm_connector_attach_property(&radeon_connector->base, |
1193 | if (!radeon_dig_connector) | 1544 | rdev->mode_info.underscan_hborder_property, |
1194 | goto failed; | 1545 | 0); |
1195 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 1546 | drm_connector_attach_property(&radeon_connector->base, |
1196 | radeon_connector->con_priv = radeon_dig_connector; | 1547 | rdev->mode_info.underscan_vborder_property, |
1197 | drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); | 1548 | 0); |
1198 | drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); | 1549 | } |
1199 | if (i2c_bus->valid) { | 1550 | if (connector_type == DRM_MODE_CONNECTOR_DVII) { |
1200 | /* add DP i2c bus */ | 1551 | radeon_connector->dac_load_detect = true; |
1201 | if (connector_type == DRM_MODE_CONNECTOR_eDP) | 1552 | drm_connector_attach_property(&radeon_connector->base, |
1202 | radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch"); | 1553 | rdev->mode_info.load_detect_property, |
1554 | 1); | ||
1555 | } | ||
1556 | connector->interlace_allowed = true; | ||
1557 | if (connector_type == DRM_MODE_CONNECTOR_DVII) | ||
1558 | connector->doublescan_allowed = true; | ||
1203 | else | 1559 | else |
1204 | radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); | 1560 | connector->doublescan_allowed = false; |
1205 | if (!radeon_dig_connector->dp_i2c_bus) | 1561 | break; |
1562 | case DRM_MODE_CONNECTOR_HDMIA: | ||
1563 | case DRM_MODE_CONNECTOR_HDMIB: | ||
1564 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | ||
1565 | if (!radeon_dig_connector) | ||
1206 | goto failed; | 1566 | goto failed; |
1207 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1567 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
1208 | if (!radeon_connector->ddc_bus) | 1568 | radeon_connector->con_priv = radeon_dig_connector; |
1569 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); | ||
1570 | drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); | ||
1571 | if (i2c_bus->valid) { | ||
1572 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | ||
1573 | if (!radeon_connector->ddc_bus) | ||
1574 | DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); | ||
1575 | } | ||
1576 | drm_connector_attach_property(&radeon_connector->base, | ||
1577 | rdev->mode_info.coherent_mode_property, | ||
1578 | 1); | ||
1579 | if (ASIC_IS_AVIVO(rdev)) { | ||
1580 | drm_connector_attach_property(&radeon_connector->base, | ||
1581 | rdev->mode_info.underscan_property, | ||
1582 | UNDERSCAN_OFF); | ||
1583 | drm_connector_attach_property(&radeon_connector->base, | ||
1584 | rdev->mode_info.underscan_hborder_property, | ||
1585 | 0); | ||
1586 | drm_connector_attach_property(&radeon_connector->base, | ||
1587 | rdev->mode_info.underscan_vborder_property, | ||
1588 | 0); | ||
1589 | } | ||
1590 | subpixel_order = SubPixelHorizontalRGB; | ||
1591 | connector->interlace_allowed = true; | ||
1592 | if (connector_type == DRM_MODE_CONNECTOR_HDMIB) | ||
1593 | connector->doublescan_allowed = true; | ||
1594 | else | ||
1595 | connector->doublescan_allowed = false; | ||
1596 | break; | ||
1597 | case DRM_MODE_CONNECTOR_DisplayPort: | ||
1598 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | ||
1599 | if (!radeon_dig_connector) | ||
1209 | goto failed; | 1600 | goto failed; |
1210 | } | 1601 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
1211 | subpixel_order = SubPixelHorizontalRGB; | 1602 | radeon_connector->con_priv = radeon_dig_connector; |
1212 | drm_connector_attach_property(&radeon_connector->base, | 1603 | drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); |
1213 | rdev->mode_info.coherent_mode_property, | 1604 | drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); |
1214 | 1); | 1605 | if (i2c_bus->valid) { |
1215 | if (ASIC_IS_AVIVO(rdev)) | 1606 | /* add DP i2c bus */ |
1607 | radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); | ||
1608 | if (!radeon_dig_connector->dp_i2c_bus) | ||
1609 | DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n"); | ||
1610 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | ||
1611 | if (!radeon_connector->ddc_bus) | ||
1612 | DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); | ||
1613 | } | ||
1614 | subpixel_order = SubPixelHorizontalRGB; | ||
1216 | drm_connector_attach_property(&radeon_connector->base, | 1615 | drm_connector_attach_property(&radeon_connector->base, |
1217 | rdev->mode_info.underscan_property, | 1616 | rdev->mode_info.coherent_mode_property, |
1218 | UNDERSCAN_AUTO); | 1617 | 1); |
1219 | break; | 1618 | if (ASIC_IS_AVIVO(rdev)) { |
1220 | case DRM_MODE_CONNECTOR_SVIDEO: | 1619 | drm_connector_attach_property(&radeon_connector->base, |
1221 | case DRM_MODE_CONNECTOR_Composite: | 1620 | rdev->mode_info.underscan_property, |
1222 | case DRM_MODE_CONNECTOR_9PinDIN: | 1621 | UNDERSCAN_OFF); |
1223 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); | 1622 | drm_connector_attach_property(&radeon_connector->base, |
1224 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); | 1623 | rdev->mode_info.underscan_hborder_property, |
1225 | radeon_connector->dac_load_detect = true; | 1624 | 0); |
1226 | drm_connector_attach_property(&radeon_connector->base, | 1625 | drm_connector_attach_property(&radeon_connector->base, |
1227 | rdev->mode_info.load_detect_property, | 1626 | rdev->mode_info.underscan_vborder_property, |
1228 | 1); | 1627 | 0); |
1229 | drm_connector_attach_property(&radeon_connector->base, | 1628 | } |
1230 | rdev->mode_info.tv_std_property, | 1629 | connector->interlace_allowed = true; |
1231 | radeon_atombios_get_tv_info(rdev)); | 1630 | /* in theory with a DP to VGA converter... */ |
1232 | /* no HPD on analog connectors */ | 1631 | connector->doublescan_allowed = false; |
1233 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | 1632 | break; |
1234 | break; | 1633 | case DRM_MODE_CONNECTOR_eDP: |
1235 | case DRM_MODE_CONNECTOR_LVDS: | 1634 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
1236 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1635 | if (!radeon_dig_connector) |
1237 | if (!radeon_dig_connector) | ||
1238 | goto failed; | ||
1239 | radeon_dig_connector->igp_lane_info = igp_lane_info; | ||
1240 | radeon_connector->con_priv = radeon_dig_connector; | ||
1241 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); | ||
1242 | drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); | ||
1243 | if (i2c_bus->valid) { | ||
1244 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | ||
1245 | if (!radeon_connector->ddc_bus) | ||
1246 | goto failed; | 1636 | goto failed; |
1637 | radeon_dig_connector->igp_lane_info = igp_lane_info; | ||
1638 | radeon_connector->con_priv = radeon_dig_connector; | ||
1639 | drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); | ||
1640 | drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); | ||
1641 | if (i2c_bus->valid) { | ||
1642 | /* add DP i2c bus */ | ||
1643 | radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch"); | ||
1644 | if (!radeon_dig_connector->dp_i2c_bus) | ||
1645 | DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n"); | ||
1646 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | ||
1647 | if (!radeon_connector->ddc_bus) | ||
1648 | DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); | ||
1649 | } | ||
1650 | drm_connector_attach_property(&radeon_connector->base, | ||
1651 | dev->mode_config.scaling_mode_property, | ||
1652 | DRM_MODE_SCALE_FULLSCREEN); | ||
1653 | subpixel_order = SubPixelHorizontalRGB; | ||
1654 | connector->interlace_allowed = false; | ||
1655 | connector->doublescan_allowed = false; | ||
1656 | break; | ||
1657 | case DRM_MODE_CONNECTOR_SVIDEO: | ||
1658 | case DRM_MODE_CONNECTOR_Composite: | ||
1659 | case DRM_MODE_CONNECTOR_9PinDIN: | ||
1660 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); | ||
1661 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); | ||
1662 | radeon_connector->dac_load_detect = true; | ||
1663 | drm_connector_attach_property(&radeon_connector->base, | ||
1664 | rdev->mode_info.load_detect_property, | ||
1665 | 1); | ||
1666 | drm_connector_attach_property(&radeon_connector->base, | ||
1667 | rdev->mode_info.tv_std_property, | ||
1668 | radeon_atombios_get_tv_info(rdev)); | ||
1669 | /* no HPD on analog connectors */ | ||
1670 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | ||
1671 | connector->interlace_allowed = false; | ||
1672 | connector->doublescan_allowed = false; | ||
1673 | break; | ||
1674 | case DRM_MODE_CONNECTOR_LVDS: | ||
1675 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | ||
1676 | if (!radeon_dig_connector) | ||
1677 | goto failed; | ||
1678 | radeon_dig_connector->igp_lane_info = igp_lane_info; | ||
1679 | radeon_connector->con_priv = radeon_dig_connector; | ||
1680 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); | ||
1681 | drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); | ||
1682 | if (i2c_bus->valid) { | ||
1683 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | ||
1684 | if (!radeon_connector->ddc_bus) | ||
1685 | DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); | ||
1686 | } | ||
1687 | drm_connector_attach_property(&radeon_connector->base, | ||
1688 | dev->mode_config.scaling_mode_property, | ||
1689 | DRM_MODE_SCALE_FULLSCREEN); | ||
1690 | subpixel_order = SubPixelHorizontalRGB; | ||
1691 | connector->interlace_allowed = false; | ||
1692 | connector->doublescan_allowed = false; | ||
1693 | break; | ||
1247 | } | 1694 | } |
1248 | drm_connector_attach_property(&radeon_connector->base, | ||
1249 | dev->mode_config.scaling_mode_property, | ||
1250 | DRM_MODE_SCALE_FULLSCREEN); | ||
1251 | subpixel_order = SubPixelHorizontalRGB; | ||
1252 | break; | ||
1253 | } | 1695 | } |
1254 | 1696 | ||
1255 | if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { | 1697 | if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { |
@@ -1317,7 +1759,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1317 | if (i2c_bus->valid) { | 1759 | if (i2c_bus->valid) { |
1318 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1760 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
1319 | if (!radeon_connector->ddc_bus) | 1761 | if (!radeon_connector->ddc_bus) |
1320 | goto failed; | 1762 | DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
1321 | } | 1763 | } |
1322 | radeon_connector->dac_load_detect = true; | 1764 | radeon_connector->dac_load_detect = true; |
1323 | drm_connector_attach_property(&radeon_connector->base, | 1765 | drm_connector_attach_property(&radeon_connector->base, |
@@ -1326,6 +1768,8 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1326 | /* no HPD on analog connectors */ | 1768 | /* no HPD on analog connectors */ |
1327 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | 1769 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1328 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | 1770 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
1771 | connector->interlace_allowed = true; | ||
1772 | connector->doublescan_allowed = true; | ||
1329 | break; | 1773 | break; |
1330 | case DRM_MODE_CONNECTOR_DVIA: | 1774 | case DRM_MODE_CONNECTOR_DVIA: |
1331 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 1775 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
@@ -1333,7 +1777,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1333 | if (i2c_bus->valid) { | 1777 | if (i2c_bus->valid) { |
1334 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1778 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
1335 | if (!radeon_connector->ddc_bus) | 1779 | if (!radeon_connector->ddc_bus) |
1336 | goto failed; | 1780 | DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
1337 | } | 1781 | } |
1338 | radeon_connector->dac_load_detect = true; | 1782 | radeon_connector->dac_load_detect = true; |
1339 | drm_connector_attach_property(&radeon_connector->base, | 1783 | drm_connector_attach_property(&radeon_connector->base, |
@@ -1341,6 +1785,8 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1341 | 1); | 1785 | 1); |
1342 | /* no HPD on analog connectors */ | 1786 | /* no HPD on analog connectors */ |
1343 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | 1787 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1788 | connector->interlace_allowed = true; | ||
1789 | connector->doublescan_allowed = true; | ||
1344 | break; | 1790 | break; |
1345 | case DRM_MODE_CONNECTOR_DVII: | 1791 | case DRM_MODE_CONNECTOR_DVII: |
1346 | case DRM_MODE_CONNECTOR_DVID: | 1792 | case DRM_MODE_CONNECTOR_DVID: |
@@ -1349,7 +1795,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1349 | if (i2c_bus->valid) { | 1795 | if (i2c_bus->valid) { |
1350 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1796 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
1351 | if (!radeon_connector->ddc_bus) | 1797 | if (!radeon_connector->ddc_bus) |
1352 | goto failed; | 1798 | DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
1353 | } | 1799 | } |
1354 | if (connector_type == DRM_MODE_CONNECTOR_DVII) { | 1800 | if (connector_type == DRM_MODE_CONNECTOR_DVII) { |
1355 | radeon_connector->dac_load_detect = true; | 1801 | radeon_connector->dac_load_detect = true; |
@@ -1358,6 +1804,11 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1358 | 1); | 1804 | 1); |
1359 | } | 1805 | } |
1360 | subpixel_order = SubPixelHorizontalRGB; | 1806 | subpixel_order = SubPixelHorizontalRGB; |
1807 | connector->interlace_allowed = true; | ||
1808 | if (connector_type == DRM_MODE_CONNECTOR_DVII) | ||
1809 | connector->doublescan_allowed = true; | ||
1810 | else | ||
1811 | connector->doublescan_allowed = false; | ||
1361 | break; | 1812 | break; |
1362 | case DRM_MODE_CONNECTOR_SVIDEO: | 1813 | case DRM_MODE_CONNECTOR_SVIDEO: |
1363 | case DRM_MODE_CONNECTOR_Composite: | 1814 | case DRM_MODE_CONNECTOR_Composite: |
@@ -1380,6 +1831,8 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1380 | radeon_combios_get_tv_info(rdev)); | 1831 | radeon_combios_get_tv_info(rdev)); |
1381 | /* no HPD on analog connectors */ | 1832 | /* no HPD on analog connectors */ |
1382 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | 1833 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1834 | connector->interlace_allowed = false; | ||
1835 | connector->doublescan_allowed = false; | ||
1383 | break; | 1836 | break; |
1384 | case DRM_MODE_CONNECTOR_LVDS: | 1837 | case DRM_MODE_CONNECTOR_LVDS: |
1385 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); | 1838 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); |
@@ -1387,12 +1840,14 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1387 | if (i2c_bus->valid) { | 1840 | if (i2c_bus->valid) { |
1388 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1841 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
1389 | if (!radeon_connector->ddc_bus) | 1842 | if (!radeon_connector->ddc_bus) |
1390 | goto failed; | 1843 | DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
1391 | } | 1844 | } |
1392 | drm_connector_attach_property(&radeon_connector->base, | 1845 | drm_connector_attach_property(&radeon_connector->base, |
1393 | dev->mode_config.scaling_mode_property, | 1846 | dev->mode_config.scaling_mode_property, |
1394 | DRM_MODE_SCALE_FULLSCREEN); | 1847 | DRM_MODE_SCALE_FULLSCREEN); |
1395 | subpixel_order = SubPixelHorizontalRGB; | 1848 | subpixel_order = SubPixelHorizontalRGB; |
1849 | connector->interlace_allowed = false; | ||
1850 | connector->doublescan_allowed = false; | ||
1396 | break; | 1851 | break; |
1397 | } | 1852 | } |
1398 | 1853 | ||
@@ -1403,9 +1858,15 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1403 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 1858 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
1404 | connector->display_info.subpixel_order = subpixel_order; | 1859 | connector->display_info.subpixel_order = subpixel_order; |
1405 | drm_sysfs_connector_add(connector); | 1860 | drm_sysfs_connector_add(connector); |
1406 | return; | 1861 | if (connector_type == DRM_MODE_CONNECTOR_LVDS) { |
1862 | struct drm_encoder *drm_encoder; | ||
1407 | 1863 | ||
1408 | failed: | 1864 | list_for_each_entry(drm_encoder, &dev->mode_config.encoder_list, head) { |
1409 | drm_connector_cleanup(connector); | 1865 | struct radeon_encoder *radeon_encoder; |
1410 | kfree(connector); | 1866 | |
1867 | radeon_encoder = to_radeon_encoder(drm_encoder); | ||
1868 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_LVDS) | ||
1869 | radeon_legacy_backlight_init(radeon_encoder, connector); | ||
1870 | } | ||
1871 | } | ||
1411 | } | 1872 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index eb6b9eed7349..75867792a4e2 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
@@ -244,7 +244,7 @@ void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base) | |||
244 | u32 agp_base_lo = agp_base & 0xffffffff; | 244 | u32 agp_base_lo = agp_base & 0xffffffff; |
245 | u32 r6xx_agp_base = (agp_base >> 22) & 0x3ffff; | 245 | u32 r6xx_agp_base = (agp_base >> 22) & 0x3ffff; |
246 | 246 | ||
247 | /* R6xx/R7xx must be aligned to a 4MB boundry */ | 247 | /* R6xx/R7xx must be aligned to a 4MB boundary */ |
248 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) | 248 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) |
249 | RADEON_WRITE(R700_MC_VM_AGP_BASE, r6xx_agp_base); | 249 | RADEON_WRITE(R700_MC_VM_AGP_BASE, r6xx_agp_base); |
250 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | 250 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) |
@@ -2113,9 +2113,9 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) | |||
2113 | break; | 2113 | break; |
2114 | } | 2114 | } |
2115 | 2115 | ||
2116 | if (drm_device_is_agp(dev)) | 2116 | if (drm_pci_device_is_agp(dev)) |
2117 | dev_priv->flags |= RADEON_IS_AGP; | 2117 | dev_priv->flags |= RADEON_IS_AGP; |
2118 | else if (drm_device_is_pcie(dev)) | 2118 | else if (drm_pci_device_is_pcie(dev)) |
2119 | dev_priv->flags |= RADEON_IS_PCIE; | 2119 | dev_priv->flags |= RADEON_IS_PCIE; |
2120 | else | 2120 | else |
2121 | dev_priv->flags |= RADEON_IS_PCI; | 2121 | dev_priv->flags |= RADEON_IS_PCI; |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index fcc79b5d22d1..fae00c0d75aa 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -75,15 +75,15 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
75 | return -ENOENT; | 75 | return -ENOENT; |
76 | } | 76 | } |
77 | p->relocs_ptr[i] = &p->relocs[i]; | 77 | p->relocs_ptr[i] = &p->relocs[i]; |
78 | p->relocs[i].robj = p->relocs[i].gobj->driver_private; | 78 | p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj); |
79 | p->relocs[i].lobj.bo = p->relocs[i].robj; | 79 | p->relocs[i].lobj.bo = p->relocs[i].robj; |
80 | p->relocs[i].lobj.rdomain = r->read_domains; | ||
81 | p->relocs[i].lobj.wdomain = r->write_domain; | 80 | p->relocs[i].lobj.wdomain = r->write_domain; |
81 | p->relocs[i].lobj.rdomain = r->read_domains; | ||
82 | p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo; | ||
82 | p->relocs[i].handle = r->handle; | 83 | p->relocs[i].handle = r->handle; |
83 | p->relocs[i].flags = r->flags; | 84 | p->relocs[i].flags = r->flags; |
84 | INIT_LIST_HEAD(&p->relocs[i].lobj.list); | ||
85 | radeon_bo_list_add_object(&p->relocs[i].lobj, | 85 | radeon_bo_list_add_object(&p->relocs[i].lobj, |
86 | &p->validated); | 86 | &p->validated); |
87 | } | 87 | } |
88 | } | 88 | } |
89 | return radeon_bo_list_validate(&p->validated); | 89 | return radeon_bo_list_validate(&p->validated); |
@@ -189,10 +189,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) | |||
189 | { | 189 | { |
190 | unsigned i; | 190 | unsigned i; |
191 | 191 | ||
192 | if (!error && parser->ib) { | 192 | |
193 | radeon_bo_list_fence(&parser->validated, parser->ib->fence); | 193 | if (!error && parser->ib) |
194 | } | 194 | ttm_eu_fence_buffer_objects(&parser->validated, |
195 | radeon_bo_list_unreserve(&parser->validated); | 195 | parser->ib->fence); |
196 | else | ||
197 | ttm_eu_backoff_reservation(&parser->validated); | ||
198 | |||
196 | if (parser->relocs != NULL) { | 199 | if (parser->relocs != NULL) { |
197 | for (i = 0; i < parser->nrelocs; i++) { | 200 | for (i = 0; i < parser->nrelocs; i++) { |
198 | if (parser->relocs[i].gobj) | 201 | if (parser->relocs[i].gobj) |
@@ -225,6 +228,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
225 | parser.filp = filp; | 228 | parser.filp = filp; |
226 | parser.rdev = rdev; | 229 | parser.rdev = rdev; |
227 | parser.dev = rdev->dev; | 230 | parser.dev = rdev->dev; |
231 | parser.family = rdev->family; | ||
228 | r = radeon_cs_parser_init(&parser, data); | 232 | r = radeon_cs_parser_init(&parser, data); |
229 | if (r) { | 233 | if (r) { |
230 | DRM_ERROR("Failed to initialize parser !\n"); | 234 | DRM_ERROR("Failed to initialize parser !\n"); |
@@ -268,7 +272,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
268 | } | 272 | } |
269 | r = radeon_ib_schedule(rdev, parser.ib); | 273 | r = radeon_ib_schedule(rdev, parser.ib); |
270 | if (r) { | 274 | if (r) { |
271 | DRM_ERROR("Faild to schedule IB !\n"); | 275 | DRM_ERROR("Failed to schedule IB !\n"); |
272 | } | 276 | } |
273 | radeon_cs_parser_fini(&parser, r); | 277 | radeon_cs_parser_fini(&parser, r); |
274 | mutex_unlock(&rdev->cs_mutex); | 278 | mutex_unlock(&rdev->cs_mutex); |
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 3eef567b0421..3189a7efb2e9 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
@@ -118,22 +118,25 @@ static void radeon_show_cursor(struct drm_crtc *crtc) | |||
118 | } | 118 | } |
119 | 119 | ||
120 | static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, | 120 | static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, |
121 | uint32_t gpu_addr) | 121 | uint64_t gpu_addr) |
122 | { | 122 | { |
123 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 123 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
124 | struct radeon_device *rdev = crtc->dev->dev_private; | 124 | struct radeon_device *rdev = crtc->dev->dev_private; |
125 | 125 | ||
126 | if (ASIC_IS_DCE4(rdev)) { | 126 | if (ASIC_IS_DCE4(rdev)) { |
127 | WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0); | 127 | WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, |
128 | WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); | 128 | upper_32_bits(gpu_addr)); |
129 | WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | ||
130 | gpu_addr & 0xffffffff); | ||
129 | } else if (ASIC_IS_AVIVO(rdev)) { | 131 | } else if (ASIC_IS_AVIVO(rdev)) { |
130 | if (rdev->family >= CHIP_RV770) { | 132 | if (rdev->family >= CHIP_RV770) { |
131 | if (radeon_crtc->crtc_id) | 133 | if (radeon_crtc->crtc_id) |
132 | WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0); | 134 | WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr)); |
133 | else | 135 | else |
134 | WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0); | 136 | WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr)); |
135 | } | 137 | } |
136 | WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); | 138 | WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, |
139 | gpu_addr & 0xffffffff); | ||
137 | } else { | 140 | } else { |
138 | radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; | 141 | radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; |
139 | /* offset is from DISP(2)_BASE_ADDRESS */ | 142 | /* offset is from DISP(2)_BASE_ADDRESS */ |
@@ -164,9 +167,6 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, | |||
164 | return -EINVAL; | 167 | return -EINVAL; |
165 | } | 168 | } |
166 | 169 | ||
167 | radeon_crtc->cursor_width = width; | ||
168 | radeon_crtc->cursor_height = height; | ||
169 | |||
170 | obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); | 170 | obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); |
171 | if (!obj) { | 171 | if (!obj) { |
172 | DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id); | 172 | DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id); |
@@ -177,6 +177,9 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, | |||
177 | if (ret) | 177 | if (ret) |
178 | goto fail; | 178 | goto fail; |
179 | 179 | ||
180 | radeon_crtc->cursor_width = width; | ||
181 | radeon_crtc->cursor_height = height; | ||
182 | |||
180 | radeon_lock_cursor(crtc, true); | 183 | radeon_lock_cursor(crtc, true); |
181 | /* XXX only 27 bit offset for legacy cursor */ | 184 | /* XXX only 27 bit offset for legacy cursor */ |
182 | radeon_set_cursor(crtc, obj, gpu_addr); | 185 | radeon_set_cursor(crtc, obj, gpu_addr); |
@@ -223,7 +226,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
223 | y += crtc->y; | 226 | y += crtc->y; |
224 | DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); | 227 | DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); |
225 | 228 | ||
226 | /* avivo cursor image can't end on 128 pixel boundry or | 229 | /* avivo cursor image can't end on 128 pixel boundary or |
227 | * go past the end of the frame if both crtcs are enabled | 230 | * go past the end of the frame if both crtcs are enabled |
228 | */ | 231 | */ |
229 | list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) { | 232 | list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) { |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 256d204a6d24..7cfaa7e2f3b5 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -81,6 +81,13 @@ static const char radeon_family_name[][16] = { | |||
81 | "JUNIPER", | 81 | "JUNIPER", |
82 | "CYPRESS", | 82 | "CYPRESS", |
83 | "HEMLOCK", | 83 | "HEMLOCK", |
84 | "PALM", | ||
85 | "SUMO", | ||
86 | "SUMO2", | ||
87 | "BARTS", | ||
88 | "TURKS", | ||
89 | "CAICOS", | ||
90 | "CAYMAN", | ||
84 | "LAST", | 91 | "LAST", |
85 | }; | 92 | }; |
86 | 93 | ||
@@ -117,9 +124,10 @@ void radeon_scratch_init(struct radeon_device *rdev) | |||
117 | } else { | 124 | } else { |
118 | rdev->scratch.num_reg = 7; | 125 | rdev->scratch.num_reg = 7; |
119 | } | 126 | } |
127 | rdev->scratch.reg_base = RADEON_SCRATCH_REG0; | ||
120 | for (i = 0; i < rdev->scratch.num_reg; i++) { | 128 | for (i = 0; i < rdev->scratch.num_reg; i++) { |
121 | rdev->scratch.free[i] = true; | 129 | rdev->scratch.free[i] = true; |
122 | rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4); | 130 | rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); |
123 | } | 131 | } |
124 | } | 132 | } |
125 | 133 | ||
@@ -149,6 +157,93 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) | |||
149 | } | 157 | } |
150 | } | 158 | } |
151 | 159 | ||
160 | void radeon_wb_disable(struct radeon_device *rdev) | ||
161 | { | ||
162 | int r; | ||
163 | |||
164 | if (rdev->wb.wb_obj) { | ||
165 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | ||
166 | if (unlikely(r != 0)) | ||
167 | return; | ||
168 | radeon_bo_kunmap(rdev->wb.wb_obj); | ||
169 | radeon_bo_unpin(rdev->wb.wb_obj); | ||
170 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
171 | } | ||
172 | rdev->wb.enabled = false; | ||
173 | } | ||
174 | |||
175 | void radeon_wb_fini(struct radeon_device *rdev) | ||
176 | { | ||
177 | radeon_wb_disable(rdev); | ||
178 | if (rdev->wb.wb_obj) { | ||
179 | radeon_bo_unref(&rdev->wb.wb_obj); | ||
180 | rdev->wb.wb = NULL; | ||
181 | rdev->wb.wb_obj = NULL; | ||
182 | } | ||
183 | } | ||
184 | |||
185 | int radeon_wb_init(struct radeon_device *rdev) | ||
186 | { | ||
187 | int r; | ||
188 | |||
189 | if (rdev->wb.wb_obj == NULL) { | ||
190 | r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, | ||
191 | RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); | ||
192 | if (r) { | ||
193 | dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); | ||
194 | return r; | ||
195 | } | ||
196 | } | ||
197 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | ||
198 | if (unlikely(r != 0)) { | ||
199 | radeon_wb_fini(rdev); | ||
200 | return r; | ||
201 | } | ||
202 | r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, | ||
203 | &rdev->wb.gpu_addr); | ||
204 | if (r) { | ||
205 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
206 | dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); | ||
207 | radeon_wb_fini(rdev); | ||
208 | return r; | ||
209 | } | ||
210 | r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | ||
211 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
212 | if (r) { | ||
213 | dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); | ||
214 | radeon_wb_fini(rdev); | ||
215 | return r; | ||
216 | } | ||
217 | |||
218 | /* clear wb memory */ | ||
219 | memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE); | ||
220 | /* disable event_write fences */ | ||
221 | rdev->wb.use_event = false; | ||
222 | /* disabled via module param */ | ||
223 | if (radeon_no_wb == 1) | ||
224 | rdev->wb.enabled = false; | ||
225 | else { | ||
226 | /* often unreliable on AGP */ | ||
227 | if (rdev->flags & RADEON_IS_AGP) { | ||
228 | rdev->wb.enabled = false; | ||
229 | } else { | ||
230 | rdev->wb.enabled = true; | ||
231 | /* event_write fences are only available on r600+ */ | ||
232 | if (rdev->family >= CHIP_R600) | ||
233 | rdev->wb.use_event = true; | ||
234 | } | ||
235 | } | ||
236 | /* always use writeback/events on NI */ | ||
237 | if (ASIC_IS_DCE5(rdev)) { | ||
238 | rdev->wb.enabled = true; | ||
239 | rdev->wb.use_event = true; | ||
240 | } | ||
241 | |||
242 | dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis"); | ||
243 | |||
244 | return 0; | ||
245 | } | ||
246 | |||
152 | /** | 247 | /** |
153 | * radeon_vram_location - try to find VRAM location | 248 | * radeon_vram_location - try to find VRAM location |
154 | * @rdev: radeon device structure holding all necessary informations | 249 | * @rdev: radeon device structure holding all necessary informations |
@@ -171,7 +266,7 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) | |||
171 | * Note: GTT start, end, size should be initialized before calling this | 266 | * Note: GTT start, end, size should be initialized before calling this |
172 | * function on AGP platform. | 267 | * function on AGP platform. |
173 | * | 268 | * |
174 | * Note: We don't explictly enforce VRAM start to be aligned on VRAM size, | 269 | * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size, |
175 | * this shouldn't be a problem as we are using the PCI aperture as a reference. | 270 | * this shouldn't be a problem as we are using the PCI aperture as a reference. |
176 | * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but | 271 | * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but |
177 | * not IGP. | 272 | * not IGP. |
@@ -205,7 +300,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 | |||
205 | mc->mc_vram_size = mc->aper_size; | 300 | mc->mc_vram_size = mc->aper_size; |
206 | } | 301 | } |
207 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; | 302 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; |
208 | dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", | 303 | dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", |
209 | mc->mc_vram_size >> 20, mc->vram_start, | 304 | mc->mc_vram_size >> 20, mc->vram_start, |
210 | mc->vram_end, mc->real_vram_size >> 20); | 305 | mc->vram_end, mc->real_vram_size >> 20); |
211 | } | 306 | } |
@@ -242,7 +337,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | |||
242 | mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; | 337 | mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; |
243 | } | 338 | } |
244 | mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; | 339 | mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; |
245 | dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", | 340 | dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", |
246 | mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); | 341 | mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); |
247 | } | 342 | } |
248 | 343 | ||
@@ -254,7 +349,12 @@ bool radeon_card_posted(struct radeon_device *rdev) | |||
254 | uint32_t reg; | 349 | uint32_t reg; |
255 | 350 | ||
256 | /* first check CRTCs */ | 351 | /* first check CRTCs */ |
257 | if (ASIC_IS_DCE4(rdev)) { | 352 | if (ASIC_IS_DCE41(rdev)) { |
353 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | | ||
354 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); | ||
355 | if (reg & EVERGREEN_CRTC_MASTER_EN) | ||
356 | return true; | ||
357 | } else if (ASIC_IS_DCE4(rdev)) { | ||
258 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | | 358 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | |
259 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | | 359 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | |
260 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | | 360 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | |
@@ -555,20 +655,20 @@ void radeon_check_arguments(struct radeon_device *rdev) | |||
555 | static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) | 655 | static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) |
556 | { | 656 | { |
557 | struct drm_device *dev = pci_get_drvdata(pdev); | 657 | struct drm_device *dev = pci_get_drvdata(pdev); |
558 | struct radeon_device *rdev = dev->dev_private; | ||
559 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | 658 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; |
560 | if (state == VGA_SWITCHEROO_ON) { | 659 | if (state == VGA_SWITCHEROO_ON) { |
561 | printk(KERN_INFO "radeon: switched on\n"); | 660 | printk(KERN_INFO "radeon: switched on\n"); |
562 | /* don't suspend or resume card normally */ | 661 | /* don't suspend or resume card normally */ |
563 | rdev->powered_down = false; | 662 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; |
564 | radeon_resume_kms(dev); | 663 | radeon_resume_kms(dev); |
664 | dev->switch_power_state = DRM_SWITCH_POWER_ON; | ||
565 | drm_kms_helper_poll_enable(dev); | 665 | drm_kms_helper_poll_enable(dev); |
566 | } else { | 666 | } else { |
567 | printk(KERN_INFO "radeon: switched off\n"); | 667 | printk(KERN_INFO "radeon: switched off\n"); |
568 | drm_kms_helper_poll_disable(dev); | 668 | drm_kms_helper_poll_disable(dev); |
669 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | ||
569 | radeon_suspend_kms(dev, pmm); | 670 | radeon_suspend_kms(dev, pmm); |
570 | /* don't suspend or resume card normally */ | 671 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; |
571 | rdev->powered_down = true; | ||
572 | } | 672 | } |
573 | } | 673 | } |
574 | 674 | ||
@@ -623,11 +723,6 @@ int radeon_device_init(struct radeon_device *rdev, | |||
623 | init_waitqueue_head(&rdev->irq.vblank_queue); | 723 | init_waitqueue_head(&rdev->irq.vblank_queue); |
624 | init_waitqueue_head(&rdev->irq.idle_queue); | 724 | init_waitqueue_head(&rdev->irq.idle_queue); |
625 | 725 | ||
626 | /* setup workqueue */ | ||
627 | rdev->wq = create_workqueue("radeon"); | ||
628 | if (rdev->wq == NULL) | ||
629 | return -ENOMEM; | ||
630 | |||
631 | /* Set asic functions */ | 726 | /* Set asic functions */ |
632 | r = radeon_asic_init(rdev); | 727 | r = radeon_asic_init(rdev); |
633 | if (r) | 728 | if (r) |
@@ -661,6 +756,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
661 | dma_bits = rdev->need_dma32 ? 32 : 40; | 756 | dma_bits = rdev->need_dma32 ? 32 : 40; |
662 | r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); | 757 | r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); |
663 | if (r) { | 758 | if (r) { |
759 | rdev->need_dma32 = true; | ||
664 | printk(KERN_WARNING "radeon: No suitable DMA available.\n"); | 760 | printk(KERN_WARNING "radeon: No suitable DMA available.\n"); |
665 | } | 761 | } |
666 | 762 | ||
@@ -692,6 +788,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
692 | vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); | 788 | vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); |
693 | vga_switcheroo_register_client(rdev->pdev, | 789 | vga_switcheroo_register_client(rdev->pdev, |
694 | radeon_switcheroo_set_state, | 790 | radeon_switcheroo_set_state, |
791 | NULL, | ||
695 | radeon_switcheroo_can_switch); | 792 | radeon_switcheroo_can_switch); |
696 | 793 | ||
697 | r = radeon_init(rdev); | 794 | r = radeon_init(rdev); |
@@ -725,7 +822,6 @@ void radeon_device_fini(struct radeon_device *rdev) | |||
725 | /* evict vram memory */ | 822 | /* evict vram memory */ |
726 | radeon_bo_evict_vram(rdev); | 823 | radeon_bo_evict_vram(rdev); |
727 | radeon_fini(rdev); | 824 | radeon_fini(rdev); |
728 | destroy_workqueue(rdev->wq); | ||
729 | vga_switcheroo_unregister_client(rdev->pdev); | 825 | vga_switcheroo_unregister_client(rdev->pdev); |
730 | vga_client_register(rdev->pdev, NULL, NULL, NULL); | 826 | vga_client_register(rdev->pdev, NULL, NULL, NULL); |
731 | if (rdev->rio_mem) | 827 | if (rdev->rio_mem) |
@@ -754,7 +850,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
754 | } | 850 | } |
755 | rdev = dev->dev_private; | 851 | rdev = dev->dev_private; |
756 | 852 | ||
757 | if (rdev->powered_down) | 853 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
758 | return 0; | 854 | return 0; |
759 | 855 | ||
760 | /* turn off display hw */ | 856 | /* turn off display hw */ |
@@ -770,7 +866,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
770 | if (rfb == NULL || rfb->obj == NULL) { | 866 | if (rfb == NULL || rfb->obj == NULL) { |
771 | continue; | 867 | continue; |
772 | } | 868 | } |
773 | robj = rfb->obj->driver_private; | 869 | robj = gem_to_radeon_bo(rfb->obj); |
774 | /* don't unpin kernel fb objects */ | 870 | /* don't unpin kernel fb objects */ |
775 | if (!radeon_fbdev_robj_is_fb(rdev, robj)) { | 871 | if (!radeon_fbdev_robj_is_fb(rdev, robj)) { |
776 | r = radeon_bo_reserve(robj, false); | 872 | r = radeon_bo_reserve(robj, false); |
@@ -801,9 +897,9 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
801 | pci_disable_device(dev->pdev); | 897 | pci_disable_device(dev->pdev); |
802 | pci_set_power_state(dev->pdev, PCI_D3hot); | 898 | pci_set_power_state(dev->pdev, PCI_D3hot); |
803 | } | 899 | } |
804 | acquire_console_sem(); | 900 | console_lock(); |
805 | radeon_fbdev_set_suspend(rdev, 1); | 901 | radeon_fbdev_set_suspend(rdev, 1); |
806 | release_console_sem(); | 902 | console_unlock(); |
807 | return 0; | 903 | return 0; |
808 | } | 904 | } |
809 | 905 | ||
@@ -812,14 +908,14 @@ int radeon_resume_kms(struct drm_device *dev) | |||
812 | struct drm_connector *connector; | 908 | struct drm_connector *connector; |
813 | struct radeon_device *rdev = dev->dev_private; | 909 | struct radeon_device *rdev = dev->dev_private; |
814 | 910 | ||
815 | if (rdev->powered_down) | 911 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
816 | return 0; | 912 | return 0; |
817 | 913 | ||
818 | acquire_console_sem(); | 914 | console_lock(); |
819 | pci_set_power_state(dev->pdev, PCI_D0); | 915 | pci_set_power_state(dev->pdev, PCI_D0); |
820 | pci_restore_state(dev->pdev); | 916 | pci_restore_state(dev->pdev); |
821 | if (pci_enable_device(dev->pdev)) { | 917 | if (pci_enable_device(dev->pdev)) { |
822 | release_console_sem(); | 918 | console_unlock(); |
823 | return -1; | 919 | return -1; |
824 | } | 920 | } |
825 | pci_set_master(dev->pdev); | 921 | pci_set_master(dev->pdev); |
@@ -829,26 +925,31 @@ int radeon_resume_kms(struct drm_device *dev) | |||
829 | radeon_pm_resume(rdev); | 925 | radeon_pm_resume(rdev); |
830 | radeon_restore_bios_scratch_regs(rdev); | 926 | radeon_restore_bios_scratch_regs(rdev); |
831 | 927 | ||
832 | /* turn on display hw */ | ||
833 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
834 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
835 | } | ||
836 | |||
837 | radeon_fbdev_set_suspend(rdev, 0); | 928 | radeon_fbdev_set_suspend(rdev, 0); |
838 | release_console_sem(); | 929 | console_unlock(); |
839 | 930 | ||
931 | /* init dig PHYs */ | ||
932 | if (rdev->is_atom_bios) | ||
933 | radeon_atom_encoder_init(rdev); | ||
840 | /* reset hpd state */ | 934 | /* reset hpd state */ |
841 | radeon_hpd_init(rdev); | 935 | radeon_hpd_init(rdev); |
842 | /* blat the mode back in */ | 936 | /* blat the mode back in */ |
843 | drm_helper_resume_force_mode(dev); | 937 | drm_helper_resume_force_mode(dev); |
938 | /* turn on display hw */ | ||
939 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
940 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
941 | } | ||
844 | return 0; | 942 | return 0; |
845 | } | 943 | } |
846 | 944 | ||
847 | int radeon_gpu_reset(struct radeon_device *rdev) | 945 | int radeon_gpu_reset(struct radeon_device *rdev) |
848 | { | 946 | { |
849 | int r; | 947 | int r; |
948 | int resched; | ||
850 | 949 | ||
851 | radeon_save_bios_scratch_regs(rdev); | 950 | radeon_save_bios_scratch_regs(rdev); |
951 | /* block TTM */ | ||
952 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); | ||
852 | radeon_suspend(rdev); | 953 | radeon_suspend(rdev); |
853 | 954 | ||
854 | r = radeon_asic_reset(rdev); | 955 | r = radeon_asic_reset(rdev); |
@@ -857,6 +958,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) | |||
857 | radeon_resume(rdev); | 958 | radeon_resume(rdev); |
858 | radeon_restore_bios_scratch_regs(rdev); | 959 | radeon_restore_bios_scratch_regs(rdev); |
859 | drm_helper_resume_force_mode(rdev->ddev); | 960 | drm_helper_resume_force_mode(rdev->ddev); |
961 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | ||
860 | return 0; | 962 | return 0; |
861 | } | 963 | } |
862 | /* bad news, how to tell it to userspace ? */ | 964 | /* bad news, how to tell it to userspace ? */ |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index b92d2f2fcbed..292f73f0ddbd 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -68,7 +68,7 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc) | |||
68 | WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); | 68 | WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); |
69 | } | 69 | } |
70 | 70 | ||
71 | static void evergreen_crtc_load_lut(struct drm_crtc *crtc) | 71 | static void dce4_crtc_load_lut(struct drm_crtc *crtc) |
72 | { | 72 | { |
73 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 73 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
74 | struct drm_device *dev = crtc->dev; | 74 | struct drm_device *dev = crtc->dev; |
@@ -98,6 +98,66 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc) | |||
98 | } | 98 | } |
99 | } | 99 | } |
100 | 100 | ||
101 | static void dce5_crtc_load_lut(struct drm_crtc *crtc) | ||
102 | { | ||
103 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
104 | struct drm_device *dev = crtc->dev; | ||
105 | struct radeon_device *rdev = dev->dev_private; | ||
106 | int i; | ||
107 | |||
108 | DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); | ||
109 | |||
110 | WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset, | ||
111 | (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) | | ||
112 | NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS))); | ||
113 | WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset, | ||
114 | NI_GRPH_PRESCALE_BYPASS); | ||
115 | WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset, | ||
116 | NI_OVL_PRESCALE_BYPASS); | ||
117 | WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset, | ||
118 | (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) | | ||
119 | NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT))); | ||
120 | |||
121 | WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0); | ||
122 | |||
123 | WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); | ||
124 | WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); | ||
125 | WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); | ||
126 | |||
127 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); | ||
128 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); | ||
129 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); | ||
130 | |||
131 | WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0); | ||
132 | WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007); | ||
133 | |||
134 | WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0); | ||
135 | for (i = 0; i < 256; i++) { | ||
136 | WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset, | ||
137 | (radeon_crtc->lut_r[i] << 20) | | ||
138 | (radeon_crtc->lut_g[i] << 10) | | ||
139 | (radeon_crtc->lut_b[i] << 0)); | ||
140 | } | ||
141 | |||
142 | WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset, | ||
143 | (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | | ||
144 | NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | | ||
145 | NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | | ||
146 | NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS))); | ||
147 | WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset, | ||
148 | (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) | | ||
149 | NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS))); | ||
150 | WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset, | ||
151 | (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) | | ||
152 | NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS))); | ||
153 | WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset, | ||
154 | (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) | | ||
155 | NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS))); | ||
156 | /* XXX match this to the depth of the crtc fmt block, move to modeset? */ | ||
157 | WREG32(0x6940 + radeon_crtc->crtc_offset, 0); | ||
158 | |||
159 | } | ||
160 | |||
101 | static void legacy_crtc_load_lut(struct drm_crtc *crtc) | 161 | static void legacy_crtc_load_lut(struct drm_crtc *crtc) |
102 | { | 162 | { |
103 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 163 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
@@ -130,8 +190,10 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc) | |||
130 | if (!crtc->enabled) | 190 | if (!crtc->enabled) |
131 | return; | 191 | return; |
132 | 192 | ||
133 | if (ASIC_IS_DCE4(rdev)) | 193 | if (ASIC_IS_DCE5(rdev)) |
134 | evergreen_crtc_load_lut(crtc); | 194 | dce5_crtc_load_lut(crtc); |
195 | else if (ASIC_IS_DCE4(rdev)) | ||
196 | dce4_crtc_load_lut(crtc); | ||
135 | else if (ASIC_IS_AVIVO(rdev)) | 197 | else if (ASIC_IS_AVIVO(rdev)) |
136 | avivo_crtc_load_lut(crtc); | 198 | avivo_crtc_load_lut(crtc); |
137 | else | 199 | else |
@@ -183,12 +245,275 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc) | |||
183 | kfree(radeon_crtc); | 245 | kfree(radeon_crtc); |
184 | } | 246 | } |
185 | 247 | ||
248 | /* | ||
249 | * Handle unpin events outside the interrupt handler proper. | ||
250 | */ | ||
251 | static void radeon_unpin_work_func(struct work_struct *__work) | ||
252 | { | ||
253 | struct radeon_unpin_work *work = | ||
254 | container_of(__work, struct radeon_unpin_work, work); | ||
255 | int r; | ||
256 | |||
257 | /* unpin of the old buffer */ | ||
258 | r = radeon_bo_reserve(work->old_rbo, false); | ||
259 | if (likely(r == 0)) { | ||
260 | r = radeon_bo_unpin(work->old_rbo); | ||
261 | if (unlikely(r != 0)) { | ||
262 | DRM_ERROR("failed to unpin buffer after flip\n"); | ||
263 | } | ||
264 | radeon_bo_unreserve(work->old_rbo); | ||
265 | } else | ||
266 | DRM_ERROR("failed to reserve buffer after flip\n"); | ||
267 | |||
268 | drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); | ||
269 | kfree(work); | ||
270 | } | ||
271 | |||
272 | void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) | ||
273 | { | ||
274 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | ||
275 | struct radeon_unpin_work *work; | ||
276 | struct drm_pending_vblank_event *e; | ||
277 | struct timeval now; | ||
278 | unsigned long flags; | ||
279 | u32 update_pending; | ||
280 | int vpos, hpos; | ||
281 | |||
282 | spin_lock_irqsave(&rdev->ddev->event_lock, flags); | ||
283 | work = radeon_crtc->unpin_work; | ||
284 | if (work == NULL || | ||
285 | !radeon_fence_signaled(work->fence)) { | ||
286 | spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); | ||
287 | return; | ||
288 | } | ||
289 | /* New pageflip, or just completion of a previous one? */ | ||
290 | if (!radeon_crtc->deferred_flip_completion) { | ||
291 | /* do the flip (mmio) */ | ||
292 | update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base); | ||
293 | } else { | ||
294 | /* This is just a completion of a flip queued in crtc | ||
295 | * at last invocation. Make sure we go directly to | ||
296 | * completion routine. | ||
297 | */ | ||
298 | update_pending = 0; | ||
299 | radeon_crtc->deferred_flip_completion = 0; | ||
300 | } | ||
301 | |||
302 | /* Has the pageflip already completed in crtc, or is it certain | ||
303 | * to complete in this vblank? | ||
304 | */ | ||
305 | if (update_pending && | ||
306 | (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, | ||
307 | &vpos, &hpos)) && | ||
308 | (vpos >=0) && | ||
309 | (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) { | ||
310 | /* crtc didn't flip in this target vblank interval, | ||
311 | * but flip is pending in crtc. It will complete it | ||
312 | * in next vblank interval, so complete the flip at | ||
313 | * next vblank irq. | ||
314 | */ | ||
315 | radeon_crtc->deferred_flip_completion = 1; | ||
316 | spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); | ||
317 | return; | ||
318 | } | ||
319 | |||
320 | /* Pageflip (will be) certainly completed in this vblank. Clean up. */ | ||
321 | radeon_crtc->unpin_work = NULL; | ||
322 | |||
323 | /* wakeup userspace */ | ||
324 | if (work->event) { | ||
325 | e = work->event; | ||
326 | e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now); | ||
327 | e->event.tv_sec = now.tv_sec; | ||
328 | e->event.tv_usec = now.tv_usec; | ||
329 | list_add_tail(&e->base.link, &e->base.file_priv->event_list); | ||
330 | wake_up_interruptible(&e->base.file_priv->event_wait); | ||
331 | } | ||
332 | spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); | ||
333 | |||
334 | drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); | ||
335 | radeon_fence_unref(&work->fence); | ||
336 | radeon_post_page_flip(work->rdev, work->crtc_id); | ||
337 | schedule_work(&work->work); | ||
338 | } | ||
339 | |||
340 | static int radeon_crtc_page_flip(struct drm_crtc *crtc, | ||
341 | struct drm_framebuffer *fb, | ||
342 | struct drm_pending_vblank_event *event) | ||
343 | { | ||
344 | struct drm_device *dev = crtc->dev; | ||
345 | struct radeon_device *rdev = dev->dev_private; | ||
346 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
347 | struct radeon_framebuffer *old_radeon_fb; | ||
348 | struct radeon_framebuffer *new_radeon_fb; | ||
349 | struct drm_gem_object *obj; | ||
350 | struct radeon_bo *rbo; | ||
351 | struct radeon_fence *fence; | ||
352 | struct radeon_unpin_work *work; | ||
353 | unsigned long flags; | ||
354 | u32 tiling_flags, pitch_pixels; | ||
355 | u64 base; | ||
356 | int r; | ||
357 | |||
358 | work = kzalloc(sizeof *work, GFP_KERNEL); | ||
359 | if (work == NULL) | ||
360 | return -ENOMEM; | ||
361 | |||
362 | r = radeon_fence_create(rdev, &fence); | ||
363 | if (unlikely(r != 0)) { | ||
364 | kfree(work); | ||
365 | DRM_ERROR("flip queue: failed to create fence.\n"); | ||
366 | return -ENOMEM; | ||
367 | } | ||
368 | work->event = event; | ||
369 | work->rdev = rdev; | ||
370 | work->crtc_id = radeon_crtc->crtc_id; | ||
371 | work->fence = radeon_fence_ref(fence); | ||
372 | old_radeon_fb = to_radeon_framebuffer(crtc->fb); | ||
373 | new_radeon_fb = to_radeon_framebuffer(fb); | ||
374 | /* schedule unpin of the old buffer */ | ||
375 | obj = old_radeon_fb->obj; | ||
376 | /* take a reference to the old object */ | ||
377 | drm_gem_object_reference(obj); | ||
378 | rbo = gem_to_radeon_bo(obj); | ||
379 | work->old_rbo = rbo; | ||
380 | INIT_WORK(&work->work, radeon_unpin_work_func); | ||
381 | |||
382 | /* We borrow the event spin lock for protecting unpin_work */ | ||
383 | spin_lock_irqsave(&dev->event_lock, flags); | ||
384 | if (radeon_crtc->unpin_work) { | ||
385 | DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); | ||
386 | r = -EBUSY; | ||
387 | goto unlock_free; | ||
388 | } | ||
389 | radeon_crtc->unpin_work = work; | ||
390 | radeon_crtc->deferred_flip_completion = 0; | ||
391 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
392 | |||
393 | /* pin the new buffer */ | ||
394 | obj = new_radeon_fb->obj; | ||
395 | rbo = gem_to_radeon_bo(obj); | ||
396 | |||
397 | DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", | ||
398 | work->old_rbo, rbo); | ||
399 | |||
400 | r = radeon_bo_reserve(rbo, false); | ||
401 | if (unlikely(r != 0)) { | ||
402 | DRM_ERROR("failed to reserve new rbo buffer before flip\n"); | ||
403 | goto pflip_cleanup; | ||
404 | } | ||
405 | r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base); | ||
406 | if (unlikely(r != 0)) { | ||
407 | radeon_bo_unreserve(rbo); | ||
408 | r = -EINVAL; | ||
409 | DRM_ERROR("failed to pin new rbo buffer before flip\n"); | ||
410 | goto pflip_cleanup; | ||
411 | } | ||
412 | radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); | ||
413 | radeon_bo_unreserve(rbo); | ||
414 | |||
415 | if (!ASIC_IS_AVIVO(rdev)) { | ||
416 | /* crtc offset is from display base addr not FB location */ | ||
417 | base -= radeon_crtc->legacy_display_base_addr; | ||
418 | pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8); | ||
419 | |||
420 | if (tiling_flags & RADEON_TILING_MACRO) { | ||
421 | if (ASIC_IS_R300(rdev)) { | ||
422 | base &= ~0x7ff; | ||
423 | } else { | ||
424 | int byteshift = fb->bits_per_pixel >> 4; | ||
425 | int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11; | ||
426 | base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8); | ||
427 | } | ||
428 | } else { | ||
429 | int offset = crtc->y * pitch_pixels + crtc->x; | ||
430 | switch (fb->bits_per_pixel) { | ||
431 | case 8: | ||
432 | default: | ||
433 | offset *= 1; | ||
434 | break; | ||
435 | case 15: | ||
436 | case 16: | ||
437 | offset *= 2; | ||
438 | break; | ||
439 | case 24: | ||
440 | offset *= 3; | ||
441 | break; | ||
442 | case 32: | ||
443 | offset *= 4; | ||
444 | break; | ||
445 | } | ||
446 | base += offset; | ||
447 | } | ||
448 | base &= ~7; | ||
449 | } | ||
450 | |||
451 | spin_lock_irqsave(&dev->event_lock, flags); | ||
452 | work->new_crtc_base = base; | ||
453 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
454 | |||
455 | /* update crtc fb */ | ||
456 | crtc->fb = fb; | ||
457 | |||
458 | r = drm_vblank_get(dev, radeon_crtc->crtc_id); | ||
459 | if (r) { | ||
460 | DRM_ERROR("failed to get vblank before flip\n"); | ||
461 | goto pflip_cleanup1; | ||
462 | } | ||
463 | |||
464 | /* 32 ought to cover us */ | ||
465 | r = radeon_ring_lock(rdev, 32); | ||
466 | if (r) { | ||
467 | DRM_ERROR("failed to lock the ring before flip\n"); | ||
468 | goto pflip_cleanup2; | ||
469 | } | ||
470 | |||
471 | /* emit the fence */ | ||
472 | radeon_fence_emit(rdev, fence); | ||
473 | /* set the proper interrupt */ | ||
474 | radeon_pre_page_flip(rdev, radeon_crtc->crtc_id); | ||
475 | /* fire the ring */ | ||
476 | radeon_ring_unlock_commit(rdev); | ||
477 | |||
478 | return 0; | ||
479 | |||
480 | pflip_cleanup2: | ||
481 | drm_vblank_put(dev, radeon_crtc->crtc_id); | ||
482 | |||
483 | pflip_cleanup1: | ||
484 | r = radeon_bo_reserve(rbo, false); | ||
485 | if (unlikely(r != 0)) { | ||
486 | DRM_ERROR("failed to reserve new rbo in error path\n"); | ||
487 | goto pflip_cleanup; | ||
488 | } | ||
489 | r = radeon_bo_unpin(rbo); | ||
490 | if (unlikely(r != 0)) { | ||
491 | radeon_bo_unreserve(rbo); | ||
492 | r = -EINVAL; | ||
493 | DRM_ERROR("failed to unpin new rbo in error path\n"); | ||
494 | goto pflip_cleanup; | ||
495 | } | ||
496 | radeon_bo_unreserve(rbo); | ||
497 | |||
498 | pflip_cleanup: | ||
499 | spin_lock_irqsave(&dev->event_lock, flags); | ||
500 | radeon_crtc->unpin_work = NULL; | ||
501 | unlock_free: | ||
502 | drm_gem_object_unreference_unlocked(old_radeon_fb->obj); | ||
503 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
504 | radeon_fence_unref(&fence); | ||
505 | kfree(work); | ||
506 | |||
507 | return r; | ||
508 | } | ||
509 | |||
186 | static const struct drm_crtc_funcs radeon_crtc_funcs = { | 510 | static const struct drm_crtc_funcs radeon_crtc_funcs = { |
187 | .cursor_set = radeon_crtc_cursor_set, | 511 | .cursor_set = radeon_crtc_cursor_set, |
188 | .cursor_move = radeon_crtc_cursor_move, | 512 | .cursor_move = radeon_crtc_cursor_move, |
189 | .gamma_set = radeon_crtc_gamma_set, | 513 | .gamma_set = radeon_crtc_gamma_set, |
190 | .set_config = drm_crtc_helper_set_config, | 514 | .set_config = drm_crtc_helper_set_config, |
191 | .destroy = radeon_crtc_destroy, | 515 | .destroy = radeon_crtc_destroy, |
516 | .page_flip = radeon_crtc_page_flip, | ||
192 | }; | 517 | }; |
193 | 518 | ||
194 | static void radeon_crtc_init(struct drm_device *dev, int index) | 519 | static void radeon_crtc_init(struct drm_device *dev, int index) |
@@ -225,7 +550,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index) | |||
225 | radeon_legacy_init_crtc(dev, radeon_crtc); | 550 | radeon_legacy_init_crtc(dev, radeon_crtc); |
226 | } | 551 | } |
227 | 552 | ||
228 | static const char *encoder_names[34] = { | 553 | static const char *encoder_names[36] = { |
229 | "NONE", | 554 | "NONE", |
230 | "INTERNAL_LVDS", | 555 | "INTERNAL_LVDS", |
231 | "INTERNAL_TMDS1", | 556 | "INTERNAL_TMDS1", |
@@ -260,6 +585,8 @@ static const char *encoder_names[34] = { | |||
260 | "INTERNAL_KLDSCP_LVTMA", | 585 | "INTERNAL_KLDSCP_LVTMA", |
261 | "INTERNAL_UNIPHY1", | 586 | "INTERNAL_UNIPHY1", |
262 | "INTERNAL_UNIPHY2", | 587 | "INTERNAL_UNIPHY2", |
588 | "NUTMEG", | ||
589 | "TRAVIS", | ||
263 | }; | 590 | }; |
264 | 591 | ||
265 | static const char *connector_names[15] = { | 592 | static const char *connector_names[15] = { |
@@ -315,10 +642,14 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
315 | radeon_connector->ddc_bus->rec.en_data_reg, | 642 | radeon_connector->ddc_bus->rec.en_data_reg, |
316 | radeon_connector->ddc_bus->rec.y_clk_reg, | 643 | radeon_connector->ddc_bus->rec.y_clk_reg, |
317 | radeon_connector->ddc_bus->rec.y_data_reg); | 644 | radeon_connector->ddc_bus->rec.y_data_reg); |
318 | if (radeon_connector->router_bus) | 645 | if (radeon_connector->router.ddc_valid) |
319 | DRM_INFO(" DDC Router 0x%x/0x%x\n", | 646 | DRM_INFO(" DDC Router 0x%x/0x%x\n", |
320 | radeon_connector->router.mux_control_pin, | 647 | radeon_connector->router.ddc_mux_control_pin, |
321 | radeon_connector->router.mux_state); | 648 | radeon_connector->router.ddc_mux_state); |
649 | if (radeon_connector->router.cd_valid) | ||
650 | DRM_INFO(" Clock/Data Router 0x%x/0x%x\n", | ||
651 | radeon_connector->router.cd_mux_control_pin, | ||
652 | radeon_connector->router.cd_mux_state); | ||
322 | } else { | 653 | } else { |
323 | if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || | 654 | if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || |
324 | connector->connector_type == DRM_MODE_CONNECTOR_DVII || | 655 | connector->connector_type == DRM_MODE_CONNECTOR_DVII || |
@@ -398,8 +729,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | |||
398 | int ret = 0; | 729 | int ret = 0; |
399 | 730 | ||
400 | /* on hw with routers, select right port */ | 731 | /* on hw with routers, select right port */ |
401 | if (radeon_connector->router.valid) | 732 | if (radeon_connector->router.ddc_valid) |
402 | radeon_router_select_port(radeon_connector); | 733 | radeon_router_select_ddc_port(radeon_connector); |
403 | 734 | ||
404 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || | 735 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || |
405 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { | 736 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { |
@@ -413,9 +744,17 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | |||
413 | if (!radeon_connector->edid) { | 744 | if (!radeon_connector->edid) { |
414 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); | 745 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); |
415 | } | 746 | } |
416 | /* some servers provide a hardcoded edid in rom for KVMs */ | 747 | |
417 | if (!radeon_connector->edid) | 748 | if (!radeon_connector->edid) { |
418 | radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev); | 749 | if (rdev->is_atom_bios) { |
750 | /* some laptops provide a hardcoded edid in rom for LCDs */ | ||
751 | if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) || | ||
752 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP))) | ||
753 | radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); | ||
754 | } else | ||
755 | /* some servers provide a hardcoded edid in rom for KVMs */ | ||
756 | radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); | ||
757 | } | ||
419 | if (radeon_connector->edid) { | 758 | if (radeon_connector->edid) { |
420 | drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); | 759 | drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); |
421 | ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); | 760 | ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); |
@@ -432,8 +771,8 @@ static int radeon_ddc_dump(struct drm_connector *connector) | |||
432 | int ret = 0; | 771 | int ret = 0; |
433 | 772 | ||
434 | /* on hw with routers, select right port */ | 773 | /* on hw with routers, select right port */ |
435 | if (radeon_connector->router.valid) | 774 | if (radeon_connector->router.ddc_valid) |
436 | radeon_router_select_port(radeon_connector); | 775 | radeon_router_select_ddc_port(radeon_connector); |
437 | 776 | ||
438 | if (!radeon_connector->ddc_bus) | 777 | if (!radeon_connector->ddc_bus) |
439 | return -1; | 778 | return -1; |
@@ -444,6 +783,125 @@ static int radeon_ddc_dump(struct drm_connector *connector) | |||
444 | return ret; | 783 | return ret; |
445 | } | 784 | } |
446 | 785 | ||
786 | /* avivo */ | ||
787 | static void avivo_get_fb_div(struct radeon_pll *pll, | ||
788 | u32 target_clock, | ||
789 | u32 post_div, | ||
790 | u32 ref_div, | ||
791 | u32 *fb_div, | ||
792 | u32 *frac_fb_div) | ||
793 | { | ||
794 | u32 tmp = post_div * ref_div; | ||
795 | |||
796 | tmp *= target_clock; | ||
797 | *fb_div = tmp / pll->reference_freq; | ||
798 | *frac_fb_div = tmp % pll->reference_freq; | ||
799 | |||
800 | if (*fb_div > pll->max_feedback_div) | ||
801 | *fb_div = pll->max_feedback_div; | ||
802 | else if (*fb_div < pll->min_feedback_div) | ||
803 | *fb_div = pll->min_feedback_div; | ||
804 | } | ||
805 | |||
806 | static u32 avivo_get_post_div(struct radeon_pll *pll, | ||
807 | u32 target_clock) | ||
808 | { | ||
809 | u32 vco, post_div, tmp; | ||
810 | |||
811 | if (pll->flags & RADEON_PLL_USE_POST_DIV) | ||
812 | return pll->post_div; | ||
813 | |||
814 | if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) { | ||
815 | if (pll->flags & RADEON_PLL_IS_LCD) | ||
816 | vco = pll->lcd_pll_out_min; | ||
817 | else | ||
818 | vco = pll->pll_out_min; | ||
819 | } else { | ||
820 | if (pll->flags & RADEON_PLL_IS_LCD) | ||
821 | vco = pll->lcd_pll_out_max; | ||
822 | else | ||
823 | vco = pll->pll_out_max; | ||
824 | } | ||
825 | |||
826 | post_div = vco / target_clock; | ||
827 | tmp = vco % target_clock; | ||
828 | |||
829 | if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) { | ||
830 | if (tmp) | ||
831 | post_div++; | ||
832 | } else { | ||
833 | if (!tmp) | ||
834 | post_div--; | ||
835 | } | ||
836 | |||
837 | if (post_div > pll->max_post_div) | ||
838 | post_div = pll->max_post_div; | ||
839 | else if (post_div < pll->min_post_div) | ||
840 | post_div = pll->min_post_div; | ||
841 | |||
842 | return post_div; | ||
843 | } | ||
844 | |||
845 | #define MAX_TOLERANCE 10 | ||
846 | |||
847 | void radeon_compute_pll_avivo(struct radeon_pll *pll, | ||
848 | u32 freq, | ||
849 | u32 *dot_clock_p, | ||
850 | u32 *fb_div_p, | ||
851 | u32 *frac_fb_div_p, | ||
852 | u32 *ref_div_p, | ||
853 | u32 *post_div_p) | ||
854 | { | ||
855 | u32 target_clock = freq / 10; | ||
856 | u32 post_div = avivo_get_post_div(pll, target_clock); | ||
857 | u32 ref_div = pll->min_ref_div; | ||
858 | u32 fb_div = 0, frac_fb_div = 0, tmp; | ||
859 | |||
860 | if (pll->flags & RADEON_PLL_USE_REF_DIV) | ||
861 | ref_div = pll->reference_div; | ||
862 | |||
863 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { | ||
864 | avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div); | ||
865 | frac_fb_div = (100 * frac_fb_div) / pll->reference_freq; | ||
866 | if (frac_fb_div >= 5) { | ||
867 | frac_fb_div -= 5; | ||
868 | frac_fb_div = frac_fb_div / 10; | ||
869 | frac_fb_div++; | ||
870 | } | ||
871 | if (frac_fb_div >= 10) { | ||
872 | fb_div++; | ||
873 | frac_fb_div = 0; | ||
874 | } | ||
875 | } else { | ||
876 | while (ref_div <= pll->max_ref_div) { | ||
877 | avivo_get_fb_div(pll, target_clock, post_div, ref_div, | ||
878 | &fb_div, &frac_fb_div); | ||
879 | if (frac_fb_div >= (pll->reference_freq / 2)) | ||
880 | fb_div++; | ||
881 | frac_fb_div = 0; | ||
882 | tmp = (pll->reference_freq * fb_div) / (post_div * ref_div); | ||
883 | tmp = (tmp * 10000) / target_clock; | ||
884 | |||
885 | if (tmp > (10000 + MAX_TOLERANCE)) | ||
886 | ref_div++; | ||
887 | else if (tmp >= (10000 - MAX_TOLERANCE)) | ||
888 | break; | ||
889 | else | ||
890 | ref_div++; | ||
891 | } | ||
892 | } | ||
893 | |||
894 | *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) / | ||
895 | (ref_div * post_div * 10); | ||
896 | *fb_div_p = fb_div; | ||
897 | *frac_fb_div_p = frac_fb_div; | ||
898 | *ref_div_p = ref_div; | ||
899 | *post_div_p = post_div; | ||
900 | DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n", | ||
901 | *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div); | ||
902 | } | ||
903 | |||
904 | /* pre-avivo */ | ||
447 | static inline uint32_t radeon_div(uint64_t n, uint32_t d) | 905 | static inline uint32_t radeon_div(uint64_t n, uint32_t d) |
448 | { | 906 | { |
449 | uint64_t mod; | 907 | uint64_t mod; |
@@ -454,13 +912,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d) | |||
454 | return n; | 912 | return n; |
455 | } | 913 | } |
456 | 914 | ||
457 | static void radeon_compute_pll_legacy(struct radeon_pll *pll, | 915 | void radeon_compute_pll_legacy(struct radeon_pll *pll, |
458 | uint64_t freq, | 916 | uint64_t freq, |
459 | uint32_t *dot_clock_p, | 917 | uint32_t *dot_clock_p, |
460 | uint32_t *fb_div_p, | 918 | uint32_t *fb_div_p, |
461 | uint32_t *frac_fb_div_p, | 919 | uint32_t *frac_fb_div_p, |
462 | uint32_t *ref_div_p, | 920 | uint32_t *ref_div_p, |
463 | uint32_t *post_div_p) | 921 | uint32_t *post_div_p) |
464 | { | 922 | { |
465 | uint32_t min_ref_div = pll->min_ref_div; | 923 | uint32_t min_ref_div = pll->min_ref_div; |
466 | uint32_t max_ref_div = pll->max_ref_div; | 924 | uint32_t max_ref_div = pll->max_ref_div; |
@@ -490,6 +948,9 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, | |||
490 | pll_out_max = pll->pll_out_max; | 948 | pll_out_max = pll->pll_out_max; |
491 | } | 949 | } |
492 | 950 | ||
951 | if (pll_out_min > 64800) | ||
952 | pll_out_min = 64800; | ||
953 | |||
493 | if (pll->flags & RADEON_PLL_USE_REF_DIV) | 954 | if (pll->flags & RADEON_PLL_USE_REF_DIV) |
494 | min_ref_div = max_ref_div = pll->reference_div; | 955 | min_ref_div = max_ref_div = pll->reference_div; |
495 | else { | 956 | else { |
@@ -513,7 +974,7 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, | |||
513 | max_fractional_feed_div = pll->max_frac_feedback_div; | 974 | max_fractional_feed_div = pll->max_frac_feedback_div; |
514 | } | 975 | } |
515 | 976 | ||
516 | for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { | 977 | for (post_div = max_post_div; post_div >= min_post_div; --post_div) { |
517 | uint32_t ref_div; | 978 | uint32_t ref_div; |
518 | 979 | ||
519 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) | 980 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) |
@@ -629,214 +1090,11 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, | |||
629 | *frac_fb_div_p = best_frac_feedback_div; | 1090 | *frac_fb_div_p = best_frac_feedback_div; |
630 | *ref_div_p = best_ref_div; | 1091 | *ref_div_p = best_ref_div; |
631 | *post_div_p = best_post_div; | 1092 | *post_div_p = best_post_div; |
632 | } | 1093 | DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n", |
633 | 1094 | (long long)freq, | |
634 | static bool | 1095 | best_freq / 1000, best_feedback_div, best_frac_feedback_div, |
635 | calc_fb_div(struct radeon_pll *pll, | 1096 | best_ref_div, best_post_div); |
636 | uint32_t freq, | ||
637 | uint32_t post_div, | ||
638 | uint32_t ref_div, | ||
639 | uint32_t *fb_div, | ||
640 | uint32_t *fb_div_frac) | ||
641 | { | ||
642 | fixed20_12 feedback_divider, a, b; | ||
643 | u32 vco_freq; | ||
644 | |||
645 | vco_freq = freq * post_div; | ||
646 | /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */ | ||
647 | a.full = dfixed_const(pll->reference_freq); | ||
648 | feedback_divider.full = dfixed_const(vco_freq); | ||
649 | feedback_divider.full = dfixed_div(feedback_divider, a); | ||
650 | a.full = dfixed_const(ref_div); | ||
651 | feedback_divider.full = dfixed_mul(feedback_divider, a); | ||
652 | |||
653 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { | ||
654 | /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */ | ||
655 | a.full = dfixed_const(10); | ||
656 | feedback_divider.full = dfixed_mul(feedback_divider, a); | ||
657 | feedback_divider.full += dfixed_const_half(0); | ||
658 | feedback_divider.full = dfixed_floor(feedback_divider); | ||
659 | feedback_divider.full = dfixed_div(feedback_divider, a); | ||
660 | |||
661 | /* *fb_div = floor(feedback_divider); */ | ||
662 | a.full = dfixed_floor(feedback_divider); | ||
663 | *fb_div = dfixed_trunc(a); | ||
664 | /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */ | ||
665 | a.full = dfixed_const(10); | ||
666 | b.full = dfixed_mul(feedback_divider, a); | ||
667 | |||
668 | feedback_divider.full = dfixed_floor(feedback_divider); | ||
669 | feedback_divider.full = dfixed_mul(feedback_divider, a); | ||
670 | feedback_divider.full = b.full - feedback_divider.full; | ||
671 | *fb_div_frac = dfixed_trunc(feedback_divider); | ||
672 | } else { | ||
673 | /* *fb_div = floor(feedback_divider + 0.5); */ | ||
674 | feedback_divider.full += dfixed_const_half(0); | ||
675 | feedback_divider.full = dfixed_floor(feedback_divider); | ||
676 | |||
677 | *fb_div = dfixed_trunc(feedback_divider); | ||
678 | *fb_div_frac = 0; | ||
679 | } | ||
680 | |||
681 | if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div)) | ||
682 | return false; | ||
683 | else | ||
684 | return true; | ||
685 | } | ||
686 | |||
687 | static bool | ||
688 | calc_fb_ref_div(struct radeon_pll *pll, | ||
689 | uint32_t freq, | ||
690 | uint32_t post_div, | ||
691 | uint32_t *fb_div, | ||
692 | uint32_t *fb_div_frac, | ||
693 | uint32_t *ref_div) | ||
694 | { | ||
695 | fixed20_12 ffreq, max_error, error, pll_out, a; | ||
696 | u32 vco; | ||
697 | u32 pll_out_min, pll_out_max; | ||
698 | |||
699 | if (pll->flags & RADEON_PLL_IS_LCD) { | ||
700 | pll_out_min = pll->lcd_pll_out_min; | ||
701 | pll_out_max = pll->lcd_pll_out_max; | ||
702 | } else { | ||
703 | pll_out_min = pll->pll_out_min; | ||
704 | pll_out_max = pll->pll_out_max; | ||
705 | } | ||
706 | |||
707 | ffreq.full = dfixed_const(freq); | ||
708 | /* max_error = ffreq * 0.0025; */ | ||
709 | a.full = dfixed_const(400); | ||
710 | max_error.full = dfixed_div(ffreq, a); | ||
711 | |||
712 | for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) { | ||
713 | if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) { | ||
714 | vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac)); | ||
715 | vco = vco / ((*ref_div) * 10); | ||
716 | |||
717 | if ((vco < pll_out_min) || (vco > pll_out_max)) | ||
718 | continue; | ||
719 | |||
720 | /* pll_out = vco / post_div; */ | ||
721 | a.full = dfixed_const(post_div); | ||
722 | pll_out.full = dfixed_const(vco); | ||
723 | pll_out.full = dfixed_div(pll_out, a); | ||
724 | |||
725 | if (pll_out.full >= ffreq.full) { | ||
726 | error.full = pll_out.full - ffreq.full; | ||
727 | if (error.full <= max_error.full) | ||
728 | return true; | ||
729 | } | ||
730 | } | ||
731 | } | ||
732 | return false; | ||
733 | } | ||
734 | |||
735 | static void radeon_compute_pll_new(struct radeon_pll *pll, | ||
736 | uint64_t freq, | ||
737 | uint32_t *dot_clock_p, | ||
738 | uint32_t *fb_div_p, | ||
739 | uint32_t *frac_fb_div_p, | ||
740 | uint32_t *ref_div_p, | ||
741 | uint32_t *post_div_p) | ||
742 | { | ||
743 | u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0; | ||
744 | u32 best_freq = 0, vco_frequency; | ||
745 | u32 pll_out_min, pll_out_max; | ||
746 | |||
747 | if (pll->flags & RADEON_PLL_IS_LCD) { | ||
748 | pll_out_min = pll->lcd_pll_out_min; | ||
749 | pll_out_max = pll->lcd_pll_out_max; | ||
750 | } else { | ||
751 | pll_out_min = pll->pll_out_min; | ||
752 | pll_out_max = pll->pll_out_max; | ||
753 | } | ||
754 | |||
755 | /* freq = freq / 10; */ | ||
756 | do_div(freq, 10); | ||
757 | |||
758 | if (pll->flags & RADEON_PLL_USE_POST_DIV) { | ||
759 | post_div = pll->post_div; | ||
760 | if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div)) | ||
761 | goto done; | ||
762 | |||
763 | vco_frequency = freq * post_div; | ||
764 | if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max)) | ||
765 | goto done; | ||
766 | |||
767 | if (pll->flags & RADEON_PLL_USE_REF_DIV) { | ||
768 | ref_div = pll->reference_div; | ||
769 | if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div)) | ||
770 | goto done; | ||
771 | if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac)) | ||
772 | goto done; | ||
773 | } | ||
774 | } else { | ||
775 | for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) { | ||
776 | if (pll->flags & RADEON_PLL_LEGACY) { | ||
777 | if ((post_div == 5) || | ||
778 | (post_div == 7) || | ||
779 | (post_div == 9) || | ||
780 | (post_div == 10) || | ||
781 | (post_div == 11)) | ||
782 | continue; | ||
783 | } | ||
784 | 1097 | ||
785 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) | ||
786 | continue; | ||
787 | |||
788 | vco_frequency = freq * post_div; | ||
789 | if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max)) | ||
790 | continue; | ||
791 | if (pll->flags & RADEON_PLL_USE_REF_DIV) { | ||
792 | ref_div = pll->reference_div; | ||
793 | if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div)) | ||
794 | goto done; | ||
795 | if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac)) | ||
796 | break; | ||
797 | } else { | ||
798 | if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div)) | ||
799 | break; | ||
800 | } | ||
801 | } | ||
802 | } | ||
803 | |||
804 | best_freq = pll->reference_freq * 10 * fb_div; | ||
805 | best_freq += pll->reference_freq * fb_div_frac; | ||
806 | best_freq = best_freq / (ref_div * post_div); | ||
807 | |||
808 | done: | ||
809 | if (best_freq == 0) | ||
810 | DRM_ERROR("Couldn't find valid PLL dividers\n"); | ||
811 | |||
812 | *dot_clock_p = best_freq / 10; | ||
813 | *fb_div_p = fb_div; | ||
814 | *frac_fb_div_p = fb_div_frac; | ||
815 | *ref_div_p = ref_div; | ||
816 | *post_div_p = post_div; | ||
817 | |||
818 | DRM_DEBUG_KMS("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p); | ||
819 | } | ||
820 | |||
821 | void radeon_compute_pll(struct radeon_pll *pll, | ||
822 | uint64_t freq, | ||
823 | uint32_t *dot_clock_p, | ||
824 | uint32_t *fb_div_p, | ||
825 | uint32_t *frac_fb_div_p, | ||
826 | uint32_t *ref_div_p, | ||
827 | uint32_t *post_div_p) | ||
828 | { | ||
829 | switch (pll->algo) { | ||
830 | case PLL_ALGO_NEW: | ||
831 | radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p, | ||
832 | frac_fb_div_p, ref_div_p, post_div_p); | ||
833 | break; | ||
834 | case PLL_ALGO_LEGACY: | ||
835 | default: | ||
836 | radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p, | ||
837 | frac_fb_div_p, ref_div_p, post_div_p); | ||
838 | break; | ||
839 | } | ||
840 | } | 1098 | } |
841 | 1099 | ||
842 | static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) | 1100 | static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) |
@@ -1002,6 +1260,24 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) | |||
1002 | radeon_underscan_enum_list[i].name); | 1260 | radeon_underscan_enum_list[i].name); |
1003 | } | 1261 | } |
1004 | 1262 | ||
1263 | rdev->mode_info.underscan_hborder_property = | ||
1264 | drm_property_create(rdev->ddev, | ||
1265 | DRM_MODE_PROP_RANGE, | ||
1266 | "underscan hborder", 2); | ||
1267 | if (!rdev->mode_info.underscan_hborder_property) | ||
1268 | return -ENOMEM; | ||
1269 | rdev->mode_info.underscan_hborder_property->values[0] = 0; | ||
1270 | rdev->mode_info.underscan_hborder_property->values[1] = 128; | ||
1271 | |||
1272 | rdev->mode_info.underscan_vborder_property = | ||
1273 | drm_property_create(rdev->ddev, | ||
1274 | DRM_MODE_PROP_RANGE, | ||
1275 | "underscan vborder", 2); | ||
1276 | if (!rdev->mode_info.underscan_vborder_property) | ||
1277 | return -ENOMEM; | ||
1278 | rdev->mode_info.underscan_vborder_property->values[0] = 0; | ||
1279 | rdev->mode_info.underscan_vborder_property->values[1] = 128; | ||
1280 | |||
1005 | return 0; | 1281 | return 0; |
1006 | } | 1282 | } |
1007 | 1283 | ||
@@ -1035,7 +1311,10 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
1035 | 1311 | ||
1036 | rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs; | 1312 | rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs; |
1037 | 1313 | ||
1038 | if (ASIC_IS_AVIVO(rdev)) { | 1314 | if (ASIC_IS_DCE5(rdev)) { |
1315 | rdev->ddev->mode_config.max_width = 16384; | ||
1316 | rdev->ddev->mode_config.max_height = 16384; | ||
1317 | } else if (ASIC_IS_AVIVO(rdev)) { | ||
1039 | rdev->ddev->mode_config.max_width = 8192; | 1318 | rdev->ddev->mode_config.max_width = 8192; |
1040 | rdev->ddev->mode_config.max_height = 8192; | 1319 | rdev->ddev->mode_config.max_height = 8192; |
1041 | } else { | 1320 | } else { |
@@ -1069,6 +1348,11 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
1069 | if (!ret) { | 1348 | if (!ret) { |
1070 | return ret; | 1349 | return ret; |
1071 | } | 1350 | } |
1351 | |||
1352 | /* init dig PHYs */ | ||
1353 | if (rdev->is_atom_bios) | ||
1354 | radeon_atom_encoder_init(rdev); | ||
1355 | |||
1072 | /* initialize hpd */ | 1356 | /* initialize hpd */ |
1073 | radeon_hpd_init(rdev); | 1357 | radeon_hpd_init(rdev); |
1074 | 1358 | ||
@@ -1159,8 +1443,14 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
1159 | ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && | 1443 | ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && |
1160 | drm_detect_hdmi_monitor(radeon_connector->edid) && | 1444 | drm_detect_hdmi_monitor(radeon_connector->edid) && |
1161 | is_hdtv_mode(mode)))) { | 1445 | is_hdtv_mode(mode)))) { |
1162 | radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; | 1446 | if (radeon_encoder->underscan_hborder != 0) |
1163 | radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; | 1447 | radeon_crtc->h_border = radeon_encoder->underscan_hborder; |
1448 | else | ||
1449 | radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; | ||
1450 | if (radeon_encoder->underscan_vborder != 0) | ||
1451 | radeon_crtc->v_border = radeon_encoder->underscan_vborder; | ||
1452 | else | ||
1453 | radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; | ||
1164 | radeon_crtc->rmx_type = RMX_FULL; | 1454 | radeon_crtc->rmx_type = RMX_FULL; |
1165 | src_v = crtc->mode.vdisplay; | 1455 | src_v = crtc->mode.vdisplay; |
1166 | dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2); | 1456 | dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2); |
@@ -1195,3 +1485,158 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
1195 | } | 1485 | } |
1196 | return true; | 1486 | return true; |
1197 | } | 1487 | } |
1488 | |||
1489 | /* | ||
1490 | * Retrieve current video scanout position of crtc on a given gpu. | ||
1491 | * | ||
1492 | * \param dev Device to query. | ||
1493 | * \param crtc Crtc to query. | ||
1494 | * \param *vpos Location where vertical scanout position should be stored. | ||
1495 | * \param *hpos Location where horizontal scanout position should go. | ||
1496 | * | ||
1497 | * Returns vpos as a positive number while in active scanout area. | ||
1498 | * Returns vpos as a negative number inside vblank, counting the number | ||
1499 | * of scanlines to go until end of vblank, e.g., -1 means "one scanline | ||
1500 | * until start of active scanout / end of vblank." | ||
1501 | * | ||
1502 | * \return Flags, or'ed together as follows: | ||
1503 | * | ||
1504 | * DRM_SCANOUTPOS_VALID = Query successful. | ||
1505 | * DRM_SCANOUTPOS_INVBL = Inside vblank. | ||
1506 | * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of | ||
1507 | * this flag means that returned position may be offset by a constant but | ||
1508 | * unknown small number of scanlines wrt. real scanout position. | ||
1509 | * | ||
1510 | */ | ||
1511 | int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos) | ||
1512 | { | ||
1513 | u32 stat_crtc = 0, vbl = 0, position = 0; | ||
1514 | int vbl_start, vbl_end, vtotal, ret = 0; | ||
1515 | bool in_vbl = true; | ||
1516 | |||
1517 | struct radeon_device *rdev = dev->dev_private; | ||
1518 | |||
1519 | if (ASIC_IS_DCE4(rdev)) { | ||
1520 | if (crtc == 0) { | ||
1521 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
1522 | EVERGREEN_CRTC0_REGISTER_OFFSET); | ||
1523 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
1524 | EVERGREEN_CRTC0_REGISTER_OFFSET); | ||
1525 | ret |= DRM_SCANOUTPOS_VALID; | ||
1526 | } | ||
1527 | if (crtc == 1) { | ||
1528 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
1529 | EVERGREEN_CRTC1_REGISTER_OFFSET); | ||
1530 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
1531 | EVERGREEN_CRTC1_REGISTER_OFFSET); | ||
1532 | ret |= DRM_SCANOUTPOS_VALID; | ||
1533 | } | ||
1534 | if (crtc == 2) { | ||
1535 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
1536 | EVERGREEN_CRTC2_REGISTER_OFFSET); | ||
1537 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
1538 | EVERGREEN_CRTC2_REGISTER_OFFSET); | ||
1539 | ret |= DRM_SCANOUTPOS_VALID; | ||
1540 | } | ||
1541 | if (crtc == 3) { | ||
1542 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
1543 | EVERGREEN_CRTC3_REGISTER_OFFSET); | ||
1544 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
1545 | EVERGREEN_CRTC3_REGISTER_OFFSET); | ||
1546 | ret |= DRM_SCANOUTPOS_VALID; | ||
1547 | } | ||
1548 | if (crtc == 4) { | ||
1549 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
1550 | EVERGREEN_CRTC4_REGISTER_OFFSET); | ||
1551 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
1552 | EVERGREEN_CRTC4_REGISTER_OFFSET); | ||
1553 | ret |= DRM_SCANOUTPOS_VALID; | ||
1554 | } | ||
1555 | if (crtc == 5) { | ||
1556 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
1557 | EVERGREEN_CRTC5_REGISTER_OFFSET); | ||
1558 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
1559 | EVERGREEN_CRTC5_REGISTER_OFFSET); | ||
1560 | ret |= DRM_SCANOUTPOS_VALID; | ||
1561 | } | ||
1562 | } else if (ASIC_IS_AVIVO(rdev)) { | ||
1563 | if (crtc == 0) { | ||
1564 | vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); | ||
1565 | position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); | ||
1566 | ret |= DRM_SCANOUTPOS_VALID; | ||
1567 | } | ||
1568 | if (crtc == 1) { | ||
1569 | vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); | ||
1570 | position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); | ||
1571 | ret |= DRM_SCANOUTPOS_VALID; | ||
1572 | } | ||
1573 | } else { | ||
1574 | /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ | ||
1575 | if (crtc == 0) { | ||
1576 | /* Assume vbl_end == 0, get vbl_start from | ||
1577 | * upper 16 bits. | ||
1578 | */ | ||
1579 | vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) & | ||
1580 | RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; | ||
1581 | /* Only retrieve vpos from upper 16 bits, set hpos == 0. */ | ||
1582 | position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; | ||
1583 | stat_crtc = RREG32(RADEON_CRTC_STATUS); | ||
1584 | if (!(stat_crtc & 1)) | ||
1585 | in_vbl = false; | ||
1586 | |||
1587 | ret |= DRM_SCANOUTPOS_VALID; | ||
1588 | } | ||
1589 | if (crtc == 1) { | ||
1590 | vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & | ||
1591 | RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; | ||
1592 | position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; | ||
1593 | stat_crtc = RREG32(RADEON_CRTC2_STATUS); | ||
1594 | if (!(stat_crtc & 1)) | ||
1595 | in_vbl = false; | ||
1596 | |||
1597 | ret |= DRM_SCANOUTPOS_VALID; | ||
1598 | } | ||
1599 | } | ||
1600 | |||
1601 | /* Decode into vertical and horizontal scanout position. */ | ||
1602 | *vpos = position & 0x1fff; | ||
1603 | *hpos = (position >> 16) & 0x1fff; | ||
1604 | |||
1605 | /* Valid vblank area boundaries from gpu retrieved? */ | ||
1606 | if (vbl > 0) { | ||
1607 | /* Yes: Decode. */ | ||
1608 | ret |= DRM_SCANOUTPOS_ACCURATE; | ||
1609 | vbl_start = vbl & 0x1fff; | ||
1610 | vbl_end = (vbl >> 16) & 0x1fff; | ||
1611 | } | ||
1612 | else { | ||
1613 | /* No: Fake something reasonable which gives at least ok results. */ | ||
1614 | vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; | ||
1615 | vbl_end = 0; | ||
1616 | } | ||
1617 | |||
1618 | /* Test scanout position against vblank region. */ | ||
1619 | if ((*vpos < vbl_start) && (*vpos >= vbl_end)) | ||
1620 | in_vbl = false; | ||
1621 | |||
1622 | /* Check if inside vblank area and apply corrective offsets: | ||
1623 | * vpos will then be >=0 in video scanout area, but negative | ||
1624 | * within vblank area, counting down the number of lines until | ||
1625 | * start of scanout. | ||
1626 | */ | ||
1627 | |||
1628 | /* Inside "upper part" of vblank area? Apply corrective offset if so: */ | ||
1629 | if (in_vbl && (*vpos >= vbl_start)) { | ||
1630 | vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; | ||
1631 | *vpos = *vpos - vtotal; | ||
1632 | } | ||
1633 | |||
1634 | /* Correct for shifted end of vbl at vbl_end. */ | ||
1635 | *vpos = *vpos - vbl_end; | ||
1636 | |||
1637 | /* In vblank? */ | ||
1638 | if (in_vbl) | ||
1639 | ret |= DRM_SCANOUTPOS_INVBL; | ||
1640 | |||
1641 | return ret; | ||
1642 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 795403b0e2cd..73dfbe8e5f9e 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -47,9 +47,13 @@ | |||
47 | * - 2.4.0 - add crtc id query | 47 | * - 2.4.0 - add crtc id query |
48 | * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen | 48 | * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen |
49 | * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) | 49 | * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) |
50 | * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs | ||
51 | * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query | ||
52 | * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query | ||
53 | * 2.10.0 - fusion 2D tiling | ||
50 | */ | 54 | */ |
51 | #define KMS_DRIVER_MAJOR 2 | 55 | #define KMS_DRIVER_MAJOR 2 |
52 | #define KMS_DRIVER_MINOR 6 | 56 | #define KMS_DRIVER_MINOR 10 |
53 | #define KMS_DRIVER_PATCHLEVEL 0 | 57 | #define KMS_DRIVER_PATCHLEVEL 0 |
54 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 58 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
55 | int radeon_driver_unload_kms(struct drm_device *dev); | 59 | int radeon_driver_unload_kms(struct drm_device *dev); |
@@ -65,6 +69,10 @@ int radeon_resume_kms(struct drm_device *dev); | |||
65 | u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc); | 69 | u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc); |
66 | int radeon_enable_vblank_kms(struct drm_device *dev, int crtc); | 70 | int radeon_enable_vblank_kms(struct drm_device *dev, int crtc); |
67 | void radeon_disable_vblank_kms(struct drm_device *dev, int crtc); | 71 | void radeon_disable_vblank_kms(struct drm_device *dev, int crtc); |
72 | int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, | ||
73 | int *max_error, | ||
74 | struct timeval *vblank_time, | ||
75 | unsigned flags); | ||
68 | void radeon_driver_irq_preinstall_kms(struct drm_device *dev); | 76 | void radeon_driver_irq_preinstall_kms(struct drm_device *dev); |
69 | int radeon_driver_irq_postinstall_kms(struct drm_device *dev); | 77 | int radeon_driver_irq_postinstall_kms(struct drm_device *dev); |
70 | void radeon_driver_irq_uninstall_kms(struct drm_device *dev); | 78 | void radeon_driver_irq_uninstall_kms(struct drm_device *dev); |
@@ -73,9 +81,21 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, | |||
73 | struct drm_file *file_priv); | 81 | struct drm_file *file_priv); |
74 | int radeon_gem_object_init(struct drm_gem_object *obj); | 82 | int radeon_gem_object_init(struct drm_gem_object *obj); |
75 | void radeon_gem_object_free(struct drm_gem_object *obj); | 83 | void radeon_gem_object_free(struct drm_gem_object *obj); |
84 | extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, | ||
85 | int *vpos, int *hpos); | ||
76 | extern struct drm_ioctl_desc radeon_ioctls_kms[]; | 86 | extern struct drm_ioctl_desc radeon_ioctls_kms[]; |
77 | extern int radeon_max_kms_ioctl; | 87 | extern int radeon_max_kms_ioctl; |
78 | int radeon_mmap(struct file *filp, struct vm_area_struct *vma); | 88 | int radeon_mmap(struct file *filp, struct vm_area_struct *vma); |
89 | int radeon_mode_dumb_mmap(struct drm_file *filp, | ||
90 | struct drm_device *dev, | ||
91 | uint32_t handle, uint64_t *offset_p); | ||
92 | int radeon_mode_dumb_create(struct drm_file *file_priv, | ||
93 | struct drm_device *dev, | ||
94 | struct drm_mode_create_dumb *args); | ||
95 | int radeon_mode_dumb_destroy(struct drm_file *file_priv, | ||
96 | struct drm_device *dev, | ||
97 | uint32_t handle); | ||
98 | |||
79 | #if defined(CONFIG_DEBUG_FS) | 99 | #if defined(CONFIG_DEBUG_FS) |
80 | int radeon_debugfs_init(struct drm_minor *minor); | 100 | int radeon_debugfs_init(struct drm_minor *minor); |
81 | void radeon_debugfs_cleanup(struct drm_minor *minor); | 101 | void radeon_debugfs_cleanup(struct drm_minor *minor); |
@@ -93,10 +113,10 @@ int radeon_benchmarking = 0; | |||
93 | int radeon_testing = 0; | 113 | int radeon_testing = 0; |
94 | int radeon_connector_table = 0; | 114 | int radeon_connector_table = 0; |
95 | int radeon_tv = 1; | 115 | int radeon_tv = 1; |
96 | int radeon_new_pll = -1; | 116 | int radeon_audio = 0; |
97 | int radeon_audio = 1; | ||
98 | int radeon_disp_priority = 0; | 117 | int radeon_disp_priority = 0; |
99 | int radeon_hw_i2c = 0; | 118 | int radeon_hw_i2c = 0; |
119 | int radeon_pcie_gen2 = 0; | ||
100 | 120 | ||
101 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); | 121 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); |
102 | module_param_named(no_wb, radeon_no_wb, int, 0444); | 122 | module_param_named(no_wb, radeon_no_wb, int, 0444); |
@@ -131,10 +151,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444); | |||
131 | MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); | 151 | MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); |
132 | module_param_named(tv, radeon_tv, int, 0444); | 152 | module_param_named(tv, radeon_tv, int, 0444); |
133 | 153 | ||
134 | MODULE_PARM_DESC(new_pll, "Select new PLL code"); | 154 | MODULE_PARM_DESC(audio, "Audio enable (1 = enable)"); |
135 | module_param_named(new_pll, radeon_new_pll, int, 0444); | ||
136 | |||
137 | MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); | ||
138 | module_param_named(audio, radeon_audio, int, 0444); | 155 | module_param_named(audio, radeon_audio, int, 0444); |
139 | 156 | ||
140 | MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)"); | 157 | MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)"); |
@@ -143,6 +160,9 @@ module_param_named(disp_priority, radeon_disp_priority, int, 0444); | |||
143 | MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); | 160 | MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); |
144 | module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); | 161 | module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); |
145 | 162 | ||
163 | MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)"); | ||
164 | module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444); | ||
165 | |||
146 | static int radeon_suspend(struct drm_device *dev, pm_message_t state) | 166 | static int radeon_suspend(struct drm_device *dev, pm_message_t state) |
147 | { | 167 | { |
148 | drm_radeon_private_t *dev_priv = dev->dev_private; | 168 | drm_radeon_private_t *dev_priv = dev->dev_private; |
@@ -203,8 +223,6 @@ static struct drm_driver driver_old = { | |||
203 | .irq_uninstall = radeon_driver_irq_uninstall, | 223 | .irq_uninstall = radeon_driver_irq_uninstall, |
204 | .irq_handler = radeon_driver_irq_handler, | 224 | .irq_handler = radeon_driver_irq_handler, |
205 | .reclaim_buffers = drm_core_reclaim_buffers, | 225 | .reclaim_buffers = drm_core_reclaim_buffers, |
206 | .get_map_ofs = drm_core_get_map_ofs, | ||
207 | .get_reg_ofs = drm_core_get_reg_ofs, | ||
208 | .ioctls = radeon_ioctls, | 226 | .ioctls = radeon_ioctls, |
209 | .dma_ioctl = radeon_cp_buffers, | 227 | .dma_ioctl = radeon_cp_buffers, |
210 | .fops = { | 228 | .fops = { |
@@ -219,11 +237,7 @@ static struct drm_driver driver_old = { | |||
219 | #ifdef CONFIG_COMPAT | 237 | #ifdef CONFIG_COMPAT |
220 | .compat_ioctl = radeon_compat_ioctl, | 238 | .compat_ioctl = radeon_compat_ioctl, |
221 | #endif | 239 | #endif |
222 | }, | 240 | .llseek = noop_llseek, |
223 | |||
224 | .pci_driver = { | ||
225 | .name = DRIVER_NAME, | ||
226 | .id_table = pciidlist, | ||
227 | }, | 241 | }, |
228 | 242 | ||
229 | .name = DRIVER_NAME, | 243 | .name = DRIVER_NAME, |
@@ -236,9 +250,28 @@ static struct drm_driver driver_old = { | |||
236 | 250 | ||
237 | static struct drm_driver kms_driver; | 251 | static struct drm_driver kms_driver; |
238 | 252 | ||
253 | static void radeon_kick_out_firmware_fb(struct pci_dev *pdev) | ||
254 | { | ||
255 | struct apertures_struct *ap; | ||
256 | bool primary = false; | ||
257 | |||
258 | ap = alloc_apertures(1); | ||
259 | ap->ranges[0].base = pci_resource_start(pdev, 0); | ||
260 | ap->ranges[0].size = pci_resource_len(pdev, 0); | ||
261 | |||
262 | #ifdef CONFIG_X86 | ||
263 | primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; | ||
264 | #endif | ||
265 | remove_conflicting_framebuffers(ap, "radeondrmfb", primary); | ||
266 | kfree(ap); | ||
267 | } | ||
268 | |||
239 | static int __devinit | 269 | static int __devinit |
240 | radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 270 | radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
241 | { | 271 | { |
272 | /* Get rid of things like offb */ | ||
273 | radeon_kick_out_firmware_fb(pdev); | ||
274 | |||
242 | return drm_get_pci_dev(pdev, ent, &kms_driver); | 275 | return drm_get_pci_dev(pdev, ent, &kms_driver); |
243 | } | 276 | } |
244 | 277 | ||
@@ -281,6 +314,8 @@ static struct drm_driver kms_driver = { | |||
281 | .get_vblank_counter = radeon_get_vblank_counter_kms, | 314 | .get_vblank_counter = radeon_get_vblank_counter_kms, |
282 | .enable_vblank = radeon_enable_vblank_kms, | 315 | .enable_vblank = radeon_enable_vblank_kms, |
283 | .disable_vblank = radeon_disable_vblank_kms, | 316 | .disable_vblank = radeon_disable_vblank_kms, |
317 | .get_vblank_timestamp = radeon_get_vblank_timestamp_kms, | ||
318 | .get_scanout_position = radeon_get_crtc_scanoutpos, | ||
284 | #if defined(CONFIG_DEBUG_FS) | 319 | #if defined(CONFIG_DEBUG_FS) |
285 | .debugfs_init = radeon_debugfs_init, | 320 | .debugfs_init = radeon_debugfs_init, |
286 | .debugfs_cleanup = radeon_debugfs_cleanup, | 321 | .debugfs_cleanup = radeon_debugfs_cleanup, |
@@ -290,12 +325,13 @@ static struct drm_driver kms_driver = { | |||
290 | .irq_uninstall = radeon_driver_irq_uninstall_kms, | 325 | .irq_uninstall = radeon_driver_irq_uninstall_kms, |
291 | .irq_handler = radeon_driver_irq_handler_kms, | 326 | .irq_handler = radeon_driver_irq_handler_kms, |
292 | .reclaim_buffers = drm_core_reclaim_buffers, | 327 | .reclaim_buffers = drm_core_reclaim_buffers, |
293 | .get_map_ofs = drm_core_get_map_ofs, | ||
294 | .get_reg_ofs = drm_core_get_reg_ofs, | ||
295 | .ioctls = radeon_ioctls_kms, | 328 | .ioctls = radeon_ioctls_kms, |
296 | .gem_init_object = radeon_gem_object_init, | 329 | .gem_init_object = radeon_gem_object_init, |
297 | .gem_free_object = radeon_gem_object_free, | 330 | .gem_free_object = radeon_gem_object_free, |
298 | .dma_ioctl = radeon_dma_ioctl_kms, | 331 | .dma_ioctl = radeon_dma_ioctl_kms, |
332 | .dumb_create = radeon_mode_dumb_create, | ||
333 | .dumb_map_offset = radeon_mode_dumb_mmap, | ||
334 | .dumb_destroy = radeon_mode_dumb_destroy, | ||
299 | .fops = { | 335 | .fops = { |
300 | .owner = THIS_MODULE, | 336 | .owner = THIS_MODULE, |
301 | .open = drm_open, | 337 | .open = drm_open, |
@@ -310,15 +346,6 @@ static struct drm_driver kms_driver = { | |||
310 | #endif | 346 | #endif |
311 | }, | 347 | }, |
312 | 348 | ||
313 | .pci_driver = { | ||
314 | .name = DRIVER_NAME, | ||
315 | .id_table = pciidlist, | ||
316 | .probe = radeon_pci_probe, | ||
317 | .remove = radeon_pci_remove, | ||
318 | .suspend = radeon_pci_suspend, | ||
319 | .resume = radeon_pci_resume, | ||
320 | }, | ||
321 | |||
322 | .name = DRIVER_NAME, | 349 | .name = DRIVER_NAME, |
323 | .desc = DRIVER_DESC, | 350 | .desc = DRIVER_DESC, |
324 | .date = DRIVER_DATE, | 351 | .date = DRIVER_DATE, |
@@ -328,15 +355,32 @@ static struct drm_driver kms_driver = { | |||
328 | }; | 355 | }; |
329 | 356 | ||
330 | static struct drm_driver *driver; | 357 | static struct drm_driver *driver; |
358 | static struct pci_driver *pdriver; | ||
359 | |||
360 | static struct pci_driver radeon_pci_driver = { | ||
361 | .name = DRIVER_NAME, | ||
362 | .id_table = pciidlist, | ||
363 | }; | ||
364 | |||
365 | static struct pci_driver radeon_kms_pci_driver = { | ||
366 | .name = DRIVER_NAME, | ||
367 | .id_table = pciidlist, | ||
368 | .probe = radeon_pci_probe, | ||
369 | .remove = radeon_pci_remove, | ||
370 | .suspend = radeon_pci_suspend, | ||
371 | .resume = radeon_pci_resume, | ||
372 | }; | ||
331 | 373 | ||
332 | static int __init radeon_init(void) | 374 | static int __init radeon_init(void) |
333 | { | 375 | { |
334 | driver = &driver_old; | 376 | driver = &driver_old; |
377 | pdriver = &radeon_pci_driver; | ||
335 | driver->num_ioctls = radeon_max_ioctl; | 378 | driver->num_ioctls = radeon_max_ioctl; |
336 | #ifdef CONFIG_VGA_CONSOLE | 379 | #ifdef CONFIG_VGA_CONSOLE |
337 | if (vgacon_text_force() && radeon_modeset == -1) { | 380 | if (vgacon_text_force() && radeon_modeset == -1) { |
338 | DRM_INFO("VGACON disable radeon kernel modesetting.\n"); | 381 | DRM_INFO("VGACON disable radeon kernel modesetting.\n"); |
339 | driver = &driver_old; | 382 | driver = &driver_old; |
383 | pdriver = &radeon_pci_driver; | ||
340 | driver->driver_features &= ~DRIVER_MODESET; | 384 | driver->driver_features &= ~DRIVER_MODESET; |
341 | radeon_modeset = 0; | 385 | radeon_modeset = 0; |
342 | } | 386 | } |
@@ -354,18 +398,19 @@ static int __init radeon_init(void) | |||
354 | if (radeon_modeset == 1) { | 398 | if (radeon_modeset == 1) { |
355 | DRM_INFO("radeon kernel modesetting enabled.\n"); | 399 | DRM_INFO("radeon kernel modesetting enabled.\n"); |
356 | driver = &kms_driver; | 400 | driver = &kms_driver; |
401 | pdriver = &radeon_kms_pci_driver; | ||
357 | driver->driver_features |= DRIVER_MODESET; | 402 | driver->driver_features |= DRIVER_MODESET; |
358 | driver->num_ioctls = radeon_max_kms_ioctl; | 403 | driver->num_ioctls = radeon_max_kms_ioctl; |
359 | radeon_register_atpx_handler(); | 404 | radeon_register_atpx_handler(); |
360 | } | 405 | } |
361 | /* if the vga console setting is enabled still | 406 | /* if the vga console setting is enabled still |
362 | * let modprobe override it */ | 407 | * let modprobe override it */ |
363 | return drm_init(driver); | 408 | return drm_pci_init(driver, pdriver); |
364 | } | 409 | } |
365 | 410 | ||
366 | static void __exit radeon_exit(void) | 411 | static void __exit radeon_exit(void) |
367 | { | 412 | { |
368 | drm_exit(driver); | 413 | drm_pci_exit(driver, pdriver); |
369 | radeon_unregister_atpx_handler(); | 414 | radeon_unregister_atpx_handler(); |
370 | } | 415 | } |
371 | 416 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 448eba89d1e6..a1b59ca96d01 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -271,7 +271,7 @@ typedef struct drm_radeon_private { | |||
271 | 271 | ||
272 | int have_z_offset; | 272 | int have_z_offset; |
273 | 273 | ||
274 | /* starting from here on, data is preserved accross an open */ | 274 | /* starting from here on, data is preserved across an open */ |
275 | uint32_t flags; /* see radeon_chip_flags */ | 275 | uint32_t flags; /* see radeon_chip_flags */ |
276 | resource_size_t fb_aper_offset; | 276 | resource_size_t fb_aper_offset; |
277 | 277 | ||
@@ -1524,6 +1524,7 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index); | |||
1524 | #define R600_CP_RB_CNTL 0xc104 | 1524 | #define R600_CP_RB_CNTL 0xc104 |
1525 | # define R600_RB_BUFSZ(x) ((x) << 0) | 1525 | # define R600_RB_BUFSZ(x) ((x) << 0) |
1526 | # define R600_RB_BLKSZ(x) ((x) << 8) | 1526 | # define R600_RB_BLKSZ(x) ((x) << 8) |
1527 | # define R600_BUF_SWAP_32BIT (2 << 16) | ||
1527 | # define R600_RB_NO_UPDATE (1 << 27) | 1528 | # define R600_RB_NO_UPDATE (1 << 27) |
1528 | # define R600_RB_RPTR_WR_ENA (1 << 31) | 1529 | # define R600_RB_RPTR_WR_ENA (1 << 31) |
1529 | #define R600_CP_RB_RPTR_WR 0xc108 | 1530 | #define R600_CP_RB_RPTR_WR 0xc108 |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 2c293e8304d6..b293487e5aa3 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -176,6 +176,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) | |||
176 | return false; | 176 | return false; |
177 | } | 177 | } |
178 | } | 178 | } |
179 | |||
179 | void | 180 | void |
180 | radeon_link_encoder_connector(struct drm_device *dev) | 181 | radeon_link_encoder_connector(struct drm_device *dev) |
181 | { | 182 | { |
@@ -228,6 +229,62 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) | |||
228 | return NULL; | 229 | return NULL; |
229 | } | 230 | } |
230 | 231 | ||
232 | static struct drm_connector * | ||
233 | radeon_get_connector_for_encoder_init(struct drm_encoder *encoder) | ||
234 | { | ||
235 | struct drm_device *dev = encoder->dev; | ||
236 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
237 | struct drm_connector *connector; | ||
238 | struct radeon_connector *radeon_connector; | ||
239 | |||
240 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
241 | radeon_connector = to_radeon_connector(connector); | ||
242 | if (radeon_encoder->devices & radeon_connector->devices) | ||
243 | return connector; | ||
244 | } | ||
245 | return NULL; | ||
246 | } | ||
247 | |||
248 | struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder) | ||
249 | { | ||
250 | struct drm_device *dev = encoder->dev; | ||
251 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
252 | struct drm_encoder *other_encoder; | ||
253 | struct radeon_encoder *other_radeon_encoder; | ||
254 | |||
255 | if (radeon_encoder->is_ext_encoder) | ||
256 | return NULL; | ||
257 | |||
258 | list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { | ||
259 | if (other_encoder == encoder) | ||
260 | continue; | ||
261 | other_radeon_encoder = to_radeon_encoder(other_encoder); | ||
262 | if (other_radeon_encoder->is_ext_encoder && | ||
263 | (radeon_encoder->devices & other_radeon_encoder->devices)) | ||
264 | return other_encoder; | ||
265 | } | ||
266 | return NULL; | ||
267 | } | ||
268 | |||
269 | bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder) | ||
270 | { | ||
271 | struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder); | ||
272 | |||
273 | if (other_encoder) { | ||
274 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder); | ||
275 | |||
276 | switch (radeon_encoder->encoder_id) { | ||
277 | case ENCODER_OBJECT_ID_TRAVIS: | ||
278 | case ENCODER_OBJECT_ID_NUTMEG: | ||
279 | return true; | ||
280 | default: | ||
281 | return false; | ||
282 | } | ||
283 | } | ||
284 | |||
285 | return false; | ||
286 | } | ||
287 | |||
231 | void radeon_panel_mode_fixup(struct drm_encoder *encoder, | 288 | void radeon_panel_mode_fixup(struct drm_encoder *encoder, |
232 | struct drm_display_mode *adjusted_mode) | 289 | struct drm_display_mode *adjusted_mode) |
233 | { | 290 | { |
@@ -310,7 +367,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | |||
310 | } | 367 | } |
311 | 368 | ||
312 | if (ASIC_IS_DCE3(rdev) && | 369 | if (ASIC_IS_DCE3(rdev) && |
313 | (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) { | 370 | ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || |
371 | radeon_encoder_is_dp_bridge(encoder))) { | ||
314 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 372 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
315 | radeon_dp_set_link_config(connector, mode); | 373 | radeon_dp_set_link_config(connector, mode); |
316 | } | 374 | } |
@@ -426,52 +484,49 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) | |||
426 | 484 | ||
427 | } | 485 | } |
428 | 486 | ||
429 | void | 487 | union dvo_encoder_control { |
430 | atombios_external_tmds_setup(struct drm_encoder *encoder, int action) | 488 | ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; |
431 | { | 489 | DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; |
432 | struct drm_device *dev = encoder->dev; | 490 | DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3; |
433 | struct radeon_device *rdev = dev->dev_private; | 491 | }; |
434 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
435 | ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args; | ||
436 | int index = 0; | ||
437 | |||
438 | memset(&args, 0, sizeof(args)); | ||
439 | |||
440 | index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); | ||
441 | |||
442 | args.sXTmdsEncoder.ucEnable = action; | ||
443 | |||
444 | if (radeon_encoder->pixel_clock > 165000) | ||
445 | args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL; | ||
446 | |||
447 | /*if (pScrn->rgbBits == 8)*/ | ||
448 | args.sXTmdsEncoder.ucMisc |= (1 << 1); | ||
449 | |||
450 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
451 | |||
452 | } | ||
453 | 492 | ||
454 | static void | 493 | void |
455 | atombios_ddia_setup(struct drm_encoder *encoder, int action) | 494 | atombios_dvo_setup(struct drm_encoder *encoder, int action) |
456 | { | 495 | { |
457 | struct drm_device *dev = encoder->dev; | 496 | struct drm_device *dev = encoder->dev; |
458 | struct radeon_device *rdev = dev->dev_private; | 497 | struct radeon_device *rdev = dev->dev_private; |
459 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 498 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
460 | DVO_ENCODER_CONTROL_PS_ALLOCATION args; | 499 | union dvo_encoder_control args; |
461 | int index = 0; | 500 | int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); |
462 | 501 | ||
463 | memset(&args, 0, sizeof(args)); | 502 | memset(&args, 0, sizeof(args)); |
464 | 503 | ||
465 | index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); | 504 | if (ASIC_IS_DCE3(rdev)) { |
505 | /* DCE3+ */ | ||
506 | args.dvo_v3.ucAction = action; | ||
507 | args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
508 | args.dvo_v3.ucDVOConfig = 0; /* XXX */ | ||
509 | } else if (ASIC_IS_DCE2(rdev)) { | ||
510 | /* DCE2 (pre-DCE3 R6xx, RS600/690/740 */ | ||
511 | args.dvo.sDVOEncoder.ucAction = action; | ||
512 | args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
513 | /* DFP1, CRT1, TV1 depending on the type of port */ | ||
514 | args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX; | ||
515 | |||
516 | if (radeon_encoder->pixel_clock > 165000) | ||
517 | args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL; | ||
518 | } else { | ||
519 | /* R4xx, R5xx */ | ||
520 | args.ext_tmds.sXTmdsEncoder.ucEnable = action; | ||
466 | 521 | ||
467 | args.sDVOEncoder.ucAction = action; | 522 | if (radeon_encoder->pixel_clock > 165000) |
468 | args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 523 | args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
469 | 524 | ||
470 | if (radeon_encoder->pixel_clock > 165000) | 525 | /*if (pScrn->rgbBits == 8)*/ |
471 | args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL; | 526 | args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB; |
527 | } | ||
472 | 528 | ||
473 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 529 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
474 | |||
475 | } | 530 | } |
476 | 531 | ||
477 | union lvds_encoder_control { | 532 | union lvds_encoder_control { |
@@ -529,17 +584,17 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
529 | args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; | 584 | args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; |
530 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 585 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
531 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 586 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
532 | if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL) | 587 | if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) |
533 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 588 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
534 | if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) | 589 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) |
535 | args.v1.ucMisc |= (1 << 1); | 590 | args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; |
536 | } else { | 591 | } else { |
537 | if (dig->linkb) | 592 | if (dig->linkb) |
538 | args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; | 593 | args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; |
539 | if (radeon_encoder->pixel_clock > 165000) | 594 | if (radeon_encoder->pixel_clock > 165000) |
540 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 595 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
541 | /*if (pScrn->rgbBits == 8) */ | 596 | /*if (pScrn->rgbBits == 8) */ |
542 | args.v1.ucMisc |= (1 << 1); | 597 | args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; |
543 | } | 598 | } |
544 | break; | 599 | break; |
545 | case 2: | 600 | case 2: |
@@ -558,18 +613,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
558 | args.v2.ucTemporal = 0; | 613 | args.v2.ucTemporal = 0; |
559 | args.v2.ucFRC = 0; | 614 | args.v2.ucFRC = 0; |
560 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 615 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
561 | if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL) | 616 | if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) |
562 | args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 617 | args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
563 | if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) { | 618 | if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) { |
564 | args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; | 619 | args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; |
565 | if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) | 620 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) |
566 | args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; | 621 | args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; |
567 | } | 622 | } |
568 | if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) { | 623 | if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) { |
569 | args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; | 624 | args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; |
570 | if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) | 625 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) |
571 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; | 626 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; |
572 | if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) | 627 | if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) |
573 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; | 628 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; |
574 | } | 629 | } |
575 | } else { | 630 | } else { |
@@ -595,22 +650,33 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
595 | int | 650 | int |
596 | atombios_get_encoder_mode(struct drm_encoder *encoder) | 651 | atombios_get_encoder_mode(struct drm_encoder *encoder) |
597 | { | 652 | { |
653 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
598 | struct drm_device *dev = encoder->dev; | 654 | struct drm_device *dev = encoder->dev; |
599 | struct radeon_device *rdev = dev->dev_private; | 655 | struct radeon_device *rdev = dev->dev_private; |
600 | struct drm_connector *connector; | 656 | struct drm_connector *connector; |
601 | struct radeon_connector *radeon_connector; | 657 | struct radeon_connector *radeon_connector; |
602 | struct radeon_connector_atom_dig *dig_connector; | 658 | struct radeon_connector_atom_dig *dig_connector; |
603 | 659 | ||
660 | /* dp bridges are always DP */ | ||
661 | if (radeon_encoder_is_dp_bridge(encoder)) | ||
662 | return ATOM_ENCODER_MODE_DP; | ||
663 | |||
664 | /* DVO is always DVO */ | ||
665 | if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO) | ||
666 | return ATOM_ENCODER_MODE_DVO; | ||
667 | |||
604 | connector = radeon_get_connector_for_encoder(encoder); | 668 | connector = radeon_get_connector_for_encoder(encoder); |
669 | /* if we don't have an active device yet, just use one of | ||
670 | * the connectors tied to the encoder. | ||
671 | */ | ||
605 | if (!connector) | 672 | if (!connector) |
606 | return 0; | 673 | connector = radeon_get_connector_for_encoder_init(encoder); |
607 | |||
608 | radeon_connector = to_radeon_connector(connector); | 674 | radeon_connector = to_radeon_connector(connector); |
609 | 675 | ||
610 | switch (connector->connector_type) { | 676 | switch (connector->connector_type) { |
611 | case DRM_MODE_CONNECTOR_DVII: | 677 | case DRM_MODE_CONNECTOR_DVII: |
612 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ | 678 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ |
613 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) { | 679 | if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { |
614 | /* fix me */ | 680 | /* fix me */ |
615 | if (ASIC_IS_DCE4(rdev)) | 681 | if (ASIC_IS_DCE4(rdev)) |
616 | return ATOM_ENCODER_MODE_DVI; | 682 | return ATOM_ENCODER_MODE_DVI; |
@@ -624,7 +690,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
624 | case DRM_MODE_CONNECTOR_DVID: | 690 | case DRM_MODE_CONNECTOR_DVID: |
625 | case DRM_MODE_CONNECTOR_HDMIA: | 691 | case DRM_MODE_CONNECTOR_HDMIA: |
626 | default: | 692 | default: |
627 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) { | 693 | if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { |
628 | /* fix me */ | 694 | /* fix me */ |
629 | if (ASIC_IS_DCE4(rdev)) | 695 | if (ASIC_IS_DCE4(rdev)) |
630 | return ATOM_ENCODER_MODE_DVI; | 696 | return ATOM_ENCODER_MODE_DVI; |
@@ -637,12 +703,11 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
637 | return ATOM_ENCODER_MODE_LVDS; | 703 | return ATOM_ENCODER_MODE_LVDS; |
638 | break; | 704 | break; |
639 | case DRM_MODE_CONNECTOR_DisplayPort: | 705 | case DRM_MODE_CONNECTOR_DisplayPort: |
640 | case DRM_MODE_CONNECTOR_eDP: | ||
641 | dig_connector = radeon_connector->con_priv; | 706 | dig_connector = radeon_connector->con_priv; |
642 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | 707 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
643 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) | 708 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) |
644 | return ATOM_ENCODER_MODE_DP; | 709 | return ATOM_ENCODER_MODE_DP; |
645 | else if (drm_detect_hdmi_monitor(radeon_connector->edid)) { | 710 | else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { |
646 | /* fix me */ | 711 | /* fix me */ |
647 | if (ASIC_IS_DCE4(rdev)) | 712 | if (ASIC_IS_DCE4(rdev)) |
648 | return ATOM_ENCODER_MODE_DVI; | 713 | return ATOM_ENCODER_MODE_DVI; |
@@ -651,6 +716,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
651 | } else | 716 | } else |
652 | return ATOM_ENCODER_MODE_DVI; | 717 | return ATOM_ENCODER_MODE_DVI; |
653 | break; | 718 | break; |
719 | case DRM_MODE_CONNECTOR_eDP: | ||
720 | return ATOM_ENCODER_MODE_DP; | ||
654 | case DRM_MODE_CONNECTOR_DVIA: | 721 | case DRM_MODE_CONNECTOR_DVIA: |
655 | case DRM_MODE_CONNECTOR_VGA: | 722 | case DRM_MODE_CONNECTOR_VGA: |
656 | return ATOM_ENCODER_MODE_CRT; | 723 | return ATOM_ENCODER_MODE_CRT; |
@@ -681,8 +748,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
681 | * - 2 DIG encoder blocks. | 748 | * - 2 DIG encoder blocks. |
682 | * DIG1/2 can drive UNIPHY0/1/2 link A or link B | 749 | * DIG1/2 can drive UNIPHY0/1/2 link A or link B |
683 | * | 750 | * |
684 | * DCE 4.0 | 751 | * DCE 4.0/5.0 |
685 | * - 3 DIG transmitter blocks UNPHY0/1/2 (links A and B). | 752 | * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). |
686 | * Supports up to 6 digital outputs | 753 | * Supports up to 6 digital outputs |
687 | * - 6 DIG encoder blocks. | 754 | * - 6 DIG encoder blocks. |
688 | * - DIG to PHY mapping is hardcoded | 755 | * - DIG to PHY mapping is hardcoded |
@@ -693,6 +760,12 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
693 | * DIG5 drives UNIPHY2 link A, A+B | 760 | * DIG5 drives UNIPHY2 link A, A+B |
694 | * DIG6 drives UNIPHY2 link B | 761 | * DIG6 drives UNIPHY2 link B |
695 | * | 762 | * |
763 | * DCE 4.1 | ||
764 | * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). | ||
765 | * Supports up to 6 digital outputs | ||
766 | * - 2 DIG encoder blocks. | ||
767 | * DIG1/2 can drive UNIPHY0/1/2 link A or link B | ||
768 | * | ||
696 | * Routing | 769 | * Routing |
697 | * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) | 770 | * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) |
698 | * Examples: | 771 | * Examples: |
@@ -706,10 +779,11 @@ union dig_encoder_control { | |||
706 | DIG_ENCODER_CONTROL_PS_ALLOCATION v1; | 779 | DIG_ENCODER_CONTROL_PS_ALLOCATION v1; |
707 | DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; | 780 | DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; |
708 | DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; | 781 | DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; |
782 | DIG_ENCODER_CONTROL_PARAMETERS_V4 v4; | ||
709 | }; | 783 | }; |
710 | 784 | ||
711 | void | 785 | void |
712 | atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | 786 | atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode) |
713 | { | 787 | { |
714 | struct drm_device *dev = encoder->dev; | 788 | struct drm_device *dev = encoder->dev; |
715 | struct radeon_device *rdev = dev->dev_private; | 789 | struct radeon_device *rdev = dev->dev_private; |
@@ -721,6 +795,8 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
721 | uint8_t frev, crev; | 795 | uint8_t frev, crev; |
722 | int dp_clock = 0; | 796 | int dp_clock = 0; |
723 | int dp_lane_count = 0; | 797 | int dp_lane_count = 0; |
798 | int hpd_id = RADEON_HPD_NONE; | ||
799 | int bpc = 8; | ||
724 | 800 | ||
725 | if (connector) { | 801 | if (connector) { |
726 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 802 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
@@ -729,6 +805,8 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
729 | 805 | ||
730 | dp_clock = dig_connector->dp_clock; | 806 | dp_clock = dig_connector->dp_clock; |
731 | dp_lane_count = dig_connector->dp_lane_count; | 807 | dp_lane_count = dig_connector->dp_lane_count; |
808 | hpd_id = radeon_connector->hpd.hpd; | ||
809 | bpc = connector->display_info.bpc; | ||
732 | } | 810 | } |
733 | 811 | ||
734 | /* no dig encoder assigned */ | 812 | /* no dig encoder assigned */ |
@@ -751,21 +829,81 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
751 | 829 | ||
752 | args.v1.ucAction = action; | 830 | args.v1.ucAction = action; |
753 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 831 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
754 | args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); | 832 | if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) |
833 | args.v3.ucPanelMode = panel_mode; | ||
834 | else | ||
835 | args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); | ||
755 | 836 | ||
756 | if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) { | 837 | if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) || |
757 | if (dp_clock == 270000) | 838 | (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) |
758 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; | ||
759 | args.v1.ucLaneNum = dp_lane_count; | 839 | args.v1.ucLaneNum = dp_lane_count; |
760 | } else if (radeon_encoder->pixel_clock > 165000) | 840 | else if (radeon_encoder->pixel_clock > 165000) |
761 | args.v1.ucLaneNum = 8; | 841 | args.v1.ucLaneNum = 8; |
762 | else | 842 | else |
763 | args.v1.ucLaneNum = 4; | 843 | args.v1.ucLaneNum = 4; |
764 | 844 | ||
765 | if (ASIC_IS_DCE4(rdev)) { | 845 | if (ASIC_IS_DCE5(rdev)) { |
846 | if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) || | ||
847 | (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) { | ||
848 | if (dp_clock == 270000) | ||
849 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ; | ||
850 | else if (dp_clock == 540000) | ||
851 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; | ||
852 | } | ||
853 | args.v4.acConfig.ucDigSel = dig->dig_encoder; | ||
854 | switch (bpc) { | ||
855 | case 0: | ||
856 | args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE; | ||
857 | break; | ||
858 | case 6: | ||
859 | args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR; | ||
860 | break; | ||
861 | case 8: | ||
862 | default: | ||
863 | args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR; | ||
864 | break; | ||
865 | case 10: | ||
866 | args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR; | ||
867 | break; | ||
868 | case 12: | ||
869 | args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR; | ||
870 | break; | ||
871 | case 16: | ||
872 | args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR; | ||
873 | break; | ||
874 | } | ||
875 | if (hpd_id == RADEON_HPD_NONE) | ||
876 | args.v4.ucHPD_ID = 0; | ||
877 | else | ||
878 | args.v4.ucHPD_ID = hpd_id + 1; | ||
879 | } else if (ASIC_IS_DCE4(rdev)) { | ||
880 | if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000)) | ||
881 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; | ||
766 | args.v3.acConfig.ucDigSel = dig->dig_encoder; | 882 | args.v3.acConfig.ucDigSel = dig->dig_encoder; |
767 | args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; | 883 | switch (bpc) { |
884 | case 0: | ||
885 | args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE; | ||
886 | break; | ||
887 | case 6: | ||
888 | args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR; | ||
889 | break; | ||
890 | case 8: | ||
891 | default: | ||
892 | args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; | ||
893 | break; | ||
894 | case 10: | ||
895 | args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR; | ||
896 | break; | ||
897 | case 12: | ||
898 | args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR; | ||
899 | break; | ||
900 | case 16: | ||
901 | args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR; | ||
902 | break; | ||
903 | } | ||
768 | } else { | 904 | } else { |
905 | if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000)) | ||
906 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; | ||
769 | switch (radeon_encoder->encoder_id) { | 907 | switch (radeon_encoder->encoder_id) { |
770 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 908 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
771 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; | 909 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; |
@@ -792,6 +930,7 @@ union dig_transmitter_control { | |||
792 | DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; | 930 | DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; |
793 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; | 931 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; |
794 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3; | 932 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3; |
933 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4; | ||
795 | }; | 934 | }; |
796 | 935 | ||
797 | void | 936 | void |
@@ -801,7 +940,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
801 | struct radeon_device *rdev = dev->dev_private; | 940 | struct radeon_device *rdev = dev->dev_private; |
802 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 941 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
803 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 942 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
804 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 943 | struct drm_connector *connector; |
805 | union dig_transmitter_control args; | 944 | union dig_transmitter_control args; |
806 | int index = 0; | 945 | int index = 0; |
807 | uint8_t frev, crev; | 946 | uint8_t frev, crev; |
@@ -811,6 +950,16 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
811 | int dp_lane_count = 0; | 950 | int dp_lane_count = 0; |
812 | int connector_object_id = 0; | 951 | int connector_object_id = 0; |
813 | int igp_lane_info = 0; | 952 | int igp_lane_info = 0; |
953 | int dig_encoder = dig->dig_encoder; | ||
954 | |||
955 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | ||
956 | connector = radeon_get_connector_for_encoder_init(encoder); | ||
957 | /* just needed to avoid bailing in the encoder check. the encoder | ||
958 | * isn't used for init | ||
959 | */ | ||
960 | dig_encoder = 0; | ||
961 | } else | ||
962 | connector = radeon_get_connector_for_encoder(encoder); | ||
814 | 963 | ||
815 | if (connector) { | 964 | if (connector) { |
816 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 965 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
@@ -825,7 +974,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
825 | } | 974 | } |
826 | 975 | ||
827 | /* no dig encoder assigned */ | 976 | /* no dig encoder assigned */ |
828 | if (dig->dig_encoder == -1) | 977 | if (dig_encoder == -1) |
829 | return; | 978 | return; |
830 | 979 | ||
831 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) | 980 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) |
@@ -834,6 +983,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
834 | memset(&args, 0, sizeof(args)); | 983 | memset(&args, 0, sizeof(args)); |
835 | 984 | ||
836 | switch (radeon_encoder->encoder_id) { | 985 | switch (radeon_encoder->encoder_id) { |
986 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
987 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); | ||
988 | break; | ||
837 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 989 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
838 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 990 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
839 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 991 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
@@ -849,7 +1001,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
849 | 1001 | ||
850 | args.v1.ucAction = action; | 1002 | args.v1.ucAction = action; |
851 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | 1003 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { |
852 | args.v1.usInitInfo = connector_object_id; | 1004 | args.v1.usInitInfo = cpu_to_le16(connector_object_id); |
853 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { | 1005 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { |
854 | args.v1.asMode.ucLaneSel = lane_num; | 1006 | args.v1.asMode.ucLaneSel = lane_num; |
855 | args.v1.asMode.ucLaneSet = lane_set; | 1007 | args.v1.asMode.ucLaneSet = lane_set; |
@@ -870,10 +1022,10 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
870 | else | 1022 | else |
871 | args.v3.ucLaneNum = 4; | 1023 | args.v3.ucLaneNum = 4; |
872 | 1024 | ||
873 | if (dig->linkb) { | 1025 | if (dig->linkb) |
874 | args.v3.acConfig.ucLinkSel = 1; | 1026 | args.v3.acConfig.ucLinkSel = 1; |
1027 | if (dig_encoder & 1) | ||
875 | args.v3.acConfig.ucEncoderSel = 1; | 1028 | args.v3.acConfig.ucEncoderSel = 1; |
876 | } | ||
877 | 1029 | ||
878 | /* Select the PLL for the PHY | 1030 | /* Select the PLL for the PHY |
879 | * DP PHY should be clocked from external src if there is | 1031 | * DP PHY should be clocked from external src if there is |
@@ -883,10 +1035,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
883 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | 1035 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
884 | pll_id = radeon_crtc->pll_id; | 1036 | pll_id = radeon_crtc->pll_id; |
885 | } | 1037 | } |
886 | if (is_dp && rdev->clock.dp_extclk) | 1038 | |
887 | args.v3.acConfig.ucRefClkSource = 2; /* external src */ | 1039 | if (ASIC_IS_DCE5(rdev)) { |
888 | else | 1040 | /* On DCE5 DCPLL usually generates the DP ref clock */ |
889 | args.v3.acConfig.ucRefClkSource = pll_id; | 1041 | if (is_dp) { |
1042 | if (rdev->clock.dp_extclk) | ||
1043 | args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK; | ||
1044 | else | ||
1045 | args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL; | ||
1046 | } else | ||
1047 | args.v4.acConfig.ucRefClkSource = pll_id; | ||
1048 | } else { | ||
1049 | /* On DCE4, if there is an external clock, it generates the DP ref clock */ | ||
1050 | if (is_dp && rdev->clock.dp_extclk) | ||
1051 | args.v3.acConfig.ucRefClkSource = 2; /* external src */ | ||
1052 | else | ||
1053 | args.v3.acConfig.ucRefClkSource = pll_id; | ||
1054 | } | ||
890 | 1055 | ||
891 | switch (radeon_encoder->encoder_id) { | 1056 | switch (radeon_encoder->encoder_id) { |
892 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 1057 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
@@ -909,7 +1074,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
909 | args.v3.acConfig.fDualLinkConnector = 1; | 1074 | args.v3.acConfig.fDualLinkConnector = 1; |
910 | } | 1075 | } |
911 | } else if (ASIC_IS_DCE32(rdev)) { | 1076 | } else if (ASIC_IS_DCE32(rdev)) { |
912 | args.v2.acConfig.ucEncoderSel = dig->dig_encoder; | 1077 | args.v2.acConfig.ucEncoderSel = dig_encoder; |
913 | if (dig->linkb) | 1078 | if (dig->linkb) |
914 | args.v2.acConfig.ucLinkSel = 1; | 1079 | args.v2.acConfig.ucLinkSel = 1; |
915 | 1080 | ||
@@ -925,9 +1090,10 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
925 | break; | 1090 | break; |
926 | } | 1091 | } |
927 | 1092 | ||
928 | if (is_dp) | 1093 | if (is_dp) { |
929 | args.v2.acConfig.fCoherentMode = 1; | 1094 | args.v2.acConfig.fCoherentMode = 1; |
930 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 1095 | args.v2.acConfig.fDPConnector = 1; |
1096 | } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | ||
931 | if (dig->coherent_mode) | 1097 | if (dig->coherent_mode) |
932 | args.v2.acConfig.fCoherentMode = 1; | 1098 | args.v2.acConfig.fCoherentMode = 1; |
933 | if (radeon_encoder->pixel_clock > 165000) | 1099 | if (radeon_encoder->pixel_clock > 165000) |
@@ -936,7 +1102,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
936 | } else { | 1102 | } else { |
937 | args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; | 1103 | args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; |
938 | 1104 | ||
939 | if (dig->dig_encoder) | 1105 | if (dig_encoder) |
940 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; | 1106 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; |
941 | else | 1107 | else |
942 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; | 1108 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; |
@@ -978,6 +1144,180 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
978 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1144 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
979 | } | 1145 | } |
980 | 1146 | ||
1147 | bool | ||
1148 | atombios_set_edp_panel_power(struct drm_connector *connector, int action) | ||
1149 | { | ||
1150 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1151 | struct drm_device *dev = radeon_connector->base.dev; | ||
1152 | struct radeon_device *rdev = dev->dev_private; | ||
1153 | union dig_transmitter_control args; | ||
1154 | int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); | ||
1155 | uint8_t frev, crev; | ||
1156 | |||
1157 | if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) | ||
1158 | goto done; | ||
1159 | |||
1160 | if (!ASIC_IS_DCE4(rdev)) | ||
1161 | goto done; | ||
1162 | |||
1163 | if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && | ||
1164 | (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) | ||
1165 | goto done; | ||
1166 | |||
1167 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
1168 | goto done; | ||
1169 | |||
1170 | memset(&args, 0, sizeof(args)); | ||
1171 | |||
1172 | args.v1.ucAction = action; | ||
1173 | |||
1174 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1175 | |||
1176 | /* wait for the panel to power up */ | ||
1177 | if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) { | ||
1178 | int i; | ||
1179 | |||
1180 | for (i = 0; i < 300; i++) { | ||
1181 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) | ||
1182 | return true; | ||
1183 | mdelay(1); | ||
1184 | } | ||
1185 | return false; | ||
1186 | } | ||
1187 | done: | ||
1188 | return true; | ||
1189 | } | ||
1190 | |||
1191 | union external_encoder_control { | ||
1192 | EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1; | ||
1193 | EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3; | ||
1194 | }; | ||
1195 | |||
1196 | static void | ||
1197 | atombios_external_encoder_setup(struct drm_encoder *encoder, | ||
1198 | struct drm_encoder *ext_encoder, | ||
1199 | int action) | ||
1200 | { | ||
1201 | struct drm_device *dev = encoder->dev; | ||
1202 | struct radeon_device *rdev = dev->dev_private; | ||
1203 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1204 | struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); | ||
1205 | union external_encoder_control args; | ||
1206 | struct drm_connector *connector; | ||
1207 | int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); | ||
1208 | u8 frev, crev; | ||
1209 | int dp_clock = 0; | ||
1210 | int dp_lane_count = 0; | ||
1211 | int connector_object_id = 0; | ||
1212 | u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; | ||
1213 | int bpc = 8; | ||
1214 | |||
1215 | if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) | ||
1216 | connector = radeon_get_connector_for_encoder_init(encoder); | ||
1217 | else | ||
1218 | connector = radeon_get_connector_for_encoder(encoder); | ||
1219 | |||
1220 | if (connector) { | ||
1221 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1222 | struct radeon_connector_atom_dig *dig_connector = | ||
1223 | radeon_connector->con_priv; | ||
1224 | |||
1225 | dp_clock = dig_connector->dp_clock; | ||
1226 | dp_lane_count = dig_connector->dp_lane_count; | ||
1227 | connector_object_id = | ||
1228 | (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
1229 | bpc = connector->display_info.bpc; | ||
1230 | } | ||
1231 | |||
1232 | memset(&args, 0, sizeof(args)); | ||
1233 | |||
1234 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
1235 | return; | ||
1236 | |||
1237 | switch (frev) { | ||
1238 | case 1: | ||
1239 | /* no params on frev 1 */ | ||
1240 | break; | ||
1241 | case 2: | ||
1242 | switch (crev) { | ||
1243 | case 1: | ||
1244 | case 2: | ||
1245 | args.v1.sDigEncoder.ucAction = action; | ||
1246 | args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
1247 | args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); | ||
1248 | |||
1249 | if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) { | ||
1250 | if (dp_clock == 270000) | ||
1251 | args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; | ||
1252 | args.v1.sDigEncoder.ucLaneNum = dp_lane_count; | ||
1253 | } else if (radeon_encoder->pixel_clock > 165000) | ||
1254 | args.v1.sDigEncoder.ucLaneNum = 8; | ||
1255 | else | ||
1256 | args.v1.sDigEncoder.ucLaneNum = 4; | ||
1257 | break; | ||
1258 | case 3: | ||
1259 | args.v3.sExtEncoder.ucAction = action; | ||
1260 | if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) | ||
1261 | args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id); | ||
1262 | else | ||
1263 | args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
1264 | args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); | ||
1265 | |||
1266 | if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) { | ||
1267 | if (dp_clock == 270000) | ||
1268 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; | ||
1269 | else if (dp_clock == 540000) | ||
1270 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ; | ||
1271 | args.v3.sExtEncoder.ucLaneNum = dp_lane_count; | ||
1272 | } else if (radeon_encoder->pixel_clock > 165000) | ||
1273 | args.v3.sExtEncoder.ucLaneNum = 8; | ||
1274 | else | ||
1275 | args.v3.sExtEncoder.ucLaneNum = 4; | ||
1276 | switch (ext_enum) { | ||
1277 | case GRAPH_OBJECT_ENUM_ID1: | ||
1278 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1; | ||
1279 | break; | ||
1280 | case GRAPH_OBJECT_ENUM_ID2: | ||
1281 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2; | ||
1282 | break; | ||
1283 | case GRAPH_OBJECT_ENUM_ID3: | ||
1284 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3; | ||
1285 | break; | ||
1286 | } | ||
1287 | switch (bpc) { | ||
1288 | case 0: | ||
1289 | args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE; | ||
1290 | break; | ||
1291 | case 6: | ||
1292 | args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR; | ||
1293 | break; | ||
1294 | case 8: | ||
1295 | default: | ||
1296 | args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR; | ||
1297 | break; | ||
1298 | case 10: | ||
1299 | args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR; | ||
1300 | break; | ||
1301 | case 12: | ||
1302 | args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR; | ||
1303 | break; | ||
1304 | case 16: | ||
1305 | args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR; | ||
1306 | break; | ||
1307 | } | ||
1308 | break; | ||
1309 | default: | ||
1310 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); | ||
1311 | return; | ||
1312 | } | ||
1313 | break; | ||
1314 | default: | ||
1315 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); | ||
1316 | return; | ||
1317 | } | ||
1318 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1319 | } | ||
1320 | |||
981 | static void | 1321 | static void |
982 | atombios_yuv_setup(struct drm_encoder *encoder, bool enable) | 1322 | atombios_yuv_setup(struct drm_encoder *encoder, bool enable) |
983 | { | 1323 | { |
@@ -1021,9 +1361,12 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1021 | struct drm_device *dev = encoder->dev; | 1361 | struct drm_device *dev = encoder->dev; |
1022 | struct radeon_device *rdev = dev->dev_private; | 1362 | struct radeon_device *rdev = dev->dev_private; |
1023 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1363 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1364 | struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); | ||
1024 | DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; | 1365 | DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; |
1025 | int index = 0; | 1366 | int index = 0; |
1026 | bool is_dig = false; | 1367 | bool is_dig = false; |
1368 | bool is_dce5_dac = false; | ||
1369 | bool is_dce5_dvo = false; | ||
1027 | 1370 | ||
1028 | memset(&args, 0, sizeof(args)); | 1371 | memset(&args, 0, sizeof(args)); |
1029 | 1372 | ||
@@ -1043,9 +1386,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1043 | break; | 1386 | break; |
1044 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 1387 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
1045 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 1388 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
1046 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
1047 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); | 1389 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); |
1048 | break; | 1390 | break; |
1391 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
1392 | if (ASIC_IS_DCE5(rdev)) | ||
1393 | is_dce5_dvo = true; | ||
1394 | else if (ASIC_IS_DCE3(rdev)) | ||
1395 | is_dig = true; | ||
1396 | else | ||
1397 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); | ||
1398 | break; | ||
1049 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 1399 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
1050 | index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); | 1400 | index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); |
1051 | break; | 1401 | break; |
@@ -1057,12 +1407,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1057 | break; | 1407 | break; |
1058 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | 1408 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
1059 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | 1409 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
1060 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | 1410 | if (ASIC_IS_DCE5(rdev)) |
1061 | index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); | 1411 | is_dce5_dac = true; |
1062 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | 1412 | else { |
1063 | index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); | 1413 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
1064 | else | 1414 | index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); |
1065 | index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); | 1415 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) |
1416 | index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); | ||
1417 | else | ||
1418 | index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); | ||
1419 | } | ||
1066 | break; | 1420 | break; |
1067 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | 1421 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: |
1068 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | 1422 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: |
@@ -1078,38 +1432,126 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1078 | if (is_dig) { | 1432 | if (is_dig) { |
1079 | switch (mode) { | 1433 | switch (mode) { |
1080 | case DRM_MODE_DPMS_ON: | 1434 | case DRM_MODE_DPMS_ON: |
1081 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | 1435 | /* some early dce3.2 boards have a bug in their transmitter control table */ |
1436 | if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730)) | ||
1437 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); | ||
1438 | else | ||
1439 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | ||
1082 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | 1440 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
1083 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 1441 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
1084 | 1442 | ||
1085 | dp_link_train(encoder, connector); | 1443 | if (connector && |
1444 | (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { | ||
1445 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1446 | struct radeon_connector_atom_dig *radeon_dig_connector = | ||
1447 | radeon_connector->con_priv; | ||
1448 | atombios_set_edp_panel_power(connector, | ||
1449 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
1450 | radeon_dig_connector->edp_on = true; | ||
1451 | } | ||
1086 | if (ASIC_IS_DCE4(rdev)) | 1452 | if (ASIC_IS_DCE4(rdev)) |
1087 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); | 1453 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); |
1454 | radeon_dp_link_train(encoder, connector); | ||
1455 | if (ASIC_IS_DCE4(rdev)) | ||
1456 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); | ||
1088 | } | 1457 | } |
1458 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
1459 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); | ||
1089 | break; | 1460 | break; |
1090 | case DRM_MODE_DPMS_STANDBY: | 1461 | case DRM_MODE_DPMS_STANDBY: |
1091 | case DRM_MODE_DPMS_SUSPEND: | 1462 | case DRM_MODE_DPMS_SUSPEND: |
1092 | case DRM_MODE_DPMS_OFF: | 1463 | case DRM_MODE_DPMS_OFF: |
1093 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); | 1464 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); |
1094 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | 1465 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
1466 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1467 | |||
1095 | if (ASIC_IS_DCE4(rdev)) | 1468 | if (ASIC_IS_DCE4(rdev)) |
1096 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); | 1469 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); |
1470 | if (connector && | ||
1471 | (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { | ||
1472 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1473 | struct radeon_connector_atom_dig *radeon_dig_connector = | ||
1474 | radeon_connector->con_priv; | ||
1475 | atombios_set_edp_panel_power(connector, | ||
1476 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | ||
1477 | radeon_dig_connector->edp_on = false; | ||
1478 | } | ||
1097 | } | 1479 | } |
1480 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
1481 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); | ||
1482 | break; | ||
1483 | } | ||
1484 | } else if (is_dce5_dac) { | ||
1485 | switch (mode) { | ||
1486 | case DRM_MODE_DPMS_ON: | ||
1487 | atombios_dac_setup(encoder, ATOM_ENABLE); | ||
1488 | break; | ||
1489 | case DRM_MODE_DPMS_STANDBY: | ||
1490 | case DRM_MODE_DPMS_SUSPEND: | ||
1491 | case DRM_MODE_DPMS_OFF: | ||
1492 | atombios_dac_setup(encoder, ATOM_DISABLE); | ||
1493 | break; | ||
1494 | } | ||
1495 | } else if (is_dce5_dvo) { | ||
1496 | switch (mode) { | ||
1497 | case DRM_MODE_DPMS_ON: | ||
1498 | atombios_dvo_setup(encoder, ATOM_ENABLE); | ||
1499 | break; | ||
1500 | case DRM_MODE_DPMS_STANDBY: | ||
1501 | case DRM_MODE_DPMS_SUSPEND: | ||
1502 | case DRM_MODE_DPMS_OFF: | ||
1503 | atombios_dvo_setup(encoder, ATOM_DISABLE); | ||
1098 | break; | 1504 | break; |
1099 | } | 1505 | } |
1100 | } else { | 1506 | } else { |
1101 | switch (mode) { | 1507 | switch (mode) { |
1102 | case DRM_MODE_DPMS_ON: | 1508 | case DRM_MODE_DPMS_ON: |
1103 | args.ucAction = ATOM_ENABLE; | 1509 | args.ucAction = ATOM_ENABLE; |
1510 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1511 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
1512 | args.ucAction = ATOM_LCD_BLON; | ||
1513 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1514 | } | ||
1104 | break; | 1515 | break; |
1105 | case DRM_MODE_DPMS_STANDBY: | 1516 | case DRM_MODE_DPMS_STANDBY: |
1106 | case DRM_MODE_DPMS_SUSPEND: | 1517 | case DRM_MODE_DPMS_SUSPEND: |
1107 | case DRM_MODE_DPMS_OFF: | 1518 | case DRM_MODE_DPMS_OFF: |
1108 | args.ucAction = ATOM_DISABLE; | 1519 | args.ucAction = ATOM_DISABLE; |
1520 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1521 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
1522 | args.ucAction = ATOM_LCD_BLOFF; | ||
1523 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1524 | } | ||
1109 | break; | 1525 | break; |
1110 | } | 1526 | } |
1111 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1112 | } | 1527 | } |
1528 | |||
1529 | if (ext_encoder) { | ||
1530 | switch (mode) { | ||
1531 | case DRM_MODE_DPMS_ON: | ||
1532 | default: | ||
1533 | if (ASIC_IS_DCE41(rdev)) { | ||
1534 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1535 | EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT); | ||
1536 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1537 | EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF); | ||
1538 | } else | ||
1539 | atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); | ||
1540 | break; | ||
1541 | case DRM_MODE_DPMS_STANDBY: | ||
1542 | case DRM_MODE_DPMS_SUSPEND: | ||
1543 | case DRM_MODE_DPMS_OFF: | ||
1544 | if (ASIC_IS_DCE41(rdev)) { | ||
1545 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1546 | EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING); | ||
1547 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1548 | EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT); | ||
1549 | } else | ||
1550 | atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE); | ||
1551 | break; | ||
1552 | } | ||
1553 | } | ||
1554 | |||
1113 | radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); | 1555 | radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); |
1114 | 1556 | ||
1115 | } | 1557 | } |
@@ -1242,7 +1684,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) | |||
1242 | break; | 1684 | break; |
1243 | default: | 1685 | default: |
1244 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); | 1686 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); |
1245 | break; | 1687 | return; |
1246 | } | 1688 | } |
1247 | 1689 | ||
1248 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1690 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
@@ -1275,11 +1717,21 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder, | |||
1275 | } | 1717 | } |
1276 | 1718 | ||
1277 | /* set scaler clears this on some chips */ | 1719 | /* set scaler clears this on some chips */ |
1278 | /* XXX check DCE4 */ | 1720 | if (ASIC_IS_AVIVO(rdev) && |
1279 | if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) { | 1721 | (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) { |
1280 | if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) | 1722 | if (ASIC_IS_DCE4(rdev)) { |
1281 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, | 1723 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
1282 | AVIVO_D1MODE_INTERLEAVE_EN); | 1724 | WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, |
1725 | EVERGREEN_INTERLEAVE_EN); | ||
1726 | else | ||
1727 | WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); | ||
1728 | } else { | ||
1729 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
1730 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, | ||
1731 | AVIVO_D1MODE_INTERLEAVE_EN); | ||
1732 | else | ||
1733 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); | ||
1734 | } | ||
1283 | } | 1735 | } |
1284 | } | 1736 | } |
1285 | 1737 | ||
@@ -1293,27 +1745,32 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) | |||
1293 | struct radeon_encoder_atom_dig *dig; | 1745 | struct radeon_encoder_atom_dig *dig; |
1294 | uint32_t dig_enc_in_use = 0; | 1746 | uint32_t dig_enc_in_use = 0; |
1295 | 1747 | ||
1748 | /* DCE4/5 */ | ||
1296 | if (ASIC_IS_DCE4(rdev)) { | 1749 | if (ASIC_IS_DCE4(rdev)) { |
1297 | dig = radeon_encoder->enc_priv; | 1750 | dig = radeon_encoder->enc_priv; |
1298 | switch (radeon_encoder->encoder_id) { | 1751 | if (ASIC_IS_DCE41(rdev)) |
1299 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 1752 | return radeon_crtc->crtc_id; |
1300 | if (dig->linkb) | 1753 | else { |
1301 | return 1; | 1754 | switch (radeon_encoder->encoder_id) { |
1302 | else | 1755 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
1303 | return 0; | 1756 | if (dig->linkb) |
1304 | break; | 1757 | return 1; |
1305 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 1758 | else |
1306 | if (dig->linkb) | 1759 | return 0; |
1307 | return 3; | 1760 | break; |
1308 | else | 1761 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
1309 | return 2; | 1762 | if (dig->linkb) |
1310 | break; | 1763 | return 3; |
1311 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 1764 | else |
1312 | if (dig->linkb) | 1765 | return 2; |
1313 | return 5; | 1766 | break; |
1314 | else | 1767 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
1315 | return 4; | 1768 | if (dig->linkb) |
1316 | break; | 1769 | return 5; |
1770 | else | ||
1771 | return 4; | ||
1772 | break; | ||
1773 | } | ||
1317 | } | 1774 | } |
1318 | } | 1775 | } |
1319 | 1776 | ||
@@ -1349,6 +1806,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) | |||
1349 | return 1; | 1806 | return 1; |
1350 | } | 1807 | } |
1351 | 1808 | ||
1809 | /* This only needs to be called once at startup */ | ||
1810 | void | ||
1811 | radeon_atom_encoder_init(struct radeon_device *rdev) | ||
1812 | { | ||
1813 | struct drm_device *dev = rdev->ddev; | ||
1814 | struct drm_encoder *encoder; | ||
1815 | |||
1816 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
1817 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1818 | struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); | ||
1819 | |||
1820 | switch (radeon_encoder->encoder_id) { | ||
1821 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
1822 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
1823 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
1824 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
1825 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); | ||
1826 | break; | ||
1827 | default: | ||
1828 | break; | ||
1829 | } | ||
1830 | |||
1831 | if (ext_encoder && ASIC_IS_DCE41(rdev)) | ||
1832 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1833 | EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT); | ||
1834 | } | ||
1835 | } | ||
1836 | |||
1352 | static void | 1837 | static void |
1353 | radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | 1838 | radeon_atom_encoder_mode_set(struct drm_encoder *encoder, |
1354 | struct drm_display_mode *mode, | 1839 | struct drm_display_mode *mode, |
@@ -1357,6 +1842,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1357 | struct drm_device *dev = encoder->dev; | 1842 | struct drm_device *dev = encoder->dev; |
1358 | struct radeon_device *rdev = dev->dev_private; | 1843 | struct radeon_device *rdev = dev->dev_private; |
1359 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1844 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1845 | struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); | ||
1360 | 1846 | ||
1361 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 1847 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
1362 | 1848 | ||
@@ -1382,29 +1868,25 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1382 | /* disable the transmitter */ | 1868 | /* disable the transmitter */ |
1383 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | 1869 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); |
1384 | /* setup and enable the encoder */ | 1870 | /* setup and enable the encoder */ |
1385 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP); | 1871 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); |
1386 | 1872 | ||
1387 | /* init and enable the transmitter */ | 1873 | /* enable the transmitter */ |
1388 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); | ||
1389 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); | 1874 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); |
1390 | } else { | 1875 | } else { |
1391 | /* disable the encoder and transmitter */ | 1876 | /* disable the encoder and transmitter */ |
1392 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | 1877 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); |
1393 | atombios_dig_encoder_setup(encoder, ATOM_DISABLE); | 1878 | atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); |
1394 | 1879 | ||
1395 | /* setup and enable the encoder and transmitter */ | 1880 | /* setup and enable the encoder and transmitter */ |
1396 | atombios_dig_encoder_setup(encoder, ATOM_ENABLE); | 1881 | atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); |
1397 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); | ||
1398 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); | 1882 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); |
1399 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); | 1883 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); |
1400 | } | 1884 | } |
1401 | break; | 1885 | break; |
1402 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 1886 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
1403 | atombios_ddia_setup(encoder, ATOM_ENABLE); | ||
1404 | break; | ||
1405 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 1887 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
1406 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | 1888 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: |
1407 | atombios_external_tmds_setup(encoder, ATOM_ENABLE); | 1889 | atombios_dvo_setup(encoder, ATOM_ENABLE); |
1408 | break; | 1890 | break; |
1409 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | 1891 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
1410 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | 1892 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
@@ -1419,6 +1901,15 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1419 | } | 1901 | } |
1420 | break; | 1902 | break; |
1421 | } | 1903 | } |
1904 | |||
1905 | if (ext_encoder) { | ||
1906 | if (ASIC_IS_DCE41(rdev)) | ||
1907 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1908 | EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); | ||
1909 | else | ||
1910 | atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); | ||
1911 | } | ||
1912 | |||
1422 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 1913 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
1423 | 1914 | ||
1424 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { | 1915 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { |
@@ -1517,12 +2008,73 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec | |||
1517 | return connector_status_disconnected; | 2008 | return connector_status_disconnected; |
1518 | } | 2009 | } |
1519 | 2010 | ||
2011 | static enum drm_connector_status | ||
2012 | radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector) | ||
2013 | { | ||
2014 | struct drm_device *dev = encoder->dev; | ||
2015 | struct radeon_device *rdev = dev->dev_private; | ||
2016 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
2017 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
2018 | struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); | ||
2019 | u32 bios_0_scratch; | ||
2020 | |||
2021 | if (!ASIC_IS_DCE4(rdev)) | ||
2022 | return connector_status_unknown; | ||
2023 | |||
2024 | if (!ext_encoder) | ||
2025 | return connector_status_unknown; | ||
2026 | |||
2027 | if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0) | ||
2028 | return connector_status_unknown; | ||
2029 | |||
2030 | /* load detect on the dp bridge */ | ||
2031 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
2032 | EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION); | ||
2033 | |||
2034 | bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH); | ||
2035 | |||
2036 | DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); | ||
2037 | if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { | ||
2038 | if (bios_0_scratch & ATOM_S0_CRT1_MASK) | ||
2039 | return connector_status_connected; | ||
2040 | } | ||
2041 | if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { | ||
2042 | if (bios_0_scratch & ATOM_S0_CRT2_MASK) | ||
2043 | return connector_status_connected; | ||
2044 | } | ||
2045 | if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { | ||
2046 | if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) | ||
2047 | return connector_status_connected; | ||
2048 | } | ||
2049 | if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { | ||
2050 | if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) | ||
2051 | return connector_status_connected; /* CTV */ | ||
2052 | else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) | ||
2053 | return connector_status_connected; /* STV */ | ||
2054 | } | ||
2055 | return connector_status_disconnected; | ||
2056 | } | ||
2057 | |||
2058 | void | ||
2059 | radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder) | ||
2060 | { | ||
2061 | struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); | ||
2062 | |||
2063 | if (ext_encoder) | ||
2064 | /* ddc_setup on the dp bridge */ | ||
2065 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
2066 | EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP); | ||
2067 | |||
2068 | } | ||
2069 | |||
1520 | static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) | 2070 | static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) |
1521 | { | 2071 | { |
1522 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 2072 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
2073 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1523 | 2074 | ||
1524 | if (radeon_encoder->active_device & | 2075 | if ((radeon_encoder->active_device & |
1525 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { | 2076 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || |
2077 | radeon_encoder_is_dp_bridge(encoder)) { | ||
1526 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 2078 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
1527 | if (dig) | 2079 | if (dig) |
1528 | dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); | 2080 | dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); |
@@ -1531,6 +2083,19 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) | |||
1531 | radeon_atom_output_lock(encoder, true); | 2083 | radeon_atom_output_lock(encoder, true); |
1532 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 2084 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
1533 | 2085 | ||
2086 | if (connector) { | ||
2087 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
2088 | |||
2089 | /* select the clock/data port if it uses a router */ | ||
2090 | if (radeon_connector->router.cd_valid) | ||
2091 | radeon_router_select_cd_port(radeon_connector); | ||
2092 | |||
2093 | /* turn eDP panel on for mode set */ | ||
2094 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) | ||
2095 | atombios_set_edp_panel_power(connector, | ||
2096 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
2097 | } | ||
2098 | |||
1534 | /* this is needed for the pll/ss setup to work correctly in some cases */ | 2099 | /* this is needed for the pll/ss setup to work correctly in some cases */ |
1535 | atombios_set_encoder_crtc_source(encoder); | 2100 | atombios_set_encoder_crtc_source(encoder); |
1536 | } | 2101 | } |
@@ -1547,6 +2112,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
1547 | struct radeon_device *rdev = dev->dev_private; | 2112 | struct radeon_device *rdev = dev->dev_private; |
1548 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 2113 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1549 | struct radeon_encoder_atom_dig *dig; | 2114 | struct radeon_encoder_atom_dig *dig; |
2115 | |||
2116 | /* check for pre-DCE3 cards with shared encoders; | ||
2117 | * can't really use the links individually, so don't disable | ||
2118 | * the encoder if it's in use by another connector | ||
2119 | */ | ||
2120 | if (!ASIC_IS_DCE3(rdev)) { | ||
2121 | struct drm_encoder *other_encoder; | ||
2122 | struct radeon_encoder *other_radeon_encoder; | ||
2123 | |||
2124 | list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { | ||
2125 | other_radeon_encoder = to_radeon_encoder(other_encoder); | ||
2126 | if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) && | ||
2127 | drm_helper_encoder_in_use(other_encoder)) | ||
2128 | goto disable_done; | ||
2129 | } | ||
2130 | } | ||
2131 | |||
1550 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 2132 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
1551 | 2133 | ||
1552 | switch (radeon_encoder->encoder_id) { | 2134 | switch (radeon_encoder->encoder_id) { |
@@ -1566,15 +2148,13 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
1566 | else { | 2148 | else { |
1567 | /* disable the encoder and transmitter */ | 2149 | /* disable the encoder and transmitter */ |
1568 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | 2150 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); |
1569 | atombios_dig_encoder_setup(encoder, ATOM_DISABLE); | 2151 | atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); |
1570 | } | 2152 | } |
1571 | break; | 2153 | break; |
1572 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 2154 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
1573 | atombios_ddia_setup(encoder, ATOM_DISABLE); | ||
1574 | break; | ||
1575 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 2155 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
1576 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | 2156 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: |
1577 | atombios_external_tmds_setup(encoder, ATOM_DISABLE); | 2157 | atombios_dvo_setup(encoder, ATOM_DISABLE); |
1578 | break; | 2158 | break; |
1579 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | 2159 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
1580 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | 2160 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
@@ -1586,6 +2166,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
1586 | break; | 2166 | break; |
1587 | } | 2167 | } |
1588 | 2168 | ||
2169 | disable_done: | ||
1589 | if (radeon_encoder_is_digital(encoder)) { | 2170 | if (radeon_encoder_is_digital(encoder)) { |
1590 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | 2171 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) |
1591 | r600_hdmi_disable(encoder); | 2172 | r600_hdmi_disable(encoder); |
@@ -1595,6 +2176,53 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
1595 | radeon_encoder->active_device = 0; | 2176 | radeon_encoder->active_device = 0; |
1596 | } | 2177 | } |
1597 | 2178 | ||
2179 | /* these are handled by the primary encoders */ | ||
2180 | static void radeon_atom_ext_prepare(struct drm_encoder *encoder) | ||
2181 | { | ||
2182 | |||
2183 | } | ||
2184 | |||
2185 | static void radeon_atom_ext_commit(struct drm_encoder *encoder) | ||
2186 | { | ||
2187 | |||
2188 | } | ||
2189 | |||
2190 | static void | ||
2191 | radeon_atom_ext_mode_set(struct drm_encoder *encoder, | ||
2192 | struct drm_display_mode *mode, | ||
2193 | struct drm_display_mode *adjusted_mode) | ||
2194 | { | ||
2195 | |||
2196 | } | ||
2197 | |||
2198 | static void radeon_atom_ext_disable(struct drm_encoder *encoder) | ||
2199 | { | ||
2200 | |||
2201 | } | ||
2202 | |||
2203 | static void | ||
2204 | radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode) | ||
2205 | { | ||
2206 | |||
2207 | } | ||
2208 | |||
2209 | static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder, | ||
2210 | struct drm_display_mode *mode, | ||
2211 | struct drm_display_mode *adjusted_mode) | ||
2212 | { | ||
2213 | return true; | ||
2214 | } | ||
2215 | |||
2216 | static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = { | ||
2217 | .dpms = radeon_atom_ext_dpms, | ||
2218 | .mode_fixup = radeon_atom_ext_mode_fixup, | ||
2219 | .prepare = radeon_atom_ext_prepare, | ||
2220 | .mode_set = radeon_atom_ext_mode_set, | ||
2221 | .commit = radeon_atom_ext_commit, | ||
2222 | .disable = radeon_atom_ext_disable, | ||
2223 | /* no detect for TMDS/LVDS yet */ | ||
2224 | }; | ||
2225 | |||
1598 | static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { | 2226 | static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { |
1599 | .dpms = radeon_atom_encoder_dpms, | 2227 | .dpms = radeon_atom_encoder_dpms, |
1600 | .mode_fixup = radeon_atom_mode_fixup, | 2228 | .mode_fixup = radeon_atom_mode_fixup, |
@@ -1602,7 +2230,7 @@ static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { | |||
1602 | .mode_set = radeon_atom_encoder_mode_set, | 2230 | .mode_set = radeon_atom_encoder_mode_set, |
1603 | .commit = radeon_atom_encoder_commit, | 2231 | .commit = radeon_atom_encoder_commit, |
1604 | .disable = radeon_atom_encoder_disable, | 2232 | .disable = radeon_atom_encoder_disable, |
1605 | /* no detect for TMDS/LVDS yet */ | 2233 | .detect = radeon_atom_dig_detect, |
1606 | }; | 2234 | }; |
1607 | 2235 | ||
1608 | static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = { | 2236 | static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = { |
@@ -1662,7 +2290,10 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) | |||
1662 | } | 2290 | } |
1663 | 2291 | ||
1664 | void | 2292 | void |
1665 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device) | 2293 | radeon_add_atom_encoder(struct drm_device *dev, |
2294 | uint32_t encoder_enum, | ||
2295 | uint32_t supported_device, | ||
2296 | u16 caps) | ||
1666 | { | 2297 | { |
1667 | struct radeon_device *rdev = dev->dev_private; | 2298 | struct radeon_device *rdev = dev->dev_private; |
1668 | struct drm_encoder *encoder; | 2299 | struct drm_encoder *encoder; |
@@ -1704,6 +2335,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t | |||
1704 | radeon_encoder->devices = supported_device; | 2335 | radeon_encoder->devices = supported_device; |
1705 | radeon_encoder->rmx_type = RMX_OFF; | 2336 | radeon_encoder->rmx_type = RMX_OFF; |
1706 | radeon_encoder->underscan_type = UNDERSCAN_OFF; | 2337 | radeon_encoder->underscan_type = UNDERSCAN_OFF; |
2338 | radeon_encoder->is_ext_encoder = false; | ||
2339 | radeon_encoder->caps = caps; | ||
1707 | 2340 | ||
1708 | switch (radeon_encoder->encoder_id) { | 2341 | switch (radeon_encoder->encoder_id) { |
1709 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 2342 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
@@ -1717,8 +2350,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t | |||
1717 | } else { | 2350 | } else { |
1718 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); | 2351 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); |
1719 | radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); | 2352 | radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); |
1720 | if (ASIC_IS_AVIVO(rdev)) | ||
1721 | radeon_encoder->underscan_type = UNDERSCAN_AUTO; | ||
1722 | } | 2353 | } |
1723 | drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); | 2354 | drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); |
1724 | break; | 2355 | break; |
@@ -1745,13 +2376,33 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t | |||
1745 | radeon_encoder->rmx_type = RMX_FULL; | 2376 | radeon_encoder->rmx_type = RMX_FULL; |
1746 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); | 2377 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); |
1747 | radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); | 2378 | radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); |
2379 | } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { | ||
2380 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); | ||
2381 | radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); | ||
1748 | } else { | 2382 | } else { |
1749 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); | 2383 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); |
1750 | radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); | 2384 | radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); |
1751 | if (ASIC_IS_AVIVO(rdev)) | ||
1752 | radeon_encoder->underscan_type = UNDERSCAN_AUTO; | ||
1753 | } | 2385 | } |
1754 | drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); | 2386 | drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); |
1755 | break; | 2387 | break; |
2388 | case ENCODER_OBJECT_ID_SI170B: | ||
2389 | case ENCODER_OBJECT_ID_CH7303: | ||
2390 | case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: | ||
2391 | case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: | ||
2392 | case ENCODER_OBJECT_ID_TITFP513: | ||
2393 | case ENCODER_OBJECT_ID_VT1623: | ||
2394 | case ENCODER_OBJECT_ID_HDMI_SI1930: | ||
2395 | case ENCODER_OBJECT_ID_TRAVIS: | ||
2396 | case ENCODER_OBJECT_ID_NUTMEG: | ||
2397 | /* these are handled by the primary encoders */ | ||
2398 | radeon_encoder->is_ext_encoder = true; | ||
2399 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
2400 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); | ||
2401 | else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) | ||
2402 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); | ||
2403 | else | ||
2404 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); | ||
2405 | drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs); | ||
2406 | break; | ||
1756 | } | 2407 | } |
1757 | } | 2408 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index e329066dcabd..ec2f1ea84f81 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h | |||
@@ -80,6 +80,13 @@ enum radeon_family { | |||
80 | CHIP_JUNIPER, | 80 | CHIP_JUNIPER, |
81 | CHIP_CYPRESS, | 81 | CHIP_CYPRESS, |
82 | CHIP_HEMLOCK, | 82 | CHIP_HEMLOCK, |
83 | CHIP_PALM, | ||
84 | CHIP_SUMO, | ||
85 | CHIP_SUMO2, | ||
86 | CHIP_BARTS, | ||
87 | CHIP_TURKS, | ||
88 | CHIP_CAICOS, | ||
89 | CHIP_CAYMAN, | ||
83 | CHIP_LAST, | 90 | CHIP_LAST, |
84 | }; | 91 | }; |
85 | 92 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 40b0c087b592..0b7b486c97e8 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -59,10 +59,12 @@ static struct fb_ops radeonfb_ops = { | |||
59 | .fb_pan_display = drm_fb_helper_pan_display, | 59 | .fb_pan_display = drm_fb_helper_pan_display, |
60 | .fb_blank = drm_fb_helper_blank, | 60 | .fb_blank = drm_fb_helper_blank, |
61 | .fb_setcmap = drm_fb_helper_setcmap, | 61 | .fb_setcmap = drm_fb_helper_setcmap, |
62 | .fb_debug_enter = drm_fb_helper_debug_enter, | ||
63 | .fb_debug_leave = drm_fb_helper_debug_leave, | ||
62 | }; | 64 | }; |
63 | 65 | ||
64 | 66 | ||
65 | static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) | 67 | int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) |
66 | { | 68 | { |
67 | int aligned = width; | 69 | int aligned = width; |
68 | int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; | 70 | int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; |
@@ -88,7 +90,7 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo | |||
88 | 90 | ||
89 | static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) | 91 | static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) |
90 | { | 92 | { |
91 | struct radeon_bo *rbo = gobj->driver_private; | 93 | struct radeon_bo *rbo = gem_to_radeon_bo(gobj); |
92 | int ret; | 94 | int ret; |
93 | 95 | ||
94 | ret = radeon_bo_reserve(rbo, false); | 96 | ret = radeon_bo_reserve(rbo, false); |
@@ -111,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, | |||
111 | u32 tiling_flags = 0; | 113 | u32 tiling_flags = 0; |
112 | int ret; | 114 | int ret; |
113 | int aligned_size, size; | 115 | int aligned_size, size; |
116 | int height = mode_cmd->height; | ||
114 | 117 | ||
115 | /* need to align pitch with crtc limits */ | 118 | /* need to align pitch with crtc limits */ |
116 | mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); | 119 | mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); |
117 | 120 | ||
118 | size = mode_cmd->pitch * mode_cmd->height; | 121 | if (rdev->family >= CHIP_R600) |
122 | height = ALIGN(mode_cmd->height, 8); | ||
123 | size = mode_cmd->pitch * height; | ||
119 | aligned_size = ALIGN(size, PAGE_SIZE); | 124 | aligned_size = ALIGN(size, PAGE_SIZE); |
120 | ret = radeon_gem_object_create(rdev, aligned_size, 0, | 125 | ret = radeon_gem_object_create(rdev, aligned_size, 0, |
121 | RADEON_GEM_DOMAIN_VRAM, | 126 | RADEON_GEM_DOMAIN_VRAM, |
@@ -126,7 +131,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, | |||
126 | aligned_size); | 131 | aligned_size); |
127 | return -ENOMEM; | 132 | return -ENOMEM; |
128 | } | 133 | } |
129 | rbo = gobj->driver_private; | 134 | rbo = gem_to_radeon_bo(gobj); |
130 | 135 | ||
131 | if (fb_tiled) | 136 | if (fb_tiled) |
132 | tiling_flags = RADEON_TILING_MACRO; | 137 | tiling_flags = RADEON_TILING_MACRO; |
@@ -200,7 +205,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev, | |||
200 | mode_cmd.depth = sizes->surface_depth; | 205 | mode_cmd.depth = sizes->surface_depth; |
201 | 206 | ||
202 | ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); | 207 | ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); |
203 | rbo = gobj->driver_private; | 208 | rbo = gem_to_radeon_bo(gobj); |
204 | 209 | ||
205 | /* okay we have an object now allocate the framebuffer */ | 210 | /* okay we have an object now allocate the framebuffer */ |
206 | info = framebuffer_alloc(0, device); | 211 | info = framebuffer_alloc(0, device); |
@@ -243,10 +248,8 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev, | |||
243 | goto out_unref; | 248 | goto out_unref; |
244 | } | 249 | } |
245 | info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; | 250 | info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; |
246 | info->apertures->ranges[0].size = rdev->mc.real_vram_size; | 251 | info->apertures->ranges[0].size = rdev->mc.aper_size; |
247 | 252 | ||
248 | info->fix.mmio_start = 0; | ||
249 | info->fix.mmio_len = 0; | ||
250 | info->pixmap.size = 64*1024; | 253 | info->pixmap.size = 64*1024; |
251 | info->pixmap.buf_align = 8; | 254 | info->pixmap.buf_align = 8; |
252 | info->pixmap.access_align = 32; | 255 | info->pixmap.access_align = 32; |
@@ -403,14 +406,14 @@ int radeon_fbdev_total_size(struct radeon_device *rdev) | |||
403 | struct radeon_bo *robj; | 406 | struct radeon_bo *robj; |
404 | int size = 0; | 407 | int size = 0; |
405 | 408 | ||
406 | robj = rdev->mode_info.rfbdev->rfb.obj->driver_private; | 409 | robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj); |
407 | size += radeon_bo_size(robj); | 410 | size += radeon_bo_size(robj); |
408 | return size; | 411 | return size; |
409 | } | 412 | } |
410 | 413 | ||
411 | bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) | 414 | bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) |
412 | { | 415 | { |
413 | if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private) | 416 | if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj)) |
414 | return true; | 417 | return true; |
415 | return false; | 418 | return false; |
416 | } | 419 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index b1f9a81b5d1d..021d2b6b556f 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -38,6 +38,36 @@ | |||
38 | #include "drm.h" | 38 | #include "drm.h" |
39 | #include "radeon_reg.h" | 39 | #include "radeon_reg.h" |
40 | #include "radeon.h" | 40 | #include "radeon.h" |
41 | #include "radeon_trace.h" | ||
42 | |||
43 | static void radeon_fence_write(struct radeon_device *rdev, u32 seq) | ||
44 | { | ||
45 | if (rdev->wb.enabled) { | ||
46 | u32 scratch_index; | ||
47 | if (rdev->wb.use_event) | ||
48 | scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; | ||
49 | else | ||
50 | scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; | ||
51 | rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);; | ||
52 | } else | ||
53 | WREG32(rdev->fence_drv.scratch_reg, seq); | ||
54 | } | ||
55 | |||
56 | static u32 radeon_fence_read(struct radeon_device *rdev) | ||
57 | { | ||
58 | u32 seq; | ||
59 | |||
60 | if (rdev->wb.enabled) { | ||
61 | u32 scratch_index; | ||
62 | if (rdev->wb.use_event) | ||
63 | scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; | ||
64 | else | ||
65 | scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; | ||
66 | seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]); | ||
67 | } else | ||
68 | seq = RREG32(rdev->fence_drv.scratch_reg); | ||
69 | return seq; | ||
70 | } | ||
41 | 71 | ||
42 | int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) | 72 | int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) |
43 | { | 73 | { |
@@ -49,17 +79,17 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) | |||
49 | return 0; | 79 | return 0; |
50 | } | 80 | } |
51 | fence->seq = atomic_add_return(1, &rdev->fence_drv.seq); | 81 | fence->seq = atomic_add_return(1, &rdev->fence_drv.seq); |
52 | if (!rdev->cp.ready) { | 82 | if (!rdev->cp.ready) |
53 | /* FIXME: cp is not running assume everythings is done right | 83 | /* FIXME: cp is not running assume everythings is done right |
54 | * away | 84 | * away |
55 | */ | 85 | */ |
56 | WREG32(rdev->fence_drv.scratch_reg, fence->seq); | 86 | radeon_fence_write(rdev, fence->seq); |
57 | } else | 87 | else |
58 | radeon_fence_ring_emit(rdev, fence); | 88 | radeon_fence_ring_emit(rdev, fence); |
59 | 89 | ||
90 | trace_radeon_fence_emit(rdev->ddev, fence->seq); | ||
60 | fence->emited = true; | 91 | fence->emited = true; |
61 | list_del(&fence->list); | 92 | list_move_tail(&fence->list, &rdev->fence_drv.emited); |
62 | list_add_tail(&fence->list, &rdev->fence_drv.emited); | ||
63 | write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); | 93 | write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); |
64 | return 0; | 94 | return 0; |
65 | } | 95 | } |
@@ -72,7 +102,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) | |||
72 | bool wake = false; | 102 | bool wake = false; |
73 | unsigned long cjiffies; | 103 | unsigned long cjiffies; |
74 | 104 | ||
75 | seq = RREG32(rdev->fence_drv.scratch_reg); | 105 | seq = radeon_fence_read(rdev); |
76 | if (seq != rdev->fence_drv.last_seq) { | 106 | if (seq != rdev->fence_drv.last_seq) { |
77 | rdev->fence_drv.last_seq = seq; | 107 | rdev->fence_drv.last_seq = seq; |
78 | rdev->fence_drv.last_jiffies = jiffies; | 108 | rdev->fence_drv.last_jiffies = jiffies; |
@@ -111,8 +141,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) | |||
111 | i = n; | 141 | i = n; |
112 | do { | 142 | do { |
113 | n = i->prev; | 143 | n = i->prev; |
114 | list_del(i); | 144 | list_move_tail(i, &rdev->fence_drv.signaled); |
115 | list_add_tail(i, &rdev->fence_drv.signaled); | ||
116 | fence = list_entry(i, struct radeon_fence, list); | 145 | fence = list_entry(i, struct radeon_fence, list); |
117 | fence->signaled = true; | 146 | fence->signaled = true; |
118 | i = n; | 147 | i = n; |
@@ -205,6 +234,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) | |||
205 | retry: | 234 | retry: |
206 | /* save current sequence used to check for GPU lockup */ | 235 | /* save current sequence used to check for GPU lockup */ |
207 | seq = rdev->fence_drv.last_seq; | 236 | seq = rdev->fence_drv.last_seq; |
237 | trace_radeon_fence_wait_begin(rdev->ddev, seq); | ||
208 | if (intr) { | 238 | if (intr) { |
209 | radeon_irq_kms_sw_irq_get(rdev); | 239 | radeon_irq_kms_sw_irq_get(rdev); |
210 | r = wait_event_interruptible_timeout(rdev->fence_drv.queue, | 240 | r = wait_event_interruptible_timeout(rdev->fence_drv.queue, |
@@ -219,6 +249,7 @@ retry: | |||
219 | radeon_fence_signaled(fence), timeout); | 249 | radeon_fence_signaled(fence), timeout); |
220 | radeon_irq_kms_sw_irq_put(rdev); | 250 | radeon_irq_kms_sw_irq_put(rdev); |
221 | } | 251 | } |
252 | trace_radeon_fence_wait_end(rdev->ddev, seq); | ||
222 | if (unlikely(!radeon_fence_signaled(fence))) { | 253 | if (unlikely(!radeon_fence_signaled(fence))) { |
223 | /* we were interrupted for some reason and fence isn't | 254 | /* we were interrupted for some reason and fence isn't |
224 | * isn't signaled yet, resume wait | 255 | * isn't signaled yet, resume wait |
@@ -232,7 +263,8 @@ retry: | |||
232 | */ | 263 | */ |
233 | if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { | 264 | if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { |
234 | /* good news we believe it's a lockup */ | 265 | /* good news we believe it's a lockup */ |
235 | WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq); | 266 | WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", |
267 | fence->seq, seq); | ||
236 | /* FIXME: what should we do ? marking everyone | 268 | /* FIXME: what should we do ? marking everyone |
237 | * as signaled for now | 269 | * as signaled for now |
238 | */ | 270 | */ |
@@ -240,7 +272,7 @@ retry: | |||
240 | r = radeon_gpu_reset(rdev); | 272 | r = radeon_gpu_reset(rdev); |
241 | if (r) | 273 | if (r) |
242 | return r; | 274 | return r; |
243 | WREG32(rdev->fence_drv.scratch_reg, fence->seq); | 275 | radeon_fence_write(rdev, fence->seq); |
244 | rdev->gpu_lockup = false; | 276 | rdev->gpu_lockup = false; |
245 | } | 277 | } |
246 | timeout = RADEON_FENCE_JIFFIES_TIMEOUT; | 278 | timeout = RADEON_FENCE_JIFFIES_TIMEOUT; |
@@ -311,7 +343,7 @@ void radeon_fence_unref(struct radeon_fence **fence) | |||
311 | 343 | ||
312 | *fence = NULL; | 344 | *fence = NULL; |
313 | if (tmp) { | 345 | if (tmp) { |
314 | kref_put(&tmp->kref, &radeon_fence_destroy); | 346 | kref_put(&tmp->kref, radeon_fence_destroy); |
315 | } | 347 | } |
316 | } | 348 | } |
317 | 349 | ||
@@ -340,7 +372,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev) | |||
340 | write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); | 372 | write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); |
341 | return r; | 373 | return r; |
342 | } | 374 | } |
343 | WREG32(rdev->fence_drv.scratch_reg, 0); | 375 | radeon_fence_write(rdev, 0); |
344 | atomic_set(&rdev->fence_drv.seq, 0); | 376 | atomic_set(&rdev->fence_drv.seq, 0); |
345 | INIT_LIST_HEAD(&rdev->fence_drv.created); | 377 | INIT_LIST_HEAD(&rdev->fence_drv.created); |
346 | INIT_LIST_HEAD(&rdev->fence_drv.emited); | 378 | INIT_LIST_HEAD(&rdev->fence_drv.emited); |
@@ -380,7 +412,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data) | |||
380 | struct radeon_fence *fence; | 412 | struct radeon_fence *fence; |
381 | 413 | ||
382 | seq_printf(m, "Last signaled fence 0x%08X\n", | 414 | seq_printf(m, "Last signaled fence 0x%08X\n", |
383 | RREG32(rdev->fence_drv.scratch_reg)); | 415 | radeon_fence_read(rdev)); |
384 | if (!list_empty(&rdev->fence_drv.emited)) { | 416 | if (!list_empty(&rdev->fence_drv.emited)) { |
385 | fence = list_entry(rdev->fence_drv.emited.prev, | 417 | fence = list_entry(rdev->fence_drv.emited.prev, |
386 | struct radeon_fence, list); | 418 | struct radeon_fence, list); |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index e65b90317fab..a533f52fd163 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -78,9 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) | |||
78 | int r; | 78 | int r; |
79 | 79 | ||
80 | if (rdev->gart.table.vram.robj == NULL) { | 80 | if (rdev->gart.table.vram.robj == NULL) { |
81 | r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, | 81 | r = radeon_bo_create(rdev, rdev->gart.table_size, |
82 | true, RADEON_GEM_DOMAIN_VRAM, | 82 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
83 | &rdev->gart.table.vram.robj); | 83 | &rdev->gart.table.vram.robj); |
84 | if (r) { | 84 | if (r) { |
85 | return r; | 85 | return r; |
86 | } | 86 | } |
@@ -149,8 +149,9 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | |||
149 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); | 149 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
150 | for (i = 0; i < pages; i++, p++) { | 150 | for (i = 0; i < pages; i++, p++) { |
151 | if (rdev->gart.pages[p]) { | 151 | if (rdev->gart.pages[p]) { |
152 | pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], | 152 | if (!rdev->gart.ttm_alloced[p]) |
153 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 153 | pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], |
154 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
154 | rdev->gart.pages[p] = NULL; | 155 | rdev->gart.pages[p] = NULL; |
155 | rdev->gart.pages_addr[p] = rdev->dummy_page.addr; | 156 | rdev->gart.pages_addr[p] = rdev->dummy_page.addr; |
156 | page_base = rdev->gart.pages_addr[p]; | 157 | page_base = rdev->gart.pages_addr[p]; |
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | |||
165 | } | 166 | } |
166 | 167 | ||
167 | int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | 168 | int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, |
168 | int pages, struct page **pagelist) | 169 | int pages, struct page **pagelist, dma_addr_t *dma_addr) |
169 | { | 170 | { |
170 | unsigned t; | 171 | unsigned t; |
171 | unsigned p; | 172 | unsigned p; |
@@ -180,15 +181,22 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
180 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); | 181 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
181 | 182 | ||
182 | for (i = 0; i < pages; i++, p++) { | 183 | for (i = 0; i < pages; i++, p++) { |
183 | /* we need to support large memory configurations */ | 184 | /* we reverted the patch using dma_addr in TTM for now but this |
184 | /* assume that unbind have already been call on the range */ | 185 | * code stops building on alpha so just comment it out for now */ |
185 | rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i], | 186 | if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */ |
187 | rdev->gart.ttm_alloced[p] = true; | ||
188 | rdev->gart.pages_addr[p] = dma_addr[i]; | ||
189 | } else { | ||
190 | /* we need to support large memory configurations */ | ||
191 | /* assume that unbind have already been call on the range */ | ||
192 | rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i], | ||
186 | 0, PAGE_SIZE, | 193 | 0, PAGE_SIZE, |
187 | PCI_DMA_BIDIRECTIONAL); | 194 | PCI_DMA_BIDIRECTIONAL); |
188 | if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) { | 195 | if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) { |
189 | /* FIXME: failed to map page (return -ENOMEM?) */ | 196 | /* FIXME: failed to map page (return -ENOMEM?) */ |
190 | radeon_gart_unbind(rdev, offset, pages); | 197 | radeon_gart_unbind(rdev, offset, pages); |
191 | return -ENOMEM; | 198 | return -ENOMEM; |
199 | } | ||
192 | } | 200 | } |
193 | rdev->gart.pages[p] = pagelist[i]; | 201 | rdev->gart.pages[p] = pagelist[i]; |
194 | page_base = rdev->gart.pages_addr[p]; | 202 | page_base = rdev->gart.pages_addr[p]; |
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev) | |||
251 | radeon_gart_fini(rdev); | 259 | radeon_gart_fini(rdev); |
252 | return -ENOMEM; | 260 | return -ENOMEM; |
253 | } | 261 | } |
262 | rdev->gart.ttm_alloced = kzalloc(sizeof(bool) * | ||
263 | rdev->gart.num_cpu_pages, GFP_KERNEL); | ||
264 | if (rdev->gart.ttm_alloced == NULL) { | ||
265 | radeon_gart_fini(rdev); | ||
266 | return -ENOMEM; | ||
267 | } | ||
254 | /* set GART entry to point to the dummy page by default */ | 268 | /* set GART entry to point to the dummy page by default */ |
255 | for (i = 0; i < rdev->gart.num_cpu_pages; i++) { | 269 | for (i = 0; i < rdev->gart.num_cpu_pages; i++) { |
256 | rdev->gart.pages_addr[i] = rdev->dummy_page.addr; | 270 | rdev->gart.pages_addr[i] = rdev->dummy_page.addr; |
@@ -267,6 +281,10 @@ void radeon_gart_fini(struct radeon_device *rdev) | |||
267 | rdev->gart.ready = false; | 281 | rdev->gart.ready = false; |
268 | kfree(rdev->gart.pages); | 282 | kfree(rdev->gart.pages); |
269 | kfree(rdev->gart.pages_addr); | 283 | kfree(rdev->gart.pages_addr); |
284 | kfree(rdev->gart.ttm_alloced); | ||
270 | rdev->gart.pages = NULL; | 285 | rdev->gart.pages = NULL; |
271 | rdev->gart.pages_addr = NULL; | 286 | rdev->gart.pages_addr = NULL; |
287 | rdev->gart.ttm_alloced = NULL; | ||
288 | |||
289 | radeon_dummy_page_fini(rdev); | ||
272 | } | 290 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index d1e595d91723..aa1ca2dea42f 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -32,21 +32,18 @@ | |||
32 | 32 | ||
33 | int radeon_gem_object_init(struct drm_gem_object *obj) | 33 | int radeon_gem_object_init(struct drm_gem_object *obj) |
34 | { | 34 | { |
35 | /* we do nothings here */ | 35 | BUG(); |
36 | |||
36 | return 0; | 37 | return 0; |
37 | } | 38 | } |
38 | 39 | ||
39 | void radeon_gem_object_free(struct drm_gem_object *gobj) | 40 | void radeon_gem_object_free(struct drm_gem_object *gobj) |
40 | { | 41 | { |
41 | struct radeon_bo *robj = gobj->driver_private; | 42 | struct radeon_bo *robj = gem_to_radeon_bo(gobj); |
42 | 43 | ||
43 | gobj->driver_private = NULL; | ||
44 | if (robj) { | 44 | if (robj) { |
45 | radeon_bo_unref(&robj); | 45 | radeon_bo_unref(&robj); |
46 | } | 46 | } |
47 | |||
48 | drm_gem_object_release(gobj); | ||
49 | kfree(gobj); | ||
50 | } | 47 | } |
51 | 48 | ||
52 | int radeon_gem_object_create(struct radeon_device *rdev, int size, | 49 | int radeon_gem_object_create(struct radeon_device *rdev, int size, |
@@ -54,36 +51,34 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, | |||
54 | bool discardable, bool kernel, | 51 | bool discardable, bool kernel, |
55 | struct drm_gem_object **obj) | 52 | struct drm_gem_object **obj) |
56 | { | 53 | { |
57 | struct drm_gem_object *gobj; | ||
58 | struct radeon_bo *robj; | 54 | struct radeon_bo *robj; |
59 | int r; | 55 | int r; |
60 | 56 | ||
61 | *obj = NULL; | 57 | *obj = NULL; |
62 | gobj = drm_gem_object_alloc(rdev->ddev, size); | ||
63 | if (!gobj) { | ||
64 | return -ENOMEM; | ||
65 | } | ||
66 | /* At least align on page size */ | 58 | /* At least align on page size */ |
67 | if (alignment < PAGE_SIZE) { | 59 | if (alignment < PAGE_SIZE) { |
68 | alignment = PAGE_SIZE; | 60 | alignment = PAGE_SIZE; |
69 | } | 61 | } |
70 | r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj); | 62 | r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj); |
71 | if (r) { | 63 | if (r) { |
72 | if (r != -ERESTARTSYS) | 64 | if (r != -ERESTARTSYS) |
73 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", | 65 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", |
74 | size, initial_domain, alignment, r); | 66 | size, initial_domain, alignment, r); |
75 | drm_gem_object_unreference_unlocked(gobj); | ||
76 | return r; | 67 | return r; |
77 | } | 68 | } |
78 | gobj->driver_private = robj; | 69 | *obj = &robj->gem_base; |
79 | *obj = gobj; | 70 | |
71 | mutex_lock(&rdev->gem.mutex); | ||
72 | list_add_tail(&robj->list, &rdev->gem.objects); | ||
73 | mutex_unlock(&rdev->gem.mutex); | ||
74 | |||
80 | return 0; | 75 | return 0; |
81 | } | 76 | } |
82 | 77 | ||
83 | int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, | 78 | int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, |
84 | uint64_t *gpu_addr) | 79 | uint64_t *gpu_addr) |
85 | { | 80 | { |
86 | struct radeon_bo *robj = obj->driver_private; | 81 | struct radeon_bo *robj = gem_to_radeon_bo(obj); |
87 | int r; | 82 | int r; |
88 | 83 | ||
89 | r = radeon_bo_reserve(robj, false); | 84 | r = radeon_bo_reserve(robj, false); |
@@ -96,7 +91,7 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, | |||
96 | 91 | ||
97 | void radeon_gem_object_unpin(struct drm_gem_object *obj) | 92 | void radeon_gem_object_unpin(struct drm_gem_object *obj) |
98 | { | 93 | { |
99 | struct radeon_bo *robj = obj->driver_private; | 94 | struct radeon_bo *robj = gem_to_radeon_bo(obj); |
100 | int r; | 95 | int r; |
101 | 96 | ||
102 | r = radeon_bo_reserve(robj, false); | 97 | r = radeon_bo_reserve(robj, false); |
@@ -114,7 +109,7 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj, | |||
114 | int r; | 109 | int r; |
115 | 110 | ||
116 | /* FIXME: reeimplement */ | 111 | /* FIXME: reeimplement */ |
117 | robj = gobj->driver_private; | 112 | robj = gem_to_radeon_bo(gobj); |
118 | /* work out where to validate the buffer to */ | 113 | /* work out where to validate the buffer to */ |
119 | domain = wdomain; | 114 | domain = wdomain; |
120 | if (!domain) { | 115 | if (!domain) { |
@@ -156,9 +151,12 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, | |||
156 | { | 151 | { |
157 | struct radeon_device *rdev = dev->dev_private; | 152 | struct radeon_device *rdev = dev->dev_private; |
158 | struct drm_radeon_gem_info *args = data; | 153 | struct drm_radeon_gem_info *args = data; |
154 | struct ttm_mem_type_manager *man; | ||
155 | |||
156 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; | ||
159 | 157 | ||
160 | args->vram_size = rdev->mc.real_vram_size; | 158 | args->vram_size = rdev->mc.real_vram_size; |
161 | args->vram_visible = rdev->mc.real_vram_size; | 159 | args->vram_visible = (u64)man->size << PAGE_SHIFT; |
162 | if (rdev->stollen_vga_memory) | 160 | if (rdev->stollen_vga_memory) |
163 | args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); | 161 | args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); |
164 | args->vram_visible -= radeon_fbdev_total_size(rdev); | 162 | args->vram_visible -= radeon_fbdev_total_size(rdev); |
@@ -228,7 +226,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
228 | if (gobj == NULL) { | 226 | if (gobj == NULL) { |
229 | return -ENOENT; | 227 | return -ENOENT; |
230 | } | 228 | } |
231 | robj = gobj->driver_private; | 229 | robj = gem_to_radeon_bo(gobj); |
232 | 230 | ||
233 | r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); | 231 | r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); |
234 | 232 | ||
@@ -236,23 +234,31 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
236 | return r; | 234 | return r; |
237 | } | 235 | } |
238 | 236 | ||
239 | int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, | 237 | int radeon_mode_dumb_mmap(struct drm_file *filp, |
240 | struct drm_file *filp) | 238 | struct drm_device *dev, |
239 | uint32_t handle, uint64_t *offset_p) | ||
241 | { | 240 | { |
242 | struct drm_radeon_gem_mmap *args = data; | ||
243 | struct drm_gem_object *gobj; | 241 | struct drm_gem_object *gobj; |
244 | struct radeon_bo *robj; | 242 | struct radeon_bo *robj; |
245 | 243 | ||
246 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 244 | gobj = drm_gem_object_lookup(dev, filp, handle); |
247 | if (gobj == NULL) { | 245 | if (gobj == NULL) { |
248 | return -ENOENT; | 246 | return -ENOENT; |
249 | } | 247 | } |
250 | robj = gobj->driver_private; | 248 | robj = gem_to_radeon_bo(gobj); |
251 | args->addr_ptr = radeon_bo_mmap_offset(robj); | 249 | *offset_p = radeon_bo_mmap_offset(robj); |
252 | drm_gem_object_unreference_unlocked(gobj); | 250 | drm_gem_object_unreference_unlocked(gobj); |
253 | return 0; | 251 | return 0; |
254 | } | 252 | } |
255 | 253 | ||
254 | int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, | ||
255 | struct drm_file *filp) | ||
256 | { | ||
257 | struct drm_radeon_gem_mmap *args = data; | ||
258 | |||
259 | return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr); | ||
260 | } | ||
261 | |||
256 | int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | 262 | int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, |
257 | struct drm_file *filp) | 263 | struct drm_file *filp) |
258 | { | 264 | { |
@@ -266,7 +272,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
266 | if (gobj == NULL) { | 272 | if (gobj == NULL) { |
267 | return -ENOENT; | 273 | return -ENOENT; |
268 | } | 274 | } |
269 | robj = gobj->driver_private; | 275 | robj = gem_to_radeon_bo(gobj); |
270 | r = radeon_bo_wait(robj, &cur_placement, true); | 276 | r = radeon_bo_wait(robj, &cur_placement, true); |
271 | switch (cur_placement) { | 277 | switch (cur_placement) { |
272 | case TTM_PL_VRAM: | 278 | case TTM_PL_VRAM: |
@@ -296,7 +302,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
296 | if (gobj == NULL) { | 302 | if (gobj == NULL) { |
297 | return -ENOENT; | 303 | return -ENOENT; |
298 | } | 304 | } |
299 | robj = gobj->driver_private; | 305 | robj = gem_to_radeon_bo(gobj); |
300 | r = radeon_bo_wait(robj, NULL, false); | 306 | r = radeon_bo_wait(robj, NULL, false); |
301 | /* callback hw specific functions if any */ | 307 | /* callback hw specific functions if any */ |
302 | if (robj->rdev->asic->ioctl_wait_idle) | 308 | if (robj->rdev->asic->ioctl_wait_idle) |
@@ -317,7 +323,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | |||
317 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 323 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
318 | if (gobj == NULL) | 324 | if (gobj == NULL) |
319 | return -ENOENT; | 325 | return -ENOENT; |
320 | robj = gobj->driver_private; | 326 | robj = gem_to_radeon_bo(gobj); |
321 | r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); | 327 | r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); |
322 | drm_gem_object_unreference_unlocked(gobj); | 328 | drm_gem_object_unreference_unlocked(gobj); |
323 | return r; | 329 | return r; |
@@ -335,7 +341,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | |||
335 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 341 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
336 | if (gobj == NULL) | 342 | if (gobj == NULL) |
337 | return -ENOENT; | 343 | return -ENOENT; |
338 | rbo = gobj->driver_private; | 344 | rbo = gem_to_radeon_bo(gobj); |
339 | r = radeon_bo_reserve(rbo, false); | 345 | r = radeon_bo_reserve(rbo, false); |
340 | if (unlikely(r != 0)) | 346 | if (unlikely(r != 0)) |
341 | goto out; | 347 | goto out; |
@@ -345,3 +351,40 @@ out: | |||
345 | drm_gem_object_unreference_unlocked(gobj); | 351 | drm_gem_object_unreference_unlocked(gobj); |
346 | return r; | 352 | return r; |
347 | } | 353 | } |
354 | |||
355 | int radeon_mode_dumb_create(struct drm_file *file_priv, | ||
356 | struct drm_device *dev, | ||
357 | struct drm_mode_create_dumb *args) | ||
358 | { | ||
359 | struct radeon_device *rdev = dev->dev_private; | ||
360 | struct drm_gem_object *gobj; | ||
361 | uint32_t handle; | ||
362 | int r; | ||
363 | |||
364 | args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8); | ||
365 | args->size = args->pitch * args->height; | ||
366 | args->size = ALIGN(args->size, PAGE_SIZE); | ||
367 | |||
368 | r = radeon_gem_object_create(rdev, args->size, 0, | ||
369 | RADEON_GEM_DOMAIN_VRAM, | ||
370 | false, ttm_bo_type_device, | ||
371 | &gobj); | ||
372 | if (r) | ||
373 | return -ENOMEM; | ||
374 | |||
375 | r = drm_gem_handle_create(file_priv, gobj, &handle); | ||
376 | /* drop reference from allocate - handle holds it now */ | ||
377 | drm_gem_object_unreference_unlocked(gobj); | ||
378 | if (r) { | ||
379 | return r; | ||
380 | } | ||
381 | args->handle = handle; | ||
382 | return 0; | ||
383 | } | ||
384 | |||
385 | int radeon_mode_dumb_destroy(struct drm_file *file_priv, | ||
386 | struct drm_device *dev, | ||
387 | uint32_t handle) | ||
388 | { | ||
389 | return drm_gem_handle_delete(file_priv, handle); | ||
390 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 6a13ee38a5b9..781196db792f 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) | |||
53 | }; | 53 | }; |
54 | 54 | ||
55 | /* on hw with routers, select right port */ | 55 | /* on hw with routers, select right port */ |
56 | if (radeon_connector->router.valid) | 56 | if (radeon_connector->router.ddc_valid) |
57 | radeon_router_select_port(radeon_connector); | 57 | radeon_router_select_ddc_port(radeon_connector); |
58 | 58 | ||
59 | ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); | 59 | ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); |
60 | if (ret == 2) | 60 | if (ret == 2) |
@@ -888,6 +888,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
888 | 888 | ||
889 | i2c->rec = *rec; | 889 | i2c->rec = *rec; |
890 | i2c->adapter.owner = THIS_MODULE; | 890 | i2c->adapter.owner = THIS_MODULE; |
891 | i2c->adapter.class = I2C_CLASS_DDC; | ||
891 | i2c->dev = dev; | 892 | i2c->dev = dev; |
892 | i2c_set_adapdata(&i2c->adapter, i2c); | 893 | i2c_set_adapdata(&i2c->adapter, i2c); |
893 | if (rec->mm_i2c || | 894 | if (rec->mm_i2c || |
@@ -896,7 +897,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
896 | ((rdev->family <= CHIP_RS480) || | 897 | ((rdev->family <= CHIP_RS480) || |
897 | ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) { | 898 | ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) { |
898 | /* set the radeon hw i2c adapter */ | 899 | /* set the radeon hw i2c adapter */ |
899 | sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name); | 900 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), |
901 | "Radeon i2c hw bus %s", name); | ||
900 | i2c->adapter.algo = &radeon_i2c_algo; | 902 | i2c->adapter.algo = &radeon_i2c_algo; |
901 | ret = i2c_add_adapter(&i2c->adapter); | 903 | ret = i2c_add_adapter(&i2c->adapter); |
902 | if (ret) { | 904 | if (ret) { |
@@ -905,7 +907,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
905 | } | 907 | } |
906 | } else { | 908 | } else { |
907 | /* set the radeon bit adapter */ | 909 | /* set the radeon bit adapter */ |
908 | sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name); | 910 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), |
911 | "Radeon i2c bit bus %s", name); | ||
909 | i2c->adapter.algo_data = &i2c->algo.bit; | 912 | i2c->adapter.algo_data = &i2c->algo.bit; |
910 | i2c->algo.bit.pre_xfer = pre_xfer; | 913 | i2c->algo.bit.pre_xfer = pre_xfer; |
911 | i2c->algo.bit.post_xfer = post_xfer; | 914 | i2c->algo.bit.post_xfer = post_xfer; |
@@ -945,7 +948,10 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev, | |||
945 | 948 | ||
946 | i2c->rec = *rec; | 949 | i2c->rec = *rec; |
947 | i2c->adapter.owner = THIS_MODULE; | 950 | i2c->adapter.owner = THIS_MODULE; |
951 | i2c->adapter.class = I2C_CLASS_DDC; | ||
948 | i2c->dev = dev; | 952 | i2c->dev = dev; |
953 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), | ||
954 | "Radeon aux bus %s", name); | ||
949 | i2c_set_adapdata(&i2c->adapter, i2c); | 955 | i2c_set_adapdata(&i2c->adapter, i2c); |
950 | i2c->adapter.algo_data = &i2c->algo.dp; | 956 | i2c->adapter.algo_data = &i2c->algo.dp; |
951 | i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch; | 957 | i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch; |
@@ -1058,7 +1064,7 @@ void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, | |||
1058 | *val = in_buf[0]; | 1064 | *val = in_buf[0]; |
1059 | DRM_DEBUG("val = 0x%02x\n", *val); | 1065 | DRM_DEBUG("val = 0x%02x\n", *val); |
1060 | } else { | 1066 | } else { |
1061 | DRM_ERROR("i2c 0x%02x 0x%02x read failed\n", | 1067 | DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n", |
1062 | addr, *val); | 1068 | addr, *val); |
1063 | } | 1069 | } |
1064 | } | 1070 | } |
@@ -1080,30 +1086,61 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus, | |||
1080 | out_buf[1] = val; | 1086 | out_buf[1] = val; |
1081 | 1087 | ||
1082 | if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) | 1088 | if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) |
1083 | DRM_ERROR("i2c 0x%02x 0x%02x write failed\n", | 1089 | DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n", |
1084 | addr, val); | 1090 | addr, val); |
1085 | } | 1091 | } |
1086 | 1092 | ||
1087 | /* router switching */ | 1093 | /* ddc router switching */ |
1088 | void radeon_router_select_port(struct radeon_connector *radeon_connector) | 1094 | void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector) |
1089 | { | 1095 | { |
1090 | u8 val; | 1096 | u8 val; |
1091 | 1097 | ||
1092 | if (!radeon_connector->router.valid) | 1098 | if (!radeon_connector->router.ddc_valid) |
1099 | return; | ||
1100 | |||
1101 | if (!radeon_connector->router_bus) | ||
1102 | return; | ||
1103 | |||
1104 | radeon_i2c_get_byte(radeon_connector->router_bus, | ||
1105 | radeon_connector->router.i2c_addr, | ||
1106 | 0x3, &val); | ||
1107 | val &= ~radeon_connector->router.ddc_mux_control_pin; | ||
1108 | radeon_i2c_put_byte(radeon_connector->router_bus, | ||
1109 | radeon_connector->router.i2c_addr, | ||
1110 | 0x3, val); | ||
1111 | radeon_i2c_get_byte(radeon_connector->router_bus, | ||
1112 | radeon_connector->router.i2c_addr, | ||
1113 | 0x1, &val); | ||
1114 | val &= ~radeon_connector->router.ddc_mux_control_pin; | ||
1115 | val |= radeon_connector->router.ddc_mux_state; | ||
1116 | radeon_i2c_put_byte(radeon_connector->router_bus, | ||
1117 | radeon_connector->router.i2c_addr, | ||
1118 | 0x1, val); | ||
1119 | } | ||
1120 | |||
1121 | /* clock/data router switching */ | ||
1122 | void radeon_router_select_cd_port(struct radeon_connector *radeon_connector) | ||
1123 | { | ||
1124 | u8 val; | ||
1125 | |||
1126 | if (!radeon_connector->router.cd_valid) | ||
1127 | return; | ||
1128 | |||
1129 | if (!radeon_connector->router_bus) | ||
1093 | return; | 1130 | return; |
1094 | 1131 | ||
1095 | radeon_i2c_get_byte(radeon_connector->router_bus, | 1132 | radeon_i2c_get_byte(radeon_connector->router_bus, |
1096 | radeon_connector->router.i2c_addr, | 1133 | radeon_connector->router.i2c_addr, |
1097 | 0x3, &val); | 1134 | 0x3, &val); |
1098 | val &= radeon_connector->router.mux_control_pin; | 1135 | val &= ~radeon_connector->router.cd_mux_control_pin; |
1099 | radeon_i2c_put_byte(radeon_connector->router_bus, | 1136 | radeon_i2c_put_byte(radeon_connector->router_bus, |
1100 | radeon_connector->router.i2c_addr, | 1137 | radeon_connector->router.i2c_addr, |
1101 | 0x3, val); | 1138 | 0x3, val); |
1102 | radeon_i2c_get_byte(radeon_connector->router_bus, | 1139 | radeon_i2c_get_byte(radeon_connector->router_bus, |
1103 | radeon_connector->router.i2c_addr, | 1140 | radeon_connector->router.i2c_addr, |
1104 | 0x1, &val); | 1141 | 0x1, &val); |
1105 | val &= radeon_connector->router.mux_control_pin; | 1142 | val &= ~radeon_connector->router.cd_mux_control_pin; |
1106 | val |= radeon_connector->router.mux_state; | 1143 | val |= radeon_connector->router.cd_mux_state; |
1107 | radeon_i2c_put_byte(radeon_connector->router_bus, | 1144 | radeon_i2c_put_byte(radeon_connector->router_bus, |
1108 | radeon_connector->router.i2c_addr, | 1145 | radeon_connector->router.i2c_addr, |
1109 | 0x1, val); | 1146 | 0x1, val); |
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index 2f349a300195..465746bd51b7 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c | |||
@@ -76,7 +76,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc) | |||
76 | default: | 76 | default: |
77 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", | 77 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", |
78 | crtc); | 78 | crtc); |
79 | return EINVAL; | 79 | return -EINVAL; |
80 | } | 80 | } |
81 | } else { | 81 | } else { |
82 | switch (crtc) { | 82 | switch (crtc) { |
@@ -89,7 +89,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc) | |||
89 | default: | 89 | default: |
90 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", | 90 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", |
91 | crtc); | 91 | crtc); |
92 | return EINVAL; | 92 | return -EINVAL; |
93 | } | 93 | } |
94 | } | 94 | } |
95 | 95 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index a108c7ed14f5..9ec830c77af0 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
@@ -64,15 +64,15 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) | |||
64 | struct radeon_device *rdev = dev->dev_private; | 64 | struct radeon_device *rdev = dev->dev_private; |
65 | unsigned i; | 65 | unsigned i; |
66 | 66 | ||
67 | INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); | ||
68 | |||
69 | /* Disable *all* interrupts */ | 67 | /* Disable *all* interrupts */ |
70 | rdev->irq.sw_int = false; | 68 | rdev->irq.sw_int = false; |
71 | rdev->irq.gui_idle = false; | 69 | rdev->irq.gui_idle = false; |
72 | for (i = 0; i < rdev->num_crtc; i++) | 70 | for (i = 0; i < rdev->num_crtc; i++) |
73 | rdev->irq.crtc_vblank_int[i] = false; | 71 | rdev->irq.crtc_vblank_int[i] = false; |
74 | for (i = 0; i < 6; i++) | 72 | for (i = 0; i < 6; i++) { |
75 | rdev->irq.hpd[i] = false; | 73 | rdev->irq.hpd[i] = false; |
74 | rdev->irq.pflip[i] = false; | ||
75 | } | ||
76 | radeon_irq_set(rdev); | 76 | radeon_irq_set(rdev); |
77 | /* Clear bits */ | 77 | /* Clear bits */ |
78 | radeon_irq_process(rdev); | 78 | radeon_irq_process(rdev); |
@@ -101,16 +101,23 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) | |||
101 | rdev->irq.gui_idle = false; | 101 | rdev->irq.gui_idle = false; |
102 | for (i = 0; i < rdev->num_crtc; i++) | 102 | for (i = 0; i < rdev->num_crtc; i++) |
103 | rdev->irq.crtc_vblank_int[i] = false; | 103 | rdev->irq.crtc_vblank_int[i] = false; |
104 | for (i = 0; i < 6; i++) | 104 | for (i = 0; i < 6; i++) { |
105 | rdev->irq.hpd[i] = false; | 105 | rdev->irq.hpd[i] = false; |
106 | rdev->irq.pflip[i] = false; | ||
107 | } | ||
106 | radeon_irq_set(rdev); | 108 | radeon_irq_set(rdev); |
107 | } | 109 | } |
108 | 110 | ||
109 | int radeon_irq_kms_init(struct radeon_device *rdev) | 111 | int radeon_irq_kms_init(struct radeon_device *rdev) |
110 | { | 112 | { |
113 | int i; | ||
111 | int r = 0; | 114 | int r = 0; |
112 | 115 | ||
116 | INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); | ||
117 | |||
113 | spin_lock_init(&rdev->irq.sw_lock); | 118 | spin_lock_init(&rdev->irq.sw_lock); |
119 | for (i = 0; i < rdev->num_crtc; i++) | ||
120 | spin_lock_init(&rdev->irq.pflip_lock[i]); | ||
114 | r = drm_vblank_init(rdev->ddev, rdev->num_crtc); | 121 | r = drm_vblank_init(rdev->ddev, rdev->num_crtc); |
115 | if (r) { | 122 | if (r) { |
116 | return r; | 123 | return r; |
@@ -121,7 +128,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
121 | * chips. Disable MSI on them for now. | 128 | * chips. Disable MSI on them for now. |
122 | */ | 129 | */ |
123 | if ((rdev->family >= CHIP_RV380) && | 130 | if ((rdev->family >= CHIP_RV380) && |
124 | (!(rdev->flags & RADEON_IS_IGP)) && | 131 | ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) && |
125 | (!(rdev->flags & RADEON_IS_AGP))) { | 132 | (!(rdev->flags & RADEON_IS_AGP))) { |
126 | int ret = pci_enable_msi(rdev->pdev); | 133 | int ret = pci_enable_msi(rdev->pdev); |
127 | if (!ret) { | 134 | if (!ret) { |
@@ -148,6 +155,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) | |||
148 | if (rdev->msi_enabled) | 155 | if (rdev->msi_enabled) |
149 | pci_disable_msi(rdev->pdev); | 156 | pci_disable_msi(rdev->pdev); |
150 | } | 157 | } |
158 | flush_work_sync(&rdev->hotplug_work); | ||
151 | } | 159 | } |
152 | 160 | ||
153 | void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev) | 161 | void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev) |
@@ -175,3 +183,34 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev) | |||
175 | spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); | 183 | spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); |
176 | } | 184 | } |
177 | 185 | ||
186 | void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc) | ||
187 | { | ||
188 | unsigned long irqflags; | ||
189 | |||
190 | if (crtc < 0 || crtc >= rdev->num_crtc) | ||
191 | return; | ||
192 | |||
193 | spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags); | ||
194 | if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) { | ||
195 | rdev->irq.pflip[crtc] = true; | ||
196 | radeon_irq_set(rdev); | ||
197 | } | ||
198 | spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags); | ||
199 | } | ||
200 | |||
201 | void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc) | ||
202 | { | ||
203 | unsigned long irqflags; | ||
204 | |||
205 | if (crtc < 0 || crtc >= rdev->num_crtc) | ||
206 | return; | ||
207 | |||
208 | spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags); | ||
209 | BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0); | ||
210 | if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) { | ||
211 | rdev->irq.pflip[crtc] = false; | ||
212 | radeon_irq_set(rdev); | ||
213 | } | ||
214 | spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags); | ||
215 | } | ||
216 | |||
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 8fbbe1c6ebbd..bd58af658581 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -58,9 +58,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) | |||
58 | dev->dev_private = (void *)rdev; | 58 | dev->dev_private = (void *)rdev; |
59 | 59 | ||
60 | /* update BUS flag */ | 60 | /* update BUS flag */ |
61 | if (drm_device_is_agp(dev)) { | 61 | if (drm_pci_device_is_agp(dev)) { |
62 | flags |= RADEON_IS_AGP; | 62 | flags |= RADEON_IS_AGP; |
63 | } else if (drm_device_is_pcie(dev)) { | 63 | } else if (drm_pci_device_is_pcie(dev)) { |
64 | flags |= RADEON_IS_PCIE; | 64 | flags |= RADEON_IS_PCIE; |
65 | } else { | 65 | } else { |
66 | flags |= RADEON_IS_PCI; | 66 | flags |= RADEON_IS_PCI; |
@@ -96,9 +96,27 @@ out: | |||
96 | return r; | 96 | return r; |
97 | } | 97 | } |
98 | 98 | ||
99 | static void radeon_set_filp_rights(struct drm_device *dev, | ||
100 | struct drm_file **owner, | ||
101 | struct drm_file *applier, | ||
102 | uint32_t *value) | ||
103 | { | ||
104 | mutex_lock(&dev->struct_mutex); | ||
105 | if (*value == 1) { | ||
106 | /* wants rights */ | ||
107 | if (!*owner) | ||
108 | *owner = applier; | ||
109 | } else if (*value == 0) { | ||
110 | /* revokes rights */ | ||
111 | if (*owner == applier) | ||
112 | *owner = NULL; | ||
113 | } | ||
114 | *value = *owner == applier ? 1 : 0; | ||
115 | mutex_unlock(&dev->struct_mutex); | ||
116 | } | ||
99 | 117 | ||
100 | /* | 118 | /* |
101 | * Userspace get informations ioctl | 119 | * Userspace get information ioctl |
102 | */ | 120 | */ |
103 | int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | 121 | int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
104 | { | 122 | { |
@@ -151,7 +169,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
151 | value = rdev->accel_working; | 169 | value = rdev->accel_working; |
152 | break; | 170 | break; |
153 | case RADEON_INFO_TILING_CONFIG: | 171 | case RADEON_INFO_TILING_CONFIG: |
154 | if (rdev->family >= CHIP_CEDAR) | 172 | if (rdev->family >= CHIP_CAYMAN) |
173 | value = rdev->config.cayman.tile_config; | ||
174 | else if (rdev->family >= CHIP_CEDAR) | ||
155 | value = rdev->config.evergreen.tile_config; | 175 | value = rdev->config.evergreen.tile_config; |
156 | else if (rdev->family >= CHIP_RV770) | 176 | else if (rdev->family >= CHIP_RV770) |
157 | value = rdev->config.rv770.tile_config; | 177 | value = rdev->config.rv770.tile_config; |
@@ -173,18 +193,49 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
173 | DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value); | 193 | DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value); |
174 | return -EINVAL; | 194 | return -EINVAL; |
175 | } | 195 | } |
176 | mutex_lock(&dev->struct_mutex); | 196 | radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value); |
177 | if (value == 1) { | 197 | break; |
178 | /* wants hyper-z */ | 198 | case RADEON_INFO_WANT_CMASK: |
179 | if (!rdev->hyperz_filp) | 199 | /* The same logic as Hyper-Z. */ |
180 | rdev->hyperz_filp = filp; | 200 | if (value >= 2) { |
181 | } else if (value == 0) { | 201 | DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value); |
182 | /* revokes hyper-z */ | 202 | return -EINVAL; |
183 | if (rdev->hyperz_filp == filp) | 203 | } |
184 | rdev->hyperz_filp = NULL; | 204 | radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value); |
205 | break; | ||
206 | case RADEON_INFO_CLOCK_CRYSTAL_FREQ: | ||
207 | /* return clock value in KHz */ | ||
208 | value = rdev->clock.spll.reference_freq * 10; | ||
209 | break; | ||
210 | case RADEON_INFO_NUM_BACKENDS: | ||
211 | if (rdev->family >= CHIP_CAYMAN) | ||
212 | value = rdev->config.cayman.max_backends_per_se * | ||
213 | rdev->config.cayman.max_shader_engines; | ||
214 | else if (rdev->family >= CHIP_CEDAR) | ||
215 | value = rdev->config.evergreen.max_backends; | ||
216 | else if (rdev->family >= CHIP_RV770) | ||
217 | value = rdev->config.rv770.max_backends; | ||
218 | else if (rdev->family >= CHIP_R600) | ||
219 | value = rdev->config.r600.max_backends; | ||
220 | else { | ||
221 | return -EINVAL; | ||
222 | } | ||
223 | break; | ||
224 | case RADEON_INFO_NUM_TILE_PIPES: | ||
225 | if (rdev->family >= CHIP_CAYMAN) | ||
226 | value = rdev->config.cayman.max_tile_pipes; | ||
227 | else if (rdev->family >= CHIP_CEDAR) | ||
228 | value = rdev->config.evergreen.max_tile_pipes; | ||
229 | else if (rdev->family >= CHIP_RV770) | ||
230 | value = rdev->config.rv770.max_tile_pipes; | ||
231 | else if (rdev->family >= CHIP_R600) | ||
232 | value = rdev->config.r600.max_tile_pipes; | ||
233 | else { | ||
234 | return -EINVAL; | ||
185 | } | 235 | } |
186 | value = rdev->hyperz_filp == filp ? 1 : 0; | 236 | break; |
187 | mutex_unlock(&dev->struct_mutex); | 237 | case RADEON_INFO_FUSION_GART_WORKING: |
238 | value = 1; | ||
188 | break; | 239 | break; |
189 | default: | 240 | default: |
190 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); | 241 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); |
@@ -203,10 +254,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
203 | */ | 254 | */ |
204 | int radeon_driver_firstopen_kms(struct drm_device *dev) | 255 | int radeon_driver_firstopen_kms(struct drm_device *dev) |
205 | { | 256 | { |
206 | struct radeon_device *rdev = dev->dev_private; | ||
207 | |||
208 | if (rdev->powered_down) | ||
209 | return -EINVAL; | ||
210 | return 0; | 257 | return 0; |
211 | } | 258 | } |
212 | 259 | ||
@@ -232,6 +279,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev, | |||
232 | struct radeon_device *rdev = dev->dev_private; | 279 | struct radeon_device *rdev = dev->dev_private; |
233 | if (rdev->hyperz_filp == file_priv) | 280 | if (rdev->hyperz_filp == file_priv) |
234 | rdev->hyperz_filp = NULL; | 281 | rdev->hyperz_filp = NULL; |
282 | if (rdev->cmask_filp == file_priv) | ||
283 | rdev->cmask_filp = NULL; | ||
235 | } | 284 | } |
236 | 285 | ||
237 | /* | 286 | /* |
@@ -277,6 +326,27 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) | |||
277 | radeon_irq_set(rdev); | 326 | radeon_irq_set(rdev); |
278 | } | 327 | } |
279 | 328 | ||
329 | int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, | ||
330 | int *max_error, | ||
331 | struct timeval *vblank_time, | ||
332 | unsigned flags) | ||
333 | { | ||
334 | struct drm_crtc *drmcrtc; | ||
335 | struct radeon_device *rdev = dev->dev_private; | ||
336 | |||
337 | if (crtc < 0 || crtc >= dev->num_crtcs) { | ||
338 | DRM_ERROR("Invalid crtc %d\n", crtc); | ||
339 | return -EINVAL; | ||
340 | } | ||
341 | |||
342 | /* Get associated drm_crtc: */ | ||
343 | drmcrtc = &rdev->mode_info.crtcs[crtc]->base; | ||
344 | |||
345 | /* Helper routine in DRM core does all the work: */ | ||
346 | return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, | ||
347 | vblank_time, flags, | ||
348 | drmcrtc); | ||
349 | } | ||
280 | 350 | ||
281 | /* | 351 | /* |
282 | * IOCTL. | 352 | * IOCTL. |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 305049afde15..41a5d48e657b 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -348,10 +348,25 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
348 | int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | 348 | int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, |
349 | struct drm_framebuffer *old_fb) | 349 | struct drm_framebuffer *old_fb) |
350 | { | 350 | { |
351 | return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0); | ||
352 | } | ||
353 | |||
354 | int radeon_crtc_set_base_atomic(struct drm_crtc *crtc, | ||
355 | struct drm_framebuffer *fb, | ||
356 | int x, int y, enum mode_set_atomic state) | ||
357 | { | ||
358 | return radeon_crtc_do_set_base(crtc, fb, x, y, 1); | ||
359 | } | ||
360 | |||
361 | int radeon_crtc_do_set_base(struct drm_crtc *crtc, | ||
362 | struct drm_framebuffer *fb, | ||
363 | int x, int y, int atomic) | ||
364 | { | ||
351 | struct drm_device *dev = crtc->dev; | 365 | struct drm_device *dev = crtc->dev; |
352 | struct radeon_device *rdev = dev->dev_private; | 366 | struct radeon_device *rdev = dev->dev_private; |
353 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 367 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
354 | struct radeon_framebuffer *radeon_fb; | 368 | struct radeon_framebuffer *radeon_fb; |
369 | struct drm_framebuffer *target_fb; | ||
355 | struct drm_gem_object *obj; | 370 | struct drm_gem_object *obj; |
356 | struct radeon_bo *rbo; | 371 | struct radeon_bo *rbo; |
357 | uint64_t base; | 372 | uint64_t base; |
@@ -364,14 +379,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
364 | 379 | ||
365 | DRM_DEBUG_KMS("\n"); | 380 | DRM_DEBUG_KMS("\n"); |
366 | /* no fb bound */ | 381 | /* no fb bound */ |
367 | if (!crtc->fb) { | 382 | if (!atomic && !crtc->fb) { |
368 | DRM_DEBUG_KMS("No FB bound\n"); | 383 | DRM_DEBUG_KMS("No FB bound\n"); |
369 | return 0; | 384 | return 0; |
370 | } | 385 | } |
371 | 386 | ||
372 | radeon_fb = to_radeon_framebuffer(crtc->fb); | 387 | if (atomic) { |
388 | radeon_fb = to_radeon_framebuffer(fb); | ||
389 | target_fb = fb; | ||
390 | } | ||
391 | else { | ||
392 | radeon_fb = to_radeon_framebuffer(crtc->fb); | ||
393 | target_fb = crtc->fb; | ||
394 | } | ||
373 | 395 | ||
374 | switch (crtc->fb->bits_per_pixel) { | 396 | switch (target_fb->bits_per_pixel) { |
375 | case 8: | 397 | case 8: |
376 | format = 2; | 398 | format = 2; |
377 | break; | 399 | break; |
@@ -393,7 +415,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
393 | 415 | ||
394 | /* Pin framebuffer & get tilling informations */ | 416 | /* Pin framebuffer & get tilling informations */ |
395 | obj = radeon_fb->obj; | 417 | obj = radeon_fb->obj; |
396 | rbo = obj->driver_private; | 418 | rbo = gem_to_radeon_bo(obj); |
397 | r = radeon_bo_reserve(rbo, false); | 419 | r = radeon_bo_reserve(rbo, false); |
398 | if (unlikely(r != 0)) | 420 | if (unlikely(r != 0)) |
399 | return r; | 421 | return r; |
@@ -415,13 +437,13 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
415 | 437 | ||
416 | crtc_offset_cntl = 0; | 438 | crtc_offset_cntl = 0; |
417 | 439 | ||
418 | pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); | 440 | pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8); |
419 | crtc_pitch = (((pitch_pixels * crtc->fb->bits_per_pixel) + | 441 | crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) + |
420 | ((crtc->fb->bits_per_pixel * 8) - 1)) / | 442 | ((target_fb->bits_per_pixel * 8) - 1)) / |
421 | (crtc->fb->bits_per_pixel * 8)); | 443 | (target_fb->bits_per_pixel * 8)); |
422 | crtc_pitch |= crtc_pitch << 16; | 444 | crtc_pitch |= crtc_pitch << 16; |
423 | 445 | ||
424 | 446 | crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN; | |
425 | if (tiling_flags & RADEON_TILING_MACRO) { | 447 | if (tiling_flags & RADEON_TILING_MACRO) { |
426 | if (ASIC_IS_R300(rdev)) | 448 | if (ASIC_IS_R300(rdev)) |
427 | crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | | 449 | crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | |
@@ -443,14 +465,14 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
443 | crtc_tile_x0_y0 = x | (y << 16); | 465 | crtc_tile_x0_y0 = x | (y << 16); |
444 | base &= ~0x7ff; | 466 | base &= ~0x7ff; |
445 | } else { | 467 | } else { |
446 | int byteshift = crtc->fb->bits_per_pixel >> 4; | 468 | int byteshift = target_fb->bits_per_pixel >> 4; |
447 | int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11; | 469 | int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11; |
448 | base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); | 470 | base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); |
449 | crtc_offset_cntl |= (y % 16); | 471 | crtc_offset_cntl |= (y % 16); |
450 | } | 472 | } |
451 | } else { | 473 | } else { |
452 | int offset = y * pitch_pixels + x; | 474 | int offset = y * pitch_pixels + x; |
453 | switch (crtc->fb->bits_per_pixel) { | 475 | switch (target_fb->bits_per_pixel) { |
454 | case 8: | 476 | case 8: |
455 | offset *= 1; | 477 | offset *= 1; |
456 | break; | 478 | break; |
@@ -480,6 +502,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
480 | gen_cntl_val = RREG32(gen_cntl_reg); | 502 | gen_cntl_val = RREG32(gen_cntl_reg); |
481 | gen_cntl_val &= ~(0xf << 8); | 503 | gen_cntl_val &= ~(0xf << 8); |
482 | gen_cntl_val |= (format << 8); | 504 | gen_cntl_val |= (format << 8); |
505 | gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK; | ||
483 | WREG32(gen_cntl_reg, gen_cntl_val); | 506 | WREG32(gen_cntl_reg, gen_cntl_val); |
484 | 507 | ||
485 | crtc_offset = (u32)base; | 508 | crtc_offset = (u32)base; |
@@ -496,9 +519,9 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
496 | WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset); | 519 | WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset); |
497 | WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch); | 520 | WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch); |
498 | 521 | ||
499 | if (old_fb && old_fb != crtc->fb) { | 522 | if (!atomic && fb && fb != crtc->fb) { |
500 | radeon_fb = to_radeon_framebuffer(old_fb); | 523 | radeon_fb = to_radeon_framebuffer(fb); |
501 | rbo = radeon_fb->obj->driver_private; | 524 | rbo = gem_to_radeon_bo(radeon_fb->obj); |
502 | r = radeon_bo_reserve(rbo, false); | 525 | r = radeon_bo_reserve(rbo, false); |
503 | if (unlikely(r != 0)) | 526 | if (unlikely(r != 0)) |
504 | return r; | 527 | return r; |
@@ -717,10 +740,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
717 | pll = &rdev->clock.p1pll; | 740 | pll = &rdev->clock.p1pll; |
718 | 741 | ||
719 | pll->flags = RADEON_PLL_LEGACY; | 742 | pll->flags = RADEON_PLL_LEGACY; |
720 | if (radeon_new_pll == 1) | ||
721 | pll->algo = PLL_ALGO_NEW; | ||
722 | else | ||
723 | pll->algo = PLL_ALGO_LEGACY; | ||
724 | 743 | ||
725 | if (mode->clock > 200000) /* range limits??? */ | 744 | if (mode->clock > 200000) /* range limits??? */ |
726 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 745 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
@@ -760,9 +779,9 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
760 | DRM_DEBUG_KMS("\n"); | 779 | DRM_DEBUG_KMS("\n"); |
761 | 780 | ||
762 | if (!use_bios_divs) { | 781 | if (!use_bios_divs) { |
763 | radeon_compute_pll(pll, mode->clock, | 782 | radeon_compute_pll_legacy(pll, mode->clock, |
764 | &freq, &feedback_div, &frac_fb_div, | 783 | &freq, &feedback_div, &frac_fb_div, |
765 | &reference_div, &post_divider); | 784 | &reference_div, &post_divider); |
766 | 785 | ||
767 | for (post_div = &post_divs[0]; post_div->divider; ++post_div) { | 786 | for (post_div = &post_divs[0]; post_div->divider; ++post_div) { |
768 | if (post_div->divider == post_divider) | 787 | if (post_div->divider == post_divider) |
@@ -870,7 +889,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
870 | } | 889 | } |
871 | 890 | ||
872 | if (rdev->flags & RADEON_IS_MOBILITY) { | 891 | if (rdev->flags & RADEON_IS_MOBILITY) { |
873 | /* A temporal workaround for the occational blanking on certain laptop panels. | 892 | /* A temporal workaround for the occasional blanking on certain laptop panels. |
874 | This appears to related to the PLL divider registers (fail to lock?). | 893 | This appears to related to the PLL divider registers (fail to lock?). |
875 | It occurs even when all dividers are the same with their old settings. | 894 | It occurs even when all dividers are the same with their old settings. |
876 | In this case we really don't need to fiddle with PLL registers. | 895 | In this case we really don't need to fiddle with PLL registers. |
@@ -1040,6 +1059,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = { | |||
1040 | .mode_fixup = radeon_crtc_mode_fixup, | 1059 | .mode_fixup = radeon_crtc_mode_fixup, |
1041 | .mode_set = radeon_crtc_mode_set, | 1060 | .mode_set = radeon_crtc_mode_set, |
1042 | .mode_set_base = radeon_crtc_set_base, | 1061 | .mode_set_base = radeon_crtc_set_base, |
1062 | .mode_set_base_atomic = radeon_crtc_set_base_atomic, | ||
1043 | .prepare = radeon_crtc_prepare, | 1063 | .prepare = radeon_crtc_prepare, |
1044 | .commit = radeon_crtc_commit, | 1064 | .commit = radeon_crtc_commit, |
1045 | .load_lut = radeon_crtc_load_lut, | 1065 | .load_lut = radeon_crtc_load_lut, |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 0b8397000f4c..2f46e0c8df53 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
@@ -28,6 +28,10 @@ | |||
28 | #include "radeon_drm.h" | 28 | #include "radeon_drm.h" |
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "atom.h" | 30 | #include "atom.h" |
31 | #include <linux/backlight.h> | ||
32 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
33 | #include <asm/backlight.h> | ||
34 | #endif | ||
31 | 35 | ||
32 | static void radeon_legacy_encoder_disable(struct drm_encoder *encoder) | 36 | static void radeon_legacy_encoder_disable(struct drm_encoder *encoder) |
33 | { | 37 | { |
@@ -39,7 +43,7 @@ static void radeon_legacy_encoder_disable(struct drm_encoder *encoder) | |||
39 | radeon_encoder->active_device = 0; | 43 | radeon_encoder->active_device = 0; |
40 | } | 44 | } |
41 | 45 | ||
42 | static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) | 46 | static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode) |
43 | { | 47 | { |
44 | struct drm_device *dev = encoder->dev; | 48 | struct drm_device *dev = encoder->dev; |
45 | struct radeon_device *rdev = dev->dev_private; | 49 | struct radeon_device *rdev = dev->dev_private; |
@@ -47,15 +51,23 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
47 | uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man; | 51 | uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man; |
48 | int panel_pwr_delay = 2000; | 52 | int panel_pwr_delay = 2000; |
49 | bool is_mac = false; | 53 | bool is_mac = false; |
54 | uint8_t backlight_level; | ||
50 | DRM_DEBUG_KMS("\n"); | 55 | DRM_DEBUG_KMS("\n"); |
51 | 56 | ||
57 | lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); | ||
58 | backlight_level = (lvds_gen_cntl >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; | ||
59 | |||
52 | if (radeon_encoder->enc_priv) { | 60 | if (radeon_encoder->enc_priv) { |
53 | if (rdev->is_atom_bios) { | 61 | if (rdev->is_atom_bios) { |
54 | struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; | 62 | struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; |
55 | panel_pwr_delay = lvds->panel_pwr_delay; | 63 | panel_pwr_delay = lvds->panel_pwr_delay; |
64 | if (lvds->bl_dev) | ||
65 | backlight_level = lvds->backlight_level; | ||
56 | } else { | 66 | } else { |
57 | struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; | 67 | struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; |
58 | panel_pwr_delay = lvds->panel_pwr_delay; | 68 | panel_pwr_delay = lvds->panel_pwr_delay; |
69 | if (lvds->bl_dev) | ||
70 | backlight_level = lvds->backlight_level; | ||
59 | } | 71 | } |
60 | } | 72 | } |
61 | 73 | ||
@@ -82,11 +94,13 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
82 | lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET; | 94 | lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET; |
83 | WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); | 95 | WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); |
84 | 96 | ||
85 | lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); | 97 | lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS | |
86 | lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON); | 98 | RADEON_LVDS_BL_MOD_LEVEL_MASK); |
99 | lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | | ||
100 | RADEON_LVDS_DIGON | RADEON_LVDS_BLON | | ||
101 | (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT)); | ||
87 | if (is_mac) | 102 | if (is_mac) |
88 | lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN; | 103 | lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN; |
89 | lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS); | ||
90 | udelay(panel_pwr_delay * 1000); | 104 | udelay(panel_pwr_delay * 1000); |
91 | WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); | 105 | WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); |
92 | break; | 106 | break; |
@@ -95,7 +109,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
95 | case DRM_MODE_DPMS_OFF: | 109 | case DRM_MODE_DPMS_OFF: |
96 | pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); | 110 | pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); |
97 | WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb); | 111 | WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb); |
98 | lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); | ||
99 | lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; | 112 | lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; |
100 | if (is_mac) { | 113 | if (is_mac) { |
101 | lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN; | 114 | lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN; |
@@ -119,6 +132,25 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
119 | 132 | ||
120 | } | 133 | } |
121 | 134 | ||
135 | static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) | ||
136 | { | ||
137 | struct radeon_device *rdev = encoder->dev->dev_private; | ||
138 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
139 | DRM_DEBUG("\n"); | ||
140 | |||
141 | if (radeon_encoder->enc_priv) { | ||
142 | if (rdev->is_atom_bios) { | ||
143 | struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; | ||
144 | lvds->dpms_mode = mode; | ||
145 | } else { | ||
146 | struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; | ||
147 | lvds->dpms_mode = mode; | ||
148 | } | ||
149 | } | ||
150 | |||
151 | radeon_legacy_lvds_update(encoder, mode); | ||
152 | } | ||
153 | |||
122 | static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) | 154 | static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) |
123 | { | 155 | { |
124 | struct radeon_device *rdev = encoder->dev->dev_private; | 156 | struct radeon_device *rdev = encoder->dev->dev_private; |
@@ -237,9 +269,222 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { | |||
237 | .disable = radeon_legacy_encoder_disable, | 269 | .disable = radeon_legacy_encoder_disable, |
238 | }; | 270 | }; |
239 | 271 | ||
272 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) | ||
273 | |||
274 | #define MAX_RADEON_LEVEL 0xFF | ||
275 | |||
276 | struct radeon_backlight_privdata { | ||
277 | struct radeon_encoder *encoder; | ||
278 | uint8_t negative; | ||
279 | }; | ||
280 | |||
281 | static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd) | ||
282 | { | ||
283 | struct radeon_backlight_privdata *pdata = bl_get_data(bd); | ||
284 | uint8_t level; | ||
285 | |||
286 | /* Convert brightness to hardware level */ | ||
287 | if (bd->props.brightness < 0) | ||
288 | level = 0; | ||
289 | else if (bd->props.brightness > MAX_RADEON_LEVEL) | ||
290 | level = MAX_RADEON_LEVEL; | ||
291 | else | ||
292 | level = bd->props.brightness; | ||
293 | |||
294 | if (pdata->negative) | ||
295 | level = MAX_RADEON_LEVEL - level; | ||
296 | |||
297 | return level; | ||
298 | } | ||
299 | |||
300 | static int radeon_legacy_backlight_update_status(struct backlight_device *bd) | ||
301 | { | ||
302 | struct radeon_backlight_privdata *pdata = bl_get_data(bd); | ||
303 | struct radeon_encoder *radeon_encoder = pdata->encoder; | ||
304 | struct drm_device *dev = radeon_encoder->base.dev; | ||
305 | struct radeon_device *rdev = dev->dev_private; | ||
306 | int dpms_mode = DRM_MODE_DPMS_ON; | ||
307 | |||
308 | if (radeon_encoder->enc_priv) { | ||
309 | if (rdev->is_atom_bios) { | ||
310 | struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; | ||
311 | dpms_mode = lvds->dpms_mode; | ||
312 | lvds->backlight_level = radeon_legacy_lvds_level(bd); | ||
313 | } else { | ||
314 | struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; | ||
315 | dpms_mode = lvds->dpms_mode; | ||
316 | lvds->backlight_level = radeon_legacy_lvds_level(bd); | ||
317 | } | ||
318 | } | ||
319 | |||
320 | if (bd->props.brightness > 0) | ||
321 | radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode); | ||
322 | else | ||
323 | radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF); | ||
324 | |||
325 | return 0; | ||
326 | } | ||
327 | |||
328 | static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd) | ||
329 | { | ||
330 | struct radeon_backlight_privdata *pdata = bl_get_data(bd); | ||
331 | struct radeon_encoder *radeon_encoder = pdata->encoder; | ||
332 | struct drm_device *dev = radeon_encoder->base.dev; | ||
333 | struct radeon_device *rdev = dev->dev_private; | ||
334 | uint8_t backlight_level; | ||
335 | |||
336 | backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >> | ||
337 | RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; | ||
338 | |||
339 | return pdata->negative ? MAX_RADEON_LEVEL - backlight_level : backlight_level; | ||
340 | } | ||
341 | |||
342 | static const struct backlight_ops radeon_backlight_ops = { | ||
343 | .get_brightness = radeon_legacy_backlight_get_brightness, | ||
344 | .update_status = radeon_legacy_backlight_update_status, | ||
345 | }; | ||
346 | |||
347 | void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, | ||
348 | struct drm_connector *drm_connector) | ||
349 | { | ||
350 | struct drm_device *dev = radeon_encoder->base.dev; | ||
351 | struct radeon_device *rdev = dev->dev_private; | ||
352 | struct backlight_device *bd; | ||
353 | struct backlight_properties props; | ||
354 | struct radeon_backlight_privdata *pdata; | ||
355 | uint8_t backlight_level; | ||
356 | |||
357 | if (!radeon_encoder->enc_priv) | ||
358 | return; | ||
359 | |||
360 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
361 | if (!pmac_has_backlight_type("ati") && | ||
362 | !pmac_has_backlight_type("mnca")) | ||
363 | return; | ||
364 | #endif | ||
365 | |||
366 | pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL); | ||
367 | if (!pdata) { | ||
368 | DRM_ERROR("Memory allocation failed\n"); | ||
369 | goto error; | ||
370 | } | ||
371 | |||
372 | props.max_brightness = MAX_RADEON_LEVEL; | ||
373 | props.type = BACKLIGHT_RAW; | ||
374 | bd = backlight_device_register("radeon_bl", &drm_connector->kdev, | ||
375 | pdata, &radeon_backlight_ops, &props); | ||
376 | if (IS_ERR(bd)) { | ||
377 | DRM_ERROR("Backlight registration failed\n"); | ||
378 | goto error; | ||
379 | } | ||
380 | |||
381 | pdata->encoder = radeon_encoder; | ||
382 | |||
383 | backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >> | ||
384 | RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; | ||
385 | |||
386 | /* First, try to detect backlight level sense based on the assumption | ||
387 | * that firmware set it up at full brightness | ||
388 | */ | ||
389 | if (backlight_level == 0) | ||
390 | pdata->negative = true; | ||
391 | else if (backlight_level == 0xff) | ||
392 | pdata->negative = false; | ||
393 | else { | ||
394 | /* XXX hack... maybe some day we can figure out in what direction | ||
395 | * backlight should work on a given panel? | ||
396 | */ | ||
397 | pdata->negative = (rdev->family != CHIP_RV200 && | ||
398 | rdev->family != CHIP_RV250 && | ||
399 | rdev->family != CHIP_RV280 && | ||
400 | rdev->family != CHIP_RV350); | ||
401 | |||
402 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
403 | pdata->negative = (pdata->negative || | ||
404 | of_machine_is_compatible("PowerBook4,3") || | ||
405 | of_machine_is_compatible("PowerBook6,3") || | ||
406 | of_machine_is_compatible("PowerBook6,5")); | ||
407 | #endif | ||
408 | } | ||
409 | |||
410 | if (rdev->is_atom_bios) { | ||
411 | struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; | ||
412 | lvds->bl_dev = bd; | ||
413 | } else { | ||
414 | struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; | ||
415 | lvds->bl_dev = bd; | ||
416 | } | ||
417 | |||
418 | bd->props.brightness = radeon_legacy_backlight_get_brightness(bd); | ||
419 | bd->props.power = FB_BLANK_UNBLANK; | ||
420 | backlight_update_status(bd); | ||
421 | |||
422 | DRM_INFO("radeon legacy LVDS backlight initialized\n"); | ||
423 | |||
424 | return; | ||
425 | |||
426 | error: | ||
427 | kfree(pdata); | ||
428 | return; | ||
429 | } | ||
430 | |||
431 | static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder) | ||
432 | { | ||
433 | struct drm_device *dev = radeon_encoder->base.dev; | ||
434 | struct radeon_device *rdev = dev->dev_private; | ||
435 | struct backlight_device *bd = NULL; | ||
436 | |||
437 | if (!radeon_encoder->enc_priv) | ||
438 | return; | ||
439 | |||
440 | if (rdev->is_atom_bios) { | ||
441 | struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; | ||
442 | bd = lvds->bl_dev; | ||
443 | lvds->bl_dev = NULL; | ||
444 | } else { | ||
445 | struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; | ||
446 | bd = lvds->bl_dev; | ||
447 | lvds->bl_dev = NULL; | ||
448 | } | ||
449 | |||
450 | if (bd) { | ||
451 | struct radeon_legacy_backlight_privdata *pdata; | ||
452 | |||
453 | pdata = bl_get_data(bd); | ||
454 | backlight_device_unregister(bd); | ||
455 | kfree(pdata); | ||
456 | |||
457 | DRM_INFO("radeon legacy LVDS backlight unloaded\n"); | ||
458 | } | ||
459 | } | ||
460 | |||
461 | #else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */ | ||
462 | |||
463 | void radeon_legacy_backlight_init(struct radeon_encoder *encoder) | ||
464 | { | ||
465 | } | ||
466 | |||
467 | static void radeon_legacy_backlight_exit(struct radeon_encoder *encoder) | ||
468 | { | ||
469 | } | ||
470 | |||
471 | #endif | ||
472 | |||
473 | |||
474 | static void radeon_lvds_enc_destroy(struct drm_encoder *encoder) | ||
475 | { | ||
476 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
477 | |||
478 | if (radeon_encoder->enc_priv) { | ||
479 | radeon_legacy_backlight_exit(radeon_encoder); | ||
480 | kfree(radeon_encoder->enc_priv); | ||
481 | } | ||
482 | drm_encoder_cleanup(encoder); | ||
483 | kfree(radeon_encoder); | ||
484 | } | ||
240 | 485 | ||
241 | static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = { | 486 | static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = { |
242 | .destroy = radeon_enc_destroy, | 487 | .destroy = radeon_lvds_enc_destroy, |
243 | }; | 488 | }; |
244 | 489 | ||
245 | static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) | 490 | static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) |
@@ -670,7 +915,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, | |||
670 | 915 | ||
671 | if (rdev->is_atom_bios) { | 916 | if (rdev->is_atom_bios) { |
672 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 917 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
673 | atombios_external_tmds_setup(encoder, ATOM_ENABLE); | 918 | atombios_dvo_setup(encoder, ATOM_ENABLE); |
674 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); | 919 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); |
675 | } else { | 920 | } else { |
676 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); | 921 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 17a6602b5885..6df4e3cec0c2 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -35,8 +35,8 @@ | |||
35 | #include <drm_edid.h> | 35 | #include <drm_edid.h> |
36 | #include <drm_dp_helper.h> | 36 | #include <drm_dp_helper.h> |
37 | #include <drm_fixed.h> | 37 | #include <drm_fixed.h> |
38 | #include <drm_crtc_helper.h> | ||
38 | #include <linux/i2c.h> | 39 | #include <linux/i2c.h> |
39 | #include <linux/i2c-id.h> | ||
40 | #include <linux/i2c-algo-bit.h> | 40 | #include <linux/i2c-algo-bit.h> |
41 | 41 | ||
42 | struct radeon_bo; | 42 | struct radeon_bo; |
@@ -149,12 +149,7 @@ struct radeon_tmds_pll { | |||
149 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) | 149 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) |
150 | #define RADEON_PLL_USE_POST_DIV (1 << 12) | 150 | #define RADEON_PLL_USE_POST_DIV (1 << 12) |
151 | #define RADEON_PLL_IS_LCD (1 << 13) | 151 | #define RADEON_PLL_IS_LCD (1 << 13) |
152 | 152 | #define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14) | |
153 | /* pll algo */ | ||
154 | enum radeon_pll_algo { | ||
155 | PLL_ALGO_LEGACY, | ||
156 | PLL_ALGO_NEW | ||
157 | }; | ||
158 | 153 | ||
159 | struct radeon_pll { | 154 | struct radeon_pll { |
160 | /* reference frequency */ | 155 | /* reference frequency */ |
@@ -188,8 +183,6 @@ struct radeon_pll { | |||
188 | 183 | ||
189 | /* pll id */ | 184 | /* pll id */ |
190 | uint32_t id; | 185 | uint32_t id; |
191 | /* pll algo */ | ||
192 | enum radeon_pll_algo algo; | ||
193 | }; | 186 | }; |
194 | 187 | ||
195 | struct radeon_i2c_chan { | 188 | struct radeon_i2c_chan { |
@@ -216,6 +209,7 @@ enum radeon_connector_table { | |||
216 | CT_EMAC, | 209 | CT_EMAC, |
217 | CT_RN50_POWER, | 210 | CT_RN50_POWER, |
218 | CT_MAC_X800, | 211 | CT_MAC_X800, |
212 | CT_MAC_G5_9600, | ||
219 | }; | 213 | }; |
220 | 214 | ||
221 | enum radeon_dvo_chip { | 215 | enum radeon_dvo_chip { |
@@ -241,8 +235,11 @@ struct radeon_mode_info { | |||
241 | struct drm_property *tmds_pll_property; | 235 | struct drm_property *tmds_pll_property; |
242 | /* underscan */ | 236 | /* underscan */ |
243 | struct drm_property *underscan_property; | 237 | struct drm_property *underscan_property; |
238 | struct drm_property *underscan_hborder_property; | ||
239 | struct drm_property *underscan_vborder_property; | ||
244 | /* hardcoded DFP edid from BIOS */ | 240 | /* hardcoded DFP edid from BIOS */ |
245 | struct edid *bios_hardcoded_edid; | 241 | struct edid *bios_hardcoded_edid; |
242 | int bios_hardcoded_edid_size; | ||
246 | 243 | ||
247 | /* pointer to fbdev info structure */ | 244 | /* pointer to fbdev info structure */ |
248 | struct radeon_fbdev *rfbdev; | 245 | struct radeon_fbdev *rfbdev; |
@@ -283,6 +280,9 @@ struct radeon_crtc { | |||
283 | fixed20_12 hsc; | 280 | fixed20_12 hsc; |
284 | struct drm_display_mode native_mode; | 281 | struct drm_display_mode native_mode; |
285 | int pll_id; | 282 | int pll_id; |
283 | /* page flipping */ | ||
284 | struct radeon_unpin_work *unpin_work; | ||
285 | int deferred_flip_completion; | ||
286 | }; | 286 | }; |
287 | 287 | ||
288 | struct radeon_encoder_primary_dac { | 288 | struct radeon_encoder_primary_dac { |
@@ -303,6 +303,9 @@ struct radeon_encoder_lvds { | |||
303 | uint32_t lvds_gen_cntl; | 303 | uint32_t lvds_gen_cntl; |
304 | /* panel mode */ | 304 | /* panel mode */ |
305 | struct drm_display_mode native_mode; | 305 | struct drm_display_mode native_mode; |
306 | struct backlight_device *bl_dev; | ||
307 | int dpms_mode; | ||
308 | uint8_t backlight_level; | ||
306 | }; | 309 | }; |
307 | 310 | ||
308 | struct radeon_encoder_tv_dac { | 311 | struct radeon_encoder_tv_dac { |
@@ -336,24 +339,29 @@ struct radeon_encoder_ext_tmds { | |||
336 | struct radeon_atom_ss { | 339 | struct radeon_atom_ss { |
337 | uint16_t percentage; | 340 | uint16_t percentage; |
338 | uint8_t type; | 341 | uint8_t type; |
339 | uint8_t step; | 342 | uint16_t step; |
340 | uint8_t delay; | 343 | uint8_t delay; |
341 | uint8_t range; | 344 | uint8_t range; |
342 | uint8_t refdiv; | 345 | uint8_t refdiv; |
346 | /* asic_ss */ | ||
347 | uint16_t rate; | ||
348 | uint16_t amount; | ||
343 | }; | 349 | }; |
344 | 350 | ||
345 | struct radeon_encoder_atom_dig { | 351 | struct radeon_encoder_atom_dig { |
346 | bool linkb; | 352 | bool linkb; |
347 | /* atom dig */ | 353 | /* atom dig */ |
348 | bool coherent_mode; | 354 | bool coherent_mode; |
349 | int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */ | 355 | int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */ |
350 | /* atom lvds */ | 356 | /* atom lvds/edp */ |
351 | uint32_t lvds_misc; | 357 | uint32_t lcd_misc; |
352 | uint16_t panel_pwr_delay; | 358 | uint16_t panel_pwr_delay; |
353 | enum radeon_pll_algo pll_algo; | 359 | uint32_t lcd_ss_id; |
354 | struct radeon_atom_ss *ss; | ||
355 | /* panel mode */ | 360 | /* panel mode */ |
356 | struct drm_display_mode native_mode; | 361 | struct drm_display_mode native_mode; |
362 | struct backlight_device *bl_dev; | ||
363 | int dpms_mode; | ||
364 | uint8_t backlight_level; | ||
357 | }; | 365 | }; |
358 | 366 | ||
359 | struct radeon_encoder_atom_dac { | 367 | struct radeon_encoder_atom_dac { |
@@ -370,6 +378,8 @@ struct radeon_encoder { | |||
370 | uint32_t pixel_clock; | 378 | uint32_t pixel_clock; |
371 | enum radeon_rmx_type rmx_type; | 379 | enum radeon_rmx_type rmx_type; |
372 | enum radeon_underscan_type underscan_type; | 380 | enum radeon_underscan_type underscan_type; |
381 | uint32_t underscan_hborder; | ||
382 | uint32_t underscan_vborder; | ||
373 | struct drm_display_mode native_mode; | 383 | struct drm_display_mode native_mode; |
374 | void *enc_priv; | 384 | void *enc_priv; |
375 | int audio_polling_active; | 385 | int audio_polling_active; |
@@ -377,6 +387,8 @@ struct radeon_encoder { | |||
377 | int hdmi_config_offset; | 387 | int hdmi_config_offset; |
378 | int hdmi_audio_workaround; | 388 | int hdmi_audio_workaround; |
379 | int hdmi_buffer_status; | 389 | int hdmi_buffer_status; |
390 | bool is_ext_encoder; | ||
391 | u16 caps; | ||
380 | }; | 392 | }; |
381 | 393 | ||
382 | struct radeon_connector_atom_dig { | 394 | struct radeon_connector_atom_dig { |
@@ -387,6 +399,7 @@ struct radeon_connector_atom_dig { | |||
387 | u8 dp_sink_type; | 399 | u8 dp_sink_type; |
388 | int dp_clock; | 400 | int dp_clock; |
389 | int dp_lane_count; | 401 | int dp_lane_count; |
402 | bool edp_on; | ||
390 | }; | 403 | }; |
391 | 404 | ||
392 | struct radeon_gpio_rec { | 405 | struct radeon_gpio_rec { |
@@ -403,13 +416,19 @@ struct radeon_hpd { | |||
403 | }; | 416 | }; |
404 | 417 | ||
405 | struct radeon_router { | 418 | struct radeon_router { |
406 | bool valid; | ||
407 | u32 router_id; | 419 | u32 router_id; |
408 | struct radeon_i2c_bus_rec i2c_info; | 420 | struct radeon_i2c_bus_rec i2c_info; |
409 | u8 i2c_addr; | 421 | u8 i2c_addr; |
410 | u8 mux_type; | 422 | /* i2c mux */ |
411 | u8 mux_control_pin; | 423 | bool ddc_valid; |
412 | u8 mux_state; | 424 | u8 ddc_mux_type; |
425 | u8 ddc_mux_control_pin; | ||
426 | u8 ddc_mux_state; | ||
427 | /* clock/data mux */ | ||
428 | bool cd_valid; | ||
429 | u8 cd_mux_type; | ||
430 | u8 cd_mux_control_pin; | ||
431 | u8 cd_mux_state; | ||
413 | }; | 432 | }; |
414 | 433 | ||
415 | struct radeon_connector { | 434 | struct radeon_connector { |
@@ -436,6 +455,7 @@ struct radeon_framebuffer { | |||
436 | struct drm_gem_object *obj; | 455 | struct drm_gem_object *obj; |
437 | }; | 456 | }; |
438 | 457 | ||
458 | |||
439 | extern enum radeon_tv_std | 459 | extern enum radeon_tv_std |
440 | radeon_combios_get_tv_info(struct radeon_device *rdev); | 460 | radeon_combios_get_tv_info(struct radeon_device *rdev); |
441 | extern enum radeon_tv_std | 461 | extern enum radeon_tv_std |
@@ -444,22 +464,29 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev); | |||
444 | extern struct drm_connector * | 464 | extern struct drm_connector * |
445 | radeon_get_connector_for_encoder(struct drm_encoder *encoder); | 465 | radeon_get_connector_for_encoder(struct drm_encoder *encoder); |
446 | 466 | ||
467 | extern bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder); | ||
468 | extern bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector); | ||
469 | extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector); | ||
470 | extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector); | ||
471 | |||
447 | extern void radeon_connector_hotplug(struct drm_connector *connector); | 472 | extern void radeon_connector_hotplug(struct drm_connector *connector); |
448 | extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); | 473 | extern int radeon_dp_mode_valid_helper(struct drm_connector *connector, |
449 | extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, | ||
450 | struct drm_display_mode *mode); | 474 | struct drm_display_mode *mode); |
451 | extern void radeon_dp_set_link_config(struct drm_connector *connector, | 475 | extern void radeon_dp_set_link_config(struct drm_connector *connector, |
452 | struct drm_display_mode *mode); | 476 | struct drm_display_mode *mode); |
453 | extern void dp_link_train(struct drm_encoder *encoder, | 477 | extern void radeon_dp_link_train(struct drm_encoder *encoder, |
454 | struct drm_connector *connector); | 478 | struct drm_connector *connector); |
455 | extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); | 479 | extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); |
456 | extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); | 480 | extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); |
457 | extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action); | 481 | extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); |
482 | extern void radeon_atom_encoder_init(struct radeon_device *rdev); | ||
458 | extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, | 483 | extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, |
459 | int action, uint8_t lane_num, | 484 | int action, uint8_t lane_num, |
460 | uint8_t lane_set); | 485 | uint8_t lane_set); |
486 | extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder); | ||
487 | extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder); | ||
461 | extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | 488 | extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, |
462 | uint8_t write_byte, uint8_t *read_byte); | 489 | u8 write_byte, u8 *read_byte); |
463 | 490 | ||
464 | extern void radeon_i2c_init(struct radeon_device *rdev); | 491 | extern void radeon_i2c_init(struct radeon_device *rdev); |
465 | extern void radeon_i2c_fini(struct radeon_device *rdev); | 492 | extern void radeon_i2c_fini(struct radeon_device *rdev); |
@@ -485,19 +512,35 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c, | |||
485 | u8 slave_addr, | 512 | u8 slave_addr, |
486 | u8 addr, | 513 | u8 addr, |
487 | u8 val); | 514 | u8 val); |
488 | extern void radeon_router_select_port(struct radeon_connector *radeon_connector); | 515 | extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); |
516 | extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); | ||
489 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); | 517 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); |
490 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); | 518 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); |
491 | 519 | ||
492 | extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); | 520 | extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); |
493 | 521 | ||
494 | extern void radeon_compute_pll(struct radeon_pll *pll, | 522 | extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, |
495 | uint64_t freq, | 523 | struct radeon_atom_ss *ss, |
496 | uint32_t *dot_clock_p, | 524 | int id); |
497 | uint32_t *fb_div_p, | 525 | extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, |
498 | uint32_t *frac_fb_div_p, | 526 | struct radeon_atom_ss *ss, |
499 | uint32_t *ref_div_p, | 527 | int id, u32 clock); |
500 | uint32_t *post_div_p); | 528 | |
529 | extern void radeon_compute_pll_legacy(struct radeon_pll *pll, | ||
530 | uint64_t freq, | ||
531 | uint32_t *dot_clock_p, | ||
532 | uint32_t *fb_div_p, | ||
533 | uint32_t *frac_fb_div_p, | ||
534 | uint32_t *ref_div_p, | ||
535 | uint32_t *post_div_p); | ||
536 | |||
537 | extern void radeon_compute_pll_avivo(struct radeon_pll *pll, | ||
538 | u32 freq, | ||
539 | u32 *dot_clock_p, | ||
540 | u32 *fb_div_p, | ||
541 | u32 *frac_fb_div_p, | ||
542 | u32 *ref_div_p, | ||
543 | u32 *post_div_p); | ||
501 | 544 | ||
502 | extern void radeon_setup_encoder_clones(struct drm_device *dev); | 545 | extern void radeon_setup_encoder_clones(struct drm_device *dev); |
503 | 546 | ||
@@ -506,14 +549,19 @@ struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev | |||
506 | struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); | 549 | struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); |
507 | struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); | 550 | struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); |
508 | struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); | 551 | struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); |
509 | extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); | 552 | extern void atombios_dvo_setup(struct drm_encoder *encoder, int action); |
510 | extern void atombios_digital_setup(struct drm_encoder *encoder, int action); | 553 | extern void atombios_digital_setup(struct drm_encoder *encoder, int action); |
511 | extern int atombios_get_encoder_mode(struct drm_encoder *encoder); | 554 | extern int atombios_get_encoder_mode(struct drm_encoder *encoder); |
555 | extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action); | ||
512 | extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); | 556 | extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); |
513 | 557 | ||
514 | extern void radeon_crtc_load_lut(struct drm_crtc *crtc); | 558 | extern void radeon_crtc_load_lut(struct drm_crtc *crtc); |
515 | extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | 559 | extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, |
516 | struct drm_framebuffer *old_fb); | 560 | struct drm_framebuffer *old_fb); |
561 | extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, | ||
562 | struct drm_framebuffer *fb, | ||
563 | int x, int y, | ||
564 | enum mode_set_atomic state); | ||
517 | extern int atombios_crtc_mode_set(struct drm_crtc *crtc, | 565 | extern int atombios_crtc_mode_set(struct drm_crtc *crtc, |
518 | struct drm_display_mode *mode, | 566 | struct drm_display_mode *mode, |
519 | struct drm_display_mode *adjusted_mode, | 567 | struct drm_display_mode *adjusted_mode, |
@@ -523,7 +571,13 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode); | |||
523 | 571 | ||
524 | extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | 572 | extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, |
525 | struct drm_framebuffer *old_fb); | 573 | struct drm_framebuffer *old_fb); |
526 | 574 | extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc, | |
575 | struct drm_framebuffer *fb, | ||
576 | int x, int y, | ||
577 | enum mode_set_atomic state); | ||
578 | extern int radeon_crtc_do_set_base(struct drm_crtc *crtc, | ||
579 | struct drm_framebuffer *fb, | ||
580 | int x, int y, int atomic); | ||
527 | extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, | 581 | extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, |
528 | struct drm_file *file_priv, | 582 | struct drm_file *file_priv, |
529 | uint32_t handle, | 583 | uint32_t handle, |
@@ -532,9 +586,12 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, | |||
532 | extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, | 586 | extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, |
533 | int x, int y); | 587 | int x, int y); |
534 | 588 | ||
589 | extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, | ||
590 | int *vpos, int *hpos); | ||
591 | |||
535 | extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); | 592 | extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); |
536 | extern struct edid * | 593 | extern struct edid * |
537 | radeon_combios_get_hardcoded_edid(struct radeon_device *rdev); | 594 | radeon_bios_get_hardcoded_edid(struct radeon_device *rdev); |
538 | extern bool radeon_atom_get_clock_info(struct drm_device *dev); | 595 | extern bool radeon_atom_get_clock_info(struct drm_device *dev); |
539 | extern bool radeon_combios_get_clock_info(struct drm_device *dev); | 596 | extern bool radeon_combios_get_clock_info(struct drm_device *dev); |
540 | extern struct radeon_encoder_atom_dig * | 597 | extern struct radeon_encoder_atom_dig * |
@@ -630,4 +687,8 @@ int radeon_fbdev_total_size(struct radeon_device *rdev); | |||
630 | bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); | 687 | bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); |
631 | 688 | ||
632 | void radeon_fb_output_poll_changed(struct radeon_device *rdev); | 689 | void radeon_fb_output_poll_changed(struct radeon_device *rdev); |
690 | |||
691 | void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id); | ||
692 | |||
693 | int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled); | ||
633 | #endif | 694 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index b3b5306bb578..976c3b1b1b6e 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <drm/drmP.h> | 34 | #include <drm/drmP.h> |
35 | #include "radeon_drm.h" | 35 | #include "radeon_drm.h" |
36 | #include "radeon.h" | 36 | #include "radeon.h" |
37 | #include "radeon_trace.h" | ||
37 | 38 | ||
38 | 39 | ||
39 | int radeon_ttm_init(struct radeon_device *rdev); | 40 | int radeon_ttm_init(struct radeon_device *rdev); |
@@ -54,6 +55,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) | |||
54 | list_del_init(&bo->list); | 55 | list_del_init(&bo->list); |
55 | mutex_unlock(&bo->rdev->gem.mutex); | 56 | mutex_unlock(&bo->rdev->gem.mutex); |
56 | radeon_bo_clear_surface_reg(bo); | 57 | radeon_bo_clear_surface_reg(bo); |
58 | drm_gem_object_release(&bo->gem_base); | ||
57 | kfree(bo); | 59 | kfree(bo); |
58 | } | 60 | } |
59 | 61 | ||
@@ -69,7 +71,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) | |||
69 | u32 c = 0; | 71 | u32 c = 0; |
70 | 72 | ||
71 | rbo->placement.fpfn = 0; | 73 | rbo->placement.fpfn = 0; |
72 | rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT; | 74 | rbo->placement.lpfn = 0; |
73 | rbo->placement.placement = rbo->placements; | 75 | rbo->placement.placement = rbo->placements; |
74 | rbo->placement.busy_placement = rbo->placements; | 76 | rbo->placement.busy_placement = rbo->placements; |
75 | if (domain & RADEON_GEM_DOMAIN_VRAM) | 77 | if (domain & RADEON_GEM_DOMAIN_VRAM) |
@@ -85,14 +87,18 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) | |||
85 | rbo->placement.num_busy_placement = c; | 87 | rbo->placement.num_busy_placement = c; |
86 | } | 88 | } |
87 | 89 | ||
88 | int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, | 90 | int radeon_bo_create(struct radeon_device *rdev, |
89 | unsigned long size, bool kernel, u32 domain, | 91 | unsigned long size, int byte_align, bool kernel, u32 domain, |
90 | struct radeon_bo **bo_ptr) | 92 | struct radeon_bo **bo_ptr) |
91 | { | 93 | { |
92 | struct radeon_bo *bo; | 94 | struct radeon_bo *bo; |
93 | enum ttm_bo_type type; | 95 | enum ttm_bo_type type; |
96 | unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; | ||
97 | unsigned long max_size = 0; | ||
94 | int r; | 98 | int r; |
95 | 99 | ||
100 | size = ALIGN(size, PAGE_SIZE); | ||
101 | |||
96 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { | 102 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { |
97 | rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; | 103 | rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; |
98 | } | 104 | } |
@@ -102,20 +108,33 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, | |||
102 | type = ttm_bo_type_device; | 108 | type = ttm_bo_type_device; |
103 | } | 109 | } |
104 | *bo_ptr = NULL; | 110 | *bo_ptr = NULL; |
111 | |||
112 | /* maximun bo size is the minimun btw visible vram and gtt size */ | ||
113 | max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size); | ||
114 | if ((page_align << PAGE_SHIFT) >= max_size) { | ||
115 | printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n", | ||
116 | __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20); | ||
117 | return -ENOMEM; | ||
118 | } | ||
119 | |||
120 | retry: | ||
105 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); | 121 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); |
106 | if (bo == NULL) | 122 | if (bo == NULL) |
107 | return -ENOMEM; | 123 | return -ENOMEM; |
124 | r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size); | ||
125 | if (unlikely(r)) { | ||
126 | kfree(bo); | ||
127 | return r; | ||
128 | } | ||
108 | bo->rdev = rdev; | 129 | bo->rdev = rdev; |
109 | bo->gobj = gobj; | 130 | bo->gem_base.driver_private = NULL; |
110 | bo->surface_reg = -1; | 131 | bo->surface_reg = -1; |
111 | INIT_LIST_HEAD(&bo->list); | 132 | INIT_LIST_HEAD(&bo->list); |
112 | |||
113 | retry: | ||
114 | radeon_ttm_placement_from_domain(bo, domain); | 133 | radeon_ttm_placement_from_domain(bo, domain); |
115 | /* Kernel allocation are uninterruptible */ | 134 | /* Kernel allocation are uninterruptible */ |
116 | mutex_lock(&rdev->vram_mutex); | 135 | mutex_lock(&rdev->vram_mutex); |
117 | r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, | 136 | r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, |
118 | &bo->placement, 0, 0, !kernel, NULL, size, | 137 | &bo->placement, page_align, 0, !kernel, NULL, size, |
119 | &radeon_ttm_bo_destroy); | 138 | &radeon_ttm_bo_destroy); |
120 | mutex_unlock(&rdev->vram_mutex); | 139 | mutex_unlock(&rdev->vram_mutex); |
121 | if (unlikely(r != 0)) { | 140 | if (unlikely(r != 0)) { |
@@ -131,11 +150,9 @@ retry: | |||
131 | return r; | 150 | return r; |
132 | } | 151 | } |
133 | *bo_ptr = bo; | 152 | *bo_ptr = bo; |
134 | if (gobj) { | 153 | |
135 | mutex_lock(&bo->rdev->gem.mutex); | 154 | trace_radeon_bo_create(bo); |
136 | list_add_tail(&bo->list, &rdev->gem.objects); | 155 | |
137 | mutex_unlock(&bo->rdev->gem.mutex); | ||
138 | } | ||
139 | return 0; | 156 | return 0; |
140 | } | 157 | } |
141 | 158 | ||
@@ -248,7 +265,6 @@ int radeon_bo_evict_vram(struct radeon_device *rdev) | |||
248 | void radeon_bo_force_delete(struct radeon_device *rdev) | 265 | void radeon_bo_force_delete(struct radeon_device *rdev) |
249 | { | 266 | { |
250 | struct radeon_bo *bo, *n; | 267 | struct radeon_bo *bo, *n; |
251 | struct drm_gem_object *gobj; | ||
252 | 268 | ||
253 | if (list_empty(&rdev->gem.objects)) { | 269 | if (list_empty(&rdev->gem.objects)) { |
254 | return; | 270 | return; |
@@ -256,16 +272,14 @@ void radeon_bo_force_delete(struct radeon_device *rdev) | |||
256 | dev_err(rdev->dev, "Userspace still has active objects !\n"); | 272 | dev_err(rdev->dev, "Userspace still has active objects !\n"); |
257 | list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { | 273 | list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { |
258 | mutex_lock(&rdev->ddev->struct_mutex); | 274 | mutex_lock(&rdev->ddev->struct_mutex); |
259 | gobj = bo->gobj; | ||
260 | dev_err(rdev->dev, "%p %p %lu %lu force free\n", | 275 | dev_err(rdev->dev, "%p %p %lu %lu force free\n", |
261 | gobj, bo, (unsigned long)gobj->size, | 276 | &bo->gem_base, bo, (unsigned long)bo->gem_base.size, |
262 | *((unsigned long *)&gobj->refcount)); | 277 | *((unsigned long *)&bo->gem_base.refcount)); |
263 | mutex_lock(&bo->rdev->gem.mutex); | 278 | mutex_lock(&bo->rdev->gem.mutex); |
264 | list_del_init(&bo->list); | 279 | list_del_init(&bo->list); |
265 | mutex_unlock(&bo->rdev->gem.mutex); | 280 | mutex_unlock(&bo->rdev->gem.mutex); |
266 | radeon_bo_unref(&bo); | 281 | /* this should unref the ttm bo */ |
267 | gobj->driver_private = NULL; | 282 | drm_gem_object_unreference(&bo->gem_base); |
268 | drm_gem_object_unreference(gobj); | ||
269 | mutex_unlock(&rdev->ddev->struct_mutex); | 283 | mutex_unlock(&rdev->ddev->struct_mutex); |
270 | } | 284 | } |
271 | } | 285 | } |
@@ -292,34 +306,9 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj, | |||
292 | struct list_head *head) | 306 | struct list_head *head) |
293 | { | 307 | { |
294 | if (lobj->wdomain) { | 308 | if (lobj->wdomain) { |
295 | list_add(&lobj->list, head); | 309 | list_add(&lobj->tv.head, head); |
296 | } else { | 310 | } else { |
297 | list_add_tail(&lobj->list, head); | 311 | list_add_tail(&lobj->tv.head, head); |
298 | } | ||
299 | } | ||
300 | |||
301 | int radeon_bo_list_reserve(struct list_head *head) | ||
302 | { | ||
303 | struct radeon_bo_list *lobj; | ||
304 | int r; | ||
305 | |||
306 | list_for_each_entry(lobj, head, list){ | ||
307 | r = radeon_bo_reserve(lobj->bo, false); | ||
308 | if (unlikely(r != 0)) | ||
309 | return r; | ||
310 | lobj->reserved = true; | ||
311 | } | ||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | void radeon_bo_list_unreserve(struct list_head *head) | ||
316 | { | ||
317 | struct radeon_bo_list *lobj; | ||
318 | |||
319 | list_for_each_entry(lobj, head, list) { | ||
320 | /* only unreserve object we successfully reserved */ | ||
321 | if (lobj->reserved && radeon_bo_is_reserved(lobj->bo)) | ||
322 | radeon_bo_unreserve(lobj->bo); | ||
323 | } | 312 | } |
324 | } | 313 | } |
325 | 314 | ||
@@ -330,14 +319,11 @@ int radeon_bo_list_validate(struct list_head *head) | |||
330 | u32 domain; | 319 | u32 domain; |
331 | int r; | 320 | int r; |
332 | 321 | ||
333 | list_for_each_entry(lobj, head, list) { | 322 | r = ttm_eu_reserve_buffers(head); |
334 | lobj->reserved = false; | ||
335 | } | ||
336 | r = radeon_bo_list_reserve(head); | ||
337 | if (unlikely(r != 0)) { | 323 | if (unlikely(r != 0)) { |
338 | return r; | 324 | return r; |
339 | } | 325 | } |
340 | list_for_each_entry(lobj, head, list) { | 326 | list_for_each_entry(lobj, head, tv.head) { |
341 | bo = lobj->bo; | 327 | bo = lobj->bo; |
342 | if (!bo->pin_count) { | 328 | if (!bo->pin_count) { |
343 | domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain; | 329 | domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain; |
@@ -360,25 +346,6 @@ int radeon_bo_list_validate(struct list_head *head) | |||
360 | return 0; | 346 | return 0; |
361 | } | 347 | } |
362 | 348 | ||
363 | void radeon_bo_list_fence(struct list_head *head, void *fence) | ||
364 | { | ||
365 | struct radeon_bo_list *lobj; | ||
366 | struct radeon_bo *bo; | ||
367 | struct radeon_fence *old_fence = NULL; | ||
368 | |||
369 | list_for_each_entry(lobj, head, list) { | ||
370 | bo = lobj->bo; | ||
371 | spin_lock(&bo->tbo.lock); | ||
372 | old_fence = (struct radeon_fence *)bo->tbo.sync_obj; | ||
373 | bo->tbo.sync_obj = radeon_fence_ref(fence); | ||
374 | bo->tbo.sync_obj_arg = NULL; | ||
375 | spin_unlock(&bo->tbo.lock); | ||
376 | if (old_fence) { | ||
377 | radeon_fence_unref(&old_fence); | ||
378 | } | ||
379 | } | ||
380 | } | ||
381 | |||
382 | int radeon_bo_fbdev_mmap(struct radeon_bo *bo, | 349 | int radeon_bo_fbdev_mmap(struct radeon_bo *bo, |
383 | struct vm_area_struct *vma) | 350 | struct vm_area_struct *vma) |
384 | { | 351 | { |
@@ -435,7 +402,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo) | |||
435 | 402 | ||
436 | out: | 403 | out: |
437 | radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, | 404 | radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, |
438 | bo->tbo.mem.mm_node->start << PAGE_SHIFT, | 405 | bo->tbo.mem.start << PAGE_SHIFT, |
439 | bo->tbo.num_pages << PAGE_SHIFT); | 406 | bo->tbo.num_pages << PAGE_SHIFT); |
440 | return 0; | 407 | return 0; |
441 | } | 408 | } |
@@ -532,7 +499,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
532 | rdev = rbo->rdev; | 499 | rdev = rbo->rdev; |
533 | if (bo->mem.mem_type == TTM_PL_VRAM) { | 500 | if (bo->mem.mem_type == TTM_PL_VRAM) { |
534 | size = bo->mem.num_pages << PAGE_SHIFT; | 501 | size = bo->mem.num_pages << PAGE_SHIFT; |
535 | offset = bo->mem.mm_node->start << PAGE_SHIFT; | 502 | offset = bo->mem.start << PAGE_SHIFT; |
536 | if ((offset + size) > rdev->mc.visible_vram_size) { | 503 | if ((offset + size) > rdev->mc.visible_vram_size) { |
537 | /* hurrah the memory is not visible ! */ | 504 | /* hurrah the memory is not visible ! */ |
538 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); | 505 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); |
@@ -540,7 +507,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
540 | r = ttm_bo_validate(bo, &rbo->placement, false, true, false); | 507 | r = ttm_bo_validate(bo, &rbo->placement, false, true, false); |
541 | if (unlikely(r != 0)) | 508 | if (unlikely(r != 0)) |
542 | return r; | 509 | return r; |
543 | offset = bo->mem.mm_node->start << PAGE_SHIFT; | 510 | offset = bo->mem.start << PAGE_SHIFT; |
544 | /* this should not happen */ | 511 | /* this should not happen */ |
545 | if ((offset + size) > rdev->mc.visible_vram_size) | 512 | if ((offset + size) > rdev->mc.visible_vram_size) |
546 | return -EINVAL; | 513 | return -EINVAL; |
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 3481bc7f6f58..ede6c13628f2 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
@@ -87,7 +87,7 @@ static inline void radeon_bo_unreserve(struct radeon_bo *bo) | |||
87 | * Returns current GPU offset of the object. | 87 | * Returns current GPU offset of the object. |
88 | * | 88 | * |
89 | * Note: object should either be pinned or reserved when calling this | 89 | * Note: object should either be pinned or reserved when calling this |
90 | * function, it might be usefull to add check for this for debugging. | 90 | * function, it might be useful to add check for this for debugging. |
91 | */ | 91 | */ |
92 | static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo) | 92 | static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo) |
93 | { | 93 | { |
@@ -126,18 +126,18 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, | |||
126 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); | 126 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); |
127 | if (unlikely(r != 0)) | 127 | if (unlikely(r != 0)) |
128 | return r; | 128 | return r; |
129 | spin_lock(&bo->tbo.lock); | 129 | spin_lock(&bo->tbo.bdev->fence_lock); |
130 | if (mem_type) | 130 | if (mem_type) |
131 | *mem_type = bo->tbo.mem.mem_type; | 131 | *mem_type = bo->tbo.mem.mem_type; |
132 | if (bo->tbo.sync_obj) | 132 | if (bo->tbo.sync_obj) |
133 | r = ttm_bo_wait(&bo->tbo, true, true, no_wait); | 133 | r = ttm_bo_wait(&bo->tbo, true, true, no_wait); |
134 | spin_unlock(&bo->tbo.lock); | 134 | spin_unlock(&bo->tbo.bdev->fence_lock); |
135 | ttm_bo_unreserve(&bo->tbo); | 135 | ttm_bo_unreserve(&bo->tbo); |
136 | return r; | 136 | return r; |
137 | } | 137 | } |
138 | 138 | ||
139 | extern int radeon_bo_create(struct radeon_device *rdev, | 139 | extern int radeon_bo_create(struct radeon_device *rdev, |
140 | struct drm_gem_object *gobj, unsigned long size, | 140 | unsigned long size, int byte_align, |
141 | bool kernel, u32 domain, | 141 | bool kernel, u32 domain, |
142 | struct radeon_bo **bo_ptr); | 142 | struct radeon_bo **bo_ptr); |
143 | extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); | 143 | extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); |
@@ -151,10 +151,7 @@ extern int radeon_bo_init(struct radeon_device *rdev); | |||
151 | extern void radeon_bo_fini(struct radeon_device *rdev); | 151 | extern void radeon_bo_fini(struct radeon_device *rdev); |
152 | extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, | 152 | extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, |
153 | struct list_head *head); | 153 | struct list_head *head); |
154 | extern int radeon_bo_list_reserve(struct list_head *head); | ||
155 | extern void radeon_bo_list_unreserve(struct list_head *head); | ||
156 | extern int radeon_bo_list_validate(struct list_head *head); | 154 | extern int radeon_bo_list_validate(struct list_head *head); |
157 | extern void radeon_bo_list_fence(struct list_head *head, void *fence); | ||
158 | extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, | 155 | extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, |
159 | struct vm_area_struct *vma); | 156 | struct vm_area_struct *vma); |
160 | extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, | 157 | extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f87efec76236..aaa19dc418a0 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include "drmP.h" | 23 | #include "drmP.h" |
24 | #include "radeon.h" | 24 | #include "radeon.h" |
25 | #include "avivod.h" | 25 | #include "avivod.h" |
26 | #include "atom.h" | ||
26 | #ifdef CONFIG_ACPI | 27 | #ifdef CONFIG_ACPI |
27 | #include <linux/acpi.h> | 28 | #include <linux/acpi.h> |
28 | #endif | 29 | #endif |
@@ -167,13 +168,13 @@ static void radeon_set_power_state(struct radeon_device *rdev) | |||
167 | if (radeon_gui_idle(rdev)) { | 168 | if (radeon_gui_idle(rdev)) { |
168 | sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. | 169 | sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. |
169 | clock_info[rdev->pm.requested_clock_mode_index].sclk; | 170 | clock_info[rdev->pm.requested_clock_mode_index].sclk; |
170 | if (sclk > rdev->clock.default_sclk) | 171 | if (sclk > rdev->pm.default_sclk) |
171 | sclk = rdev->clock.default_sclk; | 172 | sclk = rdev->pm.default_sclk; |
172 | 173 | ||
173 | mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. | 174 | mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. |
174 | clock_info[rdev->pm.requested_clock_mode_index].mclk; | 175 | clock_info[rdev->pm.requested_clock_mode_index].mclk; |
175 | if (mclk > rdev->clock.default_mclk) | 176 | if (mclk > rdev->pm.default_mclk) |
176 | mclk = rdev->clock.default_mclk; | 177 | mclk = rdev->pm.default_mclk; |
177 | 178 | ||
178 | /* upvolt before raising clocks, downvolt after lowering clocks */ | 179 | /* upvolt before raising clocks, downvolt after lowering clocks */ |
179 | if (sclk < rdev->pm.current_sclk) | 180 | if (sclk < rdev->pm.current_sclk) |
@@ -365,12 +366,14 @@ static ssize_t radeon_set_pm_profile(struct device *dev, | |||
365 | else if (strncmp("high", buf, strlen("high")) == 0) | 366 | else if (strncmp("high", buf, strlen("high")) == 0) |
366 | rdev->pm.profile = PM_PROFILE_HIGH; | 367 | rdev->pm.profile = PM_PROFILE_HIGH; |
367 | else { | 368 | else { |
368 | DRM_ERROR("invalid power profile!\n"); | 369 | count = -EINVAL; |
369 | goto fail; | 370 | goto fail; |
370 | } | 371 | } |
371 | radeon_pm_update_profile(rdev); | 372 | radeon_pm_update_profile(rdev); |
372 | radeon_pm_set_clocks(rdev); | 373 | radeon_pm_set_clocks(rdev); |
373 | } | 374 | } else |
375 | count = -EINVAL; | ||
376 | |||
374 | fail: | 377 | fail: |
375 | mutex_unlock(&rdev->pm.mutex); | 378 | mutex_unlock(&rdev->pm.mutex); |
376 | 379 | ||
@@ -405,22 +408,15 @@ static ssize_t radeon_set_pm_method(struct device *dev, | |||
405 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | 408 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; |
406 | mutex_unlock(&rdev->pm.mutex); | 409 | mutex_unlock(&rdev->pm.mutex); |
407 | } else if (strncmp("profile", buf, strlen("profile")) == 0) { | 410 | } else if (strncmp("profile", buf, strlen("profile")) == 0) { |
408 | bool flush_wq = false; | ||
409 | |||
410 | mutex_lock(&rdev->pm.mutex); | 411 | mutex_lock(&rdev->pm.mutex); |
411 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | ||
412 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
413 | flush_wq = true; | ||
414 | } | ||
415 | /* disable dynpm */ | 412 | /* disable dynpm */ |
416 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | 413 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
417 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; | 414 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
418 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 415 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
419 | mutex_unlock(&rdev->pm.mutex); | 416 | mutex_unlock(&rdev->pm.mutex); |
420 | if (flush_wq) | 417 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); |
421 | flush_workqueue(rdev->wq); | ||
422 | } else { | 418 | } else { |
423 | DRM_ERROR("invalid power method!\n"); | 419 | count = -EINVAL; |
424 | goto fail; | 420 | goto fail; |
425 | } | 421 | } |
426 | radeon_pm_compute_clocks(rdev); | 422 | radeon_pm_compute_clocks(rdev); |
@@ -437,7 +433,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, | |||
437 | { | 433 | { |
438 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 434 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); |
439 | struct radeon_device *rdev = ddev->dev_private; | 435 | struct radeon_device *rdev = ddev->dev_private; |
440 | u32 temp; | 436 | int temp; |
441 | 437 | ||
442 | switch (rdev->pm.int_thermal_type) { | 438 | switch (rdev->pm.int_thermal_type) { |
443 | case THERMAL_TYPE_RV6XX: | 439 | case THERMAL_TYPE_RV6XX: |
@@ -447,8 +443,12 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, | |||
447 | temp = rv770_get_temp(rdev); | 443 | temp = rv770_get_temp(rdev); |
448 | break; | 444 | break; |
449 | case THERMAL_TYPE_EVERGREEN: | 445 | case THERMAL_TYPE_EVERGREEN: |
446 | case THERMAL_TYPE_NI: | ||
450 | temp = evergreen_get_temp(rdev); | 447 | temp = evergreen_get_temp(rdev); |
451 | break; | 448 | break; |
449 | case THERMAL_TYPE_SUMO: | ||
450 | temp = sumo_get_temp(rdev); | ||
451 | break; | ||
452 | default: | 452 | default: |
453 | temp = 0; | 453 | temp = 0; |
454 | break; | 454 | break; |
@@ -487,6 +487,8 @@ static int radeon_hwmon_init(struct radeon_device *rdev) | |||
487 | case THERMAL_TYPE_RV6XX: | 487 | case THERMAL_TYPE_RV6XX: |
488 | case THERMAL_TYPE_RV770: | 488 | case THERMAL_TYPE_RV770: |
489 | case THERMAL_TYPE_EVERGREEN: | 489 | case THERMAL_TYPE_EVERGREEN: |
490 | case THERMAL_TYPE_NI: | ||
491 | case THERMAL_TYPE_SUMO: | ||
490 | rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); | 492 | rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); |
491 | if (IS_ERR(rdev->pm.int_hwmon_dev)) { | 493 | if (IS_ERR(rdev->pm.int_hwmon_dev)) { |
492 | err = PTR_ERR(rdev->pm.int_hwmon_dev); | 494 | err = PTR_ERR(rdev->pm.int_hwmon_dev); |
@@ -520,34 +522,44 @@ static void radeon_hwmon_fini(struct radeon_device *rdev) | |||
520 | 522 | ||
521 | void radeon_pm_suspend(struct radeon_device *rdev) | 523 | void radeon_pm_suspend(struct radeon_device *rdev) |
522 | { | 524 | { |
523 | bool flush_wq = false; | ||
524 | |||
525 | mutex_lock(&rdev->pm.mutex); | 525 | mutex_lock(&rdev->pm.mutex); |
526 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | 526 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
527 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
528 | if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) | 527 | if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) |
529 | rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; | 528 | rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; |
530 | flush_wq = true; | ||
531 | } | 529 | } |
532 | mutex_unlock(&rdev->pm.mutex); | 530 | mutex_unlock(&rdev->pm.mutex); |
533 | if (flush_wq) | 531 | |
534 | flush_workqueue(rdev->wq); | 532 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); |
535 | } | 533 | } |
536 | 534 | ||
537 | void radeon_pm_resume(struct radeon_device *rdev) | 535 | void radeon_pm_resume(struct radeon_device *rdev) |
538 | { | 536 | { |
537 | /* set up the default clocks if the MC ucode is loaded */ | ||
538 | if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { | ||
539 | if (rdev->pm.default_vddc) | ||
540 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, | ||
541 | SET_VOLTAGE_TYPE_ASIC_VDDC); | ||
542 | if (rdev->pm.default_vddci) | ||
543 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, | ||
544 | SET_VOLTAGE_TYPE_ASIC_VDDCI); | ||
545 | if (rdev->pm.default_sclk) | ||
546 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | ||
547 | if (rdev->pm.default_mclk) | ||
548 | radeon_set_memory_clock(rdev, rdev->pm.default_mclk); | ||
549 | } | ||
539 | /* asic init will reset the default power state */ | 550 | /* asic init will reset the default power state */ |
540 | mutex_lock(&rdev->pm.mutex); | 551 | mutex_lock(&rdev->pm.mutex); |
541 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; | 552 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; |
542 | rdev->pm.current_clock_mode_index = 0; | 553 | rdev->pm.current_clock_mode_index = 0; |
543 | rdev->pm.current_sclk = rdev->clock.default_sclk; | 554 | rdev->pm.current_sclk = rdev->pm.default_sclk; |
544 | rdev->pm.current_mclk = rdev->clock.default_mclk; | 555 | rdev->pm.current_mclk = rdev->pm.default_mclk; |
545 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; | 556 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; |
557 | rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; | ||
546 | if (rdev->pm.pm_method == PM_METHOD_DYNPM | 558 | if (rdev->pm.pm_method == PM_METHOD_DYNPM |
547 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { | 559 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { |
548 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; | 560 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; |
549 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | 561 | schedule_delayed_work(&rdev->pm.dynpm_idle_work, |
550 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 562 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
551 | } | 563 | } |
552 | mutex_unlock(&rdev->pm.mutex); | 564 | mutex_unlock(&rdev->pm.mutex); |
553 | radeon_pm_compute_clocks(rdev); | 565 | radeon_pm_compute_clocks(rdev); |
@@ -564,6 +576,8 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
564 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; | 576 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
565 | rdev->pm.dynpm_can_upclock = true; | 577 | rdev->pm.dynpm_can_upclock = true; |
566 | rdev->pm.dynpm_can_downclock = true; | 578 | rdev->pm.dynpm_can_downclock = true; |
579 | rdev->pm.default_sclk = rdev->clock.default_sclk; | ||
580 | rdev->pm.default_mclk = rdev->clock.default_mclk; | ||
567 | rdev->pm.current_sclk = rdev->clock.default_sclk; | 581 | rdev->pm.current_sclk = rdev->clock.default_sclk; |
568 | rdev->pm.current_mclk = rdev->clock.default_mclk; | 582 | rdev->pm.current_mclk = rdev->clock.default_mclk; |
569 | rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; | 583 | rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; |
@@ -575,12 +589,25 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
575 | radeon_combios_get_power_modes(rdev); | 589 | radeon_combios_get_power_modes(rdev); |
576 | radeon_pm_print_states(rdev); | 590 | radeon_pm_print_states(rdev); |
577 | radeon_pm_init_profile(rdev); | 591 | radeon_pm_init_profile(rdev); |
592 | /* set up the default clocks if the MC ucode is loaded */ | ||
593 | if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { | ||
594 | if (rdev->pm.default_vddc) | ||
595 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, | ||
596 | SET_VOLTAGE_TYPE_ASIC_VDDC); | ||
597 | if (rdev->pm.default_sclk) | ||
598 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | ||
599 | if (rdev->pm.default_mclk) | ||
600 | radeon_set_memory_clock(rdev, rdev->pm.default_mclk); | ||
601 | } | ||
578 | } | 602 | } |
579 | 603 | ||
580 | /* set up the internal thermal sensor if applicable */ | 604 | /* set up the internal thermal sensor if applicable */ |
581 | ret = radeon_hwmon_init(rdev); | 605 | ret = radeon_hwmon_init(rdev); |
582 | if (ret) | 606 | if (ret) |
583 | return ret; | 607 | return ret; |
608 | |||
609 | INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); | ||
610 | |||
584 | if (rdev->pm.num_power_states > 1) { | 611 | if (rdev->pm.num_power_states > 1) { |
585 | /* where's the best place to put these? */ | 612 | /* where's the best place to put these? */ |
586 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); | 613 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); |
@@ -594,8 +621,6 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
594 | rdev->acpi_nb.notifier_call = radeon_acpi_event; | 621 | rdev->acpi_nb.notifier_call = radeon_acpi_event; |
595 | register_acpi_notifier(&rdev->acpi_nb); | 622 | register_acpi_notifier(&rdev->acpi_nb); |
596 | #endif | 623 | #endif |
597 | INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); | ||
598 | |||
599 | if (radeon_debugfs_pm_init(rdev)) { | 624 | if (radeon_debugfs_pm_init(rdev)) { |
600 | DRM_ERROR("Failed to register debugfs file for PM!\n"); | 625 | DRM_ERROR("Failed to register debugfs file for PM!\n"); |
601 | } | 626 | } |
@@ -609,25 +634,20 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
609 | void radeon_pm_fini(struct radeon_device *rdev) | 634 | void radeon_pm_fini(struct radeon_device *rdev) |
610 | { | 635 | { |
611 | if (rdev->pm.num_power_states > 1) { | 636 | if (rdev->pm.num_power_states > 1) { |
612 | bool flush_wq = false; | ||
613 | |||
614 | mutex_lock(&rdev->pm.mutex); | 637 | mutex_lock(&rdev->pm.mutex); |
615 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | 638 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
616 | rdev->pm.profile = PM_PROFILE_DEFAULT; | 639 | rdev->pm.profile = PM_PROFILE_DEFAULT; |
617 | radeon_pm_update_profile(rdev); | 640 | radeon_pm_update_profile(rdev); |
618 | radeon_pm_set_clocks(rdev); | 641 | radeon_pm_set_clocks(rdev); |
619 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | 642 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
620 | /* cancel work */ | ||
621 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
622 | flush_wq = true; | ||
623 | /* reset default clocks */ | 643 | /* reset default clocks */ |
624 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | 644 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
625 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | 645 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; |
626 | radeon_pm_set_clocks(rdev); | 646 | radeon_pm_set_clocks(rdev); |
627 | } | 647 | } |
628 | mutex_unlock(&rdev->pm.mutex); | 648 | mutex_unlock(&rdev->pm.mutex); |
629 | if (flush_wq) | 649 | |
630 | flush_workqueue(rdev->wq); | 650 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); |
631 | 651 | ||
632 | device_remove_file(rdev->dev, &dev_attr_power_profile); | 652 | device_remove_file(rdev->dev, &dev_attr_power_profile); |
633 | device_remove_file(rdev->dev, &dev_attr_power_method); | 653 | device_remove_file(rdev->dev, &dev_attr_power_method); |
@@ -636,6 +656,9 @@ void radeon_pm_fini(struct radeon_device *rdev) | |||
636 | #endif | 656 | #endif |
637 | } | 657 | } |
638 | 658 | ||
659 | if (rdev->pm.power_state) | ||
660 | kfree(rdev->pm.power_state); | ||
661 | |||
639 | radeon_hwmon_fini(rdev); | 662 | radeon_hwmon_fini(rdev); |
640 | } | 663 | } |
641 | 664 | ||
@@ -686,12 +709,12 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) | |||
686 | radeon_pm_get_dynpm_state(rdev); | 709 | radeon_pm_get_dynpm_state(rdev); |
687 | radeon_pm_set_clocks(rdev); | 710 | radeon_pm_set_clocks(rdev); |
688 | 711 | ||
689 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | 712 | schedule_delayed_work(&rdev->pm.dynpm_idle_work, |
690 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 713 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
691 | } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { | 714 | } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { |
692 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; | 715 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; |
693 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | 716 | schedule_delayed_work(&rdev->pm.dynpm_idle_work, |
694 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 717 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
695 | DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); | 718 | DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); |
696 | } | 719 | } |
697 | } else { /* count == 0 */ | 720 | } else { /* count == 0 */ |
@@ -712,73 +735,21 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) | |||
712 | 735 | ||
713 | static bool radeon_pm_in_vbl(struct radeon_device *rdev) | 736 | static bool radeon_pm_in_vbl(struct radeon_device *rdev) |
714 | { | 737 | { |
715 | u32 stat_crtc = 0, vbl = 0, position = 0; | 738 | int crtc, vpos, hpos, vbl_status; |
716 | bool in_vbl = true; | 739 | bool in_vbl = true; |
717 | 740 | ||
718 | if (ASIC_IS_DCE4(rdev)) { | 741 | /* Iterate over all active crtc's. All crtc's must be in vblank, |
719 | if (rdev->pm.active_crtcs & (1 << 0)) { | 742 | * otherwise return in_vbl == false. |
720 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | 743 | */ |
721 | EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; | 744 | for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { |
722 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | 745 | if (rdev->pm.active_crtcs & (1 << crtc)) { |
723 | EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; | 746 | vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos); |
724 | } | 747 | if ((vbl_status & DRM_SCANOUTPOS_VALID) && |
725 | if (rdev->pm.active_crtcs & (1 << 1)) { | 748 | !(vbl_status & DRM_SCANOUTPOS_INVBL)) |
726 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
727 | EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; | ||
728 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
729 | EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; | ||
730 | } | ||
731 | if (rdev->pm.active_crtcs & (1 << 2)) { | ||
732 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
733 | EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; | ||
734 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
735 | EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; | ||
736 | } | ||
737 | if (rdev->pm.active_crtcs & (1 << 3)) { | ||
738 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
739 | EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; | ||
740 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
741 | EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; | ||
742 | } | ||
743 | if (rdev->pm.active_crtcs & (1 << 4)) { | ||
744 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
745 | EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; | ||
746 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
747 | EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; | ||
748 | } | ||
749 | if (rdev->pm.active_crtcs & (1 << 5)) { | ||
750 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
751 | EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; | ||
752 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
753 | EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; | ||
754 | } | ||
755 | } else if (ASIC_IS_AVIVO(rdev)) { | ||
756 | if (rdev->pm.active_crtcs & (1 << 0)) { | ||
757 | vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff; | ||
758 | position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff; | ||
759 | } | ||
760 | if (rdev->pm.active_crtcs & (1 << 1)) { | ||
761 | vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff; | ||
762 | position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff; | ||
763 | } | ||
764 | if (position < vbl && position > 1) | ||
765 | in_vbl = false; | ||
766 | } else { | ||
767 | if (rdev->pm.active_crtcs & (1 << 0)) { | ||
768 | stat_crtc = RREG32(RADEON_CRTC_STATUS); | ||
769 | if (!(stat_crtc & 1)) | ||
770 | in_vbl = false; | ||
771 | } | ||
772 | if (rdev->pm.active_crtcs & (1 << 1)) { | ||
773 | stat_crtc = RREG32(RADEON_CRTC2_STATUS); | ||
774 | if (!(stat_crtc & 1)) | ||
775 | in_vbl = false; | 749 | in_vbl = false; |
776 | } | 750 | } |
777 | } | 751 | } |
778 | 752 | ||
779 | if (position < vbl && position > 1) | ||
780 | in_vbl = false; | ||
781 | |||
782 | return in_vbl; | 753 | return in_vbl; |
783 | } | 754 | } |
784 | 755 | ||
@@ -848,8 +819,8 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work) | |||
848 | radeon_pm_set_clocks(rdev); | 819 | radeon_pm_set_clocks(rdev); |
849 | } | 820 | } |
850 | 821 | ||
851 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | 822 | schedule_delayed_work(&rdev->pm.dynpm_idle_work, |
852 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 823 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
853 | } | 824 | } |
854 | mutex_unlock(&rdev->pm.mutex); | 825 | mutex_unlock(&rdev->pm.mutex); |
855 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | 826 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); |
@@ -866,9 +837,9 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) | |||
866 | struct drm_device *dev = node->minor->dev; | 837 | struct drm_device *dev = node->minor->dev; |
867 | struct radeon_device *rdev = dev->dev_private; | 838 | struct radeon_device *rdev = dev->dev_private; |
868 | 839 | ||
869 | seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); | 840 | seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); |
870 | seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); | 841 | seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); |
871 | seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); | 842 | seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); |
872 | if (rdev->asic->get_memory_clock) | 843 | if (rdev->asic->get_memory_clock) |
873 | seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); | 844 | seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); |
874 | if (rdev->pm.current_vddc) | 845 | if (rdev->pm.current_vddc) |
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index c332f46340d5..bc44a3d35ec6 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
@@ -55,6 +55,7 @@ | |||
55 | #include "r500_reg.h" | 55 | #include "r500_reg.h" |
56 | #include "r600_reg.h" | 56 | #include "r600_reg.h" |
57 | #include "evergreen_reg.h" | 57 | #include "evergreen_reg.h" |
58 | #include "ni_reg.h" | ||
58 | 59 | ||
59 | #define RADEON_MC_AGP_LOCATION 0x014c | 60 | #define RADEON_MC_AGP_LOCATION 0x014c |
60 | #define RADEON_MC_AGP_START_MASK 0x0000FFFF | 61 | #define RADEON_MC_AGP_START_MASK 0x0000FFFF |
@@ -299,6 +300,8 @@ | |||
299 | # define RADEON_BUS_READ_BURST (1 << 30) | 300 | # define RADEON_BUS_READ_BURST (1 << 30) |
300 | #define RADEON_BUS_CNTL1 0x0034 | 301 | #define RADEON_BUS_CNTL1 0x0034 |
301 | # define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4) | 302 | # define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4) |
303 | #define RV370_BUS_CNTL 0x004c | ||
304 | # define RV370_BUS_BIOS_DIS_ROM (1 << 2) | ||
302 | /* rv370/rv380, rv410, r423/r430/r480, r5xx */ | 305 | /* rv370/rv380, rv410, r423/r430/r480, r5xx */ |
303 | #define RADEON_MSI_REARM_EN 0x0160 | 306 | #define RADEON_MSI_REARM_EN 0x0160 |
304 | # define RV370_MSI_REARM_EN (1 << 0) | 307 | # define RV370_MSI_REARM_EN (1 << 0) |
@@ -320,6 +323,15 @@ | |||
320 | # define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8) | 323 | # define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8) |
321 | # define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9) | 324 | # define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9) |
322 | # define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10) | 325 | # define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10) |
326 | # define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) | ||
327 | # define R600_PCIE_LC_RENEGOTIATION_SUPPORT (1 << 9) | ||
328 | # define R600_PCIE_LC_RENEGOTIATE_EN (1 << 10) | ||
329 | # define R600_PCIE_LC_SHORT_RECONFIG_EN (1 << 11) | ||
330 | # define R600_PCIE_LC_UPCONFIGURE_SUPPORT (1 << 12) | ||
331 | # define R600_PCIE_LC_UPCONFIGURE_DIS (1 << 13) | ||
332 | |||
333 | #define R600_TARGET_AND_CURRENT_PROFILE_INDEX 0x70c | ||
334 | #define R700_TARGET_AND_CURRENT_PROFILE_INDEX 0x66c | ||
323 | 335 | ||
324 | #define RADEON_CACHE_CNTL 0x1724 | 336 | #define RADEON_CACHE_CNTL 0x1724 |
325 | #define RADEON_CACHE_LINE 0x0f0c /* PCI */ | 337 | #define RADEON_CACHE_LINE 0x0f0c /* PCI */ |
@@ -365,6 +377,8 @@ | |||
365 | #define RADEON_CONFIG_APER_SIZE 0x0108 | 377 | #define RADEON_CONFIG_APER_SIZE 0x0108 |
366 | #define RADEON_CONFIG_BONDS 0x00e8 | 378 | #define RADEON_CONFIG_BONDS 0x00e8 |
367 | #define RADEON_CONFIG_CNTL 0x00e0 | 379 | #define RADEON_CONFIG_CNTL 0x00e0 |
380 | # define RADEON_CFG_VGA_RAM_EN (1 << 8) | ||
381 | # define RADEON_CFG_VGA_IO_DIS (1 << 9) | ||
368 | # define RADEON_CFG_ATI_REV_A11 (0 << 16) | 382 | # define RADEON_CFG_ATI_REV_A11 (0 << 16) |
369 | # define RADEON_CFG_ATI_REV_A12 (1 << 16) | 383 | # define RADEON_CFG_ATI_REV_A12 (1 << 16) |
370 | # define RADEON_CFG_ATI_REV_A13 (2 << 16) | 384 | # define RADEON_CFG_ATI_REV_A13 (2 << 16) |
@@ -422,6 +436,7 @@ | |||
422 | # define RADEON_CRTC_CSYNC_EN (1 << 4) | 436 | # define RADEON_CRTC_CSYNC_EN (1 << 4) |
423 | # define RADEON_CRTC_ICON_EN (1 << 15) | 437 | # define RADEON_CRTC_ICON_EN (1 << 15) |
424 | # define RADEON_CRTC_CUR_EN (1 << 16) | 438 | # define RADEON_CRTC_CUR_EN (1 << 16) |
439 | # define RADEON_CRTC_VSTAT_MODE_MASK (3 << 17) | ||
425 | # define RADEON_CRTC_CUR_MODE_MASK (7 << 20) | 440 | # define RADEON_CRTC_CUR_MODE_MASK (7 << 20) |
426 | # define RADEON_CRTC_CUR_MODE_SHIFT 20 | 441 | # define RADEON_CRTC_CUR_MODE_SHIFT 20 |
427 | # define RADEON_CRTC_CUR_MODE_MONO 0 | 442 | # define RADEON_CRTC_CUR_MODE_MONO 0 |
@@ -509,6 +524,8 @@ | |||
509 | # define RADEON_CRTC_TILE_EN (1 << 15) | 524 | # define RADEON_CRTC_TILE_EN (1 << 15) |
510 | # define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) | 525 | # define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) |
511 | # define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17) | 526 | # define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17) |
527 | # define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN (1 << 28) | ||
528 | # define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN (1 << 29) | ||
512 | 529 | ||
513 | #define R300_CRTC_TILE_X0_Y0 0x0350 | 530 | #define R300_CRTC_TILE_X0_Y0 0x0350 |
514 | #define R300_CRTC2_TILE_X0_Y0 0x0358 | 531 | #define R300_CRTC2_TILE_X0_Y0 0x0358 |
@@ -2836,6 +2853,7 @@ | |||
2836 | # define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24) | 2853 | # define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24) |
2837 | # define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24) | 2854 | # define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24) |
2838 | # define R200_TXFORMAT_ST_ROUTE_SHIFT 24 | 2855 | # define R200_TXFORMAT_ST_ROUTE_SHIFT 24 |
2856 | # define R200_TXFORMAT_LOOKUP_DISABLE (1 << 27) | ||
2839 | # define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28) | 2857 | # define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28) |
2840 | # define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29) | 2858 | # define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29) |
2841 | # define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30) | 2859 | # define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30) |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 261e98a276db..08c0233db1b8 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -151,7 +151,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) | |||
151 | /* 64 dwords should be enough for fence too */ | 151 | /* 64 dwords should be enough for fence too */ |
152 | r = radeon_ring_lock(rdev, 64); | 152 | r = radeon_ring_lock(rdev, 64); |
153 | if (r) { | 153 | if (r) { |
154 | DRM_ERROR("radeon: scheduling IB failled (%d).\n", r); | 154 | DRM_ERROR("radeon: scheduling IB failed (%d).\n", r); |
155 | return r; | 155 | return r; |
156 | } | 156 | } |
157 | radeon_ring_ib_execute(rdev, ib); | 157 | radeon_ring_ib_execute(rdev, ib); |
@@ -175,9 +175,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
175 | return 0; | 175 | return 0; |
176 | INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib); | 176 | INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib); |
177 | /* Allocate 1M object buffer */ | 177 | /* Allocate 1M object buffer */ |
178 | r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, | 178 | r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024, |
179 | true, RADEON_GEM_DOMAIN_GTT, | 179 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, |
180 | &rdev->ib_pool.robj); | 180 | &rdev->ib_pool.robj); |
181 | if (r) { | 181 | if (r) { |
182 | DRM_ERROR("radeon: failed to ib pool (%d).\n", r); | 182 | DRM_ERROR("radeon: failed to ib pool (%d).\n", r); |
183 | return r; | 183 | return r; |
@@ -194,7 +194,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
194 | r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr); | 194 | r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr); |
195 | radeon_bo_unreserve(rdev->ib_pool.robj); | 195 | radeon_bo_unreserve(rdev->ib_pool.robj); |
196 | if (r) { | 196 | if (r) { |
197 | DRM_ERROR("radeon: failed to map ib poll (%d).\n", r); | 197 | DRM_ERROR("radeon: failed to map ib pool (%d).\n", r); |
198 | return r; | 198 | return r; |
199 | } | 199 | } |
200 | for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { | 200 | for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { |
@@ -247,10 +247,14 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) | |||
247 | */ | 247 | */ |
248 | void radeon_ring_free_size(struct radeon_device *rdev) | 248 | void radeon_ring_free_size(struct radeon_device *rdev) |
249 | { | 249 | { |
250 | if (rdev->family >= CHIP_R600) | 250 | if (rdev->wb.enabled) |
251 | rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); | 251 | rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]); |
252 | else | 252 | else { |
253 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); | 253 | if (rdev->family >= CHIP_R600) |
254 | rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); | ||
255 | else | ||
256 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); | ||
257 | } | ||
254 | /* This works because ring_size is a power of 2 */ | 258 | /* This works because ring_size is a power of 2 */ |
255 | rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4)); | 259 | rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4)); |
256 | rdev->cp.ring_free_dw -= rdev->cp.wptr; | 260 | rdev->cp.ring_free_dw -= rdev->cp.wptr; |
@@ -328,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) | |||
328 | rdev->cp.ring_size = ring_size; | 332 | rdev->cp.ring_size = ring_size; |
329 | /* Allocate ring buffer */ | 333 | /* Allocate ring buffer */ |
330 | if (rdev->cp.ring_obj == NULL) { | 334 | if (rdev->cp.ring_obj == NULL) { |
331 | r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true, | 335 | r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true, |
332 | RADEON_GEM_DOMAIN_GTT, | 336 | RADEON_GEM_DOMAIN_GTT, |
333 | &rdev->cp.ring_obj); | 337 | &rdev->cp.ring_obj); |
334 | if (r) { | 338 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 4ae5a3d1074e..92e7ea73b7c5 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c | |||
@@ -980,7 +980,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, | |||
980 | } | 980 | } |
981 | 981 | ||
982 | /* hyper z clear */ | 982 | /* hyper z clear */ |
983 | /* no docs available, based on reverse engeneering by Stephane Marchesin */ | 983 | /* no docs available, based on reverse engineering by Stephane Marchesin */ |
984 | if ((flags & (RADEON_DEPTH | RADEON_STENCIL)) | 984 | if ((flags & (RADEON_DEPTH | RADEON_STENCIL)) |
985 | && (flags & RADEON_CLEAR_FASTZ)) { | 985 | && (flags & RADEON_CLEAR_FASTZ)) { |
986 | 986 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 313c96bc09da..dee4a0c1b4b2 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
52 | goto out_cleanup; | 52 | goto out_cleanup; |
53 | } | 53 | } |
54 | 54 | ||
55 | r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, | 55 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
56 | &vram_obj); | 56 | &vram_obj); |
57 | if (r) { | 57 | if (r) { |
58 | DRM_ERROR("Failed to create VRAM object\n"); | 58 | DRM_ERROR("Failed to create VRAM object\n"); |
@@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
71 | void **gtt_start, **gtt_end; | 71 | void **gtt_start, **gtt_end; |
72 | void **vram_start, **vram_end; | 72 | void **vram_start, **vram_end; |
73 | 73 | ||
74 | r = radeon_bo_create(rdev, NULL, size, true, | 74 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, |
75 | RADEON_GEM_DOMAIN_GTT, gtt_obj + i); | 75 | RADEON_GEM_DOMAIN_GTT, gtt_obj + i); |
76 | if (r) { | 76 | if (r) { |
77 | DRM_ERROR("Failed to create GTT object %d\n", i); | 77 | DRM_ERROR("Failed to create GTT object %d\n", i); |
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h new file mode 100644 index 000000000000..eafd8160a155 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_trace.h | |||
@@ -0,0 +1,82 @@ | |||
1 | #if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _RADEON_TRACE_H_ | ||
3 | |||
4 | #include <linux/stringify.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <linux/tracepoint.h> | ||
7 | |||
8 | #include <drm/drmP.h> | ||
9 | |||
10 | #undef TRACE_SYSTEM | ||
11 | #define TRACE_SYSTEM radeon | ||
12 | #define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM) | ||
13 | #define TRACE_INCLUDE_FILE radeon_trace | ||
14 | |||
15 | TRACE_EVENT(radeon_bo_create, | ||
16 | TP_PROTO(struct radeon_bo *bo), | ||
17 | TP_ARGS(bo), | ||
18 | TP_STRUCT__entry( | ||
19 | __field(struct radeon_bo *, bo) | ||
20 | __field(u32, pages) | ||
21 | ), | ||
22 | |||
23 | TP_fast_assign( | ||
24 | __entry->bo = bo; | ||
25 | __entry->pages = bo->tbo.num_pages; | ||
26 | ), | ||
27 | TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) | ||
28 | ); | ||
29 | |||
30 | DECLARE_EVENT_CLASS(radeon_fence_request, | ||
31 | |||
32 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
33 | |||
34 | TP_ARGS(dev, seqno), | ||
35 | |||
36 | TP_STRUCT__entry( | ||
37 | __field(u32, dev) | ||
38 | __field(u32, seqno) | ||
39 | ), | ||
40 | |||
41 | TP_fast_assign( | ||
42 | __entry->dev = dev->primary->index; | ||
43 | __entry->seqno = seqno; | ||
44 | ), | ||
45 | |||
46 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) | ||
47 | ); | ||
48 | |||
49 | DEFINE_EVENT(radeon_fence_request, radeon_fence_emit, | ||
50 | |||
51 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
52 | |||
53 | TP_ARGS(dev, seqno) | ||
54 | ); | ||
55 | |||
56 | DEFINE_EVENT(radeon_fence_request, radeon_fence_retire, | ||
57 | |||
58 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
59 | |||
60 | TP_ARGS(dev, seqno) | ||
61 | ); | ||
62 | |||
63 | DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin, | ||
64 | |||
65 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
66 | |||
67 | TP_ARGS(dev, seqno) | ||
68 | ); | ||
69 | |||
70 | DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end, | ||
71 | |||
72 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
73 | |||
74 | TP_ARGS(dev, seqno) | ||
75 | ); | ||
76 | |||
77 | #endif | ||
78 | |||
79 | /* This part must be outside protection */ | ||
80 | #undef TRACE_INCLUDE_PATH | ||
81 | #define TRACE_INCLUDE_PATH . | ||
82 | #include <trace/define_trace.h> | ||
diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c new file mode 100644 index 000000000000..8175993df84d --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_trace_points.c | |||
@@ -0,0 +1,9 @@ | |||
1 | /* Copyright Red Hat Inc 2010. | ||
2 | * Author : Dave Airlie <airlied@redhat.com> | ||
3 | */ | ||
4 | #include <drm/drmP.h> | ||
5 | #include "radeon_drm.h" | ||
6 | #include "radeon.h" | ||
7 | |||
8 | #define CREATE_TRACE_POINTS | ||
9 | #include "radeon_trace.h" | ||
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 84c53e41a88f..60125ddba1e9 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -152,6 +152,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
152 | man->default_caching = TTM_PL_FLAG_CACHED; | 152 | man->default_caching = TTM_PL_FLAG_CACHED; |
153 | break; | 153 | break; |
154 | case TTM_PL_TT: | 154 | case TTM_PL_TT: |
155 | man->func = &ttm_bo_manager_func; | ||
155 | man->gpu_offset = rdev->mc.gtt_start; | 156 | man->gpu_offset = rdev->mc.gtt_start; |
156 | man->available_caching = TTM_PL_MASK_CACHING; | 157 | man->available_caching = TTM_PL_MASK_CACHING; |
157 | man->default_caching = TTM_PL_FLAG_CACHED; | 158 | man->default_caching = TTM_PL_FLAG_CACHED; |
@@ -173,6 +174,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
173 | break; | 174 | break; |
174 | case TTM_PL_VRAM: | 175 | case TTM_PL_VRAM: |
175 | /* "On-card" video ram */ | 176 | /* "On-card" video ram */ |
177 | man->func = &ttm_bo_manager_func; | ||
176 | man->gpu_offset = rdev->mc.vram_start; | 178 | man->gpu_offset = rdev->mc.vram_start; |
177 | man->flags = TTM_MEMTYPE_FLAG_FIXED | | 179 | man->flags = TTM_MEMTYPE_FLAG_FIXED | |
178 | TTM_MEMTYPE_FLAG_MAPPABLE; | 180 | TTM_MEMTYPE_FLAG_MAPPABLE; |
@@ -246,8 +248,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, | |||
246 | if (unlikely(r)) { | 248 | if (unlikely(r)) { |
247 | return r; | 249 | return r; |
248 | } | 250 | } |
249 | old_start = old_mem->mm_node->start << PAGE_SHIFT; | 251 | old_start = old_mem->start << PAGE_SHIFT; |
250 | new_start = new_mem->mm_node->start << PAGE_SHIFT; | 252 | new_start = new_mem->start << PAGE_SHIFT; |
251 | 253 | ||
252 | switch (old_mem->mem_type) { | 254 | switch (old_mem->mem_type) { |
253 | case TTM_PL_VRAM: | 255 | case TTM_PL_VRAM: |
@@ -326,14 +328,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, | |||
326 | } | 328 | } |
327 | r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); | 329 | r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); |
328 | out_cleanup: | 330 | out_cleanup: |
329 | if (tmp_mem.mm_node) { | 331 | ttm_bo_mem_put(bo, &tmp_mem); |
330 | struct ttm_bo_global *glob = rdev->mman.bdev.glob; | ||
331 | |||
332 | spin_lock(&glob->lru_lock); | ||
333 | drm_mm_put_block(tmp_mem.mm_node); | ||
334 | spin_unlock(&glob->lru_lock); | ||
335 | return r; | ||
336 | } | ||
337 | return r; | 332 | return r; |
338 | } | 333 | } |
339 | 334 | ||
@@ -372,14 +367,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, | |||
372 | goto out_cleanup; | 367 | goto out_cleanup; |
373 | } | 368 | } |
374 | out_cleanup: | 369 | out_cleanup: |
375 | if (tmp_mem.mm_node) { | 370 | ttm_bo_mem_put(bo, &tmp_mem); |
376 | struct ttm_bo_global *glob = rdev->mman.bdev.glob; | ||
377 | |||
378 | spin_lock(&glob->lru_lock); | ||
379 | drm_mm_put_block(tmp_mem.mm_node); | ||
380 | spin_unlock(&glob->lru_lock); | ||
381 | return r; | ||
382 | } | ||
383 | return r; | 371 | return r; |
384 | } | 372 | } |
385 | 373 | ||
@@ -449,14 +437,14 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ | |||
449 | #if __OS_HAS_AGP | 437 | #if __OS_HAS_AGP |
450 | if (rdev->flags & RADEON_IS_AGP) { | 438 | if (rdev->flags & RADEON_IS_AGP) { |
451 | /* RADEON_IS_AGP is set only if AGP is active */ | 439 | /* RADEON_IS_AGP is set only if AGP is active */ |
452 | mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; | 440 | mem->bus.offset = mem->start << PAGE_SHIFT; |
453 | mem->bus.base = rdev->mc.agp_base; | 441 | mem->bus.base = rdev->mc.agp_base; |
454 | mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; | 442 | mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; |
455 | } | 443 | } |
456 | #endif | 444 | #endif |
457 | break; | 445 | break; |
458 | case TTM_PL_VRAM: | 446 | case TTM_PL_VRAM: |
459 | mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; | 447 | mem->bus.offset = mem->start << PAGE_SHIFT; |
460 | /* check if it's visible */ | 448 | /* check if it's visible */ |
461 | if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) | 449 | if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) |
462 | return -EINVAL; | 450 | return -EINVAL; |
@@ -541,7 +529,7 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
541 | DRM_ERROR("Failed initializing VRAM heap.\n"); | 529 | DRM_ERROR("Failed initializing VRAM heap.\n"); |
542 | return r; | 530 | return r; |
543 | } | 531 | } |
544 | r = radeon_bo_create(rdev, NULL, 256 * 1024, true, | 532 | r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, |
545 | RADEON_GEM_DOMAIN_VRAM, | 533 | RADEON_GEM_DOMAIN_VRAM, |
546 | &rdev->stollen_vga_memory); | 534 | &rdev->stollen_vga_memory); |
547 | if (r) { | 535 | if (r) { |
@@ -601,6 +589,20 @@ void radeon_ttm_fini(struct radeon_device *rdev) | |||
601 | DRM_INFO("radeon: ttm finalized\n"); | 589 | DRM_INFO("radeon: ttm finalized\n"); |
602 | } | 590 | } |
603 | 591 | ||
592 | /* this should only be called at bootup or when userspace | ||
593 | * isn't running */ | ||
594 | void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) | ||
595 | { | ||
596 | struct ttm_mem_type_manager *man; | ||
597 | |||
598 | if (!rdev->mman.initialized) | ||
599 | return; | ||
600 | |||
601 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; | ||
602 | /* this just adjusts TTM size idea, which sets lpfn to the correct value */ | ||
603 | man->size = size >> PAGE_SHIFT; | ||
604 | } | ||
605 | |||
604 | static struct vm_operations_struct radeon_ttm_vm_ops; | 606 | static struct vm_operations_struct radeon_ttm_vm_ops; |
605 | static const struct vm_operations_struct *ttm_vm_ops = NULL; | 607 | static const struct vm_operations_struct *ttm_vm_ops = NULL; |
606 | 608 | ||
@@ -631,7 +633,7 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma) | |||
631 | return drm_mmap(filp, vma); | 633 | return drm_mmap(filp, vma); |
632 | } | 634 | } |
633 | 635 | ||
634 | file_priv = (struct drm_file *)filp->private_data; | 636 | file_priv = filp->private_data; |
635 | rdev = file_priv->minor->dev->dev_private; | 637 | rdev = file_priv->minor->dev->dev_private; |
636 | if (rdev == NULL) { | 638 | if (rdev == NULL) { |
637 | return -EINVAL; | 639 | return -EINVAL; |
@@ -659,6 +661,7 @@ struct radeon_ttm_backend { | |||
659 | unsigned long num_pages; | 661 | unsigned long num_pages; |
660 | struct page **pages; | 662 | struct page **pages; |
661 | struct page *dummy_read_page; | 663 | struct page *dummy_read_page; |
664 | dma_addr_t *dma_addrs; | ||
662 | bool populated; | 665 | bool populated; |
663 | bool bound; | 666 | bool bound; |
664 | unsigned offset; | 667 | unsigned offset; |
@@ -667,12 +670,14 @@ struct radeon_ttm_backend { | |||
667 | static int radeon_ttm_backend_populate(struct ttm_backend *backend, | 670 | static int radeon_ttm_backend_populate(struct ttm_backend *backend, |
668 | unsigned long num_pages, | 671 | unsigned long num_pages, |
669 | struct page **pages, | 672 | struct page **pages, |
670 | struct page *dummy_read_page) | 673 | struct page *dummy_read_page, |
674 | dma_addr_t *dma_addrs) | ||
671 | { | 675 | { |
672 | struct radeon_ttm_backend *gtt; | 676 | struct radeon_ttm_backend *gtt; |
673 | 677 | ||
674 | gtt = container_of(backend, struct radeon_ttm_backend, backend); | 678 | gtt = container_of(backend, struct radeon_ttm_backend, backend); |
675 | gtt->pages = pages; | 679 | gtt->pages = pages; |
680 | gtt->dma_addrs = dma_addrs; | ||
676 | gtt->num_pages = num_pages; | 681 | gtt->num_pages = num_pages; |
677 | gtt->dummy_read_page = dummy_read_page; | 682 | gtt->dummy_read_page = dummy_read_page; |
678 | gtt->populated = true; | 683 | gtt->populated = true; |
@@ -685,6 +690,7 @@ static void radeon_ttm_backend_clear(struct ttm_backend *backend) | |||
685 | 690 | ||
686 | gtt = container_of(backend, struct radeon_ttm_backend, backend); | 691 | gtt = container_of(backend, struct radeon_ttm_backend, backend); |
687 | gtt->pages = NULL; | 692 | gtt->pages = NULL; |
693 | gtt->dma_addrs = NULL; | ||
688 | gtt->num_pages = 0; | 694 | gtt->num_pages = 0; |
689 | gtt->dummy_read_page = NULL; | 695 | gtt->dummy_read_page = NULL; |
690 | gtt->populated = false; | 696 | gtt->populated = false; |
@@ -699,12 +705,13 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend, | |||
699 | int r; | 705 | int r; |
700 | 706 | ||
701 | gtt = container_of(backend, struct radeon_ttm_backend, backend); | 707 | gtt = container_of(backend, struct radeon_ttm_backend, backend); |
702 | gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT; | 708 | gtt->offset = bo_mem->start << PAGE_SHIFT; |
703 | if (!gtt->num_pages) { | 709 | if (!gtt->num_pages) { |
704 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend); | 710 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", |
711 | gtt->num_pages, bo_mem, backend); | ||
705 | } | 712 | } |
706 | r = radeon_gart_bind(gtt->rdev, gtt->offset, | 713 | r = radeon_gart_bind(gtt->rdev, gtt->offset, |
707 | gtt->num_pages, gtt->pages); | 714 | gtt->num_pages, gtt->pages, gtt->dma_addrs); |
708 | if (r) { | 715 | if (r) { |
709 | DRM_ERROR("failed to bind %lu pages at 0x%08X\n", | 716 | DRM_ERROR("failed to bind %lu pages at 0x%08X\n", |
710 | gtt->num_pages, gtt->offset); | 717 | gtt->num_pages, gtt->offset); |
@@ -798,9 +805,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev) | |||
798 | radeon_mem_types_list[i].show = &radeon_mm_dump_table; | 805 | radeon_mem_types_list[i].show = &radeon_mm_dump_table; |
799 | radeon_mem_types_list[i].driver_features = 0; | 806 | radeon_mem_types_list[i].driver_features = 0; |
800 | if (i == 0) | 807 | if (i == 0) |
801 | radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager; | 808 | radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv; |
802 | else | 809 | else |
803 | radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager; | 810 | radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv; |
804 | 811 | ||
805 | } | 812 | } |
806 | /* Add ttm page pool to debugfs */ | 813 | /* Add ttm page pool to debugfs */ |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman new file mode 100644 index 000000000000..0aa8e85a9457 --- /dev/null +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman | |||
@@ -0,0 +1,620 @@ | |||
1 | cayman 0x9400 | ||
2 | 0x0000802C GRBM_GFX_INDEX | ||
3 | 0x000088B0 VGT_VTX_VECT_EJECT_REG | ||
4 | 0x000088C4 VGT_CACHE_INVALIDATION | ||
5 | 0x000088D4 VGT_GS_VERTEX_REUSE | ||
6 | 0x00008958 VGT_PRIMITIVE_TYPE | ||
7 | 0x0000895C VGT_INDEX_TYPE | ||
8 | 0x00008970 VGT_NUM_INDICES | ||
9 | 0x00008974 VGT_NUM_INSTANCES | ||
10 | 0x00008990 VGT_COMPUTE_DIM_X | ||
11 | 0x00008994 VGT_COMPUTE_DIM_Y | ||
12 | 0x00008998 VGT_COMPUTE_DIM_Z | ||
13 | 0x0000899C VGT_COMPUTE_START_X | ||
14 | 0x000089A0 VGT_COMPUTE_START_Y | ||
15 | 0x000089A4 VGT_COMPUTE_START_Z | ||
16 | 0x000089A8 VGT_COMPUTE_INDEX | ||
17 | 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE | ||
18 | 0x000089B0 VGT_HS_OFFCHIP_PARAM | ||
19 | 0x00008A14 PA_CL_ENHANCE | ||
20 | 0x00008A60 PA_SC_LINE_STIPPLE_VALUE | ||
21 | 0x00008B10 PA_SC_LINE_STIPPLE_STATE | ||
22 | 0x00008BF0 PA_SC_ENHANCE | ||
23 | 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ | ||
24 | 0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN | ||
25 | 0x00008C00 SQ_CONFIG | ||
26 | 0x00008C04 SQ_GPR_RESOURCE_MGMT_1 | ||
27 | 0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1 | ||
28 | 0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2 | ||
29 | 0x00008DF8 SQ_CONST_MEM_BASE | ||
30 | 0x00008E20 SQ_STATIC_THREAD_MGMT_1 | ||
31 | 0x00008E24 SQ_STATIC_THREAD_MGMT_2 | ||
32 | 0x00008E28 SQ_STATIC_THREAD_MGMT_3 | ||
33 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS | ||
34 | 0x00009100 SPI_CONFIG_CNTL | ||
35 | 0x0000913C SPI_CONFIG_CNTL_1 | ||
36 | 0x00009508 TA_CNTL_AUX | ||
37 | 0x00009830 DB_DEBUG | ||
38 | 0x00009834 DB_DEBUG2 | ||
39 | 0x00009838 DB_DEBUG3 | ||
40 | 0x0000983C DB_DEBUG4 | ||
41 | 0x00009854 DB_WATERMARKS | ||
42 | 0x0000A400 TD_PS_BORDER_COLOR_INDEX | ||
43 | 0x0000A404 TD_PS_BORDER_COLOR_RED | ||
44 | 0x0000A408 TD_PS_BORDER_COLOR_GREEN | ||
45 | 0x0000A40C TD_PS_BORDER_COLOR_BLUE | ||
46 | 0x0000A410 TD_PS_BORDER_COLOR_ALPHA | ||
47 | 0x0000A414 TD_VS_BORDER_COLOR_INDEX | ||
48 | 0x0000A418 TD_VS_BORDER_COLOR_RED | ||
49 | 0x0000A41C TD_VS_BORDER_COLOR_GREEN | ||
50 | 0x0000A420 TD_VS_BORDER_COLOR_BLUE | ||
51 | 0x0000A424 TD_VS_BORDER_COLOR_ALPHA | ||
52 | 0x0000A428 TD_GS_BORDER_COLOR_INDEX | ||
53 | 0x0000A42C TD_GS_BORDER_COLOR_RED | ||
54 | 0x0000A430 TD_GS_BORDER_COLOR_GREEN | ||
55 | 0x0000A434 TD_GS_BORDER_COLOR_BLUE | ||
56 | 0x0000A438 TD_GS_BORDER_COLOR_ALPHA | ||
57 | 0x0000A43C TD_HS_BORDER_COLOR_INDEX | ||
58 | 0x0000A440 TD_HS_BORDER_COLOR_RED | ||
59 | 0x0000A444 TD_HS_BORDER_COLOR_GREEN | ||
60 | 0x0000A448 TD_HS_BORDER_COLOR_BLUE | ||
61 | 0x0000A44C TD_HS_BORDER_COLOR_ALPHA | ||
62 | 0x0000A450 TD_LS_BORDER_COLOR_INDEX | ||
63 | 0x0000A454 TD_LS_BORDER_COLOR_RED | ||
64 | 0x0000A458 TD_LS_BORDER_COLOR_GREEN | ||
65 | 0x0000A45C TD_LS_BORDER_COLOR_BLUE | ||
66 | 0x0000A460 TD_LS_BORDER_COLOR_ALPHA | ||
67 | 0x0000A464 TD_CS_BORDER_COLOR_INDEX | ||
68 | 0x0000A468 TD_CS_BORDER_COLOR_RED | ||
69 | 0x0000A46C TD_CS_BORDER_COLOR_GREEN | ||
70 | 0x0000A470 TD_CS_BORDER_COLOR_BLUE | ||
71 | 0x0000A474 TD_CS_BORDER_COLOR_ALPHA | ||
72 | 0x00028000 DB_RENDER_CONTROL | ||
73 | 0x00028004 DB_COUNT_CONTROL | ||
74 | 0x0002800C DB_RENDER_OVERRIDE | ||
75 | 0x00028010 DB_RENDER_OVERRIDE2 | ||
76 | 0x00028028 DB_STENCIL_CLEAR | ||
77 | 0x0002802C DB_DEPTH_CLEAR | ||
78 | 0x00028030 PA_SC_SCREEN_SCISSOR_TL | ||
79 | 0x00028034 PA_SC_SCREEN_SCISSOR_BR | ||
80 | 0x0002805C DB_DEPTH_SLICE | ||
81 | 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 | ||
82 | 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 | ||
83 | 0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2 | ||
84 | 0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3 | ||
85 | 0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4 | ||
86 | 0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5 | ||
87 | 0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6 | ||
88 | 0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7 | ||
89 | 0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8 | ||
90 | 0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9 | ||
91 | 0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10 | ||
92 | 0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11 | ||
93 | 0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12 | ||
94 | 0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13 | ||
95 | 0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14 | ||
96 | 0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15 | ||
97 | 0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0 | ||
98 | 0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1 | ||
99 | 0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2 | ||
100 | 0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3 | ||
101 | 0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4 | ||
102 | 0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5 | ||
103 | 0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6 | ||
104 | 0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7 | ||
105 | 0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8 | ||
106 | 0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9 | ||
107 | 0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10 | ||
108 | 0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11 | ||
109 | 0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12 | ||
110 | 0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 | ||
111 | 0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 | ||
112 | 0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 | ||
113 | 0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 | ||
114 | 0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 | ||
115 | 0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 | ||
116 | 0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3 | ||
117 | 0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4 | ||
118 | 0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5 | ||
119 | 0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6 | ||
120 | 0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7 | ||
121 | 0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8 | ||
122 | 0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9 | ||
123 | 0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10 | ||
124 | 0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11 | ||
125 | 0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12 | ||
126 | 0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13 | ||
127 | 0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14 | ||
128 | 0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15 | ||
129 | 0x00028200 PA_SC_WINDOW_OFFSET | ||
130 | 0x00028204 PA_SC_WINDOW_SCISSOR_TL | ||
131 | 0x00028208 PA_SC_WINDOW_SCISSOR_BR | ||
132 | 0x0002820C PA_SC_CLIPRECT_RULE | ||
133 | 0x00028210 PA_SC_CLIPRECT_0_TL | ||
134 | 0x00028214 PA_SC_CLIPRECT_0_BR | ||
135 | 0x00028218 PA_SC_CLIPRECT_1_TL | ||
136 | 0x0002821C PA_SC_CLIPRECT_1_BR | ||
137 | 0x00028220 PA_SC_CLIPRECT_2_TL | ||
138 | 0x00028224 PA_SC_CLIPRECT_2_BR | ||
139 | 0x00028228 PA_SC_CLIPRECT_3_TL | ||
140 | 0x0002822C PA_SC_CLIPRECT_3_BR | ||
141 | 0x00028230 PA_SC_EDGERULE | ||
142 | 0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET | ||
143 | 0x00028240 PA_SC_GENERIC_SCISSOR_TL | ||
144 | 0x00028244 PA_SC_GENERIC_SCISSOR_BR | ||
145 | 0x00028250 PA_SC_VPORT_SCISSOR_0_TL | ||
146 | 0x00028254 PA_SC_VPORT_SCISSOR_0_BR | ||
147 | 0x00028258 PA_SC_VPORT_SCISSOR_1_TL | ||
148 | 0x0002825C PA_SC_VPORT_SCISSOR_1_BR | ||
149 | 0x00028260 PA_SC_VPORT_SCISSOR_2_TL | ||
150 | 0x00028264 PA_SC_VPORT_SCISSOR_2_BR | ||
151 | 0x00028268 PA_SC_VPORT_SCISSOR_3_TL | ||
152 | 0x0002826C PA_SC_VPORT_SCISSOR_3_BR | ||
153 | 0x00028270 PA_SC_VPORT_SCISSOR_4_TL | ||
154 | 0x00028274 PA_SC_VPORT_SCISSOR_4_BR | ||
155 | 0x00028278 PA_SC_VPORT_SCISSOR_5_TL | ||
156 | 0x0002827C PA_SC_VPORT_SCISSOR_5_BR | ||
157 | 0x00028280 PA_SC_VPORT_SCISSOR_6_TL | ||
158 | 0x00028284 PA_SC_VPORT_SCISSOR_6_BR | ||
159 | 0x00028288 PA_SC_VPORT_SCISSOR_7_TL | ||
160 | 0x0002828C PA_SC_VPORT_SCISSOR_7_BR | ||
161 | 0x00028290 PA_SC_VPORT_SCISSOR_8_TL | ||
162 | 0x00028294 PA_SC_VPORT_SCISSOR_8_BR | ||
163 | 0x00028298 PA_SC_VPORT_SCISSOR_9_TL | ||
164 | 0x0002829C PA_SC_VPORT_SCISSOR_9_BR | ||
165 | 0x000282A0 PA_SC_VPORT_SCISSOR_10_TL | ||
166 | 0x000282A4 PA_SC_VPORT_SCISSOR_10_BR | ||
167 | 0x000282A8 PA_SC_VPORT_SCISSOR_11_TL | ||
168 | 0x000282AC PA_SC_VPORT_SCISSOR_11_BR | ||
169 | 0x000282B0 PA_SC_VPORT_SCISSOR_12_TL | ||
170 | 0x000282B4 PA_SC_VPORT_SCISSOR_12_BR | ||
171 | 0x000282B8 PA_SC_VPORT_SCISSOR_13_TL | ||
172 | 0x000282BC PA_SC_VPORT_SCISSOR_13_BR | ||
173 | 0x000282C0 PA_SC_VPORT_SCISSOR_14_TL | ||
174 | 0x000282C4 PA_SC_VPORT_SCISSOR_14_BR | ||
175 | 0x000282C8 PA_SC_VPORT_SCISSOR_15_TL | ||
176 | 0x000282CC PA_SC_VPORT_SCISSOR_15_BR | ||
177 | 0x000282D0 PA_SC_VPORT_ZMIN_0 | ||
178 | 0x000282D4 PA_SC_VPORT_ZMAX_0 | ||
179 | 0x000282D8 PA_SC_VPORT_ZMIN_1 | ||
180 | 0x000282DC PA_SC_VPORT_ZMAX_1 | ||
181 | 0x000282E0 PA_SC_VPORT_ZMIN_2 | ||
182 | 0x000282E4 PA_SC_VPORT_ZMAX_2 | ||
183 | 0x000282E8 PA_SC_VPORT_ZMIN_3 | ||
184 | 0x000282EC PA_SC_VPORT_ZMAX_3 | ||
185 | 0x000282F0 PA_SC_VPORT_ZMIN_4 | ||
186 | 0x000282F4 PA_SC_VPORT_ZMAX_4 | ||
187 | 0x000282F8 PA_SC_VPORT_ZMIN_5 | ||
188 | 0x000282FC PA_SC_VPORT_ZMAX_5 | ||
189 | 0x00028300 PA_SC_VPORT_ZMIN_6 | ||
190 | 0x00028304 PA_SC_VPORT_ZMAX_6 | ||
191 | 0x00028308 PA_SC_VPORT_ZMIN_7 | ||
192 | 0x0002830C PA_SC_VPORT_ZMAX_7 | ||
193 | 0x00028310 PA_SC_VPORT_ZMIN_8 | ||
194 | 0x00028314 PA_SC_VPORT_ZMAX_8 | ||
195 | 0x00028318 PA_SC_VPORT_ZMIN_9 | ||
196 | 0x0002831C PA_SC_VPORT_ZMAX_9 | ||
197 | 0x00028320 PA_SC_VPORT_ZMIN_10 | ||
198 | 0x00028324 PA_SC_VPORT_ZMAX_10 | ||
199 | 0x00028328 PA_SC_VPORT_ZMIN_11 | ||
200 | 0x0002832C PA_SC_VPORT_ZMAX_11 | ||
201 | 0x00028330 PA_SC_VPORT_ZMIN_12 | ||
202 | 0x00028334 PA_SC_VPORT_ZMAX_12 | ||
203 | 0x00028338 PA_SC_VPORT_ZMIN_13 | ||
204 | 0x0002833C PA_SC_VPORT_ZMAX_13 | ||
205 | 0x00028340 PA_SC_VPORT_ZMIN_14 | ||
206 | 0x00028344 PA_SC_VPORT_ZMAX_14 | ||
207 | 0x00028348 PA_SC_VPORT_ZMIN_15 | ||
208 | 0x0002834C PA_SC_VPORT_ZMAX_15 | ||
209 | 0x00028350 SX_MISC | ||
210 | 0x00028354 SX_SURFACE_SYNC | ||
211 | 0x00028380 SQ_VTX_SEMANTIC_0 | ||
212 | 0x00028384 SQ_VTX_SEMANTIC_1 | ||
213 | 0x00028388 SQ_VTX_SEMANTIC_2 | ||
214 | 0x0002838C SQ_VTX_SEMANTIC_3 | ||
215 | 0x00028390 SQ_VTX_SEMANTIC_4 | ||
216 | 0x00028394 SQ_VTX_SEMANTIC_5 | ||
217 | 0x00028398 SQ_VTX_SEMANTIC_6 | ||
218 | 0x0002839C SQ_VTX_SEMANTIC_7 | ||
219 | 0x000283A0 SQ_VTX_SEMANTIC_8 | ||
220 | 0x000283A4 SQ_VTX_SEMANTIC_9 | ||
221 | 0x000283A8 SQ_VTX_SEMANTIC_10 | ||
222 | 0x000283AC SQ_VTX_SEMANTIC_11 | ||
223 | 0x000283B0 SQ_VTX_SEMANTIC_12 | ||
224 | 0x000283B4 SQ_VTX_SEMANTIC_13 | ||
225 | 0x000283B8 SQ_VTX_SEMANTIC_14 | ||
226 | 0x000283BC SQ_VTX_SEMANTIC_15 | ||
227 | 0x000283C0 SQ_VTX_SEMANTIC_16 | ||
228 | 0x000283C4 SQ_VTX_SEMANTIC_17 | ||
229 | 0x000283C8 SQ_VTX_SEMANTIC_18 | ||
230 | 0x000283CC SQ_VTX_SEMANTIC_19 | ||
231 | 0x000283D0 SQ_VTX_SEMANTIC_20 | ||
232 | 0x000283D4 SQ_VTX_SEMANTIC_21 | ||
233 | 0x000283D8 SQ_VTX_SEMANTIC_22 | ||
234 | 0x000283DC SQ_VTX_SEMANTIC_23 | ||
235 | 0x000283E0 SQ_VTX_SEMANTIC_24 | ||
236 | 0x000283E4 SQ_VTX_SEMANTIC_25 | ||
237 | 0x000283E8 SQ_VTX_SEMANTIC_26 | ||
238 | 0x000283EC SQ_VTX_SEMANTIC_27 | ||
239 | 0x000283F0 SQ_VTX_SEMANTIC_28 | ||
240 | 0x000283F4 SQ_VTX_SEMANTIC_29 | ||
241 | 0x000283F8 SQ_VTX_SEMANTIC_30 | ||
242 | 0x000283FC SQ_VTX_SEMANTIC_31 | ||
243 | 0x00028400 VGT_MAX_VTX_INDX | ||
244 | 0x00028404 VGT_MIN_VTX_INDX | ||
245 | 0x00028408 VGT_INDX_OFFSET | ||
246 | 0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX | ||
247 | 0x00028410 SX_ALPHA_TEST_CONTROL | ||
248 | 0x00028414 CB_BLEND_RED | ||
249 | 0x00028418 CB_BLEND_GREEN | ||
250 | 0x0002841C CB_BLEND_BLUE | ||
251 | 0x00028420 CB_BLEND_ALPHA | ||
252 | 0x00028430 DB_STENCILREFMASK | ||
253 | 0x00028434 DB_STENCILREFMASK_BF | ||
254 | 0x00028438 SX_ALPHA_REF | ||
255 | 0x0002843C PA_CL_VPORT_XSCALE_0 | ||
256 | 0x00028440 PA_CL_VPORT_XOFFSET_0 | ||
257 | 0x00028444 PA_CL_VPORT_YSCALE_0 | ||
258 | 0x00028448 PA_CL_VPORT_YOFFSET_0 | ||
259 | 0x0002844C PA_CL_VPORT_ZSCALE_0 | ||
260 | 0x00028450 PA_CL_VPORT_ZOFFSET_0 | ||
261 | 0x00028454 PA_CL_VPORT_XSCALE_1 | ||
262 | 0x00028458 PA_CL_VPORT_XOFFSET_1 | ||
263 | 0x0002845C PA_CL_VPORT_YSCALE_1 | ||
264 | 0x00028460 PA_CL_VPORT_YOFFSET_1 | ||
265 | 0x00028464 PA_CL_VPORT_ZSCALE_1 | ||
266 | 0x00028468 PA_CL_VPORT_ZOFFSET_1 | ||
267 | 0x0002846C PA_CL_VPORT_XSCALE_2 | ||
268 | 0x00028470 PA_CL_VPORT_XOFFSET_2 | ||
269 | 0x00028474 PA_CL_VPORT_YSCALE_2 | ||
270 | 0x00028478 PA_CL_VPORT_YOFFSET_2 | ||
271 | 0x0002847C PA_CL_VPORT_ZSCALE_2 | ||
272 | 0x00028480 PA_CL_VPORT_ZOFFSET_2 | ||
273 | 0x00028484 PA_CL_VPORT_XSCALE_3 | ||
274 | 0x00028488 PA_CL_VPORT_XOFFSET_3 | ||
275 | 0x0002848C PA_CL_VPORT_YSCALE_3 | ||
276 | 0x00028490 PA_CL_VPORT_YOFFSET_3 | ||
277 | 0x00028494 PA_CL_VPORT_ZSCALE_3 | ||
278 | 0x00028498 PA_CL_VPORT_ZOFFSET_3 | ||
279 | 0x0002849C PA_CL_VPORT_XSCALE_4 | ||
280 | 0x000284A0 PA_CL_VPORT_XOFFSET_4 | ||
281 | 0x000284A4 PA_CL_VPORT_YSCALE_4 | ||
282 | 0x000284A8 PA_CL_VPORT_YOFFSET_4 | ||
283 | 0x000284AC PA_CL_VPORT_ZSCALE_4 | ||
284 | 0x000284B0 PA_CL_VPORT_ZOFFSET_4 | ||
285 | 0x000284B4 PA_CL_VPORT_XSCALE_5 | ||
286 | 0x000284B8 PA_CL_VPORT_XOFFSET_5 | ||
287 | 0x000284BC PA_CL_VPORT_YSCALE_5 | ||
288 | 0x000284C0 PA_CL_VPORT_YOFFSET_5 | ||
289 | 0x000284C4 PA_CL_VPORT_ZSCALE_5 | ||
290 | 0x000284C8 PA_CL_VPORT_ZOFFSET_5 | ||
291 | 0x000284CC PA_CL_VPORT_XSCALE_6 | ||
292 | 0x000284D0 PA_CL_VPORT_XOFFSET_6 | ||
293 | 0x000284D4 PA_CL_VPORT_YSCALE_6 | ||
294 | 0x000284D8 PA_CL_VPORT_YOFFSET_6 | ||
295 | 0x000284DC PA_CL_VPORT_ZSCALE_6 | ||
296 | 0x000284E0 PA_CL_VPORT_ZOFFSET_6 | ||
297 | 0x000284E4 PA_CL_VPORT_XSCALE_7 | ||
298 | 0x000284E8 PA_CL_VPORT_XOFFSET_7 | ||
299 | 0x000284EC PA_CL_VPORT_YSCALE_7 | ||
300 | 0x000284F0 PA_CL_VPORT_YOFFSET_7 | ||
301 | 0x000284F4 PA_CL_VPORT_ZSCALE_7 | ||
302 | 0x000284F8 PA_CL_VPORT_ZOFFSET_7 | ||
303 | 0x000284FC PA_CL_VPORT_XSCALE_8 | ||
304 | 0x00028500 PA_CL_VPORT_XOFFSET_8 | ||
305 | 0x00028504 PA_CL_VPORT_YSCALE_8 | ||
306 | 0x00028508 PA_CL_VPORT_YOFFSET_8 | ||
307 | 0x0002850C PA_CL_VPORT_ZSCALE_8 | ||
308 | 0x00028510 PA_CL_VPORT_ZOFFSET_8 | ||
309 | 0x00028514 PA_CL_VPORT_XSCALE_9 | ||
310 | 0x00028518 PA_CL_VPORT_XOFFSET_9 | ||
311 | 0x0002851C PA_CL_VPORT_YSCALE_9 | ||
312 | 0x00028520 PA_CL_VPORT_YOFFSET_9 | ||
313 | 0x00028524 PA_CL_VPORT_ZSCALE_9 | ||
314 | 0x00028528 PA_CL_VPORT_ZOFFSET_9 | ||
315 | 0x0002852C PA_CL_VPORT_XSCALE_10 | ||
316 | 0x00028530 PA_CL_VPORT_XOFFSET_10 | ||
317 | 0x00028534 PA_CL_VPORT_YSCALE_10 | ||
318 | 0x00028538 PA_CL_VPORT_YOFFSET_10 | ||
319 | 0x0002853C PA_CL_VPORT_ZSCALE_10 | ||
320 | 0x00028540 PA_CL_VPORT_ZOFFSET_10 | ||
321 | 0x00028544 PA_CL_VPORT_XSCALE_11 | ||
322 | 0x00028548 PA_CL_VPORT_XOFFSET_11 | ||
323 | 0x0002854C PA_CL_VPORT_YSCALE_11 | ||
324 | 0x00028550 PA_CL_VPORT_YOFFSET_11 | ||
325 | 0x00028554 PA_CL_VPORT_ZSCALE_11 | ||
326 | 0x00028558 PA_CL_VPORT_ZOFFSET_11 | ||
327 | 0x0002855C PA_CL_VPORT_XSCALE_12 | ||
328 | 0x00028560 PA_CL_VPORT_XOFFSET_12 | ||
329 | 0x00028564 PA_CL_VPORT_YSCALE_12 | ||
330 | 0x00028568 PA_CL_VPORT_YOFFSET_12 | ||
331 | 0x0002856C PA_CL_VPORT_ZSCALE_12 | ||
332 | 0x00028570 PA_CL_VPORT_ZOFFSET_12 | ||
333 | 0x00028574 PA_CL_VPORT_XSCALE_13 | ||
334 | 0x00028578 PA_CL_VPORT_XOFFSET_13 | ||
335 | 0x0002857C PA_CL_VPORT_YSCALE_13 | ||
336 | 0x00028580 PA_CL_VPORT_YOFFSET_13 | ||
337 | 0x00028584 PA_CL_VPORT_ZSCALE_13 | ||
338 | 0x00028588 PA_CL_VPORT_ZOFFSET_13 | ||
339 | 0x0002858C PA_CL_VPORT_XSCALE_14 | ||
340 | 0x00028590 PA_CL_VPORT_XOFFSET_14 | ||
341 | 0x00028594 PA_CL_VPORT_YSCALE_14 | ||
342 | 0x00028598 PA_CL_VPORT_YOFFSET_14 | ||
343 | 0x0002859C PA_CL_VPORT_ZSCALE_14 | ||
344 | 0x000285A0 PA_CL_VPORT_ZOFFSET_14 | ||
345 | 0x000285A4 PA_CL_VPORT_XSCALE_15 | ||
346 | 0x000285A8 PA_CL_VPORT_XOFFSET_15 | ||
347 | 0x000285AC PA_CL_VPORT_YSCALE_15 | ||
348 | 0x000285B0 PA_CL_VPORT_YOFFSET_15 | ||
349 | 0x000285B4 PA_CL_VPORT_ZSCALE_15 | ||
350 | 0x000285B8 PA_CL_VPORT_ZOFFSET_15 | ||
351 | 0x000285BC PA_CL_UCP_0_X | ||
352 | 0x000285C0 PA_CL_UCP_0_Y | ||
353 | 0x000285C4 PA_CL_UCP_0_Z | ||
354 | 0x000285C8 PA_CL_UCP_0_W | ||
355 | 0x000285CC PA_CL_UCP_1_X | ||
356 | 0x000285D0 PA_CL_UCP_1_Y | ||
357 | 0x000285D4 PA_CL_UCP_1_Z | ||
358 | 0x000285D8 PA_CL_UCP_1_W | ||
359 | 0x000285DC PA_CL_UCP_2_X | ||
360 | 0x000285E0 PA_CL_UCP_2_Y | ||
361 | 0x000285E4 PA_CL_UCP_2_Z | ||
362 | 0x000285E8 PA_CL_UCP_2_W | ||
363 | 0x000285EC PA_CL_UCP_3_X | ||
364 | 0x000285F0 PA_CL_UCP_3_Y | ||
365 | 0x000285F4 PA_CL_UCP_3_Z | ||
366 | 0x000285F8 PA_CL_UCP_3_W | ||
367 | 0x000285FC PA_CL_UCP_4_X | ||
368 | 0x00028600 PA_CL_UCP_4_Y | ||
369 | 0x00028604 PA_CL_UCP_4_Z | ||
370 | 0x00028608 PA_CL_UCP_4_W | ||
371 | 0x0002860C PA_CL_UCP_5_X | ||
372 | 0x00028610 PA_CL_UCP_5_Y | ||
373 | 0x00028614 PA_CL_UCP_5_Z | ||
374 | 0x00028618 PA_CL_UCP_5_W | ||
375 | 0x0002861C SPI_VS_OUT_ID_0 | ||
376 | 0x00028620 SPI_VS_OUT_ID_1 | ||
377 | 0x00028624 SPI_VS_OUT_ID_2 | ||
378 | 0x00028628 SPI_VS_OUT_ID_3 | ||
379 | 0x0002862C SPI_VS_OUT_ID_4 | ||
380 | 0x00028630 SPI_VS_OUT_ID_5 | ||
381 | 0x00028634 SPI_VS_OUT_ID_6 | ||
382 | 0x00028638 SPI_VS_OUT_ID_7 | ||
383 | 0x0002863C SPI_VS_OUT_ID_8 | ||
384 | 0x00028640 SPI_VS_OUT_ID_9 | ||
385 | 0x00028644 SPI_PS_INPUT_CNTL_0 | ||
386 | 0x00028648 SPI_PS_INPUT_CNTL_1 | ||
387 | 0x0002864C SPI_PS_INPUT_CNTL_2 | ||
388 | 0x00028650 SPI_PS_INPUT_CNTL_3 | ||
389 | 0x00028654 SPI_PS_INPUT_CNTL_4 | ||
390 | 0x00028658 SPI_PS_INPUT_CNTL_5 | ||
391 | 0x0002865C SPI_PS_INPUT_CNTL_6 | ||
392 | 0x00028660 SPI_PS_INPUT_CNTL_7 | ||
393 | 0x00028664 SPI_PS_INPUT_CNTL_8 | ||
394 | 0x00028668 SPI_PS_INPUT_CNTL_9 | ||
395 | 0x0002866C SPI_PS_INPUT_CNTL_10 | ||
396 | 0x00028670 SPI_PS_INPUT_CNTL_11 | ||
397 | 0x00028674 SPI_PS_INPUT_CNTL_12 | ||
398 | 0x00028678 SPI_PS_INPUT_CNTL_13 | ||
399 | 0x0002867C SPI_PS_INPUT_CNTL_14 | ||
400 | 0x00028680 SPI_PS_INPUT_CNTL_15 | ||
401 | 0x00028684 SPI_PS_INPUT_CNTL_16 | ||
402 | 0x00028688 SPI_PS_INPUT_CNTL_17 | ||
403 | 0x0002868C SPI_PS_INPUT_CNTL_18 | ||
404 | 0x00028690 SPI_PS_INPUT_CNTL_19 | ||
405 | 0x00028694 SPI_PS_INPUT_CNTL_20 | ||
406 | 0x00028698 SPI_PS_INPUT_CNTL_21 | ||
407 | 0x0002869C SPI_PS_INPUT_CNTL_22 | ||
408 | 0x000286A0 SPI_PS_INPUT_CNTL_23 | ||
409 | 0x000286A4 SPI_PS_INPUT_CNTL_24 | ||
410 | 0x000286A8 SPI_PS_INPUT_CNTL_25 | ||
411 | 0x000286AC SPI_PS_INPUT_CNTL_26 | ||
412 | 0x000286B0 SPI_PS_INPUT_CNTL_27 | ||
413 | 0x000286B4 SPI_PS_INPUT_CNTL_28 | ||
414 | 0x000286B8 SPI_PS_INPUT_CNTL_29 | ||
415 | 0x000286BC SPI_PS_INPUT_CNTL_30 | ||
416 | 0x000286C0 SPI_PS_INPUT_CNTL_31 | ||
417 | 0x000286C4 SPI_VS_OUT_CONFIG | ||
418 | 0x000286C8 SPI_THREAD_GROUPING | ||
419 | 0x000286CC SPI_PS_IN_CONTROL_0 | ||
420 | 0x000286D0 SPI_PS_IN_CONTROL_1 | ||
421 | 0x000286D4 SPI_INTERP_CONTROL_0 | ||
422 | 0x000286D8 SPI_INPUT_Z | ||
423 | 0x000286DC SPI_FOG_CNTL | ||
424 | 0x000286E0 SPI_BARYC_CNTL | ||
425 | 0x000286E4 SPI_PS_IN_CONTROL_2 | ||
426 | 0x000286E8 SPI_COMPUTE_INPUT_CNTL | ||
427 | 0x000286EC SPI_COMPUTE_NUM_THREAD_X | ||
428 | 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y | ||
429 | 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z | ||
430 | 0x000286F8 SPI_GPR_MGMT | ||
431 | 0x000286FC SPI_LDS_MGMT | ||
432 | 0x00028700 SPI_STACK_MGMT | ||
433 | 0x00028704 SPI_WAVE_MGMT_1 | ||
434 | 0x00028708 SPI_WAVE_MGMT_2 | ||
435 | 0x00028724 GDS_ADDR_SIZE | ||
436 | 0x00028780 CB_BLEND0_CONTROL | ||
437 | 0x00028784 CB_BLEND1_CONTROL | ||
438 | 0x00028788 CB_BLEND2_CONTROL | ||
439 | 0x0002878C CB_BLEND3_CONTROL | ||
440 | 0x00028790 CB_BLEND4_CONTROL | ||
441 | 0x00028794 CB_BLEND5_CONTROL | ||
442 | 0x00028798 CB_BLEND6_CONTROL | ||
443 | 0x0002879C CB_BLEND7_CONTROL | ||
444 | 0x000287CC CS_COPY_STATE | ||
445 | 0x000287D0 GFX_COPY_STATE | ||
446 | 0x000287D4 PA_CL_POINT_X_RAD | ||
447 | 0x000287D8 PA_CL_POINT_Y_RAD | ||
448 | 0x000287DC PA_CL_POINT_SIZE | ||
449 | 0x000287E0 PA_CL_POINT_CULL_RAD | ||
450 | 0x00028808 CB_COLOR_CONTROL | ||
451 | 0x0002880C DB_SHADER_CONTROL | ||
452 | 0x00028810 PA_CL_CLIP_CNTL | ||
453 | 0x00028814 PA_SU_SC_MODE_CNTL | ||
454 | 0x00028818 PA_CL_VTE_CNTL | ||
455 | 0x0002881C PA_CL_VS_OUT_CNTL | ||
456 | 0x00028820 PA_CL_NANINF_CNTL | ||
457 | 0x00028824 PA_SU_LINE_STIPPLE_CNTL | ||
458 | 0x00028828 PA_SU_LINE_STIPPLE_SCALE | ||
459 | 0x0002882C PA_SU_PRIM_FILTER_CNTL | ||
460 | 0x00028844 SQ_PGM_RESOURCES_PS | ||
461 | 0x00028848 SQ_PGM_RESOURCES_2_PS | ||
462 | 0x0002884C SQ_PGM_EXPORTS_PS | ||
463 | 0x00028860 SQ_PGM_RESOURCES_VS | ||
464 | 0x00028864 SQ_PGM_RESOURCES_2_VS | ||
465 | 0x00028878 SQ_PGM_RESOURCES_GS | ||
466 | 0x0002887C SQ_PGM_RESOURCES_2_GS | ||
467 | 0x00028890 SQ_PGM_RESOURCES_ES | ||
468 | 0x00028894 SQ_PGM_RESOURCES_2_ES | ||
469 | 0x000288A8 SQ_PGM_RESOURCES_FS | ||
470 | 0x000288BC SQ_PGM_RESOURCES_HS | ||
471 | 0x000288C0 SQ_PGM_RESOURCES_2_HS | ||
472 | 0x000288D4 SQ_PGM_RESOURCES_LS | ||
473 | 0x000288D8 SQ_PGM_RESOURCES_2_LS | ||
474 | 0x000288E8 SQ_LDS_ALLOC | ||
475 | 0x000288EC SQ_LDS_ALLOC_PS | ||
476 | 0x000288F0 SQ_VTX_SEMANTIC_CLEAR | ||
477 | 0x00028A00 PA_SU_POINT_SIZE | ||
478 | 0x00028A04 PA_SU_POINT_MINMAX | ||
479 | 0x00028A08 PA_SU_LINE_CNTL | ||
480 | 0x00028A0C PA_SC_LINE_STIPPLE | ||
481 | 0x00028A10 VGT_OUTPUT_PATH_CNTL | ||
482 | 0x00028A14 VGT_HOS_CNTL | ||
483 | 0x00028A18 VGT_HOS_MAX_TESS_LEVEL | ||
484 | 0x00028A1C VGT_HOS_MIN_TESS_LEVEL | ||
485 | 0x00028A20 VGT_HOS_REUSE_DEPTH | ||
486 | 0x00028A24 VGT_GROUP_PRIM_TYPE | ||
487 | 0x00028A28 VGT_GROUP_FIRST_DECR | ||
488 | 0x00028A2C VGT_GROUP_DECR | ||
489 | 0x00028A30 VGT_GROUP_VECT_0_CNTL | ||
490 | 0x00028A34 VGT_GROUP_VECT_1_CNTL | ||
491 | 0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL | ||
492 | 0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL | ||
493 | 0x00028A40 VGT_GS_MODE | ||
494 | 0x00028A48 PA_SC_MODE_CNTL_0 | ||
495 | 0x00028A4C PA_SC_MODE_CNTL_1 | ||
496 | 0x00028A50 VGT_ENHANCE | ||
497 | 0x00028A54 VGT_GS_PER_ES | ||
498 | 0x00028A58 VGT_ES_PER_GS | ||
499 | 0x00028A5C VGT_GS_PER_VS | ||
500 | 0x00028A6C VGT_GS_OUT_PRIM_TYPE | ||
501 | 0x00028A70 IA_ENHANCE | ||
502 | 0x00028A84 VGT_PRIMITIVEID_EN | ||
503 | 0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN | ||
504 | 0x00028AA0 VGT_INSTANCE_STEP_RATE_0 | ||
505 | 0x00028AA4 VGT_INSTANCE_STEP_RATE_1 | ||
506 | 0x00028AA8 IA_MULTI_VGT_PARAM | ||
507 | 0x00028AB4 VGT_REUSE_OFF | ||
508 | 0x00028AB8 VGT_VTX_CNT_EN | ||
509 | 0x00028ABC DB_HTILE_SURFACE | ||
510 | 0x00028AC0 DB_SRESULTS_COMPARE_STATE0 | ||
511 | 0x00028AC4 DB_SRESULTS_COMPARE_STATE1 | ||
512 | 0x00028AC8 DB_PRELOAD_CONTROL | ||
513 | 0x00028B38 VGT_GS_MAX_VERT_OUT | ||
514 | 0x00028B54 VGT_SHADER_STAGES_EN | ||
515 | 0x00028B58 VGT_LS_HS_CONFIG | ||
516 | 0x00028B6C VGT_TF_PARAM | ||
517 | 0x00028B70 DB_ALPHA_TO_MASK | ||
518 | 0x00028B74 VGT_DISPATCH_INITIATOR | ||
519 | 0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL | ||
520 | 0x00028B7C PA_SU_POLY_OFFSET_CLAMP | ||
521 | 0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE | ||
522 | 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET | ||
523 | 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE | ||
524 | 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET | ||
525 | 0x00028B74 VGT_GS_INSTANCE_CNT | ||
526 | 0x00028BD4 PA_SC_CENTROID_PRIORITY_0 | ||
527 | 0x00028BD8 PA_SC_CENTROID_PRIORITY_1 | ||
528 | 0x00028BDC PA_SC_LINE_CNTL | ||
529 | 0x00028BE4 PA_SU_VTX_CNTL | ||
530 | 0x00028BE8 PA_CL_GB_VERT_CLIP_ADJ | ||
531 | 0x00028BEC PA_CL_GB_VERT_DISC_ADJ | ||
532 | 0x00028BF0 PA_CL_GB_HORZ_CLIP_ADJ | ||
533 | 0x00028BF4 PA_CL_GB_HORZ_DISC_ADJ | ||
534 | 0x00028BF8 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_0 | ||
535 | 0x00028BFC PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_1 | ||
536 | 0x00028C00 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_2 | ||
537 | 0x00028C04 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_3 | ||
538 | 0x00028C08 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_0 | ||
539 | 0x00028C0C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_1 | ||
540 | 0x00028C10 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_2 | ||
541 | 0x00028C14 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_3 | ||
542 | 0x00028C18 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_0 | ||
543 | 0x00028C1C PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_1 | ||
544 | 0x00028C20 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_2 | ||
545 | 0x00028C24 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_3 | ||
546 | 0x00028C28 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_0 | ||
547 | 0x00028C2C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_1 | ||
548 | 0x00028C30 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_2 | ||
549 | 0x00028C34 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_3 | ||
550 | 0x00028C38 PA_SC_AA_MASK_X0_Y0_X1_Y0 | ||
551 | 0x00028C3C PA_SC_AA_MASK_X0_Y1_X1_Y1 | ||
552 | 0x00028C8C CB_COLOR0_CLEAR_WORD0 | ||
553 | 0x00028C90 CB_COLOR0_CLEAR_WORD1 | ||
554 | 0x00028C94 CB_COLOR0_CLEAR_WORD2 | ||
555 | 0x00028C98 CB_COLOR0_CLEAR_WORD3 | ||
556 | 0x00028CC8 CB_COLOR1_CLEAR_WORD0 | ||
557 | 0x00028CCC CB_COLOR1_CLEAR_WORD1 | ||
558 | 0x00028CD0 CB_COLOR1_CLEAR_WORD2 | ||
559 | 0x00028CD4 CB_COLOR1_CLEAR_WORD3 | ||
560 | 0x00028D04 CB_COLOR2_CLEAR_WORD0 | ||
561 | 0x00028D08 CB_COLOR2_CLEAR_WORD1 | ||
562 | 0x00028D0C CB_COLOR2_CLEAR_WORD2 | ||
563 | 0x00028D10 CB_COLOR2_CLEAR_WORD3 | ||
564 | 0x00028D40 CB_COLOR3_CLEAR_WORD0 | ||
565 | 0x00028D44 CB_COLOR3_CLEAR_WORD1 | ||
566 | 0x00028D48 CB_COLOR3_CLEAR_WORD2 | ||
567 | 0x00028D4C CB_COLOR3_CLEAR_WORD3 | ||
568 | 0x00028D7C CB_COLOR4_CLEAR_WORD0 | ||
569 | 0x00028D80 CB_COLOR4_CLEAR_WORD1 | ||
570 | 0x00028D84 CB_COLOR4_CLEAR_WORD2 | ||
571 | 0x00028D88 CB_COLOR4_CLEAR_WORD3 | ||
572 | 0x00028DB8 CB_COLOR5_CLEAR_WORD0 | ||
573 | 0x00028DBC CB_COLOR5_CLEAR_WORD1 | ||
574 | 0x00028DC0 CB_COLOR5_CLEAR_WORD2 | ||
575 | 0x00028DC4 CB_COLOR5_CLEAR_WORD3 | ||
576 | 0x00028DF4 CB_COLOR6_CLEAR_WORD0 | ||
577 | 0x00028DF8 CB_COLOR6_CLEAR_WORD1 | ||
578 | 0x00028DFC CB_COLOR6_CLEAR_WORD2 | ||
579 | 0x00028E00 CB_COLOR6_CLEAR_WORD3 | ||
580 | 0x00028E30 CB_COLOR7_CLEAR_WORD0 | ||
581 | 0x00028E34 CB_COLOR7_CLEAR_WORD1 | ||
582 | 0x00028E38 CB_COLOR7_CLEAR_WORD2 | ||
583 | 0x00028E3C CB_COLOR7_CLEAR_WORD3 | ||
584 | 0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0 | ||
585 | 0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1 | ||
586 | 0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2 | ||
587 | 0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3 | ||
588 | 0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4 | ||
589 | 0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5 | ||
590 | 0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6 | ||
591 | 0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7 | ||
592 | 0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8 | ||
593 | 0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9 | ||
594 | 0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10 | ||
595 | 0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11 | ||
596 | 0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12 | ||
597 | 0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13 | ||
598 | 0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14 | ||
599 | 0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15 | ||
600 | 0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0 | ||
601 | 0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1 | ||
602 | 0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2 | ||
603 | 0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3 | ||
604 | 0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4 | ||
605 | 0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5 | ||
606 | 0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6 | ||
607 | 0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7 | ||
608 | 0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8 | ||
609 | 0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9 | ||
610 | 0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10 | ||
611 | 0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11 | ||
612 | 0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12 | ||
613 | 0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13 | ||
614 | 0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14 | ||
615 | 0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15 | ||
616 | 0x0003CFF0 SQ_VTX_BASE_VTX_LOC | ||
617 | 0x0003CFF4 SQ_VTX_START_INST_LOC | ||
618 | 0x0003FF00 SQ_TEX_SAMPLER_CLEAR | ||
619 | 0x0003FF04 SQ_TEX_RESOURCE_CLEAR | ||
620 | 0x0003FF08 SQ_LOOP_BOOL_CLEAR | ||
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index f78fd592544d..0e28cae7ea43 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen | |||
@@ -1,4 +1,5 @@ | |||
1 | evergreen 0x9400 | 1 | evergreen 0x9400 |
2 | 0x0000802C GRBM_GFX_INDEX | ||
2 | 0x00008040 WAIT_UNTIL | 3 | 0x00008040 WAIT_UNTIL |
3 | 0x00008044 WAIT_UNTIL_POLL_CNTL | 4 | 0x00008044 WAIT_UNTIL_POLL_CNTL |
4 | 0x00008048 WAIT_UNTIL_POLL_MASK | 5 | 0x00008048 WAIT_UNTIL_POLL_MASK |
@@ -22,6 +23,10 @@ evergreen 0x9400 | |||
22 | 0x00008B10 PA_SC_LINE_STIPPLE_STATE | 23 | 0x00008B10 PA_SC_LINE_STIPPLE_STATE |
23 | 0x00008BF0 PA_SC_ENHANCE | 24 | 0x00008BF0 PA_SC_ENHANCE |
24 | 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ | 25 | 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ |
26 | 0x00008D90 SQ_DYN_GPR_OPTIMIZATION | ||
27 | 0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN | ||
28 | 0x00008D98 SQ_DYN_GPR_THREAD_LIMIT | ||
29 | 0x00008D9C SQ_DYN_GPR_LDS_LIMIT | ||
25 | 0x00008C00 SQ_CONFIG | 30 | 0x00008C00 SQ_CONFIG |
26 | 0x00008C04 SQ_GPR_RESOURCE_MGMT_1 | 31 | 0x00008C04 SQ_GPR_RESOURCE_MGMT_1 |
27 | 0x00008C08 SQ_GPR_RESOURCE_MGMT_2 | 32 | 0x00008C08 SQ_GPR_RESOURCE_MGMT_2 |
@@ -34,9 +39,14 @@ evergreen 0x9400 | |||
34 | 0x00008C24 SQ_STACK_RESOURCE_MGMT_2 | 39 | 0x00008C24 SQ_STACK_RESOURCE_MGMT_2 |
35 | 0x00008C28 SQ_STACK_RESOURCE_MGMT_3 | 40 | 0x00008C28 SQ_STACK_RESOURCE_MGMT_3 |
36 | 0x00008DF8 SQ_CONST_MEM_BASE | 41 | 0x00008DF8 SQ_CONST_MEM_BASE |
42 | 0x00008E20 SQ_STATIC_THREAD_MGMT_1 | ||
43 | 0x00008E24 SQ_STATIC_THREAD_MGMT_2 | ||
44 | 0x00008E28 SQ_STATIC_THREAD_MGMT_3 | ||
45 | 0x00008E2C SQ_LDS_RESOURCE_MGMT | ||
37 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS | 46 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS |
38 | 0x00009100 SPI_CONFIG_CNTL | 47 | 0x00009100 SPI_CONFIG_CNTL |
39 | 0x0000913C SPI_CONFIG_CNTL_1 | 48 | 0x0000913C SPI_CONFIG_CNTL_1 |
49 | 0x00009508 TA_CNTL_AUX | ||
40 | 0x00009700 VC_CNTL | 50 | 0x00009700 VC_CNTL |
41 | 0x00009714 VC_ENHANCE | 51 | 0x00009714 VC_ENHANCE |
42 | 0x00009830 DB_DEBUG | 52 | 0x00009830 DB_DEBUG |
@@ -212,6 +222,7 @@ evergreen 0x9400 | |||
212 | 0x00028348 PA_SC_VPORT_ZMIN_15 | 222 | 0x00028348 PA_SC_VPORT_ZMIN_15 |
213 | 0x0002834C PA_SC_VPORT_ZMAX_15 | 223 | 0x0002834C PA_SC_VPORT_ZMAX_15 |
214 | 0x00028350 SX_MISC | 224 | 0x00028350 SX_MISC |
225 | 0x00028354 SX_SURFACE_SYNC | ||
215 | 0x00028380 SQ_VTX_SEMANTIC_0 | 226 | 0x00028380 SQ_VTX_SEMANTIC_0 |
216 | 0x00028384 SQ_VTX_SEMANTIC_1 | 227 | 0x00028384 SQ_VTX_SEMANTIC_1 |
217 | 0x00028388 SQ_VTX_SEMANTIC_2 | 228 | 0x00028388 SQ_VTX_SEMANTIC_2 |
@@ -431,7 +442,7 @@ evergreen 0x9400 | |||
431 | 0x000286EC SPI_COMPUTE_NUM_THREAD_X | 442 | 0x000286EC SPI_COMPUTE_NUM_THREAD_X |
432 | 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y | 443 | 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y |
433 | 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z | 444 | 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z |
434 | 0x000286F8 GDS_ADDR_SIZE | 445 | 0x00028724 GDS_ADDR_SIZE |
435 | 0x00028780 CB_BLEND0_CONTROL | 446 | 0x00028780 CB_BLEND0_CONTROL |
436 | 0x00028784 CB_BLEND1_CONTROL | 447 | 0x00028784 CB_BLEND1_CONTROL |
437 | 0x00028788 CB_BLEND2_CONTROL | 448 | 0x00028788 CB_BLEND2_CONTROL |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300 index b506ec1cab4b..e8a1786b6426 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r300 +++ b/drivers/gpu/drm/radeon/reg_srcs/r300 | |||
@@ -683,9 +683,7 @@ r300 0x4f60 | |||
683 | 0x4DF4 US_ALU_CONST_G_31 | 683 | 0x4DF4 US_ALU_CONST_G_31 |
684 | 0x4DF8 US_ALU_CONST_B_31 | 684 | 0x4DF8 US_ALU_CONST_B_31 |
685 | 0x4DFC US_ALU_CONST_A_31 | 685 | 0x4DFC US_ALU_CONST_A_31 |
686 | 0x4E04 RB3D_BLENDCNTL_R3 | ||
687 | 0x4E08 RB3D_ABLENDCNTL_R3 | 686 | 0x4E08 RB3D_ABLENDCNTL_R3 |
688 | 0x4E0C RB3D_COLOR_CHANNEL_MASK | ||
689 | 0x4E10 RB3D_CONSTANT_COLOR | 687 | 0x4E10 RB3D_CONSTANT_COLOR |
690 | 0x4E14 RB3D_COLOR_CLEAR_VALUE | 688 | 0x4E14 RB3D_COLOR_CLEAR_VALUE |
691 | 0x4E18 RB3D_ROPCNTL_R3 | 689 | 0x4E18 RB3D_ROPCNTL_R3 |
@@ -706,13 +704,11 @@ r300 0x4f60 | |||
706 | 0x4E74 RB3D_CMASK_WRINDEX | 704 | 0x4E74 RB3D_CMASK_WRINDEX |
707 | 0x4E78 RB3D_CMASK_DWORD | 705 | 0x4E78 RB3D_CMASK_DWORD |
708 | 0x4E7C RB3D_CMASK_RDINDEX | 706 | 0x4E7C RB3D_CMASK_RDINDEX |
709 | 0x4E80 RB3D_AARESOLVE_OFFSET | ||
710 | 0x4E84 RB3D_AARESOLVE_PITCH | ||
711 | 0x4E88 RB3D_AARESOLVE_CTL | ||
712 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD | 707 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD |
713 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD | 708 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD |
714 | 0x4F04 ZB_ZSTENCILCNTL | 709 | 0x4F04 ZB_ZSTENCILCNTL |
715 | 0x4F08 ZB_STENCILREFMASK | 710 | 0x4F08 ZB_STENCILREFMASK |
716 | 0x4F14 ZB_ZTOP | 711 | 0x4F14 ZB_ZTOP |
717 | 0x4F18 ZB_ZCACHE_CTLSTAT | 712 | 0x4F18 ZB_ZCACHE_CTLSTAT |
713 | 0x4F28 ZB_DEPTHCLEARVALUE | ||
718 | 0x4F58 ZB_ZPASS_DATA | 714 | 0x4F58 ZB_ZPASS_DATA |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420 index 8c1214c2390f..722074e21e2f 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r420 +++ b/drivers/gpu/drm/radeon/reg_srcs/r420 | |||
@@ -130,7 +130,6 @@ r420 0x4f60 | |||
130 | 0x401C GB_SELECT | 130 | 0x401C GB_SELECT |
131 | 0x4020 GB_AA_CONFIG | 131 | 0x4020 GB_AA_CONFIG |
132 | 0x4024 GB_FIFO_SIZE | 132 | 0x4024 GB_FIFO_SIZE |
133 | 0x4028 GB_Z_PEQ_CONFIG | ||
134 | 0x4100 TX_INVALTAGS | 133 | 0x4100 TX_INVALTAGS |
135 | 0x4200 GA_POINT_S0 | 134 | 0x4200 GA_POINT_S0 |
136 | 0x4204 GA_POINT_T0 | 135 | 0x4204 GA_POINT_T0 |
@@ -750,9 +749,7 @@ r420 0x4f60 | |||
750 | 0x4DF4 US_ALU_CONST_G_31 | 749 | 0x4DF4 US_ALU_CONST_G_31 |
751 | 0x4DF8 US_ALU_CONST_B_31 | 750 | 0x4DF8 US_ALU_CONST_B_31 |
752 | 0x4DFC US_ALU_CONST_A_31 | 751 | 0x4DFC US_ALU_CONST_A_31 |
753 | 0x4E04 RB3D_BLENDCNTL_R3 | ||
754 | 0x4E08 RB3D_ABLENDCNTL_R3 | 752 | 0x4E08 RB3D_ABLENDCNTL_R3 |
755 | 0x4E0C RB3D_COLOR_CHANNEL_MASK | ||
756 | 0x4E10 RB3D_CONSTANT_COLOR | 753 | 0x4E10 RB3D_CONSTANT_COLOR |
757 | 0x4E14 RB3D_COLOR_CLEAR_VALUE | 754 | 0x4E14 RB3D_COLOR_CLEAR_VALUE |
758 | 0x4E18 RB3D_ROPCNTL_R3 | 755 | 0x4E18 RB3D_ROPCNTL_R3 |
@@ -773,13 +770,11 @@ r420 0x4f60 | |||
773 | 0x4E74 RB3D_CMASK_WRINDEX | 770 | 0x4E74 RB3D_CMASK_WRINDEX |
774 | 0x4E78 RB3D_CMASK_DWORD | 771 | 0x4E78 RB3D_CMASK_DWORD |
775 | 0x4E7C RB3D_CMASK_RDINDEX | 772 | 0x4E7C RB3D_CMASK_RDINDEX |
776 | 0x4E80 RB3D_AARESOLVE_OFFSET | ||
777 | 0x4E84 RB3D_AARESOLVE_PITCH | ||
778 | 0x4E88 RB3D_AARESOLVE_CTL | ||
779 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD | 773 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD |
780 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD | 774 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD |
781 | 0x4F04 ZB_ZSTENCILCNTL | 775 | 0x4F04 ZB_ZSTENCILCNTL |
782 | 0x4F08 ZB_STENCILREFMASK | 776 | 0x4F08 ZB_STENCILREFMASK |
783 | 0x4F14 ZB_ZTOP | 777 | 0x4F14 ZB_ZTOP |
784 | 0x4F18 ZB_ZCACHE_CTLSTAT | 778 | 0x4F18 ZB_ZCACHE_CTLSTAT |
779 | 0x4F28 ZB_DEPTHCLEARVALUE | ||
785 | 0x4F58 ZB_ZPASS_DATA | 780 | 0x4F58 ZB_ZPASS_DATA |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600 index af0da4ae3f55..ea49752ee99c 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/drivers/gpu/drm/radeon/reg_srcs/r600 | |||
@@ -708,6 +708,7 @@ r600 0x9400 | |||
708 | 0x00028D0C DB_RENDER_CONTROL | 708 | 0x00028D0C DB_RENDER_CONTROL |
709 | 0x00028D10 DB_RENDER_OVERRIDE | 709 | 0x00028D10 DB_RENDER_OVERRIDE |
710 | 0x0002880C DB_SHADER_CONTROL | 710 | 0x0002880C DB_SHADER_CONTROL |
711 | 0x00028D28 DB_SRESULTS_COMPARE_STATE0 | ||
711 | 0x00028D2C DB_SRESULTS_COMPARE_STATE1 | 712 | 0x00028D2C DB_SRESULTS_COMPARE_STATE1 |
712 | 0x00028430 DB_STENCILREFMASK | 713 | 0x00028430 DB_STENCILREFMASK |
713 | 0x00028434 DB_STENCILREFMASK_BF | 714 | 0x00028434 DB_STENCILREFMASK_BF |
@@ -757,6 +758,5 @@ r600 0x9400 | |||
757 | 0x00009714 VC_ENHANCE | 758 | 0x00009714 VC_ENHANCE |
758 | 0x00009830 DB_DEBUG | 759 | 0x00009830 DB_DEBUG |
759 | 0x00009838 DB_WATERMARKS | 760 | 0x00009838 DB_WATERMARKS |
760 | 0x00028D28 DB_SRESULTS_COMPARE_STATE0 | ||
761 | 0x00028D44 DB_ALPHA_TO_MASK | 761 | 0x00028D44 DB_ALPHA_TO_MASK |
762 | 0x00009700 VC_CNTL | 762 | 0x00009700 VC_CNTL |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600 index 0828d80396f2..d9f62866bbc1 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rs600 +++ b/drivers/gpu/drm/radeon/reg_srcs/rs600 | |||
@@ -749,9 +749,7 @@ rs600 0x6d40 | |||
749 | 0x4DF4 US_ALU_CONST_G_31 | 749 | 0x4DF4 US_ALU_CONST_G_31 |
750 | 0x4DF8 US_ALU_CONST_B_31 | 750 | 0x4DF8 US_ALU_CONST_B_31 |
751 | 0x4DFC US_ALU_CONST_A_31 | 751 | 0x4DFC US_ALU_CONST_A_31 |
752 | 0x4E04 RB3D_BLENDCNTL_R3 | ||
753 | 0x4E08 RB3D_ABLENDCNTL_R3 | 752 | 0x4E08 RB3D_ABLENDCNTL_R3 |
754 | 0x4E0C RB3D_COLOR_CHANNEL_MASK | ||
755 | 0x4E10 RB3D_CONSTANT_COLOR | 753 | 0x4E10 RB3D_CONSTANT_COLOR |
756 | 0x4E14 RB3D_COLOR_CLEAR_VALUE | 754 | 0x4E14 RB3D_COLOR_CLEAR_VALUE |
757 | 0x4E18 RB3D_ROPCNTL_R3 | 755 | 0x4E18 RB3D_ROPCNTL_R3 |
@@ -772,13 +770,11 @@ rs600 0x6d40 | |||
772 | 0x4E74 RB3D_CMASK_WRINDEX | 770 | 0x4E74 RB3D_CMASK_WRINDEX |
773 | 0x4E78 RB3D_CMASK_DWORD | 771 | 0x4E78 RB3D_CMASK_DWORD |
774 | 0x4E7C RB3D_CMASK_RDINDEX | 772 | 0x4E7C RB3D_CMASK_RDINDEX |
775 | 0x4E80 RB3D_AARESOLVE_OFFSET | ||
776 | 0x4E84 RB3D_AARESOLVE_PITCH | ||
777 | 0x4E88 RB3D_AARESOLVE_CTL | ||
778 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD | 773 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD |
779 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD | 774 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD |
780 | 0x4F04 ZB_ZSTENCILCNTL | 775 | 0x4F04 ZB_ZSTENCILCNTL |
781 | 0x4F08 ZB_STENCILREFMASK | 776 | 0x4F08 ZB_STENCILREFMASK |
782 | 0x4F14 ZB_ZTOP | 777 | 0x4F14 ZB_ZTOP |
783 | 0x4F18 ZB_ZCACHE_CTLSTAT | 778 | 0x4F18 ZB_ZCACHE_CTLSTAT |
779 | 0x4F28 ZB_DEPTHCLEARVALUE | ||
784 | 0x4F58 ZB_ZPASS_DATA | 780 | 0x4F58 ZB_ZPASS_DATA |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index b3f9f1d92005..911a8fbd32bb 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 | |||
@@ -164,7 +164,6 @@ rv515 0x6d40 | |||
164 | 0x401C GB_SELECT | 164 | 0x401C GB_SELECT |
165 | 0x4020 GB_AA_CONFIG | 165 | 0x4020 GB_AA_CONFIG |
166 | 0x4024 GB_FIFO_SIZE | 166 | 0x4024 GB_FIFO_SIZE |
167 | 0x4028 GB_Z_PEQ_CONFIG | ||
168 | 0x4100 TX_INVALTAGS | 167 | 0x4100 TX_INVALTAGS |
169 | 0x4114 SU_TEX_WRAP_PS3 | 168 | 0x4114 SU_TEX_WRAP_PS3 |
170 | 0x4118 PS3_ENABLE | 169 | 0x4118 PS3_ENABLE |
@@ -304,6 +303,22 @@ rv515 0x6d40 | |||
304 | 0x4630 US_CODE_ADDR | 303 | 0x4630 US_CODE_ADDR |
305 | 0x4634 US_CODE_RANGE | 304 | 0x4634 US_CODE_RANGE |
306 | 0x4638 US_CODE_OFFSET | 305 | 0x4638 US_CODE_OFFSET |
306 | 0x4640 US_FORMAT0_0 | ||
307 | 0x4644 US_FORMAT0_1 | ||
308 | 0x4648 US_FORMAT0_2 | ||
309 | 0x464C US_FORMAT0_3 | ||
310 | 0x4650 US_FORMAT0_4 | ||
311 | 0x4654 US_FORMAT0_5 | ||
312 | 0x4658 US_FORMAT0_6 | ||
313 | 0x465C US_FORMAT0_7 | ||
314 | 0x4660 US_FORMAT0_8 | ||
315 | 0x4664 US_FORMAT0_9 | ||
316 | 0x4668 US_FORMAT0_10 | ||
317 | 0x466C US_FORMAT0_11 | ||
318 | 0x4670 US_FORMAT0_12 | ||
319 | 0x4674 US_FORMAT0_13 | ||
320 | 0x4678 US_FORMAT0_14 | ||
321 | 0x467C US_FORMAT0_15 | ||
307 | 0x46A4 US_OUT_FMT_0 | 322 | 0x46A4 US_OUT_FMT_0 |
308 | 0x46A8 US_OUT_FMT_1 | 323 | 0x46A8 US_OUT_FMT_1 |
309 | 0x46AC US_OUT_FMT_2 | 324 | 0x46AC US_OUT_FMT_2 |
@@ -445,9 +460,7 @@ rv515 0x6d40 | |||
445 | 0x4DF4 US_ALU_CONST_G_31 | 460 | 0x4DF4 US_ALU_CONST_G_31 |
446 | 0x4DF8 US_ALU_CONST_B_31 | 461 | 0x4DF8 US_ALU_CONST_B_31 |
447 | 0x4DFC US_ALU_CONST_A_31 | 462 | 0x4DFC US_ALU_CONST_A_31 |
448 | 0x4E04 RB3D_BLENDCNTL_R3 | ||
449 | 0x4E08 RB3D_ABLENDCNTL_R3 | 463 | 0x4E08 RB3D_ABLENDCNTL_R3 |
450 | 0x4E0C RB3D_COLOR_CHANNEL_MASK | ||
451 | 0x4E10 RB3D_CONSTANT_COLOR | 464 | 0x4E10 RB3D_CONSTANT_COLOR |
452 | 0x4E14 RB3D_COLOR_CLEAR_VALUE | 465 | 0x4E14 RB3D_COLOR_CLEAR_VALUE |
453 | 0x4E18 RB3D_ROPCNTL_R3 | 466 | 0x4E18 RB3D_ROPCNTL_R3 |
@@ -468,9 +481,6 @@ rv515 0x6d40 | |||
468 | 0x4E74 RB3D_CMASK_WRINDEX | 481 | 0x4E74 RB3D_CMASK_WRINDEX |
469 | 0x4E78 RB3D_CMASK_DWORD | 482 | 0x4E78 RB3D_CMASK_DWORD |
470 | 0x4E7C RB3D_CMASK_RDINDEX | 483 | 0x4E7C RB3D_CMASK_RDINDEX |
471 | 0x4E80 RB3D_AARESOLVE_OFFSET | ||
472 | 0x4E84 RB3D_AARESOLVE_PITCH | ||
473 | 0x4E88 RB3D_AARESOLVE_CTL | ||
474 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD | 484 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD |
475 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD | 485 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD |
476 | 0x4EF8 RB3D_CONSTANT_COLOR_AR | 486 | 0x4EF8 RB3D_CONSTANT_COLOR_AR |
@@ -480,4 +490,5 @@ rv515 0x6d40 | |||
480 | 0x4F14 ZB_ZTOP | 490 | 0x4F14 ZB_ZTOP |
481 | 0x4F18 ZB_ZCACHE_CTLSTAT | 491 | 0x4F18 ZB_ZCACHE_CTLSTAT |
482 | 0x4F58 ZB_ZPASS_DATA | 492 | 0x4F58 ZB_ZPASS_DATA |
493 | 0x4F28 ZB_DEPTHCLEARVALUE | ||
483 | 0x4FD4 ZB_STENCILREFMASK_BF | 494 | 0x4FD4 ZB_STENCILREFMASK_BF |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index ae2b76b9a388..aa6a66eeb4ec 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev) | |||
78 | int r; | 78 | int r; |
79 | 79 | ||
80 | if (rdev->gart.table.ram.ptr) { | 80 | if (rdev->gart.table.ram.ptr) { |
81 | WARN(1, "RS400 GART already initialized.\n"); | 81 | WARN(1, "RS400 GART already initialized\n"); |
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
84 | /* Check gart size */ | 84 | /* Check gart size */ |
@@ -203,6 +203,9 @@ void rs400_gart_fini(struct radeon_device *rdev) | |||
203 | radeon_gart_table_ram_free(rdev); | 203 | radeon_gart_table_ram_free(rdev); |
204 | } | 204 | } |
205 | 205 | ||
206 | #define RS400_PTE_WRITEABLE (1 << 2) | ||
207 | #define RS400_PTE_READABLE (1 << 3) | ||
208 | |||
206 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 209 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
207 | { | 210 | { |
208 | uint32_t entry; | 211 | uint32_t entry; |
@@ -213,7 +216,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
213 | 216 | ||
214 | entry = (lower_32_bits(addr) & PAGE_MASK) | | 217 | entry = (lower_32_bits(addr) & PAGE_MASK) | |
215 | ((upper_32_bits(addr) & 0xff) << 4) | | 218 | ((upper_32_bits(addr) & 0xff) << 4) | |
216 | 0xc; | 219 | RS400_PTE_WRITEABLE | RS400_PTE_READABLE; |
217 | entry = cpu_to_le32(entry); | 220 | entry = cpu_to_le32(entry); |
218 | rdev->gart.table.ram.ptr[i] = entry; | 221 | rdev->gart.table.ram.ptr[i] = entry; |
219 | return 0; | 222 | return 0; |
@@ -226,8 +229,8 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev) | |||
226 | 229 | ||
227 | for (i = 0; i < rdev->usec_timeout; i++) { | 230 | for (i = 0; i < rdev->usec_timeout; i++) { |
228 | /* read MC_STATUS */ | 231 | /* read MC_STATUS */ |
229 | tmp = RREG32(0x0150); | 232 | tmp = RREG32(RADEON_MC_STATUS); |
230 | if (tmp & (1 << 2)) { | 233 | if (tmp & RADEON_MC_IDLE) { |
231 | return 0; | 234 | return 0; |
232 | } | 235 | } |
233 | DRM_UDELAY(1); | 236 | DRM_UDELAY(1); |
@@ -241,7 +244,7 @@ void rs400_gpu_init(struct radeon_device *rdev) | |||
241 | r420_pipes_init(rdev); | 244 | r420_pipes_init(rdev); |
242 | if (rs400_mc_wait_for_idle(rdev)) { | 245 | if (rs400_mc_wait_for_idle(rdev)) { |
243 | printk(KERN_WARNING "rs400: Failed to wait MC idle while " | 246 | printk(KERN_WARNING "rs400: Failed to wait MC idle while " |
244 | "programming pipes. Bad things might happen. %08x\n", RREG32(0x150)); | 247 | "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS)); |
245 | } | 248 | } |
246 | } | 249 | } |
247 | 250 | ||
@@ -300,9 +303,9 @@ static int rs400_debugfs_gart_info(struct seq_file *m, void *data) | |||
300 | seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp); | 303 | seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp); |
301 | tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION); | 304 | tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION); |
302 | seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp); | 305 | seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp); |
303 | tmp = RREG32_MC(0x100); | 306 | tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION); |
304 | seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp); | 307 | seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp); |
305 | tmp = RREG32(0x134); | 308 | tmp = RREG32(RS690_HDP_FB_LOCATION); |
306 | seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp); | 309 | seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp); |
307 | } else { | 310 | } else { |
308 | tmp = RREG32(RADEON_AGP_BASE); | 311 | tmp = RREG32(RADEON_AGP_BASE); |
@@ -397,21 +400,24 @@ static int rs400_startup(struct radeon_device *rdev) | |||
397 | r = rs400_gart_enable(rdev); | 400 | r = rs400_gart_enable(rdev); |
398 | if (r) | 401 | if (r) |
399 | return r; | 402 | return r; |
403 | |||
404 | /* allocate wb buffer */ | ||
405 | r = radeon_wb_init(rdev); | ||
406 | if (r) | ||
407 | return r; | ||
408 | |||
400 | /* Enable IRQ */ | 409 | /* Enable IRQ */ |
401 | r100_irq_set(rdev); | 410 | r100_irq_set(rdev); |
402 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 411 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
403 | /* 1M ring buffer */ | 412 | /* 1M ring buffer */ |
404 | r = r100_cp_init(rdev, 1024 * 1024); | 413 | r = r100_cp_init(rdev, 1024 * 1024); |
405 | if (r) { | 414 | if (r) { |
406 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 415 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
407 | return r; | 416 | return r; |
408 | } | 417 | } |
409 | r = r100_wb_init(rdev); | ||
410 | if (r) | ||
411 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
412 | r = r100_ib_init(rdev); | 418 | r = r100_ib_init(rdev); |
413 | if (r) { | 419 | if (r) { |
414 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 420 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); |
415 | return r; | 421 | return r; |
416 | } | 422 | } |
417 | return 0; | 423 | return 0; |
@@ -443,7 +449,7 @@ int rs400_resume(struct radeon_device *rdev) | |||
443 | int rs400_suspend(struct radeon_device *rdev) | 449 | int rs400_suspend(struct radeon_device *rdev) |
444 | { | 450 | { |
445 | r100_cp_disable(rdev); | 451 | r100_cp_disable(rdev); |
446 | r100_wb_disable(rdev); | 452 | radeon_wb_disable(rdev); |
447 | r100_irq_disable(rdev); | 453 | r100_irq_disable(rdev); |
448 | rs400_gart_disable(rdev); | 454 | rs400_gart_disable(rdev); |
449 | return 0; | 455 | return 0; |
@@ -452,7 +458,7 @@ int rs400_suspend(struct radeon_device *rdev) | |||
452 | void rs400_fini(struct radeon_device *rdev) | 458 | void rs400_fini(struct radeon_device *rdev) |
453 | { | 459 | { |
454 | r100_cp_fini(rdev); | 460 | r100_cp_fini(rdev); |
455 | r100_wb_fini(rdev); | 461 | radeon_wb_fini(rdev); |
456 | r100_ib_fini(rdev); | 462 | r100_ib_fini(rdev); |
457 | radeon_gem_fini(rdev); | 463 | radeon_gem_fini(rdev); |
458 | rs400_gart_fini(rdev); | 464 | rs400_gart_fini(rdev); |
@@ -526,7 +532,7 @@ int rs400_init(struct radeon_device *rdev) | |||
526 | /* Somethings want wront with the accel init stop accel */ | 532 | /* Somethings want wront with the accel init stop accel */ |
527 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 533 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
528 | r100_cp_fini(rdev); | 534 | r100_cp_fini(rdev); |
529 | r100_wb_fini(rdev); | 535 | radeon_wb_fini(rdev); |
530 | r100_ib_fini(rdev); | 536 | r100_ib_fini(rdev); |
531 | rs400_gart_fini(rdev); | 537 | rs400_gart_fini(rdev); |
532 | radeon_irq_kms_fini(rdev); | 538 | radeon_irq_kms_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 51d5f7b5ab21..1f5850e473cc 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -46,6 +46,45 @@ | |||
46 | void rs600_gpu_init(struct radeon_device *rdev); | 46 | void rs600_gpu_init(struct radeon_device *rdev); |
47 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); | 47 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); |
48 | 48 | ||
49 | void rs600_pre_page_flip(struct radeon_device *rdev, int crtc) | ||
50 | { | ||
51 | /* enable the pflip int */ | ||
52 | radeon_irq_kms_pflip_irq_get(rdev, crtc); | ||
53 | } | ||
54 | |||
55 | void rs600_post_page_flip(struct radeon_device *rdev, int crtc) | ||
56 | { | ||
57 | /* disable the pflip int */ | ||
58 | radeon_irq_kms_pflip_irq_put(rdev, crtc); | ||
59 | } | ||
60 | |||
61 | u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | ||
62 | { | ||
63 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | ||
64 | u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); | ||
65 | |||
66 | /* Lock the graphics update lock */ | ||
67 | tmp |= AVIVO_D1GRPH_UPDATE_LOCK; | ||
68 | WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); | ||
69 | |||
70 | /* update the scanout addresses */ | ||
71 | WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | ||
72 | (u32)crtc_base); | ||
73 | WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | ||
74 | (u32)crtc_base); | ||
75 | |||
76 | /* Wait for update_pending to go high. */ | ||
77 | while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); | ||
78 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); | ||
79 | |||
80 | /* Unlock the lock, so double-buffering can take place inside vblank */ | ||
81 | tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; | ||
82 | WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); | ||
83 | |||
84 | /* Return current update_pending status: */ | ||
85 | return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; | ||
86 | } | ||
87 | |||
49 | void rs600_pm_misc(struct radeon_device *rdev) | 88 | void rs600_pm_misc(struct radeon_device *rdev) |
50 | { | 89 | { |
51 | int requested_index = rdev->pm.requested_power_state_index; | 90 | int requested_index = rdev->pm.requested_power_state_index; |
@@ -75,7 +114,7 @@ void rs600_pm_misc(struct radeon_device *rdev) | |||
75 | udelay(voltage->delay); | 114 | udelay(voltage->delay); |
76 | } | 115 | } |
77 | } else if (voltage->type == VOLTAGE_VDDC) | 116 | } else if (voltage->type == VOLTAGE_VDDC) |
78 | radeon_atom_set_voltage(rdev, voltage->vddc_id); | 117 | radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC); |
79 | 118 | ||
80 | dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); | 119 | dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); |
81 | dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); | 120 | dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); |
@@ -289,16 +328,16 @@ void rs600_bm_disable(struct radeon_device *rdev) | |||
289 | 328 | ||
290 | int rs600_asic_reset(struct radeon_device *rdev) | 329 | int rs600_asic_reset(struct radeon_device *rdev) |
291 | { | 330 | { |
292 | u32 status, tmp; | ||
293 | |||
294 | struct rv515_mc_save save; | 331 | struct rv515_mc_save save; |
332 | u32 status, tmp; | ||
333 | int ret = 0; | ||
295 | 334 | ||
296 | /* Stops all mc clients */ | ||
297 | rv515_mc_stop(rdev, &save); | ||
298 | status = RREG32(R_000E40_RBBM_STATUS); | 335 | status = RREG32(R_000E40_RBBM_STATUS); |
299 | if (!G_000E40_GUI_ACTIVE(status)) { | 336 | if (!G_000E40_GUI_ACTIVE(status)) { |
300 | return 0; | 337 | return 0; |
301 | } | 338 | } |
339 | /* Stops all mc clients */ | ||
340 | rv515_mc_stop(rdev, &save); | ||
302 | status = RREG32(R_000E40_RBBM_STATUS); | 341 | status = RREG32(R_000E40_RBBM_STATUS); |
303 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); | 342 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); |
304 | /* stop CP */ | 343 | /* stop CP */ |
@@ -342,11 +381,11 @@ int rs600_asic_reset(struct radeon_device *rdev) | |||
342 | if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { | 381 | if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { |
343 | dev_err(rdev->dev, "failed to reset GPU\n"); | 382 | dev_err(rdev->dev, "failed to reset GPU\n"); |
344 | rdev->gpu_lockup = true; | 383 | rdev->gpu_lockup = true; |
345 | return -1; | 384 | ret = -1; |
346 | } | 385 | } else |
386 | dev_info(rdev->dev, "GPU reset succeed\n"); | ||
347 | rv515_mc_resume(rdev, &save); | 387 | rv515_mc_resume(rdev, &save); |
348 | dev_info(rdev->dev, "GPU reset succeed\n"); | 388 | return ret; |
349 | return 0; | ||
350 | } | 389 | } |
351 | 390 | ||
352 | /* | 391 | /* |
@@ -375,7 +414,7 @@ int rs600_gart_init(struct radeon_device *rdev) | |||
375 | int r; | 414 | int r; |
376 | 415 | ||
377 | if (rdev->gart.table.vram.robj) { | 416 | if (rdev->gart.table.vram.robj) { |
378 | WARN(1, "RS600 GART already initialized.\n"); | 417 | WARN(1, "RS600 GART already initialized\n"); |
379 | return 0; | 418 | return 0; |
380 | } | 419 | } |
381 | /* Initialize common gart structure */ | 420 | /* Initialize common gart structure */ |
@@ -387,7 +426,7 @@ int rs600_gart_init(struct radeon_device *rdev) | |||
387 | return radeon_gart_table_vram_alloc(rdev); | 426 | return radeon_gart_table_vram_alloc(rdev); |
388 | } | 427 | } |
389 | 428 | ||
390 | int rs600_gart_enable(struct radeon_device *rdev) | 429 | static int rs600_gart_enable(struct radeon_device *rdev) |
391 | { | 430 | { |
392 | u32 tmp; | 431 | u32 tmp; |
393 | int r, i; | 432 | int r, i; |
@@ -401,8 +440,8 @@ int rs600_gart_enable(struct radeon_device *rdev) | |||
401 | return r; | 440 | return r; |
402 | radeon_gart_restore(rdev); | 441 | radeon_gart_restore(rdev); |
403 | /* Enable bus master */ | 442 | /* Enable bus master */ |
404 | tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS; | 443 | tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; |
405 | WREG32(R_00004C_BUS_CNTL, tmp); | 444 | WREG32(RADEON_BUS_CNTL, tmp); |
406 | /* FIXME: setup default page */ | 445 | /* FIXME: setup default page */ |
407 | WREG32_MC(R_000100_MC_PT0_CNTL, | 446 | WREG32_MC(R_000100_MC_PT0_CNTL, |
408 | (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | | 447 | (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | |
@@ -505,7 +544,7 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
505 | ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); | 544 | ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); |
506 | 545 | ||
507 | if (!rdev->irq.installed) { | 546 | if (!rdev->irq.installed) { |
508 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | 547 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
509 | WREG32(R_000040_GEN_INT_CNTL, 0); | 548 | WREG32(R_000040_GEN_INT_CNTL, 0); |
510 | return -EINVAL; | 549 | return -EINVAL; |
511 | } | 550 | } |
@@ -515,10 +554,12 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
515 | if (rdev->irq.gui_idle) { | 554 | if (rdev->irq.gui_idle) { |
516 | tmp |= S_000040_GUI_IDLE(1); | 555 | tmp |= S_000040_GUI_IDLE(1); |
517 | } | 556 | } |
518 | if (rdev->irq.crtc_vblank_int[0]) { | 557 | if (rdev->irq.crtc_vblank_int[0] || |
558 | rdev->irq.pflip[0]) { | ||
519 | mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); | 559 | mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); |
520 | } | 560 | } |
521 | if (rdev->irq.crtc_vblank_int[1]) { | 561 | if (rdev->irq.crtc_vblank_int[1] || |
562 | rdev->irq.pflip[1]) { | ||
522 | mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); | 563 | mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); |
523 | } | 564 | } |
524 | if (rdev->irq.hpd[0]) { | 565 | if (rdev->irq.hpd[0]) { |
@@ -534,7 +575,7 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
534 | return 0; | 575 | return 0; |
535 | } | 576 | } |
536 | 577 | ||
537 | static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) | 578 | static inline u32 rs600_irq_ack(struct radeon_device *rdev) |
538 | { | 579 | { |
539 | uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); | 580 | uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); |
540 | uint32_t irq_mask = S_000044_SW_INT(1); | 581 | uint32_t irq_mask = S_000044_SW_INT(1); |
@@ -547,27 +588,27 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_ | |||
547 | } | 588 | } |
548 | 589 | ||
549 | if (G_000044_DISPLAY_INT_STAT(irqs)) { | 590 | if (G_000044_DISPLAY_INT_STAT(irqs)) { |
550 | *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); | 591 | rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); |
551 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { | 592 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
552 | WREG32(R_006534_D1MODE_VBLANK_STATUS, | 593 | WREG32(R_006534_D1MODE_VBLANK_STATUS, |
553 | S_006534_D1MODE_VBLANK_ACK(1)); | 594 | S_006534_D1MODE_VBLANK_ACK(1)); |
554 | } | 595 | } |
555 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) { | 596 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
556 | WREG32(R_006D34_D2MODE_VBLANK_STATUS, | 597 | WREG32(R_006D34_D2MODE_VBLANK_STATUS, |
557 | S_006D34_D2MODE_VBLANK_ACK(1)); | 598 | S_006D34_D2MODE_VBLANK_ACK(1)); |
558 | } | 599 | } |
559 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) { | 600 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
560 | tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); | 601 | tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); |
561 | tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1); | 602 | tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1); |
562 | WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); | 603 | WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); |
563 | } | 604 | } |
564 | if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) { | 605 | if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
565 | tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); | 606 | tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); |
566 | tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1); | 607 | tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1); |
567 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); | 608 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); |
568 | } | 609 | } |
569 | } else { | 610 | } else { |
570 | *r500_disp_int = 0; | 611 | rdev->irq.stat_regs.r500.disp_int = 0; |
571 | } | 612 | } |
572 | 613 | ||
573 | if (irqs) { | 614 | if (irqs) { |
@@ -578,32 +619,30 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_ | |||
578 | 619 | ||
579 | void rs600_irq_disable(struct radeon_device *rdev) | 620 | void rs600_irq_disable(struct radeon_device *rdev) |
580 | { | 621 | { |
581 | u32 tmp; | ||
582 | |||
583 | WREG32(R_000040_GEN_INT_CNTL, 0); | 622 | WREG32(R_000040_GEN_INT_CNTL, 0); |
584 | WREG32(R_006540_DxMODE_INT_MASK, 0); | 623 | WREG32(R_006540_DxMODE_INT_MASK, 0); |
585 | /* Wait and acknowledge irq */ | 624 | /* Wait and acknowledge irq */ |
586 | mdelay(1); | 625 | mdelay(1); |
587 | rs600_irq_ack(rdev, &tmp); | 626 | rs600_irq_ack(rdev); |
588 | } | 627 | } |
589 | 628 | ||
590 | int rs600_irq_process(struct radeon_device *rdev) | 629 | int rs600_irq_process(struct radeon_device *rdev) |
591 | { | 630 | { |
592 | uint32_t status, msi_rearm; | 631 | u32 status, msi_rearm; |
593 | uint32_t r500_disp_int; | ||
594 | bool queue_hotplug = false; | 632 | bool queue_hotplug = false; |
595 | 633 | ||
596 | /* reset gui idle ack. the status bit is broken */ | 634 | /* reset gui idle ack. the status bit is broken */ |
597 | rdev->irq.gui_idle_acked = false; | 635 | rdev->irq.gui_idle_acked = false; |
598 | 636 | ||
599 | status = rs600_irq_ack(rdev, &r500_disp_int); | 637 | status = rs600_irq_ack(rdev); |
600 | if (!status && !r500_disp_int) { | 638 | if (!status && !rdev->irq.stat_regs.r500.disp_int) { |
601 | return IRQ_NONE; | 639 | return IRQ_NONE; |
602 | } | 640 | } |
603 | while (status || r500_disp_int) { | 641 | while (status || rdev->irq.stat_regs.r500.disp_int) { |
604 | /* SW interrupt */ | 642 | /* SW interrupt */ |
605 | if (G_000044_SW_INT(status)) | 643 | if (G_000044_SW_INT(status)) { |
606 | radeon_fence_process(rdev); | 644 | radeon_fence_process(rdev); |
645 | } | ||
607 | /* GUI idle */ | 646 | /* GUI idle */ |
608 | if (G_000040_GUI_IDLE(status)) { | 647 | if (G_000040_GUI_IDLE(status)) { |
609 | rdev->irq.gui_idle_acked = true; | 648 | rdev->irq.gui_idle_acked = true; |
@@ -611,30 +650,38 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
611 | wake_up(&rdev->irq.idle_queue); | 650 | wake_up(&rdev->irq.idle_queue); |
612 | } | 651 | } |
613 | /* Vertical blank interrupts */ | 652 | /* Vertical blank interrupts */ |
614 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { | 653 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
615 | drm_handle_vblank(rdev->ddev, 0); | 654 | if (rdev->irq.crtc_vblank_int[0]) { |
616 | rdev->pm.vblank_sync = true; | 655 | drm_handle_vblank(rdev->ddev, 0); |
617 | wake_up(&rdev->irq.vblank_queue); | 656 | rdev->pm.vblank_sync = true; |
657 | wake_up(&rdev->irq.vblank_queue); | ||
658 | } | ||
659 | if (rdev->irq.pflip[0]) | ||
660 | radeon_crtc_handle_flip(rdev, 0); | ||
618 | } | 661 | } |
619 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) { | 662 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
620 | drm_handle_vblank(rdev->ddev, 1); | 663 | if (rdev->irq.crtc_vblank_int[1]) { |
621 | rdev->pm.vblank_sync = true; | 664 | drm_handle_vblank(rdev->ddev, 1); |
622 | wake_up(&rdev->irq.vblank_queue); | 665 | rdev->pm.vblank_sync = true; |
666 | wake_up(&rdev->irq.vblank_queue); | ||
667 | } | ||
668 | if (rdev->irq.pflip[1]) | ||
669 | radeon_crtc_handle_flip(rdev, 1); | ||
623 | } | 670 | } |
624 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { | 671 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
625 | queue_hotplug = true; | 672 | queue_hotplug = true; |
626 | DRM_DEBUG("HPD1\n"); | 673 | DRM_DEBUG("HPD1\n"); |
627 | } | 674 | } |
628 | if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) { | 675 | if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
629 | queue_hotplug = true; | 676 | queue_hotplug = true; |
630 | DRM_DEBUG("HPD2\n"); | 677 | DRM_DEBUG("HPD2\n"); |
631 | } | 678 | } |
632 | status = rs600_irq_ack(rdev, &r500_disp_int); | 679 | status = rs600_irq_ack(rdev); |
633 | } | 680 | } |
634 | /* reset gui idle ack. the status bit is broken */ | 681 | /* reset gui idle ack. the status bit is broken */ |
635 | rdev->irq.gui_idle_acked = false; | 682 | rdev->irq.gui_idle_acked = false; |
636 | if (queue_hotplug) | 683 | if (queue_hotplug) |
637 | queue_work(rdev->wq, &rdev->hotplug_work); | 684 | schedule_work(&rdev->hotplug_work); |
638 | if (rdev->msi_enabled) { | 685 | if (rdev->msi_enabled) { |
639 | switch (rdev->family) { | 686 | switch (rdev->family) { |
640 | case CHIP_RS600: | 687 | case CHIP_RS600: |
@@ -693,7 +740,6 @@ void rs600_mc_init(struct radeon_device *rdev) | |||
693 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | 740 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
694 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | 741 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
695 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 742 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
696 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
697 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 743 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
698 | base = RREG32_MC(R_000004_MC_FB_LOCATION); | 744 | base = RREG32_MC(R_000004_MC_FB_LOCATION); |
699 | base = G_000004_MC_FB_START(base) << 16; | 745 | base = G_000004_MC_FB_START(base) << 16; |
@@ -796,21 +842,24 @@ static int rs600_startup(struct radeon_device *rdev) | |||
796 | r = rs600_gart_enable(rdev); | 842 | r = rs600_gart_enable(rdev); |
797 | if (r) | 843 | if (r) |
798 | return r; | 844 | return r; |
845 | |||
846 | /* allocate wb buffer */ | ||
847 | r = radeon_wb_init(rdev); | ||
848 | if (r) | ||
849 | return r; | ||
850 | |||
799 | /* Enable IRQ */ | 851 | /* Enable IRQ */ |
800 | rs600_irq_set(rdev); | 852 | rs600_irq_set(rdev); |
801 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 853 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
802 | /* 1M ring buffer */ | 854 | /* 1M ring buffer */ |
803 | r = r100_cp_init(rdev, 1024 * 1024); | 855 | r = r100_cp_init(rdev, 1024 * 1024); |
804 | if (r) { | 856 | if (r) { |
805 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 857 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
806 | return r; | 858 | return r; |
807 | } | 859 | } |
808 | r = r100_wb_init(rdev); | ||
809 | if (r) | ||
810 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
811 | r = r100_ib_init(rdev); | 860 | r = r100_ib_init(rdev); |
812 | if (r) { | 861 | if (r) { |
813 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 862 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); |
814 | return r; | 863 | return r; |
815 | } | 864 | } |
816 | 865 | ||
@@ -848,7 +897,7 @@ int rs600_suspend(struct radeon_device *rdev) | |||
848 | { | 897 | { |
849 | r600_audio_fini(rdev); | 898 | r600_audio_fini(rdev); |
850 | r100_cp_disable(rdev); | 899 | r100_cp_disable(rdev); |
851 | r100_wb_disable(rdev); | 900 | radeon_wb_disable(rdev); |
852 | rs600_irq_disable(rdev); | 901 | rs600_irq_disable(rdev); |
853 | rs600_gart_disable(rdev); | 902 | rs600_gart_disable(rdev); |
854 | return 0; | 903 | return 0; |
@@ -858,7 +907,7 @@ void rs600_fini(struct radeon_device *rdev) | |||
858 | { | 907 | { |
859 | r600_audio_fini(rdev); | 908 | r600_audio_fini(rdev); |
860 | r100_cp_fini(rdev); | 909 | r100_cp_fini(rdev); |
861 | r100_wb_fini(rdev); | 910 | radeon_wb_fini(rdev); |
862 | r100_ib_fini(rdev); | 911 | r100_ib_fini(rdev); |
863 | radeon_gem_fini(rdev); | 912 | radeon_gem_fini(rdev); |
864 | rs600_gart_fini(rdev); | 913 | rs600_gart_fini(rdev); |
@@ -932,7 +981,7 @@ int rs600_init(struct radeon_device *rdev) | |||
932 | /* Somethings want wront with the accel init stop accel */ | 981 | /* Somethings want wront with the accel init stop accel */ |
933 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 982 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
934 | r100_cp_fini(rdev); | 983 | r100_cp_fini(rdev); |
935 | r100_wb_fini(rdev); | 984 | radeon_wb_fini(rdev); |
936 | r100_ib_fini(rdev); | 985 | r100_ib_fini(rdev); |
937 | rs600_gart_fini(rdev); | 986 | rs600_gart_fini(rdev); |
938 | radeon_irq_kms_fini(rdev); | 987 | radeon_irq_kms_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 4dc2a87ea680..a9049ed1a519 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -77,9 +77,9 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
77 | switch (crev) { | 77 | switch (crev) { |
78 | case 1: | 78 | case 1: |
79 | tmp.full = dfixed_const(100); | 79 | tmp.full = dfixed_const(100); |
80 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); | 80 | rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock)); |
81 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 81 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
82 | if (info->info.usK8MemoryClock) | 82 | if (le16_to_cpu(info->info.usK8MemoryClock)) |
83 | rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); | 83 | rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); |
84 | else if (rdev->clock.default_mclk) { | 84 | else if (rdev->clock.default_mclk) { |
85 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); | 85 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); |
@@ -91,16 +91,16 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
91 | break; | 91 | break; |
92 | case 2: | 92 | case 2: |
93 | tmp.full = dfixed_const(100); | 93 | tmp.full = dfixed_const(100); |
94 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); | 94 | rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock)); |
95 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 95 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
96 | if (info->info_v2.ulBootUpUMAClock) | 96 | if (le32_to_cpu(info->info_v2.ulBootUpUMAClock)) |
97 | rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); | 97 | rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock)); |
98 | else if (rdev->clock.default_mclk) | 98 | else if (rdev->clock.default_mclk) |
99 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); | 99 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); |
100 | else | 100 | else |
101 | rdev->pm.igp_system_mclk.full = dfixed_const(66700); | 101 | rdev->pm.igp_system_mclk.full = dfixed_const(66700); |
102 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | 102 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); |
103 | rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); | 103 | rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq)); |
104 | rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); | 104 | rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); |
105 | rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); | 105 | rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); |
106 | break; | 106 | break; |
@@ -157,7 +157,6 @@ void rs690_mc_init(struct radeon_device *rdev) | |||
157 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); | 157 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
158 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); | 158 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); |
159 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 159 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
160 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
161 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); | 160 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); |
162 | base = G_000100_MC_FB_START(base) << 16; | 161 | base = G_000100_MC_FB_START(base) << 16; |
163 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 162 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
@@ -616,21 +615,24 @@ static int rs690_startup(struct radeon_device *rdev) | |||
616 | r = rs400_gart_enable(rdev); | 615 | r = rs400_gart_enable(rdev); |
617 | if (r) | 616 | if (r) |
618 | return r; | 617 | return r; |
618 | |||
619 | /* allocate wb buffer */ | ||
620 | r = radeon_wb_init(rdev); | ||
621 | if (r) | ||
622 | return r; | ||
623 | |||
619 | /* Enable IRQ */ | 624 | /* Enable IRQ */ |
620 | rs600_irq_set(rdev); | 625 | rs600_irq_set(rdev); |
621 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 626 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
622 | /* 1M ring buffer */ | 627 | /* 1M ring buffer */ |
623 | r = r100_cp_init(rdev, 1024 * 1024); | 628 | r = r100_cp_init(rdev, 1024 * 1024); |
624 | if (r) { | 629 | if (r) { |
625 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 630 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
626 | return r; | 631 | return r; |
627 | } | 632 | } |
628 | r = r100_wb_init(rdev); | ||
629 | if (r) | ||
630 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
631 | r = r100_ib_init(rdev); | 633 | r = r100_ib_init(rdev); |
632 | if (r) { | 634 | if (r) { |
633 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 635 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); |
634 | return r; | 636 | return r; |
635 | } | 637 | } |
636 | 638 | ||
@@ -668,7 +670,7 @@ int rs690_suspend(struct radeon_device *rdev) | |||
668 | { | 670 | { |
669 | r600_audio_fini(rdev); | 671 | r600_audio_fini(rdev); |
670 | r100_cp_disable(rdev); | 672 | r100_cp_disable(rdev); |
671 | r100_wb_disable(rdev); | 673 | radeon_wb_disable(rdev); |
672 | rs600_irq_disable(rdev); | 674 | rs600_irq_disable(rdev); |
673 | rs400_gart_disable(rdev); | 675 | rs400_gart_disable(rdev); |
674 | return 0; | 676 | return 0; |
@@ -678,7 +680,7 @@ void rs690_fini(struct radeon_device *rdev) | |||
678 | { | 680 | { |
679 | r600_audio_fini(rdev); | 681 | r600_audio_fini(rdev); |
680 | r100_cp_fini(rdev); | 682 | r100_cp_fini(rdev); |
681 | r100_wb_fini(rdev); | 683 | radeon_wb_fini(rdev); |
682 | r100_ib_fini(rdev); | 684 | r100_ib_fini(rdev); |
683 | radeon_gem_fini(rdev); | 685 | radeon_gem_fini(rdev); |
684 | rs400_gart_fini(rdev); | 686 | rs400_gart_fini(rdev); |
@@ -753,7 +755,7 @@ int rs690_init(struct radeon_device *rdev) | |||
753 | /* Somethings want wront with the accel init stop accel */ | 755 | /* Somethings want wront with the accel init stop accel */ |
754 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 756 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
755 | r100_cp_fini(rdev); | 757 | r100_cp_fini(rdev); |
756 | r100_wb_fini(rdev); | 758 | radeon_wb_fini(rdev); |
757 | r100_ib_fini(rdev); | 759 | r100_ib_fini(rdev); |
758 | rs400_gart_fini(rdev); | 760 | rs400_gart_fini(rdev); |
759 | radeon_irq_kms_fini(rdev); | 761 | radeon_irq_kms_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 4d6e86041a9f..6613ee9ecca3 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -69,13 +69,13 @@ void rv515_ring_start(struct radeon_device *rdev) | |||
69 | ISYNC_CPSCRATCH_IDLEGUI); | 69 | ISYNC_CPSCRATCH_IDLEGUI); |
70 | radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); | 70 | radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); |
71 | radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); | 71 | radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); |
72 | radeon_ring_write(rdev, PACKET0(0x170C, 0)); | 72 | radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0)); |
73 | radeon_ring_write(rdev, 1 << 31); | 73 | radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG); |
74 | radeon_ring_write(rdev, PACKET0(GB_SELECT, 0)); | 74 | radeon_ring_write(rdev, PACKET0(GB_SELECT, 0)); |
75 | radeon_ring_write(rdev, 0); | 75 | radeon_ring_write(rdev, 0); |
76 | radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0)); | 76 | radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0)); |
77 | radeon_ring_write(rdev, 0); | 77 | radeon_ring_write(rdev, 0); |
78 | radeon_ring_write(rdev, PACKET0(0x42C8, 0)); | 78 | radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0)); |
79 | radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); | 79 | radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); |
80 | radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0)); | 80 | radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0)); |
81 | radeon_ring_write(rdev, 0); | 81 | radeon_ring_write(rdev, 0); |
@@ -153,8 +153,8 @@ void rv515_gpu_init(struct radeon_device *rdev) | |||
153 | } | 153 | } |
154 | rv515_vga_render_disable(rdev); | 154 | rv515_vga_render_disable(rdev); |
155 | r420_pipes_init(rdev); | 155 | r420_pipes_init(rdev); |
156 | gb_pipe_select = RREG32(0x402C); | 156 | gb_pipe_select = RREG32(R400_GB_PIPE_SELECT); |
157 | tmp = RREG32(0x170C); | 157 | tmp = RREG32(R300_DST_PIPE_CONFIG); |
158 | pipe_select_current = (tmp >> 2) & 3; | 158 | pipe_select_current = (tmp >> 2) & 3; |
159 | tmp = (1 << pipe_select_current) | | 159 | tmp = (1 << pipe_select_current) | |
160 | (((gb_pipe_select >> 8) & 0xF) << 4); | 160 | (((gb_pipe_select >> 8) & 0xF) << 4); |
@@ -386,21 +386,24 @@ static int rv515_startup(struct radeon_device *rdev) | |||
386 | if (r) | 386 | if (r) |
387 | return r; | 387 | return r; |
388 | } | 388 | } |
389 | |||
390 | /* allocate wb buffer */ | ||
391 | r = radeon_wb_init(rdev); | ||
392 | if (r) | ||
393 | return r; | ||
394 | |||
389 | /* Enable IRQ */ | 395 | /* Enable IRQ */ |
390 | rs600_irq_set(rdev); | 396 | rs600_irq_set(rdev); |
391 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 397 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
392 | /* 1M ring buffer */ | 398 | /* 1M ring buffer */ |
393 | r = r100_cp_init(rdev, 1024 * 1024); | 399 | r = r100_cp_init(rdev, 1024 * 1024); |
394 | if (r) { | 400 | if (r) { |
395 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 401 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
396 | return r; | 402 | return r; |
397 | } | 403 | } |
398 | r = r100_wb_init(rdev); | ||
399 | if (r) | ||
400 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
401 | r = r100_ib_init(rdev); | 404 | r = r100_ib_init(rdev); |
402 | if (r) { | 405 | if (r) { |
403 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 406 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); |
404 | return r; | 407 | return r; |
405 | } | 408 | } |
406 | return 0; | 409 | return 0; |
@@ -431,7 +434,7 @@ int rv515_resume(struct radeon_device *rdev) | |||
431 | int rv515_suspend(struct radeon_device *rdev) | 434 | int rv515_suspend(struct radeon_device *rdev) |
432 | { | 435 | { |
433 | r100_cp_disable(rdev); | 436 | r100_cp_disable(rdev); |
434 | r100_wb_disable(rdev); | 437 | radeon_wb_disable(rdev); |
435 | rs600_irq_disable(rdev); | 438 | rs600_irq_disable(rdev); |
436 | if (rdev->flags & RADEON_IS_PCIE) | 439 | if (rdev->flags & RADEON_IS_PCIE) |
437 | rv370_pcie_gart_disable(rdev); | 440 | rv370_pcie_gart_disable(rdev); |
@@ -447,7 +450,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev) | |||
447 | void rv515_fini(struct radeon_device *rdev) | 450 | void rv515_fini(struct radeon_device *rdev) |
448 | { | 451 | { |
449 | r100_cp_fini(rdev); | 452 | r100_cp_fini(rdev); |
450 | r100_wb_fini(rdev); | 453 | radeon_wb_fini(rdev); |
451 | r100_ib_fini(rdev); | 454 | r100_ib_fini(rdev); |
452 | radeon_gem_fini(rdev); | 455 | radeon_gem_fini(rdev); |
453 | rv370_pcie_gart_fini(rdev); | 456 | rv370_pcie_gart_fini(rdev); |
@@ -527,7 +530,7 @@ int rv515_init(struct radeon_device *rdev) | |||
527 | /* Somethings want wront with the accel init stop accel */ | 530 | /* Somethings want wront with the accel init stop accel */ |
528 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 531 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
529 | r100_cp_fini(rdev); | 532 | r100_cp_fini(rdev); |
530 | r100_wb_fini(rdev); | 533 | radeon_wb_fini(rdev); |
531 | r100_ib_fini(rdev); | 534 | r100_ib_fini(rdev); |
532 | radeon_irq_kms_fini(rdev); | 535 | radeon_irq_kms_fini(rdev); |
533 | rv370_pcie_gart_fini(rdev); | 536 | rv370_pcie_gart_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 9490da700749..4de51891aa6d 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -41,20 +41,60 @@ | |||
41 | 41 | ||
42 | static void rv770_gpu_init(struct radeon_device *rdev); | 42 | static void rv770_gpu_init(struct radeon_device *rdev); |
43 | void rv770_fini(struct radeon_device *rdev); | 43 | void rv770_fini(struct radeon_device *rdev); |
44 | static void rv770_pcie_gen2_enable(struct radeon_device *rdev); | ||
45 | |||
46 | u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | ||
47 | { | ||
48 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | ||
49 | u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); | ||
50 | |||
51 | /* Lock the graphics update lock */ | ||
52 | tmp |= AVIVO_D1GRPH_UPDATE_LOCK; | ||
53 | WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); | ||
54 | |||
55 | /* update the scanout addresses */ | ||
56 | if (radeon_crtc->crtc_id) { | ||
57 | WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); | ||
58 | WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); | ||
59 | } else { | ||
60 | WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); | ||
61 | WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); | ||
62 | } | ||
63 | WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | ||
64 | (u32)crtc_base); | ||
65 | WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | ||
66 | (u32)crtc_base); | ||
67 | |||
68 | /* Wait for update_pending to go high. */ | ||
69 | while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); | ||
70 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); | ||
71 | |||
72 | /* Unlock the lock, so double-buffering can take place inside vblank */ | ||
73 | tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; | ||
74 | WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); | ||
75 | |||
76 | /* Return current update_pending status: */ | ||
77 | return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; | ||
78 | } | ||
44 | 79 | ||
45 | /* get temperature in millidegrees */ | 80 | /* get temperature in millidegrees */ |
46 | u32 rv770_get_temp(struct radeon_device *rdev) | 81 | int rv770_get_temp(struct radeon_device *rdev) |
47 | { | 82 | { |
48 | u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> | 83 | u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> |
49 | ASIC_T_SHIFT; | 84 | ASIC_T_SHIFT; |
50 | u32 actual_temp = 0; | 85 | int actual_temp; |
51 | 86 | ||
52 | if ((temp >> 9) & 1) | 87 | if (temp & 0x400) |
53 | actual_temp = 0; | 88 | actual_temp = -256; |
54 | else | 89 | else if (temp & 0x200) |
55 | actual_temp = (temp >> 1) & 0xff; | 90 | actual_temp = 255; |
56 | 91 | else if (temp & 0x100) { | |
57 | return actual_temp * 1000; | 92 | actual_temp = temp & 0x1ff; |
93 | actual_temp |= ~0x1ff; | ||
94 | } else | ||
95 | actual_temp = temp & 0xff; | ||
96 | |||
97 | return (actual_temp * 1000) / 2; | ||
58 | } | 98 | } |
59 | 99 | ||
60 | void rv770_pm_misc(struct radeon_device *rdev) | 100 | void rv770_pm_misc(struct radeon_device *rdev) |
@@ -65,8 +105,11 @@ void rv770_pm_misc(struct radeon_device *rdev) | |||
65 | struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; | 105 | struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; |
66 | 106 | ||
67 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { | 107 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { |
108 | /* 0xff01 is a flag rather then an actual voltage */ | ||
109 | if (voltage->voltage == 0xff01) | ||
110 | return; | ||
68 | if (voltage->voltage != rdev->pm.current_vddc) { | 111 | if (voltage->voltage != rdev->pm.current_vddc) { |
69 | radeon_atom_set_voltage(rdev, voltage->voltage); | 112 | radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); |
70 | rdev->pm.current_vddc = voltage->voltage; | 113 | rdev->pm.current_vddc = voltage->voltage; |
71 | DRM_DEBUG("Setting: v: %d\n", voltage->voltage); | 114 | DRM_DEBUG("Setting: v: %d\n", voltage->voltage); |
72 | } | 115 | } |
@@ -267,8 +310,9 @@ static void rv770_mc_program(struct radeon_device *rdev) | |||
267 | */ | 310 | */ |
268 | void r700_cp_stop(struct radeon_device *rdev) | 311 | void r700_cp_stop(struct radeon_device *rdev) |
269 | { | 312 | { |
270 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 313 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
271 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); | 314 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); |
315 | WREG32(SCRATCH_UMSK, 0); | ||
272 | } | 316 | } |
273 | 317 | ||
274 | static int rv770_cp_load_microcode(struct radeon_device *rdev) | 318 | static int rv770_cp_load_microcode(struct radeon_device *rdev) |
@@ -280,7 +324,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev) | |||
280 | return -EINVAL; | 324 | return -EINVAL; |
281 | 325 | ||
282 | r700_cp_stop(rdev); | 326 | r700_cp_stop(rdev); |
283 | WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); | 327 | WREG32(CP_RB_CNTL, |
328 | #ifdef __BIG_ENDIAN | ||
329 | BUF_SWAP_32BIT | | ||
330 | #endif | ||
331 | RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); | ||
284 | 332 | ||
285 | /* Reset cp */ | 333 | /* Reset cp */ |
286 | WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); | 334 | WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); |
@@ -488,6 +536,55 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
488 | return backend_map; | 536 | return backend_map; |
489 | } | 537 | } |
490 | 538 | ||
539 | static void rv770_program_channel_remap(struct radeon_device *rdev) | ||
540 | { | ||
541 | u32 tcp_chan_steer, mc_shared_chremap, tmp; | ||
542 | bool force_no_swizzle; | ||
543 | |||
544 | switch (rdev->family) { | ||
545 | case CHIP_RV770: | ||
546 | case CHIP_RV730: | ||
547 | force_no_swizzle = false; | ||
548 | break; | ||
549 | case CHIP_RV710: | ||
550 | case CHIP_RV740: | ||
551 | default: | ||
552 | force_no_swizzle = true; | ||
553 | break; | ||
554 | } | ||
555 | |||
556 | tmp = RREG32(MC_SHARED_CHMAP); | ||
557 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
558 | case 0: | ||
559 | case 1: | ||
560 | default: | ||
561 | /* default mapping */ | ||
562 | mc_shared_chremap = 0x00fac688; | ||
563 | break; | ||
564 | case 2: | ||
565 | case 3: | ||
566 | if (force_no_swizzle) | ||
567 | mc_shared_chremap = 0x00fac688; | ||
568 | else | ||
569 | mc_shared_chremap = 0x00bbc298; | ||
570 | break; | ||
571 | } | ||
572 | |||
573 | if (rdev->family == CHIP_RV740) | ||
574 | tcp_chan_steer = 0x00ef2a60; | ||
575 | else | ||
576 | tcp_chan_steer = 0x00fac688; | ||
577 | |||
578 | /* RV770 CE has special chremap setup */ | ||
579 | if (rdev->pdev->device == 0x944e) { | ||
580 | tcp_chan_steer = 0x00b08b08; | ||
581 | mc_shared_chremap = 0x00b08b08; | ||
582 | } | ||
583 | |||
584 | WREG32(TCP_CHAN_STEER, tcp_chan_steer); | ||
585 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
586 | } | ||
587 | |||
491 | static void rv770_gpu_init(struct radeon_device *rdev) | 588 | static void rv770_gpu_init(struct radeon_device *rdev) |
492 | { | 589 | { |
493 | int i, j, num_qd_pipes; | 590 | int i, j, num_qd_pipes; |
@@ -643,10 +740,11 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
643 | else | 740 | else |
644 | gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); | 741 | gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); |
645 | rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3); | 742 | rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3); |
646 | 743 | gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); | |
647 | gb_tiling_config |= GROUP_SIZE(0); | 744 | if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) |
648 | rdev->config.rv770.tiling_group_size = 256; | 745 | rdev->config.rv770.tiling_group_size = 512; |
649 | 746 | else | |
747 | rdev->config.rv770.tiling_group_size = 256; | ||
650 | if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { | 748 | if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { |
651 | gb_tiling_config |= ROW_TILING(3); | 749 | gb_tiling_config |= ROW_TILING(3); |
652 | gb_tiling_config |= SAMPLE_SPLIT(3); | 750 | gb_tiling_config |= SAMPLE_SPLIT(3); |
@@ -686,6 +784,8 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
686 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); | 784 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
687 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); | 785 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
688 | 786 | ||
787 | rv770_program_channel_remap(rdev); | ||
788 | |||
689 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 789 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
690 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 790 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
691 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 791 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
@@ -912,9 +1012,9 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev) | |||
912 | u64 gpu_addr; | 1012 | u64 gpu_addr; |
913 | 1013 | ||
914 | if (rdev->vram_scratch.robj == NULL) { | 1014 | if (rdev->vram_scratch.robj == NULL) { |
915 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, | 1015 | r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, |
916 | true, RADEON_GEM_DOMAIN_VRAM, | 1016 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
917 | &rdev->vram_scratch.robj); | 1017 | &rdev->vram_scratch.robj); |
918 | if (r) { | 1018 | if (r) { |
919 | return r; | 1019 | return r; |
920 | } | 1020 | } |
@@ -954,6 +1054,45 @@ static void rv770_vram_scratch_fini(struct radeon_device *rdev) | |||
954 | radeon_bo_unref(&rdev->vram_scratch.robj); | 1054 | radeon_bo_unref(&rdev->vram_scratch.robj); |
955 | } | 1055 | } |
956 | 1056 | ||
1057 | void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | ||
1058 | { | ||
1059 | u64 size_bf, size_af; | ||
1060 | |||
1061 | if (mc->mc_vram_size > 0xE0000000) { | ||
1062 | /* leave room for at least 512M GTT */ | ||
1063 | dev_warn(rdev->dev, "limiting VRAM\n"); | ||
1064 | mc->real_vram_size = 0xE0000000; | ||
1065 | mc->mc_vram_size = 0xE0000000; | ||
1066 | } | ||
1067 | if (rdev->flags & RADEON_IS_AGP) { | ||
1068 | size_bf = mc->gtt_start; | ||
1069 | size_af = 0xFFFFFFFF - mc->gtt_end + 1; | ||
1070 | if (size_bf > size_af) { | ||
1071 | if (mc->mc_vram_size > size_bf) { | ||
1072 | dev_warn(rdev->dev, "limiting VRAM\n"); | ||
1073 | mc->real_vram_size = size_bf; | ||
1074 | mc->mc_vram_size = size_bf; | ||
1075 | } | ||
1076 | mc->vram_start = mc->gtt_start - mc->mc_vram_size; | ||
1077 | } else { | ||
1078 | if (mc->mc_vram_size > size_af) { | ||
1079 | dev_warn(rdev->dev, "limiting VRAM\n"); | ||
1080 | mc->real_vram_size = size_af; | ||
1081 | mc->mc_vram_size = size_af; | ||
1082 | } | ||
1083 | mc->vram_start = mc->gtt_end; | ||
1084 | } | ||
1085 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; | ||
1086 | dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", | ||
1087 | mc->mc_vram_size >> 20, mc->vram_start, | ||
1088 | mc->vram_end, mc->real_vram_size >> 20); | ||
1089 | } else { | ||
1090 | radeon_vram_location(rdev, &rdev->mc, 0); | ||
1091 | rdev->mc.gtt_base_align = 0; | ||
1092 | radeon_gtt_location(rdev, mc); | ||
1093 | } | ||
1094 | } | ||
1095 | |||
957 | int rv770_mc_init(struct radeon_device *rdev) | 1096 | int rv770_mc_init(struct radeon_device *rdev) |
958 | { | 1097 | { |
959 | u32 tmp; | 1098 | u32 tmp; |
@@ -993,8 +1132,7 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
993 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); | 1132 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
994 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); | 1133 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); |
995 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 1134 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
996 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 1135 | r700_vram_gtt_location(rdev, &rdev->mc); |
997 | r600_vram_gtt_location(rdev, &rdev->mc); | ||
998 | radeon_update_bandwidth_info(rdev); | 1136 | radeon_update_bandwidth_info(rdev); |
999 | 1137 | ||
1000 | return 0; | 1138 | return 0; |
@@ -1004,6 +1142,9 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1004 | { | 1142 | { |
1005 | int r; | 1143 | int r; |
1006 | 1144 | ||
1145 | /* enable pcie gen2 link */ | ||
1146 | rv770_pcie_gen2_enable(rdev); | ||
1147 | |||
1007 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 1148 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
1008 | r = r600_init_microcode(rdev); | 1149 | r = r600_init_microcode(rdev); |
1009 | if (r) { | 1150 | if (r) { |
@@ -1030,19 +1171,12 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1030 | rdev->asic->copy = NULL; | 1171 | rdev->asic->copy = NULL; |
1031 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | 1172 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); |
1032 | } | 1173 | } |
1033 | /* pin copy shader into vram */ | 1174 | |
1034 | if (rdev->r600_blit.shader_obj) { | 1175 | /* allocate wb buffer */ |
1035 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 1176 | r = radeon_wb_init(rdev); |
1036 | if (unlikely(r != 0)) | 1177 | if (r) |
1037 | return r; | 1178 | return r; |
1038 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | 1179 | |
1039 | &rdev->r600_blit.shader_gpu_addr); | ||
1040 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
1041 | if (r) { | ||
1042 | DRM_ERROR("failed to pin blit object %d\n", r); | ||
1043 | return r; | ||
1044 | } | ||
1045 | } | ||
1046 | /* Enable IRQ */ | 1180 | /* Enable IRQ */ |
1047 | r = r600_irq_init(rdev); | 1181 | r = r600_irq_init(rdev); |
1048 | if (r) { | 1182 | if (r) { |
@@ -1061,8 +1195,7 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1061 | r = r600_cp_resume(rdev); | 1195 | r = r600_cp_resume(rdev); |
1062 | if (r) | 1196 | if (r) |
1063 | return r; | 1197 | return r; |
1064 | /* write back buffer are not vital so don't worry about failure */ | 1198 | |
1065 | r600_wb_enable(rdev); | ||
1066 | return 0; | 1199 | return 0; |
1067 | } | 1200 | } |
1068 | 1201 | ||
@@ -1085,7 +1218,7 @@ int rv770_resume(struct radeon_device *rdev) | |||
1085 | 1218 | ||
1086 | r = r600_ib_test(rdev); | 1219 | r = r600_ib_test(rdev); |
1087 | if (r) { | 1220 | if (r) { |
1088 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 1221 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); |
1089 | return r; | 1222 | return r; |
1090 | } | 1223 | } |
1091 | 1224 | ||
@@ -1108,7 +1241,7 @@ int rv770_suspend(struct radeon_device *rdev) | |||
1108 | r700_cp_stop(rdev); | 1241 | r700_cp_stop(rdev); |
1109 | rdev->cp.ready = false; | 1242 | rdev->cp.ready = false; |
1110 | r600_irq_suspend(rdev); | 1243 | r600_irq_suspend(rdev); |
1111 | r600_wb_disable(rdev); | 1244 | radeon_wb_disable(rdev); |
1112 | rv770_pcie_gart_disable(rdev); | 1245 | rv770_pcie_gart_disable(rdev); |
1113 | /* unpin shaders bo */ | 1246 | /* unpin shaders bo */ |
1114 | if (rdev->r600_blit.shader_obj) { | 1247 | if (rdev->r600_blit.shader_obj) { |
@@ -1131,9 +1264,6 @@ int rv770_init(struct radeon_device *rdev) | |||
1131 | { | 1264 | { |
1132 | int r; | 1265 | int r; |
1133 | 1266 | ||
1134 | r = radeon_dummy_page_init(rdev); | ||
1135 | if (r) | ||
1136 | return r; | ||
1137 | /* This don't do much */ | 1267 | /* This don't do much */ |
1138 | r = radeon_gem_init(rdev); | 1268 | r = radeon_gem_init(rdev); |
1139 | if (r) | 1269 | if (r) |
@@ -1152,7 +1282,7 @@ int rv770_init(struct radeon_device *rdev) | |||
1152 | if (r) | 1282 | if (r) |
1153 | return r; | 1283 | return r; |
1154 | /* Post card if necessary */ | 1284 | /* Post card if necessary */ |
1155 | if (!r600_card_posted(rdev)) { | 1285 | if (!radeon_card_posted(rdev)) { |
1156 | if (!rdev->bios) { | 1286 | if (!rdev->bios) { |
1157 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | 1287 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); |
1158 | return -EINVAL; | 1288 | return -EINVAL; |
@@ -1203,8 +1333,8 @@ int rv770_init(struct radeon_device *rdev) | |||
1203 | if (r) { | 1333 | if (r) { |
1204 | dev_err(rdev->dev, "disabling GPU acceleration\n"); | 1334 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
1205 | r700_cp_fini(rdev); | 1335 | r700_cp_fini(rdev); |
1206 | r600_wb_fini(rdev); | ||
1207 | r600_irq_fini(rdev); | 1336 | r600_irq_fini(rdev); |
1337 | radeon_wb_fini(rdev); | ||
1208 | radeon_irq_kms_fini(rdev); | 1338 | radeon_irq_kms_fini(rdev); |
1209 | rv770_pcie_gart_fini(rdev); | 1339 | rv770_pcie_gart_fini(rdev); |
1210 | rdev->accel_working = false; | 1340 | rdev->accel_working = false; |
@@ -1236,8 +1366,9 @@ void rv770_fini(struct radeon_device *rdev) | |||
1236 | { | 1366 | { |
1237 | r600_blit_fini(rdev); | 1367 | r600_blit_fini(rdev); |
1238 | r700_cp_fini(rdev); | 1368 | r700_cp_fini(rdev); |
1239 | r600_wb_fini(rdev); | ||
1240 | r600_irq_fini(rdev); | 1369 | r600_irq_fini(rdev); |
1370 | radeon_wb_fini(rdev); | ||
1371 | radeon_ib_pool_fini(rdev); | ||
1241 | radeon_irq_kms_fini(rdev); | 1372 | radeon_irq_kms_fini(rdev); |
1242 | rv770_pcie_gart_fini(rdev); | 1373 | rv770_pcie_gart_fini(rdev); |
1243 | rv770_vram_scratch_fini(rdev); | 1374 | rv770_vram_scratch_fini(rdev); |
@@ -1248,5 +1379,79 @@ void rv770_fini(struct radeon_device *rdev) | |||
1248 | radeon_atombios_fini(rdev); | 1379 | radeon_atombios_fini(rdev); |
1249 | kfree(rdev->bios); | 1380 | kfree(rdev->bios); |
1250 | rdev->bios = NULL; | 1381 | rdev->bios = NULL; |
1251 | radeon_dummy_page_fini(rdev); | 1382 | } |
1383 | |||
1384 | static void rv770_pcie_gen2_enable(struct radeon_device *rdev) | ||
1385 | { | ||
1386 | u32 link_width_cntl, lanes, speed_cntl, tmp; | ||
1387 | u16 link_cntl2; | ||
1388 | |||
1389 | if (radeon_pcie_gen2 == 0) | ||
1390 | return; | ||
1391 | |||
1392 | if (rdev->flags & RADEON_IS_IGP) | ||
1393 | return; | ||
1394 | |||
1395 | if (!(rdev->flags & RADEON_IS_PCIE)) | ||
1396 | return; | ||
1397 | |||
1398 | /* x2 cards have a special sequence */ | ||
1399 | if (ASIC_IS_X2(rdev)) | ||
1400 | return; | ||
1401 | |||
1402 | /* advertise upconfig capability */ | ||
1403 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
1404 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; | ||
1405 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
1406 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
1407 | if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { | ||
1408 | lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; | ||
1409 | link_width_cntl &= ~(LC_LINK_WIDTH_MASK | | ||
1410 | LC_RECONFIG_ARC_MISSING_ESCAPE); | ||
1411 | link_width_cntl |= lanes | LC_RECONFIG_NOW | | ||
1412 | LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT; | ||
1413 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
1414 | } else { | ||
1415 | link_width_cntl |= LC_UPCONFIGURE_DIS; | ||
1416 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
1417 | } | ||
1418 | |||
1419 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
1420 | if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && | ||
1421 | (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { | ||
1422 | |||
1423 | tmp = RREG32(0x541c); | ||
1424 | WREG32(0x541c, tmp | 0x8); | ||
1425 | WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN); | ||
1426 | link_cntl2 = RREG16(0x4088); | ||
1427 | link_cntl2 &= ~TARGET_LINK_SPEED_MASK; | ||
1428 | link_cntl2 |= 0x2; | ||
1429 | WREG16(0x4088, link_cntl2); | ||
1430 | WREG32(MM_CFGREGS_CNTL, 0); | ||
1431 | |||
1432 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
1433 | speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; | ||
1434 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
1435 | |||
1436 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
1437 | speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT; | ||
1438 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
1439 | |||
1440 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
1441 | speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT; | ||
1442 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
1443 | |||
1444 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
1445 | speed_cntl |= LC_GEN2_EN_STRAP; | ||
1446 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
1447 | |||
1448 | } else { | ||
1449 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
1450 | /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ | ||
1451 | if (1) | ||
1452 | link_width_cntl |= LC_UPCONFIGURE_DIS; | ||
1453 | else | ||
1454 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; | ||
1455 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
1456 | } | ||
1252 | } | 1457 | } |
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index b7a5a20e81dc..79fa588e9ed5 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h | |||
@@ -76,10 +76,10 @@ | |||
76 | #define ROQ_IB1_START(x) ((x) << 0) | 76 | #define ROQ_IB1_START(x) ((x) << 0) |
77 | #define ROQ_IB2_START(x) ((x) << 8) | 77 | #define ROQ_IB2_START(x) ((x) << 8) |
78 | #define CP_RB_CNTL 0xC104 | 78 | #define CP_RB_CNTL 0xC104 |
79 | #define RB_BUFSZ(x) ((x)<<0) | 79 | #define RB_BUFSZ(x) ((x) << 0) |
80 | #define RB_BLKSZ(x) ((x)<<8) | 80 | #define RB_BLKSZ(x) ((x) << 8) |
81 | #define RB_NO_UPDATE (1<<27) | 81 | #define RB_NO_UPDATE (1 << 27) |
82 | #define RB_RPTR_WR_ENA (1<<31) | 82 | #define RB_RPTR_WR_ENA (1 << 31) |
83 | #define BUF_SWAP_32BIT (2 << 16) | 83 | #define BUF_SWAP_32BIT (2 << 16) |
84 | #define CP_RB_RPTR 0x8700 | 84 | #define CP_RB_RPTR 0x8700 |
85 | #define CP_RB_RPTR_ADDR 0xC10C | 85 | #define CP_RB_RPTR_ADDR 0xC10C |
@@ -138,6 +138,7 @@ | |||
138 | #define MC_SHARED_CHMAP 0x2004 | 138 | #define MC_SHARED_CHMAP 0x2004 |
139 | #define NOOFCHAN_SHIFT 12 | 139 | #define NOOFCHAN_SHIFT 12 |
140 | #define NOOFCHAN_MASK 0x00003000 | 140 | #define NOOFCHAN_MASK 0x00003000 |
141 | #define MC_SHARED_CHREMAP 0x2008 | ||
141 | 142 | ||
142 | #define MC_ARB_RAMCFG 0x2760 | 143 | #define MC_ARB_RAMCFG 0x2760 |
143 | #define NOOFBANK_SHIFT 0 | 144 | #define NOOFBANK_SHIFT 0 |
@@ -303,6 +304,7 @@ | |||
303 | #define BILINEAR_PRECISION_8_BIT (1 << 31) | 304 | #define BILINEAR_PRECISION_8_BIT (1 << 31) |
304 | 305 | ||
305 | #define TCP_CNTL 0x9610 | 306 | #define TCP_CNTL 0x9610 |
307 | #define TCP_CHAN_STEER 0x9614 | ||
306 | 308 | ||
307 | #define VGT_CACHE_INVALIDATION 0x88C4 | 309 | #define VGT_CACHE_INVALIDATION 0x88C4 |
308 | #define CACHE_INVALIDATION(x) ((x)<<0) | 310 | #define CACHE_INVALIDATION(x) ((x)<<0) |
@@ -351,4 +353,49 @@ | |||
351 | 353 | ||
352 | #define SRBM_STATUS 0x0E50 | 354 | #define SRBM_STATUS 0x0E50 |
353 | 355 | ||
356 | #define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 | ||
357 | #define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914 | ||
358 | #define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114 | ||
359 | #define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118 | ||
360 | #define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c | ||
361 | #define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c | ||
362 | |||
363 | /* PCIE link stuff */ | ||
364 | #define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ | ||
365 | #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ | ||
366 | # define LC_LINK_WIDTH_SHIFT 0 | ||
367 | # define LC_LINK_WIDTH_MASK 0x7 | ||
368 | # define LC_LINK_WIDTH_X0 0 | ||
369 | # define LC_LINK_WIDTH_X1 1 | ||
370 | # define LC_LINK_WIDTH_X2 2 | ||
371 | # define LC_LINK_WIDTH_X4 3 | ||
372 | # define LC_LINK_WIDTH_X8 4 | ||
373 | # define LC_LINK_WIDTH_X16 6 | ||
374 | # define LC_LINK_WIDTH_RD_SHIFT 4 | ||
375 | # define LC_LINK_WIDTH_RD_MASK 0x70 | ||
376 | # define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) | ||
377 | # define LC_RECONFIG_NOW (1 << 8) | ||
378 | # define LC_RENEGOTIATION_SUPPORT (1 << 9) | ||
379 | # define LC_RENEGOTIATE_EN (1 << 10) | ||
380 | # define LC_SHORT_RECONFIG_EN (1 << 11) | ||
381 | # define LC_UPCONFIGURE_SUPPORT (1 << 12) | ||
382 | # define LC_UPCONFIGURE_DIS (1 << 13) | ||
383 | #define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */ | ||
384 | # define LC_GEN2_EN_STRAP (1 << 0) | ||
385 | # define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1) | ||
386 | # define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5) | ||
387 | # define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6) | ||
388 | # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) | ||
389 | # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 | ||
390 | # define LC_CURRENT_DATA_RATE (1 << 11) | ||
391 | # define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) | ||
392 | # define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) | ||
393 | # define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) | ||
394 | # define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24) | ||
395 | #define MM_CFGREGS_CNTL 0x544c | ||
396 | # define MM_WR_TO_CFG_EN (1 << 3) | ||
397 | #define LINK_CNTL2 0x88 /* F0 */ | ||
398 | # define TARGET_LINK_SPEED_MASK (0xf << 0) | ||
399 | # define SELECTABLE_DEEMPHASIS (1 << 6) | ||
400 | |||
354 | #endif | 401 | #endif |