author     Alex Deucher <alexander.deucher@amd.com>    2015-04-20 16:55:21 -0400
committer  Alex Deucher <alexander.deucher@amd.com>    2015-06-03 21:03:15 -0400
commit     d38ceaf99ed015f2a0b9af3499791bd3a3daae21 (patch)
tree       c8e237ea218e8ed8a5f64c1654fc01fe5d2239cb
parent     97b2e202fba05b87d720318a6500a337100dab4d (diff)
drm/amdgpu: add core driver (v4)
This adds the non-asic specific core driver code.
v2: remove extra kconfig option
v3: implement minor fixes from Fengguang Wu
v4: fix cast in amdgpu_ucode.c
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jammy Zhou <Jammy.Zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
78 files changed, 33596 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index d4b65457122d..c46ca311d8c3 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -120,6 +120,27 @@ config DRM_RADEON
 
 source "drivers/gpu/drm/radeon/Kconfig"
 
+config DRM_AMDGPU
+        tristate "AMD GPU"
+        depends on DRM && PCI
+        select FB_CFB_FILLRECT
+        select FB_CFB_COPYAREA
+        select FB_CFB_IMAGEBLIT
+        select FW_LOADER
+        select DRM_KMS_HELPER
+        select DRM_KMS_FB_HELPER
+        select DRM_TTM
+        select POWER_SUPPLY
+        select HWMON
+        select BACKLIGHT_CLASS_DEVICE
+        select INTERVAL_TREE
+        help
+          Choose this option if you have a recent AMD Radeon graphics card.
+
+          If M is selected, the module will be called amdgpu.
+
+source "drivers/gpu/drm/amd/amdgpu/Kconfig"
+
 source "drivers/gpu/drm/nouveau/Kconfig"
 
 config DRM_I810
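
For reference, a minimal .config fragment that builds the new driver as a module under the option added above (illustrative only, not part of the patch; the select statements pull in TTM, the KMS helpers, and the other listed dependencies automatically):

    CONFIG_PCI=y
    CONFIG_DRM=m
    CONFIG_DRM_AMDGPU=m
    # Optional, from the new drivers/gpu/drm/amd/amdgpu/Kconfig added below:
    CONFIG_DRM_AMDGPU_CIK=y
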
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 15eb710ec114..5713d0534504 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_TDFX) += tdfx/
 obj-$(CONFIG_DRM_R128) += r128/
 obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
 obj-$(CONFIG_DRM_RADEON)+= radeon/
+obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
 obj-$(CONFIG_DRM_MGA) += mga/
 obj-$(CONFIG_DRM_I810) += i810/
 obj-$(CONFIG_DRM_I915) += i915/
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
new file mode 100644
index 000000000000..b30fcfa4b1f2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -0,0 +1,17 @@
+config DRM_AMDGPU_CIK
+        bool "Enable amdgpu support for CIK parts"
+        depends on DRM_AMDGPU
+        help
+          Choose this option if you want to enable experimental support
+          for CIK asics.
+
+          CIK is already supported in radeon. CIK support in amdgpu
+          is for experimentation and testing.
+
+config DRM_AMDGPU_USERPTR
+        bool "Always enable userptr write support"
+        depends on DRM_AMDGPU
+        select MMU_NOTIFIER
+        help
+          This option selects CONFIG_MMU_NOTIFIER if it isn't already
+          selected to enable full userptr support.
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
new file mode 100644
index 000000000000..01276a592bc5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -0,0 +1,49 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg
+
+amdgpu-y := amdgpu_drv.o
+
+# add KMS driver
+amdgpu-y += amdgpu_device.o amdgpu_kms.o \
+        amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
+        atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
+        amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
+        amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
+        amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
+        amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
+        atombios_encoders.o amdgpu_semaphore.o amdgpu_sa.o atombios_i2c.o \
+        amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
+        amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
+
+# add IH block
+amdgpu-y += \
+        amdgpu_irq.o \
+        amdgpu_ih.o
+
+# add SMC block
+amdgpu-y += \
+        amdgpu_dpm.o
+
+# add GFX block
+amdgpu-y += \
+        amdgpu_gfx.o
+
+# add UVD block
+amdgpu-y += \
+        amdgpu_uvd.o
+
+# add VCE block
+amdgpu-y += \
+        amdgpu_vce.o
+
+amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
+amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
+amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
+amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o
+
+obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
+
+CFLAGS_amdgpu_trace_points.o := -I$(src)
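
This Makefile follows the standard kbuild composite-object pattern: every object listed in amdgpu-y (or in amdgpu-$(CONFIG_FOO) for conditional code) is linked into a single amdgpu.o, which the obj-$(CONFIG_DRM_AMDGPU) line then builds either into the kernel or as amdgpu.ko. As a sketch, wiring in a hypothetical new file would follow the same two rules (example names are made up, not part of this patch):

    # Hypothetical sketch, not part of this patch:
    amdgpu-y += amdgpu_newblock.o                 # unconditionally linked into amdgpu.o
    amdgpu-$(CONFIG_DEBUG_FS) += amdgpu_dbgfs.o   # linked only when CONFIG_DEBUG_FS=y
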
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
new file mode 100644
index 000000000000..aef4a7aac0f7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -0,0 +1,768 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <linux/power_supply.h>
+#include <linux/vga_switcheroo.h>
+#include <acpi/video.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "amdgpu.h"
+#include "amdgpu_acpi.h"
+#include "atom.h"
+
+#define ACPI_AC_CLASS           "ac_adapter"
+
+extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
+
+struct atif_verify_interface {
+        u16 size;               /* structure size in bytes (includes size field) */
+        u16 version;            /* version */
+        u32 notification_mask;  /* supported notifications mask */
+        u32 function_bits;      /* supported functions bit vector */
+} __packed;
+
+struct atif_system_params {
+        u16 size;               /* structure size in bytes (includes size field) */
+        u32 valid_mask;         /* valid flags mask */
+        u32 flags;              /* flags */
+        u8 command_code;        /* notify command code */
+} __packed;
+
+struct atif_sbios_requests {
+        u16 size;               /* structure size in bytes (includes size field) */
+        u32 pending;            /* pending sbios requests */
+        u8 panel_exp_mode;      /* panel expansion mode */
+        u8 thermal_gfx;         /* thermal state: target gfx controller */
+        u8 thermal_state;       /* thermal state: state id (0: exit state, non-0: state) */
+        u8 forced_power_gfx;    /* forced power state: target gfx controller */
+        u8 forced_power_state;  /* forced power state: state id */
+        u8 system_power_src;    /* system power source */
+        u8 backlight_level;     /* panel backlight level (0-255) */
+} __packed;
+
+#define ATIF_NOTIFY_MASK        0x3
+#define ATIF_NOTIFY_NONE        0
+#define ATIF_NOTIFY_81          1
+#define ATIF_NOTIFY_N           2
+
+struct atcs_verify_interface {
+        u16 size;               /* structure size in bytes (includes size field) */
+        u16 version;            /* version */
+        u32 function_bits;      /* supported functions bit vector */
+} __packed;
+
+#define ATCS_VALID_FLAGS_MASK   0x3
+
+struct atcs_pref_req_input {
+        u16 size;               /* structure size in bytes (includes size field) */
+        u16 client_id;          /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
+        u16 valid_flags_mask;   /* valid flags mask */
+        u16 flags;              /* flags */
+        u8 req_type;            /* request type */
+        u8 perf_req;            /* performance request */
+} __packed;
+
+struct atcs_pref_req_output {
+        u16 size;               /* structure size in bytes (includes size field) */
+        u8 ret_val;             /* return value */
+} __packed;
+
+/* Call the ATIF method
+ */
+/**
+ * amdgpu_atif_call - call an ATIF method
+ *
+ * @handle: acpi handle
+ * @function: the ATIF function to execute
+ * @params: ATIF function params
+ *
+ * Executes the requested ATIF function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
+                                           struct acpi_buffer *params)
+{
+        acpi_status status;
+        union acpi_object atif_arg_elements[2];
+        struct acpi_object_list atif_arg;
+        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+        atif_arg.count = 2;
+        atif_arg.pointer = &atif_arg_elements[0];
+
+        atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
+        atif_arg_elements[0].integer.value = function;
+
+        if (params) {
+                atif_arg_elements[1].type = ACPI_TYPE_BUFFER;
+                atif_arg_elements[1].buffer.length = params->length;
+                atif_arg_elements[1].buffer.pointer = params->pointer;
+        } else {
+                /* We need a second fake parameter */
+                atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
+                atif_arg_elements[1].integer.value = 0;
+        }
+
+        status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+
+        /* Fail only if calling the method fails and ATIF is supported */
+        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+                DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+                                 acpi_format_exception(status));
+                kfree(buffer.pointer);
+                return NULL;
+        }
+
+        return buffer.pointer;
+}
+
+/**
+ * amdgpu_atif_parse_notification - parse supported notifications
+ *
+ * @n: supported notifications struct
+ * @mask: supported notifications mask from ATIF
+ *
+ * Use the supported notifications mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications
+ * are supported (all asics).
+ */
+static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask)
+{
+        n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
+        n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
+        n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
+        n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
+        n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
+        n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
+        n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
+        n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
+        n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+}
+
+/**
+ * amdgpu_atif_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATIF
+ *
+ * Use the supported functions mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mask)
+{
+        f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
+        f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
+        f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
+        f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
+        f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
+        f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
+        f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
+        f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
+        f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
+        f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+}
+
+/**
+ * amdgpu_atif_verify_interface - verify ATIF
+ *
+ * @handle: acpi handle
+ * @atif: amdgpu atif struct
+ *
+ * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function
+ * to initialize ATIF and determine what features are supported
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_verify_interface(acpi_handle handle,
+                                        struct amdgpu_atif *atif)
+{
+        union acpi_object *info;
+        struct atif_verify_interface output;
+        size_t size;
+        int err = 0;
+
+        info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+        if (!info)
+                return -EIO;
+
+        memset(&output, 0, sizeof(output));
+
+        size = *(u16 *) info->buffer.pointer;
+        if (size < 12) {
+                DRM_INFO("ATIF buffer is too small: %zu\n", size);
+                err = -EINVAL;
+                goto out;
+        }
+        size = min(sizeof(output), size);
+
+        memcpy(&output, info->buffer.pointer, size);
+
+        /* TODO: check version? */
+        DRM_DEBUG_DRIVER("ATIF version %u\n", output.version);
+
+        amdgpu_atif_parse_notification(&atif->notifications, output.notification_mask);
+        amdgpu_atif_parse_functions(&atif->functions, output.function_bits);
+
+out:
+        kfree(info);
+        return err;
+}
+
+/**
+ * amdgpu_atif_get_notification_params - determine notify configuration
+ *
+ * @handle: acpi handle
+ * @n: atif notification configuration struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
+ * to determine if a notifier is used and if so which one
+ * (all asics). This is either Notify(VGA, 0x81) or Notify(VGA, n)
+ * where n is specified in the result if a notifier is used.
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_get_notification_params(acpi_handle handle,
+                                               struct amdgpu_atif_notification_cfg *n)
+{
+        union acpi_object *info;
+        struct atif_system_params params;
+        size_t size;
+        int err = 0;
+
+        info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+        if (!info) {
+                err = -EIO;
+                goto out;
+        }
+
+        size = *(u16 *) info->buffer.pointer;
+        if (size < 10) {
+                err = -EINVAL;
+                goto out;
+        }
+
+        memset(&params, 0, sizeof(params));
+        size = min(sizeof(params), size);
+        memcpy(&params, info->buffer.pointer, size);
+
+        DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n",
+                         params.valid_mask, params.flags);
+        params.flags = params.flags & params.valid_mask;
+
+        if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) {
+                n->enabled = false;
+                n->command_code = 0;
+        } else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) {
+                n->enabled = true;
+                n->command_code = 0x81;
+        } else {
+                if (size < 11) {
+                        err = -EINVAL;
+                        goto out;
+                }
+                n->enabled = true;
+                n->command_code = params.command_code;
+        }
+
+out:
+        DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n",
+                         (n->enabled ? "enabled" : "disabled"),
+                         n->command_code);
+        kfree(info);
+        return err;
+}
+
+/**
+ * amdgpu_atif_get_sbios_requests - get requested sbios event
+ *
+ * @handle: acpi handle
+ * @req: atif sbios request struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
+ * to determine what requests the sbios is making to the driver
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
+                                          struct atif_sbios_requests *req)
+{
+        union acpi_object *info;
+        size_t size;
+        int count = 0;
+
+        info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+        if (!info)
+                return -EIO;
+
+        size = *(u16 *)info->buffer.pointer;
+        if (size < 0xd) {
+                count = -EINVAL;
+                goto out;
+        }
+        memset(req, 0, sizeof(*req));
+
+        size = min(sizeof(*req), size);
+        memcpy(req, info->buffer.pointer, size);
+        DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);
+
+        count = hweight32(req->pending);
+
+out:
+        kfree(info);
+        return count;
+}
+
+/**
+ * amdgpu_atif_handler - handle ATIF notify requests
+ *
+ * @adev: amdgpu_device pointer
+ * @event: atif sbios request struct
+ *
+ * Checks the acpi event and if it matches an atif event,
+ * handles it.
+ * Returns NOTIFY code
+ */
+int amdgpu_atif_handler(struct amdgpu_device *adev,
+                        struct acpi_bus_event *event)
+{
+        struct amdgpu_atif *atif = &adev->atif;
+        struct atif_sbios_requests req;
+        acpi_handle handle;
+        int count;
+
+        DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
+                         event->device_class, event->type);
+
+        if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+                return NOTIFY_DONE;
+
+        if (!atif->notification_cfg.enabled ||
+            event->type != atif->notification_cfg.command_code)
+                /* Not our event */
+                return NOTIFY_DONE;
+
+        /* Check pending SBIOS requests */
+        handle = ACPI_HANDLE(&adev->pdev->dev);
+        count = amdgpu_atif_get_sbios_requests(handle, &req);
+
+        if (count <= 0)
+                return NOTIFY_DONE;
+
+        DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+
+        if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
+                struct amdgpu_encoder *enc = atif->encoder_for_bl;
+
+                if (enc) {
+                        struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+
+                        DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+                                         req.backlight_level);
+
+                        amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+                        backlight_force_update(dig->bl_dev,
+                                               BACKLIGHT_UPDATE_HOTKEY);
+#endif
+                }
+        }
+        /* TODO: check other events */
+
+        /* We've handled the event, stop the notifier chain. The ACPI interface
+         * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
+         * userspace if the event was generated only to signal a SBIOS
+         * request.
+         */
+        return NOTIFY_BAD;
+}
+
+/* Call the ATCS method
+ */
+/**
+ * amdgpu_atcs_call - call an ATCS method
+ *
+ * @handle: acpi handle
+ * @function: the ATCS function to execute
+ * @params: ATCS function params
+ *
+ * Executes the requested ATCS function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function,
+                                           struct acpi_buffer *params)
+{
+        acpi_status status;
+        union acpi_object atcs_arg_elements[2];
+        struct acpi_object_list atcs_arg;
+        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+        atcs_arg.count = 2;
+        atcs_arg.pointer = &atcs_arg_elements[0];
+
+        atcs_arg_elements[0].type = ACPI_TYPE_INTEGER;
+        atcs_arg_elements[0].integer.value = function;
+
+        if (params) {
+                atcs_arg_elements[1].type = ACPI_TYPE_BUFFER;
+                atcs_arg_elements[1].buffer.length = params->length;
+                atcs_arg_elements[1].buffer.pointer = params->pointer;
+        } else {
+                /* We need a second fake parameter */
+                atcs_arg_elements[1].type = ACPI_TYPE_INTEGER;
+                atcs_arg_elements[1].integer.value = 0;
+        }
+
+        status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
+
+        /* Fail only if calling the method fails and ATCS is supported */
+        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+                DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n",
+                                 acpi_format_exception(status));
+                kfree(buffer.pointer);
+                return NULL;
+        }
+
+        return buffer.pointer;
+}
+
+/**
+ * amdgpu_atcs_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATCS
+ *
+ * Use the supported functions mask from ATCS function
+ * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mask)
+{
+        f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED;
+        f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
+        f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
+        f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
+}
+
+/**
+ * amdgpu_atcs_verify_interface - verify ATCS
+ *
+ * @handle: acpi handle
+ * @atcs: amdgpu atcs struct
+ *
+ * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
+ * to initialize ATCS and determine what features are supported
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int amdgpu_atcs_verify_interface(acpi_handle handle,
+                                        struct amdgpu_atcs *atcs)
+{
+        union acpi_object *info;
+        struct atcs_verify_interface output;
+        size_t size;
+        int err = 0;
+
+        info = amdgpu_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
+        if (!info)
+                return -EIO;
+
+        memset(&output, 0, sizeof(output));
+
+        size = *(u16 *) info->buffer.pointer;
+        if (size < 8) {
+                DRM_INFO("ATCS buffer is too small: %zu\n", size);
+                err = -EINVAL;
+                goto out;
+        }
+        size = min(sizeof(output), size);
+
+        memcpy(&output, info->buffer.pointer, size);
+
+        /* TODO: check version? */
+        DRM_DEBUG_DRIVER("ATCS version %u\n", output.version);
+
+        amdgpu_atcs_parse_functions(&atcs->functions, output.function_bits);
+
+out:
+        kfree(info);
+        return err;
+}
+
+/**
+ * amdgpu_acpi_is_pcie_performance_request_supported
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Check if the ATCS pcie_perf_req and pcie_dev_rdy methods
+ * are supported (all asics).
+ * returns true if supported, false if not.
+ */
+bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev)
+{
+        struct amdgpu_atcs *atcs = &adev->atcs;
+
+        if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy)
+                return true;
+
+        return false;
+}
+
+/**
+ * amdgpu_acpi_pcie_notify_device_ready
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Executes the PCIE_DEVICE_READY_NOTIFICATION method
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
+{
+        acpi_handle handle;
+        union acpi_object *info;
+        struct amdgpu_atcs *atcs = &adev->atcs;
+
+        /* Get the device handle */
+        handle = ACPI_HANDLE(&adev->pdev->dev);
+        if (!handle)
+                return -EINVAL;
+
+        if (!atcs->functions.pcie_dev_rdy)
+                return -EINVAL;
+
+        info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
+        if (!info)
+                return -EIO;
+
+        kfree(info);
+
+        return 0;
+}
+
+/**
+ * amdgpu_acpi_pcie_performance_request
+ *
+ * @adev: amdgpu_device pointer
+ * @perf_req: requested perf level (pcie gen speed)
+ * @advertise: set advertise caps flag if set
+ *
+ * Executes the PCIE_PERFORMANCE_REQUEST method to
+ * change the pcie gen speed (all asics).
+ * returns 0 on success, error on failure.
+ */
+int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
+                                         u8 perf_req, bool advertise)
+{
+        acpi_handle handle;
+        union acpi_object *info;
+        struct amdgpu_atcs *atcs = &adev->atcs;
+        struct atcs_pref_req_input atcs_input;
+        struct atcs_pref_req_output atcs_output;
+        struct acpi_buffer params;
+        size_t size;
+        u32 retry = 3;
+
+        /* Get the device handle */
+        handle = ACPI_HANDLE(&adev->pdev->dev);
+        if (!handle)
+                return -EINVAL;
+
+        if (!atcs->functions.pcie_perf_req)
+                return -EINVAL;
+
+        atcs_input.size = sizeof(struct atcs_pref_req_input);
+        /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
+        atcs_input.client_id = adev->pdev->devfn | (adev->pdev->bus->number << 8);
+        atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
+        atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
+        if (advertise)
+                atcs_input.flags |= ATCS_ADVERTISE_CAPS;
+        atcs_input.req_type = ATCS_PCIE_LINK_SPEED;
+        atcs_input.perf_req = perf_req;
+
+        params.length = sizeof(struct atcs_pref_req_input);
+        params.pointer = &atcs_input;
+
+        while (retry--) {
+                info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
+                if (!info)
+                        return -EIO;
+
+                memset(&atcs_output, 0, sizeof(atcs_output));
+
+                size = *(u16 *) info->buffer.pointer;
+                if (size < 3) {
+                        DRM_INFO("ATCS buffer is too small: %zu\n", size);
+                        kfree(info);
+                        return -EINVAL;
+                }
+                size = min(sizeof(atcs_output), size);
+
+                memcpy(&atcs_output, info->buffer.pointer, size);
+
+                kfree(info);
+
+                switch (atcs_output.ret_val) {
+                case ATCS_REQUEST_REFUSED:
+                default:
+                        return -EINVAL;
+                case ATCS_REQUEST_COMPLETE:
+                        return 0;
+                case ATCS_REQUEST_IN_PROGRESS:
+                        udelay(10);
+                        break;
+                }
+        }
+
+        return 0;
+}
+
+/**
+ * amdgpu_acpi_event - handle notify events
+ *
+ * @nb: notifier block
+ * @val: val
+ * @data: acpi event
+ *
+ * Calls relevant amdgpu functions in response to various
+ * acpi events.
+ * Returns NOTIFY code
+ */
+static int amdgpu_acpi_event(struct notifier_block *nb,
+                             unsigned long val,
+                             void *data)
+{
+        struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, acpi_nb);
+        struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+        if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
+                if (power_supply_is_system_supplied() > 0)
+                        DRM_DEBUG_DRIVER("pm: AC\n");
+                else
+                        DRM_DEBUG_DRIVER("pm: DC\n");
+
+                amdgpu_pm_acpi_event_handler(adev);
+        }
+
+        /* Check for pending SBIOS requests */
+        return amdgpu_atif_handler(adev, entry);
+}
+
+/* Call all ACPI methods here */
+/**
+ * amdgpu_acpi_init - init driver acpi support
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Verifies the AMD ACPI interfaces and registers with the acpi
+ * notifier chain (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_acpi_init(struct amdgpu_device *adev)
+{
+        acpi_handle handle;
+        struct amdgpu_atif *atif = &adev->atif;
+        struct amdgpu_atcs *atcs = &adev->atcs;
+        int ret;
+
+        /* Get the device handle */
+        handle = ACPI_HANDLE(&adev->pdev->dev);
+
+        if (!adev->bios || !handle)
+                return 0;
+
+        /* Call the ATCS method */
+        ret = amdgpu_atcs_verify_interface(handle, atcs);
+        if (ret) {
+                DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
+        }
+
+        /* Call the ATIF method */
+        ret = amdgpu_atif_verify_interface(handle, atif);
+        if (ret) {
+                DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+                goto out;
+        }
+
+        if (atif->notifications.brightness_change) {
+                struct drm_encoder *tmp;
+
+                /* Find the encoder controlling the brightness */
+                list_for_each_entry(tmp, &adev->ddev->mode_config.encoder_list,
+                                    head) {
+                        struct amdgpu_encoder *enc = to_amdgpu_encoder(tmp);
+
+                        if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+                            enc->enc_priv) {
+                                if (adev->is_atom_bios) {
+                                        struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+                                        if (dig->bl_dev) {
+                                                atif->encoder_for_bl = enc;
+                                                break;
+                                        }
+                                }
+                        }
+                }
+        }
+
+        if (atif->functions.sbios_requests && !atif->functions.system_params) {
+                /* XXX check this workaround, if sbios request function is
+                 * present we have to see how it's configured in the system
+                 * params
+                 */
+                atif->functions.system_params = true;
+        }
+
+        if (atif->functions.system_params) {
+                ret = amdgpu_atif_get_notification_params(handle,
+                                &atif->notification_cfg);
+                if (ret) {
+                        DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
+                                         ret);
+                        /* Disable notification */
+                        atif->notification_cfg.enabled = false;
+                }
+        }
+
+out:
+        adev->acpi_nb.notifier_call = amdgpu_acpi_event;
+        register_acpi_notifier(&adev->acpi_nb);
+
+        return ret;
+}
+
+/**
+ * amdgpu_acpi_fini - tear down driver acpi support
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Unregisters with the acpi notifier chain (all asics).
+ */
+void amdgpu_acpi_fini(struct amdgpu_device *adev)
+{
+        unregister_acpi_notifier(&adev->acpi_nb);
+}
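
As a worked example of the client id encoding used by amdgpu_acpi_pcie_performance_request() above: devfn already packs the PCI device number in bits 7:3 and the function number in bits 2:0, so shifting the bus number into bits 15:8 yields exactly the layout the ATCS spec documents. The sketch below uses made-up values (a dGPU at 01:00.0) and is not part of the patch:

    /* Illustrative sketch (not in the patch): ATCS client id for a dGPU at 01:00.0 */
    #include <linux/types.h>

    static u16 atcs_client_id_example(void)
    {
            u8 bus = 0x01;                /* PCI bus number -> bits 15:8 */
            u8 devfn = (0x00 << 3) | 0x0; /* PCI_DEVFN(0, 0): device bits 7:3, function bits 2:0 */

            return devfn | (bus << 8);    /* = 0x0100, as stored in atcs_input.client_id */
    }
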
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h
new file mode 100644
index 000000000000..01a29c3d7011
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_ACPI_H
+#define AMDGPU_ACPI_H
+
+struct amdgpu_device;
+struct acpi_bus_event;
+
+int amdgpu_atif_handler(struct amdgpu_device *adev,
+                        struct acpi_bus_event *event);
+
+/* AMD hw uses four ACPI control methods:
+ * 1. ATIF
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATIF provides an entry point for the gfx driver to interact with the sbios.
+ * The AMD ACPI notification mechanism uses Notify (VGA, 0x81) or a custom
+ * notification. Which notification is used is indicated by the ATIF Control
+ * Method GET_SYSTEM_PARAMETERS. When the driver receives Notify (VGA, 0x81) or
+ * a custom notification it invokes ATIF Control Method GET_SYSTEM_BIOS_REQUESTS
+ * to identify pending System BIOS requests and associated parameters. For
+ * example, if one of the pending requests is DISPLAY_SWITCH_REQUEST, the driver
+ * will perform display device detection and invoke ATIF Control Method
+ * SELECT_ACTIVE_DISPLAYS.
+ *
+ * 2. ATPX
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATPX methods are used on PowerXpress systems to handle mux switching and
+ * discrete GPU power control.
+ *
+ * 3. ATRM
+ * ARG0: (ACPI_INTEGER) offset of vbios rom data
+ * ARG1: (ACPI_BUFFER) size of the buffer to fill (up to 4K).
+ * OUTPUT: (ACPI_BUFFER) output buffer
+ * ATRM provides an interface to access the discrete GPU vbios image on
+ * PowerXpress systems with multiple GPUs.
+ *
+ * 4. ATCS
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATCS provides an interface to AMD chipset specific functionality.
+ *
+ */
+/* ATIF */
+#define ATIF_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATIF_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported notifications mask
+ * DWORD - supported functions bit vector
+ */
+/* Notifications mask */
+#       define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED               (1 << 0)
+#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED        (1 << 1)
+#       define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED         (1 << 2)
+#       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED    (1 << 3)
+#       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED   (1 << 4)
+#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED          (1 << 5)
+#       define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED                (1 << 6)
+#       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED      (1 << 7)
+#       define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED                   (1 << 8)
+/* supported functions vector */
+#       define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED                (1 << 0)
+#       define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED             (1 << 1)
+#       define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED               (1 << 2)
+#       define ATIF_GET_LID_STATE_SUPPORTED                        (1 << 3)
+#       define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED            (1 << 4)
+#       define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED              (1 << 5)
+#       define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED   (1 << 6)
+#       define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED     (1 << 7)
+#       define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED      (1 << 12)
+#       define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED            (1 << 14)
+#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS                        0x1
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ *
+ * OR
+ *
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ * BYTE  - notify command code
+ *
+ * flags
+ * bits 1:0:
+ * 0 - Notify(VGA, 0x81) is not used for notification
+ * 1 - Notify(VGA, 0x81) is used for notification
+ * 2 - Notify(VGA, n) is used for notification where
+ * n (0xd0-0xd9) is specified in notify command code.
+ * bit 2:
+ * 1 - lid changes not reported through int10
+ */
+#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS                     0x2
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - pending sbios requests
+ * BYTE  - panel expansion mode
+ * BYTE  - thermal state: target gfx controller
+ * BYTE  - thermal state: state id (0: exit state, non-0: state)
+ * BYTE  - forced power state: target gfx controller
+ * BYTE  - forced power state: state id
+ * BYTE  - system power source
+ * BYTE  - panel backlight level (0-255)
+ */
+/* pending sbios requests */
+#       define ATIF_DISPLAY_SWITCH_REQUEST                         (1 << 0)
+#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST                  (1 << 1)
+#       define ATIF_THERMAL_STATE_CHANGE_REQUEST                   (1 << 2)
+#       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST              (1 << 3)
+#       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST             (1 << 4)
+#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST                    (1 << 5)
+#       define ATIF_PX_GFX_SWITCH_REQUEST                          (1 << 6)
+#       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST                (1 << 7)
+#       define ATIF_DGPU_DISPLAY_EVENT                             (1 << 8)
+/* panel expansion mode */
+#       define ATIF_PANEL_EXPANSION_DISABLE                        0
+#       define ATIF_PANEL_EXPANSION_FULL                           1
+#       define ATIF_PANEL_EXPANSION_ASPECT                         2
+/* target gfx controller */
+#       define ATIF_TARGET_GFX_SINGLE                              0
+#       define ATIF_TARGET_GFX_PX_IGPU                             1
+#       define ATIF_TARGET_GFX_PX_DGPU                             2
+/* system power source */
+#       define ATIF_POWER_SOURCE_AC                                1
+#       define ATIF_POWER_SOURCE_DC                                2
+#       define ATIF_POWER_SOURCE_RESTRICTED_AC_1                   3
+#       define ATIF_POWER_SOURCE_RESTRICTED_AC_2                   4
+#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS                       0x3
+/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - selected displays
+ * WORD  - connected displays
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - selected displays
+ */
+#       define ATIF_LCD1                                           (1 << 0)
+#       define ATIF_CRT1                                           (1 << 1)
+#       define ATIF_TV                                             (1 << 2)
+#       define ATIF_DFP1                                           (1 << 3)
+#       define ATIF_CRT2                                           (1 << 4)
+#       define ATIF_LCD2                                           (1 << 5)
+#       define ATIF_DFP2                                           (1 << 7)
+#       define ATIF_CV                                             (1 << 8)
+#       define ATIF_DFP3                                           (1 << 9)
+#       define ATIF_DFP4                                           (1 << 10)
+#       define ATIF_DFP5                                           (1 << 11)
+#       define ATIF_DFP6                                           (1 << 12)
+#define ATIF_FUNCTION_GET_LID_STATE                                0x4
+/* ARG0: ATIF_FUNCTION_GET_LID_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - lid state (0: open, 1: closed)
+ *
+ * GET_LID_STATE only works at boot and resume, for general lid
+ * status, use the kernel provided status
+ */
+#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS                    0x5
+/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - 0
+ * BYTE  - TV standard
+ */
+#       define ATIF_TV_STD_NTSC                                    0
+#       define ATIF_TV_STD_PAL                                     1
+#       define ATIF_TV_STD_PALM                                    2
+#       define ATIF_TV_STD_PAL60                                   3
+#       define ATIF_TV_STD_NTSCJ                                   4
+#       define ATIF_TV_STD_PALCN                                   5
+#       define ATIF_TV_STD_PALN                                    6
+#       define ATIF_TV_STD_SCART_RGB                               9
+#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS                      0x6
+/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - 0
+ * BYTE  - TV standard
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS           0x7
+/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - panel expansion mode
+ */
+#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS             0x8
+/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - panel expansion mode
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION              0xD
+/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - gfx controller id
+ * BYTE  - current temperature (degrees Celsius)
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES                    0xF
+/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of gfx devices
+ * WORD  - device structure size in bytes (excludes device size field)
+ * DWORD - flags         \
+ * WORD  - bus number    } repeated structure
+ * WORD  - device number /
+ */
+/* flags */
+#       define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE                   (1 << 0)
+#       define ATIF_XGP_PORT                                       (1 << 1)
+#       define ATIF_VGA_ENABLED_GRAPHICS_DEVICE                    (1 << 2)
+#       define ATIF_XGP_PORT_IN_DOCK                               (1 << 3)
+
+/* ATPX */
+#define ATPX_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATPX_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported functions bit vector
+ */
+/* supported functions vector */
+#       define ATPX_GET_PX_PARAMETERS_SUPPORTED                    (1 << 0)
+#       define ATPX_POWER_CONTROL_SUPPORTED                        (1 << 1)
+#       define ATPX_DISPLAY_MUX_CONTROL_SUPPORTED                  (1 << 2)
+#       define ATPX_I2C_MUX_CONTROL_SUPPORTED                      (1 << 3)
+#       define ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED (1 << 4)
+#       define ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED (1 << 5)
+#       define ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED       (1 << 7)
+#       define ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED          (1 << 8)
+#define ATPX_FUNCTION_GET_PX_PARAMETERS                            0x1
+/* ARG0: ATPX_FUNCTION_GET_PX_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ */
+/* flags */
+#       define ATPX_LVDS_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 0)
+#       define ATPX_CRT1_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 1)
+#       define ATPX_DVI1_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 2)
+#       define ATPX_CRT1_RGB_SIGNAL_MUXED                          (1 << 3)
+#       define ATPX_TV_SIGNAL_MUXED                                (1 << 4)
+#       define ATPX_DFP_SIGNAL_MUXED                               (1 << 5)
+#       define ATPX_SEPARATE_MUX_FOR_I2C                           (1 << 6)
+#       define ATPX_DYNAMIC_PX_SUPPORTED                           (1 << 7)
+#       define ATPX_ACF_NOT_SUPPORTED                              (1 << 8)
+#       define ATPX_FIXED_NOT_SUPPORTED                            (1 << 9)
+#       define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED               (1 << 10)
+#       define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS                    (1 << 11)
+#define ATPX_FUNCTION_POWER_CONTROL                                0x2
+/* ARG0: ATPX_FUNCTION_POWER_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - dGPU power state (0: power off, 1: power on)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_DISPLAY_MUX_CONTROL                          0x3
+/* ARG0: ATPX_FUNCTION_DISPLAY_MUX_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - display mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#       define ATPX_INTEGRATED_GPU                                 0
+#       define ATPX_DISCRETE_GPU                                   1
+#define ATPX_FUNCTION_I2C_MUX_CONTROL                              0x4
+/* ARG0: ATPX_FUNCTION_I2C_MUX_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - i2c/aux/hpd mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION    0x5
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION      0x6
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING               0x8
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of display connectors
+ * WORD  - connector structure size in bytes (excludes connector size field)
+ * BYTE  - flags                                                      \
+ * BYTE  - ATIF display vector bit position                           } repeated
+ * BYTE  - adapter id (0: iGPU, 1-n: dGPU ordered by pcie bus number) } structure
+ * WORD  - connector ACPI id                                          /
+ */
+/* flags */
+#       define ATPX_DISPLAY_OUTPUT_SUPPORTED_BY_ADAPTER_ID_DEVICE  (1 << 0)
+#       define ATPX_DISPLAY_HPD_SUPPORTED_BY_ADAPTER_ID_DEVICE     (1 << 1)
+#       define ATPX_DISPLAY_I2C_SUPPORTED_BY_ADAPTER_ID_DEVICE     (1 << 2)
+#define ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS                  0x9
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of HPD/DDC ports
+ * WORD  - port structure size in bytes (excludes port size field)
+ * BYTE  - ATIF display vector bit position \
+ * BYTE  - hpd id                           } repeated structure
| 354 | * BYTE - ddc id / | ||
| 355 | * | ||
| 356 | * available on A+A systems only | ||
| 357 | */ | ||
| 358 | /* hpd id */ | ||
| 359 | # define ATPX_HPD_NONE 0 | ||
| 360 | # define ATPX_HPD1 1 | ||
| 361 | # define ATPX_HPD2 2 | ||
| 362 | # define ATPX_HPD3 3 | ||
| 363 | # define ATPX_HPD4 4 | ||
| 364 | # define ATPX_HPD5 5 | ||
| 365 | # define ATPX_HPD6 6 | ||
| 366 | /* ddc id */ | ||
| 367 | # define ATPX_DDC_NONE 0 | ||
| 368 | # define ATPX_DDC1 1 | ||
| 369 | # define ATPX_DDC2 2 | ||
| 370 | # define ATPX_DDC3 3 | ||
| 371 | # define ATPX_DDC4 4 | ||
| 372 | # define ATPX_DDC5 5 | ||
| 373 | # define ATPX_DDC6 6 | ||
| 374 | # define ATPX_DDC7 7 | ||
| 375 | # define ATPX_DDC8 8 | ||
| 376 | |||
| 377 | /* ATCS */ | ||
| 378 | #define ATCS_FUNCTION_VERIFY_INTERFACE 0x0 | ||
| 379 | /* ARG0: ATCS_FUNCTION_VERIFY_INTERFACE | ||
| 380 | * ARG1: none | ||
| 381 | * OUTPUT: | ||
| 382 | * WORD - structure size in bytes (includes size field) | ||
| 383 | * WORD - version | ||
| 384 | * DWORD - supported functions bit vector | ||
| 385 | */ | ||
| 386 | /* supported functions vector */ | ||
| 387 | # define ATCS_GET_EXTERNAL_STATE_SUPPORTED (1 << 0) | ||
| 388 | # define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED (1 << 1) | ||
| 389 | # define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED (1 << 2) | ||
| 390 | # define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED (1 << 3) | ||
| 391 | #define ATCS_FUNCTION_GET_EXTERNAL_STATE 0x1 | ||
| 392 | /* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE | ||
| 393 | * ARG1: none | ||
| 394 | * OUTPUT: | ||
| 395 | * WORD - structure size in bytes (includes size field) | ||
| 396 | * DWORD - valid flags mask | ||
| 397 | * DWORD - flags (0: undocked, 1: docked) | ||
| 398 | */ | ||
| 399 | /* flags */ | ||
| 400 | # define ATCS_DOCKED (1 << 0) | ||
| 401 | #define ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST 0x2 | ||
| 402 | /* ARG0: ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST | ||
| 403 | * ARG1: | ||
| 404 | * WORD - structure size in bytes (includes size field) | ||
| 405 | * WORD - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) | ||
| 406 | * WORD - valid flags mask | ||
| 407 | * WORD - flags | ||
| 408 | * BYTE - request type | ||
| 409 | * BYTE - performance request | ||
| 410 | * OUTPUT: | ||
| 411 | * WORD - structure size in bytes (includes size field) | ||
| 412 | * BYTE - return value | ||
| 413 | */ | ||
| 414 | /* flags */ | ||
| 415 | # define ATCS_ADVERTISE_CAPS (1 << 0) | ||
| 416 | # define ATCS_WAIT_FOR_COMPLETION (1 << 1) | ||
| 417 | /* request type */ | ||
| 418 | # define ATCS_PCIE_LINK_SPEED 1 | ||
| 419 | /* performance request */ | ||
| 420 | # define ATCS_REMOVE 0 | ||
| 421 | # define ATCS_FORCE_LOW_POWER 1 | ||
| 422 | # define ATCS_PERF_LEVEL_1 2 /* PCIE Gen 1 */ | ||
| 423 | # define ATCS_PERF_LEVEL_2 3 /* PCIE Gen 2 */ | ||
| 424 | # define ATCS_PERF_LEVEL_3 4 /* PCIE Gen 3 */ | ||
| 425 | /* return value */ | ||
| 426 | # define ATCS_REQUEST_REFUSED 1 | ||
| 427 | # define ATCS_REQUEST_COMPLETE 2 | ||
| 428 | # define ATCS_REQUEST_IN_PROGRESS 3 | ||
| 429 | #define ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION 0x3 | ||
| 430 | /* ARG0: ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION | ||
| 431 | * ARG1: none | ||
| 432 | * OUTPUT: none | ||
| 433 | */ | ||
| 434 | #define ATCS_FUNCTION_SET_PCIE_BUS_WIDTH 0x4 | ||
| 435 | /* ARG0: ATCS_FUNCTION_SET_PCIE_BUS_WIDTH | ||
| 436 | * ARG1: | ||
| 437 | * WORD - structure size in bytes (includes size field) | ||
| 438 | * WORD - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) | ||
| 439 | * BYTE - number of active lanes | ||
| 440 | * OUTPUT: | ||
| 441 | * WORD - structure size in bytes (includes size field) | ||
| 442 | * BYTE - number of active lanes | ||
| 443 | */ | ||
| 444 | |||
| 445 | #endif | ||
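
Editor's note: the PCIE_PERFORMANCE_REQUEST description above packs the PCI bus/device/function into a single client-id WORD (bits 2-0: func, 7-3: dev, 15-8: bus). Below is a minimal sketch of how a caller could build that ARG1 buffer under the layout documented in this header; the struct and helper names are hypothetical and endianness handling is omitted, so this is not part of the patch itself.

```c
#include <linux/types.h>

/* Hypothetical mirror of the ARG1 layout described above. */
struct atcs_pcie_perf_req {
	u16 size;	/* structure size in bytes (includes size field) */
	u16 client_id;	/* bits 2-0: func, 7-3: dev, 15-8: bus */
	u16 valid_flags;
	u16 flags;
	u8 req_type;	/* ATCS_PCIE_LINK_SPEED */
	u8 perf_req;	/* ATCS_REMOVE/FORCE_LOW_POWER/PERF_LEVEL_n */
} __packed;

static void atcs_build_pcie_perf_req(struct atcs_pcie_perf_req *req,
				     u8 bus, u8 dev, u8 func, u8 perf)
{
	req->size = sizeof(*req);
	req->client_id = (bus << 8) | ((dev & 0x1f) << 3) | (func & 0x7);
	req->valid_flags = ATCS_WAIT_FOR_COMPLETION;
	req->flags = ATCS_WAIT_FOR_COMPLETION;
	req->req_type = ATCS_PCIE_LINK_SPEED;
	req->perf_req = perf;	/* e.g. ATCS_PERF_LEVEL_2 for PCIE Gen 2 */
}
```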
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c new file mode 100644 index 000000000000..857ba0897159 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c | |||
| @@ -0,0 +1,105 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Christian König. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Christian König | ||
| 25 | */ | ||
| 26 | #include <linux/hdmi.h> | ||
| 27 | #include <linux/gcd.h> | ||
| 28 | #include <drm/drmP.h> | ||
| 29 | #include <drm/amdgpu_drm.h> | ||
| 30 | #include "amdgpu.h" | ||
| 31 | |||
| 32 | static const struct amdgpu_afmt_acr amdgpu_afmt_predefined_acr[] = { | ||
| 33 | /* 32kHz 44.1kHz 48kHz */ | ||
| 34 | /* Clock N CTS N CTS N CTS */ | ||
| 35 | { 25175, 4096, 25175, 28224, 125875, 6144, 25175 }, /* 25.20/1.001 MHz */ | ||
| 36 | { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */ | ||
| 37 | { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */ | ||
| 38 | { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */ | ||
| 39 | { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */ | ||
| 40 | { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */ | ||
| 41 | { 74176, 4096, 74176, 5733, 75335, 6144, 74176 }, /* 74.25/1.001 MHz */ | ||
| 42 | { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */ | ||
| 43 | { 148352, 4096, 148352, 5733, 150670, 6144, 148352 }, /* 148.50/1.001 MHz */ | ||
| 44 | { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */ | ||
| 45 | }; | ||
| 46 | |||
| 47 | |||
| 48 | /* | ||
| 49 | * calculate CTS and N values if they are not found in the table | ||
| 50 | */ | ||
| 51 | static void amdgpu_afmt_calc_cts(uint32_t clock, int *CTS, int *N, int freq) | ||
| 52 | { | ||
| 53 | int n, cts; | ||
| 54 | unsigned long div, mul; | ||
| 55 | |||
| 56 | /* Safe, but overly large values */ | ||
| 57 | n = 128 * freq; | ||
| 58 | cts = clock * 1000; | ||
| 59 | |||
| 60 | /* Smallest valid fraction */ | ||
| 61 | div = gcd(n, cts); | ||
| 62 | |||
| 63 | n /= div; | ||
| 64 | cts /= div; | ||
| 65 | |||
| 66 | /* | ||
| 67 | * The optimal N is 128*freq/1000. Calculate the closest larger | ||
| 68 | * value that doesn't truncate any bits. | ||
| 69 | */ | ||
| 70 | mul = ((128*freq/1000) + (n-1))/n; | ||
| 71 | |||
| 72 | n *= mul; | ||
| 73 | cts *= mul; | ||
| 74 | |||
| 75 | /* Check that we are in spec (not always possible) */ | ||
| 76 | if (n < (128*freq/1500)) | ||
| 77 | printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n"); | ||
| 78 | if (n > (128*freq/300)) | ||
| 79 | printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n"); | ||
| 80 | |||
| 81 | *N = n; | ||
| 82 | *CTS = cts; | ||
| 83 | |||
| 84 | DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n", | ||
| 85 | *N, *CTS, freq); | ||
| 86 | } | ||
| 87 | |||
| 88 | struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock) | ||
| 89 | { | ||
| 90 | struct amdgpu_afmt_acr res; | ||
| 91 | u8 i; | ||
| 92 | |||
| 93 | /* Precalculated values for common clocks */ | ||
| 94 | for (i = 0; i < ARRAY_SIZE(amdgpu_afmt_predefined_acr); i++) { | ||
| 95 | if (amdgpu_afmt_predefined_acr[i].clock == clock) | ||
| 96 | return amdgpu_afmt_predefined_acr[i]; | ||
| 97 | } | ||
| 98 | |||
| 99 | /* Odd clocks get calculated manually */ | ||
| 100 | amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000); | ||
| 101 | amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100); | ||
| 102 | amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000); | ||
| 103 | |||
| 104 | return res; | ||
| 105 | } | ||
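
Editor's note: for HDMI audio clock regeneration the sink recovers the sample rate as fs = f_TMDS * N / (128 * CTS), so a returned pair only has to preserve N/CTS = 128*fs/f_TMDS; that is the fraction amdgpu_afmt_calc_cts() reduces with gcd() and then rescales toward the code's stated optimum N = 128*freq/1000, warning when N leaves the [128*freq/1500, 128*freq/300] window it checks. A minimal usage sketch follows, assuming the caller already has a pixel clock in kHz; the function name is hypothetical.

```c
/* Sketch only: fetch ACR values for a mode's pixel clock. */
static void example_acr_for_mode(uint32_t pixel_clock_khz)
{
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(pixel_clock_khz);

	/* 148500 kHz hits the table: N=6144, CTS=148500 for 48 kHz audio */
	DRM_DEBUG("48 kHz ACR: N=%d CTS=%d\n", acr.n_48khz, acr.cts_48khz);
}
```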
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c new file mode 100644 index 000000000000..6a588371d54a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | |||
| @@ -0,0 +1,1598 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2007-8 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the "Software"), | ||
| 7 | * to deal in the Software without restriction, including without limitation | ||
| 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 10 | * Software is furnished to do so, subject to the following conditions: | ||
| 11 | * | ||
| 12 | * The above copyright notice and this permission notice shall be included in | ||
| 13 | * all copies or substantial portions of the Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 22 | * | ||
| 23 | * Authors: Dave Airlie | ||
| 24 | * Alex Deucher | ||
| 25 | */ | ||
| 26 | #include <drm/drmP.h> | ||
| 27 | #include <drm/amdgpu_drm.h> | ||
| 28 | #include "amdgpu.h" | ||
| 29 | #include "amdgpu_atombios.h" | ||
| 30 | #include "amdgpu_i2c.h" | ||
| 31 | |||
| 32 | #include "atom.h" | ||
| 33 | #include "atom-bits.h" | ||
| 34 | #include "atombios_encoders.h" | ||
| 35 | #include "bif/bif_4_1_d.h" | ||
| 36 | |||
| 37 | static void amdgpu_atombios_lookup_i2c_gpio_quirks(struct amdgpu_device *adev, | ||
| 38 | ATOM_GPIO_I2C_ASSIGMENT *gpio, | ||
| 39 | u8 index) | ||
| 40 | { | ||
| 41 | |||
| 42 | } | ||
| 43 | |||
| 44 | static struct amdgpu_i2c_bus_rec amdgpu_atombios_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio) | ||
| 45 | { | ||
| 46 | struct amdgpu_i2c_bus_rec i2c; | ||
| 47 | |||
| 48 | memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec)); | ||
| 49 | |||
| 50 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex); | ||
| 51 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex); | ||
| 52 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex); | ||
| 53 | i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex); | ||
| 54 | i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex); | ||
| 55 | i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex); | ||
| 56 | i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex); | ||
| 57 | i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex); | ||
| 58 | i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); | ||
| 59 | i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); | ||
| 60 | i2c.en_clk_mask = (1 << gpio->ucClkEnShift); | ||
| 61 | i2c.en_data_mask = (1 << gpio->ucDataEnShift); | ||
| 62 | i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); | ||
| 63 | i2c.y_data_mask = (1 << gpio->ucDataY_Shift); | ||
| 64 | i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); | ||
| 65 | i2c.a_data_mask = (1 << gpio->ucDataA_Shift); | ||
| 66 | |||
| 67 | if (gpio->sucI2cId.sbfAccess.bfHW_Capable) | ||
| 68 | i2c.hw_capable = true; | ||
| 69 | else | ||
| 70 | i2c.hw_capable = false; | ||
| 71 | |||
| 72 | if (gpio->sucI2cId.ucAccess == 0xa0) | ||
| 73 | i2c.mm_i2c = true; | ||
| 74 | else | ||
| 75 | i2c.mm_i2c = false; | ||
| 76 | |||
| 77 | i2c.i2c_id = gpio->sucI2cId.ucAccess; | ||
| 78 | |||
| 79 | if (i2c.mask_clk_reg) | ||
| 80 | i2c.valid = true; | ||
| 81 | else | ||
| 82 | i2c.valid = false; | ||
| 83 | |||
| 84 | return i2c; | ||
| 85 | } | ||
| 86 | |||
| 87 | struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev, | ||
| 88 | uint8_t id) | ||
| 89 | { | ||
| 90 | struct atom_context *ctx = adev->mode_info.atom_context; | ||
| 91 | ATOM_GPIO_I2C_ASSIGMENT *gpio; | ||
| 92 | struct amdgpu_i2c_bus_rec i2c; | ||
| 93 | int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); | ||
| 94 | struct _ATOM_GPIO_I2C_INFO *i2c_info; | ||
| 95 | uint16_t data_offset, size; | ||
| 96 | int i, num_indices; | ||
| 97 | |||
| 98 | memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec)); | ||
| 99 | i2c.valid = false; | ||
| 100 | |||
| 101 | if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { | ||
| 102 | i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); | ||
| 103 | |||
| 104 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | ||
| 105 | sizeof(ATOM_GPIO_I2C_ASSIGMENT); | ||
| 106 | |||
| 107 | gpio = &i2c_info->asGPIO_Info[0]; | ||
| 108 | for (i = 0; i < num_indices; i++) { | ||
| 109 | |||
| 110 | amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i); | ||
| 111 | |||
| 112 | if (gpio->sucI2cId.ucAccess == id) { | ||
| 113 | i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio); | ||
| 114 | break; | ||
| 115 | } | ||
| 116 | gpio = (ATOM_GPIO_I2C_ASSIGMENT *) | ||
| 117 | ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT)); | ||
| 118 | } | ||
| 119 | } | ||
| 120 | |||
| 121 | return i2c; | ||
| 122 | } | ||
| 123 | |||
| 124 | void amdgpu_atombios_i2c_init(struct amdgpu_device *adev) | ||
| 125 | { | ||
| 126 | struct atom_context *ctx = adev->mode_info.atom_context; | ||
| 127 | ATOM_GPIO_I2C_ASSIGMENT *gpio; | ||
| 128 | struct amdgpu_i2c_bus_rec i2c; | ||
| 129 | int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); | ||
| 130 | struct _ATOM_GPIO_I2C_INFO *i2c_info; | ||
| 131 | uint16_t data_offset, size; | ||
| 132 | int i, num_indices; | ||
| 133 | char stmp[32]; | ||
| 134 | |||
| 135 | if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { | ||
| 136 | i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); | ||
| 137 | |||
| 138 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | ||
| 139 | sizeof(ATOM_GPIO_I2C_ASSIGMENT); | ||
| 140 | |||
| 141 | gpio = &i2c_info->asGPIO_Info[0]; | ||
| 142 | for (i = 0; i < num_indices; i++) { | ||
| 143 | amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i); | ||
| 144 | |||
| 145 | i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio); | ||
| 146 | |||
| 147 | if (i2c.valid) { | ||
| 148 | sprintf(stmp, "0x%x", i2c.i2c_id); | ||
| 149 | adev->i2c_bus[i] = amdgpu_i2c_create(adev->ddev, &i2c, stmp); | ||
| 150 | } | ||
| 151 | gpio = (ATOM_GPIO_I2C_ASSIGMENT *) | ||
| 152 | ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT)); | ||
| 153 | } | ||
| 154 | } | ||
| 155 | } | ||
| 156 | |||
| 157 | struct amdgpu_gpio_rec | ||
| 158 | amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev, | ||
| 159 | u8 id) | ||
| 160 | { | ||
| 161 | struct atom_context *ctx = adev->mode_info.atom_context; | ||
| 162 | struct amdgpu_gpio_rec gpio; | ||
| 163 | int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT); | ||
| 164 | struct _ATOM_GPIO_PIN_LUT *gpio_info; | ||
| 165 | ATOM_GPIO_PIN_ASSIGNMENT *pin; | ||
| 166 | u16 data_offset, size; | ||
| 167 | int i, num_indices; | ||
| 168 | |||
| 169 | memset(&gpio, 0, sizeof(struct amdgpu_gpio_rec)); | ||
| 170 | gpio.valid = false; | ||
| 171 | |||
| 172 | if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { | ||
| 173 | gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset); | ||
| 174 | |||
| 175 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | ||
| 176 | sizeof(ATOM_GPIO_PIN_ASSIGNMENT); | ||
| 177 | |||
| 178 | pin = gpio_info->asGPIO_Pin; | ||
| 179 | for (i = 0; i < num_indices; i++) { | ||
| 180 | if (id == pin->ucGPIO_ID) { | ||
| 181 | gpio.id = pin->ucGPIO_ID; | ||
| 182 | gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex); | ||
| 183 | gpio.shift = pin->ucGpioPinBitShift; | ||
| 184 | gpio.mask = (1 << pin->ucGpioPinBitShift); | ||
| 185 | gpio.valid = true; | ||
| 186 | break; | ||
| 187 | } | ||
| 188 | pin = (ATOM_GPIO_PIN_ASSIGNMENT *) | ||
| 189 | ((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT)); | ||
| 190 | } | ||
| 191 | } | ||
| 192 | |||
| 193 | return gpio; | ||
| 194 | } | ||
| 195 | |||
| 196 | static struct amdgpu_hpd | ||
| 197 | amdgpu_atombios_get_hpd_info_from_gpio(struct amdgpu_device *adev, | ||
| 198 | struct amdgpu_gpio_rec *gpio) | ||
| 199 | { | ||
| 200 | struct amdgpu_hpd hpd; | ||
| 201 | u32 reg; | ||
| 202 | |||
| 203 | memset(&hpd, 0, sizeof(struct amdgpu_hpd)); | ||
| 204 | |||
| 205 | reg = amdgpu_display_hpd_get_gpio_reg(adev); | ||
| 206 | |||
| 207 | hpd.gpio = *gpio; | ||
| 208 | if (gpio->reg == reg) { | ||
| 209 | switch (gpio->mask) { | ||
| 210 | case (1 << 0): | ||
| 211 | hpd.hpd = AMDGPU_HPD_1; | ||
| 212 | break; | ||
| 213 | case (1 << 8): | ||
| 214 | hpd.hpd = AMDGPU_HPD_2; | ||
| 215 | break; | ||
| 216 | case (1 << 16): | ||
| 217 | hpd.hpd = AMDGPU_HPD_3; | ||
| 218 | break; | ||
| 219 | case (1 << 24): | ||
| 220 | hpd.hpd = AMDGPU_HPD_4; | ||
| 221 | break; | ||
| 222 | case (1 << 26): | ||
| 223 | hpd.hpd = AMDGPU_HPD_5; | ||
| 224 | break; | ||
| 225 | case (1 << 28): | ||
| 226 | hpd.hpd = AMDGPU_HPD_6; | ||
| 227 | break; | ||
| 228 | default: | ||
| 229 | hpd.hpd = AMDGPU_HPD_NONE; | ||
| 230 | break; | ||
| 231 | } | ||
| 232 | } else | ||
| 233 | hpd.hpd = AMDGPU_HPD_NONE; | ||
| 234 | return hpd; | ||
| 235 | } | ||
| 236 | |||
| 237 | static bool amdgpu_atombios_apply_quirks(struct amdgpu_device *adev, | ||
| 238 | uint32_t supported_device, | ||
| 239 | int *connector_type, | ||
| 240 | struct amdgpu_i2c_bus_rec *i2c_bus, | ||
| 241 | uint16_t *line_mux, | ||
| 242 | struct amdgpu_hpd *hpd) | ||
| 243 | { | ||
| 244 | return true; | ||
| 245 | } | ||
| 246 | |||
| 247 | static const int object_connector_convert[] = { | ||
| 248 | DRM_MODE_CONNECTOR_Unknown, | ||
| 249 | DRM_MODE_CONNECTOR_DVII, | ||
| 250 | DRM_MODE_CONNECTOR_DVII, | ||
| 251 | DRM_MODE_CONNECTOR_DVID, | ||
| 252 | DRM_MODE_CONNECTOR_DVID, | ||
| 253 | DRM_MODE_CONNECTOR_VGA, | ||
| 254 | DRM_MODE_CONNECTOR_Composite, | ||
| 255 | DRM_MODE_CONNECTOR_SVIDEO, | ||
| 256 | DRM_MODE_CONNECTOR_Unknown, | ||
| 257 | DRM_MODE_CONNECTOR_Unknown, | ||
| 258 | DRM_MODE_CONNECTOR_9PinDIN, | ||
| 259 | DRM_MODE_CONNECTOR_Unknown, | ||
| 260 | DRM_MODE_CONNECTOR_HDMIA, | ||
| 261 | DRM_MODE_CONNECTOR_HDMIB, | ||
| 262 | DRM_MODE_CONNECTOR_LVDS, | ||
| 263 | DRM_MODE_CONNECTOR_9PinDIN, | ||
| 264 | DRM_MODE_CONNECTOR_Unknown, | ||
| 265 | DRM_MODE_CONNECTOR_Unknown, | ||
| 266 | DRM_MODE_CONNECTOR_Unknown, | ||
| 267 | DRM_MODE_CONNECTOR_DisplayPort, | ||
| 268 | DRM_MODE_CONNECTOR_eDP, | ||
| 269 | DRM_MODE_CONNECTOR_Unknown | ||
| 270 | }; | ||
| 271 | |||
| 272 | bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev) | ||
| 273 | { | ||
| 274 | struct amdgpu_mode_info *mode_info = &adev->mode_info; | ||
| 275 | struct atom_context *ctx = mode_info->atom_context; | ||
| 276 | int index = GetIndexIntoMasterTable(DATA, Object_Header); | ||
| 277 | u16 size, data_offset; | ||
| 278 | u8 frev, crev; | ||
| 279 | ATOM_CONNECTOR_OBJECT_TABLE *con_obj; | ||
| 280 | ATOM_ENCODER_OBJECT_TABLE *enc_obj; | ||
| 281 | ATOM_OBJECT_TABLE *router_obj; | ||
| 282 | ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; | ||
| 283 | ATOM_OBJECT_HEADER *obj_header; | ||
| 284 | int i, j, k, path_size, device_support; | ||
| 285 | int connector_type; | ||
| 286 | u16 conn_id, connector_object_id; | ||
| 287 | struct amdgpu_i2c_bus_rec ddc_bus; | ||
| 288 | struct amdgpu_router router; | ||
| 289 | struct amdgpu_gpio_rec gpio; | ||
| 290 | struct amdgpu_hpd hpd; | ||
| 291 | |||
| 292 | if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) | ||
| 293 | return false; | ||
| 294 | |||
| 295 | if (crev < 2) | ||
| 296 | return false; | ||
| 297 | |||
| 298 | obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); | ||
| 299 | path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) | ||
| 300 | (ctx->bios + data_offset + | ||
| 301 | le16_to_cpu(obj_header->usDisplayPathTableOffset)); | ||
| 302 | con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *) | ||
| 303 | (ctx->bios + data_offset + | ||
| 304 | le16_to_cpu(obj_header->usConnectorObjectTableOffset)); | ||
| 305 | enc_obj = (ATOM_ENCODER_OBJECT_TABLE *) | ||
| 306 | (ctx->bios + data_offset + | ||
| 307 | le16_to_cpu(obj_header->usEncoderObjectTableOffset)); | ||
| 308 | router_obj = (ATOM_OBJECT_TABLE *) | ||
| 309 | (ctx->bios + data_offset + | ||
| 310 | le16_to_cpu(obj_header->usRouterObjectTableOffset)); | ||
| 311 | device_support = le16_to_cpu(obj_header->usDeviceSupport); | ||
| 312 | |||
| 313 | path_size = 0; | ||
| 314 | for (i = 0; i < path_obj->ucNumOfDispPath; i++) { | ||
| 315 | uint8_t *addr = (uint8_t *) path_obj->asDispPath; | ||
| 316 | ATOM_DISPLAY_OBJECT_PATH *path; | ||
| 317 | addr += path_size; | ||
| 318 | path = (ATOM_DISPLAY_OBJECT_PATH *) addr; | ||
| 319 | path_size += le16_to_cpu(path->usSize); | ||
| 320 | |||
| 321 | if (device_support & le16_to_cpu(path->usDeviceTag)) { | ||
| 322 | uint8_t con_obj_id, con_obj_num, con_obj_type; | ||
| 323 | |||
| 324 | con_obj_id = | ||
| 325 | (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK) | ||
| 326 | >> OBJECT_ID_SHIFT; | ||
| 327 | con_obj_num = | ||
| 328 | (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK) | ||
| 329 | >> ENUM_ID_SHIFT; | ||
| 330 | con_obj_type = | ||
| 331 | (le16_to_cpu(path->usConnObjectId) & | ||
| 332 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; | ||
| 333 | |||
| 334 | connector_type = | ||
| 335 | object_connector_convert[con_obj_id]; | ||
| 336 | connector_object_id = con_obj_id; | ||
| 337 | |||
| 338 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | ||
| 339 | continue; | ||
| 340 | |||
| 341 | router.ddc_valid = false; | ||
| 342 | router.cd_valid = false; | ||
| 343 | for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { | ||
| 344 | uint8_t grph_obj_id, grph_obj_num, grph_obj_type; | ||
| 345 | |||
| 346 | grph_obj_id = | ||
| 347 | (le16_to_cpu(path->usGraphicObjIds[j]) & | ||
| 348 | OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
| 349 | grph_obj_num = | ||
| 350 | (le16_to_cpu(path->usGraphicObjIds[j]) & | ||
| 351 | ENUM_ID_MASK) >> ENUM_ID_SHIFT; | ||
| 352 | grph_obj_type = | ||
| 353 | (le16_to_cpu(path->usGraphicObjIds[j]) & | ||
| 354 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; | ||
| 355 | |||
| 356 | if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) { | ||
| 357 | for (k = 0; k < enc_obj->ucNumberOfObjects; k++) { | ||
| 358 | u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID); | ||
| 359 | if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) { | ||
| 360 | ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) | ||
| 361 | (ctx->bios + data_offset + | ||
| 362 | le16_to_cpu(enc_obj->asObjects[k].usRecordOffset)); | ||
| 363 | ATOM_ENCODER_CAP_RECORD *cap_record; | ||
| 364 | u16 caps = 0; | ||
| 365 | |||
| 366 | while (record->ucRecordSize > 0 && | ||
| 367 | record->ucRecordType > 0 && | ||
| 368 | record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { | ||
| 369 | switch (record->ucRecordType) { | ||
| 370 | case ATOM_ENCODER_CAP_RECORD_TYPE: | ||
| 371 | cap_record = (ATOM_ENCODER_CAP_RECORD *) | ||
| 372 | record; | ||
| 373 | caps = le16_to_cpu(cap_record->usEncoderCap); | ||
| 374 | break; | ||
| 375 | } | ||
| 376 | record = (ATOM_COMMON_RECORD_HEADER *) | ||
| 377 | ((char *)record + record->ucRecordSize); | ||
| 378 | } | ||
| 379 | amdgpu_display_add_encoder(adev, encoder_obj, | ||
| 380 | le16_to_cpu(path->usDeviceTag), | ||
| 381 | caps); | ||
| 382 | } | ||
| 383 | } | ||
| 384 | } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { | ||
| 385 | for (k = 0; k < router_obj->ucNumberOfObjects; k++) { | ||
| 386 | u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID); | ||
| 387 | if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) { | ||
| 388 | ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) | ||
| 389 | (ctx->bios + data_offset + | ||
| 390 | le16_to_cpu(router_obj->asObjects[k].usRecordOffset)); | ||
| 391 | ATOM_I2C_RECORD *i2c_record; | ||
| 392 | ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; | ||
| 393 | ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path; | ||
| 394 | ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path; | ||
| 395 | ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table = | ||
| 396 | (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) | ||
| 397 | (ctx->bios + data_offset + | ||
| 398 | le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset)); | ||
| 399 | u8 *num_dst_objs = (u8 *) | ||
| 400 | ((u8 *)router_src_dst_table + 1 + | ||
| 401 | (router_src_dst_table->ucNumberOfSrc * 2)); | ||
| 402 | u16 *dst_objs = (u16 *)(num_dst_objs + 1); | ||
| 403 | int enum_id; | ||
| 404 | |||
| 405 | router.router_id = router_obj_id; | ||
| 406 | for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) { | ||
| 407 | if (le16_to_cpu(path->usConnObjectId) == | ||
| 408 | le16_to_cpu(dst_objs[enum_id])) | ||
| 409 | break; | ||
| 410 | } | ||
| 411 | |||
| 412 | while (record->ucRecordSize > 0 && | ||
| 413 | record->ucRecordType > 0 && | ||
| 414 | record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { | ||
| 415 | switch (record->ucRecordType) { | ||
| 416 | case ATOM_I2C_RECORD_TYPE: | ||
| 417 | i2c_record = | ||
| 418 | (ATOM_I2C_RECORD *) | ||
| 419 | record; | ||
| 420 | i2c_config = | ||
| 421 | (ATOM_I2C_ID_CONFIG_ACCESS *) | ||
| 422 | &i2c_record->sucI2cId; | ||
| 423 | router.i2c_info = | ||
| 424 | amdgpu_atombios_lookup_i2c_gpio(adev, | ||
| 425 | i2c_config-> | ||
| 426 | ucAccess); | ||
| 427 | router.i2c_addr = i2c_record->ucI2CAddr >> 1; | ||
| 428 | break; | ||
| 429 | case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE: | ||
| 430 | ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *) | ||
| 431 | record; | ||
| 432 | router.ddc_valid = true; | ||
| 433 | router.ddc_mux_type = ddc_path->ucMuxType; | ||
| 434 | router.ddc_mux_control_pin = ddc_path->ucMuxControlPin; | ||
| 435 | router.ddc_mux_state = ddc_path->ucMuxState[enum_id]; | ||
| 436 | break; | ||
| 437 | case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE: | ||
| 438 | cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *) | ||
| 439 | record; | ||
| 440 | router.cd_valid = true; | ||
| 441 | router.cd_mux_type = cd_path->ucMuxType; | ||
| 442 | router.cd_mux_control_pin = cd_path->ucMuxControlPin; | ||
| 443 | router.cd_mux_state = cd_path->ucMuxState[enum_id]; | ||
| 444 | break; | ||
| 445 | } | ||
| 446 | record = (ATOM_COMMON_RECORD_HEADER *) | ||
| 447 | ((char *)record + record->ucRecordSize); | ||
| 448 | } | ||
| 449 | } | ||
| 450 | } | ||
| 451 | } | ||
| 452 | } | ||
| 453 | |||
| 454 | /* look up gpio for ddc, hpd */ | ||
| 455 | ddc_bus.valid = false; | ||
| 456 | hpd.hpd = AMDGPU_HPD_NONE; | ||
| 457 | if ((le16_to_cpu(path->usDeviceTag) & | ||
| 458 | (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) { | ||
| 459 | for (j = 0; j < con_obj->ucNumberOfObjects; j++) { | ||
| 460 | if (le16_to_cpu(path->usConnObjectId) == | ||
| 461 | le16_to_cpu(con_obj->asObjects[j]. | ||
| 462 | usObjectID)) { | ||
| 463 | ATOM_COMMON_RECORD_HEADER *record = | ||
| 464 | (ATOM_COMMON_RECORD_HEADER *) | ||
| 465 | (ctx->bios + data_offset + | ||
| 466 | le16_to_cpu(con_obj->asObjects[j].usRecordOffset)); | ||
| 471 | ATOM_I2C_RECORD *i2c_record; | ||
| 472 | ATOM_HPD_INT_RECORD *hpd_record; | ||
| 473 | ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; | ||
| 474 | |||
| 475 | while (record->ucRecordSize > 0 && | ||
| 476 | record->ucRecordType > 0 && | ||
| 477 | record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { | ||
| 478 | switch (record->ucRecordType) { | ||
| 479 | case ATOM_I2C_RECORD_TYPE: | ||
| 480 | i2c_record = | ||
| 481 | (ATOM_I2C_RECORD *) | ||
| 482 | record; | ||
| 483 | i2c_config = | ||
| 484 | (ATOM_I2C_ID_CONFIG_ACCESS *) | ||
| 485 | &i2c_record->sucI2cId; | ||
| 486 | ddc_bus = amdgpu_atombios_lookup_i2c_gpio(adev, | ||
| 487 | i2c_config-> | ||
| 488 | ucAccess); | ||
| 489 | break; | ||
| 490 | case ATOM_HPD_INT_RECORD_TYPE: | ||
| 491 | hpd_record = | ||
| 492 | (ATOM_HPD_INT_RECORD *) | ||
| 493 | record; | ||
| 494 | gpio = amdgpu_atombios_lookup_gpio(adev, | ||
| 495 | hpd_record->ucHPDIntGPIOID); | ||
| 496 | hpd = amdgpu_atombios_get_hpd_info_from_gpio(adev, &gpio); | ||
| 497 | hpd.plugged_state = hpd_record->ucPlugged_PinState; | ||
| 498 | break; | ||
| 499 | } | ||
| 500 | record = (ATOM_COMMON_RECORD_HEADER *) | ||
| 501 | ((char *)record + record->ucRecordSize); | ||
| 506 | } | ||
| 507 | break; | ||
| 508 | } | ||
| 509 | } | ||
| 510 | } | ||
| 511 | |||
| 512 | /* needed for aux chan transactions */ | ||
| 513 | ddc_bus.hpd = hpd.hpd; | ||
| 514 | |||
| 515 | conn_id = le16_to_cpu(path->usConnObjectId); | ||
| 516 | |||
| 517 | if (!amdgpu_atombios_apply_quirks | ||
| 518 | (adev, le16_to_cpu(path->usDeviceTag), &connector_type, | ||
| 519 | &ddc_bus, &conn_id, &hpd)) | ||
| 520 | continue; | ||
| 521 | |||
| 522 | amdgpu_display_add_connector(adev, | ||
| 523 | conn_id, | ||
| 524 | le16_to_cpu(path->usDeviceTag), | ||
| 525 | connector_type, &ddc_bus, | ||
| 526 | connector_object_id, | ||
| 527 | &hpd, | ||
| 528 | &router); | ||
| 529 | |||
| 530 | } | ||
| 531 | } | ||
| 532 | |||
| 533 | amdgpu_link_encoder_connector(adev->ddev); | ||
| 534 | |||
| 535 | return true; | ||
| 536 | } | ||
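
Editor's note: the display-path walk above decodes each 16-bit ATOM object id three times with the same mask/shift pairs (for the connector id and again for each graphics object). A hypothetical helper capturing that pattern is sketched below; the name atom_decode_object_id is illustrative, not from the patch, while the masks and shifts come from the ATOM headers used above.

```c
/* Sketch: split an ATOM object id into its id/enum/type fields. */
static inline void atom_decode_object_id(u16 object_id,
					 u8 *obj_id, u8 *enum_num, u8 *type)
{
	*obj_id   = (object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	*enum_num = (object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
	*type     = (object_id & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
}
```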
| 537 | |||
| 538 | union firmware_info { | ||
| 539 | ATOM_FIRMWARE_INFO info; | ||
| 540 | ATOM_FIRMWARE_INFO_V1_2 info_12; | ||
| 541 | ATOM_FIRMWARE_INFO_V1_3 info_13; | ||
| 542 | ATOM_FIRMWARE_INFO_V1_4 info_14; | ||
| 543 | ATOM_FIRMWARE_INFO_V2_1 info_21; | ||
| 544 | ATOM_FIRMWARE_INFO_V2_2 info_22; | ||
| 545 | }; | ||
| 546 | |||
| 547 | int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) | ||
| 548 | { | ||
| 549 | struct amdgpu_mode_info *mode_info = &adev->mode_info; | ||
| 550 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); | ||
| 551 | uint8_t frev, crev; | ||
| 552 | uint16_t data_offset; | ||
| 553 | int ret = -EINVAL; | ||
| 554 | |||
| 555 | if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
| 556 | &frev, &crev, &data_offset)) { | ||
| 557 | int i; | ||
| 558 | struct amdgpu_pll *ppll = &adev->clock.ppll[0]; | ||
| 559 | struct amdgpu_pll *spll = &adev->clock.spll; | ||
| 560 | struct amdgpu_pll *mpll = &adev->clock.mpll; | ||
| 561 | union firmware_info *firmware_info = | ||
| 562 | (union firmware_info *)(mode_info->atom_context->bios + | ||
| 563 | data_offset); | ||
| 564 | /* pixel clocks */ | ||
| 565 | ppll->reference_freq = | ||
| 566 | le16_to_cpu(firmware_info->info.usReferenceClock); | ||
| 567 | ppll->reference_div = 0; | ||
| 568 | |||
| 569 | if (crev < 2) | ||
| 570 | ppll->pll_out_min = | ||
| 571 | le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); | ||
| 572 | else | ||
| 573 | ppll->pll_out_min = | ||
| 574 | le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); | ||
| 575 | ppll->pll_out_max = | ||
| 576 | le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); | ||
| 577 | |||
| 578 | if (crev >= 4) { | ||
| 579 | ppll->lcd_pll_out_min = | ||
| 580 | le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; | ||
| 581 | if (ppll->lcd_pll_out_min == 0) | ||
| 582 | ppll->lcd_pll_out_min = ppll->pll_out_min; | ||
| 583 | ppll->lcd_pll_out_max = | ||
| 584 | le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; | ||
| 585 | if (ppll->lcd_pll_out_max == 0) | ||
| 586 | ppll->lcd_pll_out_max = ppll->pll_out_max; | ||
| 587 | } else { | ||
| 588 | ppll->lcd_pll_out_min = ppll->pll_out_min; | ||
| 589 | ppll->lcd_pll_out_max = ppll->pll_out_max; | ||
| 590 | } | ||
| 591 | |||
| 592 | if (ppll->pll_out_min == 0) | ||
| 593 | ppll->pll_out_min = 64800; | ||
| 594 | |||
| 595 | ppll->pll_in_min = | ||
| 596 | le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input); | ||
| 597 | ppll->pll_in_max = | ||
| 598 | le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input); | ||
| 599 | |||
| 600 | ppll->min_post_div = 2; | ||
| 601 | ppll->max_post_div = 0x7f; | ||
| 602 | ppll->min_frac_feedback_div = 0; | ||
| 603 | ppll->max_frac_feedback_div = 9; | ||
| 604 | ppll->min_ref_div = 2; | ||
| 605 | ppll->max_ref_div = 0x3ff; | ||
| 606 | ppll->min_feedback_div = 4; | ||
| 607 | ppll->max_feedback_div = 0xfff; | ||
| 608 | ppll->best_vco = 0; | ||
| 609 | |||
| 610 | for (i = 1; i < AMDGPU_MAX_PPLL; i++) | ||
| 611 | adev->clock.ppll[i] = *ppll; | ||
| 612 | |||
| 613 | /* system clock */ | ||
| 614 | spll->reference_freq = | ||
| 615 | le16_to_cpu(firmware_info->info_21.usCoreReferenceClock); | ||
| 616 | spll->reference_div = 0; | ||
| 617 | |||
| 618 | spll->pll_out_min = | ||
| 619 | le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output); | ||
| 620 | spll->pll_out_max = | ||
| 621 | le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output); | ||
| 622 | |||
| 623 | /* ??? */ | ||
| 624 | if (spll->pll_out_min == 0) | ||
| 625 | spll->pll_out_min = 64800; | ||
| 626 | |||
| 627 | spll->pll_in_min = | ||
| 628 | le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input); | ||
| 629 | spll->pll_in_max = | ||
| 630 | le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input); | ||
| 631 | |||
| 632 | spll->min_post_div = 1; | ||
| 633 | spll->max_post_div = 1; | ||
| 634 | spll->min_ref_div = 2; | ||
| 635 | spll->max_ref_div = 0xff; | ||
| 636 | spll->min_feedback_div = 4; | ||
| 637 | spll->max_feedback_div = 0xff; | ||
| 638 | spll->best_vco = 0; | ||
| 639 | |||
| 640 | /* memory clock */ | ||
| 641 | mpll->reference_freq = | ||
| 642 | le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock); | ||
| 643 | mpll->reference_div = 0; | ||
| 644 | |||
| 645 | mpll->pll_out_min = | ||
| 646 | le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output); | ||
| 647 | mpll->pll_out_max = | ||
| 648 | le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output); | ||
| 649 | |||
| 650 | /* ??? */ | ||
| 651 | if (mpll->pll_out_min == 0) | ||
| 652 | mpll->pll_out_min = 64800; | ||
| 653 | |||
| 654 | mpll->pll_in_min = | ||
| 655 | le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input); | ||
| 656 | mpll->pll_in_max = | ||
| 657 | le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input); | ||
| 658 | |||
| 659 | adev->clock.default_sclk = | ||
| 660 | le32_to_cpu(firmware_info->info.ulDefaultEngineClock); | ||
| 661 | adev->clock.default_mclk = | ||
| 662 | le32_to_cpu(firmware_info->info.ulDefaultMemoryClock); | ||
| 663 | |||
| 664 | mpll->min_post_div = 1; | ||
| 665 | mpll->max_post_div = 1; | ||
| 666 | mpll->min_ref_div = 2; | ||
| 667 | mpll->max_ref_div = 0xff; | ||
| 668 | mpll->min_feedback_div = 4; | ||
| 669 | mpll->max_feedback_div = 0xff; | ||
| 670 | mpll->best_vco = 0; | ||
| 671 | |||
| 672 | /* disp clock */ | ||
| 673 | adev->clock.default_dispclk = | ||
| 674 | le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); | ||
| 675 | if (adev->clock.default_dispclk == 0) | ||
| 676 | adev->clock.default_dispclk = 54000; /* 540 MHz */ | ||
| 677 | adev->clock.dp_extclk = | ||
| 678 | le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); | ||
| 679 | adev->clock.current_dispclk = adev->clock.default_dispclk; | ||
| 680 | |||
| 681 | adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock); | ||
| 682 | if (adev->clock.max_pixel_clock == 0) | ||
| 683 | adev->clock.max_pixel_clock = 40000; | ||
| 684 | |||
| 685 | /* not technically a clock, but... */ | ||
| 686 | adev->mode_info.firmware_flags = | ||
| 687 | le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess); | ||
| 688 | |||
| 689 | ret = 0; | ||
| 690 | } | ||
| 691 | |||
| 692 | adev->pm.current_sclk = adev->clock.default_sclk; | ||
| 693 | adev->pm.current_mclk = adev->clock.default_mclk; | ||
| 694 | |||
| 695 | return ret; | ||
| 696 | } | ||
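
Editor's note: the FirmwareInfo clock fields read above are stored in 10 kHz units throughout, which is why the default_dispclk fallback of 54000 corresponds to 540 MHz. A trivial illustrative conversion for reading these values in logs; the helper name is hypothetical.

```c
/* Sketch: AtomBIOS clock fields are stored in 10 kHz units. */
static inline u32 atom_clk_10khz_to_mhz(u32 clk_10khz)
{
	return clk_10khz / 100;	/* 54000 -> 540 (MHz) */
}
```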
| 697 | |||
| 698 | union igp_info { | ||
| 699 | struct _ATOM_INTEGRATED_SYSTEM_INFO info; | ||
| 700 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; | ||
| 701 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; | ||
| 702 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; | ||
| 703 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; | ||
| 704 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9; | ||
| 705 | }; | ||
| 706 | |||
| 707 | static void amdgpu_atombios_get_igp_ss_overrides(struct amdgpu_device *adev, | ||
| 708 | struct amdgpu_atom_ss *ss, | ||
| 709 | int id) | ||
| 710 | { | ||
| 711 | struct amdgpu_mode_info *mode_info = &adev->mode_info; | ||
| 712 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | ||
| 713 | u16 data_offset, size; | ||
| 714 | union igp_info *igp_info; | ||
| 715 | u8 frev, crev; | ||
| 716 | u16 percentage = 0, rate = 0; | ||
| 717 | |||
| 718 | /* get any igp specific overrides */ | ||
| 719 | if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, | ||
| 720 | &frev, &crev, &data_offset)) { | ||
| 721 | igp_info = (union igp_info *) | ||
| 722 | (mode_info->atom_context->bios + data_offset); | ||
| 723 | switch (crev) { | ||
| 724 | case 6: | ||
| 725 | switch (id) { | ||
| 726 | case ASIC_INTERNAL_SS_ON_TMDS: | ||
| 727 | percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage); | ||
| 728 | rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz); | ||
| 729 | break; | ||
| 730 | case ASIC_INTERNAL_SS_ON_HDMI: | ||
| 731 | percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage); | ||
| 732 | rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz); | ||
| 733 | break; | ||
| 734 | case ASIC_INTERNAL_SS_ON_LVDS: | ||
| 735 | percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage); | ||
| 736 | rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz); | ||
| 737 | break; | ||
| 738 | } | ||
| 739 | break; | ||
| 740 | case 7: | ||
| 741 | switch (id) { | ||
| 742 | case ASIC_INTERNAL_SS_ON_TMDS: | ||
| 743 | percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage); | ||
| 744 | rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz); | ||
| 745 | break; | ||
| 746 | case ASIC_INTERNAL_SS_ON_HDMI: | ||
| 747 | percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage); | ||
| 748 | rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz); | ||
| 749 | break; | ||
| 750 | case ASIC_INTERNAL_SS_ON_LVDS: | ||
| 751 | percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage); | ||
| 752 | rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz); | ||
| 753 | break; | ||
| 754 | } | ||
| 755 | break; | ||
| 756 | case 8: | ||
| 757 | switch (id) { | ||
| 758 | case ASIC_INTERNAL_SS_ON_TMDS: | ||
| 759 | percentage = le16_to_cpu(igp_info->info_8.usDVISSPercentage); | ||
| 760 | rate = le16_to_cpu(igp_info->info_8.usDVISSpreadRateIn10Hz); | ||
| 761 | break; | ||
| 762 | case ASIC_INTERNAL_SS_ON_HDMI: | ||
| 763 | percentage = le16_to_cpu(igp_info->info_8.usHDMISSPercentage); | ||
| 764 | rate = le16_to_cpu(igp_info->info_8.usHDMISSpreadRateIn10Hz); | ||
| 765 | break; | ||
| 766 | case ASIC_INTERNAL_SS_ON_LVDS: | ||
| 767 | percentage = le16_to_cpu(igp_info->info_8.usLvdsSSPercentage); | ||
| 768 | rate = le16_to_cpu(igp_info->info_8.usLvdsSSpreadRateIn10Hz); | ||
| 769 | break; | ||
| 770 | } | ||
| 771 | break; | ||
| 772 | case 9: | ||
| 773 | switch (id) { | ||
| 774 | case ASIC_INTERNAL_SS_ON_TMDS: | ||
| 775 | percentage = le16_to_cpu(igp_info->info_9.usDVISSPercentage); | ||
| 776 | rate = le16_to_cpu(igp_info->info_9.usDVISSpreadRateIn10Hz); | ||
| 777 | break; | ||
| 778 | case ASIC_INTERNAL_SS_ON_HDMI: | ||
| 779 | percentage = le16_to_cpu(igp_info->info_9.usHDMISSPercentage); | ||
| 780 | rate = le16_to_cpu(igp_info->info_9.usHDMISSpreadRateIn10Hz); | ||
| 781 | break; | ||
| 782 | case ASIC_INTERNAL_SS_ON_LVDS: | ||
| 783 | percentage = le16_to_cpu(igp_info->info_9.usLvdsSSPercentage); | ||
| 784 | rate = le16_to_cpu(igp_info->info_9.usLvdsSSpreadRateIn10Hz); | ||
| 785 | break; | ||
| 786 | } | ||
| 787 | break; | ||
| 788 | default: | ||
| 789 | DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); | ||
| 790 | break; | ||
| 791 | } | ||
| 792 | if (percentage) | ||
| 793 | ss->percentage = percentage; | ||
| 794 | if (rate) | ||
| 795 | ss->rate = rate; | ||
| 796 | } | ||
| 797 | } | ||
| 798 | |||
| 799 | union asic_ss_info { | ||
| 800 | struct _ATOM_ASIC_INTERNAL_SS_INFO info; | ||
| 801 | struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2; | ||
| 802 | struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3; | ||
| 803 | }; | ||
| 804 | |||
| 805 | union asic_ss_assignment { | ||
| 806 | struct _ATOM_ASIC_SS_ASSIGNMENT v1; | ||
| 807 | struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2; | ||
| 808 | struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3; | ||
| 809 | }; | ||
| 810 | |||
| 811 | bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev, | ||
| 812 | struct amdgpu_atom_ss *ss, | ||
| 813 | int id, u32 clock) | ||
| 814 | { | ||
| 815 | struct amdgpu_mode_info *mode_info = &adev->mode_info; | ||
| 816 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
| 817 | uint16_t data_offset, size; | ||
| 818 | union asic_ss_info *ss_info; | ||
| 819 | union asic_ss_assignment *ss_assign; | ||
| 820 | uint8_t frev, crev; | ||
| 821 | int i, num_indices; | ||
| 822 | |||
| 823 | if (id == ASIC_INTERNAL_MEMORY_SS) { | ||
| 824 | if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT)) | ||
| 825 | return false; | ||
| 826 | } | ||
| 827 | if (id == ASIC_INTERNAL_ENGINE_SS) { | ||
| 828 | if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT)) | ||
| 829 | return false; | ||
| 830 | } | ||
| 831 | |||
| 832 | memset(ss, 0, sizeof(struct amdgpu_atom_ss)); | ||
| 833 | if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, | ||
| 834 | &frev, &crev, &data_offset)) { | ||
| 835 | |||
| 836 | ss_info = | ||
| 837 | (union asic_ss_info *)(mode_info->atom_context->bios + data_offset); | ||
| 838 | |||
| 839 | switch (frev) { | ||
| 840 | case 1: | ||
| 841 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | ||
| 842 | sizeof(ATOM_ASIC_SS_ASSIGNMENT); | ||
| 843 | |||
| 844 | ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]); | ||
| 845 | for (i = 0; i < num_indices; i++) { | ||
| 846 | if ((ss_assign->v1.ucClockIndication == id) && | ||
| 847 | (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) { | ||
| 848 | ss->percentage = | ||
| 849 | le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage); | ||
| 850 | ss->type = ss_assign->v1.ucSpreadSpectrumMode; | ||
| 851 | ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz); | ||
| 852 | ss->percentage_divider = 100; | ||
| 853 | return true; | ||
| 854 | } | ||
| 855 | ss_assign = (union asic_ss_assignment *) | ||
| 856 | ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT)); | ||
| 857 | } | ||
| 858 | break; | ||
| 859 | case 2: | ||
| 860 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | ||
| 861 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); | ||
| 862 | ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]); | ||
| 863 | for (i = 0; i < num_indices; i++) { | ||
| 864 | if ((ss_assign->v2.ucClockIndication == id) && | ||
| 865 | (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) { | ||
| 866 | ss->percentage = | ||
| 867 | le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage); | ||
| 868 | ss->type = ss_assign->v2.ucSpreadSpectrumMode; | ||
| 869 | ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz); | ||
| 870 | ss->percentage_divider = 100; | ||
| 871 | if ((crev == 2) && | ||
| 872 | ((id == ASIC_INTERNAL_ENGINE_SS) || | ||
| 873 | (id == ASIC_INTERNAL_MEMORY_SS))) | ||
| 874 | ss->rate /= 100; | ||
| 875 | return true; | ||
| 876 | } | ||
| 877 | ss_assign = (union asic_ss_assignment *) | ||
| 878 | ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2)); | ||
| 879 | } | ||
| 880 | break; | ||
| 881 | case 3: | ||
| 882 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | ||
| 883 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); | ||
| 884 | ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]); | ||
| 885 | for (i = 0; i < num_indices; i++) { | ||
| 886 | if ((ss_assign->v3.ucClockIndication == id) && | ||
| 887 | (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) { | ||
| 888 | ss->percentage = | ||
| 889 | le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage); | ||
| 890 | ss->type = ss_assign->v3.ucSpreadSpectrumMode; | ||
| 891 | ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz); | ||
| 892 | if (ss_assign->v3.ucSpreadSpectrumMode & | ||
| 893 | SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK) | ||
| 894 | ss->percentage_divider = 1000; | ||
| 895 | else | ||
| 896 | ss->percentage_divider = 100; | ||
| 897 | if ((id == ASIC_INTERNAL_ENGINE_SS) || | ||
| 898 | (id == ASIC_INTERNAL_MEMORY_SS)) | ||
| 899 | ss->rate /= 100; | ||
| 900 | if (adev->flags & AMDGPU_IS_APU) | ||
| 901 | amdgpu_atombios_get_igp_ss_overrides(adev, ss, id); | ||
| 902 | return true; | ||
| 903 | } | ||
| 904 | ss_assign = (union asic_ss_assignment *) | ||
| 905 | ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3)); | ||
| 906 | } | ||
| 907 | break; | ||
| 908 | default: | ||
| 909 | DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev); | ||
| 910 | break; | ||
| 911 | } | ||
| 912 | |||
| 913 | } | ||
| 914 | return false; | ||
| 915 | } | ||
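
Editor's note: ss->percentage is fixed point; the real spread is percentage / percentage_divider percent (the divider is 100, or 1000 when a V3 entry sets SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK), and ss->rate for display clocks is in 10 Hz units per the V2/V3 field names. A minimal usage sketch, with the function name and log format as assumptions:

```c
/* Sketch: query TMDS spread-spectrum settings for a pixel clock. */
static void example_query_tmds_ss(struct amdgpu_device *adev, u32 clock)
{
	struct amdgpu_atom_ss ss;

	if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
					     ASIC_INTERNAL_SS_ON_TMDS, clock))
		DRM_DEBUG("ss: %u/%u %%, mode 0x%x, rate %u (x10 Hz)\n",
			  ss.percentage, ss.percentage_divider,
			  ss.type, ss.rate);
}
```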
| 916 | |||
| 917 | union get_clock_dividers { | ||
| 918 | struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1; | ||
| 919 | struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2; | ||
| 920 | struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3; | ||
| 921 | struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4; | ||
| 922 | struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5; | ||
| 923 | struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6 v6_in; | ||
| 924 | struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 v6_out; | ||
| 925 | }; | ||
| 926 | |||
| 927 | int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, | ||
| 928 | u8 clock_type, | ||
| 929 | u32 clock, | ||
| 930 | bool strobe_mode, | ||
| 931 | struct atom_clock_dividers *dividers) | ||
| 932 | { | ||
| 933 | union get_clock_dividers args; | ||
| 934 | int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL); | ||
| 935 | u8 frev, crev; | ||
| 936 | |||
| 937 | memset(&args, 0, sizeof(args)); | ||
| 938 | memset(dividers, 0, sizeof(struct atom_clock_dividers)); | ||
| 939 | |||
| 940 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) | ||
| 941 | return -EINVAL; | ||
| 942 | |||
| 943 | switch (crev) { | ||
| 944 | case 4: | ||
| 945 | /* fusion */ | ||
| 946 | args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */ | ||
| 947 | |||
| 948 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 949 | |||
| 950 | dividers->post_divider = dividers->post_div = args.v4.ucPostDiv; | ||
| 951 | dividers->real_clock = le32_to_cpu(args.v4.ulClock); | ||
| 952 | break; | ||
| 953 | case 6: | ||
| 954 | /* CI */ | ||
| 955 | /* COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, COMPUTE_GPUCLK_INPUT_FLAG_SCLK */ | ||
| 956 | args.v6_in.ulClock.ulComputeClockFlag = clock_type; | ||
| 957 | args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */ | ||
| 958 | |||
| 959 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 960 | |||
| 961 | dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv); | ||
| 962 | dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac); | ||
| 963 | dividers->ref_div = args.v6_out.ucPllRefDiv; | ||
| 964 | dividers->post_div = args.v6_out.ucPllPostDiv; | ||
| 965 | dividers->flags = args.v6_out.ucPllCntlFlag; | ||
| 966 | dividers->real_clock = le32_to_cpu(args.v6_out.ulClock.ulClock); | ||
| 967 | dividers->post_divider = args.v6_out.ulClock.ucPostDiv; | ||
| 968 | break; | ||
| 969 | default: | ||
| 970 | return -EINVAL; | ||
| 971 | } | ||
| 972 | return 0; | ||
| 973 | } | ||
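
Editor's note: clocks are passed to the ComputeMemoryEnginePLL table in 10 kHz units (per the inline comments above), so requesting a 600 MHz sclk means passing 60000. A minimal usage sketch for the CI (crev 6) path follows; the wrapper function and its error handling are assumptions, while COMPUTE_GPUCLK_INPUT_FLAG_SCLK is the flag named in the code's own comment.

```c
/* Sketch: compute engine-clock dividers for 600 MHz on a CI part. */
static int example_sclk_dividers(struct amdgpu_device *adev)
{
	struct atom_clock_dividers dividers;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev,
			COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
			60000,	/* 600 MHz in 10 kHz units */
			false, &dividers);
	if (ret)
		return ret;

	DRM_DEBUG("ref_div %u post_div %u real clock %u (10 kHz)\n",
		  dividers.ref_div, dividers.post_div, dividers.real_clock);
	return 0;
}
```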
| 974 | |||
| 975 | int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev, | ||
| 976 | u32 clock, | ||
| 977 | bool strobe_mode, | ||
| 978 | struct atom_mpll_param *mpll_param) | ||
| 979 | { | ||
| 980 | COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 args; | ||
| 981 | int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam); | ||
| 982 | u8 frev, crev; | ||
| 983 | |||
| 984 | memset(&args, 0, sizeof(args)); | ||
| 985 | memset(mpll_param, 0, sizeof(struct atom_mpll_param)); | ||
| 986 | |||
| 987 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) | ||
| 988 | return -EINVAL; | ||
| 989 | |||
| 990 | switch (frev) { | ||
| 991 | case 2: | ||
| 992 | switch (crev) { | ||
| 993 | case 1: | ||
| 994 | /* SI */ | ||
| 995 | args.ulClock = cpu_to_le32(clock); /* 10 khz */ | ||
| 996 | args.ucInputFlag = 0; | ||
| 997 | if (strobe_mode) | ||
| 998 | args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN; | ||
| 999 | |||
| 1000 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1001 | |||
| 1002 | mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac); | ||
| 1003 | mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv); | ||
| 1004 | mpll_param->post_div = args.ucPostDiv; | ||
| 1005 | mpll_param->dll_speed = args.ucDllSpeed; | ||
| 1006 | mpll_param->bwcntl = args.ucBWCntl; | ||
| 1007 | mpll_param->vco_mode = | ||
| 1008 | (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK); | ||
| 1009 | mpll_param->yclk_sel = | ||
| 1010 | (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0; | ||
| 1011 | mpll_param->qdr = | ||
| 1012 | (args.ucPllCntlFlag & MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0; | ||
| 1013 | mpll_param->half_rate = | ||
| 1014 | (args.ucPllCntlFlag & MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0; | ||
| 1015 | break; | ||
| 1016 | default: | ||
| 1017 | return -EINVAL; | ||
| 1018 | } | ||
| 1019 | break; | ||
| 1020 | default: | ||
| 1021 | return -EINVAL; | ||
| 1022 | } | ||
| 1023 | return 0; | ||
| 1024 | } | ||
| 1025 | |||
| 1026 | uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev) | ||
| 1027 | { | ||
| 1028 | GET_ENGINE_CLOCK_PS_ALLOCATION args; | ||
| 1029 | int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); | ||
| 1030 | |||
| 1031 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1032 | return le32_to_cpu(args.ulReturnEngineClock); | ||
| 1033 | } | ||
| 1034 | |||
| 1035 | uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev) | ||
| 1036 | { | ||
| 1037 | GET_MEMORY_CLOCK_PS_ALLOCATION args; | ||
| 1038 | int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); | ||
| 1039 | |||
| 1040 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1041 | return le32_to_cpu(args.ulReturnMemoryClock); | ||
| 1042 | } | ||
| 1043 | |||
| 1044 | void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev, | ||
| 1045 | uint32_t eng_clock) | ||
| 1046 | { | ||
| 1047 | SET_ENGINE_CLOCK_PS_ALLOCATION args; | ||
| 1048 | int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock); | ||
| 1049 | |||
| 1050 | args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */ | ||
| 1051 | |||
| 1052 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1053 | } | ||
| 1054 | |||
| 1055 | void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev, | ||
| 1056 | uint32_t mem_clock) | ||
| 1057 | { | ||
| 1058 | SET_MEMORY_CLOCK_PS_ALLOCATION args; | ||
| 1059 | int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock); | ||
| 1060 | |||
| 1061 | if (adev->flags & AMDGPU_IS_APU) | ||
| 1062 | return; | ||
| 1063 | |||
| 1064 | args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */ | ||
| 1065 | |||
| 1066 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1067 | } | ||
| 1068 | |||
| 1069 | void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, | ||
| 1070 | u32 eng_clock, u32 mem_clock) | ||
| 1071 | { | ||
| 1072 | SET_ENGINE_CLOCK_PS_ALLOCATION args; | ||
| 1073 | int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings); | ||
| 1074 | u32 tmp; | ||
| 1075 | |||
| 1076 | memset(&args, 0, sizeof(args)); | ||
| 1077 | |||
| 1078 | tmp = eng_clock & SET_CLOCK_FREQ_MASK; | ||
| 1079 | tmp |= (COMPUTE_ENGINE_PLL_PARAM << 24); | ||
| 1080 | |||
| 1081 | args.ulTargetEngineClock = cpu_to_le32(tmp); | ||
| 1082 | if (mem_clock) | ||
| 1083 | args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK); | ||
| 1084 | |||
| 1085 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1086 | } | ||
| 1087 | |||
| 1088 | union set_voltage { | ||
| 1089 | struct _SET_VOLTAGE_PS_ALLOCATION alloc; | ||
| 1090 | struct _SET_VOLTAGE_PARAMETERS v1; | ||
| 1091 | struct _SET_VOLTAGE_PARAMETERS_V2 v2; | ||
| 1092 | struct _SET_VOLTAGE_PARAMETERS_V1_3 v3; | ||
| 1093 | }; | ||
| 1094 | |||
| 1095 | void amdgpu_atombios_set_voltage(struct amdgpu_device *adev, | ||
| 1096 | u16 voltage_level, | ||
| 1097 | u8 voltage_type) | ||
| 1098 | { | ||
| 1099 | union set_voltage args; | ||
| 1100 | int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); | ||
| 1101 | u8 frev, crev, volt_index = voltage_level; | ||
| 1102 | |||
| 1103 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) | ||
| 1104 | return; | ||
| 1105 | |||
| 1106 | /* 0xff01 is a flag rather than an actual voltage */ | ||
| 1107 | if (voltage_level == 0xff01) | ||
| 1108 | return; | ||
| 1109 | |||
| 1110 | switch (crev) { | ||
| 1111 | case 1: | ||
| 1112 | args.v1.ucVoltageType = voltage_type; | ||
| 1113 | args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; | ||
| 1114 | args.v1.ucVoltageIndex = volt_index; | ||
| 1115 | break; | ||
| 1116 | case 2: | ||
| 1117 | args.v2.ucVoltageType = voltage_type; | ||
| 1118 | args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; | ||
| 1119 | args.v2.usVoltageLevel = cpu_to_le16(voltage_level); | ||
| 1120 | break; | ||
| 1121 | case 3: | ||
| 1122 | args.v3.ucVoltageType = voltage_type; | ||
| 1123 | args.v3.ucVoltageMode = ATOM_SET_VOLTAGE; | ||
| 1124 | args.v3.usVoltageLevel = cpu_to_le16(voltage_level); | ||
| 1125 | break; | ||
| 1126 | default: | ||
| 1127 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
| 1128 | return; | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1132 | } | ||
| 1133 | |||
| 1134 | int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev, | ||
| 1135 | u16 *leakage_id) | ||
| 1136 | { | ||
| 1137 | union set_voltage args; | ||
| 1138 | int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); | ||
| 1139 | u8 frev, crev; | ||
| 1140 | |||
| 1141 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) | ||
| 1142 | return -EINVAL; | ||
| 1143 | |||
| 1144 | switch (crev) { | ||
| 1145 | case 3: | ||
| 1146 | case 4: | ||
| 1147 | args.v3.ucVoltageType = 0; | ||
| 1148 | args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID; | ||
| 1149 | args.v3.usVoltageLevel = 0; | ||
| 1150 | |||
| 1151 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1152 | |||
| 1153 | *leakage_id = le16_to_cpu(args.v3.usVoltageLevel); | ||
| 1154 | break; | ||
| 1155 | default: | ||
| 1156 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
| 1157 | return -EINVAL; | ||
| 1158 | } | ||
| 1159 | |||
| 1160 | return 0; | ||
| 1161 | } | ||
| 1162 | |||
| 1163 | int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev, | ||
| 1164 | u16 *vddc, u16 *vddci, | ||
| 1165 | u16 virtual_voltage_id, | ||
| 1166 | u16 vbios_voltage_id) | ||
| 1167 | { | ||
| 1168 | int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo); | ||
| 1169 | u8 frev, crev; | ||
| 1170 | u16 data_offset, size; | ||
| 1171 | int i, j; | ||
| 1172 | ATOM_ASIC_PROFILING_INFO_V2_1 *profile; | ||
| 1173 | u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf; | ||
| 1174 | |||
| 1175 | *vddc = 0; | ||
| 1176 | *vddci = 0; | ||
| 1177 | |||
| 1178 | if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size, | ||
| 1179 | &frev, &crev, &data_offset)) | ||
| 1180 | return -EINVAL; | ||
| 1181 | |||
| 1182 | profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *) | ||
| 1183 | (adev->mode_info.atom_context->bios + data_offset); | ||
| 1184 | |||
| 1185 | switch (frev) { | ||
| 1186 | case 1: | ||
| 1187 | return -EINVAL; | ||
| 1188 | case 2: | ||
| 1189 | switch (crev) { | ||
| 1190 | case 1: | ||
| 1191 | if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1)) | ||
| 1192 | return -EINVAL; | ||
| 1193 | leakage_bin = (u16 *) | ||
| 1194 | (adev->mode_info.atom_context->bios + data_offset + | ||
| 1195 | le16_to_cpu(profile->usLeakageBinArrayOffset)); | ||
| 1196 | vddc_id_buf = (u16 *) | ||
| 1197 | (adev->mode_info.atom_context->bios + data_offset + | ||
| 1198 | le16_to_cpu(profile->usElbVDDC_IdArrayOffset)); | ||
| 1199 | vddc_buf = (u16 *) | ||
| 1200 | (adev->mode_info.atom_context->bios + data_offset + | ||
| 1201 | le16_to_cpu(profile->usElbVDDC_LevelArrayOffset)); | ||
| 1202 | vddci_id_buf = (u16 *) | ||
| 1203 | (adev->mode_info.atom_context->bios + data_offset + | ||
| 1204 | le16_to_cpu(profile->usElbVDDCI_IdArrayOffset)); | ||
| 1205 | vddci_buf = (u16 *) | ||
| 1206 | (adev->mode_info.atom_context->bios + data_offset + | ||
| 1207 | le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset)); | ||
| 1208 | |||
| 1209 | if (profile->ucElbVDDC_Num > 0) { | ||
| 1210 | for (i = 0; i < profile->ucElbVDDC_Num; i++) { | ||
| 1211 | if (vddc_id_buf[i] == virtual_voltage_id) { | ||
| 1212 | for (j = 0; j < profile->ucLeakageBinNum; j++) { | ||
| 1213 | if (vbios_voltage_id <= leakage_bin[j]) { | ||
| 1214 | *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i]; | ||
| 1215 | break; | ||
| 1216 | } | ||
| 1217 | } | ||
| 1218 | break; | ||
| 1219 | } | ||
| 1220 | } | ||
| 1221 | } | ||
| 1222 | if (profile->ucElbVDDCI_Num > 0) { | ||
| 1223 | for (i = 0; i < profile->ucElbVDDCI_Num; i++) { | ||
| 1224 | if (vddci_id_buf[i] == virtual_voltage_id) { | ||
| 1225 | for (j = 0; j < profile->ucLeakageBinNum; j++) { | ||
| 1226 | if (vbios_voltage_id <= leakage_bin[j]) { | ||
| 1227 | *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i]; | ||
| 1228 | break; | ||
| 1229 | } | ||
| 1230 | } | ||
| 1231 | break; | ||
| 1232 | } | ||
| 1233 | } | ||
| 1234 | } | ||
| 1235 | break; | ||
| 1236 | default: | ||
| 1237 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
| 1238 | return -EINVAL; | ||
| 1239 | } | ||
| 1240 | break; | ||
| 1241 | default: | ||
| 1242 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
| 1243 | return -EINVAL; | ||
| 1244 | } | ||
| 1245 | |||
| 1246 | return 0; | ||
| 1247 | } | ||
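The ELB lookup above is a two-level search: the outer loop finds the column whose entry matches the requested virtual voltage id, the inner loop finds the first leakage bin whose bound covers the VBIOS leakage id, and the voltage comes from a row-major table indexed as `buf[j * num_ids + i]`. A minimal userspace sketch of just that indexing, assuming only the layout implied by `vddc_buf[j * profile->ucElbVDDC_Num + i]`; names and values are illustrative, not real ATOM symbols:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Row-major lookup: one column per virtual voltage id, one row per
 * leakage bin, entry at buf[bin * num_ids + id_index].
 */
static uint16_t elb_lookup(const uint16_t *id_buf, const uint16_t *level_buf,
			   const uint16_t *leakage_bin,
			   int num_ids, int num_bins,
			   uint16_t virtual_voltage_id,
			   uint16_t vbios_voltage_id)
{
	int i, j;

	for (i = 0; i < num_ids; i++) {
		if (id_buf[i] != virtual_voltage_id)
			continue;
		/* first bin whose bound covers the vbios leakage id wins */
		for (j = 0; j < num_bins; j++) {
			if (vbios_voltage_id <= leakage_bin[j])
				return level_buf[j * num_ids + i];
		}
		break;
	}
	return 0;	/* not found; the driver leaves *vddc/*vddci at 0 */
}

int main(void)
{
	const uint16_t ids[]  = { 0xff01, 0xff02 };
	const uint16_t bins[] = { 100, 200 };
	const uint16_t lvls[] = { 900, 910,	/* bin 0 row */
				  950, 960 };	/* bin 1 row */

	/* id 0xff02 with leakage id 150 falls in bin 1 -> prints 960 */
	printf("%u\n", (unsigned)elb_lookup(ids, lvls, bins, 2, 2, 0xff02, 150));
	return 0;
}
```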
| 1248 | |||
| 1249 | union get_voltage_info { | ||
| 1250 | struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in; | ||
| 1251 | struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out; | ||
| 1252 | }; | ||
| 1253 | |||
| 1254 | int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev, | ||
| 1255 | u16 virtual_voltage_id, | ||
| 1256 | u16 *voltage) | ||
| 1257 | { | ||
| 1258 | int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo); | ||
| 1259 | u32 entry_id; | ||
| 1260 | u32 count = adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; | ||
| 1261 | union get_voltage_info args; | ||
| 1262 | |||
| 1263 | for (entry_id = 0; entry_id < count; entry_id++) { | ||
| 1264 | if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v == | ||
| 1265 | virtual_voltage_id) | ||
| 1266 | break; | ||
| 1267 | } | ||
| 1268 | |||
| 1269 | if (entry_id >= count) | ||
| 1270 | return -EINVAL; | ||
| 1271 | |||
| 1272 | args.in.ucVoltageType = VOLTAGE_TYPE_VDDC; | ||
| 1273 | args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; | ||
| 1274 | args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id); | ||
| 1275 | args.in.ulSCLKFreq = | ||
| 1276 | cpu_to_le32(adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk); | ||
| 1277 | |||
| 1278 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1279 | |||
| 1280 | *voltage = le16_to_cpu(args.evv_out.usVoltageLevel); | ||
| 1281 | |||
| 1282 | return 0; | ||
| 1283 | } | ||
| 1284 | |||
| 1285 | union voltage_object_info { | ||
| 1286 | struct _ATOM_VOLTAGE_OBJECT_INFO v1; | ||
| 1287 | struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2; | ||
| 1288 | struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3; | ||
| 1289 | }; | ||
| 1290 | |||
| 1291 | union voltage_object { | ||
| 1292 | struct _ATOM_VOLTAGE_OBJECT v1; | ||
| 1293 | struct _ATOM_VOLTAGE_OBJECT_V2 v2; | ||
| 1294 | union _ATOM_VOLTAGE_OBJECT_V3 v3; | ||
| 1295 | }; | ||
| 1296 | |||
| 1297 | |||
| 1298 | static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *v3, | ||
| 1299 | u8 voltage_type, u8 voltage_mode) | ||
| 1300 | { | ||
| 1301 | u32 size = le16_to_cpu(v3->sHeader.usStructureSize); | ||
| 1302 | u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]); | ||
| 1303 | u8 *start = (u8*)v3; | ||
| 1304 | |||
| 1305 | while (offset < size) { | ||
| 1306 | ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset); | ||
| 1307 | if ((vo->asGpioVoltageObj.sHeader.ucVoltageType == voltage_type) && | ||
| 1308 | (vo->asGpioVoltageObj.sHeader.ucVoltageMode == voltage_mode)) | ||
| 1309 | return vo; | ||
| 1310 | offset += le16_to_cpu(vo->asGpioVoltageObj.sHeader.usSize); | ||
| 1311 | } | ||
| 1312 | return NULL; | ||
| 1313 | } | ||
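The helper above walks a table of variable-size records: starting at the first voltage object, it advances by each record's own size field until the cursor passes the table's total size. A generic sketch of the same walk, with illustrative field names rather than the real ATOM structs, assuming a little-endian host (the driver uses le16_to_cpu for portability); unlike the kernel code, the sketch also bails out on a zero-size record so a corrupt table cannot loop forever:

```c
#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-in for the ATOM record header (type, mode, size). */
struct record_header {
	uint8_t  type;
	uint8_t  mode;
	uint16_t size;	/* size of this record in bytes, incl. header */
};

static const struct record_header *
find_record(const uint8_t *table, uint32_t table_size,
	    uint32_t first_offset, uint8_t type, uint8_t mode)
{
	uint32_t offset = first_offset;

	while (offset + sizeof(struct record_header) <= table_size) {
		const struct record_header *rec =
			(const struct record_header *)(table + offset);

		if (rec->type == type && rec->mode == mode)
			return rec;
		if (rec->size == 0)	/* corrupt table: don't spin forever */
			break;
		offset += rec->size;	/* little-endian host assumed */
	}
	return NULL;
}
```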
| 1314 | |||
| 1315 | bool | ||
| 1316 | amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev, | ||
| 1317 | u8 voltage_type, u8 voltage_mode) | ||
| 1318 | { | ||
| 1319 | int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo); | ||
| 1320 | u8 frev, crev; | ||
| 1321 | u16 data_offset, size; | ||
| 1322 | union voltage_object_info *voltage_info; | ||
| 1323 | |||
| 1324 | if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size, | ||
| 1325 | &frev, &crev, &data_offset)) { | ||
| 1326 | voltage_info = (union voltage_object_info *) | ||
| 1327 | (adev->mode_info.atom_context->bios + data_offset); | ||
| 1328 | |||
| 1329 | switch (frev) { | ||
| 1330 | case 3: | ||
| 1331 | switch (crev) { | ||
| 1332 | case 1: | ||
| 1333 | if (amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3, | ||
| 1334 | voltage_type, voltage_mode)) | ||
| 1335 | return true; | ||
| 1336 | break; | ||
| 1337 | default: | ||
| 1338 | DRM_ERROR("unknown voltage object table\n"); | ||
| 1339 | return false; | ||
| 1340 | } | ||
| 1341 | break; | ||
| 1342 | default: | ||
| 1343 | DRM_ERROR("unknown voltage object table\n"); | ||
| 1344 | return false; | ||
| 1345 | } | ||
| 1346 | |||
| 1347 | } | ||
| 1348 | return false; | ||
| 1349 | } | ||
| 1350 | |||
| 1351 | int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev, | ||
| 1352 | u8 voltage_type, u8 voltage_mode, | ||
| 1353 | struct atom_voltage_table *voltage_table) | ||
| 1354 | { | ||
| 1355 | int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo); | ||
| 1356 | u8 frev, crev; | ||
| 1357 | u16 data_offset, size; | ||
| 1358 | int i; | ||
| 1359 | union voltage_object_info *voltage_info; | ||
| 1360 | union voltage_object *voltage_object = NULL; | ||
| 1361 | |||
| 1362 | if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size, | ||
| 1363 | &frev, &crev, &data_offset)) { | ||
| 1364 | voltage_info = (union voltage_object_info *) | ||
| 1365 | (adev->mode_info.atom_context->bios + data_offset); | ||
| 1366 | |||
| 1367 | switch (frev) { | ||
| 1368 | case 3: | ||
| 1369 | switch (crev) { | ||
| 1370 | case 1: | ||
| 1371 | voltage_object = (union voltage_object *) | ||
| 1372 | amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3, | ||
| 1373 | voltage_type, voltage_mode); | ||
| 1374 | if (voltage_object) { | ||
| 1375 | ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio = | ||
| 1376 | &voltage_object->v3.asGpioVoltageObj; | ||
| 1377 | VOLTAGE_LUT_ENTRY_V2 *lut; | ||
| 1378 | if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES) | ||
| 1379 | return -EINVAL; | ||
| 1380 | lut = &gpio->asVolGpioLut[0]; | ||
| 1381 | for (i = 0; i < gpio->ucGpioEntryNum; i++) { | ||
| 1382 | voltage_table->entries[i].value = | ||
| 1383 | le16_to_cpu(lut->usVoltageValue); | ||
| 1384 | voltage_table->entries[i].smio_low = | ||
| 1385 | le32_to_cpu(lut->ulVoltageId); | ||
| 1386 | lut = (VOLTAGE_LUT_ENTRY_V2 *) | ||
| 1387 | ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2)); | ||
| 1388 | } | ||
| 1389 | voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal); | ||
| 1390 | voltage_table->count = gpio->ucGpioEntryNum; | ||
| 1391 | voltage_table->phase_delay = gpio->ucPhaseDelay; | ||
| 1392 | return 0; | ||
| 1393 | } | ||
| 1394 | break; | ||
| 1395 | default: | ||
| 1396 | DRM_ERROR("unknown voltage object table\n"); | ||
| 1397 | return -EINVAL; | ||
| 1398 | } | ||
| 1399 | break; | ||
| 1400 | default: | ||
| 1401 | DRM_ERROR("unknown voltage object table\n"); | ||
| 1402 | return -EINVAL; | ||
| 1403 | } | ||
| 1404 | } | ||
| 1405 | return -EINVAL; | ||
| 1406 | } | ||
| 1407 | |||
| 1408 | union vram_info { | ||
| 1409 | struct _ATOM_VRAM_INFO_V3 v1_3; | ||
| 1410 | struct _ATOM_VRAM_INFO_V4 v1_4; | ||
| 1411 | struct _ATOM_VRAM_INFO_HEADER_V2_1 v2_1; | ||
| 1412 | }; | ||
| 1413 | |||
| 1414 | #define MEM_ID_MASK 0xff000000 | ||
| 1415 | #define MEM_ID_SHIFT 24 | ||
| 1416 | #define CLOCK_RANGE_MASK 0x00ffffff | ||
| 1417 | #define CLOCK_RANGE_SHIFT 0 | ||
| 1418 | #define LOW_NIBBLE_MASK 0xf | ||
| 1419 | #define DATA_EQU_PREV 0 | ||
| 1420 | #define DATA_FROM_TABLE 4 | ||
| 1421 | |||
| 1422 | int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev, | ||
| 1423 | u8 module_index, | ||
| 1424 | struct atom_mc_reg_table *reg_table) | ||
| 1425 | { | ||
| 1426 | int index = GetIndexIntoMasterTable(DATA, VRAM_Info); | ||
| 1427 | u8 frev, crev, num_entries, t_mem_id, num_ranges = 0; | ||
| 1428 | u32 i = 0, j; | ||
| 1429 | u16 data_offset, size; | ||
| 1430 | union vram_info *vram_info; | ||
| 1431 | |||
| 1432 | memset(reg_table, 0, sizeof(struct atom_mc_reg_table)); | ||
| 1433 | |||
| 1434 | if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size, | ||
| 1435 | &frev, &crev, &data_offset)) { | ||
| 1436 | vram_info = (union vram_info *) | ||
| 1437 | (adev->mode_info.atom_context->bios + data_offset); | ||
| 1438 | switch (frev) { | ||
| 1439 | case 1: | ||
| 1440 | DRM_ERROR("old table version %d, %d\n", frev, crev); | ||
| 1441 | return -EINVAL; | ||
| 1442 | case 2: | ||
| 1443 | switch (crev) { | ||
| 1444 | case 1: | ||
| 1445 | if (module_index < vram_info->v2_1.ucNumOfVRAMModule) { | ||
| 1446 | ATOM_INIT_REG_BLOCK *reg_block = | ||
| 1447 | (ATOM_INIT_REG_BLOCK *) | ||
| 1448 | ((u8 *)vram_info + le16_to_cpu(vram_info->v2_1.usMemClkPatchTblOffset)); | ||
| 1449 | ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data = | ||
| 1450 | (ATOM_MEMORY_SETTING_DATA_BLOCK *) | ||
| 1451 | ((u8 *)reg_block + (2 * sizeof(u16)) + | ||
| 1452 | le16_to_cpu(reg_block->usRegIndexTblSize)); | ||
| 1453 | ATOM_INIT_REG_INDEX_FORMAT *format = ®_block->asRegIndexBuf[0]; | ||
| 1454 | num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) / | ||
| 1455 | sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1; | ||
| 1456 | if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE) | ||
| 1457 | return -EINVAL; | ||
| 1458 | while (i < num_entries) { | ||
| 1459 | if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER) | ||
| 1460 | break; | ||
| 1461 | reg_table->mc_reg_address[i].s1 = | ||
| 1462 | (u16)(le16_to_cpu(format->usRegIndex)); | ||
| 1463 | reg_table->mc_reg_address[i].pre_reg_data = | ||
| 1464 | (u8)(format->ucPreRegDataLength); | ||
| 1465 | i++; | ||
| 1466 | format = (ATOM_INIT_REG_INDEX_FORMAT *) | ||
| 1467 | ((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT)); | ||
| 1468 | } | ||
| 1469 | reg_table->last = i; | ||
| 1470 | while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) && | ||
| 1471 | (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) { | ||
| 1472 | t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK) | ||
| 1473 | >> MEM_ID_SHIFT); | ||
| 1474 | if (module_index == t_mem_id) { | ||
| 1475 | reg_table->mc_reg_table_entry[num_ranges].mclk_max = | ||
| 1476 | (u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK) | ||
| 1477 | >> CLOCK_RANGE_SHIFT); | ||
| 1478 | for (i = 0, j = 1; i < reg_table->last; i++) { | ||
| 1479 | if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) { | ||
| 1480 | reg_table->mc_reg_table_entry[num_ranges].mc_data[i] = | ||
| 1481 | (u32)le32_to_cpu(*((u32 *)reg_data + j)); | ||
| 1482 | j++; | ||
| 1483 | } else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) { | ||
| 1484 | reg_table->mc_reg_table_entry[num_ranges].mc_data[i] = | ||
| 1485 | reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1]; | ||
| 1486 | } | ||
| 1487 | } | ||
| 1488 | num_ranges++; | ||
| 1489 | } | ||
| 1490 | reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *) | ||
| 1491 | ((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize)); | ||
| 1492 | } | ||
| 1493 | if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) | ||
| 1494 | return -EINVAL; | ||
| 1495 | reg_table->num_entries = num_ranges; | ||
| 1496 | } else | ||
| 1497 | return -EINVAL; | ||
| 1498 | break; | ||
| 1499 | default: | ||
| 1500 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
| 1501 | return -EINVAL; | ||
| 1502 | } | ||
| 1503 | break; | ||
| 1504 | default: | ||
| 1505 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
| 1506 | return -EINVAL; | ||
| 1507 | } | ||
| 1508 | return 0; | ||
| 1509 | } | ||
| 1510 | return -EINVAL; | ||
| 1511 | } | ||
| 1512 | |||
| 1513 | void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock) | ||
| 1514 | { | ||
| 1515 | uint32_t bios_6_scratch; | ||
| 1516 | |||
| 1517 | bios_6_scratch = RREG32(mmBIOS_SCRATCH_6); | ||
| 1518 | |||
| 1519 | if (lock) { | ||
| 1520 | bios_6_scratch |= ATOM_S6_CRITICAL_STATE; | ||
| 1521 | bios_6_scratch &= ~ATOM_S6_ACC_MODE; | ||
| 1522 | } else { | ||
| 1523 | bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; | ||
| 1524 | bios_6_scratch |= ATOM_S6_ACC_MODE; | ||
| 1525 | } | ||
| 1526 | |||
| 1527 | WREG32(mmBIOS_SCRATCH_6, bios_6_scratch); | ||
| 1528 | } | ||
| 1529 | |||
| 1530 | void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev) | ||
| 1531 | { | ||
| 1532 | uint32_t bios_2_scratch, bios_6_scratch; | ||
| 1533 | |||
| 1534 | bios_2_scratch = RREG32(mmBIOS_SCRATCH_2); | ||
| 1535 | bios_6_scratch = RREG32(mmBIOS_SCRATCH_6); | ||
| 1536 | |||
| 1537 | /* let the bios control the backlight */ | ||
| 1538 | bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; | ||
| 1539 | |||
| 1540 | /* tell the bios not to handle mode switching */ | ||
| 1541 | bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH; | ||
| 1542 | |||
| 1543 | /* clear the vbios dpms state */ | ||
| 1544 | bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE; | ||
| 1545 | |||
| 1546 | WREG32(mmBIOS_SCRATCH_2, bios_2_scratch); | ||
| 1547 | WREG32(mmBIOS_SCRATCH_6, bios_6_scratch); | ||
| 1548 | } | ||
| 1549 | |||
| 1550 | void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev) | ||
| 1551 | { | ||
| 1552 | int i; | ||
| 1553 | |||
| 1554 | for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++) | ||
| 1555 | adev->bios_scratch[i] = RREG32(mmBIOS_SCRATCH_0 + i); | ||
| 1556 | } | ||
| 1557 | |||
| 1558 | void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev) | ||
| 1559 | { | ||
| 1560 | int i; | ||
| 1561 | |||
| 1562 | for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++) | ||
| 1563 | WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]); | ||
| 1564 | } | ||
| 1565 | |||
| 1566 | /* Atom needs data in little endian format | ||
| 1567 | * so swap as appropriate when copying data to | ||
| 1568 | * or from atom. Note that atom operates on | ||
| 1569 | * dw units. | ||
| 1570 | */ | ||
| 1571 | void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) | ||
| 1572 | { | ||
| 1573 | #ifdef __BIG_ENDIAN | ||
| 1574 | u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ | ||
| 1575 | u32 *dst32, *src32; | ||
| 1576 | int i; | ||
| 1577 | |||
| 1578 | memcpy(src_tmp, src, num_bytes); | ||
| 1579 | src32 = (u32 *)src_tmp; | ||
| 1580 | dst32 = (u32 *)dst_tmp; | ||
| 1581 | if (to_le) { | ||
| 1582 | for (i = 0; i < ((num_bytes + 3) / 4); i++) | ||
| 1583 | dst32[i] = cpu_to_le32(src32[i]); | ||
| 1584 | memcpy(dst, dst_tmp, num_bytes); | ||
| 1585 | } else { | ||
| 1586 | u8 dws = num_bytes & ~3; | ||
| 1587 | for (i = 0; i < ((num_bytes + 3) / 4); i++) | ||
| 1588 | dst32[i] = le32_to_cpu(src32[i]); | ||
| 1589 | memcpy(dst, dst_tmp, dws); | ||
| 1590 | if (num_bytes % 4) { | ||
| 1591 | for (i = 0; i < (num_bytes % 4); i++) | ||
| 1592 | dst[dws+i] = dst_tmp[dws+i]; | ||
| 1593 | } | ||
| 1594 | } | ||
| 1595 | #else | ||
| 1596 | memcpy(dst, src, num_bytes); | ||
| 1597 | #endif | ||
| 1598 | } | ||
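A userspace sketch of the same swap, simplified to the symmetric full-copy case (the kernel helper treats the partial trailing dword slightly differently in each direction); bswap32 stands in for the kernel's cpu_to_le32/le32_to_cpu, which compile to no-ops on little-endian hosts, and as in the kernel helper the caller must keep num_bytes within the 20-byte scratch buffer:

```c
#include <stdint.h>
#include <string.h>

static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

/* Swap each 32-bit word while copying; caller keeps num_bytes <= 20. */
static void copy_swap_dwords(uint8_t *dst, const uint8_t *src,
			     uint8_t num_bytes)
{
	uint8_t tmp[20];
	int i, dwords = (num_bytes + 3) / 4;

	memcpy(tmp, src, num_bytes);
	for (i = 0; i < dwords; i++) {
		uint32_t w;

		memcpy(&w, tmp + 4 * i, sizeof(w));
		w = bswap32(w);
		memcpy(tmp + 4 * i, &w, sizeof(w));
	}
	memcpy(dst, tmp, num_bytes);
}
```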
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h new file mode 100644 index 000000000000..0ebb959ea435 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | |||
| @@ -0,0 +1,206 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __AMDGPU_ATOMBIOS_H__ | ||
| 25 | #define __AMDGPU_ATOMBIOS_H__ | ||
| 26 | |||
| 27 | struct atom_clock_dividers { | ||
| 28 | u32 post_div; | ||
| 29 | union { | ||
| 30 | struct { | ||
| 31 | #ifdef __BIG_ENDIAN | ||
| 32 | u32 reserved : 6; | ||
| 33 | u32 whole_fb_div : 12; | ||
| 34 | u32 frac_fb_div : 14; | ||
| 35 | #else | ||
| 36 | u32 frac_fb_div : 14; | ||
| 37 | u32 whole_fb_div : 12; | ||
| 38 | u32 reserved : 6; | ||
| 39 | #endif | ||
| 40 | }; | ||
| 41 | u32 fb_div; | ||
| 42 | }; | ||
| 43 | u32 ref_div; | ||
| 44 | bool enable_post_div; | ||
| 45 | bool enable_dithen; | ||
| 46 | u32 vco_mode; | ||
| 47 | u32 real_clock; | ||
| 48 | /* added for CI */ | ||
| 49 | u32 post_divider; | ||
| 50 | u32 flags; | ||
| 51 | }; | ||
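The anonymous union lets callers treat the feedback divider either as the packed u32 an ATOM table returns or as separate whole/fractional fields, with the `#ifdef __BIG_ENDIAN` bitfield ordering keeping the two views consistent. A little-endian-only sketch of the same trick (bitfield layout is implementation-defined, which is exactly why the real struct carries both orderings):

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the fb_div union above: 14-bit fraction, 12-bit whole part. */
struct fb_div_fields {
	union {
		struct {
			uint32_t frac_fb_div  : 14;
			uint32_t whole_fb_div : 12;
			uint32_t reserved     : 6;
		};
		uint32_t fb_div;	/* packed view, as ATOM returns it */
	};
};

int main(void)
{
	struct fb_div_fields d;

	d.fb_div = (100u << 14) | 8192u;	/* whole = 100, frac = 8192 */
	printf("whole %u frac %u\n", d.whole_fb_div, d.frac_fb_div);
	return 0;
}
```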
| 52 | |||
| 53 | struct atom_mpll_param { | ||
| 54 | union { | ||
| 55 | struct { | ||
| 56 | #ifdef __BIG_ENDIAN | ||
| 57 | u32 reserved : 8; | ||
| 58 | u32 clkfrac : 12; | ||
| 59 | u32 clkf : 12; | ||
| 60 | #else | ||
| 61 | u32 clkf : 12; | ||
| 62 | u32 clkfrac : 12; | ||
| 63 | u32 reserved : 8; | ||
| 64 | #endif | ||
| 65 | }; | ||
| 66 | u32 fb_div; | ||
| 67 | }; | ||
| 68 | u32 post_div; | ||
| 69 | u32 bwcntl; | ||
| 70 | u32 dll_speed; | ||
| 71 | u32 vco_mode; | ||
| 72 | u32 yclk_sel; | ||
| 73 | u32 qdr; | ||
| 74 | u32 half_rate; | ||
| 75 | }; | ||
| 76 | |||
| 77 | #define MEM_TYPE_GDDR5 0x50 | ||
| 78 | #define MEM_TYPE_GDDR4 0x40 | ||
| 79 | #define MEM_TYPE_GDDR3 0x30 | ||
| 80 | #define MEM_TYPE_DDR2 0x20 | ||
| 81 | #define MEM_TYPE_GDDR1 0x10 | ||
| 82 | #define MEM_TYPE_DDR3 0xb0 | ||
| 83 | #define MEM_TYPE_MASK 0xf0 | ||
| 84 | |||
| 85 | struct atom_memory_info { | ||
| 86 | u8 mem_vendor; | ||
| 87 | u8 mem_type; | ||
| 88 | }; | ||
| 89 | |||
| 90 | #define MAX_AC_TIMING_ENTRIES 16 | ||
| 91 | |||
| 92 | struct atom_memory_clock_range_table | ||
| 93 | { | ||
| 94 | u8 num_entries; | ||
| 95 | u8 rsv[3]; | ||
| 96 | u32 mclk[MAX_AC_TIMING_ENTRIES]; | ||
| 97 | }; | ||
| 98 | |||
| 99 | #define VBIOS_MC_REGISTER_ARRAY_SIZE 32 | ||
| 100 | #define VBIOS_MAX_AC_TIMING_ENTRIES 20 | ||
| 101 | |||
| 102 | struct atom_mc_reg_entry { | ||
| 103 | u32 mclk_max; | ||
| 104 | u32 mc_data[VBIOS_MC_REGISTER_ARRAY_SIZE]; | ||
| 105 | }; | ||
| 106 | |||
| 107 | struct atom_mc_register_address { | ||
| 108 | u16 s1; | ||
| 109 | u8 pre_reg_data; | ||
| 110 | }; | ||
| 111 | |||
| 112 | struct atom_mc_reg_table { | ||
| 113 | u8 last; | ||
| 114 | u8 num_entries; | ||
| 115 | struct atom_mc_reg_entry mc_reg_table_entry[VBIOS_MAX_AC_TIMING_ENTRIES]; | ||
| 116 | struct atom_mc_register_address mc_reg_address[VBIOS_MC_REGISTER_ARRAY_SIZE]; | ||
| 117 | }; | ||
| 118 | |||
| 119 | #define MAX_VOLTAGE_ENTRIES 32 | ||
| 120 | |||
| 121 | struct atom_voltage_table_entry | ||
| 122 | { | ||
| 123 | u16 value; | ||
| 124 | u32 smio_low; | ||
| 125 | }; | ||
| 126 | |||
| 127 | struct atom_voltage_table | ||
| 128 | { | ||
| 129 | u32 count; | ||
| 130 | u32 mask_low; | ||
| 131 | u32 phase_delay; | ||
| 132 | struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; | ||
| 133 | }; | ||
| 134 | |||
| 135 | struct amdgpu_gpio_rec | ||
| 136 | amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev, | ||
| 137 | u8 id); | ||
| 138 | |||
| 139 | struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev, | ||
| 140 | uint8_t id); | ||
| 141 | void amdgpu_atombios_i2c_init(struct amdgpu_device *adev); | ||
| 142 | |||
| 143 | bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev); | ||
| 144 | |||
| 145 | int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev); | ||
| 146 | |||
| 147 | bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev, | ||
| 148 | struct amdgpu_atom_ss *ss, | ||
| 149 | int id, u32 clock); | ||
| 150 | |||
| 151 | int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, | ||
| 152 | u8 clock_type, | ||
| 153 | u32 clock, | ||
| 154 | bool strobe_mode, | ||
| 155 | struct atom_clock_dividers *dividers); | ||
| 156 | |||
| 157 | int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev, | ||
| 158 | u32 clock, | ||
| 159 | bool strobe_mode, | ||
| 160 | struct atom_mpll_param *mpll_param); | ||
| 161 | |||
| 162 | uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev); | ||
| 163 | uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev); | ||
| 164 | void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev, | ||
| 165 | uint32_t eng_clock); | ||
| 166 | void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev, | ||
| 167 | uint32_t mem_clock); | ||
| 168 | void amdgpu_atombios_set_voltage(struct amdgpu_device *adev, | ||
| 169 | u16 voltage_level, | ||
| 170 | u8 voltage_type); | ||
| 171 | |||
| 172 | void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, | ||
| 173 | u32 eng_clock, u32 mem_clock); | ||
| 174 | |||
| 175 | int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev, | ||
| 176 | u16 *leakage_id); | ||
| 177 | |||
| 178 | int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev, | ||
| 179 | u16 *vddc, u16 *vddci, | ||
| 180 | u16 virtual_voltage_id, | ||
| 181 | u16 vbios_voltage_id); | ||
| 182 | |||
| 183 | int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev, | ||
| 184 | u16 virtual_voltage_id, | ||
| 185 | u16 *voltage); | ||
| 186 | |||
| 187 | bool | ||
| 188 | amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev, | ||
| 189 | u8 voltage_type, u8 voltage_mode); | ||
| 190 | |||
| 191 | int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev, | ||
| 192 | u8 voltage_type, u8 voltage_mode, | ||
| 193 | struct atom_voltage_table *voltage_table); | ||
| 194 | |||
| 195 | int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev, | ||
| 196 | u8 module_index, | ||
| 197 | struct atom_mc_reg_table *reg_table); | ||
| 198 | |||
| 199 | void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock); | ||
| 200 | void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev); | ||
| 201 | void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev); | ||
| 202 | void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev); | ||
| 203 | |||
| 204 | void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); | ||
| 205 | |||
| 206 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c new file mode 100644 index 000000000000..3f7aaa45bf8e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | |||
| @@ -0,0 +1,572 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2010 Red Hat Inc. | ||
| 3 | * Author : Dave Airlie <airlied@redhat.com> | ||
| 4 | * | ||
| 5 | * Licensed under GPLv2 | ||
| 6 | * | ||
| 7 | * ATPX support for both Intel and ATI/AMD GPUs | ||
| 8 | */ | ||
| 9 | #include <linux/vga_switcheroo.h> | ||
| 10 | #include <linux/slab.h> | ||
| 11 | #include <linux/acpi.h> | ||
| 12 | #include <linux/pci.h> | ||
| 13 | |||
| 14 | #include "amdgpu_acpi.h" | ||
| 15 | |||
| 16 | struct amdgpu_atpx_functions { | ||
| 17 | bool px_params; | ||
| 18 | bool power_cntl; | ||
| 19 | bool disp_mux_cntl; | ||
| 20 | bool i2c_mux_cntl; | ||
| 21 | bool switch_start; | ||
| 22 | bool switch_end; | ||
| 23 | bool disp_connectors_mapping; | ||
| 24 | bool disp_detection_ports; | ||
| 25 | }; | ||
| 26 | |||
| 27 | struct amdgpu_atpx { | ||
| 28 | acpi_handle handle; | ||
| 29 | struct amdgpu_atpx_functions functions; | ||
| 30 | }; | ||
| 31 | |||
| 32 | static struct amdgpu_atpx_priv { | ||
| 33 | bool atpx_detected; | ||
| 34 | /* handles for the device and its ATPX method */ | ||
| 35 | acpi_handle dhandle; | ||
| 36 | acpi_handle other_handle; | ||
| 37 | struct amdgpu_atpx atpx; | ||
| 38 | } amdgpu_atpx_priv; | ||
| 39 | |||
| 40 | struct atpx_verify_interface { | ||
| 41 | u16 size; /* structure size in bytes (includes size field) */ | ||
| 42 | u16 version; /* version */ | ||
| 43 | u32 function_bits; /* supported functions bit vector */ | ||
| 44 | } __packed; | ||
| 45 | |||
| 46 | struct atpx_px_params { | ||
| 47 | u16 size; /* structure size in bytes (includes size field) */ | ||
| 48 | u32 valid_flags; /* which flags are valid */ | ||
| 49 | u32 flags; /* flags */ | ||
| 50 | } __packed; | ||
| 51 | |||
| 52 | struct atpx_power_control { | ||
| 53 | u16 size; | ||
| 54 | u8 dgpu_state; | ||
| 55 | } __packed; | ||
| 56 | |||
| 57 | struct atpx_mux { | ||
| 58 | u16 size; | ||
| 59 | u16 mux; | ||
| 60 | } __packed; | ||
| 61 | |||
| 62 | bool amdgpu_has_atpx(void) { | ||
| 63 | return amdgpu_atpx_priv.atpx_detected; | ||
| 64 | } | ||
| 65 | |||
| 66 | /** | ||
| 67 | * amdgpu_atpx_call - call an ATPX method | ||
| 68 | * | ||
| 69 | * @handle: acpi handle | ||
| 70 | * @function: the ATPX function to execute | ||
| 71 | * @params: ATPX function params | ||
| 72 | * | ||
| 73 | * Executes the requested ATPX function (all asics). | ||
| 74 | * Returns a pointer to the acpi output buffer. | ||
| 75 | */ | ||
| 76 | static union acpi_object *amdgpu_atpx_call(acpi_handle handle, int function, | ||
| 77 | struct acpi_buffer *params) | ||
| 78 | { | ||
| 79 | acpi_status status; | ||
| 80 | union acpi_object atpx_arg_elements[2]; | ||
| 81 | struct acpi_object_list atpx_arg; | ||
| 82 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
| 83 | |||
| 84 | atpx_arg.count = 2; | ||
| 85 | atpx_arg.pointer = &atpx_arg_elements[0]; | ||
| 86 | |||
| 87 | atpx_arg_elements[0].type = ACPI_TYPE_INTEGER; | ||
| 88 | atpx_arg_elements[0].integer.value = function; | ||
| 89 | |||
| 90 | if (params) { | ||
| 91 | atpx_arg_elements[1].type = ACPI_TYPE_BUFFER; | ||
| 92 | atpx_arg_elements[1].buffer.length = params->length; | ||
| 93 | atpx_arg_elements[1].buffer.pointer = params->pointer; | ||
| 94 | } else { | ||
| 95 | /* We need a second fake parameter */ | ||
| 96 | atpx_arg_elements[1].type = ACPI_TYPE_INTEGER; | ||
| 97 | atpx_arg_elements[1].integer.value = 0; | ||
| 98 | } | ||
| 99 | |||
| 100 | status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); | ||
| 101 | |||
| 102 | /* Fail only if calling the method fails and ATPX is supported */ | ||
| 103 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { | ||
| 104 | printk("failed to evaluate ATPX got %s\n", | ||
| 105 | acpi_format_exception(status)); | ||
| 106 | kfree(buffer.pointer); | ||
| 107 | return NULL; | ||
| 108 | } | ||
| 109 | |||
| 110 | return buffer.pointer; | ||
| 111 | } | ||
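A hypothetical caller sketch, previewing the pattern every mux/power helper below follows: pack a __packed input struct into an acpi_buffer whose length is the struct's wire size, and free the returned object. Passing NULL params instead makes amdgpu_atpx_call() substitute a dummy integer, since ATPX methods always take two arguments:

```c
/*
 * Power up the dGPU. The input struct's 'size' is its packed wire
 * size (u16 + u8 = 3 bytes); the returned acpi object only signals
 * success here, so it is freed immediately.
 */
static int example_atpx_power_on(struct amdgpu_atpx *atpx)
{
	struct atpx_power_control input = {
		.size = 3,
		.dgpu_state = 1,	/* 1 = power up */
	};
	struct acpi_buffer params = {
		.length = input.size,
		.pointer = &input,
	};
	union acpi_object *info;

	info = amdgpu_atpx_call(atpx->handle,
				ATPX_FUNCTION_POWER_CONTROL, &params);
	if (!info)
		return -EIO;
	kfree(info);
	return 0;
}
```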
| 112 | |||
| 113 | /** | ||
| 114 | * amdgpu_atpx_parse_functions - parse supported functions | ||
| 115 | * | ||
| 116 | * @f: supported functions struct | ||
| 117 | * @mask: supported functions mask from ATPX | ||
| 118 | * | ||
| 119 | * Use the supported functions mask from ATPX function | ||
| 120 | * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions | ||
| 121 | * are supported (all asics). | ||
| 122 | */ | ||
| 123 | static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mask) | ||
| 124 | { | ||
| 125 | f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED; | ||
| 126 | f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED; | ||
| 127 | f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED; | ||
| 128 | f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED; | ||
| 129 | f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED; | ||
| 130 | f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED; | ||
| 131 | f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED; | ||
| 132 | f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED; | ||
| 133 | } | ||
| 134 | |||
| 135 | /** | ||
| 136 | * amdgpu_atpx_validate - validate ATPX functions | ||
| 137 | * | ||
| 138 | * @atpx: amdgpu atpx struct | ||
| 139 | * | ||
| 140 | * Validate that required functions are enabled (all asics). | ||
| 141 | * Returns 0 on success, error on failure. | ||
| 142 | */ | ||
| 143 | static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) | ||
| 144 | { | ||
| 145 | /* make sure required functions are enabled */ | ||
| 146 | /* dGPU power control is required */ | ||
| 147 | atpx->functions.power_cntl = true; | ||
| 148 | |||
| 149 | if (atpx->functions.px_params) { | ||
| 150 | union acpi_object *info; | ||
| 151 | struct atpx_px_params output; | ||
| 152 | size_t size; | ||
| 153 | u32 valid_bits; | ||
| 154 | |||
| 155 | info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL); | ||
| 156 | if (!info) | ||
| 157 | return -EIO; | ||
| 158 | |||
| 159 | memset(&output, 0, sizeof(output)); | ||
| 160 | |||
| 161 | size = *(u16 *) info->buffer.pointer; | ||
| 162 | if (size < 10) { | ||
| 163 | printk("ATPX buffer is too small: %zu\n", size); | ||
| 164 | kfree(info); | ||
| 165 | return -EINVAL; | ||
| 166 | } | ||
| 167 | size = min(sizeof(output), size); | ||
| 168 | |||
| 169 | memcpy(&output, info->buffer.pointer, size); | ||
| 170 | |||
| 171 | valid_bits = output.flags & output.valid_flags; | ||
| 172 | /* if separate mux flag is set, mux controls are required */ | ||
| 173 | if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { | ||
| 174 | atpx->functions.i2c_mux_cntl = true; | ||
| 175 | atpx->functions.disp_mux_cntl = true; | ||
| 176 | } | ||
| 177 | /* if any outputs are muxed, mux controls are required */ | ||
| 178 | if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | | ||
| 179 | ATPX_TV_SIGNAL_MUXED | | ||
| 180 | ATPX_DFP_SIGNAL_MUXED)) | ||
| 181 | atpx->functions.disp_mux_cntl = true; | ||
| 182 | |||
| 183 | kfree(info); | ||
| 184 | } | ||
| 185 | return 0; | ||
| 186 | } | ||
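The PX-parameters handling above follows a defensive-copy pattern worth calling out: the first u16 of the ACPI reply reports how many bytes the firmware produced, the driver rejects replies smaller than the mandatory fields, then copies at most sizeof(output) so a newer, larger reply cannot overrun the local struct. A standalone sketch (assumes the caller already verified the raw buffer holds at least the reported size):

```c
#include <stdint.h>
#include <string.h>

/* Mirrors the kernel's __packed atpx_px_params: 10 bytes on the wire. */
struct px_params {
	uint16_t size;
	uint32_t valid_flags;
	uint32_t flags;
} __attribute__((packed));

static int parse_px_params(const void *raw, struct px_params *out)
{
	uint16_t reported;

	/* first u16 of the reply is the byte count the firmware wrote */
	memcpy(&reported, raw, sizeof(reported));
	if (reported < 10)	/* cannot hold the mandatory v1 fields */
		return -1;

	memset(out, 0, sizeof(*out));
	/* clamp so an oversized reply cannot overrun the local struct */
	memcpy(out, raw, reported < sizeof(*out) ? reported : sizeof(*out));
	return 0;
}
```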
| 187 | |||
| 188 | /** | ||
| 189 | * amdgpu_atpx_verify_interface - verify ATPX | ||
| 190 | * | ||
| 191 | * @atpx: amdgpu atpx struct | ||
| 192 | * | ||
| 193 | * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function | ||
| 194 | * to initialize ATPX and determine what features are supported | ||
| 195 | * (all asics). | ||
| 196 | * Returns 0 on success, error on failure. | ||
| 197 | */ | ||
| 198 | static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx) | ||
| 199 | { | ||
| 200 | union acpi_object *info; | ||
| 201 | struct atpx_verify_interface output; | ||
| 202 | size_t size; | ||
| 203 | int err = 0; | ||
| 204 | |||
| 205 | info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL); | ||
| 206 | if (!info) | ||
| 207 | return -EIO; | ||
| 208 | |||
| 209 | memset(&output, 0, sizeof(output)); | ||
| 210 | |||
| 211 | size = *(u16 *) info->buffer.pointer; | ||
| 212 | if (size < 8) { | ||
| 213 | printk("ATPX buffer is too small: %zu\n", size); | ||
| 214 | err = -EINVAL; | ||
| 215 | goto out; | ||
| 216 | } | ||
| 217 | size = min(sizeof(output), size); | ||
| 218 | |||
| 219 | memcpy(&output, info->buffer.pointer, size); | ||
| 220 | |||
| 221 | /* TODO: check version? */ | ||
| 222 | printk("ATPX version %u, functions 0x%08x\n", | ||
| 223 | output.version, output.function_bits); | ||
| 224 | |||
| 225 | amdgpu_atpx_parse_functions(&atpx->functions, output.function_bits); | ||
| 226 | |||
| 227 | out: | ||
| 228 | kfree(info); | ||
| 229 | return err; | ||
| 230 | } | ||
| 231 | |||
| 232 | /** | ||
| 233 | * amdgpu_atpx_set_discrete_state - power up/down discrete GPU | ||
| 234 | * | ||
| 235 | * @atpx: atpx info struct | ||
| 236 | * @state: discrete GPU state (0 = power down, 1 = power up) | ||
| 237 | * | ||
| 238 | * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to | ||
| 239 | * power down/up the discrete GPU (all asics). | ||
| 240 | * Returns 0 on success, error on failure. | ||
| 241 | */ | ||
| 242 | static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state) | ||
| 243 | { | ||
| 244 | struct acpi_buffer params; | ||
| 245 | union acpi_object *info; | ||
| 246 | struct atpx_power_control input; | ||
| 247 | |||
| 248 | if (atpx->functions.power_cntl) { | ||
| 249 | input.size = 3; | ||
| 250 | input.dgpu_state = state; | ||
| 251 | params.length = input.size; | ||
| 252 | params.pointer = &input; | ||
| 253 | info = amdgpu_atpx_call(atpx->handle, | ||
| 254 | ATPX_FUNCTION_POWER_CONTROL, | ||
| 255 | ¶ms); | ||
| 256 | if (!info) | ||
| 257 | return -EIO; | ||
| 258 | kfree(info); | ||
| 259 | } | ||
| 260 | return 0; | ||
| 261 | } | ||
| 262 | |||
| 263 | /** | ||
| 264 | * amdgpu_atpx_switch_disp_mux - switch display mux | ||
| 265 | * | ||
| 266 | * @atpx: atpx info struct | ||
| 267 | * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) | ||
| 268 | * | ||
| 269 | * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to | ||
| 270 | * switch the display mux between the discrete GPU and integrated GPU | ||
| 271 | * (all asics). | ||
| 272 | * Returns 0 on success, error on failure. | ||
| 273 | */ | ||
| 274 | static int amdgpu_atpx_switch_disp_mux(struct amdgpu_atpx *atpx, u16 mux_id) | ||
| 275 | { | ||
| 276 | struct acpi_buffer params; | ||
| 277 | union acpi_object *info; | ||
| 278 | struct atpx_mux input; | ||
| 279 | |||
| 280 | if (atpx->functions.disp_mux_cntl) { | ||
| 281 | input.size = 4; | ||
| 282 | input.mux = mux_id; | ||
| 283 | params.length = input.size; | ||
| 284 | params.pointer = &input; | ||
| 285 | info = amdgpu_atpx_call(atpx->handle, | ||
| 286 | ATPX_FUNCTION_DISPLAY_MUX_CONTROL, | ||
| 287 | ¶ms); | ||
| 288 | if (!info) | ||
| 289 | return -EIO; | ||
| 290 | kfree(info); | ||
| 291 | } | ||
| 292 | return 0; | ||
| 293 | } | ||
| 294 | |||
| 295 | /** | ||
| 296 | * amdgpu_atpx_switch_i2c_mux - switch i2c/hpd mux | ||
| 297 | * | ||
| 298 | * @atpx: atpx info struct | ||
| 299 | * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) | ||
| 300 | * | ||
| 301 | * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to | ||
| 302 | * switch the i2c/hpd mux between the discrete GPU and integrated GPU | ||
| 303 | * (all asics). | ||
| 304 | * Returns 0 on success, error on failure. | ||
| 305 | */ | ||
| 306 | static int amdgpu_atpx_switch_i2c_mux(struct amdgpu_atpx *atpx, u16 mux_id) | ||
| 307 | { | ||
| 308 | struct acpi_buffer params; | ||
| 309 | union acpi_object *info; | ||
| 310 | struct atpx_mux input; | ||
| 311 | |||
| 312 | if (atpx->functions.i2c_mux_cntl) { | ||
| 313 | input.size = 4; | ||
| 314 | input.mux = mux_id; | ||
| 315 | params.length = input.size; | ||
| 316 | params.pointer = &input; | ||
| 317 | info = amdgpu_atpx_call(atpx->handle, | ||
| 318 | ATPX_FUNCTION_I2C_MUX_CONTROL, | ||
| 319 | ¶ms); | ||
| 320 | if (!info) | ||
| 321 | return -EIO; | ||
| 322 | kfree(info); | ||
| 323 | } | ||
| 324 | return 0; | ||
| 325 | } | ||
| 326 | |||
| 327 | /** | ||
| 328 | * amdgpu_atpx_switch_start - notify the sbios of a GPU switch | ||
| 329 | * | ||
| 330 | * @atpx: atpx info struct | ||
| 331 | * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) | ||
| 332 | * | ||
| 333 | * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX | ||
| 334 | * function to notify the sbios that a switch between the discrete GPU and | ||
| 335 | * integrated GPU has begun (all asics). | ||
| 336 | * Returns 0 on success, error on failure. | ||
| 337 | */ | ||
| 338 | static int amdgpu_atpx_switch_start(struct amdgpu_atpx *atpx, u16 mux_id) | ||
| 339 | { | ||
| 340 | struct acpi_buffer params; | ||
| 341 | union acpi_object *info; | ||
| 342 | struct atpx_mux input; | ||
| 343 | |||
| 344 | if (atpx->functions.switch_start) { | ||
| 345 | input.size = 4; | ||
| 346 | input.mux = mux_id; | ||
| 347 | params.length = input.size; | ||
| 348 | params.pointer = &input; | ||
| 349 | info = amdgpu_atpx_call(atpx->handle, | ||
| 350 | ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION, | ||
| 351 | ¶ms); | ||
| 352 | if (!info) | ||
| 353 | return -EIO; | ||
| 354 | kfree(info); | ||
| 355 | } | ||
| 356 | return 0; | ||
| 357 | } | ||
| 358 | |||
| 359 | /** | ||
| 360 | * amdgpu_atpx_switch_end - notify the sbios of a GPU switch | ||
| 361 | * | ||
| 362 | * @atpx: atpx info struct | ||
| 363 | * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) | ||
| 364 | * | ||
| 365 | * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX | ||
| 366 | * function to notify the sbios that a switch between the discrete GPU and | ||
| 367 | * integrated GPU has ended (all asics). | ||
| 368 | * Returns 0 on success, error on failure. | ||
| 369 | */ | ||
| 370 | static int amdgpu_atpx_switch_end(struct amdgpu_atpx *atpx, u16 mux_id) | ||
| 371 | { | ||
| 372 | struct acpi_buffer params; | ||
| 373 | union acpi_object *info; | ||
| 374 | struct atpx_mux input; | ||
| 375 | |||
| 376 | if (atpx->functions.switch_end) { | ||
| 377 | input.size = 4; | ||
| 378 | input.mux = mux_id; | ||
| 379 | params.length = input.size; | ||
| 380 | params.pointer = &input; | ||
| 381 | info = amdgpu_atpx_call(atpx->handle, | ||
| 382 | ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION, | ||
| 383 | ¶ms); | ||
| 384 | if (!info) | ||
| 385 | return -EIO; | ||
| 386 | kfree(info); | ||
| 387 | } | ||
| 388 | return 0; | ||
| 389 | } | ||
| 390 | |||
| 391 | /** | ||
| 392 | * amdgpu_atpx_switchto - switch to the requested GPU | ||
| 393 | * | ||
| 394 | * @id: GPU to switch to | ||
| 395 | * | ||
| 396 | * Execute the necessary ATPX functions to switch between the discrete GPU and | ||
| 397 | * integrated GPU (all asics). | ||
| 398 | * Returns 0 on success, error on failure. | ||
| 399 | */ | ||
| 400 | static int amdgpu_atpx_switchto(enum vga_switcheroo_client_id id) | ||
| 401 | { | ||
| 402 | u16 gpu_id; | ||
| 403 | |||
| 404 | if (id == VGA_SWITCHEROO_IGD) | ||
| 405 | gpu_id = ATPX_INTEGRATED_GPU; | ||
| 406 | else | ||
| 407 | gpu_id = ATPX_DISCRETE_GPU; | ||
| 408 | |||
| 409 | amdgpu_atpx_switch_start(&amdgpu_atpx_priv.atpx, gpu_id); | ||
| 410 | amdgpu_atpx_switch_disp_mux(&amdgpu_atpx_priv.atpx, gpu_id); | ||
| 411 | amdgpu_atpx_switch_i2c_mux(&amdgpu_atpx_priv.atpx, gpu_id); | ||
| 412 | amdgpu_atpx_switch_end(&amdgpu_atpx_priv.atpx, gpu_id); | ||
| 413 | |||
| 414 | return 0; | ||
| 415 | } | ||
| 416 | |||
| 417 | /** | ||
| 418 | * amdgpu_atpx_power_state - power down/up the requested GPU | ||
| 419 | * | ||
| 420 | * @id: GPU to power down/up | ||
| 421 | * @state: requested power state (0 = off, 1 = on) | ||
| 422 | * | ||
| 423 | * Execute the necessary ATPX function to power down/up the discrete GPU | ||
| 424 | * (all asics). | ||
| 425 | * Returns 0 on success, error on failure. | ||
| 426 | */ | ||
| 427 | static int amdgpu_atpx_power_state(enum vga_switcheroo_client_id id, | ||
| 428 | enum vga_switcheroo_state state) | ||
| 429 | { | ||
| 430 | /* on w500 ACPI can't change intel gpu state */ | ||
| 431 | if (id == VGA_SWITCHEROO_IGD) | ||
| 432 | return 0; | ||
| 433 | |||
| 434 | amdgpu_atpx_set_discrete_state(&amdgpu_atpx_priv.atpx, state); | ||
| 435 | return 0; | ||
| 436 | } | ||
| 437 | |||
| 438 | /** | ||
| 439 | * amdgpu_atpx_pci_probe_handle - look up the ATPX handle | ||
| 440 | * | ||
| 441 | * @pdev: pci device | ||
| 442 | * | ||
| 443 | * Look up the ATPX handles (all asics). | ||
| 444 | * Returns true if the handles are found, false if not. | ||
| 445 | */ | ||
| 446 | static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev) | ||
| 447 | { | ||
| 448 | acpi_handle dhandle, atpx_handle; | ||
| 449 | acpi_status status; | ||
| 450 | |||
| 451 | dhandle = ACPI_HANDLE(&pdev->dev); | ||
| 452 | if (!dhandle) | ||
| 453 | return false; | ||
| 454 | |||
| 455 | status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); | ||
| 456 | if (ACPI_FAILURE(status)) { | ||
| 457 | amdgpu_atpx_priv.other_handle = dhandle; | ||
| 458 | return false; | ||
| 459 | } | ||
| 460 | amdgpu_atpx_priv.dhandle = dhandle; | ||
| 461 | amdgpu_atpx_priv.atpx.handle = atpx_handle; | ||
| 462 | return true; | ||
| 463 | } | ||
| 464 | |||
| 465 | /** | ||
| 466 | * amdgpu_atpx_init - verify the ATPX interface | ||
| 467 | * | ||
| 468 | * Verify the ATPX interface (all asics). | ||
| 469 | * Returns 0 on success, error on failure. | ||
| 470 | */ | ||
| 471 | static int amdgpu_atpx_init(void) | ||
| 472 | { | ||
| 473 | int r; | ||
| 474 | |||
| 475 | /* set up the ATPX handle */ | ||
| 476 | r = amdgpu_atpx_verify_interface(&amdgpu_atpx_priv.atpx); | ||
| 477 | if (r) | ||
| 478 | return r; | ||
| 479 | |||
| 480 | /* validate the atpx setup */ | ||
| 481 | r = amdgpu_atpx_validate(&amdgpu_atpx_priv.atpx); | ||
| 482 | if (r) | ||
| 483 | return r; | ||
| 484 | |||
| 485 | return 0; | ||
| 486 | } | ||
| 487 | |||
| 488 | /** | ||
| 489 | * amdgpu_atpx_get_client_id - get the client id | ||
| 490 | * | ||
| 491 | * @pdev: pci device | ||
| 492 | * | ||
| 493 | * Look up whether we are the integrated or discrete GPU (all asics). | ||
| 494 | * Returns the client id. | ||
| 495 | */ | ||
| 496 | static int amdgpu_atpx_get_client_id(struct pci_dev *pdev) | ||
| 497 | { | ||
| 498 | if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev)) | ||
| 499 | return VGA_SWITCHEROO_IGD; | ||
| 500 | else | ||
| 501 | return VGA_SWITCHEROO_DIS; | ||
| 502 | } | ||
| 503 | |||
| 504 | static struct vga_switcheroo_handler amdgpu_atpx_handler = { | ||
| 505 | .switchto = amdgpu_atpx_switchto, | ||
| 506 | .power_state = amdgpu_atpx_power_state, | ||
| 507 | .init = amdgpu_atpx_init, | ||
| 508 | .get_client_id = amdgpu_atpx_get_client_id, | ||
| 509 | }; | ||
| 510 | |||
| 511 | /** | ||
| 512 | * amdgpu_atpx_detect - detect whether we have PX | ||
| 513 | * | ||
| 514 | * Check if we have a PX system (all asics). | ||
| 515 | * Returns true if we have a PX system, false if not. | ||
| 516 | */ | ||
| 517 | static bool amdgpu_atpx_detect(void) | ||
| 518 | { | ||
| 519 | char acpi_method_name[255] = { 0 }; | ||
| 520 | struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; | ||
| 521 | struct pci_dev *pdev = NULL; | ||
| 522 | bool has_atpx = false; | ||
| 523 | int vga_count = 0; | ||
| 524 | |||
| 525 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { | ||
| 526 | vga_count++; | ||
| 527 | |||
| 528 | has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); | ||
| 529 | } | ||
| 530 | |||
| 531 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { | ||
| 532 | vga_count++; | ||
| 533 | |||
| 534 | has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); | ||
| 535 | } | ||
| 536 | |||
| 537 | if (has_atpx && vga_count == 2) { | ||
| 538 | acpi_get_name(amdgpu_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer); | ||
| 539 | printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", | ||
| 540 | acpi_method_name); | ||
| 541 | amdgpu_atpx_priv.atpx_detected = true; | ||
| 542 | return true; | ||
| 543 | } | ||
| 544 | return false; | ||
| 545 | } | ||
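The pci_get_class() loops above use the standard refcounted-iterator idiom: each call drops the reference on the device passed in and takes one on the device it returns, so a loop that runs to NULL leaks nothing and only an early break would need an explicit pci_dev_put(). A minimal sketch:

```c
#include <linux/pci.h>

/* Count VGA-class devices; each pci_get_class() call releases the
 * reference it was handed and returns the next device with a fresh
 * reference. */
static int count_vga_devices(void)
{
	struct pci_dev *pdev = NULL;
	int count = 0;

	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL)
		count++;

	return count;
}
```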
| 546 | |||
| 547 | /** | ||
| 548 | * amdgpu_register_atpx_handler - register with vga_switcheroo | ||
| 549 | * | ||
| 550 | * Register the PX callbacks with vga_switcheroo (all asics). | ||
| 551 | */ | ||
| 552 | void amdgpu_register_atpx_handler(void) | ||
| 553 | { | ||
| 554 | bool r; | ||
| 555 | |||
| 556 | /* detect if we have any ATPX + 2 VGA in the system */ | ||
| 557 | r = amdgpu_atpx_detect(); | ||
| 558 | if (!r) | ||
| 559 | return; | ||
| 560 | |||
| 561 | vga_switcheroo_register_handler(&amdgpu_atpx_handler); | ||
| 562 | } | ||
| 563 | |||
| 564 | /** | ||
| 565 | * amdgpu_unregister_atpx_handler - unregister with vga_switcheroo | ||
| 566 | * | ||
| 567 | * Unregister the PX callbacks with vga_switcheroo (all asics). | ||
| 568 | */ | ||
| 569 | void amdgpu_unregister_atpx_handler(void) | ||
| 570 | { | ||
| 571 | vga_switcheroo_unregister_handler(); | ||
| 572 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c new file mode 100644 index 000000000000..2742b9a35cbc --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | |||
| @@ -0,0 +1,221 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2009 Jerome Glisse. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Jerome Glisse | ||
| 23 | */ | ||
| 24 | #include <drm/drmP.h> | ||
| 25 | #include <drm/amdgpu_drm.h> | ||
| 26 | #include "amdgpu.h" | ||
| 27 | |||
| 28 | #define AMDGPU_BENCHMARK_ITERATIONS 1024 | ||
| 29 | #define AMDGPU_BENCHMARK_COMMON_MODES_N 17 | ||
| 30 | |||
| 31 | static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, | ||
| 32 | uint64_t saddr, uint64_t daddr, int n) | ||
| 33 | { | ||
| 34 | unsigned long start_jiffies; | ||
| 35 | unsigned long end_jiffies; | ||
| 36 | struct amdgpu_fence *fence = NULL; | ||
| 37 | int i, r; | ||
| 38 | |||
| 39 | start_jiffies = jiffies; | ||
| 40 | for (i = 0; i < n; i++) { | ||
| 41 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; | ||
| 42 | r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence); | ||
| 43 | if (r) | ||
| 44 | goto exit_do_move; | ||
| 45 | r = amdgpu_fence_wait(fence, false); | ||
| 46 | if (r) | ||
| 47 | goto exit_do_move; | ||
| 48 | amdgpu_fence_unref(&fence); | ||
| 49 | } | ||
| 50 | end_jiffies = jiffies; | ||
| 51 | r = jiffies_to_msecs(end_jiffies - start_jiffies); | ||
| 52 | |||
| 53 | exit_do_move: | ||
| 54 | if (fence) | ||
| 55 | amdgpu_fence_unref(&fence); | ||
| 56 | return r; | ||
| 57 | } | ||
| 58 | |||
| 59 | |||
| 60 | static void amdgpu_benchmark_log_results(int n, unsigned size, | ||
| 61 | unsigned int time, | ||
| 62 | unsigned sdomain, unsigned ddomain, | ||
| 63 | char *kind) | ||
| 64 | { | ||
| 65 | unsigned int throughput = (n * (size >> 10)) / time; | ||
| 66 | DRM_INFO("amdgpu: %s %u bo moves of %u kB from" | ||
| 67 | " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n", | ||
| 68 | kind, n, size >> 10, sdomain, ddomain, time, | ||
| 69 | throughput * 8, throughput); | ||
| 70 | } | ||
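A worked example of the arithmetic: `size >> 10` converts bytes to KiB, so the quotient is KiB per millisecond, which is within about 2.5% of both MB/s and MiB/s; the driver simply reports it as MB/s and multiplies by 8 for megabits:

```c
#include <stdio.h>

int main(void)
{
	/* 1024 moves of a 1 MiB buffer completing in 512 ms */
	unsigned int n = 1024, size = 1024 * 1024, time_ms = 512;
	unsigned int throughput = (n * (size >> 10)) / time_ms; /* KiB/ms */

	/* prints "2048 MB/s, 16384 Mb/s" */
	printf("%u MB/s, %u Mb/s\n", throughput, throughput * 8);
	return 0;
}
```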
| 71 | |||
| 72 | static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, | ||
| 73 | unsigned sdomain, unsigned ddomain) | ||
| 74 | { | ||
| 75 | struct amdgpu_bo *dobj = NULL; | ||
| 76 | struct amdgpu_bo *sobj = NULL; | ||
| 77 | uint64_t saddr, daddr; | ||
| 78 | int r, n; | ||
| 79 | int time; | ||
| 80 | |||
| 81 | n = AMDGPU_BENCHMARK_ITERATIONS; | ||
| 82 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj); | ||
| 83 | if (r) { | ||
| 84 | goto out_cleanup; | ||
| 85 | } | ||
| 86 | r = amdgpu_bo_reserve(sobj, false); | ||
| 87 | if (unlikely(r != 0)) | ||
| 88 | goto out_cleanup; | ||
| 89 | r = amdgpu_bo_pin(sobj, sdomain, &saddr); | ||
| 90 | amdgpu_bo_unreserve(sobj); | ||
| 91 | if (r) { | ||
| 92 | goto out_cleanup; | ||
| 93 | } | ||
| 94 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj); | ||
| 95 | if (r) { | ||
| 96 | goto out_cleanup; | ||
| 97 | } | ||
| 98 | r = amdgpu_bo_reserve(dobj, false); | ||
| 99 | if (unlikely(r != 0)) | ||
| 100 | goto out_cleanup; | ||
| 101 | r = amdgpu_bo_pin(dobj, ddomain, &daddr); | ||
| 102 | amdgpu_bo_unreserve(dobj); | ||
| 103 | if (r) { | ||
| 104 | goto out_cleanup; | ||
| 105 | } | ||
| 106 | |||
| 107 | if (adev->mman.buffer_funcs) { | ||
| 108 | time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n); | ||
| 109 | if (time < 0) | ||
| 110 | goto out_cleanup; | ||
| 111 | if (time > 0) | ||
| 112 | amdgpu_benchmark_log_results(n, size, time, | ||
| 113 | sdomain, ddomain, "dma"); | ||
| 114 | } | ||
| 115 | |||
| 116 | out_cleanup: | ||
| 117 | if (sobj) { | ||
| 118 | r = amdgpu_bo_reserve(sobj, false); | ||
| 119 | if (likely(r == 0)) { | ||
| 120 | amdgpu_bo_unpin(sobj); | ||
| 121 | amdgpu_bo_unreserve(sobj); | ||
| 122 | } | ||
| 123 | amdgpu_bo_unref(&sobj); | ||
| 124 | } | ||
| 125 | if (dobj) { | ||
| 126 | r = amdgpu_bo_reserve(dobj, false); | ||
| 127 | if (likely(r == 0)) { | ||
| 128 | amdgpu_bo_unpin(dobj); | ||
| 129 | amdgpu_bo_unreserve(dobj); | ||
| 130 | } | ||
| 131 | amdgpu_bo_unref(&dobj); | ||
| 132 | } | ||
| 133 | |||
| 134 | if (r) { | ||
| 135 | DRM_ERROR("Error while benchmarking BO move.\n"); | ||
| 136 | } | ||
| 137 | } | ||
| 138 | |||
| 139 | void amdgpu_benchmark(struct amdgpu_device *adev, int test_number) | ||
| 140 | { | ||
| 141 | int i; | ||
| 142 | int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = { | ||
| 143 | 640 * 480 * 4, | ||
| 144 | 720 * 480 * 4, | ||
| 145 | 800 * 600 * 4, | ||
| 146 | 848 * 480 * 4, | ||
| 147 | 1024 * 768 * 4, | ||
| 148 | 1152 * 768 * 4, | ||
| 149 | 1280 * 720 * 4, | ||
| 150 | 1280 * 800 * 4, | ||
| 151 | 1280 * 854 * 4, | ||
| 152 | 1280 * 960 * 4, | ||
| 153 | 1280 * 1024 * 4, | ||
| 154 | 1440 * 900 * 4, | ||
| 155 | 1400 * 1050 * 4, | ||
| 156 | 1680 * 1050 * 4, | ||
| 157 | 1600 * 1200 * 4, | ||
| 158 | 1920 * 1080 * 4, | ||
| 159 | 1920 * 1200 * 4 | ||
| 160 | }; | ||
| 161 | |||
| 162 | switch (test_number) { | ||
| 163 | case 1: | ||
| 164 | /* simple test, VRAM to GTT and GTT to VRAM */ | ||
| 165 | amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT, | ||
| 166 | AMDGPU_GEM_DOMAIN_VRAM); | ||
| 167 | amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM, | ||
| 168 | AMDGPU_GEM_DOMAIN_GTT); | ||
| 169 | break; | ||
| 170 | case 2: | ||
| 171 | /* simple test, VRAM to VRAM */ | ||
| 172 | amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM, | ||
| 173 | AMDGPU_GEM_DOMAIN_VRAM); | ||
| 174 | break; | ||
| 175 | case 3: | ||
| 176 | /* GTT to VRAM, buffer size sweep, powers of 2 */ | ||
| 177 | for (i = 1; i <= 16384; i <<= 1) | ||
| 178 | amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE, | ||
| 179 | AMDGPU_GEM_DOMAIN_GTT, | ||
| 180 | AMDGPU_GEM_DOMAIN_VRAM); | ||
| 181 | break; | ||
| 182 | case 4: | ||
| 183 | /* VRAM to GTT, buffer size sweep, powers of 2 */ | ||
| 184 | for (i = 1; i <= 16384; i <<= 1) | ||
| 185 | amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE, | ||
| 186 | AMDGPU_GEM_DOMAIN_VRAM, | ||
| 187 | AMDGPU_GEM_DOMAIN_GTT); | ||
| 188 | break; | ||
| 189 | case 5: | ||
| 190 | /* VRAM to VRAM, buffer size sweep, powers of 2 */ | ||
| 191 | for (i = 1; i <= 16384; i <<= 1) | ||
| 192 | amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE, | ||
| 193 | AMDGPU_GEM_DOMAIN_VRAM, | ||
| 194 | AMDGPU_GEM_DOMAIN_VRAM); | ||
| 195 | break; | ||
| 196 | case 6: | ||
| 197 | /* GTT to VRAM, buffer size sweep, common modes */ | ||
| 198 | for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) | ||
| 199 | amdgpu_benchmark_move(adev, common_modes[i], | ||
| 200 | AMDGPU_GEM_DOMAIN_GTT, | ||
| 201 | AMDGPU_GEM_DOMAIN_VRAM); | ||
| 202 | break; | ||
| 203 | case 7: | ||
| 204 | /* VRAM to GTT, buffer size sweep, common modes */ | ||
| 205 | for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) | ||
| 206 | amdgpu_benchmark_move(adev, common_modes[i], | ||
| 207 | AMDGPU_GEM_DOMAIN_VRAM, | ||
| 208 | AMDGPU_GEM_DOMAIN_GTT); | ||
| 209 | break; | ||
| 210 | case 8: | ||
| 211 | /* VRAM to VRAM, buffer size sweep, common modes */ | ||
| 212 | for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) | ||
| 213 | amdgpu_benchmark_move(adev, common_modes[i], | ||
| 214 | AMDGPU_GEM_DOMAIN_VRAM, | ||
| 215 | AMDGPU_GEM_DOMAIN_VRAM); | ||
| 216 | break; | ||
| 217 | |||
| 218 | default: | ||
| 219 | DRM_ERROR("Unknown benchmark\n"); | ||
| 220 | } | ||
| 221 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c new file mode 100644 index 000000000000..d7a3ab2624f0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | |||
| @@ -0,0 +1,359 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #include <drm/drmP.h> | ||
| 29 | #include "amdgpu.h" | ||
| 30 | #include "atom.h" | ||
| 31 | |||
| 32 | #include <linux/vga_switcheroo.h> | ||
| 33 | #include <linux/slab.h> | ||
| 34 | #include <linux/acpi.h> | ||
| 35 | /* | ||
| 36 | * BIOS. | ||
| 37 | */ | ||
| 38 | |||
| 39 | /* If you boot an IGP board with a discrete card as the primary, | ||
| 40 | * the IGP rom is not accessible via the rom bar as the IGP rom is | ||
| 41 | * part of the system bios. On boot, the system bios puts a | ||
| 42 | * copy of the igp rom at the start of vram if a discrete card is | ||
| 43 | * present. | ||
| 44 | */ | ||
| 45 | static bool igp_read_bios_from_vram(struct amdgpu_device *adev) | ||
| 46 | { | ||
| 47 | uint8_t __iomem *bios; | ||
| 48 | resource_size_t vram_base; | ||
| 49 | resource_size_t size = 256 * 1024; /* ??? */ | ||
| 50 | |||
| 51 | if (!(adev->flags & AMDGPU_IS_APU)) | ||
| 52 | if (!amdgpu_card_posted(adev)) | ||
| 53 | return false; | ||
| 54 | |||
| 55 | adev->bios = NULL; | ||
| 56 | vram_base = pci_resource_start(adev->pdev, 0); | ||
| 57 | bios = ioremap(vram_base, size); | ||
| 58 | if (!bios) { | ||
| 59 | return false; | ||
| 60 | } | ||
| 61 | |||
| 62 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { | ||
| 63 | iounmap(bios); | ||
| 64 | return false; | ||
| 65 | } | ||
| 66 | adev->bios = kmalloc(size, GFP_KERNEL); | ||
| 67 | if (adev->bios == NULL) { | ||
| 68 | iounmap(bios); | ||
| 69 | return false; | ||
| 70 | } | ||
| 71 | memcpy_fromio(adev->bios, bios, size); | ||
| 72 | iounmap(bios); | ||
| 73 | return true; | ||
| 74 | } | ||
| 75 | |||
| 76 | bool amdgpu_read_bios(struct amdgpu_device *adev) | ||
| 77 | { | ||
| 78 | uint8_t __iomem *bios; | ||
| 79 | size_t size; | ||
| 80 | |||
| 81 | adev->bios = NULL; | ||
| 82 | /* XXX: some cards may return 0 for rom size? ddx has a workaround */ | ||
| 83 | bios = pci_map_rom(adev->pdev, &size); | ||
| 84 | if (!bios) { | ||
| 85 | return false; | ||
| 86 | } | ||
| 87 | |||
| 88 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { | ||
| 89 | pci_unmap_rom(adev->pdev, bios); | ||
| 90 | return false; | ||
| 91 | } | ||
| 92 | adev->bios = kmemdup(bios, size, GFP_KERNEL); | ||
| 93 | if (adev->bios == NULL) { | ||
| 94 | pci_unmap_rom(adev->pdev, bios); | ||
| 95 | return false; | ||
| 96 | } | ||
| 97 | pci_unmap_rom(adev->pdev, bios); | ||
| 98 | return true; | ||
| 99 | } | ||
| 100 | |||
| 101 | static bool amdgpu_read_platform_bios(struct amdgpu_device *adev) | ||
| 102 | { | ||
| 103 | uint8_t __iomem *bios; | ||
| 104 | size_t size; | ||
| 105 | |||
| 106 | adev->bios = NULL; | ||
| 107 | |||
| 108 | bios = pci_platform_rom(adev->pdev, &size); | ||
| 109 | if (!bios) { | ||
| 110 | return false; | ||
| 111 | } | ||
| 112 | |||
| 113 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { | ||
| 114 | return false; | ||
| 115 | } | ||
| 116 | adev->bios = kmemdup(bios, size, GFP_KERNEL); | ||
| 117 | if (adev->bios == NULL) { | ||
| 118 | return false; | ||
| 119 | } | ||
| 120 | |||
| 121 | return true; | ||
| 122 | } | ||
| 123 | |||
| 124 | #ifdef CONFIG_ACPI | ||
| 125 | /* ATRM is used to get the BIOS on the discrete cards in | ||
| 126 | * dual-gpu systems. | ||
| 127 | */ | ||
| 128 | /* retrieve the ROM in 4k blocks */ | ||
| 129 | #define ATRM_BIOS_PAGE 4096 | ||
| 130 | /** | ||
| 131 | * amdgpu_atrm_call - fetch a chunk of the vbios | ||
| 132 | * | ||
| 133 | * @atrm_handle: acpi ATRM handle | ||
| 134 | * @bios: vbios image pointer | ||
| 135 | * @offset: offset of vbios image data to fetch | ||
| 136 | * @len: length of vbios image data to fetch | ||
| 137 | * | ||
| 138 | * Executes ATRM to fetch a chunk of the discrete | ||
| 139 | * vbios image on PX systems (all asics). | ||
| 140 | * Returns the length of the buffer fetched. | ||
| 141 | */ | ||
| 142 | static int amdgpu_atrm_call(acpi_handle atrm_handle, uint8_t *bios, | ||
| 143 | int offset, int len) | ||
| 144 | { | ||
| 145 | acpi_status status; | ||
| 146 | union acpi_object atrm_arg_elements[2], *obj; | ||
| 147 | struct acpi_object_list atrm_arg; | ||
| 148 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; | ||
| 149 | |||
| 150 | atrm_arg.count = 2; | ||
| 151 | atrm_arg.pointer = &atrm_arg_elements[0]; | ||
| 152 | |||
| 153 | atrm_arg_elements[0].type = ACPI_TYPE_INTEGER; | ||
| 154 | atrm_arg_elements[0].integer.value = offset; | ||
| 155 | |||
| 156 | atrm_arg_elements[1].type = ACPI_TYPE_INTEGER; | ||
| 157 | atrm_arg_elements[1].integer.value = len; | ||
| 158 | |||
| 159 | status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); | ||
| 160 | if (ACPI_FAILURE(status)) { | ||
| 161 | printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); | ||
| 162 | return -ENODEV; | ||
| 163 | } | ||
| 164 | |||
| 165 | obj = (union acpi_object *)buffer.pointer; | ||
| 166 | memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length); | ||
| 167 | len = obj->buffer.length; | ||
| 168 | kfree(buffer.pointer); | ||
| 169 | return len; | ||
| 170 | } | ||
| 171 | |||
| 172 | static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) | ||
| 173 | { | ||
| 174 | int ret; | ||
| 175 | int size = 256 * 1024; | ||
| 176 | int i; | ||
| 177 | struct pci_dev *pdev = NULL; | ||
| 178 | acpi_handle dhandle, atrm_handle; | ||
| 179 | acpi_status status; | ||
| 180 | bool found = false; | ||
| 181 | |||
| 182 | /* ATRM is for the discrete card only */ | ||
| 183 | if (adev->flags & AMDGPU_IS_APU) | ||
| 184 | return false; | ||
| 185 | |||
| 186 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { | ||
| 187 | dhandle = ACPI_HANDLE(&pdev->dev); | ||
| 188 | if (!dhandle) | ||
| 189 | continue; | ||
| 190 | |||
| 191 | status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); | ||
| 192 | if (!ACPI_FAILURE(status)) { | ||
| 193 | found = true; | ||
| 194 | break; | ||
| 195 | } | ||
| 196 | } | ||
| 197 | |||
| 198 | if (!found) { | ||
| 199 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { | ||
| 200 | dhandle = ACPI_HANDLE(&pdev->dev); | ||
| 201 | if (!dhandle) | ||
| 202 | continue; | ||
| 203 | |||
| 204 | status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); | ||
| 205 | if (!ACPI_FAILURE(status)) { | ||
| 206 | found = true; | ||
| 207 | break; | ||
| 208 | } | ||
| 209 | } | ||
| 210 | } | ||
| 211 | |||
| 212 | if (!found) | ||
| 213 | return false; | ||
| 214 | |||
| 215 | adev->bios = kmalloc(size, GFP_KERNEL); | ||
| 216 | if (!adev->bios) { | ||
| 217 | DRM_ERROR("Unable to allocate bios\n"); | ||
| 218 | return false; | ||
| 219 | } | ||
| 220 | |||
| 221 | for (i = 0; i < size / ATRM_BIOS_PAGE; i++) { | ||
| 222 | ret = amdgpu_atrm_call(atrm_handle, | ||
| 223 | adev->bios, | ||
| 224 | (i * ATRM_BIOS_PAGE), | ||
| 225 | ATRM_BIOS_PAGE); | ||
| 226 | if (ret < ATRM_BIOS_PAGE) | ||
| 227 | break; | ||
| 228 | } | ||
| 229 | |||
| 230 | if (i == 0 || adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) { | ||
| 231 | kfree(adev->bios); | ||
| 232 | return false; | ||
| 233 | } | ||
| 234 | return true; | ||
| 235 | } | ||
| 236 | #else | ||
| 237 | static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) | ||
| 238 | { | ||
| 239 | return false; | ||
| 240 | } | ||
| 241 | #endif | ||
| 242 | |||
| 243 | static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev) | ||
| 244 | { | ||
| 245 | if (adev->flags & AMDGPU_IS_APU) | ||
| 246 | return igp_read_bios_from_vram(adev); | ||
| 247 | else | ||
| 248 | return amdgpu_asic_read_disabled_bios(adev); | ||
| 249 | } | ||
| 250 | |||
| 251 | #ifdef CONFIG_ACPI | ||
| 252 | static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) | ||
| 253 | { | ||
| 254 | bool ret = false; | ||
| 255 | struct acpi_table_header *hdr; | ||
| 256 | acpi_size tbl_size; | ||
| 257 | UEFI_ACPI_VFCT *vfct; | ||
| 258 | GOP_VBIOS_CONTENT *vbios; | ||
| 259 | VFCT_IMAGE_HEADER *vhdr; | ||
| 260 | |||
| 261 | if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size))) | ||
| 262 | return false; | ||
| 263 | if (tbl_size < sizeof(UEFI_ACPI_VFCT)) { | ||
| 264 | DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); | ||
| 265 | goto out_unmap; | ||
| 266 | } | ||
| 267 | |||
| 268 | vfct = (UEFI_ACPI_VFCT *)hdr; | ||
| 269 | if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) { | ||
| 270 | DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); | ||
| 271 | goto out_unmap; | ||
| 272 | } | ||
| 273 | |||
| 274 | vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset); | ||
| 275 | vhdr = &vbios->VbiosHeader; | ||
| 276 | DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n", | ||
| 277 | vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction, | ||
| 278 | vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength); | ||
| 279 | |||
| 280 | if (vhdr->PCIBus != adev->pdev->bus->number || | ||
| 281 | vhdr->PCIDevice != PCI_SLOT(adev->pdev->devfn) || | ||
| 282 | vhdr->PCIFunction != PCI_FUNC(adev->pdev->devfn) || | ||
| 283 | vhdr->VendorID != adev->pdev->vendor || | ||
| 284 | vhdr->DeviceID != adev->pdev->device) { | ||
| 285 | DRM_INFO("ACPI VFCT table is not for this card\n"); | ||
| 286 | goto out_unmap; | ||
| 287 | } | ||
| 288 | |||
| 289 | if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) { | ||
| 290 | DRM_ERROR("ACPI VFCT image truncated\n"); | ||
| 291 | goto out_unmap; | ||
| 292 | } | ||
| 293 | |||
| 294 | adev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL); | ||
| 295 | ret = !!adev->bios; | ||
| 296 | |||
| 297 | out_unmap: | ||
| 298 | return ret; | ||
| 299 | } | ||
| 300 | #else | ||
| 301 | static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) | ||
| 302 | { | ||
| 303 | return false; | ||
| 304 | } | ||
| 305 | #endif | ||
| 306 | |||
| 307 | bool amdgpu_get_bios(struct amdgpu_device *adev) | ||
| 308 | { | ||
| 309 | bool r; | ||
| 310 | uint16_t tmp; | ||
| 311 | |||
| 312 | r = amdgpu_atrm_get_bios(adev); | ||
| 313 | if (r == false) | ||
| 314 | r = amdgpu_acpi_vfct_bios(adev); | ||
| 315 | if (r == false) | ||
| 316 | r = igp_read_bios_from_vram(adev); | ||
| 317 | if (r == false) | ||
| 318 | r = amdgpu_read_bios(adev); | ||
| 319 | if (r == false) { | ||
| 320 | r = amdgpu_read_disabled_bios(adev); | ||
| 321 | } | ||
| 322 | if (r == false) { | ||
| 323 | r = amdgpu_read_platform_bios(adev); | ||
| 324 | } | ||
| 325 | if (r == false || adev->bios == NULL) { | ||
| 326 | DRM_ERROR("Unable to locate a BIOS ROM\n"); | ||
| 327 | adev->bios = NULL; | ||
| 328 | return false; | ||
| 329 | } | ||
| 330 | if (adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) { | ||
| 331 | printk("BIOS signature incorrect %x %x\n", adev->bios[0], adev->bios[1]); | ||
| 332 | goto free_bios; | ||
| 333 | } | ||
| 334 | |||
| 335 | tmp = RBIOS16(0x18); | ||
| 336 | if (RBIOS8(tmp + 0x14) != 0x0) { | ||
| 337 | DRM_INFO("Not an x86 BIOS ROM, not using.\n"); | ||
| 338 | goto free_bios; | ||
| 339 | } | ||
| 340 | |||
| 341 | adev->bios_header_start = RBIOS16(0x48); | ||
| 342 | if (!adev->bios_header_start) { | ||
| 343 | goto free_bios; | ||
| 344 | } | ||
| 345 | tmp = adev->bios_header_start + 4; | ||
| 346 | if (!memcmp(adev->bios + tmp, "ATOM", 4) || | ||
| 347 | !memcmp(adev->bios + tmp, "MOTA", 4)) { | ||
| 348 | adev->is_atom_bios = true; | ||
| 349 | } else { | ||
| 350 | adev->is_atom_bios = false; | ||
| 351 | } | ||
| 352 | |||
| 353 | DRM_DEBUG("%sBIOS detected\n", adev->is_atom_bios ? "ATOM" : "COM"); | ||
| 354 | return true; | ||
| 355 | free_bios: | ||
| 356 | kfree(adev->bios); | ||
| 357 | adev->bios = NULL; | ||
| 358 | return false; | ||
| 359 | } | ||
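
The fallback order in amdgpu_get_bios() is ATRM, the ACPI VFCT table, the IGP copy in VRAM, the PCI ROM BAR, the asic-specific disabled-BIOS path, and finally the platform ROM; whatever turns up is then validated against the standard PC expansion-ROM layout. As a reading aid, here is a minimal standalone sketch of what the magic offsets above mean (not part of the patch; the helper name is made up):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static bool vbios_header_ok(const uint8_t *bios, size_t len, bool *is_atom)
{
	uint16_t pcir, hdr;

	if (len < 0x4a || bios[0] != 0x55 || bios[1] != 0xaa)
		return false;                   /* 0x55AA ROM signature */

	pcir = bios[0x18] | (bios[0x19] << 8);  /* -> PCI data structure */
	if (pcir + 0x15u > len || bios[pcir + 0x14] != 0x0)
		return false;                   /* code type 0 == x86 */

	hdr = bios[0x48] | (bios[0x49] << 8);   /* -> ROM header */
	if (!hdr || hdr + 8u > len)
		return false;
	/* "ATOM"/"MOTA" at header+4 marks an ATOM BIOS; anything else
	 * is treated as a legacy COMBIOS by the driver. */
	*is_atom = !memcmp(bios + hdr + 4, "ATOM", 4) ||
		   !memcmp(bios + hdr + 4, "MOTA", 4);
	return true;
}
```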
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c new file mode 100644 index 000000000000..819fb861ac04 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | |||
| @@ -0,0 +1,268 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2015 Advanced Micro Devices, Inc. | ||
| 3 | * All Rights Reserved. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the | ||
| 7 | * "Software"), to deal in the Software without restriction, including | ||
| 8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 10 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 11 | * the following conditions: | ||
| 12 | * | ||
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 20 | * | ||
| 21 | * The above copyright notice and this permission notice (including the | ||
| 22 | * next paragraph) shall be included in all copies or substantial portions | ||
| 23 | * of the Software. | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | /* | ||
| 27 | * Authors: | ||
| 28 | * Christian König <deathsimple@vodafone.de> | ||
| 29 | */ | ||
| 30 | |||
| 31 | #include <drm/drmP.h> | ||
| 32 | #include "amdgpu.h" | ||
| 33 | |||
| 34 | static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv, | ||
| 35 | struct amdgpu_bo_list **result, | ||
| 36 | int *id) | ||
| 37 | { | ||
| 38 | int r; | ||
| 39 | |||
| 40 | *result = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL); | ||
| 41 | if (!*result) | ||
| 42 | return -ENOMEM; | ||
| 43 | |||
| 44 | mutex_lock(&fpriv->bo_list_lock); | ||
| 45 | r = idr_alloc(&fpriv->bo_list_handles, *result, | ||
| 46 | 0, 0, GFP_KERNEL); | ||
| 47 | if (r < 0) { | ||
| 48 | mutex_unlock(&fpriv->bo_list_lock); | ||
| 49 | kfree(*result); | ||
| 50 | return r; | ||
| 51 | } | ||
| 52 | *id = r; | ||
| 53 | |||
| 54 | mutex_init(&(*result)->lock); | ||
| 55 | (*result)->num_entries = 0; | ||
| 56 | (*result)->array = NULL; | ||
| 57 | |||
| 58 | mutex_lock(&(*result)->lock); | ||
| 59 | mutex_unlock(&fpriv->bo_list_lock); | ||
| 60 | |||
| 61 | return 0; | ||
| 62 | } | ||
| 63 | |||
| 64 | static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id) | ||
| 65 | { | ||
| 66 | struct amdgpu_bo_list *list; | ||
| 67 | |||
| 68 | mutex_lock(&fpriv->bo_list_lock); | ||
| 69 | list = idr_find(&fpriv->bo_list_handles, id); | ||
| 70 | if (list) { | ||
| 71 | mutex_lock(&list->lock); | ||
| 72 | idr_remove(&fpriv->bo_list_handles, id); | ||
| 73 | mutex_unlock(&list->lock); | ||
| 74 | amdgpu_bo_list_free(list); | ||
| 75 | } | ||
| 76 | mutex_unlock(&fpriv->bo_list_lock); | ||
| 77 | } | ||
| 78 | |||
| 79 | static int amdgpu_bo_list_set(struct amdgpu_device *adev, | ||
| 80 | struct drm_file *filp, | ||
| 81 | struct amdgpu_bo_list *list, | ||
| 82 | struct drm_amdgpu_bo_list_entry *info, | ||
| 83 | unsigned num_entries) | ||
| 84 | { | ||
| 85 | struct amdgpu_bo_list_entry *array; | ||
| 86 | struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo; | ||
| 87 | struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo; | ||
| 88 | struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo; | ||
| 89 | |||
| 90 | bool has_userptr = false; | ||
| 91 | unsigned i; | ||
| 92 | |||
| 93 | array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry)); | ||
| 94 | if (!array) | ||
| 95 | return -ENOMEM; | ||
| 96 | memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry)); | ||
| 97 | |||
| 98 | for (i = 0; i < num_entries; ++i) { | ||
| 99 | struct amdgpu_bo_list_entry *entry = &array[i]; | ||
| 100 | struct drm_gem_object *gobj; | ||
| 101 | |||
| 102 | gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle); | ||
| 103 | if (!gobj) | ||
| 104 | goto error_free; | ||
| 105 | |||
| 106 | entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); | ||
| 107 | drm_gem_object_unreference_unlocked(gobj); | ||
| 108 | entry->priority = info[i].bo_priority; | ||
| 109 | entry->prefered_domains = entry->robj->initial_domain; | ||
| 110 | entry->allowed_domains = entry->prefered_domains; | ||
| 111 | if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) | ||
| 112 | entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; | ||
| 113 | if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) { | ||
| 114 | has_userptr = true; | ||
| 115 | entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT; | ||
| 116 | entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; | ||
| 117 | } | ||
| 118 | entry->tv.bo = &entry->robj->tbo; | ||
| 119 | entry->tv.shared = true; | ||
| 120 | |||
| 121 | if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) | ||
| 122 | gds_obj = entry->robj; | ||
| 123 | if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS) | ||
| 124 | gws_obj = entry->robj; | ||
| 125 | if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA) | ||
| 126 | oa_obj = entry->robj; | ||
| 127 | } | ||
| 128 | |||
| 129 | for (i = 0; i < list->num_entries; ++i) | ||
| 130 | amdgpu_bo_unref(&list->array[i].robj); | ||
| 131 | |||
| 132 | drm_free_large(list->array); | ||
| 133 | |||
| 134 | list->gds_obj = gds_obj; | ||
| 135 | list->gws_obj = gws_obj; | ||
| 136 | list->oa_obj = oa_obj; | ||
| 137 | list->has_userptr = has_userptr; | ||
| 138 | list->array = array; | ||
| 139 | list->num_entries = num_entries; | ||
| 140 | |||
| 141 | return 0; | ||
| 142 | |||
| 143 | error_free: | ||
| 144 | drm_free_large(array); | ||
| 145 | return -ENOENT; | ||
| 146 | } | ||
| 147 | |||
| 148 | struct amdgpu_bo_list * | ||
| 149 | amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id) | ||
| 150 | { | ||
| 151 | struct amdgpu_bo_list *result; | ||
| 152 | |||
| 153 | mutex_lock(&fpriv->bo_list_lock); | ||
| 154 | result = idr_find(&fpriv->bo_list_handles, id); | ||
| 155 | if (result) | ||
| 156 | mutex_lock(&result->lock); | ||
| 157 | mutex_unlock(&fpriv->bo_list_lock); | ||
| 158 | return result; | ||
| 159 | } | ||
| 160 | |||
| 161 | void amdgpu_bo_list_put(struct amdgpu_bo_list *list) | ||
| 162 | { | ||
| 163 | mutex_unlock(&list->lock); | ||
| 164 | } | ||
| 165 | |||
| 166 | void amdgpu_bo_list_free(struct amdgpu_bo_list *list) | ||
| 167 | { | ||
| 168 | unsigned i; | ||
| 169 | |||
| 170 | for (i = 0; i < list->num_entries; ++i) | ||
| 171 | amdgpu_bo_unref(&list->array[i].robj); | ||
| 172 | |||
| 173 | mutex_destroy(&list->lock); | ||
| 174 | drm_free_large(list->array); | ||
| 175 | kfree(list); | ||
| 176 | } | ||
| 177 | |||
| 178 | int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, | ||
| 179 | struct drm_file *filp) | ||
| 180 | { | ||
| 181 | const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry); | ||
| 182 | |||
| 183 | struct amdgpu_device *adev = dev->dev_private; | ||
| 184 | struct amdgpu_fpriv *fpriv = filp->driver_priv; | ||
| 185 | union drm_amdgpu_bo_list *args = data; | ||
| 186 | uint32_t handle = args->in.list_handle; | ||
| 187 | const void __user *uptr = (const void*)(long)args->in.bo_info_ptr; | ||
| 188 | |||
| 189 | struct drm_amdgpu_bo_list_entry *info; | ||
| 190 | struct amdgpu_bo_list *list; | ||
| 191 | |||
| 192 | int r; | ||
| 193 | |||
| 194 | info = drm_malloc_ab(args->in.bo_number, | ||
| 195 | sizeof(struct drm_amdgpu_bo_list_entry)); | ||
| 196 | if (!info) | ||
| 197 | return -ENOMEM; | ||
| 198 | |||
| 199 | /* copy the handle array from userspace to a kernel buffer */ | ||
| 200 | r = -EFAULT; | ||
| 201 | if (likely(info_size == args->in.bo_info_size)) { | ||
| 202 | unsigned long bytes = args->in.bo_number * | ||
| 203 | args->in.bo_info_size; | ||
| 204 | |||
| 205 | if (copy_from_user(info, uptr, bytes)) | ||
| 206 | goto error_free; | ||
| 207 | |||
| 208 | } else { | ||
| 209 | unsigned long bytes = min(args->in.bo_info_size, info_size); | ||
| 210 | unsigned i; | ||
| 211 | |||
| 212 | memset(info, 0, args->in.bo_number * info_size); | ||
| 213 | for (i = 0; i < args->in.bo_number; ++i) { | ||
| 214 | if (copy_from_user(&info[i], uptr, bytes)) | ||
| 215 | goto error_free; | ||
| 216 | |||
| 217 | uptr += args->in.bo_info_size; | ||
| 218 | } | ||
| 219 | } | ||
| 220 | |||
| 221 | switch (args->in.operation) { | ||
| 222 | case AMDGPU_BO_LIST_OP_CREATE: | ||
| 223 | r = amdgpu_bo_list_create(fpriv, &list, &handle); | ||
| 224 | if (r) | ||
| 225 | goto error_free; | ||
| 226 | |||
| 227 | r = amdgpu_bo_list_set(adev, filp, list, info, | ||
| 228 | args->in.bo_number); | ||
| 229 | amdgpu_bo_list_put(list); | ||
| 230 | if (r) | ||
| 231 | goto error_free; | ||
| 232 | |||
| 233 | break; | ||
| 234 | |||
| 235 | case AMDGPU_BO_LIST_OP_DESTROY: | ||
| 236 | amdgpu_bo_list_destroy(fpriv, handle); | ||
| 237 | handle = 0; | ||
| 238 | break; | ||
| 239 | |||
| 240 | case AMDGPU_BO_LIST_OP_UPDATE: | ||
| 241 | r = -ENOENT; | ||
| 242 | list = amdgpu_bo_list_get(fpriv, handle); | ||
| 243 | if (!list) | ||
| 244 | goto error_free; | ||
| 245 | |||
| 246 | r = amdgpu_bo_list_set(adev, filp, list, info, | ||
| 247 | args->in.bo_number); | ||
| 248 | amdgpu_bo_list_put(list); | ||
| 249 | if (r) | ||
| 250 | goto error_free; | ||
| 251 | |||
| 252 | break; | ||
| 253 | |||
| 254 | default: | ||
| 255 | r = -EINVAL; | ||
| 256 | goto error_free; | ||
| 257 | } | ||
| 258 | |||
| 259 | memset(args, 0, sizeof(*args)); | ||
| 260 | args->out.list_handle = handle; | ||
| 261 | drm_free_large(info); | ||
| 262 | |||
| 263 | return 0; | ||
| 264 | |||
| 265 | error_free: | ||
| 266 | drm_free_large(info); | ||
| 267 | return r; | ||
| 268 | } | ||
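
amdgpu_bo_list_ioctl() is the whole userspace-facing surface of BO lists. A hedged usage sketch, assuming the union drm_amdgpu_bo_list UAPI from amdgpu_drm.h in this same patch set (the wrapper name and GEM handles are made up):

```c
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <amdgpu_drm.h>		/* via libdrm's include path */

/* Create a BO list over two GEM handles; returns 0 and stores the
 * list handle on success. A matching bo_info_size hits the single
 * copy_from_user() fast path in the handler above. */
static int create_bo_list(int fd, uint32_t bo0, uint32_t bo1,
			  uint32_t *list_handle)
{
	struct drm_amdgpu_bo_list_entry entries[2] = {
		{ .bo_handle = bo0, .bo_priority = 0 },
		{ .bo_handle = bo1, .bo_priority = 0 },
	};
	union drm_amdgpu_bo_list args;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = 2;
	args.in.bo_info_size = sizeof(entries[0]);
	args.in.bo_info_ptr = (uintptr_t)entries;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_BO_LIST, &args))
		return -1;
	*list_handle = args.out.list_handle;
	return 0;
}
```

The bo_info_size round-trip is what lets drm_amdgpu_bo_list_entry grow later without breaking old binaries: a size mismatch simply routes each entry through the slower per-entry copy loop above.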
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c new file mode 100644 index 000000000000..6a8d28f81780 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | |||
| @@ -0,0 +1,1907 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2007-8 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the "Software"), | ||
| 7 | * to deal in the Software without restriction, including without limitation | ||
| 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 10 | * Software is furnished to do so, subject to the following conditions: | ||
| 11 | * | ||
| 12 | * The above copyright notice and this permission notice shall be included in | ||
| 13 | * all copies or substantial portions of the Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 22 | * | ||
| 23 | * Authors: Dave Airlie | ||
| 24 | * Alex Deucher | ||
| 25 | */ | ||
| 26 | #include <drm/drmP.h> | ||
| 27 | #include <drm/drm_edid.h> | ||
| 28 | #include <drm/drm_crtc_helper.h> | ||
| 29 | #include <drm/drm_fb_helper.h> | ||
| 30 | #include <drm/amdgpu_drm.h> | ||
| 31 | #include "amdgpu.h" | ||
| 32 | #include "atom.h" | ||
| 33 | #include "atombios_encoders.h" | ||
| 34 | #include "atombios_dp.h" | ||
| 35 | #include "amdgpu_connectors.h" | ||
| 36 | #include "amdgpu_i2c.h" | ||
| 37 | |||
| 38 | #include <linux/pm_runtime.h> | ||
| 39 | |||
| 40 | void amdgpu_connector_hotplug(struct drm_connector *connector) | ||
| 41 | { | ||
| 42 | struct drm_device *dev = connector->dev; | ||
| 43 | struct amdgpu_device *adev = dev->dev_private; | ||
| 44 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 45 | |||
| 46 | /* bail if the connector does not have an hpd pin, e.g., | ||
| 47 | * VGA, TV, etc. | ||
| 48 | */ | ||
| 49 | if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) | ||
| 50 | return; | ||
| 51 | |||
| 52 | amdgpu_display_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); | ||
| 53 | |||
| 54 | /* if the connector is already off, don't turn it back on */ | ||
| 55 | if (connector->dpms != DRM_MODE_DPMS_ON) | ||
| 56 | return; | ||
| 57 | |||
| 58 | /* just deal with DP (not eDP) here. */ | ||
| 59 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { | ||
| 60 | struct amdgpu_connector_atom_dig *dig_connector = | ||
| 61 | amdgpu_connector->con_priv; | ||
| 62 | |||
| 63 | /* if the existing sink type was not DP, no need to retrain */ | ||
| 64 | if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) | ||
| 65 | return; | ||
| 66 | |||
| 67 | /* first get sink type as it may be reset after (un)plug */ | ||
| 68 | dig_connector->dp_sink_type = amdgpu_atombios_dp_get_sinktype(amdgpu_connector); | ||
| 69 | /* don't do anything if sink is not display port, i.e., | ||
| 70 | * passive dp->(dvi|hdmi) adaptor | ||
| 71 | */ | ||
| 72 | if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { | ||
| 73 | int saved_dpms = connector->dpms; | ||
| 74 | /* Only turn off the display if it's physically disconnected */ | ||
| 75 | if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { | ||
| 76 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | ||
| 77 | } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { | ||
| 78 | /* set it to OFF so that drm_helper_connector_dpms() | ||
| 79 | * won't return immediately since the current state | ||
| 80 | * is ON at this point. | ||
| 81 | */ | ||
| 82 | connector->dpms = DRM_MODE_DPMS_OFF; | ||
| 83 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
| 84 | } | ||
| 85 | connector->dpms = saved_dpms; | ||
| 86 | } | ||
| 87 | } | ||
| 88 | } | ||
| 89 | |||
| 90 | static void amdgpu_connector_property_change_mode(struct drm_encoder *encoder) | ||
| 91 | { | ||
| 92 | struct drm_crtc *crtc = encoder->crtc; | ||
| 93 | |||
| 94 | if (crtc && crtc->enabled) { | ||
| 95 | drm_crtc_helper_set_mode(crtc, &crtc->mode, | ||
| 96 | crtc->x, crtc->y, crtc->primary->fb); | ||
| 97 | } | ||
| 98 | } | ||
| 99 | |||
| 100 | int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) | ||
| 101 | { | ||
| 102 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 103 | struct amdgpu_connector_atom_dig *dig_connector; | ||
| 104 | int bpc = 8; | ||
| 105 | unsigned mode_clock, max_tmds_clock; | ||
| 106 | |||
| 107 | switch (connector->connector_type) { | ||
| 108 | case DRM_MODE_CONNECTOR_DVII: | ||
| 109 | case DRM_MODE_CONNECTOR_HDMIB: | ||
| 110 | if (amdgpu_connector->use_digital) { | ||
| 111 | if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { | ||
| 112 | if (connector->display_info.bpc) | ||
| 113 | bpc = connector->display_info.bpc; | ||
| 114 | } | ||
| 115 | } | ||
| 116 | break; | ||
| 117 | case DRM_MODE_CONNECTOR_DVID: | ||
| 118 | case DRM_MODE_CONNECTOR_HDMIA: | ||
| 119 | if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { | ||
| 120 | if (connector->display_info.bpc) | ||
| 121 | bpc = connector->display_info.bpc; | ||
| 122 | } | ||
| 123 | break; | ||
| 124 | case DRM_MODE_CONNECTOR_DisplayPort: | ||
| 125 | dig_connector = amdgpu_connector->con_priv; | ||
| 126 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | ||
| 127 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) || | ||
| 128 | drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { | ||
| 129 | if (connector->display_info.bpc) | ||
| 130 | bpc = connector->display_info.bpc; | ||
| 131 | } | ||
| 132 | break; | ||
| 133 | case DRM_MODE_CONNECTOR_eDP: | ||
| 134 | case DRM_MODE_CONNECTOR_LVDS: | ||
| 135 | if (connector->display_info.bpc) | ||
| 136 | bpc = connector->display_info.bpc; | ||
| 137 | else { | ||
| 138 | struct drm_connector_helper_funcs *connector_funcs = | ||
| 139 | connector->helper_private; | ||
| 140 | struct drm_encoder *encoder = connector_funcs->best_encoder(connector); | ||
| 141 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 142 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
| 143 | |||
| 144 | if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR) | ||
| 145 | bpc = 6; | ||
| 146 | else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR) | ||
| 147 | bpc = 8; | ||
| 148 | } | ||
| 149 | break; | ||
| 150 | } | ||
| 151 | |||
| 152 | if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { | ||
| 153 | /* | ||
| 154 | * Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make | ||
| 155 | * much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at | ||
| 156 | * 12 bpc is always supported on hdmi deep color sinks, as this is | ||
| 157 | * required by the HDMI-1.3 spec. Clamp to a safe 12 bpc maximum. | ||
| 158 | */ | ||
| 159 | if (bpc > 12) { | ||
| 160 | DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 12 bpc.\n", | ||
| 161 | connector->name, bpc); | ||
| 162 | bpc = 12; | ||
| 163 | } | ||
| 164 | |||
| 165 | /* Any defined maximum tmds clock limit we must not exceed? */ | ||
| 166 | if (connector->max_tmds_clock > 0) { | ||
| 167 | /* mode_clock is clock in kHz for mode to be modeset on this connector */ | ||
| 168 | mode_clock = amdgpu_connector->pixelclock_for_modeset; | ||
| 169 | |||
| 170 | /* Maximum allowable input clock in kHz */ | ||
| 171 | max_tmds_clock = connector->max_tmds_clock * 1000; | ||
| 172 | |||
| 173 | DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n", | ||
| 174 | connector->name, mode_clock, max_tmds_clock); | ||
| 175 | |||
| 176 | /* Check if bpc is within clock limit. Try to degrade gracefully otherwise */ | ||
| 177 | if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) { | ||
| 178 | if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) && | ||
| 179 | (mode_clock * 5/4 <= max_tmds_clock)) | ||
| 180 | bpc = 10; | ||
| 181 | else | ||
| 182 | bpc = 8; | ||
| 183 | |||
| 184 | DRM_DEBUG("%s: HDMI deep color 12 bpc exceeds max tmds clock. Using %d bpc.\n", | ||
| 185 | connector->name, bpc); | ||
| 186 | } | ||
| 187 | |||
| 188 | if ((bpc == 10) && (mode_clock * 5/4 > max_tmds_clock)) { | ||
| 189 | bpc = 8; | ||
| 190 | DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n", | ||
| 191 | connector->name, bpc); | ||
| 192 | } else if (bpc > 8) { | ||
| 193 | /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */ | ||
| 194 | DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n", | ||
| 195 | connector->name); | ||
| 196 | bpc = 8; | ||
| 197 | } | ||
| 198 | } | ||
| 199 | } | ||
| 200 | |||
| 201 | if ((amdgpu_deep_color == 0) && (bpc > 8)) { | ||
| 202 | DRM_DEBUG("%s: Deep color disabled. Set amdgpu module param deep_color=1 to enable.\n", | ||
| 203 | connector->name); | ||
| 204 | bpc = 8; | ||
| 205 | } | ||
| 206 | |||
| 207 | DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n", | ||
| 208 | connector->name, connector->display_info.bpc, bpc); | ||
| 209 | |||
| 210 | return bpc; | ||
| 211 | } | ||
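
To make the clamp arithmetic concrete, here is a distilled restatement of the in-limit branches with worked numbers (illustrative sink values, not from the patch):

```c
#include <stdbool.h>

/* All clocks in kHz; a sink advertising max_tmds_clock of 300 MHz
 * yields max_tmds_khz = 300000. Worked examples:
 *
 *   1080p60: 148500 * 3/2 = 222750 <= 300000  -> 12 bpc is kept
 *   2160p30: 297000 * 3/2 = 445500  > 300000
 *            297000 * 5/4 = 371250  > 300000  -> clamped to 8 bpc
 */
static int hdmi_clamp_bpc(unsigned int mode_clock_khz,
			  unsigned int max_tmds_khz,
			  bool sink_has_dc30, int bpc)
{
	if (bpc == 12 && mode_clock_khz * 3 / 2 > max_tmds_khz)
		bpc = (sink_has_dc30 &&
		       mode_clock_khz * 5 / 4 <= max_tmds_khz) ? 10 : 8;
	if (bpc == 10 && mode_clock_khz * 5 / 4 > max_tmds_khz)
		bpc = 8;
	return bpc;
}
```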
| 212 | |||
| 213 | static void | ||
| 214 | amdgpu_connector_update_scratch_regs(struct drm_connector *connector, | ||
| 215 | enum drm_connector_status status) | ||
| 216 | { | ||
| 217 | struct drm_encoder *best_encoder = NULL; | ||
| 218 | struct drm_encoder *encoder = NULL; | ||
| 219 | struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; | ||
| 220 | bool connected; | ||
| 221 | int i; | ||
| 222 | |||
| 223 | best_encoder = connector_funcs->best_encoder(connector); | ||
| 224 | |||
| 225 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
| 226 | if (connector->encoder_ids[i] == 0) | ||
| 227 | break; | ||
| 228 | |||
| 229 | encoder = drm_encoder_find(connector->dev, | ||
| 230 | connector->encoder_ids[i]); | ||
| 231 | if (!encoder) | ||
| 232 | continue; | ||
| 233 | |||
| 234 | if ((encoder == best_encoder) && (status == connector_status_connected)) | ||
| 235 | connected = true; | ||
| 236 | else | ||
| 237 | connected = false; | ||
| 238 | |||
| 239 | amdgpu_atombios_encoder_set_bios_scratch_regs(connector, encoder, connected); | ||
| 240 | |||
| 241 | } | ||
| 242 | } | ||
| 243 | |||
| 244 | static struct drm_encoder * | ||
| 245 | amdgpu_connector_find_encoder(struct drm_connector *connector, | ||
| 246 | int encoder_type) | ||
| 247 | { | ||
| 248 | struct drm_encoder *encoder; | ||
| 249 | int i; | ||
| 250 | |||
| 251 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
| 252 | if (connector->encoder_ids[i] == 0) | ||
| 253 | break; | ||
| 254 | encoder = drm_encoder_find(connector->dev, | ||
| 255 | connector->encoder_ids[i]); | ||
| 256 | if (!encoder) | ||
| 257 | continue; | ||
| 258 | |||
| 259 | if (encoder->encoder_type == encoder_type) | ||
| 260 | return encoder; | ||
| 261 | } | ||
| 262 | return NULL; | ||
| 263 | } | ||
| 264 | |||
| 265 | struct edid *amdgpu_connector_edid(struct drm_connector *connector) | ||
| 266 | { | ||
| 267 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 268 | struct drm_property_blob *edid_blob = connector->edid_blob_ptr; | ||
| 269 | |||
| 270 | if (amdgpu_connector->edid) { | ||
| 271 | return amdgpu_connector->edid; | ||
| 272 | } else if (edid_blob) { | ||
| 273 | struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL); | ||
| 274 | if (edid) | ||
| 275 | amdgpu_connector->edid = edid; | ||
| 276 | } | ||
| 277 | return amdgpu_connector->edid; | ||
| 278 | } | ||
| 279 | |||
| 280 | static struct edid * | ||
| 281 | amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev) | ||
| 282 | { | ||
| 283 | struct edid *edid; | ||
| 284 | |||
| 285 | if (adev->mode_info.bios_hardcoded_edid) { | ||
| 286 | edid = kmalloc(adev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL); | ||
| 287 | if (edid) { | ||
| 288 | memcpy((unsigned char *)edid, | ||
| 289 | (unsigned char *)adev->mode_info.bios_hardcoded_edid, | ||
| 290 | adev->mode_info.bios_hardcoded_edid_size); | ||
| 291 | return edid; | ||
| 292 | } | ||
| 293 | } | ||
| 294 | return NULL; | ||
| 295 | } | ||
| 296 | |||
| 297 | static void amdgpu_connector_get_edid(struct drm_connector *connector) | ||
| 298 | { | ||
| 299 | struct drm_device *dev = connector->dev; | ||
| 300 | struct amdgpu_device *adev = dev->dev_private; | ||
| 301 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 302 | |||
| 303 | if (amdgpu_connector->edid) | ||
| 304 | return; | ||
| 305 | |||
| 306 | /* on hw with routers, select right port */ | ||
| 307 | if (amdgpu_connector->router.ddc_valid) | ||
| 308 | amdgpu_i2c_router_select_ddc_port(amdgpu_connector); | ||
| 309 | |||
| 310 | if ((amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) != | ||
| 311 | ENCODER_OBJECT_ID_NONE) && | ||
| 312 | amdgpu_connector->ddc_bus->has_aux) { | ||
| 313 | amdgpu_connector->edid = drm_get_edid(connector, | ||
| 314 | &amdgpu_connector->ddc_bus->aux.ddc); | ||
| 315 | } else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || | ||
| 316 | (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { | ||
| 317 | struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv; | ||
| 318 | |||
| 319 | if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || | ||
| 320 | dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && | ||
| 321 | amdgpu_connector->ddc_bus->has_aux) | ||
| 322 | amdgpu_connector->edid = drm_get_edid(connector, | ||
| 323 | &amdgpu_connector->ddc_bus->aux.ddc); | ||
| 324 | else if (amdgpu_connector->ddc_bus) | ||
| 325 | amdgpu_connector->edid = drm_get_edid(connector, | ||
| 326 | &amdgpu_connector->ddc_bus->adapter); | ||
| 327 | } else if (amdgpu_connector->ddc_bus) { | ||
| 328 | amdgpu_connector->edid = drm_get_edid(connector, | ||
| 329 | &amdgpu_connector->ddc_bus->adapter); | ||
| 330 | } | ||
| 331 | |||
| 332 | if (!amdgpu_connector->edid) { | ||
| 333 | /* some laptops provide a hardcoded edid in rom for LCDs */ | ||
| 334 | if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || | ||
| 335 | (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) | ||
| 336 | amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev); | ||
| 337 | } | ||
| 338 | } | ||
| 339 | |||
| 340 | static void amdgpu_connector_free_edid(struct drm_connector *connector) | ||
| 341 | { | ||
| 342 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 343 | |||
| 344 | if (amdgpu_connector->edid) { | ||
| 345 | kfree(amdgpu_connector->edid); | ||
| 346 | amdgpu_connector->edid = NULL; | ||
| 347 | } | ||
| 348 | } | ||
| 349 | |||
| 350 | static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector) | ||
| 351 | { | ||
| 352 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 353 | int ret; | ||
| 354 | |||
| 355 | if (amdgpu_connector->edid) { | ||
| 356 | drm_mode_connector_update_edid_property(connector, amdgpu_connector->edid); | ||
| 357 | ret = drm_add_edid_modes(connector, amdgpu_connector->edid); | ||
| 358 | drm_edid_to_eld(connector, amdgpu_connector->edid); | ||
| 359 | return ret; | ||
| 360 | } | ||
| 361 | drm_mode_connector_update_edid_property(connector, NULL); | ||
| 362 | return 0; | ||
| 363 | } | ||
| 364 | |||
| 365 | static struct drm_encoder * | ||
| 366 | amdgpu_connector_best_single_encoder(struct drm_connector *connector) | ||
| 367 | { | ||
| 368 | int enc_id = connector->encoder_ids[0]; | ||
| 369 | |||
| 370 | /* pick the encoder ids */ | ||
| 371 | if (enc_id) | ||
| 372 | return drm_encoder_find(connector->dev, enc_id); | ||
| 373 | return NULL; | ||
| 374 | } | ||
| 375 | |||
| 376 | static void amdgpu_get_native_mode(struct drm_connector *connector) | ||
| 377 | { | ||
| 378 | struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); | ||
| 379 | struct amdgpu_encoder *amdgpu_encoder; | ||
| 380 | |||
| 381 | if (encoder == NULL) | ||
| 382 | return; | ||
| 383 | |||
| 384 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 385 | |||
| 386 | if (!list_empty(&connector->probed_modes)) { | ||
| 387 | struct drm_display_mode *preferred_mode = | ||
| 388 | list_first_entry(&connector->probed_modes, | ||
| 389 | struct drm_display_mode, head); | ||
| 390 | |||
| 391 | amdgpu_encoder->native_mode = *preferred_mode; | ||
| 392 | } else { | ||
| 393 | amdgpu_encoder->native_mode.clock = 0; | ||
| 394 | } | ||
| 395 | } | ||
| 396 | |||
| 397 | static struct drm_display_mode * | ||
| 398 | amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder) | ||
| 399 | { | ||
| 400 | struct drm_device *dev = encoder->dev; | ||
| 401 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 402 | struct drm_display_mode *mode = NULL; | ||
| 403 | struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; | ||
| 404 | |||
| 405 | if (native_mode->hdisplay != 0 && | ||
| 406 | native_mode->vdisplay != 0 && | ||
| 407 | native_mode->clock != 0) { | ||
| 408 | mode = drm_mode_duplicate(dev, native_mode); | ||
| 409 | mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; | ||
| 410 | drm_mode_set_name(mode); | ||
| 411 | |||
| 412 | DRM_DEBUG_KMS("Adding native panel mode %s\n", mode->name); | ||
| 413 | } else if (native_mode->hdisplay != 0 && | ||
| 414 | native_mode->vdisplay != 0) { | ||
| 415 | /* mac laptops without an edid */ | ||
| 416 | /* Note that this is not necessarily the exact panel mode, | ||
| 417 | * but an approximation based on the cvt formula. For these | ||
| 418 | * systems we should ideally read the mode info out of the | ||
| 419 | * registers or add a mode table, but this works and is much | ||
| 420 | * simpler. | ||
| 421 | */ | ||
| 422 | mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false); | ||
| 423 | mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; | ||
| 424 | DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name); | ||
| 425 | } | ||
| 426 | return mode; | ||
| 427 | } | ||
| 428 | |||
| 429 | static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder, | ||
| 430 | struct drm_connector *connector) | ||
| 431 | { | ||
| 432 | struct drm_device *dev = encoder->dev; | ||
| 433 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 434 | struct drm_display_mode *mode = NULL; | ||
| 435 | struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; | ||
| 436 | int i; | ||
| 437 | struct mode_size { | ||
| 438 | int w; | ||
| 439 | int h; | ||
| 440 | } common_modes[17] = { | ||
| 441 | { 640, 480}, | ||
| 442 | { 720, 480}, | ||
| 443 | { 800, 600}, | ||
| 444 | { 848, 480}, | ||
| 445 | {1024, 768}, | ||
| 446 | {1152, 768}, | ||
| 447 | {1280, 720}, | ||
| 448 | {1280, 800}, | ||
| 449 | {1280, 854}, | ||
| 450 | {1280, 960}, | ||
| 451 | {1280, 1024}, | ||
| 452 | {1440, 900}, | ||
| 453 | {1400, 1050}, | ||
| 454 | {1680, 1050}, | ||
| 455 | {1600, 1200}, | ||
| 456 | {1920, 1080}, | ||
| 457 | {1920, 1200} | ||
| 458 | }; | ||
| 459 | |||
| 460 | for (i = 0; i < 17; i++) { | ||
| 461 | if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) { | ||
| 462 | if (common_modes[i].w > 1024 || | ||
| 463 | common_modes[i].h > 768) | ||
| 464 | continue; | ||
| 465 | } | ||
| 466 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
| 467 | if (common_modes[i].w > native_mode->hdisplay || | ||
| 468 | common_modes[i].h > native_mode->vdisplay || | ||
| 469 | (common_modes[i].w == native_mode->hdisplay && | ||
| 470 | common_modes[i].h == native_mode->vdisplay)) | ||
| 471 | continue; | ||
| 472 | } | ||
| 473 | if (common_modes[i].w < 320 || common_modes[i].h < 200) | ||
| 474 | continue; | ||
| 475 | |||
| 476 | mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); | ||
| 477 | drm_mode_probed_add(connector, mode); | ||
| 478 | } | ||
| 479 | } | ||
| 480 | |||
| 481 | static int amdgpu_connector_set_property(struct drm_connector *connector, | ||
| 482 | struct drm_property *property, | ||
| 483 | uint64_t val) | ||
| 484 | { | ||
| 485 | struct drm_device *dev = connector->dev; | ||
| 486 | struct amdgpu_device *adev = dev->dev_private; | ||
| 487 | struct drm_encoder *encoder; | ||
| 488 | struct amdgpu_encoder *amdgpu_encoder; | ||
| 489 | |||
| 490 | if (property == adev->mode_info.coherent_mode_property) { | ||
| 491 | struct amdgpu_encoder_atom_dig *dig; | ||
| 492 | bool new_coherent_mode; | ||
| 493 | |||
| 494 | /* need to find digital encoder on connector */ | ||
| 495 | encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | ||
| 496 | if (!encoder) | ||
| 497 | return 0; | ||
| 498 | |||
| 499 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 500 | |||
| 501 | if (!amdgpu_encoder->enc_priv) | ||
| 502 | return 0; | ||
| 503 | |||
| 504 | dig = amdgpu_encoder->enc_priv; | ||
| 505 | new_coherent_mode = val ? true : false; | ||
| 506 | if (dig->coherent_mode != new_coherent_mode) { | ||
| 507 | dig->coherent_mode = new_coherent_mode; | ||
| 508 | amdgpu_connector_property_change_mode(&amdgpu_encoder->base); | ||
| 509 | } | ||
| 510 | } | ||
| 511 | |||
| 512 | if (property == adev->mode_info.audio_property) { | ||
| 513 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 514 | /* need to find digital encoder on connector */ | ||
| 515 | encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | ||
| 516 | if (!encoder) | ||
| 517 | return 0; | ||
| 518 | |||
| 519 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 520 | |||
| 521 | if (amdgpu_connector->audio != val) { | ||
| 522 | amdgpu_connector->audio = val; | ||
| 523 | amdgpu_connector_property_change_mode(&amdgpu_encoder->base); | ||
| 524 | } | ||
| 525 | } | ||
| 526 | |||
| 527 | if (property == adev->mode_info.dither_property) { | ||
| 528 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 529 | /* need to find digital encoder on connector */ | ||
| 530 | encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | ||
| 531 | if (!encoder) | ||
| 532 | return 0; | ||
| 533 | |||
| 534 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 535 | |||
| 536 | if (amdgpu_connector->dither != val) { | ||
| 537 | amdgpu_connector->dither = val; | ||
| 538 | amdgpu_connector_property_change_mode(&amdgpu_encoder->base); | ||
| 539 | } | ||
| 540 | } | ||
| 541 | |||
| 542 | if (property == adev->mode_info.underscan_property) { | ||
| 543 | /* need to find digital encoder on connector */ | ||
| 544 | encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | ||
| 545 | if (!encoder) | ||
| 546 | return 0; | ||
| 547 | |||
| 548 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 549 | |||
| 550 | if (amdgpu_encoder->underscan_type != val) { | ||
| 551 | amdgpu_encoder->underscan_type = val; | ||
| 552 | amdgpu_connector_property_change_mode(&amdgpu_encoder->base); | ||
| 553 | } | ||
| 554 | } | ||
| 555 | |||
| 556 | if (property == adev->mode_info.underscan_hborder_property) { | ||
| 557 | /* need to find digital encoder on connector */ | ||
| 558 | encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | ||
| 559 | if (!encoder) | ||
| 560 | return 0; | ||
| 561 | |||
| 562 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 563 | |||
| 564 | if (amdgpu_encoder->underscan_hborder != val) { | ||
| 565 | amdgpu_encoder->underscan_hborder = val; | ||
| 566 | amdgpu_connector_property_change_mode(&amdgpu_encoder->base); | ||
| 567 | } | ||
| 568 | } | ||
| 569 | |||
| 570 | if (property == adev->mode_info.underscan_vborder_property) { | ||
| 571 | /* need to find digital encoder on connector */ | ||
| 572 | encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | ||
| 573 | if (!encoder) | ||
| 574 | return 0; | ||
| 575 | |||
| 576 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 577 | |||
| 578 | if (amdgpu_encoder->underscan_vborder != val) { | ||
| 579 | amdgpu_encoder->underscan_vborder = val; | ||
| 580 | amdgpu_connector_property_change_mode(&amdgpu_encoder->base); | ||
| 581 | } | ||
| 582 | } | ||
| 583 | |||
| 584 | if (property == adev->mode_info.load_detect_property) { | ||
| 585 | struct amdgpu_connector *amdgpu_connector = | ||
| 586 | to_amdgpu_connector(connector); | ||
| 587 | |||
| 588 | if (val == 0) | ||
| 589 | amdgpu_connector->dac_load_detect = false; | ||
| 590 | else | ||
| 591 | amdgpu_connector->dac_load_detect = true; | ||
| 592 | } | ||
| 593 | |||
| 594 | if (property == dev->mode_config.scaling_mode_property) { | ||
| 595 | enum amdgpu_rmx_type rmx_type; | ||
| 596 | |||
| 597 | if (connector->encoder) { | ||
| 598 | amdgpu_encoder = to_amdgpu_encoder(connector->encoder); | ||
| 599 | } else { | ||
| 600 | struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; | ||
| 601 | amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector)); | ||
| 602 | } | ||
| 603 | |||
| 604 | switch (val) { | ||
| 605 | default: | ||
| 606 | case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break; | ||
| 607 | case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break; | ||
| 608 | case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break; | ||
| 609 | case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break; | ||
| 610 | } | ||
| 611 | if (amdgpu_encoder->rmx_type == rmx_type) | ||
| 612 | return 0; | ||
| 613 | |||
| 614 | if ((rmx_type != RMX_OFF) && | ||
| 615 | (amdgpu_encoder->native_mode.clock == 0)) | ||
| 616 | return 0; | ||
| 617 | |||
| 618 | amdgpu_encoder->rmx_type = rmx_type; | ||
| 619 | |||
| 620 | amdgpu_connector_property_change_mode(&amdgpu_encoder->base); | ||
| 621 | } | ||
| 622 | |||
| 623 | return 0; | ||
| 624 | } | ||
| 625 | |||
| 626 | static void | ||
| 627 | amdgpu_connector_fixup_lcd_native_mode(struct drm_encoder *encoder, | ||
| 628 | struct drm_connector *connector) | ||
| 629 | { | ||
| 630 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 631 | struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; | ||
| 632 | struct drm_display_mode *t, *mode; | ||
| 633 | |||
| 634 | /* If the EDID preferred mode doesn't match the native mode, use it */ | ||
| 635 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { | ||
| 636 | if (mode->type & DRM_MODE_TYPE_PREFERRED) { | ||
| 637 | if (mode->hdisplay != native_mode->hdisplay || | ||
| 638 | mode->vdisplay != native_mode->vdisplay) | ||
| 639 | memcpy(native_mode, mode, sizeof(*mode)); | ||
| 640 | } | ||
| 641 | } | ||
| 642 | |||
| 643 | /* Try to get native mode details from EDID if necessary */ | ||
| 644 | if (!native_mode->clock) { | ||
| 645 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { | ||
| 646 | if (mode->hdisplay == native_mode->hdisplay && | ||
| 647 | mode->vdisplay == native_mode->vdisplay) { | ||
| 648 | *native_mode = *mode; | ||
| 649 | drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V); | ||
| 650 | DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n"); | ||
| 651 | break; | ||
| 652 | } | ||
| 653 | } | ||
| 654 | } | ||
| 655 | |||
| 656 | if (!native_mode->clock) { | ||
| 657 | DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); | ||
| 658 | amdgpu_encoder->rmx_type = RMX_OFF; | ||
| 659 | } | ||
| 660 | } | ||
| 661 | |||
| 662 | static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector) | ||
| 663 | { | ||
| 664 | struct drm_encoder *encoder; | ||
| 665 | int ret = 0; | ||
| 666 | struct drm_display_mode *mode; | ||
| 667 | |||
| 668 | amdgpu_connector_get_edid(connector); | ||
| 669 | ret = amdgpu_connector_ddc_get_modes(connector); | ||
| 670 | if (ret > 0) { | ||
| 671 | encoder = amdgpu_connector_best_single_encoder(connector); | ||
| 672 | if (encoder) { | ||
| 673 | amdgpu_connector_fixup_lcd_native_mode(encoder, connector); | ||
| 674 | /* add scaled modes */ | ||
| 675 | amdgpu_connector_add_common_modes(encoder, connector); | ||
| 676 | } | ||
| 677 | return ret; | ||
| 678 | } | ||
| 679 | |||
| 680 | encoder = amdgpu_connector_best_single_encoder(connector); | ||
| 681 | if (!encoder) | ||
| 682 | return 0; | ||
| 683 | |||
| 684 | /* we have no EDID modes */ | ||
| 685 | mode = amdgpu_connector_lcd_native_mode(encoder); | ||
| 686 | if (mode) { | ||
| 687 | ret = 1; | ||
| 688 | drm_mode_probed_add(connector, mode); | ||
| 689 | /* add the width/height from vbios tables if available */ | ||
| 690 | connector->display_info.width_mm = mode->width_mm; | ||
| 691 | connector->display_info.height_mm = mode->height_mm; | ||
| 692 | /* add scaled modes */ | ||
| 693 | amdgpu_connector_add_common_modes(encoder, connector); | ||
| 694 | } | ||
| 695 | |||
| 696 | return ret; | ||
| 697 | } | ||
| 698 | |||
| 699 | static int amdgpu_connector_lvds_mode_valid(struct drm_connector *connector, | ||
| 700 | struct drm_display_mode *mode) | ||
| 701 | { | ||
| 702 | struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); | ||
| 703 | |||
| 704 | if ((mode->hdisplay < 320) || (mode->vdisplay < 240)) | ||
| 705 | return MODE_PANEL; | ||
| 706 | |||
| 707 | if (encoder) { | ||
| 708 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 709 | struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; | ||
| 710 | |||
| 711 | /* AVIVO hardware supports downscaling modes larger than the panel | ||
| 712 | * to the panel size, but I'm not sure this is desirable. | ||
| 713 | */ | ||
| 714 | if ((mode->hdisplay > native_mode->hdisplay) || | ||
| 715 | (mode->vdisplay > native_mode->vdisplay)) | ||
| 716 | return MODE_PANEL; | ||
| 717 | |||
| 718 | /* if scaling is disabled, block non-native modes */ | ||
| 719 | if (amdgpu_encoder->rmx_type == RMX_OFF) { | ||
| 720 | if ((mode->hdisplay != native_mode->hdisplay) || | ||
| 721 | (mode->vdisplay != native_mode->vdisplay)) | ||
| 722 | return MODE_PANEL; | ||
| 723 | } | ||
| 724 | } | ||
| 725 | |||
| 726 | return MODE_OK; | ||
| 727 | } | ||
| 728 | |||
| 729 | static enum drm_connector_status | ||
| 730 | amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) | ||
| 731 | { | ||
| 732 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 733 | struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); | ||
| 734 | enum drm_connector_status ret = connector_status_disconnected; | ||
| 735 | int r; | ||
| 736 | |||
| 737 | r = pm_runtime_get_sync(connector->dev->dev); | ||
| 738 | if (r < 0) | ||
| 739 | return connector_status_disconnected; | ||
| 740 | |||
| 741 | if (encoder) { | ||
| 742 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 743 | struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; | ||
| 744 | |||
| 745 | /* check if panel is valid */ | ||
| 746 | if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) | ||
| 747 | ret = connector_status_connected; | ||
| 748 | |||
| 749 | } | ||
| 750 | |||
| 751 | /* check for edid as well */ | ||
| 752 | amdgpu_connector_get_edid(connector); | ||
| 753 | if (amdgpu_connector->edid) | ||
| 754 | ret = connector_status_connected; | ||
| 755 | /* check acpi lid status ??? */ | ||
| 756 | |||
| 757 | amdgpu_connector_update_scratch_regs(connector, ret); | ||
| 758 | pm_runtime_mark_last_busy(connector->dev->dev); | ||
| 759 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
| 760 | return ret; | ||
| 761 | } | ||
| 762 | |||
| 763 | static void amdgpu_connector_destroy(struct drm_connector *connector) | ||
| 764 | { | ||
| 765 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 766 | |||
| 767 | if (amdgpu_connector->ddc_bus->has_aux) | ||
| 768 | drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux); | ||
| 769 | amdgpu_connector_free_edid(connector); | ||
| 770 | kfree(amdgpu_connector->con_priv); | ||
| 771 | drm_connector_unregister(connector); | ||
| 772 | drm_connector_cleanup(connector); | ||
| 773 | kfree(connector); | ||
| 774 | } | ||
| 775 | |||
| 776 | static int amdgpu_connector_set_lcd_property(struct drm_connector *connector, | ||
| 777 | struct drm_property *property, | ||
| 778 | uint64_t value) | ||
| 779 | { | ||
| 780 | struct drm_device *dev = connector->dev; | ||
| 781 | struct amdgpu_encoder *amdgpu_encoder; | ||
| 782 | enum amdgpu_rmx_type rmx_type; | ||
| 783 | |||
| 784 | DRM_DEBUG_KMS("\n"); | ||
| 785 | if (property != dev->mode_config.scaling_mode_property) | ||
| 786 | return 0; | ||
| 787 | |||
| 788 | if (connector->encoder) | ||
| 789 | amdgpu_encoder = to_amdgpu_encoder(connector->encoder); | ||
| 790 | else { | ||
| 791 | struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; | ||
| 792 | amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector)); | ||
| 793 | } | ||
| 794 | |||
| 795 | switch (value) { | ||
| 796 | case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break; | ||
| 797 | case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break; | ||
| 798 | case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break; | ||
| 799 | default: | ||
| 800 | case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break; | ||
| 801 | } | ||
| 802 | if (amdgpu_encoder->rmx_type == rmx_type) | ||
| 803 | return 0; | ||
| 804 | |||
| 805 | amdgpu_encoder->rmx_type = rmx_type; | ||
| 806 | |||
| 807 | amdgpu_connector_property_change_mode(&amdgpu_encoder->base); | ||
| 808 | return 0; | ||
| 809 | } | ||
| 810 | |||
| 811 | |||
| 812 | static const struct drm_connector_helper_funcs amdgpu_connector_lvds_helper_funcs = { | ||
| 813 | .get_modes = amdgpu_connector_lvds_get_modes, | ||
| 814 | .mode_valid = amdgpu_connector_lvds_mode_valid, | ||
| 815 | .best_encoder = amdgpu_connector_best_single_encoder, | ||
| 816 | }; | ||
| 817 | |||
| 818 | static const struct drm_connector_funcs amdgpu_connector_lvds_funcs = { | ||
| 819 | .dpms = drm_helper_connector_dpms, | ||
| 820 | .detect = amdgpu_connector_lvds_detect, | ||
| 821 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
| 822 | .destroy = amdgpu_connector_destroy, | ||
| 823 | .set_property = amdgpu_connector_set_lcd_property, | ||
| 824 | }; | ||
| 825 | |||
| 826 | static int amdgpu_connector_vga_get_modes(struct drm_connector *connector) | ||
| 827 | { | ||
| 828 | int ret; | ||
| 829 | |||
| 830 | amdgpu_connector_get_edid(connector); | ||
| 831 | ret = amdgpu_connector_ddc_get_modes(connector); | ||
| 832 | |||
| 833 | return ret; | ||
| 834 | } | ||
| 835 | |||
| 836 | static int amdgpu_connector_vga_mode_valid(struct drm_connector *connector, | ||
| 837 | struct drm_display_mode *mode) | ||
| 838 | { | ||
| 839 | struct drm_device *dev = connector->dev; | ||
| 840 | struct amdgpu_device *adev = dev->dev_private; | ||
| 841 | |||
| 842 | /* XXX check mode bandwidth */ | ||
| 843 | |||
| 844 | if ((mode->clock / 10) > adev->clock.max_pixel_clock) | ||
| 845 | return MODE_CLOCK_HIGH; | ||
| 846 | |||
| 847 | return MODE_OK; | ||
| 848 | } | ||
| 849 | |||
| 850 | static enum drm_connector_status | ||
| 851 | amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) | ||
| 852 | { | ||
| 853 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 854 | struct drm_encoder *encoder; | ||
| 855 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
| 856 | bool dret = false; | ||
| 857 | enum drm_connector_status ret = connector_status_disconnected; | ||
| 858 | int r; | ||
| 859 | |||
| 860 | r = pm_runtime_get_sync(connector->dev->dev); | ||
| 861 | if (r < 0) | ||
| 862 | return connector_status_disconnected; | ||
| 863 | |||
| 864 | encoder = amdgpu_connector_best_single_encoder(connector); | ||
| 865 | if (!encoder) | ||
| 866 | ret = connector_status_disconnected; | ||
| 867 | |||
| 868 | if (amdgpu_connector->ddc_bus) | ||
| 869 | dret = amdgpu_ddc_probe(amdgpu_connector, false); | ||
| 870 | if (dret) { | ||
| 871 | amdgpu_connector->detected_by_load = false; | ||
| 872 | amdgpu_connector_free_edid(connector); | ||
| 873 | amdgpu_connector_get_edid(connector); | ||
| 874 | |||
| 875 | if (!amdgpu_connector->edid) { | ||
| 876 | DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", | ||
| 877 | connector->name); | ||
| 878 | ret = connector_status_connected; | ||
| 879 | } else { | ||
| 880 | amdgpu_connector->use_digital = | ||
| 881 | !!(amdgpu_connector->edid->input & DRM_EDID_INPUT_DIGITAL); | ||
| 882 | |||
| 883 | /* some oems have boards with separate digital and analog connectors | ||
| 884 | * with a shared ddc line (often vga + hdmi) | ||
| 885 | */ | ||
| 886 | if (amdgpu_connector->use_digital && amdgpu_connector->shared_ddc) { | ||
| 887 | amdgpu_connector_free_edid(connector); | ||
| 888 | ret = connector_status_disconnected; | ||
| 889 | } else { | ||
| 890 | ret = connector_status_connected; | ||
| 891 | } | ||
| 892 | } | ||
| 893 | } else { | ||
| 894 | |||
| 895 | /* if we aren't forcing don't do destructive polling */ | ||
| 896 | if (!force) { | ||
| 897 | /* only return the previous status if we last | ||
| 898 | * detected a monitor via load. | ||
| 899 | */ | ||
| 900 | if (amdgpu_connector->detected_by_load) | ||
| 901 | ret = connector->status; | ||
| 902 | goto out; | ||
| 903 | } | ||
| 904 | |||
| 905 | if (amdgpu_connector->dac_load_detect && encoder) { | ||
| 906 | encoder_funcs = encoder->helper_private; | ||
| 907 | ret = encoder_funcs->detect(encoder, connector); | ||
| 908 | if (ret != connector_status_disconnected) | ||
| 909 | amdgpu_connector->detected_by_load = true; | ||
| 910 | } | ||
| 911 | } | ||
| 912 | |||
| 913 | amdgpu_connector_update_scratch_regs(connector, ret); | ||
| 914 | |||
| 915 | out: | ||
| 916 | pm_runtime_mark_last_busy(connector->dev->dev); | ||
| 917 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
| 918 | |||
| 919 | return ret; | ||
| 920 | } | ||
| 921 | |||
| 922 | static const struct drm_connector_helper_funcs amdgpu_connector_vga_helper_funcs = { | ||
| 923 | .get_modes = amdgpu_connector_vga_get_modes, | ||
| 924 | .mode_valid = amdgpu_connector_vga_mode_valid, | ||
| 925 | .best_encoder = amdgpu_connector_best_single_encoder, | ||
| 926 | }; | ||
| 927 | |||
| 928 | static const struct drm_connector_funcs amdgpu_connector_vga_funcs = { | ||
| 929 | .dpms = drm_helper_connector_dpms, | ||
| 930 | .detect = amdgpu_connector_vga_detect, | ||
| 931 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
| 932 | .destroy = amdgpu_connector_destroy, | ||
| 933 | .set_property = amdgpu_connector_set_property, | ||
| 934 | }; | ||
| 935 | |||
| 936 | static bool | ||
| 937 | amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector) | ||
| 938 | { | ||
| 939 | struct drm_device *dev = connector->dev; | ||
| 940 | struct amdgpu_device *adev = dev->dev_private; | ||
| 941 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 942 | enum drm_connector_status status; | ||
| 943 | |||
| 944 | if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE) { | ||
| 945 | if (amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) | ||
| 946 | status = connector_status_connected; | ||
| 947 | else | ||
| 948 | status = connector_status_disconnected; | ||
| 949 | if (connector->status == status) | ||
| 950 | return true; | ||
| 951 | } | ||
| 952 | |||
| 953 | return false; | ||
| 954 | } | ||
| 955 | |||
| 956 | /* | ||
| 957 | * DVI is complicated | ||
| 958 | * Do a DDC probe; if the DDC probe passes, get the full EDID so | ||
| 959 | * we can do analog/digital monitor detection at this point. | ||
| 960 | * If the monitor is an analog monitor or we got no DDC, | ||
| 961 | * we need to find the DAC encoder object for this connector. | ||
| 962 | * If we got no DDC, we do load detection on the DAC encoder object. | ||
| 963 | * If we got analog DDC or load detection passes on the DAC encoder | ||
| 964 | * we have to check if this analog encoder is shared with anyone else (TV) | ||
| 965 | * if it's shared, we have to set the other connector to disconnected. | ||
| 966 | */ | ||
| 967 | static enum drm_connector_status | ||
| 968 | amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) | ||
| 969 | { | ||
| 970 | struct drm_device *dev = connector->dev; | ||
| 971 | struct amdgpu_device *adev = dev->dev_private; | ||
| 972 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 973 | struct drm_encoder *encoder = NULL; | ||
| 974 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
| 975 | int i, r; | ||
| 976 | enum drm_connector_status ret = connector_status_disconnected; | ||
| 977 | bool dret = false, broken_edid = false; | ||
| 978 | |||
| 979 | r = pm_runtime_get_sync(connector->dev->dev); | ||
| 980 | if (r < 0) | ||
| 981 | return connector_status_disconnected; | ||
| 982 | |||
| 983 | if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { | ||
| 984 | ret = connector->status; | ||
| 985 | goto exit; | ||
| 986 | } | ||
| 987 | |||
| 988 | if (amdgpu_connector->ddc_bus) | ||
| 989 | dret = amdgpu_ddc_probe(amdgpu_connector, false); | ||
| 990 | if (dret) { | ||
| 991 | amdgpu_connector->detected_by_load = false; | ||
| 992 | amdgpu_connector_free_edid(connector); | ||
| 993 | amdgpu_connector_get_edid(connector); | ||
| 994 | |||
| 995 | if (!amdgpu_connector->edid) { | ||
| 996 | DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", | ||
| 997 | connector->name); | ||
| 998 | ret = connector_status_connected; | ||
| 999 | broken_edid = true; /* defer use_digital to later */ | ||
| 1000 | } else { | ||
| 1001 | amdgpu_connector->use_digital = | ||
| 1002 | !!(amdgpu_connector->edid->input & DRM_EDID_INPUT_DIGITAL); | ||
| 1003 | |||
| 1004 | /* some oems have boards with separate digital and analog connectors | ||
| 1005 | * with a shared ddc line (often vga + hdmi) | ||
| 1006 | */ | ||
| 1007 | if ((!amdgpu_connector->use_digital) && amdgpu_connector->shared_ddc) { | ||
| 1008 | amdgpu_connector_free_edid(connector); | ||
| 1009 | ret = connector_status_disconnected; | ||
| 1010 | } else { | ||
| 1011 | ret = connector_status_connected; | ||
| 1012 | } | ||
| 1013 | |||
| 1014 | /* This gets complicated. We have boards with VGA + HDMI with a | ||
| 1015 | * shared DDC line and we have boards with DVI-D + HDMI with a shared | ||
| 1016 | * DDC line. The latter is more complex because with DVI<->HDMI adapters | ||
| 1017 | * you don't really know what's connected to which port as both are digital. | ||
| 1018 | */ | ||
| 1019 | if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) { | ||
| 1020 | struct drm_connector *list_connector; | ||
| 1021 | struct amdgpu_connector *list_amdgpu_connector; | ||
| 1022 | list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { | ||
| 1023 | if (connector == list_connector) | ||
| 1024 | continue; | ||
| 1025 | list_amdgpu_connector = to_amdgpu_connector(list_connector); | ||
| 1026 | if (list_amdgpu_connector->shared_ddc && | ||
| 1027 | (list_amdgpu_connector->ddc_bus->rec.i2c_id == | ||
| 1028 | amdgpu_connector->ddc_bus->rec.i2c_id)) { | ||
| 1029 | /* cases where both connectors are digital */ | ||
| 1030 | if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) { | ||
| 1031 | /* hpd is our only option in this case */ | ||
| 1032 | if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { | ||
| 1033 | amdgpu_connector_free_edid(connector); | ||
| 1034 | ret = connector_status_disconnected; | ||
| 1035 | } | ||
| 1036 | } | ||
| 1037 | } | ||
| 1038 | } | ||
| 1039 | } | ||
| 1040 | } | ||
| 1041 | } | ||
| 1042 | |||
| 1043 | if ((ret == connector_status_connected) && (amdgpu_connector->use_digital == true)) | ||
| 1044 | goto out; | ||
| 1045 | |||
| 1046 | /* DVI-D and HDMI-A are digital only */ | ||
| 1047 | if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) || | ||
| 1048 | (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA)) | ||
| 1049 | goto out; | ||
| 1050 | |||
| 1051 | /* if we aren't forcing don't do destructive polling */ | ||
| 1052 | if (!force) { | ||
| 1053 | /* only return the previous status if we last | ||
| 1054 | * detected a monitor via load. | ||
| 1055 | */ | ||
| 1056 | if (amdgpu_connector->detected_by_load) | ||
| 1057 | ret = connector->status; | ||
| 1058 | goto out; | ||
| 1059 | } | ||
| 1060 | |||
| 1061 | /* find analog encoder */ | ||
| 1062 | if (amdgpu_connector->dac_load_detect) { | ||
| 1063 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
| 1064 | if (connector->encoder_ids[i] == 0) | ||
| 1065 | break; | ||
| 1066 | |||
| 1067 | encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); | ||
| 1068 | if (!encoder) | ||
| 1069 | continue; | ||
| 1070 | |||
| 1071 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC && | ||
| 1072 | encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) | ||
| 1073 | continue; | ||
| 1074 | |||
| 1075 | encoder_funcs = encoder->helper_private; | ||
| 1076 | if (encoder_funcs->detect) { | ||
| 1077 | if (!broken_edid) { | ||
| 1078 | if (ret != connector_status_connected) { | ||
| 1079 | /* deal with analog monitors without DDC */ | ||
| 1080 | ret = encoder_funcs->detect(encoder, connector); | ||
| 1081 | if (ret == connector_status_connected) { | ||
| 1082 | amdgpu_connector->use_digital = false; | ||
| 1083 | } | ||
| 1084 | if (ret != connector_status_disconnected) | ||
| 1085 | amdgpu_connector->detected_by_load = true; | ||
| 1086 | } | ||
| 1087 | } else { | ||
| 1088 | enum drm_connector_status lret; | ||
| 1089 | /* assume digital unless load detected otherwise */ | ||
| 1090 | amdgpu_connector->use_digital = true; | ||
| 1091 | lret = encoder_funcs->detect(encoder, connector); | ||
| 1092 | DRM_DEBUG_KMS("load_detect %x returned: %x\n", encoder->encoder_type, lret); | ||
| 1093 | if (lret == connector_status_connected) | ||
| 1094 | amdgpu_connector->use_digital = false; | ||
| 1095 | } | ||
| 1096 | break; | ||
| 1097 | } | ||
| 1098 | } | ||
| 1099 | } | ||
| 1100 | |||
| 1101 | out: | ||
| 1102 | /* updated in get modes as well since we need to know if it's analog or digital */ | ||
| 1103 | amdgpu_connector_update_scratch_regs(connector, ret); | ||
| 1104 | |||
| 1105 | exit: | ||
| 1106 | pm_runtime_mark_last_busy(connector->dev->dev); | ||
| 1107 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
| 1108 | |||
| 1109 | return ret; | ||
| 1110 | } | ||
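
The comment block introducing this function compresses a fairly tangled decision tree. As a reading aid only, here is a self-contained C condensation of the same ordering; every name in it is hypothetical, the EDID/shared-DDC classification is elided, and none of this is driver code:

```c
#include <stdbool.h>

enum sketch_status { SKETCH_DISCONNECTED, SKETCH_CONNECTED };

struct sketch_conn {
	enum sketch_status last_status;
	bool hpd_unchanged;   /* HPD pin still reports the old state */
	bool ddc_answered;    /* non-destructive DDC probe succeeded */
	bool digital_only;    /* DVI-D or HDMI-A: no analog fallback */
	bool load_detected;   /* result of destructive DAC load detection */
};

/* Hypothetical condensation of the ordering in amdgpu_connector_dvi_detect(). */
enum sketch_status dvi_detect_sketch(struct sketch_conn *c, bool force)
{
	if (!force && c->hpd_unchanged)
		return c->last_status;      /* HPD says nothing changed */

	if (c->ddc_answered)                /* step 1: cheap DDC probe,  */
		return SKETCH_CONNECTED;    /* then classify via EDID    */

	if (c->digital_only || !force)      /* digital-only port, or     */
		return SKETCH_DISCONNECTED; /* destructive poll forbidden */

	/* step 2: last resort, analog load detection on the DAC */
	return c->load_detected ? SKETCH_CONNECTED : SKETCH_DISCONNECTED;
}
```

The key property to notice is that destructive DAC load detection only ever runs when polling is forced and DDC gave no answer.
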
| 1111 | |||
| 1112 | /* okay need to be smart in here about which encoder to pick */ | ||
| 1113 | static struct drm_encoder * | ||
| 1114 | amdgpu_connector_dvi_encoder(struct drm_connector *connector) | ||
| 1115 | { | ||
| 1116 | int enc_id = connector->encoder_ids[0]; | ||
| 1117 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 1118 | struct drm_encoder *encoder; | ||
| 1119 | int i; | ||
| 1120 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
| 1121 | if (connector->encoder_ids[i] == 0) | ||
| 1122 | break; | ||
| 1123 | |||
| 1124 | encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); | ||
| 1125 | if (!encoder) | ||
| 1126 | continue; | ||
| 1127 | |||
| 1128 | if (amdgpu_connector->use_digital == true) { | ||
| 1129 | if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) | ||
| 1130 | return encoder; | ||
| 1131 | } else { | ||
| 1132 | if (encoder->encoder_type == DRM_MODE_ENCODER_DAC || | ||
| 1133 | encoder->encoder_type == DRM_MODE_ENCODER_TVDAC) | ||
| 1134 | return encoder; | ||
| 1135 | } | ||
| 1136 | } | ||
| 1137 | |||
| 1138 | /* see if we have a default encoder TODO */ | ||
| 1139 | |||
| 1140 | /* then check use digital */ | ||
| 1141 | /* pick the first one */ | ||
| 1142 | if (enc_id) | ||
| 1143 | return drm_encoder_find(connector->dev, enc_id); | ||
| 1144 | return NULL; | ||
| 1145 | } | ||
| 1146 | |||
| 1147 | static void amdgpu_connector_dvi_force(struct drm_connector *connector) | ||
| 1148 | { | ||
| 1149 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 1150 | if (connector->force == DRM_FORCE_ON) | ||
| 1151 | amdgpu_connector->use_digital = false; | ||
| 1152 | if (connector->force == DRM_FORCE_ON_DIGITAL) | ||
| 1153 | amdgpu_connector->use_digital = true; | ||
| 1154 | } | ||
| 1155 | |||
| 1156 | static int amdgpu_connector_dvi_mode_valid(struct drm_connector *connector, | ||
| 1157 | struct drm_display_mode *mode) | ||
| 1158 | { | ||
| 1159 | struct drm_device *dev = connector->dev; | ||
| 1160 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1161 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 1162 | |||
| 1163 | /* XXX check mode bandwidth */ | ||
| 1164 | |||
| 1165 | if (amdgpu_connector->use_digital && (mode->clock > 165000)) { | ||
| 1166 | if ((amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) || | ||
| 1167 | (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || | ||
| 1168 | (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) { | ||
| 1169 | return MODE_OK; | ||
| 1170 | } else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { | ||
| 1171 | /* HDMI 1.3+ supports max clock of 340 MHz */ | ||
| 1172 | if (mode->clock > 340000) | ||
| 1173 | return MODE_CLOCK_HIGH; | ||
| 1174 | else | ||
| 1175 | return MODE_OK; | ||
| 1176 | } else { | ||
| 1177 | return MODE_CLOCK_HIGH; | ||
| 1178 | } | ||
| 1179 | } | ||
| 1180 | |||
| 1181 | /* check against the max pixel clock */ | ||
| 1182 | if ((mode->clock / 10) > adev->clock.max_pixel_clock) | ||
| 1183 | return MODE_CLOCK_HIGH; | ||
| 1184 | |||
| 1185 | return MODE_OK; | ||
| 1186 | } | ||
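
The thresholds in this validator are the usual TMDS link limits: `mode->clock` follows the DRM convention of kHz, so 165000 is the 165 MHz single-link ceiling and 340000 the HDMI 1.3+ ceiling, while dual-link DVI and HDMI type B connectors skip the single-link cap entirely. A stand-alone illustration (not driver code) with two worked mode clocks:

```c
#include <stdio.h>
#include <stdbool.h>

/* Stand-alone illustration of the TMDS clock limits checked above.
 * mode_khz follows the DRM convention: pixel clock in kHz. */
static const char *tmds_verdict(int mode_khz, bool dual_link, bool hdmi13plus)
{
	if (mode_khz <= 165000)
		return "OK on single-link TMDS";
	if (dual_link)
		return "OK on dual-link DVI / HDMI type B";
	if (hdmi13plus)
		return mode_khz <= 340000 ? "OK on HDMI 1.3+"
					  : "MODE_CLOCK_HIGH";
	return "MODE_CLOCK_HIGH";
}

int main(void)
{
	/* 1920x1080@60 is ~148.5 MHz: fine on a single link. */
	printf("%s\n", tmds_verdict(148500, false, false));
	/* 2560x1440@60 (CVT-RB) is ~241.5 MHz: rejected on single-link
	 * DVI, accepted on dual link or an HDMI 1.3+ sink. */
	printf("%s\n", tmds_verdict(241500, false, true));
	return 0;
}
```
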
| 1187 | |||
| 1188 | static const struct drm_connector_helper_funcs amdgpu_connector_dvi_helper_funcs = { | ||
| 1189 | .get_modes = amdgpu_connector_vga_get_modes, | ||
| 1190 | .mode_valid = amdgpu_connector_dvi_mode_valid, | ||
| 1191 | .best_encoder = amdgpu_connector_dvi_encoder, | ||
| 1192 | }; | ||
| 1193 | |||
| 1194 | static const struct drm_connector_funcs amdgpu_connector_dvi_funcs = { | ||
| 1195 | .dpms = drm_helper_connector_dpms, | ||
| 1196 | .detect = amdgpu_connector_dvi_detect, | ||
| 1197 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
| 1198 | .set_property = amdgpu_connector_set_property, | ||
| 1199 | .destroy = amdgpu_connector_destroy, | ||
| 1200 | .force = amdgpu_connector_dvi_force, | ||
| 1201 | }; | ||
| 1202 | |||
| 1203 | static int amdgpu_connector_dp_get_modes(struct drm_connector *connector) | ||
| 1204 | { | ||
| 1205 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 1206 | struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv; | ||
| 1207 | struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); | ||
| 1208 | int ret; | ||
| 1209 | |||
| 1210 | if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || | ||
| 1211 | (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { | ||
| 1212 | struct drm_display_mode *mode; | ||
| 1213 | |||
| 1214 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | ||
| 1215 | if (!amdgpu_dig_connector->edp_on) | ||
| 1216 | amdgpu_atombios_encoder_set_edp_panel_power(connector, | ||
| 1217 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
| 1218 | amdgpu_connector_get_edid(connector); | ||
| 1219 | ret = amdgpu_connector_ddc_get_modes(connector); | ||
| 1220 | if (!amdgpu_dig_connector->edp_on) | ||
| 1221 | amdgpu_atombios_encoder_set_edp_panel_power(connector, | ||
| 1222 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | ||
| 1223 | } else { | ||
| 1224 | /* need to setup ddc on the bridge */ | ||
| 1225 | if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) != | ||
| 1226 | ENCODER_OBJECT_ID_NONE) { | ||
| 1227 | if (encoder) | ||
| 1228 | amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder); | ||
| 1229 | } | ||
| 1230 | amdgpu_connector_get_edid(connector); | ||
| 1231 | ret = amdgpu_connector_ddc_get_modes(connector); | ||
| 1232 | } | ||
| 1233 | |||
| 1234 | if (ret > 0) { | ||
| 1235 | if (encoder) { | ||
| 1236 | amdgpu_connector_fixup_lcd_native_mode(encoder, connector); | ||
| 1237 | /* add scaled modes */ | ||
| 1238 | amdgpu_connector_add_common_modes(encoder, connector); | ||
| 1239 | } | ||
| 1240 | return ret; | ||
| 1241 | } | ||
| 1242 | |||
| 1243 | if (!encoder) | ||
| 1244 | return 0; | ||
| 1245 | |||
| 1246 | /* we have no EDID modes */ | ||
| 1247 | mode = amdgpu_connector_lcd_native_mode(encoder); | ||
| 1248 | if (mode) { | ||
| 1249 | ret = 1; | ||
| 1250 | drm_mode_probed_add(connector, mode); | ||
| 1251 | /* add the width/height from vbios tables if available */ | ||
| 1252 | connector->display_info.width_mm = mode->width_mm; | ||
| 1253 | connector->display_info.height_mm = mode->height_mm; | ||
| 1254 | /* add scaled modes */ | ||
| 1255 | amdgpu_connector_add_common_modes(encoder, connector); | ||
| 1256 | } | ||
| 1257 | } else { | ||
| 1258 | /* need to setup ddc on the bridge */ | ||
| 1259 | if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) != | ||
| 1260 | ENCODER_OBJECT_ID_NONE) { | ||
| 1261 | if (encoder) | ||
| 1262 | amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder); | ||
| 1263 | } | ||
| 1264 | amdgpu_connector_get_edid(connector); | ||
| 1265 | ret = amdgpu_connector_ddc_get_modes(connector); | ||
| 1266 | |||
| 1267 | amdgpu_get_native_mode(connector); | ||
| 1268 | } | ||
| 1269 | |||
| 1270 | return ret; | ||
| 1271 | } | ||
| 1272 | |||
| 1273 | u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector) | ||
| 1274 | { | ||
| 1275 | struct drm_encoder *encoder; | ||
| 1276 | struct amdgpu_encoder *amdgpu_encoder; | ||
| 1277 | int i; | ||
| 1278 | |||
| 1279 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
| 1280 | if (connector->encoder_ids[i] == 0) | ||
| 1281 | break; | ||
| 1282 | |||
| 1283 | encoder = drm_encoder_find(connector->dev, | ||
| 1284 | connector->encoder_ids[i]); | ||
| 1285 | if (!encoder) | ||
| 1286 | continue; | ||
| 1287 | |||
| 1288 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 1289 | |||
| 1290 | switch (amdgpu_encoder->encoder_id) { | ||
| 1291 | case ENCODER_OBJECT_ID_TRAVIS: | ||
| 1292 | case ENCODER_OBJECT_ID_NUTMEG: | ||
| 1293 | return amdgpu_encoder->encoder_id; | ||
| 1294 | default: | ||
| 1295 | break; | ||
| 1296 | } | ||
| 1297 | } | ||
| 1298 | |||
| 1299 | return ENCODER_OBJECT_ID_NONE; | ||
| 1300 | } | ||
| 1301 | |||
| 1302 | static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector) | ||
| 1303 | { | ||
| 1304 | struct drm_encoder *encoder; | ||
| 1305 | struct amdgpu_encoder *amdgpu_encoder; | ||
| 1306 | int i; | ||
| 1307 | bool found = false; | ||
| 1308 | |||
| 1309 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
| 1310 | if (connector->encoder_ids[i] == 0) | ||
| 1311 | break; | ||
| 1312 | encoder = drm_encoder_find(connector->dev, | ||
| 1313 | connector->encoder_ids[i]); | ||
| 1314 | if (!encoder) | ||
| 1315 | continue; | ||
| 1316 | |||
| 1317 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 1318 | if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2) | ||
| 1319 | found = true; | ||
| 1320 | } | ||
| 1321 | |||
| 1322 | return found; | ||
| 1323 | } | ||
| 1324 | |||
| 1325 | bool amdgpu_connector_is_dp12_capable(struct drm_connector *connector) | ||
| 1326 | { | ||
| 1327 | struct drm_device *dev = connector->dev; | ||
| 1328 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1329 | |||
| 1330 | if ((adev->clock.default_dispclk >= 53900) && | ||
| 1331 | amdgpu_connector_encoder_is_hbr2(connector)) { | ||
| 1332 | return true; | ||
| 1333 | } | ||
| 1334 | |||
| 1335 | return false; | ||
| 1336 | } | ||
| 1337 | |||
| 1338 | static enum drm_connector_status | ||
| 1339 | amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) | ||
| 1340 | { | ||
| 1341 | struct drm_device *dev = connector->dev; | ||
| 1342 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1343 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 1344 | enum drm_connector_status ret = connector_status_disconnected; | ||
| 1345 | struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv; | ||
| 1346 | struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); | ||
| 1347 | int r; | ||
| 1348 | |||
| 1349 | r = pm_runtime_get_sync(connector->dev->dev); | ||
| 1350 | if (r < 0) | ||
| 1351 | return connector_status_disconnected; | ||
| 1352 | |||
| 1353 | if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { | ||
| 1354 | ret = connector->status; | ||
| 1355 | goto out; | ||
| 1356 | } | ||
| 1357 | |||
| 1358 | amdgpu_connector_free_edid(connector); | ||
| 1359 | |||
| 1360 | if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || | ||
| 1361 | (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { | ||
| 1362 | if (encoder) { | ||
| 1363 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 1364 | struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; | ||
| 1365 | |||
| 1366 | /* check if panel is valid */ | ||
| 1367 | if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) | ||
| 1368 | ret = connector_status_connected; | ||
| 1369 | } | ||
| 1370 | /* eDP is always DP */ | ||
| 1371 | amdgpu_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; | ||
| 1372 | if (!amdgpu_dig_connector->edp_on) | ||
| 1373 | amdgpu_atombios_encoder_set_edp_panel_power(connector, | ||
| 1374 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
| 1375 | if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) | ||
| 1376 | ret = connector_status_connected; | ||
| 1377 | if (!amdgpu_dig_connector->edp_on) | ||
| 1378 | amdgpu_atombios_encoder_set_edp_panel_power(connector, | ||
| 1379 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | ||
| 1380 | } else if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) != | ||
| 1381 | ENCODER_OBJECT_ID_NONE) { | ||
| 1382 | /* DP bridges are always DP */ | ||
| 1383 | amdgpu_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; | ||
| 1384 | /* get the DPCD from the bridge */ | ||
| 1385 | amdgpu_atombios_dp_get_dpcd(amdgpu_connector); | ||
| 1386 | |||
| 1387 | if (encoder) { | ||
| 1388 | /* setup ddc on the bridge */ | ||
| 1389 | amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder); | ||
| 1390 | /* bridge chips are always aux */ | ||
| 1391 | if (amdgpu_ddc_probe(amdgpu_connector, true)) /* try DDC */ | ||
| 1392 | ret = connector_status_connected; | ||
| 1393 | else if (amdgpu_connector->dac_load_detect) { /* try load detection */ | ||
| 1394 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | ||
| 1395 | ret = encoder_funcs->detect(encoder, connector); | ||
| 1396 | } | ||
| 1397 | } | ||
| 1398 | } else { | ||
| 1399 | amdgpu_dig_connector->dp_sink_type = | ||
| 1400 | amdgpu_atombios_dp_get_sinktype(amdgpu_connector); | ||
| 1401 | if (amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { | ||
| 1402 | ret = connector_status_connected; | ||
| 1403 | if (amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) | ||
| 1404 | amdgpu_atombios_dp_get_dpcd(amdgpu_connector); | ||
| 1405 | } else { | ||
| 1406 | if (amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { | ||
| 1407 | if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) | ||
| 1408 | ret = connector_status_connected; | ||
| 1409 | } else { | ||
| 1410 | /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */ | ||
| 1411 | if (amdgpu_ddc_probe(amdgpu_connector, false)) | ||
| 1412 | ret = connector_status_connected; | ||
| 1413 | } | ||
| 1414 | } | ||
| 1415 | } | ||
| 1416 | |||
| 1417 | amdgpu_connector_update_scratch_regs(connector, ret); | ||
| 1418 | out: | ||
| 1419 | pm_runtime_mark_last_busy(connector->dev->dev); | ||
| 1420 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
| 1421 | |||
| 1422 | return ret; | ||
| 1423 | } | ||
| 1424 | |||
| 1425 | static int amdgpu_connector_dp_mode_valid(struct drm_connector *connector, | ||
| 1426 | struct drm_display_mode *mode) | ||
| 1427 | { | ||
| 1428 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 1429 | struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv; | ||
| 1430 | |||
| 1431 | /* XXX check mode bandwidth */ | ||
| 1432 | |||
| 1433 | if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || | ||
| 1434 | (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { | ||
| 1435 | struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); | ||
| 1436 | |||
| 1437 | if ((mode->hdisplay < 320) || (mode->vdisplay < 240)) | ||
| 1438 | return MODE_PANEL; | ||
| 1439 | |||
| 1440 | if (encoder) { | ||
| 1441 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 1442 | struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; | ||
| 1443 | |||
| 1444 | /* AVIVO hardware supports downscaling modes larger than the panel | ||
| 1445 | * to the panel size, but I'm not sure this is desirable. | ||
| 1446 | */ | ||
| 1447 | if ((mode->hdisplay > native_mode->hdisplay) || | ||
| 1448 | (mode->vdisplay > native_mode->vdisplay)) | ||
| 1449 | return MODE_PANEL; | ||
| 1450 | |||
| 1451 | /* if scaling is disabled, block non-native modes */ | ||
| 1452 | if (amdgpu_encoder->rmx_type == RMX_OFF) { | ||
| 1453 | if ((mode->hdisplay != native_mode->hdisplay) || | ||
| 1454 | (mode->vdisplay != native_mode->vdisplay)) | ||
| 1455 | return MODE_PANEL; | ||
| 1456 | } | ||
| 1457 | } | ||
| 1458 | return MODE_OK; | ||
| 1459 | } else { | ||
| 1460 | if ((amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | ||
| 1461 | (amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { | ||
| 1462 | return amdgpu_atombios_dp_mode_valid_helper(connector, mode); | ||
| 1463 | } else { | ||
| 1464 | if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { | ||
| 1465 | /* HDMI 1.3+ supports max clock of 340 MHz */ | ||
| 1466 | if (mode->clock > 340000) | ||
| 1467 | return MODE_CLOCK_HIGH; | ||
| 1468 | } else { | ||
| 1469 | if (mode->clock > 165000) | ||
| 1470 | return MODE_CLOCK_HIGH; | ||
| 1471 | } | ||
| 1472 | } | ||
| 1473 | } | ||
| 1474 | |||
| 1475 | return MODE_OK; | ||
| 1476 | } | ||
| 1477 | |||
| 1478 | static const struct drm_connector_helper_funcs amdgpu_connector_dp_helper_funcs = { | ||
| 1479 | .get_modes = amdgpu_connector_dp_get_modes, | ||
| 1480 | .mode_valid = amdgpu_connector_dp_mode_valid, | ||
| 1481 | .best_encoder = amdgpu_connector_dvi_encoder, | ||
| 1482 | }; | ||
| 1483 | |||
| 1484 | static const struct drm_connector_funcs amdgpu_connector_dp_funcs = { | ||
| 1485 | .dpms = drm_helper_connector_dpms, | ||
| 1486 | .detect = amdgpu_connector_dp_detect, | ||
| 1487 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
| 1488 | .set_property = amdgpu_connector_set_property, | ||
| 1489 | .destroy = amdgpu_connector_destroy, | ||
| 1490 | .force = amdgpu_connector_dvi_force, | ||
| 1491 | }; | ||
| 1492 | |||
| 1493 | static const struct drm_connector_funcs amdgpu_connector_edp_funcs = { | ||
| 1494 | .dpms = drm_helper_connector_dpms, | ||
| 1495 | .detect = amdgpu_connector_dp_detect, | ||
| 1496 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
| 1497 | .set_property = amdgpu_connector_set_lcd_property, | ||
| 1498 | .destroy = amdgpu_connector_destroy, | ||
| 1499 | .force = amdgpu_connector_dvi_force, | ||
| 1500 | }; | ||
| 1501 | |||
| 1502 | void | ||
| 1503 | amdgpu_connector_add(struct amdgpu_device *adev, | ||
| 1504 | uint32_t connector_id, | ||
| 1505 | uint32_t supported_device, | ||
| 1506 | int connector_type, | ||
| 1507 | struct amdgpu_i2c_bus_rec *i2c_bus, | ||
| 1508 | uint16_t connector_object_id, | ||
| 1509 | struct amdgpu_hpd *hpd, | ||
| 1510 | struct amdgpu_router *router) | ||
| 1511 | { | ||
| 1512 | struct drm_device *dev = adev->ddev; | ||
| 1513 | struct drm_connector *connector; | ||
| 1514 | struct amdgpu_connector *amdgpu_connector; | ||
| 1515 | struct amdgpu_connector_atom_dig *amdgpu_dig_connector; | ||
| 1516 | struct drm_encoder *encoder; | ||
| 1517 | struct amdgpu_encoder *amdgpu_encoder; | ||
| 1518 | uint32_t subpixel_order = SubPixelNone; | ||
| 1519 | bool shared_ddc = false; | ||
| 1520 | bool is_dp_bridge = false; | ||
| 1521 | bool has_aux = false; | ||
| 1522 | |||
| 1523 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | ||
| 1524 | return; | ||
| 1525 | |||
| 1526 | /* see if we already added it */ | ||
| 1527 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 1528 | amdgpu_connector = to_amdgpu_connector(connector); | ||
| 1529 | if (amdgpu_connector->connector_id == connector_id) { | ||
| 1530 | amdgpu_connector->devices |= supported_device; | ||
| 1531 | return; | ||
| 1532 | } | ||
| 1533 | if (amdgpu_connector->ddc_bus && i2c_bus->valid) { | ||
| 1534 | if (amdgpu_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) { | ||
| 1535 | amdgpu_connector->shared_ddc = true; | ||
| 1536 | shared_ddc = true; | ||
| 1537 | } | ||
| 1538 | if (amdgpu_connector->router_bus && router->ddc_valid && | ||
| 1539 | (amdgpu_connector->router.router_id == router->router_id)) { | ||
| 1540 | amdgpu_connector->shared_ddc = false; | ||
| 1541 | shared_ddc = false; | ||
| 1542 | } | ||
| 1543 | } | ||
| 1544 | } | ||
| 1545 | |||
| 1546 | /* check if it's a dp bridge */ | ||
| 1547 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
| 1548 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 1549 | if (amdgpu_encoder->devices & supported_device) { | ||
| 1550 | switch (amdgpu_encoder->encoder_id) { | ||
| 1551 | case ENCODER_OBJECT_ID_TRAVIS: | ||
| 1552 | case ENCODER_OBJECT_ID_NUTMEG: | ||
| 1553 | is_dp_bridge = true; | ||
| 1554 | break; | ||
| 1555 | default: | ||
| 1556 | break; | ||
| 1557 | } | ||
| 1558 | } | ||
| 1559 | } | ||
| 1560 | |||
| 1561 | amdgpu_connector = kzalloc(sizeof(struct amdgpu_connector), GFP_KERNEL); | ||
| 1562 | if (!amdgpu_connector) | ||
| 1563 | return; | ||
| 1564 | |||
| 1565 | connector = &amdgpu_connector->base; | ||
| 1566 | |||
| 1567 | amdgpu_connector->connector_id = connector_id; | ||
| 1568 | amdgpu_connector->devices = supported_device; | ||
| 1569 | amdgpu_connector->shared_ddc = shared_ddc; | ||
| 1570 | amdgpu_connector->connector_object_id = connector_object_id; | ||
| 1571 | amdgpu_connector->hpd = *hpd; | ||
| 1572 | |||
| 1573 | amdgpu_connector->router = *router; | ||
| 1574 | if (router->ddc_valid || router->cd_valid) { | ||
| 1575 | amdgpu_connector->router_bus = amdgpu_i2c_lookup(adev, &router->i2c_info); | ||
| 1576 | if (!amdgpu_connector->router_bus) | ||
| 1577 | DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n"); | ||
| 1578 | } | ||
| 1579 | |||
| 1580 | if (is_dp_bridge) { | ||
| 1581 | amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL); | ||
| 1582 | if (!amdgpu_dig_connector) | ||
| 1583 | goto failed; | ||
| 1584 | amdgpu_connector->con_priv = amdgpu_dig_connector; | ||
| 1585 | if (i2c_bus->valid) { | ||
| 1586 | amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus); | ||
| 1587 | if (amdgpu_connector->ddc_bus) | ||
| 1588 | has_aux = true; | ||
| 1589 | else | ||
| 1590 | DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); | ||
| 1591 | } | ||
| 1592 | switch (connector_type) { | ||
| 1593 | case DRM_MODE_CONNECTOR_VGA: | ||
| 1594 | case DRM_MODE_CONNECTOR_DVIA: | ||
| 1595 | default: | ||
| 1596 | drm_connector_init(dev, &amdgpu_connector->base, | ||
| 1597 | &amdgpu_connector_dp_funcs, connector_type); | ||
| 1598 | drm_connector_helper_add(&amdgpu_connector->base, | ||
| 1599 | &amdgpu_connector_dp_helper_funcs); | ||
| 1600 | connector->interlace_allowed = true; | ||
| 1601 | connector->doublescan_allowed = true; | ||
| 1602 | amdgpu_connector->dac_load_detect = true; | ||
| 1603 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1604 | adev->mode_info.load_detect_property, | ||
| 1605 | 1); | ||
| 1606 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1607 | dev->mode_config.scaling_mode_property, | ||
| 1608 | DRM_MODE_SCALE_NONE); | ||
| 1609 | break; | ||
| 1610 | case DRM_MODE_CONNECTOR_DVII: | ||
| 1611 | case DRM_MODE_CONNECTOR_DVID: | ||
| 1612 | case DRM_MODE_CONNECTOR_HDMIA: | ||
| 1613 | case DRM_MODE_CONNECTOR_HDMIB: | ||
| 1614 | case DRM_MODE_CONNECTOR_DisplayPort: | ||
| 1615 | drm_connector_init(dev, &amdgpu_connector->base, | ||
| 1616 | &amdgpu_connector_dp_funcs, connector_type); | ||
| 1617 | drm_connector_helper_add(&amdgpu_connector->base, | ||
| 1618 | &amdgpu_connector_dp_helper_funcs); | ||
| 1619 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1620 | adev->mode_info.underscan_property, | ||
| 1621 | UNDERSCAN_OFF); | ||
| 1622 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1623 | adev->mode_info.underscan_hborder_property, | ||
| 1624 | 0); | ||
| 1625 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1626 | adev->mode_info.underscan_vborder_property, | ||
| 1627 | 0); | ||
| 1628 | |||
| 1629 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1630 | dev->mode_config.scaling_mode_property, | ||
| 1631 | DRM_MODE_SCALE_NONE); | ||
| 1632 | |||
| 1633 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1634 | adev->mode_info.dither_property, | ||
| 1635 | AMDGPU_FMT_DITHER_DISABLE); | ||
| 1636 | |||
| 1637 | if (amdgpu_audio != 0) | ||
| 1638 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1639 | adev->mode_info.audio_property, | ||
| 1640 | AMDGPU_AUDIO_AUTO); | ||
| 1641 | |||
| 1642 | subpixel_order = SubPixelHorizontalRGB; | ||
| 1643 | connector->interlace_allowed = true; | ||
| 1644 | if (connector_type == DRM_MODE_CONNECTOR_HDMIB) | ||
| 1645 | connector->doublescan_allowed = true; | ||
| 1646 | else | ||
| 1647 | connector->doublescan_allowed = false; | ||
| 1648 | if (connector_type == DRM_MODE_CONNECTOR_DVII) { | ||
| 1649 | amdgpu_connector->dac_load_detect = true; | ||
| 1650 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1651 | adev->mode_info.load_detect_property, | ||
| 1652 | 1); | ||
| 1653 | } | ||
| 1654 | break; | ||
| 1655 | case DRM_MODE_CONNECTOR_LVDS: | ||
| 1656 | case DRM_MODE_CONNECTOR_eDP: | ||
| 1657 | drm_connector_init(dev, &amdgpu_connector->base, | ||
| 1658 | &amdgpu_connector_edp_funcs, connector_type); | ||
| 1659 | drm_connector_helper_add(&amdgpu_connector->base, | ||
| 1660 | &amdgpu_connector_dp_helper_funcs); | ||
| 1661 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1662 | dev->mode_config.scaling_mode_property, | ||
| 1663 | DRM_MODE_SCALE_FULLSCREEN); | ||
| 1664 | subpixel_order = SubPixelHorizontalRGB; | ||
| 1665 | connector->interlace_allowed = false; | ||
| 1666 | connector->doublescan_allowed = false; | ||
| 1667 | break; | ||
| 1668 | } | ||
| 1669 | } else { | ||
| 1670 | switch (connector_type) { | ||
| 1671 | case DRM_MODE_CONNECTOR_VGA: | ||
| 1672 | drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_vga_funcs, connector_type); | ||
| 1673 | drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs); | ||
| 1674 | if (i2c_bus->valid) { | ||
| 1675 | amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus); | ||
| 1676 | if (!amdgpu_connector->ddc_bus) | ||
| 1677 | DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); | ||
| 1678 | } | ||
| 1679 | amdgpu_connector->dac_load_detect = true; | ||
| 1680 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1681 | adev->mode_info.load_detect_property, | ||
| 1682 | 1); | ||
| 1683 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1684 | dev->mode_config.scaling_mode_property, | ||
| 1685 | DRM_MODE_SCALE_NONE); | ||
| 1686 | /* no HPD on analog connectors */ | ||
| 1687 | amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE; | ||
| 1688 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
| 1689 | connector->interlace_allowed = true; | ||
| 1690 | connector->doublescan_allowed = true; | ||
| 1691 | break; | ||
| 1692 | case DRM_MODE_CONNECTOR_DVIA: | ||
| 1693 | drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_vga_funcs, connector_type); | ||
| 1694 | drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs); | ||
| 1695 | if (i2c_bus->valid) { | ||
| 1696 | amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus); | ||
| 1697 | if (!amdgpu_connector->ddc_bus) | ||
| 1698 | DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); | ||
| 1699 | } | ||
| 1700 | amdgpu_connector->dac_load_detect = true; | ||
| 1701 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1702 | adev->mode_info.load_detect_property, | ||
| 1703 | 1); | ||
| 1704 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1705 | dev->mode_config.scaling_mode_property, | ||
| 1706 | DRM_MODE_SCALE_NONE); | ||
| 1707 | /* no HPD on analog connectors */ | ||
| 1708 | amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE; | ||
| 1709 | connector->interlace_allowed = true; | ||
| 1710 | connector->doublescan_allowed = true; | ||
| 1711 | break; | ||
| 1712 | case DRM_MODE_CONNECTOR_DVII: | ||
| 1713 | case DRM_MODE_CONNECTOR_DVID: | ||
| 1714 | amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL); | ||
| 1715 | if (!amdgpu_dig_connector) | ||
| 1716 | goto failed; | ||
| 1717 | amdgpu_connector->con_priv = amdgpu_dig_connector; | ||
| 1718 | drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dvi_funcs, connector_type); | ||
| 1719 | drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs); | ||
| 1720 | if (i2c_bus->valid) { | ||
| 1721 | amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus); | ||
| 1722 | if (!amdgpu_connector->ddc_bus) | ||
| 1723 | DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); | ||
| 1724 | } | ||
| 1725 | subpixel_order = SubPixelHorizontalRGB; | ||
| 1726 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1727 | adev->mode_info.coherent_mode_property, | ||
| 1728 | 1); | ||
| 1729 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1730 | adev->mode_info.underscan_property, | ||
| 1731 | UNDERSCAN_OFF); | ||
| 1732 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1733 | adev->mode_info.underscan_hborder_property, | ||
| 1734 | 0); | ||
| 1735 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1736 | adev->mode_info.underscan_vborder_property, | ||
| 1737 | 0); | ||
| 1738 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1739 | dev->mode_config.scaling_mode_property, | ||
| 1740 | DRM_MODE_SCALE_NONE); | ||
| 1741 | |||
| 1742 | if (amdgpu_audio != 0) { | ||
| 1743 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1744 | adev->mode_info.audio_property, | ||
| 1745 | AMDGPU_AUDIO_AUTO); | ||
| 1746 | } | ||
| 1747 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1748 | adev->mode_info.dither_property, | ||
| 1749 | AMDGPU_FMT_DITHER_DISABLE); | ||
| 1750 | if (connector_type == DRM_MODE_CONNECTOR_DVII) { | ||
| 1751 | amdgpu_connector->dac_load_detect = true; | ||
| 1752 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1753 | adev->mode_info.load_detect_property, | ||
| 1754 | 1); | ||
| 1755 | } | ||
| 1756 | connector->interlace_allowed = true; | ||
| 1757 | if (connector_type == DRM_MODE_CONNECTOR_DVII) | ||
| 1758 | connector->doublescan_allowed = true; | ||
| 1759 | else | ||
| 1760 | connector->doublescan_allowed = false; | ||
| 1761 | break; | ||
| 1762 | case DRM_MODE_CONNECTOR_HDMIA: | ||
| 1763 | case DRM_MODE_CONNECTOR_HDMIB: | ||
| 1764 | amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL); | ||
| 1765 | if (!amdgpu_dig_connector) | ||
| 1766 | goto failed; | ||
| 1767 | amdgpu_connector->con_priv = amdgpu_dig_connector; | ||
| 1768 | drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dvi_funcs, connector_type); | ||
| 1769 | drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs); | ||
| 1770 | if (i2c_bus->valid) { | ||
| 1771 | amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus); | ||
| 1772 | if (!amdgpu_connector->ddc_bus) | ||
| 1773 | DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); | ||
| 1774 | } | ||
| 1775 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1776 | adev->mode_info.coherent_mode_property, | ||
| 1777 | 1); | ||
| 1778 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1779 | adev->mode_info.underscan_property, | ||
| 1780 | UNDERSCAN_OFF); | ||
| 1781 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1782 | adev->mode_info.underscan_hborder_property, | ||
| 1783 | 0); | ||
| 1784 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1785 | adev->mode_info.underscan_vborder_property, | ||
| 1786 | 0); | ||
| 1787 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1788 | dev->mode_config.scaling_mode_property, | ||
| 1789 | DRM_MODE_SCALE_NONE); | ||
| 1790 | if (amdgpu_audio != 0) { | ||
| 1791 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1792 | adev->mode_info.audio_property, | ||
| 1793 | AMDGPU_AUDIO_AUTO); | ||
| 1794 | } | ||
| 1795 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1796 | adev->mode_info.dither_property, | ||
| 1797 | AMDGPU_FMT_DITHER_DISABLE); | ||
| 1798 | subpixel_order = SubPixelHorizontalRGB; | ||
| 1799 | connector->interlace_allowed = true; | ||
| 1800 | if (connector_type == DRM_MODE_CONNECTOR_HDMIB) | ||
| 1801 | connector->doublescan_allowed = true; | ||
| 1802 | else | ||
| 1803 | connector->doublescan_allowed = false; | ||
| 1804 | break; | ||
| 1805 | case DRM_MODE_CONNECTOR_DisplayPort: | ||
| 1806 | amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL); | ||
| 1807 | if (!amdgpu_dig_connector) | ||
| 1808 | goto failed; | ||
| 1809 | amdgpu_connector->con_priv = amdgpu_dig_connector; | ||
| 1810 | drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dp_funcs, connector_type); | ||
| 1811 | drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs); | ||
| 1812 | if (i2c_bus->valid) { | ||
| 1813 | amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus); | ||
| 1814 | if (amdgpu_connector->ddc_bus) | ||
| 1815 | has_aux = true; | ||
| 1816 | else | ||
| 1817 | DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); | ||
| 1818 | } | ||
| 1819 | subpixel_order = SubPixelHorizontalRGB; | ||
| 1820 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1821 | adev->mode_info.coherent_mode_property, | ||
| 1822 | 1); | ||
| 1823 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1824 | adev->mode_info.underscan_property, | ||
| 1825 | UNDERSCAN_OFF); | ||
| 1826 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1827 | adev->mode_info.underscan_hborder_property, | ||
| 1828 | 0); | ||
| 1829 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1830 | adev->mode_info.underscan_vborder_property, | ||
| 1831 | 0); | ||
| 1832 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1833 | dev->mode_config.scaling_mode_property, | ||
| 1834 | DRM_MODE_SCALE_NONE); | ||
| 1835 | if (amdgpu_audio != 0) { | ||
| 1836 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1837 | adev->mode_info.audio_property, | ||
| 1838 | AMDGPU_AUDIO_AUTO); | ||
| 1839 | } | ||
| 1840 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1841 | adev->mode_info.dither_property, | ||
| 1842 | AMDGPU_FMT_DITHER_DISABLE); | ||
| 1843 | connector->interlace_allowed = true; | ||
| 1844 | /* in theory with a DP to VGA converter... */ | ||
| 1845 | connector->doublescan_allowed = false; | ||
| 1846 | break; | ||
| 1847 | case DRM_MODE_CONNECTOR_eDP: | ||
| 1848 | amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL); | ||
| 1849 | if (!amdgpu_dig_connector) | ||
| 1850 | goto failed; | ||
| 1851 | amdgpu_connector->con_priv = amdgpu_dig_connector; | ||
| 1852 | drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_edp_funcs, connector_type); | ||
| 1853 | drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs); | ||
| 1854 | if (i2c_bus->valid) { | ||
| 1855 | amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus); | ||
| 1856 | if (amdgpu_connector->ddc_bus) | ||
| 1857 | has_aux = true; | ||
| 1858 | else | ||
| 1859 | DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); | ||
| 1860 | } | ||
| 1861 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1862 | dev->mode_config.scaling_mode_property, | ||
| 1863 | DRM_MODE_SCALE_FULLSCREEN); | ||
| 1864 | subpixel_order = SubPixelHorizontalRGB; | ||
| 1865 | connector->interlace_allowed = false; | ||
| 1866 | connector->doublescan_allowed = false; | ||
| 1867 | break; | ||
| 1868 | case DRM_MODE_CONNECTOR_LVDS: | ||
| 1869 | amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL); | ||
| 1870 | if (!amdgpu_dig_connector) | ||
| 1871 | goto failed; | ||
| 1872 | amdgpu_connector->con_priv = amdgpu_dig_connector; | ||
| 1873 | drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_lvds_funcs, connector_type); | ||
| 1874 | drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_lvds_helper_funcs); | ||
| 1875 | if (i2c_bus->valid) { | ||
| 1876 | amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus); | ||
| 1877 | if (!amdgpu_connector->ddc_bus) | ||
| 1878 | DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); | ||
| 1879 | } | ||
| 1880 | drm_object_attach_property(&amdgpu_connector->base.base, | ||
| 1881 | dev->mode_config.scaling_mode_property, | ||
| 1882 | DRM_MODE_SCALE_FULLSCREEN); | ||
| 1883 | subpixel_order = SubPixelHorizontalRGB; | ||
| 1884 | connector->interlace_allowed = false; | ||
| 1885 | connector->doublescan_allowed = false; | ||
| 1886 | break; | ||
| 1887 | } | ||
| 1888 | } | ||
| 1889 | |||
| 1890 | if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) { | ||
| 1891 | if (i2c_bus->valid) | ||
| 1892 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
| 1893 | } else | ||
| 1894 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
| 1895 | |||
| 1896 | connector->display_info.subpixel_order = subpixel_order; | ||
| 1897 | drm_connector_register(connector); | ||
| 1898 | |||
| 1899 | if (has_aux) | ||
| 1900 | amdgpu_atombios_dp_aux_init(amdgpu_connector); | ||
| 1901 | |||
| 1902 | return; | ||
| 1903 | |||
| 1904 | failed: | ||
| 1905 | drm_connector_cleanup(connector); | ||
| 1906 | kfree(connector); | ||
| 1907 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.h new file mode 100644 index 000000000000..61fcef15ad72 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.h | |||
| @@ -0,0 +1,42 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __AMDGPU_CONNECTORS_H__ | ||
| 25 | #define __AMDGPU_CONNECTORS_H__ | ||
| 26 | |||
| 27 | struct edid *amdgpu_connector_edid(struct drm_connector *connector); | ||
| 28 | void amdgpu_connector_hotplug(struct drm_connector *connector); | ||
| 29 | int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector); | ||
| 30 | u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector); | ||
| 31 | bool amdgpu_connector_is_dp12_capable(struct drm_connector *connector); | ||
| 32 | void | ||
| 33 | amdgpu_connector_add(struct amdgpu_device *adev, | ||
| 34 | uint32_t connector_id, | ||
| 35 | uint32_t supported_device, | ||
| 36 | int connector_type, | ||
| 37 | struct amdgpu_i2c_bus_rec *i2c_bus, | ||
| 38 | uint16_t connector_object_id, | ||
| 39 | struct amdgpu_hpd *hpd, | ||
| 40 | struct amdgpu_router *router); | ||
| 41 | |||
| 42 | #endif | ||
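
For context, amdgpu_connector_add() declared above is the single entry point the ASIC-specific BIOS parsing code uses to register connectors. A hypothetical call site might look like the following; the numeric ids and field values are made up for illustration and are not taken from this patch:

```c
/* Hypothetical caller: register one HDMI-A port with an HPD pin and a
 * valid DDC bus. All numeric values here are illustrative only. */
static void example_register_hdmi(struct amdgpu_device *adev)
{
	struct amdgpu_i2c_bus_rec i2c_bus = {
		.valid = true,
		.i2c_id = 0x90,             /* illustrative bus id */
	};
	struct amdgpu_hpd hpd = { .hpd = AMDGPU_HPD_1 };
	struct amdgpu_router router = {
		.ddc_valid = false,         /* no DDC mux on this board */
		.cd_valid = false,
	};

	amdgpu_connector_add(adev,
			     5,             /* connector_id */
			     ATOM_DEVICE_DFP1_SUPPORT,
			     DRM_MODE_CONNECTOR_HDMIA,
			     &i2c_bus,
			     CONNECTOR_OBJECT_ID_HDMI_TYPE_A,
			     &hpd, &router);
}
```
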
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c new file mode 100644 index 000000000000..70a90312d0a4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
| @@ -0,0 +1,825 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Jerome Glisse. | ||
| 3 | * All Rights Reserved. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the "Software"), | ||
| 7 | * to deal in the Software without restriction, including without limitation | ||
| 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 10 | * Software is furnished to do so, subject to the following conditions: | ||
| 11 | * | ||
| 12 | * The above copyright notice and this permission notice (including the next | ||
| 13 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 14 | * Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 22 | * DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: | ||
| 25 | * Jerome Glisse <glisse@freedesktop.org> | ||
| 26 | */ | ||
| 27 | #include <linux/list_sort.h> | ||
| 28 | #include <drm/drmP.h> | ||
| 29 | #include <drm/amdgpu_drm.h> | ||
| 30 | #include "amdgpu.h" | ||
| 31 | #include "amdgpu_trace.h" | ||
| 32 | |||
| 33 | #define AMDGPU_CS_MAX_PRIORITY 32u | ||
| 34 | #define AMDGPU_CS_NUM_BUCKETS (AMDGPU_CS_MAX_PRIORITY + 1) | ||
| 35 | |||
| 36 | /* This is based on the bucket sort with O(n) time complexity. | ||
| 37 | * An item with priority "i" is added to bucket[i]. The lists are then | ||
| 38 | * concatenated in descending order. | ||
| 39 | */ | ||
| 40 | struct amdgpu_cs_buckets { | ||
| 41 | struct list_head bucket[AMDGPU_CS_NUM_BUCKETS]; | ||
| 42 | }; | ||
| 43 | |||
| 44 | static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b) | ||
| 45 | { | ||
| 46 | unsigned i; | ||
| 47 | |||
| 48 | for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) | ||
| 49 | INIT_LIST_HEAD(&b->bucket[i]); | ||
| 50 | } | ||
| 51 | |||
| 52 | static void amdgpu_cs_buckets_add(struct amdgpu_cs_buckets *b, | ||
| 53 | struct list_head *item, unsigned priority) | ||
| 54 | { | ||
| 55 | /* Since buffers which appear sooner in the relocation list are | ||
| 56 | * likely to be used more often than buffers which appear later | ||
| 57 | * in the list, the sort mustn't change the ordering of buffers | ||
| 58 | * with the same priority, i.e. it must be stable. | ||
| 59 | */ | ||
| 60 | list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]); | ||
| 61 | } | ||
| 62 | |||
| 63 | static void amdgpu_cs_buckets_get_list(struct amdgpu_cs_buckets *b, | ||
| 64 | struct list_head *out_list) | ||
| 65 | { | ||
| 66 | unsigned i; | ||
| 67 | |||
| 68 | /* Connect the sorted buckets in the output list. */ | ||
| 69 | for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) { | ||
| 70 | list_splice(&b->bucket[i], out_list); | ||
| 71 | } | ||
| 72 | } | ||
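
The three helpers above implement a stable bucket sort over list_heads: items are appended to their priority bucket in input order, and the buckets are then concatenated so that the highest priority comes first. The same idea in stand-alone form, as an array-based analogue rather than kernel code:

```c
#include <stdio.h>

#define NUM_BUCKETS 33   /* priorities 0..32, mirroring AMDGPU_CS_MAX_PRIORITY */

struct item { int id; unsigned prio; };

/* Stable bucket sort: one pass to count, one pass to lay out the buckets
 * from highest priority down, one pass to distribute in input order. */
static void bucket_sort(const struct item *in, int n, struct item *out)
{
	int count[NUM_BUCKETS] = {0}, start[NUM_BUCKETS], i, b;

	for (i = 0; i < n; i++)
		count[in[i].prio]++;

	/* The highest-priority bucket comes first in the output. */
	for (b = NUM_BUCKETS - 1, i = 0; b >= 0; b--) {
		start[b] = i;
		i += count[b];
	}

	for (i = 0; i < n; i++)              /* stable: input order kept */
		out[start[in[i].prio]++] = in[i];
}

int main(void)
{
	struct item in[] = { {1, 2}, {2, 0}, {3, 2}, {4, 1} };
	struct item out[4];
	int i;

	bucket_sort(in, 4, out);
	for (i = 0; i < 4; i++)              /* prints ids 1, 3, 4, 2 */
		printf("id %d prio %u\n", out[i].id, out[i].prio);
	return 0;
}
```
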
| 73 | |||
| 74 | int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, | ||
| 75 | u32 ip_instance, u32 ring, | ||
| 76 | struct amdgpu_ring **out_ring) | ||
| 77 | { | ||
| 78 | /* Right now all IPs have only one instance, but may expose multiple rings. */ | ||
| 79 | if (ip_instance != 0) { | ||
| 80 | DRM_ERROR("invalid ip instance: %d\n", ip_instance); | ||
| 81 | return -EINVAL; | ||
| 82 | } | ||
| 83 | |||
| 84 | switch (ip_type) { | ||
| 85 | default: | ||
| 86 | DRM_ERROR("unknown ip type: %d\n", ip_type); | ||
| 87 | return -EINVAL; | ||
| 88 | case AMDGPU_HW_IP_GFX: | ||
| 89 | if (ring < adev->gfx.num_gfx_rings) { | ||
| 90 | *out_ring = &adev->gfx.gfx_ring[ring]; | ||
| 91 | } else { | ||
| 92 | DRM_ERROR("only %d gfx rings are supported now\n", | ||
| 93 | adev->gfx.num_gfx_rings); | ||
| 94 | return -EINVAL; | ||
| 95 | } | ||
| 96 | break; | ||
| 97 | case AMDGPU_HW_IP_COMPUTE: | ||
| 98 | if (ring < adev->gfx.num_compute_rings) { | ||
| 99 | *out_ring = &adev->gfx.compute_ring[ring]; | ||
| 100 | } else { | ||
| 101 | DRM_ERROR("only %d compute rings are supported now\n", | ||
| 102 | adev->gfx.num_compute_rings); | ||
| 103 | return -EINVAL; | ||
| 104 | } | ||
| 105 | break; | ||
| 106 | case AMDGPU_HW_IP_DMA: | ||
| 107 | if (ring < 2) { | ||
| 108 | *out_ring = &adev->sdma[ring].ring; | ||
| 109 | } else { | ||
| 110 | DRM_ERROR("only two SDMA rings are supported\n"); | ||
| 111 | return -EINVAL; | ||
| 112 | } | ||
| 113 | break; | ||
| 114 | case AMDGPU_HW_IP_UVD: | ||
| 115 | *out_ring = &adev->uvd.ring; | ||
| 116 | break; | ||
| 117 | case AMDGPU_HW_IP_VCE: | ||
| 118 | if (ring < 2) { | ||
| 119 | *out_ring = &adev->vce.ring[ring]; | ||
| 120 | } else { | ||
| 121 | DRM_ERROR("only two VCE rings are supported\n"); | ||
| 122 | return -EINVAL; | ||
| 123 | } | ||
| 124 | break; | ||
| 125 | } | ||
| 126 | return 0; | ||
| 127 | } | ||
| 128 | |||
| 129 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | ||
| 130 | { | ||
| 131 | union drm_amdgpu_cs *cs = data; | ||
| 132 | uint64_t *chunk_array_user; | ||
| 133 | uint64_t *chunk_array = NULL; | ||
| 134 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | ||
| 135 | unsigned size, i; | ||
| 136 | int r = 0; | ||
| 137 | |||
| 138 | if (!cs->in.num_chunks) | ||
| 139 | goto out; | ||
| 140 | |||
| 141 | p->ctx_id = cs->in.ctx_id; | ||
| 142 | p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); | ||
| 143 | |||
| 144 | /* get chunks */ | ||
| 145 | INIT_LIST_HEAD(&p->validated); | ||
| 146 | chunk_array = kcalloc(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); | ||
| 147 | if (chunk_array == NULL) { | ||
| 148 | r = -ENOMEM; | ||
| 149 | goto out; | ||
| 150 | } | ||
| 151 | |||
| 152 | chunk_array_user = (uint64_t *)(unsigned long)(cs->in.chunks); | ||
| 153 | if (copy_from_user(chunk_array, chunk_array_user, | ||
| 154 | sizeof(uint64_t)*cs->in.num_chunks)) { | ||
| 155 | r = -EFAULT; | ||
| 156 | goto out; | ||
| 157 | } | ||
| 158 | |||
| 159 | p->nchunks = cs->in.num_chunks; | ||
| 160 | p->chunks = kcalloc(p->nchunks, sizeof(struct amdgpu_cs_chunk), | ||
| 161 | GFP_KERNEL); | ||
| 162 | if (p->chunks == NULL) { | ||
| 163 | r = -ENOMEM; | ||
| 164 | goto out; | ||
| 165 | } | ||
| 166 | |||
| 167 | for (i = 0; i < p->nchunks; i++) { | ||
| 168 | struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL; | ||
| 169 | struct drm_amdgpu_cs_chunk user_chunk; | ||
| 170 | uint32_t __user *cdata; | ||
| 171 | |||
| 172 | chunk_ptr = (void __user *)(unsigned long)chunk_array[i]; | ||
| 173 | if (copy_from_user(&user_chunk, chunk_ptr, | ||
| 174 | sizeof(struct drm_amdgpu_cs_chunk))) { | ||
| 175 | r = -EFAULT; | ||
| 176 | goto out; | ||
| 177 | } | ||
| 178 | p->chunks[i].chunk_id = user_chunk.chunk_id; | ||
| 179 | p->chunks[i].length_dw = user_chunk.length_dw; | ||
| 180 | if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_IB) | ||
| 181 | p->num_ibs++; | ||
| 182 | |||
| 183 | size = p->chunks[i].length_dw; | ||
| 184 | cdata = (void __user *)(unsigned long)user_chunk.chunk_data; | ||
| 185 | p->chunks[i].user_ptr = cdata; | ||
| 186 | |||
| 187 | p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); | ||
| 188 | if (p->chunks[i].kdata == NULL) { | ||
| 189 | r = -ENOMEM; | ||
| 190 | goto out; | ||
| 191 | } | ||
| 192 | size *= sizeof(uint32_t); | ||
| 193 | if (copy_from_user(p->chunks[i].kdata, cdata, size)) { | ||
| 194 | r = -EFAULT; | ||
| 195 | goto out; | ||
| 196 | } | ||
| 197 | |||
| 198 | if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_FENCE) { | ||
| 199 | size = sizeof(struct drm_amdgpu_cs_chunk_fence); | ||
| 200 | if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) { | ||
| 201 | uint32_t handle; | ||
| 202 | struct drm_gem_object *gobj; | ||
| 203 | struct drm_amdgpu_cs_chunk_fence *fence_data; | ||
| 204 | |||
| 205 | fence_data = (void *)p->chunks[i].kdata; | ||
| 206 | handle = fence_data->handle; | ||
| 207 | gobj = drm_gem_object_lookup(p->adev->ddev, | ||
| 208 | p->filp, handle); | ||
| 209 | if (gobj == NULL) { | ||
| 210 | r = -EINVAL; | ||
| 211 | goto out; | ||
| 212 | } | ||
| 213 | |||
| 214 | p->uf.bo = gem_to_amdgpu_bo(gobj); | ||
| 215 | p->uf.offset = fence_data->offset; | ||
| 216 | } else { | ||
| 217 | r = -EINVAL; | ||
| 218 | goto out; | ||
| 219 | } | ||
| 220 | } | ||
| 221 | } | ||
| 222 | |||
| 223 | p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL); | ||
| 224 | if (!p->ibs) { | ||
| 225 | r = -ENOMEM; | ||
| 226 | goto out; | ||
| 227 | } | ||
| 228 | |||
| 229 | p->ib_bos = kcalloc(p->num_ibs, sizeof(struct amdgpu_bo_list_entry), | ||
| 230 | GFP_KERNEL); | ||
| 231 | if (!p->ib_bos) | ||
| 232 | r = -ENOMEM; | ||
| 233 | |||
| 234 | out: | ||
| 235 | kfree(chunk_array); | ||
| 236 | return r; | ||
| 237 | } | ||
| 238 | |||
| 239 | /* Returns how many bytes TTM can move per IB. | ||
| 240 | */ | ||
| 241 | static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) | ||
| 242 | { | ||
| 243 | u64 real_vram_size = adev->mc.real_vram_size; | ||
| 244 | u64 vram_usage = atomic64_read(&adev->vram_usage); | ||
| 245 | |||
| 246 | /* This function is based on the current VRAM usage. | ||
| 247 | * | ||
| 248 | * - If all of VRAM is free, allow relocating the number of bytes that | ||
| 249 | * is equal to 1/4 of the size of VRAM for this IB. | ||
| 250 | * | ||
| 251 | * - If more than one half of VRAM is occupied, only allow relocating | ||
| 252 | * 1 MB of data for this IB. | ||
| 253 | * | ||
| 254 | * - From 0 to one half of used VRAM, the threshold decreases | ||
| 255 | * linearly. | ||
| 256 | * __________________ | ||
| 257 | * 1/4 of -|\ | | ||
| 258 | * VRAM | \ | | ||
| 259 | * | \ | | ||
| 260 | * | \ | | ||
| 261 | * | \ | | ||
| 262 | * | \ | | ||
| 263 | * | \ | | ||
| 264 | * | \________|1 MB | ||
| 265 | * |----------------| | ||
| 266 | * VRAM 0 % 100 % | ||
| 267 | * used used | ||
| 268 | * | ||
| 269 | * Note: It's a threshold, not a limit. The threshold must be crossed | ||
| 270 | * for buffer relocations to stop, so any buffer of an arbitrary size | ||
| 271 | * can be moved as long as the threshold isn't crossed before | ||
| 272 | * the relocation takes place. We don't want to disable buffer | ||
| 273 | * relocations completely. | ||
| 274 | * | ||
| 275 | * The idea is that buffers should be placed in VRAM at creation time | ||
| 276 | * and TTM should only do a minimum number of relocations during | ||
| 277 | * command submission. In practice, you need to submit at least | ||
| 278 | * a dozen IBs to move all buffers to VRAM if they are in GTT. | ||
| 279 | * | ||
| 280 | * Also, things can get pretty crazy under memory pressure and actual | ||
| 281 | * VRAM usage can change a lot, so playing safe even at 50% does | ||
| 282 | * consistently increase performance. | ||
| 283 | */ | ||
| 284 | |||
| 285 | u64 half_vram = real_vram_size >> 1; | ||
| 286 | u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage; | ||
| 287 | u64 bytes_moved_threshold = half_free_vram >> 1; | ||
| 288 | return max(bytes_moved_threshold, 1024*1024ull); | ||
| 289 | } | ||
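To make the curve concrete, here is a standalone sketch of the same arithmetic; the 4 GiB VRAM size and the usage points are made-up sample inputs, not values taken from the driver:

```c
#include <stdint.h>
#include <stdio.h>

/* Same math as amdgpu_cs_get_threshold_for_moves(), with sample inputs. */
static uint64_t threshold(uint64_t vram_size, uint64_t vram_usage)
{
	uint64_t half_vram = vram_size >> 1;
	uint64_t half_free = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	uint64_t t = half_free >> 1;

	return t > (1024 * 1024) ? t : 1024 * 1024;	/* 1 MB floor */
}

int main(void)
{
	uint64_t vram = 4ULL << 30;	/* hypothetical 4 GiB board */

	/* 0% used -> 1 GiB, 25% used -> 512 MiB, 50%+ used -> 1 MiB */
	printf("%llu %llu %llu\n",
	       (unsigned long long)threshold(vram, 0),
	       (unsigned long long)threshold(vram, vram / 4),
	       (unsigned long long)threshold(vram, vram / 2));
	return 0;
}
```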
| 290 | |||
| 291 | int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p) | ||
| 292 | { | ||
| 293 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | ||
| 294 | struct amdgpu_vm *vm = &fpriv->vm; | ||
| 295 | struct amdgpu_device *adev = p->adev; | ||
| 296 | struct amdgpu_bo_list_entry *lobj; | ||
| 297 | struct list_head duplicates; | ||
| 298 | struct amdgpu_bo *bo; | ||
| 299 | u64 bytes_moved = 0, initial_bytes_moved; | ||
| 300 | u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev); | ||
| 301 | int r; | ||
| 302 | |||
| 303 | INIT_LIST_HEAD(&duplicates); | ||
| 304 | r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates); | ||
| 305 | if (unlikely(r != 0)) { | ||
| 306 | return r; | ||
| 307 | } | ||
| 308 | |||
| 309 | list_for_each_entry(lobj, &p->validated, tv.head) { | ||
| 310 | bo = lobj->robj; | ||
| 311 | if (!bo->pin_count) { | ||
| 312 | u32 domain = lobj->prefered_domains; | ||
| 313 | u32 current_domain = | ||
| 314 | amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); | ||
| 315 | |||
| 316 | /* Check if this buffer will be moved and don't move it | ||
| 317 | * if we have moved too many buffers for this IB already. | ||
| 318 | * | ||
| 319 | * Note that this allows moving at least one buffer of | ||
| 320 | * any size, because it doesn't take the current "bo" | ||
| 321 | * into account. We don't want to disallow buffer moves | ||
| 322 | * completely. | ||
| 323 | */ | ||
| 324 | if (current_domain != AMDGPU_GEM_DOMAIN_CPU && | ||
| 325 | (domain & current_domain) == 0 && /* will be moved */ | ||
| 326 | bytes_moved > bytes_moved_threshold) { | ||
| 327 | /* don't move it */ | ||
| 328 | domain = current_domain; | ||
| 329 | } | ||
| 330 | |||
| 331 | retry: | ||
| 332 | amdgpu_ttm_placement_from_domain(bo, domain); | ||
| 333 | initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); | ||
| 334 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); | ||
| 335 | bytes_moved += atomic64_read(&adev->num_bytes_moved) - | ||
| 336 | initial_bytes_moved; | ||
| 337 | |||
| 338 | if (unlikely(r)) { | ||
| 339 | if (r != -ERESTARTSYS && domain != lobj->allowed_domains) { | ||
| 340 | domain = lobj->allowed_domains; | ||
| 341 | goto retry; | ||
| 342 | } | ||
| 343 | ttm_eu_backoff_reservation(&p->ticket, &p->validated); | ||
| 344 | return r; | ||
| 345 | } | ||
| 346 | } | ||
| 347 | lobj->bo_va = amdgpu_vm_bo_find(vm, bo); | ||
| 348 | } | ||
| 349 | return 0; | ||
| 350 | } | ||
| 351 | |||
| 352 | static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) | ||
| 353 | { | ||
| 354 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | ||
| 355 | struct amdgpu_cs_buckets buckets; | ||
| 356 | bool need_mmap_lock; | ||
| 357 | int i, r; | ||
| 358 | |||
| 359 | if (p->bo_list == NULL) | ||
| 360 | return 0; | ||
| 361 | |||
| 362 | need_mmap_lock = p->bo_list->has_userptr; | ||
| 363 | amdgpu_cs_buckets_init(&buckets); | ||
| 364 | for (i = 0; i < p->bo_list->num_entries; i++) | ||
| 365 | amdgpu_cs_buckets_add(&buckets, &p->bo_list->array[i].tv.head, | ||
| 366 | p->bo_list->array[i].priority); | ||
| 367 | |||
| 368 | amdgpu_cs_buckets_get_list(&buckets, &p->validated); | ||
| 369 | p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm, | ||
| 370 | &p->validated); | ||
| 371 | |||
| 372 | for (i = 0; i < p->num_ibs; i++) { | ||
| 373 | if (!p->ib_bos[i].robj) | ||
| 374 | continue; | ||
| 375 | |||
| 376 | list_add(&p->ib_bos[i].tv.head, &p->validated); | ||
| 377 | } | ||
| 378 | |||
| 379 | if (need_mmap_lock) | ||
| 380 | down_read(¤t->mm->mmap_sem); | ||
| 381 | |||
| 382 | r = amdgpu_cs_list_validate(p); | ||
| 383 | |||
| 384 | if (need_mmap_lock) | ||
| 385 | up_read(¤t->mm->mmap_sem); | ||
| 386 | |||
| 387 | return r; | ||
| 388 | } | ||
| 389 | |||
| 390 | static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) | ||
| 391 | { | ||
| 392 | struct amdgpu_bo_list_entry *e; | ||
| 393 | int r; | ||
| 394 | |||
| 395 | list_for_each_entry(e, &p->validated, tv.head) { | ||
| 396 | struct reservation_object *resv = e->robj->tbo.resv; | ||
| 397 | r = amdgpu_sync_resv(p->adev, &p->ibs[0].sync, resv, p->filp); | ||
| 398 | |||
| 399 | if (r) | ||
| 400 | return r; | ||
| 401 | } | ||
| 402 | return 0; | ||
| 403 | } | ||
| 404 | |||
| 405 | static int cmp_size_smaller_first(void *priv, struct list_head *a, | ||
| 406 | struct list_head *b) | ||
| 407 | { | ||
| 408 | struct amdgpu_bo_list_entry *la = list_entry(a, struct amdgpu_bo_list_entry, tv.head); | ||
| 409 | struct amdgpu_bo_list_entry *lb = list_entry(b, struct amdgpu_bo_list_entry, tv.head); | ||
| 410 | |||
| 411 | /* Sort A before B if A is smaller. */ | ||
| 412 | return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; | ||
| 413 | } | ||
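A quick arithmetic check of the comparator: with hypothetical sizes of 4 pages for A and 16 pages for B it returns 4 - 16 = -12, so list_sort() places A first; equal sizes return 0, and since list_sort() is a stable merge sort, equal-sized buffers keep their existing order.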
| 414 | |||
| 415 | /** | ||
| 416 | * amdgpu_cs_parser_fini() - clean up parser state | ||
| 417 | * @parser: parser structure holding parsing context. | ||
| 418 | * @error: error number | ||
| 419 | * @backoff: indicator to back off the buffer reservations | ||
| 420 | * | ||
| 421 | * If error is set, unreserve the buffers; otherwise just free the memory used by the parsing context. | ||
| 422 | **/ | ||
| 423 | static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) | ||
| 424 | { | ||
| 425 | unsigned i; | ||
| 426 | |||
| 427 | if (!error) { | ||
| 428 | /* Sort the buffer list from the smallest to largest buffer, | ||
| 429 | * which affects the order of buffers in the LRU list. | ||
| 430 | * This assures that the smallest buffers are added first | ||
| 431 | * to the LRU list, so they are likely to be later evicted | ||
| 432 | * first, instead of large buffers whose eviction is more | ||
| 433 | * expensive. | ||
| 434 | * | ||
| 435 | * This slightly lowers the number of bytes moved by TTM | ||
| 436 | * per frame under memory pressure. | ||
| 437 | */ | ||
| 438 | list_sort(NULL, &parser->validated, cmp_size_smaller_first); | ||
| 439 | |||
| 440 | ttm_eu_fence_buffer_objects(&parser->ticket, | ||
| 441 | &parser->validated, | ||
| 442 | &parser->ibs[parser->num_ibs-1].fence->base); | ||
| 443 | } else if (backoff) { | ||
| 444 | ttm_eu_backoff_reservation(&parser->ticket, | ||
| 445 | &parser->validated); | ||
| 446 | } | ||
| 447 | |||
| 448 | if (parser->bo_list) | ||
| 449 | amdgpu_bo_list_put(parser->bo_list); | ||
| 450 | drm_free_large(parser->vm_bos); | ||
| 451 | for (i = 0; i < parser->nchunks; i++) | ||
| 452 | drm_free_large(parser->chunks[i].kdata); | ||
| 453 | kfree(parser->chunks); | ||
| 454 | for (i = 0; i < parser->num_ibs; i++) { | ||
| 455 | struct amdgpu_bo *bo = parser->ib_bos[i].robj; | ||
| 456 | amdgpu_ib_free(parser->adev, &parser->ibs[i]); | ||
| 457 | |||
| 458 | if (bo) | ||
| 459 | drm_gem_object_unreference_unlocked(&bo->gem_base); | ||
| 460 | } | ||
| 461 | kfree(parser->ibs); | ||
| 462 | kfree(parser->ib_bos); | ||
| 463 | if (parser->uf.bo) | ||
| 464 | drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); | ||
| 465 | } | ||
| 466 | |||
| 467 | static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, | ||
| 468 | struct amdgpu_vm *vm) | ||
| 469 | { | ||
| 470 | struct amdgpu_device *adev = p->adev; | ||
| 471 | struct amdgpu_bo_va *bo_va; | ||
| 472 | struct amdgpu_bo *bo; | ||
| 473 | int i, r; | ||
| 474 | |||
| 475 | r = amdgpu_vm_update_page_directory(adev, vm); | ||
| 476 | if (r) | ||
| 477 | return r; | ||
| 478 | |||
| 479 | r = amdgpu_vm_clear_freed(adev, vm); | ||
| 480 | if (r) | ||
| 481 | return r; | ||
| 482 | |||
| 483 | if (p->bo_list) { | ||
| 484 | for (i = 0; i < p->bo_list->num_entries; i++) { | ||
| 485 | /* ignore duplicates */ | ||
| 486 | bo = p->bo_list->array[i].robj; | ||
| 487 | if (!bo) | ||
| 488 | continue; | ||
| 489 | |||
| 490 | bo_va = p->bo_list->array[i].bo_va; | ||
| 491 | if (bo_va == NULL) | ||
| 492 | continue; | ||
| 493 | |||
| 494 | r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem); | ||
| 495 | if (r) | ||
| 496 | return r; | ||
| 497 | |||
| 498 | amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update); | ||
| 499 | } | ||
| 500 | } | ||
| 501 | |||
| 502 | for (i = 0; i < p->num_ibs; i++) { | ||
| 503 | bo = p->ib_bos[i].robj; | ||
| 504 | if (!bo) | ||
| 505 | continue; | ||
| 506 | |||
| 507 | bo_va = p->ib_bos[i].bo_va; | ||
| 508 | if (!bo_va) | ||
| 509 | continue; | ||
| 510 | |||
| 511 | r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem); | ||
| 512 | if (r) | ||
| 513 | return r; | ||
| 514 | |||
| 515 | amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update); | ||
| 516 | } | ||
| 517 | return amdgpu_vm_clear_invalids(adev, vm); | ||
| 518 | } | ||
| 519 | |||
| 520 | static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, | ||
| 521 | struct amdgpu_cs_parser *parser) | ||
| 522 | { | ||
| 523 | struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; | ||
| 524 | struct amdgpu_vm *vm = &fpriv->vm; | ||
| 525 | struct amdgpu_ring *ring; | ||
| 526 | int i, r; | ||
| 527 | |||
| 528 | if (parser->num_ibs == 0) | ||
| 529 | return 0; | ||
| 530 | |||
| 531 | /* Only for UVD/VCE VM emulation */ | ||
| 532 | for (i = 0; i < parser->num_ibs; i++) { | ||
| 533 | ring = parser->ibs[i].ring; | ||
| 534 | if (ring->funcs->parse_cs) { | ||
| 535 | r = amdgpu_ring_parse_cs(ring, parser, i); | ||
| 536 | if (r) | ||
| 537 | return r; | ||
| 538 | } | ||
| 539 | } | ||
| 540 | |||
| 541 | mutex_lock(&vm->mutex); | ||
| 542 | r = amdgpu_bo_vm_update_pte(parser, vm); | ||
| 543 | if (r) { | ||
| 544 | goto out; | ||
| 545 | } | ||
| 546 | amdgpu_cs_sync_rings(parser); | ||
| 547 | |||
| 548 | r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs, | ||
| 549 | parser->filp); | ||
| 550 | |||
| 551 | out: | ||
| 552 | mutex_unlock(&vm->mutex); | ||
| 553 | return r; | ||
| 554 | } | ||
| 555 | |||
| 556 | static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r) | ||
| 557 | { | ||
| 558 | if (r == -EDEADLK) { | ||
| 559 | r = amdgpu_gpu_reset(adev); | ||
| 560 | if (!r) | ||
| 561 | r = -EAGAIN; | ||
| 562 | } | ||
| 563 | return r; | ||
| 564 | } | ||
| 565 | |||
| 566 | static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | ||
| 567 | struct amdgpu_cs_parser *parser) | ||
| 568 | { | ||
| 569 | struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; | ||
| 570 | struct amdgpu_vm *vm = &fpriv->vm; | ||
| 571 | int i, j; | ||
| 572 | int r; | ||
| 573 | |||
| 574 | for (i = 0, j = 0; i < parser->nchunks && j < parser->num_ibs; i++) { | ||
| 575 | struct amdgpu_cs_chunk *chunk; | ||
| 576 | struct amdgpu_ib *ib; | ||
| 577 | struct drm_amdgpu_cs_chunk_ib *chunk_ib; | ||
| 578 | struct amdgpu_bo_list_entry *ib_bo; | ||
| 579 | struct amdgpu_ring *ring; | ||
| 580 | struct drm_gem_object *gobj; | ||
| 581 | struct amdgpu_bo *aobj; | ||
| 582 | void *kptr; | ||
| 583 | |||
| 584 | chunk = &parser->chunks[i]; | ||
| 585 | ib = &parser->ibs[j]; | ||
| 586 | chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata; | ||
| 587 | |||
| 588 | if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB) | ||
| 589 | continue; | ||
| 590 | |||
| 591 | gobj = drm_gem_object_lookup(adev->ddev, parser->filp, chunk_ib->handle); | ||
| 592 | if (gobj == NULL) | ||
| 593 | return -ENOENT; | ||
| 594 | aobj = gem_to_amdgpu_bo(gobj); | ||
| 595 | |||
| 596 | r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type, | ||
| 597 | chunk_ib->ip_instance, chunk_ib->ring, | ||
| 598 | &ring); | ||
| 599 | if (r) { | ||
| 600 | drm_gem_object_unreference_unlocked(gobj); | ||
| 601 | return r; | ||
| 602 | } | ||
| 603 | |||
| 604 | if (ring->funcs->parse_cs) { | ||
| 605 | r = amdgpu_bo_reserve(aobj, false); | ||
| 606 | if (r) { | ||
| 607 | drm_gem_object_unreference_unlocked(gobj); | ||
| 608 | return r; | ||
| 609 | } | ||
| 610 | |||
| 611 | r = amdgpu_bo_kmap(aobj, &kptr); | ||
| 612 | if (r) { | ||
| 613 | amdgpu_bo_unreserve(aobj); | ||
| 614 | drm_gem_object_unreference_unlocked(gobj); | ||
| 615 | return r; | ||
| 616 | } | ||
| 617 | |||
| 618 | r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib); | ||
| 619 | if (r) { | ||
| 620 | DRM_ERROR("Failed to get ib!\n"); | ||
| 621 | amdgpu_bo_unreserve(aobj); | ||
| 622 | drm_gem_object_unreference_unlocked(gobj); | ||
| 623 | return r; | ||
| 624 | } | ||
| 625 | |||
| 626 | memcpy(ib->ptr, kptr, chunk_ib->ib_bytes); | ||
| 627 | amdgpu_bo_kunmap(aobj); | ||
| 628 | amdgpu_bo_unreserve(aobj); | ||
| 629 | } else { | ||
| 630 | r = amdgpu_ib_get(ring, vm, 0, ib); | ||
| 631 | if (r) { | ||
| 632 | DRM_ERROR("Failed to get ib!\n"); | ||
| 633 | drm_gem_object_unreference_unlocked(gobj); | ||
| 634 | return r; | ||
| 635 | } | ||
| 636 | |||
| 637 | ib->gpu_addr = chunk_ib->va_start; | ||
| 638 | } | ||
| 639 | ib->length_dw = chunk_ib->ib_bytes / 4; | ||
| 640 | |||
| 641 | if (chunk_ib->flags & AMDGPU_IB_FLAG_CE) | ||
| 642 | ib->is_const_ib = true; | ||
| 643 | if (chunk_ib->flags & AMDGPU_IB_FLAG_GDS) | ||
| 644 | ib->gds_needed = true; | ||
| 645 | if (ib->ring->current_filp != parser->filp) { | ||
| 646 | ib->ring->need_ctx_switch = true; | ||
| 647 | ib->ring->current_filp = parser->filp; | ||
| 648 | } | ||
| 649 | |||
| 650 | ib_bo = &parser->ib_bos[j]; | ||
| 651 | ib_bo->robj = aobj; | ||
| 652 | ib_bo->prefered_domains = aobj->initial_domain; | ||
| 653 | ib_bo->allowed_domains = aobj->initial_domain; | ||
| 654 | ib_bo->priority = 0; | ||
| 655 | ib_bo->tv.bo = &aobj->tbo; | ||
| 656 | ib_bo->tv.shared = true; | ||
| 657 | j++; | ||
| 658 | } | ||
| 659 | |||
| 660 | if (!parser->num_ibs) | ||
| 661 | return 0; | ||
| 662 | |||
| 663 | /* add GDS resources to first IB */ | ||
| 664 | if (parser->bo_list) { | ||
| 665 | struct amdgpu_bo *gds = parser->bo_list->gds_obj; | ||
| 666 | struct amdgpu_bo *gws = parser->bo_list->gws_obj; | ||
| 667 | struct amdgpu_bo *oa = parser->bo_list->oa_obj; | ||
| 668 | struct amdgpu_ib *ib = &parser->ibs[0]; | ||
| 669 | |||
| 670 | if (gds) { | ||
| 671 | ib->gds_base = amdgpu_bo_gpu_offset(gds); | ||
| 672 | ib->gds_size = amdgpu_bo_size(gds); | ||
| 673 | } | ||
| 674 | if (gws) { | ||
| 675 | ib->gws_base = amdgpu_bo_gpu_offset(gws); | ||
| 676 | ib->gws_size = amdgpu_bo_size(gws); | ||
| 677 | } | ||
| 678 | if (oa) { | ||
| 679 | ib->oa_base = amdgpu_bo_gpu_offset(oa); | ||
| 680 | ib->oa_size = amdgpu_bo_size(oa); | ||
| 681 | } | ||
| 682 | } | ||
| 683 | |||
| 684 | /* wrap the last IB with user fence */ | ||
| 685 | if (parser->uf.bo) { | ||
| 686 | struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1]; | ||
| 687 | |||
| 688 | /* UVD & VCE fw doesn't support user fences */ | ||
| 689 | if (ib->ring->type == AMDGPU_RING_TYPE_UVD || | ||
| 690 | ib->ring->type == AMDGPU_RING_TYPE_VCE) | ||
| 691 | return -EINVAL; | ||
| 692 | |||
| 693 | ib->user = &parser->uf; | ||
| 694 | } | ||
| 695 | |||
| 696 | return 0; | ||
| 697 | } | ||
| 698 | |||
| 699 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | ||
| 700 | { | ||
| 701 | struct amdgpu_device *adev = dev->dev_private; | ||
| 702 | union drm_amdgpu_cs *cs = data; | ||
| 703 | struct amdgpu_cs_parser parser; | ||
| 704 | int r, i; | ||
| 705 | |||
| 706 | down_read(&adev->exclusive_lock); | ||
| 707 | if (!adev->accel_working) { | ||
| 708 | up_read(&adev->exclusive_lock); | ||
| 709 | return -EBUSY; | ||
| 710 | } | ||
| 711 | /* initialize parser */ | ||
| 712 | memset(&parser, 0, sizeof(struct amdgpu_cs_parser)); | ||
| 713 | parser.filp = filp; | ||
| 714 | parser.adev = adev; | ||
| 715 | r = amdgpu_cs_parser_init(&parser, data); | ||
| 716 | if (r) { | ||
| 717 | DRM_ERROR("Failed to initialize parser!\n"); | ||
| 718 | amdgpu_cs_parser_fini(&parser, r, false); | ||
| 719 | up_read(&adev->exclusive_lock); | ||
| 720 | r = amdgpu_cs_handle_lockup(adev, r); | ||
| 721 | return r; | ||
| 722 | } | ||
| 723 | |||
| 724 | r = amdgpu_cs_ib_fill(adev, &parser); | ||
| 725 | if (!r) { | ||
| 726 | r = amdgpu_cs_parser_relocs(&parser); | ||
| 727 | if (r && r != -ERESTARTSYS) | ||
| 728 | DRM_ERROR("Failed to parse relocation %d!\n", r); | ||
| 729 | } | ||
| 730 | |||
| 731 | if (r) { | ||
| 732 | amdgpu_cs_parser_fini(&parser, r, false); | ||
| 733 | up_read(&adev->exclusive_lock); | ||
| 734 | r = amdgpu_cs_handle_lockup(adev, r); | ||
| 735 | return r; | ||
| 736 | } | ||
| 737 | |||
| 738 | for (i = 0; i < parser.num_ibs; i++) | ||
| 739 | trace_amdgpu_cs(&parser, i); | ||
| 740 | |||
| 741 | r = amdgpu_cs_ib_vm_chunk(adev, &parser); | ||
| 742 | if (r) { | ||
| 743 | goto out; | ||
| 744 | } | ||
| 745 | |||
| 746 | cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq; | ||
| 747 | out: | ||
| 748 | amdgpu_cs_parser_fini(&parser, r, true); | ||
| 749 | up_read(&adev->exclusive_lock); | ||
| 750 | r = amdgpu_cs_handle_lockup(adev, r); | ||
| 751 | return r; | ||
| 752 | } | ||
| 753 | |||
| 754 | /** | ||
| 755 | * amdgpu_cs_wait_ioctl - wait for a command submission to finish | ||
| 756 | * | ||
| 757 | * @dev: drm device | ||
| 758 | * @data: data from userspace | ||
| 759 | * @filp: file private | ||
| 760 | * | ||
| 761 | * Wait for the command submission identified by handle to finish. | ||
| 762 | */ | ||
| 763 | int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, | ||
| 764 | struct drm_file *filp) | ||
| 765 | { | ||
| 766 | union drm_amdgpu_wait_cs *wait = data; | ||
| 767 | struct amdgpu_device *adev = dev->dev_private; | ||
| 768 | uint64_t seq[AMDGPU_MAX_RINGS] = {0}; | ||
| 769 | struct amdgpu_ring *ring = NULL; | ||
| 770 | unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); | ||
| 771 | long r; | ||
| 772 | |||
| 773 | r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance, | ||
| 774 | wait->in.ring, &ring); | ||
| 775 | if (r) | ||
| 776 | return r; | ||
| 777 | |||
| 778 | seq[ring->idx] = wait->in.handle; | ||
| 779 | |||
| 780 | r = amdgpu_fence_wait_seq_timeout(adev, seq, true, timeout); | ||
| 781 | if (r < 0) | ||
| 782 | return r; | ||
| 783 | |||
| 784 | memset(wait, 0, sizeof(*wait)); | ||
| 785 | wait->out.status = (r == 0); | ||
| 786 | |||
| 787 | return 0; | ||
| 788 | } | ||
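From userspace the wait path looks roughly like the sketch below. It assumes the union drm_amdgpu_wait_cs layout from the accompanying amdgpu_drm.h UAPI header and is illustrative rather than tested; note that out.status is non-zero when the wait timed out (the fence helper returns 0 remaining time on timeout), not when the fence signaled.

```c
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Wait on the handle returned by the CS ioctl (cs->out.handle).
 * Returns 0 when signaled, 1 on timeout, -1 on ioctl error. */
static int wait_cs(int fd, uint64_t handle, uint64_t timeout_ns)
{
	union drm_amdgpu_wait_cs args;

	memset(&args, 0, sizeof(args));
	args.in.handle = handle;
	args.in.timeout = timeout_ns;	/* interpreted by amdgpu_gem_timeout() */
	args.in.ip_type = AMDGPU_HW_IP_GFX;	/* ring the IB was submitted on */
	args.in.ip_instance = 0;
	args.in.ring = 0;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args))
		return -1;
	return args.out.status ? 1 : 0;
}
```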
| 789 | |||
| 790 | /** | ||
| 791 | * amdgpu_cs_find_mapping - find the bo_va mapping for a VM address | ||
| 792 | * | ||
| 793 | * @parser: command submission parser context | ||
| 794 | * @addr: VM address | ||
| 795 | * @bo: resulting BO of the mapping found | ||
| 796 | * | ||
| 797 | * Search the buffer objects in the command submission context for a certain | ||
| 798 | * virtual memory address. Returns the mapping structure when found, NULL | ||
| 799 | * otherwise. | ||
| 800 | */ | ||
| 801 | struct amdgpu_bo_va_mapping * | ||
| 802 | amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, | ||
| 803 | uint64_t addr, struct amdgpu_bo **bo) | ||
| 804 | { | ||
| 805 | struct amdgpu_bo_list_entry *reloc; | ||
| 806 | struct amdgpu_bo_va_mapping *mapping; | ||
| 807 | |||
| 808 | addr /= AMDGPU_GPU_PAGE_SIZE; | ||
| 809 | |||
| 810 | list_for_each_entry(reloc, &parser->validated, tv.head) { | ||
| 811 | if (!reloc->bo_va) | ||
| 812 | continue; | ||
| 813 | |||
| 814 | list_for_each_entry(mapping, &reloc->bo_va->mappings, list) { | ||
| 815 | if (mapping->it.start > addr || | ||
| 816 | addr > mapping->it.last) | ||
| 817 | continue; | ||
| 818 | |||
| 819 | *bo = reloc->bo_va->bo; | ||
| 820 | return mapping; | ||
| 821 | } | ||
| 822 | } | ||
| 823 | |||
| 824 | return NULL; | ||
| 825 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c new file mode 100644 index 000000000000..235010a83f8f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | |||
| @@ -0,0 +1,161 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2015 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: monk liu <monk.liu@amd.com> | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include <drm/drmP.h> | ||
| 26 | #include "amdgpu.h" | ||
| 27 | |||
| 28 | static void amdgpu_ctx_do_release(struct kref *ref) | ||
| 29 | { | ||
| 30 | struct amdgpu_ctx *ctx; | ||
| 31 | struct amdgpu_ctx_mgr *mgr; | ||
| 32 | |||
| 33 | ctx = container_of(ref, struct amdgpu_ctx, refcount); | ||
| 34 | mgr = &ctx->fpriv->ctx_mgr; | ||
| 35 | |||
| 36 | mutex_lock(&mgr->hlock); | ||
| 37 | idr_remove(&mgr->ctx_handles, ctx->id); | ||
| 38 | mutex_unlock(&mgr->hlock); | ||
| 39 | kfree(ctx); | ||
| 40 | } | ||
| 41 | |||
| 42 | int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t *id, uint32_t flags) | ||
| 43 | { | ||
| 44 | int r; | ||
| 45 | struct amdgpu_ctx *ctx; | ||
| 46 | struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; | ||
| 47 | |||
| 48 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | ||
| 49 | if (!ctx) | ||
| 50 | return -ENOMEM; | ||
| 51 | |||
| 52 | /* initialize before publishing in the idr, so that a concurrent | ||
| 53 | * lookup never sees a half-constructed context */ | ||
| 54 | ctx->fpriv = fpriv; | ||
| 55 | kref_init(&ctx->refcount); | ||
| 56 | |||
| 57 | mutex_lock(&mgr->hlock); | ||
| 58 | r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL); | ||
| 59 | if (r < 0) { | ||
| 60 | mutex_unlock(&mgr->hlock); | ||
| 61 | kfree(ctx); | ||
| 62 | return r; | ||
| 63 | } | ||
| 64 | mutex_unlock(&mgr->hlock); | ||
| 65 | ctx->id = *id = (uint32_t)r; | ||
| 66 | |||
| 67 | return 0; | ||
| 68 | } | ||
| 69 | |||
| 70 | int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id) | ||
| 71 | { | ||
| 72 | int r; | ||
| 73 | struct amdgpu_ctx *ctx; | ||
| 74 | struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; | ||
| 75 | |||
| 76 | rcu_read_lock(); | ||
| 77 | ctx = idr_find(&mgr->ctx_handles, id); | ||
| 78 | rcu_read_unlock(); | ||
| 79 | if (ctx) { | ||
| 80 | /* if no task is pending on this context, free it */ | ||
| 81 | r = kref_put(&ctx->refcount, amdgpu_ctx_do_release); | ||
| 82 | if (r == 1) | ||
| 83 | return 0; /* context removed successfully */ | ||
| 84 | else { | ||
| 85 | /* context is still in use */ | ||
| 86 | kref_get(&ctx->refcount); | ||
| 87 | return -ERESTARTSYS; | ||
| 88 | } | ||
| 89 | } | ||
| 90 | return -EINVAL; | ||
| 91 | } | ||
| 92 | |||
| 93 | int amdgpu_ctx_query(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id, struct amdgpu_ctx_state *state) | ||
| 94 | { | ||
| 95 | struct amdgpu_ctx *ctx; | ||
| 96 | struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; | ||
| 97 | |||
| 98 | rcu_read_lock(); | ||
| 99 | ctx = idr_find(&mgr->ctx_handles, id); | ||
| 100 | rcu_read_unlock(); | ||
| 101 | if (ctx) { | ||
| 102 | /* state should change with CS activity */ | ||
| 103 | *state = ctx->state; | ||
| 104 | return 0; | ||
| 105 | } | ||
| 106 | return -EINVAL; | ||
| 107 | } | ||
| 108 | |||
| 109 | void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv) | ||
| 110 | { | ||
| 111 | struct idr *idp; | ||
| 112 | struct amdgpu_ctx *ctx; | ||
| 113 | uint32_t id; | ||
| 114 | struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; | ||
| 115 | idp = &mgr->ctx_handles; | ||
| 116 | |||
| 117 | idr_for_each_entry(idp, ctx, id) { | ||
| 118 | if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1) | ||
| 119 | DRM_ERROR("ctx (id=%u) is still alive\n", ctx->id); | ||
| 120 | } | ||
| 121 | |||
| 122 | mutex_destroy(&mgr->hlock); | ||
| 123 | } | ||
| 124 | |||
| 125 | int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, | ||
| 126 | struct drm_file *filp) | ||
| 127 | { | ||
| 128 | int r; | ||
| 129 | uint32_t id; | ||
| 130 | uint32_t flags; | ||
| 131 | struct amdgpu_ctx_state state; | ||
| 132 | |||
| 133 | union drm_amdgpu_ctx *args = data; | ||
| 134 | struct amdgpu_device *adev = dev->dev_private; | ||
| 135 | struct amdgpu_fpriv *fpriv = filp->driver_priv; | ||
| 136 | |||
| 137 | r = 0; | ||
| 138 | id = args->in.ctx_id; | ||
| 139 | flags = args->in.flags; | ||
| 140 | |||
| 141 | switch (args->in.op) { | ||
| 142 | case AMDGPU_CTX_OP_ALLOC_CTX: | ||
| 143 | r = amdgpu_ctx_alloc(adev, fpriv, &id, flags); | ||
| 144 | args->out.alloc.ctx_id = id; | ||
| 145 | break; | ||
| 146 | case AMDGPU_CTX_OP_FREE_CTX: | ||
| 147 | r = amdgpu_ctx_free(adev, fpriv, id); | ||
| 148 | break; | ||
| 149 | case AMDGPU_CTX_OP_QUERY_STATE: | ||
| 150 | r = amdgpu_ctx_query(adev, fpriv, id, &state); | ||
| 151 | if (r == 0) { | ||
| 152 | args->out.state.flags = state.flags; | ||
| 153 | args->out.state.hangs = state.hangs; | ||
| 154 | } | ||
| 155 | break; | ||
| 156 | default: | ||
| 157 | return -EINVAL; | ||
| 158 | } | ||
| 159 | |||
| 160 | return r; | ||
| 161 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c new file mode 100644 index 000000000000..cd4bb90fa85c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
| @@ -0,0 +1,1971 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #include <linux/console.h> | ||
| 29 | #include <linux/slab.h> | ||
| 30 | #include <linux/debugfs.h> | ||
| 31 | #include <drm/drmP.h> | ||
| 32 | #include <drm/drm_crtc_helper.h> | ||
| 33 | #include <drm/amdgpu_drm.h> | ||
| 34 | #include <linux/vgaarb.h> | ||
| 35 | #include <linux/vga_switcheroo.h> | ||
| 36 | #include <linux/efi.h> | ||
| 37 | #include "amdgpu.h" | ||
| 38 | #include "amdgpu_i2c.h" | ||
| 39 | #include "atom.h" | ||
| 40 | #include "amdgpu_atombios.h" | ||
| 41 | #include "bif/bif_4_1_d.h" | ||
| 42 | |||
| 43 | static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); | ||
| 44 | static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); | ||
| 45 | |||
| 46 | static const char *amdgpu_asic_name[] = { | ||
| 47 | "BONAIRE", | ||
| 48 | "KAVERI", | ||
| 49 | "KABINI", | ||
| 50 | "HAWAII", | ||
| 51 | "MULLINS", | ||
| 52 | "TOPAZ", | ||
| 53 | "TONGA", | ||
| 54 | "CARRIZO", | ||
| 55 | "LAST", | ||
| 56 | }; | ||
| 57 | |||
| 58 | bool amdgpu_device_is_px(struct drm_device *dev) | ||
| 59 | { | ||
| 60 | struct amdgpu_device *adev = dev->dev_private; | ||
| 61 | |||
| 62 | if (adev->flags & AMDGPU_IS_PX) | ||
| 63 | return true; | ||
| 64 | return false; | ||
| 65 | } | ||
| 66 | |||
| 67 | /* | ||
| 68 | * MMIO register access helper functions. | ||
| 69 | */ | ||
| 70 | uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, | ||
| 71 | bool always_indirect) | ||
| 72 | { | ||
| 73 | if ((reg * 4) < adev->rmmio_size && !always_indirect) | ||
| 74 | return readl(((void __iomem *)adev->rmmio) + (reg * 4)); | ||
| 75 | else { | ||
| 76 | unsigned long flags; | ||
| 77 | uint32_t ret; | ||
| 78 | |||
| 79 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); | ||
| 80 | writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); | ||
| 81 | ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); | ||
| 82 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); | ||
| 83 | |||
| 84 | return ret; | ||
| 85 | } | ||
| 86 | } | ||
| 87 | |||
| 88 | void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, | ||
| 89 | bool always_indirect) | ||
| 90 | { | ||
| 91 | if ((reg * 4) < adev->rmmio_size && !always_indirect) | ||
| 92 | writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); | ||
| 93 | else { | ||
| 94 | unsigned long flags; | ||
| 95 | |||
| 96 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); | ||
| 97 | writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); | ||
| 98 | writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); | ||
| 99 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); | ||
| 100 | } | ||
| 101 | } | ||
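A typical read-modify-write through these helpers would look like the fragment below; 0x1234 is a placeholder offset rather than a real register, and in the driver proper this is normally spelled with the RREG32()/WREG32() wrappers:

```c
/* Hypothetical RMW of a register inside the mapped MMIO BAR. */
uint32_t v = amdgpu_mm_rreg(adev, 0x1234, false);

v |= 0x1;	/* set some feature bit */
amdgpu_mm_wreg(adev, 0x1234, v, false);

/* Offsets beyond rmmio_size automatically take the MM_INDEX/MM_DATA
 * path above, which 'always_indirect' can also force. */
```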
| 102 | |||
| 103 | u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) | ||
| 104 | { | ||
| 105 | if ((reg * 4) < adev->rio_mem_size) | ||
| 106 | return ioread32(adev->rio_mem + (reg * 4)); | ||
| 107 | else { | ||
| 108 | iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); | ||
| 109 | return ioread32(adev->rio_mem + (mmMM_DATA * 4)); | ||
| 110 | } | ||
| 111 | } | ||
| 112 | |||
| 113 | void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) | ||
| 114 | { | ||
| 116 | if ((reg * 4) < adev->rio_mem_size) | ||
| 117 | iowrite32(v, adev->rio_mem + (reg * 4)); | ||
| 118 | else { | ||
| 119 | iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); | ||
| 120 | iowrite32(v, adev->rio_mem + (mmMM_DATA * 4)); | ||
| 121 | } | ||
| 122 | } | ||
| 123 | |||
| 124 | /** | ||
| 125 | * amdgpu_mm_rdoorbell - read a doorbell dword | ||
| 126 | * | ||
| 127 | * @adev: amdgpu_device pointer | ||
| 128 | * @index: doorbell index | ||
| 129 | * | ||
| 130 | * Returns the value in the doorbell aperture at the | ||
| 131 | * requested doorbell index (CIK). | ||
| 132 | */ | ||
| 133 | u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) | ||
| 134 | { | ||
| 135 | if (index < adev->doorbell.num_doorbells) { | ||
| 136 | return readl(adev->doorbell.ptr + index); | ||
| 137 | } else { | ||
| 138 | DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); | ||
| 139 | return 0; | ||
| 140 | } | ||
| 141 | } | ||
| 142 | |||
| 143 | /** | ||
| 144 | * amdgpu_mm_wdoorbell - write a doorbell dword | ||
| 145 | * | ||
| 146 | * @adev: amdgpu_device pointer | ||
| 147 | * @index: doorbell index | ||
| 148 | * @v: value to write | ||
| 149 | * | ||
| 150 | * Writes @v to the doorbell aperture at the | ||
| 151 | * requested doorbell index (CIK). | ||
| 152 | */ | ||
| 153 | void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) | ||
| 154 | { | ||
| 155 | if (index < adev->doorbell.num_doorbells) { | ||
| 156 | writel(v, adev->doorbell.ptr + index); | ||
| 157 | } else { | ||
| 158 | DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); | ||
| 159 | } | ||
| 160 | } | ||
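The usual consumer of the doorbell write is a ring kicking the hardware after bumping its write pointer. A rough sketch, treating the doorbell_index and ptr_mask ring fields as assumptions for illustration:

```c
/* Hypothetical ring commit: publish the new wptr through the doorbell. */
ring->wptr &= ring->ptr_mask;
amdgpu_mm_wdoorbell(adev, ring->doorbell_index, ring->wptr);
```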
| 161 | |||
| 162 | /** | ||
| 163 | * amdgpu_invalid_rreg - dummy reg read function | ||
| 164 | * | ||
| 165 | * @adev: amdgpu device pointer | ||
| 166 | * @reg: offset of register | ||
| 167 | * | ||
| 168 | * Dummy register read function. Used for register blocks | ||
| 169 | * that certain asics don't have (all asics). | ||
| 170 | * Returns the value in the register. | ||
| 171 | */ | ||
| 172 | static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg) | ||
| 173 | { | ||
| 174 | DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); | ||
| 175 | BUG(); | ||
| 176 | return 0; | ||
| 177 | } | ||
| 178 | |||
| 179 | /** | ||
| 180 | * amdgpu_invalid_wreg - dummy reg write function | ||
| 181 | * | ||
| 182 | * @adev: amdgpu device pointer | ||
| 183 | * @reg: offset of register | ||
| 184 | * @v: value to write to the register | ||
| 185 | * | ||
| 186 | * Dummy register write function. Used for register blocks | ||
| 187 | * that certain asics don't have (all asics). | ||
| 188 | */ | ||
| 189 | static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) | ||
| 190 | { | ||
| 191 | DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", | ||
| 192 | reg, v); | ||
| 193 | BUG(); | ||
| 194 | } | ||
| 195 | |||
| 196 | /** | ||
| 197 | * amdgpu_block_invalid_rreg - dummy reg read function | ||
| 198 | * | ||
| 199 | * @adev: amdgpu device pointer | ||
| 200 | * @block: offset of instance | ||
| 201 | * @reg: offset of register | ||
| 202 | * | ||
| 203 | * Dummy register read function. Used for register blocks | ||
| 204 | * that certain asics don't have (all asics). | ||
| 205 | * Returns the value in the register. | ||
| 206 | */ | ||
| 207 | static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev, | ||
| 208 | uint32_t block, uint32_t reg) | ||
| 209 | { | ||
| 210 | DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n", | ||
| 211 | reg, block); | ||
| 212 | BUG(); | ||
| 213 | return 0; | ||
| 214 | } | ||
| 215 | |||
| 216 | /** | ||
| 217 | * amdgpu_block_invalid_wreg - dummy reg write function | ||
| 218 | * | ||
| 219 | * @adev: amdgpu device pointer | ||
| 220 | * @block: offset of instance | ||
| 221 | * @reg: offset of register | ||
| 222 | * @v: value to write to the register | ||
| 223 | * | ||
| 224 | * Dummy register write function. Used for register blocks | ||
| 225 | * that certain asics don't have (all asics). | ||
| 226 | */ | ||
| 227 | static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, | ||
| 228 | uint32_t block, | ||
| 229 | uint32_t reg, uint32_t v) | ||
| 230 | { | ||
| 231 | DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n", | ||
| 232 | reg, block, v); | ||
| 233 | BUG(); | ||
| 234 | } | ||
| 235 | |||
| 236 | static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) | ||
| 237 | { | ||
| 238 | int r; | ||
| 239 | |||
| 240 | if (adev->vram_scratch.robj == NULL) { | ||
| 241 | r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, | ||
| 242 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, | ||
| 243 | NULL, &adev->vram_scratch.robj); | ||
| 244 | if (r) { | ||
| 245 | return r; | ||
| 246 | } | ||
| 247 | } | ||
| 248 | |||
| 249 | r = amdgpu_bo_reserve(adev->vram_scratch.robj, false); | ||
| 250 | if (unlikely(r != 0)) | ||
| 251 | return r; | ||
| 252 | r = amdgpu_bo_pin(adev->vram_scratch.robj, | ||
| 253 | AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr); | ||
| 254 | if (r) { | ||
| 255 | amdgpu_bo_unreserve(adev->vram_scratch.robj); | ||
| 256 | return r; | ||
| 257 | } | ||
| 258 | r = amdgpu_bo_kmap(adev->vram_scratch.robj, | ||
| 259 | (void **)&adev->vram_scratch.ptr); | ||
| 260 | if (r) | ||
| 261 | amdgpu_bo_unpin(adev->vram_scratch.robj); | ||
| 262 | amdgpu_bo_unreserve(adev->vram_scratch.robj); | ||
| 263 | |||
| 264 | return r; | ||
| 265 | } | ||
| 266 | |||
| 267 | static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev) | ||
| 268 | { | ||
| 269 | int r; | ||
| 270 | |||
| 271 | if (adev->vram_scratch.robj == NULL) { | ||
| 272 | return; | ||
| 273 | } | ||
| 274 | r = amdgpu_bo_reserve(adev->vram_scratch.robj, false); | ||
| 275 | if (likely(r == 0)) { | ||
| 276 | amdgpu_bo_kunmap(adev->vram_scratch.robj); | ||
| 277 | amdgpu_bo_unpin(adev->vram_scratch.robj); | ||
| 278 | amdgpu_bo_unreserve(adev->vram_scratch.robj); | ||
| 279 | } | ||
| 280 | amdgpu_bo_unref(&adev->vram_scratch.robj); | ||
| 281 | } | ||
| 282 | |||
| 283 | /** | ||
| 284 | * amdgpu_program_register_sequence - program an array of registers. | ||
| 285 | * | ||
| 286 | * @adev: amdgpu_device pointer | ||
| 287 | * @registers: pointer to the register array | ||
| 288 | * @array_size: size of the register array | ||
| 289 | * | ||
| 290 | * Programs an array of registers with AND and OR masks. | ||
| 291 | * This is a helper for setting golden registers. | ||
| 292 | */ | ||
| 293 | void amdgpu_program_register_sequence(struct amdgpu_device *adev, | ||
| 294 | const u32 *registers, | ||
| 295 | const u32 array_size) | ||
| 296 | { | ||
| 297 | u32 tmp, reg, and_mask, or_mask; | ||
| 298 | int i; | ||
| 299 | |||
| 300 | if (array_size % 3) | ||
| 301 | return; | ||
| 302 | |||
| 303 | for (i = 0; i < array_size; i += 3) { | ||
| 304 | reg = registers[i + 0]; | ||
| 305 | and_mask = registers[i + 1]; | ||
| 306 | or_mask = registers[i + 2]; | ||
| 307 | |||
| 308 | if (and_mask == 0xffffffff) { | ||
| 309 | tmp = or_mask; | ||
| 310 | } else { | ||
| 311 | tmp = RREG32(reg); | ||
| 312 | tmp &= ~and_mask; | ||
| 313 | tmp |= or_mask; | ||
| 314 | } | ||
| 315 | WREG32(reg, tmp); | ||
| 316 | } | ||
| 317 | } | ||
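The register array is a flat list of {offset, AND mask, OR mask} triples; an AND mask of 0xffffffff skips the readback and writes the OR value directly. A made-up table for illustration only (the offsets and values are placeholders, not real golden settings):

```c
static const u32 sample_golden_settings[] = {
	/* offset, and_mask (bits to clear), or_mask (bits to set) */
	0x31e8, 0xffffffff, 0x00000003,	/* straight write, no readback */
	0x2a00, 0x0000ff00, 0x00001200,	/* clear bits 8-15, then set 0x1200 */
};

/* amdgpu_program_register_sequence(adev, sample_golden_settings,
 *				    ARRAY_SIZE(sample_golden_settings)); */
```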
| 318 | |||
| 319 | void amdgpu_pci_config_reset(struct amdgpu_device *adev) | ||
| 320 | { | ||
| 321 | pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA); | ||
| 322 | } | ||
| 323 | |||
| 324 | /* | ||
| 325 | * GPU doorbell aperture helpers function. | ||
| 326 | */ | ||
| 327 | /** | ||
| 328 | * amdgpu_doorbell_init - Init doorbell driver information. | ||
| 329 | * | ||
| 330 | * @adev: amdgpu_device pointer | ||
| 331 | * | ||
| 332 | * Init doorbell driver information (CIK) | ||
| 333 | * Returns 0 on success, error on failure. | ||
| 334 | */ | ||
| 335 | static int amdgpu_doorbell_init(struct amdgpu_device *adev) | ||
| 336 | { | ||
| 337 | /* doorbell bar mapping */ | ||
| 338 | adev->doorbell.base = pci_resource_start(adev->pdev, 2); | ||
| 339 | adev->doorbell.size = pci_resource_len(adev->pdev, 2); | ||
| 340 | |||
| 341 | adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), | ||
| 342 | AMDGPU_DOORBELL_MAX_ASSIGNMENT+1); | ||
| 343 | if (adev->doorbell.num_doorbells == 0) | ||
| 344 | return -EINVAL; | ||
| 345 | |||
| 346 | adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32)); | ||
| 347 | if (adev->doorbell.ptr == NULL) { | ||
| 348 | return -ENOMEM; | ||
| 349 | } | ||
| 350 | DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base); | ||
| 351 | DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size); | ||
| 352 | |||
| 353 | return 0; | ||
| 354 | } | ||
| 355 | |||
| 356 | /** | ||
| 357 | * amdgpu_doorbell_fini - Tear down doorbell driver information. | ||
| 358 | * | ||
| 359 | * @adev: amdgpu_device pointer | ||
| 360 | * | ||
| 361 | * Tear down doorbell driver information (CIK) | ||
| 362 | */ | ||
| 363 | static void amdgpu_doorbell_fini(struct amdgpu_device *adev) | ||
| 364 | { | ||
| 365 | iounmap(adev->doorbell.ptr); | ||
| 366 | adev->doorbell.ptr = NULL; | ||
| 367 | } | ||
| 368 | |||
| 369 | /** | ||
| 370 | * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to | ||
| 371 | * setup amdkfd | ||
| 372 | * | ||
| 373 | * @adev: amdgpu_device pointer | ||
| 374 | * @aperture_base: output returning doorbell aperture base physical address | ||
| 375 | * @aperture_size: output returning doorbell aperture size in bytes | ||
| 376 | * @start_offset: output returning # of doorbell bytes reserved for amdgpu. | ||
| 377 | * | ||
| 378 | * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up, | ||
| 379 | * takes doorbells required for its own rings and reports the setup to amdkfd. | ||
| 380 | * amdgpu reserved doorbells are at the start of the doorbell aperture. | ||
| 381 | */ | ||
| 382 | void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, | ||
| 383 | phys_addr_t *aperture_base, | ||
| 384 | size_t *aperture_size, | ||
| 385 | size_t *start_offset) | ||
| 386 | { | ||
| 387 | /* | ||
| 388 | * The first num_doorbells are used by amdgpu. | ||
| 389 | * amdkfd takes whatever's left in the aperture. | ||
| 390 | */ | ||
| 391 | if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) { | ||
| 392 | *aperture_base = adev->doorbell.base; | ||
| 393 | *aperture_size = adev->doorbell.size; | ||
| 394 | *start_offset = adev->doorbell.num_doorbells * sizeof(u32); | ||
| 395 | } else { | ||
| 396 | *aperture_base = 0; | ||
| 397 | *aperture_size = 0; | ||
| 398 | *start_offset = 0; | ||
| 399 | } | ||
| 400 | } | ||
| 401 | |||
| 402 | /* | ||
| 403 | * amdgpu_wb_*() | ||
| 404 | * Writeback is the method by which the GPU updates special pages | ||
| 405 | * in memory with the status of certain GPU events (fences, ring pointers, | ||
| 406 | * etc.). | ||
| 407 | */ | ||
| 408 | |||
| 409 | /** | ||
| 410 | * amdgpu_wb_fini - Disable Writeback and free memory | ||
| 411 | * | ||
| 412 | * @adev: amdgpu_device pointer | ||
| 413 | * | ||
| 414 | * Disables Writeback and frees the Writeback memory (all asics). | ||
| 415 | * Used at driver shutdown. | ||
| 416 | */ | ||
| 417 | static void amdgpu_wb_fini(struct amdgpu_device *adev) | ||
| 418 | { | ||
| 419 | if (adev->wb.wb_obj) { | ||
| 420 | if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) { | ||
| 421 | amdgpu_bo_kunmap(adev->wb.wb_obj); | ||
| 422 | amdgpu_bo_unpin(adev->wb.wb_obj); | ||
| 423 | amdgpu_bo_unreserve(adev->wb.wb_obj); | ||
| 424 | } | ||
| 425 | amdgpu_bo_unref(&adev->wb.wb_obj); | ||
| 426 | adev->wb.wb = NULL; | ||
| 427 | adev->wb.wb_obj = NULL; | ||
| 428 | } | ||
| 429 | } | ||
| 430 | |||
| 431 | /** | ||
| 432 | * amdgpu_wb_init- Init Writeback driver info and allocate memory | ||
| 433 | * | ||
| 434 | * @adev: amdgpu_device pointer | ||
| 435 | * | ||
| 436 | * Initializes writeback and allocates writeback memory (all asics). | ||
| 437 | * Used at driver startup. | ||
| 438 | * Returns 0 on success or a negative error code on failure. | ||
| 439 | */ | ||
| 440 | static int amdgpu_wb_init(struct amdgpu_device *adev) | ||
| 441 | { | ||
| 442 | int r; | ||
| 443 | |||
| 444 | if (adev->wb.wb_obj == NULL) { | ||
| 445 | r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true, | ||
| 446 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj); | ||
| 447 | if (r) { | ||
| 448 | dev_warn(adev->dev, "(%d) create WB bo failed\n", r); | ||
| 449 | return r; | ||
| 450 | } | ||
| 451 | r = amdgpu_bo_reserve(adev->wb.wb_obj, false); | ||
| 452 | if (unlikely(r != 0)) { | ||
| 453 | amdgpu_wb_fini(adev); | ||
| 454 | return r; | ||
| 455 | } | ||
| 456 | r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT, | ||
| 457 | &adev->wb.gpu_addr); | ||
| 458 | if (r) { | ||
| 459 | amdgpu_bo_unreserve(adev->wb.wb_obj); | ||
| 460 | dev_warn(adev->dev, "(%d) pin WB bo failed\n", r); | ||
| 461 | amdgpu_wb_fini(adev); | ||
| 462 | return r; | ||
| 463 | } | ||
| 464 | r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb); | ||
| 465 | amdgpu_bo_unreserve(adev->wb.wb_obj); | ||
| 466 | if (r) { | ||
| 467 | dev_warn(adev->dev, "(%d) map WB bo failed\n", r); | ||
| 468 | amdgpu_wb_fini(adev); | ||
| 469 | return r; | ||
| 470 | } | ||
| 471 | |||
| 472 | adev->wb.num_wb = AMDGPU_MAX_WB; | ||
| 473 | memset(&adev->wb.used, 0, sizeof(adev->wb.used)); | ||
| 474 | |||
| 475 | /* clear wb memory */ | ||
| 476 | memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE); | ||
| 477 | } | ||
| 478 | |||
| 479 | return 0; | ||
| 480 | } | ||
| 481 | |||
| 482 | /** | ||
| 483 | * amdgpu_wb_get - Allocate a wb entry | ||
| 484 | * | ||
| 485 | * @adev: amdgpu_device pointer | ||
| 486 | * @wb: wb index | ||
| 487 | * | ||
| 488 | * Allocate a wb slot for use by the driver (all asics). | ||
| 489 | * Returns 0 on success or -EINVAL on failure. | ||
| 490 | */ | ||
| 491 | int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb) | ||
| 492 | { | ||
| 493 | unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); | ||
| 494 | if (offset < adev->wb.num_wb) { | ||
| 495 | __set_bit(offset, adev->wb.used); | ||
| 496 | *wb = offset; | ||
| 497 | return 0; | ||
| 498 | } else { | ||
| 499 | return -EINVAL; | ||
| 500 | } | ||
| 501 | } | ||
| 502 | |||
| 503 | /** | ||
| 504 | * amdgpu_wb_free - Free a wb entry | ||
| 505 | * | ||
| 506 | * @adev: amdgpu_device pointer | ||
| 507 | * @wb: wb index | ||
| 508 | * | ||
| 509 | * Free a wb slot allocated for use by the driver (all asics) | ||
| 510 | */ | ||
| 511 | void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb) | ||
| 512 | { | ||
| 513 | if (wb < adev->wb.num_wb) | ||
| 514 | __clear_bit(wb, adev->wb.used); | ||
| 515 | } | ||
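Putting the two together, a ring or fence user typically grabs one 4-byte slot, points the hardware at its GPU address, and polls the CPU-side mapping. A minimal sketch, where my_wb and the surrounding usage are illustrative:

```c
/* Hypothetical writeback consumer. */
u32 my_wb;

if (amdgpu_wb_get(adev, &my_wb) == 0) {
	u64 gpu_addr = adev->wb.gpu_addr + (my_wb * 4);	/* GPU writes here */

	adev->wb.wb[my_wb] = 0;	/* CPU-visible slot, cleared first */
	/* ... point a ring's rptr/fence writeback at gpu_addr, then poll
	 * adev->wb.wb[my_wb] from the CPU ... */
	amdgpu_wb_free(adev, my_wb);
}
```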
| 516 | |||
| 517 | /** | ||
| 518 | * amdgpu_vram_location - try to find VRAM location | ||
| 519 | * @adev: amdgpu device structure holding all necessary information | ||
| 520 | * @mc: memory controller structure holding memory information | ||
| 521 | * @base: base address at which to put VRAM | ||
| 522 | * | ||
| 523 | * Function will try to place VRAM at the base address provided | ||
| 524 | * as a parameter (which is so far either the PCI aperture address or, | ||
| 525 | * for IGP, the TOM base address). | ||
| 526 | * | ||
| 527 | * If there is not enough space to fit the invisible VRAM in the 32-bit | ||
| 528 | * address space, then we limit the VRAM size to the aperture. | ||
| 529 | * | ||
| 530 | * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size, | ||
| 531 | * this shouldn't be a problem as we are using the PCI aperture as a reference. | ||
| 532 | * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but | ||
| 533 | * not IGP. | ||
| 534 | * | ||
| 535 | * Note: we use mc_vram_size since on some boards we need to program the mc | ||
| 536 | * to cover the whole aperture even if the VRAM size is smaller than the | ||
| 537 | * aperture size (Novell bug 204882, along with lots of Ubuntu ones). | ||
| 538 | * | ||
| 539 | * Note: when limiting VRAM it's safe to overwrite real_vram_size because | ||
| 540 | * we are not in the case where real_vram_size is smaller than mc_vram_size | ||
| 541 | * (i.e. not affected by the bogus hw of Novell bug 204882, along with lots | ||
| 542 | * of Ubuntu ones). | ||
| 543 | * | ||
| 544 | * Note: IGP TOM addr should be the same as the aperture addr, but we don't | ||
| 545 | * explicitly check for that. | ||
| 546 | * | ||
| 547 | * FIXME: when reducing VRAM size, align the new size to a power of 2. | ||
| 548 | */ | ||
| 549 | void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base) | ||
| 550 | { | ||
| 551 | uint64_t limit = (uint64_t)amdgpu_vram_limit << 20; | ||
| 552 | |||
| 553 | mc->vram_start = base; | ||
| 554 | if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) { | ||
| 555 | dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n"); | ||
| 556 | mc->real_vram_size = mc->aper_size; | ||
| 557 | mc->mc_vram_size = mc->aper_size; | ||
| 558 | } | ||
| 559 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; | ||
| 560 | if (limit && limit < mc->real_vram_size) | ||
| 561 | mc->real_vram_size = limit; | ||
| 562 | dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", | ||
| 563 | mc->mc_vram_size >> 20, mc->vram_start, | ||
| 564 | mc->vram_end, mc->real_vram_size >> 20); | ||
| 565 | } | ||
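A worked example of the clamping with made-up numbers: with a 32-bit mc_mask (4 GiB of addressable MC space), base = 0, mc_vram_size = 8 GiB and aper_size = 256 MiB, the check mc_vram_size > mc_mask - base + 1 fires, so both real_vram_size and mc_vram_size are clamped to the 256 MiB aperture and vram_end becomes base + 256 MiB - 1. An amdgpu_vram_limit module option smaller than that would lower real_vram_size further.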
| 566 | |||
| 567 | /** | ||
| 568 | * amdgpu_gtt_location - try to find GTT location | ||
| 569 | * @adev: amdgpu device structure holding all necessary information | ||
| 570 | * @mc: memory controller structure holding memory information | ||
| 571 | * | ||
| 572 | * Function will try to place GTT before or after VRAM. | ||
| 573 | * | ||
| 574 | * If the GTT size is bigger than the space left, then we adjust the GTT | ||
| 575 | * size; thus this function never fails. | ||
| 576 | * | ||
| 577 | * FIXME: when reducing GTT size, align the new size to a power of 2. | ||
| 578 | */ | ||
| 579 | void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) | ||
| 580 | { | ||
| 581 | u64 size_af, size_bf; | ||
| 582 | |||
| 583 | size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; | ||
| 584 | size_bf = mc->vram_start & ~mc->gtt_base_align; | ||
| 585 | if (size_bf > size_af) { | ||
| 586 | if (mc->gtt_size > size_bf) { | ||
| 587 | dev_warn(adev->dev, "limiting GTT\n"); | ||
| 588 | mc->gtt_size = size_bf; | ||
| 589 | } | ||
| 590 | mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size; | ||
| 591 | } else { | ||
| 592 | if (mc->gtt_size > size_af) { | ||
| 593 | dev_warn(adev->dev, "limiting GTT\n"); | ||
| 594 | mc->gtt_size = size_af; | ||
| 595 | } | ||
| 596 | mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; | ||
| 597 | } | ||
| 598 | mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; | ||
| 599 | dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", | ||
| 600 | mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); | ||
| 601 | } | ||
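Likewise for GTT placement, with made-up numbers and the alignment rounding ignored for brevity: given a 40-bit mc_mask, VRAM spanning 0 to 4 GiB - 1 and an 8 GiB GTT request, size_bf is 0 while size_af is roughly 1 TiB - 4 GiB, so the "after" branch wins and the GTT lands at gtt_start = 4 GiB with gtt_end = 12 GiB - 1. Only a request exceeding the larger gap would be shrunk.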
| 602 | |||
| 603 | /* | ||
| 604 | * GPU helpers function. | ||
| 605 | */ | ||
| 606 | /** | ||
| 607 | * amdgpu_card_posted - check if the hw has already been initialized | ||
| 608 | * | ||
| 609 | * @adev: amdgpu_device pointer | ||
| 610 | * | ||
| 611 | * Check if the asic has been initialized (all asics). | ||
| 612 | * Used at driver startup. | ||
| 613 | * Returns true if initialized or false if not. | ||
| 614 | */ | ||
| 615 | bool amdgpu_card_posted(struct amdgpu_device *adev) | ||
| 616 | { | ||
| 617 | uint32_t reg; | ||
| 618 | |||
| 619 | /* check MEM_SIZE, in case the crtcs are off */ | ||
| 620 | reg = RREG32(mmCONFIG_MEMSIZE); | ||
| 621 | |||
| 622 | if (reg) | ||
| 623 | return true; | ||
| 624 | |||
| 625 | return false; | ||
| 627 | } | ||
| 628 | |||
| 629 | /** | ||
| 630 | * amdgpu_boot_test_post_card - check and possibly initialize the hw | ||
| 631 | * | ||
| 632 | * @adev: amdgpu_device pointer | ||
| 633 | * | ||
| 634 | * Check if the asic is initialized and if not, attempt to initialize | ||
| 635 | * it (all asics). | ||
| 636 | * Returns true if initialized or false if not. | ||
| 637 | */ | ||
| 638 | bool amdgpu_boot_test_post_card(struct amdgpu_device *adev) | ||
| 639 | { | ||
| 640 | if (amdgpu_card_posted(adev)) | ||
| 641 | return true; | ||
| 642 | |||
| 643 | if (adev->bios) { | ||
| 644 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 645 | if (adev->is_atom_bios) | ||
| 646 | amdgpu_atom_asic_init(adev->mode_info.atom_context); | ||
| 647 | return true; | ||
| 648 | } else { | ||
| 649 | dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); | ||
| 650 | return false; | ||
| 651 | } | ||
| 652 | } | ||
| 653 | |||
| 654 | /** | ||
| 655 | * amdgpu_dummy_page_init - init dummy page used by the driver | ||
| 656 | * | ||
| 657 | * @adev: amdgpu_device pointer | ||
| 658 | * | ||
| 659 | * Allocate the dummy page used by the driver (all asics). | ||
| 660 | * This dummy page is used by the driver as a filler for gart entries | ||
| 661 | * when pages are taken out of the GART. | ||
| 662 | * Returns 0 on success, -ENOMEM on failure. | ||
| 663 | */ | ||
| 664 | int amdgpu_dummy_page_init(struct amdgpu_device *adev) | ||
| 665 | { | ||
| 666 | if (adev->dummy_page.page) | ||
| 667 | return 0; | ||
| 668 | adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); | ||
| 669 | if (adev->dummy_page.page == NULL) | ||
| 670 | return -ENOMEM; | ||
| 671 | adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page, | ||
| 672 | 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
| 673 | if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) { | ||
| 674 | dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n"); | ||
| 675 | __free_page(adev->dummy_page.page); | ||
| 676 | adev->dummy_page.page = NULL; | ||
| 677 | return -ENOMEM; | ||
| 678 | } | ||
| 679 | return 0; | ||
| 680 | } | ||
| 681 | |||
| 682 | /** | ||
| 683 | * amdgpu_dummy_page_fini - free dummy page used by the driver | ||
| 684 | * | ||
| 685 | * @adev: amdgpu_device pointer | ||
| 686 | * | ||
| 687 | * Frees the dummy page used by the driver (all asics). | ||
| 688 | */ | ||
| 689 | void amdgpu_dummy_page_fini(struct amdgpu_device *adev) | ||
| 690 | { | ||
| 691 | if (adev->dummy_page.page == NULL) | ||
| 692 | return; | ||
| 693 | pci_unmap_page(adev->pdev, adev->dummy_page.addr, | ||
| 694 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
| 695 | __free_page(adev->dummy_page.page); | ||
| 696 | adev->dummy_page.page = NULL; | ||
| 697 | } | ||
| 698 | |||
| 699 | |||
| 700 | /* ATOM accessor methods */ | ||
| 701 | /* | ||
| 702 | * ATOM is an interpreted byte code stored in tables in the vbios. The | ||
| 703 | * driver registers callbacks to access registers and the interpreter | ||
| 704 | * in the driver parses the tables and executes them to program specific | ||
| 705 | * actions (set display modes, asic init, etc.). See amdgpu_atombios.c, | ||
| 706 | * atombios.h, and atom.c | ||
| 707 | */ | ||
| 708 | |||
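As a toy illustration of the callback-table pattern described above (not the ATOM interpreter itself), the engine only ever sees a struct of function pointers, so the same byte-code core can be pointed at MMIO, IO, MC, or PLL accessors:

```c
/* toy callback table, NOT the real ATOM interpreter */
#include <stdio.h>
#include <stdint.h>

struct toy_card_info {
	uint32_t (*reg_read)(uint32_t reg);
	void (*reg_write)(uint32_t reg, uint32_t val);
};

static uint32_t fake_regs[16];

static uint32_t toy_read(uint32_t reg)
{
	return fake_regs[reg & 15];
}

static void toy_write(uint32_t reg, uint32_t val)
{
	fake_regs[reg & 15] = val;
}

int main(void)
{
	struct toy_card_info info = {
		.reg_read = toy_read,
		.reg_write = toy_write,
	};

	/* the "interpreter" programs and reads back a register without
	 * knowing what bus sits behind the callbacks */
	info.reg_write(3, 0xdeadbeef);
	printf("reg 3 = 0x%08x\n", info.reg_read(3));
	return 0;
}
```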
| 709 | /** | ||
| 710 | * cail_pll_read - read PLL register | ||
| 711 | * | ||
| 712 | * @info: atom card_info pointer | ||
| 713 | * @reg: PLL register offset | ||
| 714 | * | ||
| 715 | * Provides a PLL register accessor for the atom interpreter (r4xx+). | ||
| 716 | * Returns the value of the PLL register. | ||
| 717 | */ | ||
| 718 | static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) | ||
| 719 | { | ||
| 720 | return 0; | ||
| 721 | } | ||
| 722 | |||
| 723 | /** | ||
| 724 | * cail_pll_write - write PLL register | ||
| 725 | * | ||
| 726 | * @info: atom card_info pointer | ||
| 727 | * @reg: PLL register offset | ||
| 728 | * @val: value to write to the pll register | ||
| 729 | * | ||
| 730 | * Provides a PLL register accessor for the atom interpreter (r4xx+). | ||
| 731 | */ | ||
| 732 | static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val) | ||
| 733 | { | ||
| 734 | |||
| 735 | } | ||
| 736 | |||
| 737 | /** | ||
| 738 | * cail_mc_read - read MC (Memory Controller) register | ||
| 739 | * | ||
| 740 | * @info: atom card_info pointer | ||
| 741 | * @reg: MC register offset | ||
| 742 | * | ||
| 743 | * Provides an MC register accessor for the atom interpreter (r4xx+). | ||
| 744 | * Returns the value of the MC register. | ||
| 745 | */ | ||
| 746 | static uint32_t cail_mc_read(struct card_info *info, uint32_t reg) | ||
| 747 | { | ||
| 748 | return 0; | ||
| 749 | } | ||
| 750 | |||
| 751 | /** | ||
| 752 | * cail_mc_write - write MC (Memory Controller) register | ||
| 753 | * | ||
| 754 | * @info: atom card_info pointer | ||
| 755 | * @reg: MC register offset | ||
| 756 | * @val: value to write to the MC register | ||
| 757 | * | ||
| 758 | * Provides an MC register accessor for the atom interpreter (r4xx+). | ||
| 759 | */ | ||
| 760 | static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) | ||
| 761 | { | ||
| 762 | |||
| 763 | } | ||
| 764 | |||
| 765 | /** | ||
| 766 | * cail_reg_write - write MMIO register | ||
| 767 | * | ||
| 768 | * @info: atom card_info pointer | ||
| 769 | * @reg: MMIO register offset | ||
| 770 | * @val: value to write to the MMIO register | ||
| 771 | * | ||
| 772 | * Provides an MMIO register accessor for the atom interpreter (r4xx+). | ||
| 773 | */ | ||
| 774 | static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) | ||
| 775 | { | ||
| 776 | struct amdgpu_device *adev = info->dev->dev_private; | ||
| 777 | |||
| 778 | WREG32(reg, val); | ||
| 779 | } | ||
| 780 | |||
| 781 | /** | ||
| 782 | * cail_reg_read - read MMIO register | ||
| 783 | * | ||
| 784 | * @info: atom card_info pointer | ||
| 785 | * @reg: MMIO register offset | ||
| 786 | * | ||
| 787 | * Provides an MMIO register accessor for the atom interpreter (r4xx+). | ||
| 788 | * Returns the value of the MMIO register. | ||
| 789 | */ | ||
| 790 | static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) | ||
| 791 | { | ||
| 792 | struct amdgpu_device *adev = info->dev->dev_private; | ||
| 793 | uint32_t r; | ||
| 794 | |||
| 795 | r = RREG32(reg); | ||
| 796 | return r; | ||
| 797 | } | ||
| 798 | |||
| 799 | /** | ||
| 800 | * cail_ioreg_write - write IO register | ||
| 801 | * | ||
| 802 | * @info: atom card_info pointer | ||
| 803 | * @reg: IO register offset | ||
| 804 | * @val: value to write to the IO register | ||
| 805 | * | ||
| 806 | * Provides an IO register accessor for the atom interpreter (r4xx+). | ||
| 807 | */ | ||
| 808 | static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) | ||
| 809 | { | ||
| 810 | struct amdgpu_device *adev = info->dev->dev_private; | ||
| 811 | |||
| 812 | WREG32_IO(reg, val); | ||
| 813 | } | ||
| 814 | |||
| 815 | /** | ||
| 816 | * cail_ioreg_read - read IO register | ||
| 817 | * | ||
| 818 | * @info: atom card_info pointer | ||
| 819 | * @reg: IO register offset | ||
| 820 | * | ||
| 821 | * Provides an IO register accessor for the atom interpreter (r4xx+). | ||
| 822 | * Returns the value of the IO register. | ||
| 823 | */ | ||
| 824 | static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) | ||
| 825 | { | ||
| 826 | struct amdgpu_device *adev = info->dev->dev_private; | ||
| 827 | uint32_t r; | ||
| 828 | |||
| 829 | r = RREG32_IO(reg); | ||
| 830 | return r; | ||
| 831 | } | ||
| 832 | |||
| 833 | /** | ||
| 834 | * amdgpu_atombios_fini - free the driver info and callbacks for atombios | ||
| 835 | * | ||
| 836 | * @adev: amdgpu_device pointer | ||
| 837 | * | ||
| 838 | * Frees the driver info and register access callbacks for the ATOM | ||
| 839 | * interpreter (r4xx+). | ||
| 840 | * Called at driver shutdown. | ||
| 841 | */ | ||
| 842 | static void amdgpu_atombios_fini(struct amdgpu_device *adev) | ||
| 843 | { | ||
| 844 | if (adev->mode_info.atom_context) | ||
| 845 | kfree(adev->mode_info.atom_context->scratch); | ||
| 846 | kfree(adev->mode_info.atom_context); | ||
| 847 | adev->mode_info.atom_context = NULL; | ||
| 848 | kfree(adev->mode_info.atom_card_info); | ||
| 849 | adev->mode_info.atom_card_info = NULL; | ||
| 850 | } | ||
| 851 | |||
| 852 | /** | ||
| 853 | * amdgpu_atombios_init - init the driver info and callbacks for atombios | ||
| 854 | * | ||
| 855 | * @adev: amdgpu_device pointer | ||
| 856 | * | ||
| 857 | * Initializes the driver info and register access callbacks for the | ||
| 858 | * ATOM interpreter (r4xx+). | ||
| 859 | * Returns 0 on success, -ENOMEM on failure. | ||
| 860 | * Called at driver startup. | ||
| 861 | */ | ||
| 862 | static int amdgpu_atombios_init(struct amdgpu_device *adev) | ||
| 863 | { | ||
| 864 | struct card_info *atom_card_info = | ||
| 865 | kzalloc(sizeof(struct card_info), GFP_KERNEL); | ||
| 866 | |||
| 867 | if (!atom_card_info) | ||
| 868 | return -ENOMEM; | ||
| 869 | |||
| 870 | adev->mode_info.atom_card_info = atom_card_info; | ||
| 871 | atom_card_info->dev = adev->ddev; | ||
| 872 | atom_card_info->reg_read = cail_reg_read; | ||
| 873 | atom_card_info->reg_write = cail_reg_write; | ||
| 874 | /* needed for iio ops */ | ||
| 875 | if (adev->rio_mem) { | ||
| 876 | atom_card_info->ioreg_read = cail_ioreg_read; | ||
| 877 | atom_card_info->ioreg_write = cail_ioreg_write; | ||
| 878 | } else { | ||
| 879 | DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n"); | ||
| 880 | atom_card_info->ioreg_read = cail_reg_read; | ||
| 881 | atom_card_info->ioreg_write = cail_reg_write; | ||
| 882 | } | ||
| 883 | atom_card_info->mc_read = cail_mc_read; | ||
| 884 | atom_card_info->mc_write = cail_mc_write; | ||
| 885 | atom_card_info->pll_read = cail_pll_read; | ||
| 886 | atom_card_info->pll_write = cail_pll_write; | ||
| 887 | |||
| 888 | adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios); | ||
| 889 | if (!adev->mode_info.atom_context) { | ||
| 890 | amdgpu_atombios_fini(adev); | ||
| 891 | return -ENOMEM; | ||
| 892 | } | ||
| 893 | |||
| 894 | mutex_init(&adev->mode_info.atom_context->mutex); | ||
| 895 | amdgpu_atombios_scratch_regs_init(adev); | ||
| 896 | amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context); | ||
| 897 | return 0; | ||
| 898 | } | ||
| 899 | |||
| 900 | /* if we get transitioned to only one device, take VGA back */ | ||
| 901 | /** | ||
| 902 | * amdgpu_vga_set_decode - enable/disable vga decode | ||
| 903 | * | ||
| 904 | * @cookie: amdgpu_device pointer | ||
| 905 | * @state: enable/disable vga decode | ||
| 906 | * | ||
| 907 | * Enable/disable vga decode (all asics). | ||
| 908 | * Returns VGA resource flags. | ||
| 909 | */ | ||
| 910 | static unsigned int amdgpu_vga_set_decode(void *cookie, bool state) | ||
| 911 | { | ||
| 912 | struct amdgpu_device *adev = cookie; | ||
| 913 | amdgpu_asic_set_vga_state(adev, state); | ||
| 914 | if (state) | ||
| 915 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | ||
| 916 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | ||
| 917 | else | ||
| 918 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | ||
| 919 | } | ||
| 920 | |||
| 921 | /** | ||
| 922 | * amdgpu_check_pot_argument - check that argument is a power of two | ||
| 923 | * | ||
| 924 | * @arg: value to check | ||
| 925 | * | ||
| 926 | * Validates that a certain argument is a power of two (all asics). | ||
| 927 | * Returns true if argument is valid. | ||
| 928 | */ | ||
| 929 | static bool amdgpu_check_pot_argument(int arg) | ||
| 930 | { | ||
| 931 | return (arg & (arg - 1)) == 0; | ||
| 932 | } | ||
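A quick stand-alone demonstration of the bit trick: subtracting one flips every bit up to and including the lowest set bit, so the AND is zero only for powers of two. Note that 0 also passes, which is what lets the "disabled" defaults (such as amdgpu_vram_limit = 0) through unchanged:

```c
/* (arg & (arg - 1)) == 0 clears the lowest set bit; only powers of two
 * (and zero, the "disabled" sentinel) survive the test */
#include <assert.h>

static int is_pot(int arg)
{
	return (arg & (arg - 1)) == 0;
}

int main(void)
{
	assert(is_pot(0));                    /* 0 passes: "no limit" */
	assert(is_pot(1) && is_pot(256) && is_pot(4096));
	assert(!is_pot(3) && !is_pot(768));
	return 0;
}
```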
| 933 | |||
| 934 | /** | ||
| 935 | * amdgpu_check_arguments - validate module params | ||
| 936 | * | ||
| 937 | * @adev: amdgpu_device pointer | ||
| 938 | * | ||
| 939 | * Validates certain module parameters and updates | ||
| 940 | * the associated values used by the driver (all asics). | ||
| 941 | */ | ||
| 942 | static void amdgpu_check_arguments(struct amdgpu_device *adev) | ||
| 943 | { | ||
| 944 | /* vramlimit must be a power of two */ | ||
| 945 | if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) { | ||
| 946 | dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n", | ||
| 947 | amdgpu_vram_limit); | ||
| 948 | amdgpu_vram_limit = 0; | ||
| 949 | } | ||
| 950 | |||
| 951 | if (amdgpu_gart_size != -1) { | ||
| 952 | /* gtt size must be a power of two and greater than or equal to 32M */ | ||
| 953 | if (amdgpu_gart_size < 32) { | ||
| 954 | dev_warn(adev->dev, "gart size (%d) too small\n", | ||
| 955 | amdgpu_gart_size); | ||
| 956 | amdgpu_gart_size = -1; | ||
| 957 | } else if (!amdgpu_check_pot_argument(amdgpu_gart_size)) { | ||
| 958 | dev_warn(adev->dev, "gart size (%d) must be a power of 2\n", | ||
| 959 | amdgpu_gart_size); | ||
| 960 | amdgpu_gart_size = -1; | ||
| 961 | } | ||
| 962 | } | ||
| 963 | |||
| 964 | if (!amdgpu_check_pot_argument(amdgpu_vm_size)) { | ||
| 965 | dev_warn(adev->dev, "VM size (%d) must be a power of 2\n", | ||
| 966 | amdgpu_vm_size); | ||
| 967 | amdgpu_vm_size = 4; | ||
| 968 | } | ||
| 969 | |||
| 970 | if (amdgpu_vm_size < 1) { | ||
| 971 | dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", | ||
| 972 | amdgpu_vm_size); | ||
| 973 | amdgpu_vm_size = 4; | ||
| 974 | } | ||
| 975 | |||
| 976 | /* | ||
| 977 | * The max GPUVM size for Cayman, SI and CI is 40 bits. | ||
| 978 | */ | ||
| 979 | if (amdgpu_vm_size > 1024) { | ||
| 980 | dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n", | ||
| 981 | amdgpu_vm_size); | ||
| 982 | amdgpu_vm_size = 4; | ||
| 983 | } | ||
| 984 | |||
| 985 | /* This defines the number of bits in the page table versus the page | ||
| 986 | * directory; a page is 4KB, so we have a 12-bit offset, a minimum of 9 | ||
| 987 | * bits in the page table, and the remaining bits in the page directory */ | ||
| 988 | if (amdgpu_vm_block_size == -1) { | ||
| 989 | |||
| 990 | /* Total bits covered by PD + PTs */ | ||
| 991 | unsigned bits = ilog2(amdgpu_vm_size) + 18; | ||
| 992 | |||
| 993 | /* Make sure the PD is 4K in size up to an 8GB address space. | ||
| 994 | Above that, split equally between PD and PTs */ | ||
| 995 | if (amdgpu_vm_size <= 8) | ||
| 996 | amdgpu_vm_block_size = bits - 9; | ||
| 997 | else | ||
| 998 | amdgpu_vm_block_size = (bits + 3) / 2; | ||
| 999 | |||
| 1000 | } else if (amdgpu_vm_block_size < 9) { | ||
| 1001 | dev_warn(adev->dev, "VM page table size (%d) too small\n", | ||
| 1002 | amdgpu_vm_block_size); | ||
| 1003 | amdgpu_vm_block_size = 9; | ||
| 1004 | } | ||
| 1005 | |||
| 1006 | if (amdgpu_vm_block_size > 24 || | ||
| 1007 | (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) { | ||
| 1008 | dev_warn(adev->dev, "VM page table size (%d) too large\n", | ||
| 1009 | amdgpu_vm_block_size); | ||
| 1010 | amdgpu_vm_block_size = 9; | ||
| 1011 | } | ||
| 1012 | } | ||
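The default vm_block_size derivation above can be checked stand-alone; this sketch open-codes ilog2 and prints the PD/PT bit split for a few VM sizes (in GB, as the module parameter is expressed):

```c
/* stand-alone check of the default vm_block_size computation above */
#include <stdio.h>

static unsigned my_ilog2(unsigned v)	/* floor(log2(v)), v > 0 */
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned vm_size;

	for (vm_size = 1; vm_size <= 1024; vm_size *= 8) {
		/* GB -> bytes adds 30 bits; the 12-bit page offset is not
		 * covered by PD + PTs, leaving ilog2(vm_size) + 18 bits */
		unsigned bits = my_ilog2(vm_size) + 18;
		unsigned block_size = (vm_size <= 8) ? bits - 9
						     : (bits + 3) / 2;

		printf("vm_size %4u GB -> %2u PT bits, %2u PD bits\n",
		       vm_size, block_size, bits - block_size);
	}
	return 0;
}
```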
| 1013 | |||
| 1014 | /** | ||
| 1015 | * amdgpu_switcheroo_set_state - set switcheroo state | ||
| 1016 | * | ||
| 1017 | * @pdev: pci dev pointer | ||
| 1018 | * @state: vga switcheroo state | ||
| 1019 | * | ||
| 1020 | * Callback for the switcheroo driver. Suspends or resumes | ||
| 1021 | * the asics before or after it is powered up using ACPI methods. | ||
| 1022 | */ | ||
| 1023 | static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) | ||
| 1024 | { | ||
| 1025 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
| 1026 | |||
| 1027 | if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF) | ||
| 1028 | return; | ||
| 1029 | |||
| 1030 | if (state == VGA_SWITCHEROO_ON) { | ||
| 1031 | unsigned d3_delay = dev->pdev->d3_delay; | ||
| 1032 | |||
| 1033 | printk(KERN_INFO "amdgpu: switched on\n"); | ||
| 1034 | /* don't suspend or resume card normally */ | ||
| 1035 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | ||
| 1036 | |||
| 1037 | amdgpu_resume_kms(dev, true, true); | ||
| 1038 | |||
| 1039 | dev->pdev->d3_delay = d3_delay; | ||
| 1040 | |||
| 1041 | dev->switch_power_state = DRM_SWITCH_POWER_ON; | ||
| 1042 | drm_kms_helper_poll_enable(dev); | ||
| 1043 | } else { | ||
| 1044 | printk(KERN_INFO "amdgpu: switched off\n"); | ||
| 1045 | drm_kms_helper_poll_disable(dev); | ||
| 1046 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | ||
| 1047 | amdgpu_suspend_kms(dev, true, true); | ||
| 1048 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; | ||
| 1049 | } | ||
| 1050 | } | ||
| 1051 | |||
| 1052 | /** | ||
| 1053 | * amdgpu_switcheroo_can_switch - see if switcheroo state can change | ||
| 1054 | * | ||
| 1055 | * @pdev: pci dev pointer | ||
| 1056 | * | ||
| 1057 | * Callback for the switcheroo driver. Checks if the switcheroo | ||
| 1058 | * state can be changed. | ||
| 1059 | * Returns true if the state can be changed, false if not. | ||
| 1060 | */ | ||
| 1061 | static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev) | ||
| 1062 | { | ||
| 1063 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
| 1064 | |||
| 1065 | /* | ||
| 1066 | * FIXME: open_count is protected by drm_global_mutex but that would lead to | ||
| 1067 | * locking inversion with the driver load path. And the access here is | ||
| 1068 | * completely racy anyway. So don't bother with locking for now. | ||
| 1069 | */ | ||
| 1070 | return dev->open_count == 0; | ||
| 1071 | } | ||
| 1072 | |||
| 1073 | static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = { | ||
| 1074 | .set_gpu_state = amdgpu_switcheroo_set_state, | ||
| 1075 | .reprobe = NULL, | ||
| 1076 | .can_switch = amdgpu_switcheroo_can_switch, | ||
| 1077 | }; | ||
| 1078 | |||
| 1079 | int amdgpu_set_clockgating_state(struct amdgpu_device *adev, | ||
| 1080 | enum amdgpu_ip_block_type block_type, | ||
| 1081 | enum amdgpu_clockgating_state state) | ||
| 1082 | { | ||
| 1083 | int i, r = 0; | ||
| 1084 | |||
| 1085 | for (i = 0; i < adev->num_ip_blocks; i++) { | ||
| 1086 | if (adev->ip_blocks[i].type == block_type) { | ||
| 1087 | r = adev->ip_blocks[i].funcs->set_clockgating_state(adev, | ||
| 1088 | state); | ||
| 1089 | if (r) | ||
| 1090 | return r; | ||
| 1091 | } | ||
| 1092 | } | ||
| 1093 | return r; | ||
| 1094 | } | ||
| 1095 | |||
| 1096 | int amdgpu_set_powergating_state(struct amdgpu_device *adev, | ||
| 1097 | enum amdgpu_ip_block_type block_type, | ||
| 1098 | enum amdgpu_powergating_state state) | ||
| 1099 | { | ||
| 1100 | int i, r = 0; | ||
| 1101 | |||
| 1102 | for (i = 0; i < adev->num_ip_blocks; i++) { | ||
| 1103 | if (adev->ip_blocks[i].type == block_type) { | ||
| 1104 | r = adev->ip_blocks[i].funcs->set_powergating_state(adev, | ||
| 1105 | state); | ||
| 1106 | if (r) | ||
| 1107 | return r; | ||
| 1108 | } | ||
| 1109 | } | ||
| 1110 | return r; | ||
| 1111 | } | ||
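A hypothetical caller of these helpers might look like the following; the wrapper function is invented and the GMC block is gated purely as an illustration, with the block-type and state enums taken from this commit's headers:

```c
/* hypothetical caller, kernel context assumed; the wrapper name is
 * invented and the GMC block is gated purely as an illustration */
static int example_gate_gmc(struct amdgpu_device *adev)
{
	/* walks adev->ip_blocks and calls set_clockgating_state on
	 * every block whose type matches */
	return amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_GMC,
					    AMDGPU_CG_STATE_GATE);
}
```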
| 1112 | |||
| 1113 | const struct amdgpu_ip_block_version * amdgpu_get_ip_block( | ||
| 1114 | struct amdgpu_device *adev, | ||
| 1115 | enum amdgpu_ip_block_type type) | ||
| 1116 | { | ||
| 1117 | int i; | ||
| 1118 | |||
| 1119 | for (i = 0; i < adev->num_ip_blocks; i++) | ||
| 1120 | if (adev->ip_blocks[i].type == type) | ||
| 1121 | return &adev->ip_blocks[i]; | ||
| 1122 | |||
| 1123 | return NULL; | ||
| 1124 | } | ||
| 1125 | |||
| 1126 | /** | ||
| 1127 | * amdgpu_ip_block_version_cmp | ||
| 1128 | * | ||
| 1129 | * @adev: amdgpu_device pointer | ||
| 1130 | * @type: enum amdgpu_ip_block_type | ||
| 1131 | * @major: major version | ||
| 1132 | * @minor: minor version | ||
| 1133 | * | ||
| 1134 | * Returns 0 if the block's version is equal or greater, | ||
| 1135 | * 1 if it is smaller or the ip_block doesn't exist | ||
| 1136 | */ | ||
| 1137 | int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, | ||
| 1138 | enum amdgpu_ip_block_type type, | ||
| 1139 | u32 major, u32 minor) | ||
| 1140 | { | ||
| 1141 | const struct amdgpu_ip_block_version *ip_block; | ||
| 1142 | ip_block = amdgpu_get_ip_block(adev, type); | ||
| 1143 | |||
| 1144 | if (ip_block && ((ip_block->major > major) || | ||
| 1145 | ((ip_block->major == major) && | ||
| 1146 | (ip_block->minor >= minor)))) | ||
| 1147 | return 0; | ||
| 1148 | |||
| 1149 | return 1; | ||
| 1150 | } | ||
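Note the inverted sense of the return value: 0 means the block exists with at least the requested version. A usage sketch, with placeholder version numbers and an invented wrapper name:

```c
/* usage sketch, kernel context assumed; version numbers are placeholders */
static bool example_has_gmc_v8(struct amdgpu_device *adev)
{
	/* 0 means "block present and at version 8.0 or newer" */
	return amdgpu_ip_block_version_cmp(adev, AMDGPU_IP_BLOCK_TYPE_GMC,
					   8, 0) == 0;
}
```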
| 1151 | |||
| 1152 | static int amdgpu_early_init(struct amdgpu_device *adev) | ||
| 1153 | { | ||
| 1154 | int i, r = -EINVAL; | ||
| 1155 | |||
| 1156 | switch (adev->asic_type) { | ||
| 1157 | default: | ||
| 1158 | /* FIXME: not supported yet */ | ||
| 1159 | return -EINVAL; | ||
| 1160 | } | ||
| 1161 | |||
| 1164 | if (adev->ip_blocks == NULL) { | ||
| 1165 | DRM_ERROR("No IP blocks found!\n"); | ||
| 1166 | return r; | ||
| 1167 | } | ||
| 1168 | |||
| 1169 | for (i = 0; i < adev->num_ip_blocks; i++) { | ||
| 1170 | if ((amdgpu_ip_block_mask & (1 << i)) == 0) { | ||
| 1171 | DRM_ERROR("disabled ip block: %d\n", i); | ||
| 1172 | adev->ip_block_enabled[i] = false; | ||
| 1173 | } else { | ||
| 1174 | if (adev->ip_blocks[i].funcs->early_init) { | ||
| 1175 | r = adev->ip_blocks[i].funcs->early_init(adev); | ||
| 1176 | if (r) | ||
| 1177 | return r; | ||
| 1178 | } | ||
| 1179 | adev->ip_block_enabled[i] = true; | ||
| 1180 | } | ||
| 1181 | } | ||
| 1182 | |||
| 1183 | return 0; | ||
| 1184 | } | ||
| 1185 | |||
| 1186 | static int amdgpu_init(struct amdgpu_device *adev) | ||
| 1187 | { | ||
| 1188 | int i, r; | ||
| 1189 | |||
| 1190 | for (i = 0; i < adev->num_ip_blocks; i++) { | ||
| 1191 | if (!adev->ip_block_enabled[i]) | ||
| 1192 | continue; | ||
| 1193 | r = adev->ip_blocks[i].funcs->sw_init(adev); | ||
| 1194 | if (r) | ||
| 1195 | return r; | ||
| 1196 | /* need to do gmc hw init early so we can allocate gpu mem */ | ||
| 1197 | if (adev->ip_blocks[i].type == AMDGPU_IP_BLOCK_TYPE_GMC) { | ||
| 1198 | r = amdgpu_vram_scratch_init(adev); | ||
| 1199 | if (r) | ||
| 1200 | return r; | ||
| 1201 | r = adev->ip_blocks[i].funcs->hw_init(adev); | ||
| 1202 | if (r) | ||
| 1203 | return r; | ||
| 1204 | r = amdgpu_wb_init(adev); | ||
| 1205 | if (r) | ||
| 1206 | return r; | ||
| 1207 | } | ||
| 1208 | } | ||
| 1209 | |||
| 1210 | for (i = 0; i < adev->num_ip_blocks; i++) { | ||
| 1211 | if (!adev->ip_block_enabled[i]) | ||
| 1212 | continue; | ||
| 1213 | /* gmc hw init is done early */ | ||
| 1214 | if (adev->ip_blocks[i].type == AMDGPU_IP_BLOCK_TYPE_GMC) | ||
| 1215 | continue; | ||
| 1216 | r = adev->ip_blocks[i].funcs->hw_init(adev); | ||
| 1217 | if (r) | ||
| 1218 | return r; | ||
| 1219 | } | ||
| 1220 | |||
| 1221 | return 0; | ||
| 1222 | } | ||
| 1223 | |||
| 1224 | static int amdgpu_late_init(struct amdgpu_device *adev) | ||
| 1225 | { | ||
| 1226 | int i = 0, r; | ||
| 1227 | |||
| 1228 | for (i = 0; i < adev->num_ip_blocks; i++) { | ||
| 1229 | if (!adev->ip_block_enabled[i]) | ||
| 1230 | continue; | ||
| 1231 | /* enable clockgating to save power */ | ||
| 1232 | r = adev->ip_blocks[i].funcs->set_clockgating_state(adev, | ||
| 1233 | AMDGPU_CG_STATE_GATE); | ||
| 1234 | if (r) | ||
| 1235 | return r; | ||
| 1236 | if (adev->ip_blocks[i].funcs->late_init) { | ||
| 1237 | r = adev->ip_blocks[i].funcs->late_init(adev); | ||
| 1238 | if (r) | ||
| 1239 | return r; | ||
| 1240 | } | ||
| 1241 | } | ||
| 1242 | |||
| 1243 | return 0; | ||
| 1244 | } | ||
| 1245 | |||
| 1246 | static int amdgpu_fini(struct amdgpu_device *adev) | ||
| 1247 | { | ||
| 1248 | int i, r; | ||
| 1249 | |||
| 1250 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | ||
| 1251 | if (!adev->ip_block_enabled[i]) | ||
| 1252 | continue; | ||
| 1253 | if (adev->ip_blocks[i].type == AMDGPU_IP_BLOCK_TYPE_GMC) { | ||
| 1254 | amdgpu_wb_fini(adev); | ||
| 1255 | amdgpu_vram_scratch_fini(adev); | ||
| 1256 | } | ||
| 1257 | /* ungate blocks before hw fini so that we can shutdown the blocks safely */ | ||
| 1258 | r = adev->ip_blocks[i].funcs->set_clockgating_state(adev, | ||
| 1259 | AMDGPU_CG_STATE_UNGATE); | ||
| 1260 | if (r) | ||
| 1261 | return r; | ||
| 1262 | r = adev->ip_blocks[i].funcs->hw_fini(adev); | ||
| 1263 | /* XXX handle errors */ | ||
| 1264 | } | ||
| 1265 | |||
| 1266 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | ||
| 1267 | if (!adev->ip_block_enabled[i]) | ||
| 1268 | continue; | ||
| 1269 | r = adev->ip_blocks[i].funcs->sw_fini(adev); | ||
| 1270 | /* XXX handle errors */ | ||
| 1271 | adev->ip_block_enabled[i] = false; | ||
| 1272 | } | ||
| 1273 | |||
| 1274 | return 0; | ||
| 1275 | } | ||
| 1276 | |||
| 1277 | static int amdgpu_suspend(struct amdgpu_device *adev) | ||
| 1278 | { | ||
| 1279 | int i, r; | ||
| 1280 | |||
| 1281 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | ||
| 1282 | if (!adev->ip_block_enabled[i]) | ||
| 1283 | continue; | ||
| 1284 | /* ungate blocks so that suspend can properly shut them down */ | ||
| 1285 | r = adev->ip_blocks[i].funcs->set_clockgating_state(adev, | ||
| 1286 | AMDGPU_CG_STATE_UNGATE); | ||
| 1287 | /* XXX handle errors */ | ||
| 1288 | r = adev->ip_blocks[i].funcs->suspend(adev); | ||
| 1289 | /* XXX handle errors */ | ||
| 1290 | } | ||
| 1291 | |||
| 1292 | return 0; | ||
| 1293 | } | ||
| 1294 | |||
| 1295 | static int amdgpu_resume(struct amdgpu_device *adev) | ||
| 1296 | { | ||
| 1297 | int i, r; | ||
| 1298 | |||
| 1299 | for (i = 0; i < adev->num_ip_blocks; i++) { | ||
| 1300 | if (!adev->ip_block_enabled[i]) | ||
| 1301 | continue; | ||
| 1302 | r = adev->ip_blocks[i].funcs->resume(adev); | ||
| 1303 | if (r) | ||
| 1304 | return r; | ||
| 1305 | } | ||
| 1306 | |||
| 1307 | return 0; | ||
| 1308 | } | ||
| 1309 | |||
| 1310 | /** | ||
| 1311 | * amdgpu_device_init - initialize the driver | ||
| 1312 | * | ||
| 1313 | * @adev: amdgpu_device pointer | ||
| 1314 | * @ddev: drm dev pointer | ||
| 1315 | * @pdev: pci dev pointer | ||
| 1316 | * @flags: driver flags | ||
| 1317 | * | ||
| 1318 | * Initializes the driver info and hw (all asics). | ||
| 1319 | * Returns 0 for success or an error on failure. | ||
| 1320 | * Called at driver startup. | ||
| 1321 | */ | ||
| 1322 | int amdgpu_device_init(struct amdgpu_device *adev, | ||
| 1323 | struct drm_device *ddev, | ||
| 1324 | struct pci_dev *pdev, | ||
| 1325 | uint32_t flags) | ||
| 1326 | { | ||
| 1327 | int r, i; | ||
| 1328 | bool runtime = false; | ||
| 1329 | |||
| 1330 | adev->shutdown = false; | ||
| 1331 | adev->dev = &pdev->dev; | ||
| 1332 | adev->ddev = ddev; | ||
| 1333 | adev->pdev = pdev; | ||
| 1334 | adev->flags = flags; | ||
| 1335 | adev->asic_type = flags & AMDGPU_ASIC_MASK; | ||
| 1336 | adev->is_atom_bios = false; | ||
| 1337 | adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; | ||
| 1338 | adev->mc.gtt_size = 512 * 1024 * 1024; | ||
| 1339 | adev->accel_working = false; | ||
| 1340 | adev->num_rings = 0; | ||
| 1341 | adev->mman.buffer_funcs = NULL; | ||
| 1342 | adev->mman.buffer_funcs_ring = NULL; | ||
| 1343 | adev->vm_manager.vm_pte_funcs = NULL; | ||
| 1344 | adev->vm_manager.vm_pte_funcs_ring = NULL; | ||
| 1345 | adev->gart.gart_funcs = NULL; | ||
| 1346 | adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); | ||
| 1347 | |||
| 1348 | adev->smc_rreg = &amdgpu_invalid_rreg; | ||
| 1349 | adev->smc_wreg = &amdgpu_invalid_wreg; | ||
| 1350 | adev->pcie_rreg = &amdgpu_invalid_rreg; | ||
| 1351 | adev->pcie_wreg = &amdgpu_invalid_wreg; | ||
| 1352 | adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; | ||
| 1353 | adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; | ||
| 1354 | adev->didt_rreg = &amdgpu_invalid_rreg; | ||
| 1355 | adev->didt_wreg = &amdgpu_invalid_wreg; | ||
| 1356 | adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; | ||
| 1357 | adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; | ||
| 1358 | |||
| 1359 | DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", | ||
| 1360 | amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, | ||
| 1361 | pdev->subsystem_vendor, pdev->subsystem_device); | ||
| 1362 | |||
| 1363 | /* mutex initialization is all done here so we | ||
| 1364 | * can call these functions again later without locking issues */ | ||
| 1365 | mutex_init(&adev->ring_lock); | ||
| 1366 | atomic_set(&adev->irq.ih.lock, 0); | ||
| 1367 | mutex_init(&adev->gem.mutex); | ||
| 1368 | mutex_init(&adev->pm.mutex); | ||
| 1369 | mutex_init(&adev->gfx.gpu_clock_mutex); | ||
| 1370 | mutex_init(&adev->srbm_mutex); | ||
| 1371 | mutex_init(&adev->grbm_idx_mutex); | ||
| 1372 | init_rwsem(&adev->pm.mclk_lock); | ||
| 1373 | init_rwsem(&adev->exclusive_lock); | ||
| 1374 | mutex_init(&adev->mn_lock); | ||
| 1375 | hash_init(adev->mn_hash); | ||
| 1376 | |||
| 1377 | amdgpu_check_arguments(adev); | ||
| 1378 | |||
| 1379 | /* Registers mapping */ | ||
| 1380 | /* TODO: block userspace mapping of io register */ | ||
| 1381 | spin_lock_init(&adev->mmio_idx_lock); | ||
| 1382 | spin_lock_init(&adev->smc_idx_lock); | ||
| 1383 | spin_lock_init(&adev->pcie_idx_lock); | ||
| 1384 | spin_lock_init(&adev->uvd_ctx_idx_lock); | ||
| 1385 | spin_lock_init(&adev->didt_idx_lock); | ||
| 1386 | spin_lock_init(&adev->audio_endpt_idx_lock); | ||
| 1387 | |||
| 1388 | adev->rmmio_base = pci_resource_start(adev->pdev, 5); | ||
| 1389 | adev->rmmio_size = pci_resource_len(adev->pdev, 5); | ||
| 1390 | adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); | ||
| 1391 | if (adev->rmmio == NULL) { | ||
| 1392 | return -ENOMEM; | ||
| 1393 | } | ||
| 1394 | DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); | ||
| 1395 | DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); | ||
| 1396 | |||
| 1397 | /* doorbell bar mapping */ | ||
| 1398 | amdgpu_doorbell_init(adev); | ||
| 1399 | |||
| 1400 | /* io port mapping */ | ||
| 1401 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | ||
| 1402 | if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) { | ||
| 1403 | adev->rio_mem_size = pci_resource_len(adev->pdev, i); | ||
| 1404 | adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size); | ||
| 1405 | break; | ||
| 1406 | } | ||
| 1407 | } | ||
| 1408 | if (adev->rio_mem == NULL) | ||
| 1409 | DRM_ERROR("Unable to find PCI I/O BAR\n"); | ||
| 1410 | |||
| 1411 | /* early init functions */ | ||
| 1412 | r = amdgpu_early_init(adev); | ||
| 1413 | if (r) | ||
| 1414 | return r; | ||
| 1415 | |||
| 1416 | /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ | ||
| 1417 | /* this will fail for cards that aren't VGA class devices, just | ||
| 1418 | * ignore it */ | ||
| 1419 | vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode); | ||
| 1420 | |||
| 1421 | if (amdgpu_runtime_pm == 1) | ||
| 1422 | runtime = true; | ||
| 1423 | if (amdgpu_device_is_px(ddev)) | ||
| 1424 | runtime = true; | ||
| 1425 | vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime); | ||
| 1426 | if (runtime) | ||
| 1427 | vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); | ||
| 1428 | |||
| 1429 | /* Read BIOS */ | ||
| 1430 | if (!amdgpu_get_bios(adev)) | ||
| 1431 | return -EINVAL; | ||
| 1432 | /* Must be an ATOMBIOS */ | ||
| 1433 | if (!adev->is_atom_bios) { | ||
| 1434 | dev_err(adev->dev, "Expecting atombios for GPU\n"); | ||
| 1435 | return -EINVAL; | ||
| 1436 | } | ||
| 1437 | r = amdgpu_atombios_init(adev); | ||
| 1438 | if (r) | ||
| 1439 | return r; | ||
| 1440 | |||
| 1441 | /* Post card if necessary */ | ||
| 1442 | if (!amdgpu_card_posted(adev)) { | ||
| 1443 | if (!adev->bios) { | ||
| 1444 | dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); | ||
| 1445 | return -EINVAL; | ||
| 1446 | } | ||
| 1447 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 1448 | amdgpu_atom_asic_init(adev->mode_info.atom_context); | ||
| 1449 | } | ||
| 1450 | |||
| 1451 | /* Initialize clocks */ | ||
| 1452 | r = amdgpu_atombios_get_clock_info(adev); | ||
| 1453 | if (r) | ||
| 1454 | return r; | ||
| 1455 | /* init i2c buses */ | ||
| 1456 | amdgpu_atombios_i2c_init(adev); | ||
| 1457 | |||
| 1458 | /* Fence driver */ | ||
| 1459 | r = amdgpu_fence_driver_init(adev); | ||
| 1460 | if (r) | ||
| 1461 | return r; | ||
| 1462 | |||
| 1463 | /* init the mode config */ | ||
| 1464 | drm_mode_config_init(adev->ddev); | ||
| 1465 | |||
| 1466 | r = amdgpu_init(adev); | ||
| 1467 | if (r) { | ||
| 1468 | amdgpu_fini(adev); | ||
| 1469 | return r; | ||
| 1470 | } | ||
| 1471 | |||
| 1472 | adev->accel_working = true; | ||
| 1473 | |||
| 1474 | amdgpu_fbdev_init(adev); | ||
| 1475 | |||
| 1476 | r = amdgpu_ib_pool_init(adev); | ||
| 1477 | if (r) { | ||
| 1478 | dev_err(adev->dev, "IB initialization failed (%d).\n", r); | ||
| 1479 | return r; | ||
| 1480 | } | ||
| 1481 | |||
| 1482 | r = amdgpu_ib_ring_tests(adev); | ||
| 1483 | if (r) | ||
| 1484 | DRM_ERROR("ib ring test failed (%d).\n", r); | ||
| 1485 | |||
| 1486 | r = amdgpu_gem_debugfs_init(adev); | ||
| 1487 | if (r) { | ||
| 1488 | DRM_ERROR("registering gem debugfs failed (%d).\n", r); | ||
| 1489 | } | ||
| 1490 | |||
| 1491 | r = amdgpu_debugfs_regs_init(adev); | ||
| 1492 | if (r) { | ||
| 1493 | DRM_ERROR("registering register debugfs failed (%d).\n", r); | ||
| 1494 | } | ||
| 1495 | |||
| 1496 | if ((amdgpu_testing & 1)) { | ||
| 1497 | if (adev->accel_working) | ||
| 1498 | amdgpu_test_moves(adev); | ||
| 1499 | else | ||
| 1500 | DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n"); | ||
| 1501 | } | ||
| 1502 | if ((amdgpu_testing & 2)) { | ||
| 1503 | if (adev->accel_working) | ||
| 1504 | amdgpu_test_syncing(adev); | ||
| 1505 | else | ||
| 1506 | DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n"); | ||
| 1507 | } | ||
| 1508 | if (amdgpu_benchmarking) { | ||
| 1509 | if (adev->accel_working) | ||
| 1510 | amdgpu_benchmark(adev, amdgpu_benchmarking); | ||
| 1511 | else | ||
| 1512 | DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n"); | ||
| 1513 | } | ||
| 1514 | |||
| 1515 | /* enable clockgating etc. after ib tests, since some blocks require | ||
| 1516 | * explicit gating rather than handling it automatically. | ||
| 1517 | */ | ||
| 1518 | r = amdgpu_late_init(adev); | ||
| 1519 | if (r) | ||
| 1520 | return r; | ||
| 1521 | |||
| 1522 | return 0; | ||
| 1523 | } | ||
| 1524 | |||
| 1525 | static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev); | ||
| 1526 | |||
| 1527 | /** | ||
| 1528 | * amdgpu_device_fini - tear down the driver | ||
| 1529 | * | ||
| 1530 | * @adev: amdgpu_device pointer | ||
| 1531 | * | ||
| 1532 | * Tear down the driver info (all asics). | ||
| 1533 | * Called at driver shutdown. | ||
| 1534 | */ | ||
| 1535 | void amdgpu_device_fini(struct amdgpu_device *adev) | ||
| 1536 | { | ||
| 1537 | int r; | ||
| 1538 | |||
| 1539 | DRM_INFO("amdgpu: finishing device.\n"); | ||
| 1540 | adev->shutdown = true; | ||
| 1541 | /* evict vram memory */ | ||
| 1542 | amdgpu_bo_evict_vram(adev); | ||
| 1543 | amdgpu_ib_pool_fini(adev); | ||
| 1544 | amdgpu_fence_driver_fini(adev); | ||
| 1545 | amdgpu_fbdev_fini(adev); | ||
| 1546 | r = amdgpu_fini(adev); | ||
| 1547 | kfree(adev->ip_block_enabled); | ||
| 1549 | adev->ip_block_enabled = NULL; | ||
| 1550 | adev->accel_working = false; | ||
| 1551 | /* free i2c buses */ | ||
| 1552 | amdgpu_i2c_fini(adev); | ||
| 1553 | amdgpu_atombios_fini(adev); | ||
| 1554 | kfree(adev->bios); | ||
| 1555 | adev->bios = NULL; | ||
| 1556 | vga_switcheroo_unregister_client(adev->pdev); | ||
| 1557 | vga_client_register(adev->pdev, NULL, NULL, NULL); | ||
| 1558 | if (adev->rio_mem) | ||
| 1559 | pci_iounmap(adev->pdev, adev->rio_mem); | ||
| 1560 | adev->rio_mem = NULL; | ||
| 1561 | iounmap(adev->rmmio); | ||
| 1562 | adev->rmmio = NULL; | ||
| 1563 | amdgpu_doorbell_fini(adev); | ||
| 1564 | amdgpu_debugfs_regs_cleanup(adev); | ||
| 1565 | amdgpu_debugfs_remove_files(adev); | ||
| 1566 | } | ||
| 1567 | |||
| 1568 | |||
| 1569 | /* | ||
| 1570 | * Suspend & resume. | ||
| 1571 | */ | ||
| 1572 | /** | ||
| 1573 | * amdgpu_suspend_kms - initiate device suspend | ||
| 1574 | * | ||
| 1575 | * @dev: drm dev pointer | ||
| 1576 | * @suspend: true to put the device into a low power state | ||
| 1577 | * | ||
| 1578 | * Puts the hw in the suspend state (all asics). | ||
| 1579 | * Returns 0 for success or an error on failure. | ||
| 1580 | * Called at driver suspend. | ||
| 1581 | */ | ||
| 1582 | int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) | ||
| 1583 | { | ||
| 1584 | struct amdgpu_device *adev; | ||
| 1585 | struct drm_crtc *crtc; | ||
| 1586 | struct drm_connector *connector; | ||
| 1587 | int i, r; | ||
| 1588 | bool force_completion = false; | ||
| 1589 | |||
| 1590 | if (dev == NULL || dev->dev_private == NULL) { | ||
| 1591 | return -ENODEV; | ||
| 1592 | } | ||
| 1593 | |||
| 1594 | adev = dev->dev_private; | ||
| 1595 | |||
| 1596 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
| 1597 | return 0; | ||
| 1598 | |||
| 1599 | drm_kms_helper_poll_disable(dev); | ||
| 1600 | |||
| 1601 | /* turn off display hw */ | ||
| 1602 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 1603 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | ||
| 1604 | } | ||
| 1605 | |||
| 1606 | /* unpin the front buffers */ | ||
| 1607 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 1608 | struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb); | ||
| 1609 | struct amdgpu_bo *robj; | ||
| 1610 | |||
| 1611 | if (rfb == NULL || rfb->obj == NULL) { | ||
| 1612 | continue; | ||
| 1613 | } | ||
| 1614 | robj = gem_to_amdgpu_bo(rfb->obj); | ||
| 1615 | /* don't unpin kernel fb objects */ | ||
| 1616 | if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { | ||
| 1617 | r = amdgpu_bo_reserve(robj, false); | ||
| 1618 | if (r == 0) { | ||
| 1619 | amdgpu_bo_unpin(robj); | ||
| 1620 | amdgpu_bo_unreserve(robj); | ||
| 1621 | } | ||
| 1622 | } | ||
| 1623 | } | ||
| 1624 | /* evict vram memory */ | ||
| 1625 | amdgpu_bo_evict_vram(adev); | ||
| 1626 | |||
| 1627 | /* wait for gpu to finish processing current batch */ | ||
| 1628 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | ||
| 1629 | struct amdgpu_ring *ring = adev->rings[i]; | ||
| 1630 | if (!ring) | ||
| 1631 | continue; | ||
| 1632 | |||
| 1633 | r = amdgpu_fence_wait_empty(ring); | ||
| 1634 | if (r) { | ||
| 1635 | /* delay GPU reset to resume */ | ||
| 1636 | force_completion = true; | ||
| 1637 | } | ||
| 1638 | } | ||
| 1639 | if (force_completion) { | ||
| 1640 | amdgpu_fence_driver_force_completion(adev); | ||
| 1641 | } | ||
| 1642 | |||
| 1643 | r = amdgpu_suspend(adev); | ||
| 1644 | |||
| 1645 | /* evict remaining vram memory */ | ||
| 1646 | amdgpu_bo_evict_vram(adev); | ||
| 1647 | |||
| 1648 | pci_save_state(dev->pdev); | ||
| 1649 | if (suspend) { | ||
| 1650 | /* Shut down the device */ | ||
| 1651 | pci_disable_device(dev->pdev); | ||
| 1652 | pci_set_power_state(dev->pdev, PCI_D3hot); | ||
| 1653 | } | ||
| 1654 | |||
| 1655 | if (fbcon) { | ||
| 1656 | console_lock(); | ||
| 1657 | amdgpu_fbdev_set_suspend(adev, 1); | ||
| 1658 | console_unlock(); | ||
| 1659 | } | ||
| 1660 | return 0; | ||
| 1661 | } | ||
| 1662 | |||
| 1663 | /** | ||
| 1664 | * amdgpu_resume_kms - initiate device resume | ||
| 1665 | * | ||
| 1666 | * @dev: drm dev pointer | ||
| 1667 | * | ||
| 1668 | * Bring the hw back to operating state (all asics). | ||
| 1669 | * Returns 0 for success or an error on failure. | ||
| 1670 | * Called at driver resume. | ||
| 1671 | */ | ||
| 1672 | int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) | ||
| 1673 | { | ||
| 1674 | struct drm_connector *connector; | ||
| 1675 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1676 | int r; | ||
| 1677 | |||
| 1678 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
| 1679 | return 0; | ||
| 1680 | |||
| 1681 | if (fbcon) { | ||
| 1682 | console_lock(); | ||
| 1683 | } | ||
| 1684 | if (resume) { | ||
| 1685 | pci_set_power_state(dev->pdev, PCI_D0); | ||
| 1686 | pci_restore_state(dev->pdev); | ||
| 1687 | if (pci_enable_device(dev->pdev)) { | ||
| 1688 | if (fbcon) | ||
| 1689 | console_unlock(); | ||
| 1690 | return -1; | ||
| 1691 | } | ||
| 1692 | } | ||
| 1693 | |||
| 1694 | /* post card */ | ||
| 1695 | amdgpu_atom_asic_init(adev->mode_info.atom_context); | ||
| 1696 | |||
| 1697 | r = amdgpu_resume(adev); | ||
| 1698 | |||
| 1699 | r = amdgpu_ib_ring_tests(adev); | ||
| 1700 | if (r) | ||
| 1701 | DRM_ERROR("ib ring test failed (%d).\n", r); | ||
| 1702 | |||
| 1703 | r = amdgpu_late_init(adev); | ||
| 1704 | if (r) | ||
| 1705 | return r; | ||
| 1706 | |||
| 1707 | /* blat the mode back in */ | ||
| 1708 | if (fbcon) { | ||
| 1709 | drm_helper_resume_force_mode(dev); | ||
| 1710 | /* turn on display hw */ | ||
| 1711 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 1712 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
| 1713 | } | ||
| 1714 | } | ||
| 1715 | |||
| 1716 | drm_kms_helper_poll_enable(dev); | ||
| 1717 | |||
| 1718 | if (fbcon) { | ||
| 1719 | amdgpu_fbdev_set_suspend(adev, 0); | ||
| 1720 | console_unlock(); | ||
| 1721 | } | ||
| 1722 | |||
| 1723 | return 0; | ||
| 1724 | } | ||
| 1725 | |||
| 1726 | /** | ||
| 1727 | * amdgpu_gpu_reset - reset the asic | ||
| 1728 | * | ||
| 1729 | * @adev: amdgpu device pointer | ||
| 1730 | * | ||
| 1731 | * Attempts to reset the GPU if it has hung (all asics). | ||
| 1732 | * Returns 0 for success or an error on failure. | ||
| 1733 | */ | ||
| 1734 | int amdgpu_gpu_reset(struct amdgpu_device *adev) | ||
| 1735 | { | ||
| 1736 | unsigned ring_sizes[AMDGPU_MAX_RINGS]; | ||
| 1737 | uint32_t *ring_data[AMDGPU_MAX_RINGS]; | ||
| 1738 | |||
| 1739 | bool saved = false; | ||
| 1740 | |||
| 1741 | int i, r; | ||
| 1742 | int resched; | ||
| 1743 | |||
| 1744 | down_write(&adev->exclusive_lock); | ||
| 1745 | |||
| 1746 | if (!adev->needs_reset) { | ||
| 1747 | up_write(&adev->exclusive_lock); | ||
| 1748 | return 0; | ||
| 1749 | } | ||
| 1750 | |||
| 1751 | adev->needs_reset = false; | ||
| 1752 | |||
| 1753 | /* block TTM */ | ||
| 1754 | resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); | ||
| 1755 | |||
| 1756 | r = amdgpu_suspend(adev); | ||
| 1757 | |||
| 1758 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 1759 | struct amdgpu_ring *ring = adev->rings[i]; | ||
| 1760 | if (!ring) | ||
| 1761 | continue; | ||
| 1762 | |||
| 1763 | ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]); | ||
| 1764 | if (ring_sizes[i]) { | ||
| 1765 | saved = true; | ||
| 1766 | dev_info(adev->dev, "Saved %d dwords of commands " | ||
| 1767 | "on ring %d.\n", ring_sizes[i], i); | ||
| 1768 | } | ||
| 1769 | } | ||
| 1770 | |||
| 1771 | retry: | ||
| 1772 | r = amdgpu_asic_reset(adev); | ||
| 1773 | if (!r) { | ||
| 1774 | dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); | ||
| 1775 | r = amdgpu_resume(adev); | ||
| 1776 | } | ||
| 1777 | |||
| 1778 | if (!r) { | ||
| 1779 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 1780 | struct amdgpu_ring *ring = adev->rings[i]; | ||
| 1781 | if (!ring) | ||
| 1782 | continue; | ||
| 1783 | |||
| 1784 | amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]); | ||
| 1785 | ring_sizes[i] = 0; | ||
| 1786 | ring_data[i] = NULL; | ||
| 1787 | } | ||
| 1788 | |||
| 1789 | r = amdgpu_ib_ring_tests(adev); | ||
| 1790 | if (r) { | ||
| 1791 | dev_err(adev->dev, "ib ring test failed (%d).\n", r); | ||
| 1792 | if (saved) { | ||
| 1793 | saved = false; | ||
| 1794 | r = amdgpu_suspend(adev); | ||
| 1795 | goto retry; | ||
| 1796 | } | ||
| 1797 | } | ||
| 1798 | } else { | ||
| 1799 | amdgpu_fence_driver_force_completion(adev); | ||
| 1800 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 1801 | if (adev->rings[i]) | ||
| 1802 | kfree(ring_data[i]); | ||
| 1803 | } | ||
| 1804 | } | ||
| 1805 | |||
| 1806 | drm_helper_resume_force_mode(adev->ddev); | ||
| 1807 | |||
| 1808 | ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); | ||
| 1809 | if (r) { | ||
| 1810 | /* bad news, how do we tell userspace? */ | ||
| 1811 | dev_info(adev->dev, "GPU reset failed\n"); | ||
| 1812 | } | ||
| 1813 | |||
| 1814 | up_write(&adev->exclusive_lock); | ||
| 1815 | return r; | ||
| 1816 | } | ||
| 1817 | |||
| 1818 | |||
| 1819 | /* | ||
| 1820 | * Debugfs | ||
| 1821 | */ | ||
| 1822 | int amdgpu_debugfs_add_files(struct amdgpu_device *adev, | ||
| 1823 | struct drm_info_list *files, | ||
| 1824 | unsigned nfiles) | ||
| 1825 | { | ||
| 1826 | unsigned i; | ||
| 1827 | |||
| 1828 | for (i = 0; i < adev->debugfs_count; i++) { | ||
| 1829 | if (adev->debugfs[i].files == files) { | ||
| 1830 | /* Already registered */ | ||
| 1831 | return 0; | ||
| 1832 | } | ||
| 1833 | } | ||
| 1834 | |||
| 1835 | i = adev->debugfs_count + 1; | ||
| 1836 | if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) { | ||
| 1837 | DRM_ERROR("Reached maximum number of debugfs components.\n"); | ||
| 1838 | DRM_ERROR("Report so we increase " | ||
| 1839 | "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n"); | ||
| 1840 | return -EINVAL; | ||
| 1841 | } | ||
| 1842 | adev->debugfs[adev->debugfs_count].files = files; | ||
| 1843 | adev->debugfs[adev->debugfs_count].num_files = nfiles; | ||
| 1844 | adev->debugfs_count = i; | ||
| 1845 | #if defined(CONFIG_DEBUG_FS) | ||
| 1846 | drm_debugfs_create_files(files, nfiles, | ||
| 1847 | adev->ddev->control->debugfs_root, | ||
| 1848 | adev->ddev->control); | ||
| 1849 | drm_debugfs_create_files(files, nfiles, | ||
| 1850 | adev->ddev->primary->debugfs_root, | ||
| 1851 | adev->ddev->primary); | ||
| 1852 | #endif | ||
| 1853 | return 0; | ||
| 1854 | } | ||
| 1855 | |||
| 1856 | static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev) | ||
| 1857 | { | ||
| 1858 | #if defined(CONFIG_DEBUG_FS) | ||
| 1859 | unsigned i; | ||
| 1860 | |||
| 1861 | for (i = 0; i < adev->debugfs_count; i++) { | ||
| 1862 | drm_debugfs_remove_files(adev->debugfs[i].files, | ||
| 1863 | adev->debugfs[i].num_files, | ||
| 1864 | adev->ddev->control); | ||
| 1865 | drm_debugfs_remove_files(adev->debugfs[i].files, | ||
| 1866 | adev->debugfs[i].num_files, | ||
| 1867 | adev->ddev->primary); | ||
| 1868 | } | ||
| 1869 | #endif | ||
| 1870 | } | ||
| 1871 | |||
| 1872 | #if defined(CONFIG_DEBUG_FS) | ||
| 1873 | |||
| 1874 | static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, | ||
| 1875 | size_t size, loff_t *pos) | ||
| 1876 | { | ||
| 1877 | struct amdgpu_device *adev = f->f_inode->i_private; | ||
| 1878 | ssize_t result = 0; | ||
| 1879 | int r; | ||
| 1880 | |||
| 1881 | if (size & 0x3 || *pos & 0x3) | ||
| 1882 | return -EINVAL; | ||
| 1883 | |||
| 1884 | while (size) { | ||
| 1885 | uint32_t value; | ||
| 1886 | |||
| 887 | if (*pos >= adev->rmmio_size) | ||
| 1888 | return result; | ||
| 1889 | |||
| 1890 | value = RREG32(*pos >> 2); | ||
| 1891 | r = put_user(value, (uint32_t *)buf); | ||
| 1892 | if (r) | ||
| 1893 | return r; | ||
| 1894 | |||
| 1895 | result += 4; | ||
| 1896 | buf += 4; | ||
| 1897 | *pos += 4; | ||
| 1898 | size -= 4; | ||
| 1899 | } | ||
| 1900 | |||
| 1901 | return result; | ||
| 1902 | } | ||
| 1903 | |||
| 1904 | static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, | ||
| 1905 | size_t size, loff_t *pos) | ||
| 1906 | { | ||
| 1907 | struct amdgpu_device *adev = f->f_inode->i_private; | ||
| 1908 | ssize_t result = 0; | ||
| 1909 | int r; | ||
| 1910 | |||
| 1911 | if (size & 0x3 || *pos & 0x3) | ||
| 1912 | return -EINVAL; | ||
| 1913 | |||
| 1914 | while (size) { | ||
| 1915 | uint32_t value; | ||
| 1916 | |||
| 917 | if (*pos >= adev->rmmio_size) | ||
| 1918 | return result; | ||
| 1919 | |||
| 1920 | r = get_user(value, (uint32_t *)buf); | ||
| 1921 | if (r) | ||
| 1922 | return r; | ||
| 1923 | |||
| 1924 | WREG32(*pos >> 2, value); | ||
| 1925 | |||
| 1926 | result += 4; | ||
| 1927 | buf += 4; | ||
| 1928 | *pos += 4; | ||
| 1929 | size -= 4; | ||
| 1930 | } | ||
| 1931 | |||
| 1932 | return result; | ||
| 1933 | } | ||
| 1934 | |||
| 1935 | static const struct file_operations amdgpu_debugfs_regs_fops = { | ||
| 1936 | .owner = THIS_MODULE, | ||
| 1937 | .read = amdgpu_debugfs_regs_read, | ||
| 1938 | .write = amdgpu_debugfs_regs_write, | ||
| 1939 | .llseek = default_llseek | ||
| 1940 | }; | ||
| 1941 | |||
| 1942 | static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) | ||
| 1943 | { | ||
| 1944 | struct drm_minor *minor = adev->ddev->primary; | ||
| 1945 | struct dentry *ent, *root = minor->debugfs_root; | ||
| 1946 | |||
| 1947 | ent = debugfs_create_file("amdgpu_regs", S_IFREG | S_IRUGO, root, | ||
| 1948 | adev, &amdgpu_debugfs_regs_fops); | ||
| 1949 | if (IS_ERR(ent)) | ||
| 1950 | return PTR_ERR(ent); | ||
| 1951 | i_size_write(ent->d_inode, adev->rmmio_size); | ||
| 1952 | adev->debugfs_regs = ent; | ||
| 1953 | |||
| 1954 | return 0; | ||
| 1955 | } | ||
| 1956 | |||
| 1957 | static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) | ||
| 1958 | { | ||
| 1959 | debugfs_remove(adev->debugfs_regs); | ||
| 1960 | adev->debugfs_regs = NULL; | ||
| 1961 | } | ||
| 1962 | |||
| 1963 | int amdgpu_debugfs_init(struct drm_minor *minor) | ||
| 1964 | { | ||
| 1965 | return 0; | ||
| 1966 | } | ||
| 1967 | |||
| 1968 | void amdgpu_debugfs_cleanup(struct drm_minor *minor) | ||
| 1969 | { | ||
| 1970 | } | ||
| 1971 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c new file mode 100644 index 000000000000..f22c0671c3eb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | |||
| @@ -0,0 +1,832 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2007-8 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the "Software"), | ||
| 7 | * to deal in the Software without restriction, including without limitation | ||
| 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 10 | * Software is furnished to do so, subject to the following conditions: | ||
| 11 | * | ||
| 12 | * The above copyright notice and this permission notice shall be included in | ||
| 13 | * all copies or substantial portions of the Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 22 | * | ||
| 23 | * Authors: Dave Airlie | ||
| 24 | * Alex Deucher | ||
| 25 | */ | ||
| 26 | #include <drm/drmP.h> | ||
| 27 | #include <drm/amdgpu_drm.h> | ||
| 28 | #include "amdgpu.h" | ||
| 29 | #include "amdgpu_i2c.h" | ||
| 30 | #include "atom.h" | ||
| 31 | #include "amdgpu_connectors.h" | ||
| 32 | #include <asm/div64.h> | ||
| 33 | |||
| 34 | #include <linux/pm_runtime.h> | ||
| 35 | #include <drm/drm_crtc_helper.h> | ||
| 36 | #include <drm/drm_edid.h> | ||
| 37 | |||
| 38 | |||
| 39 | static void amdgpu_flip_work_func(struct work_struct *__work) | ||
| 40 | { | ||
| 41 | struct amdgpu_flip_work *work = | ||
| 42 | container_of(__work, struct amdgpu_flip_work, flip_work); | ||
| 43 | struct amdgpu_device *adev = work->adev; | ||
| 44 | struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id]; | ||
| 45 | |||
| 46 | struct drm_crtc *crtc = &amdgpuCrtc->base; | ||
| 47 | struct amdgpu_fence *fence; | ||
| 48 | unsigned long flags; | ||
| 49 | int r; | ||
| 50 | |||
| 51 | down_read(&adev->exclusive_lock); | ||
| 52 | if (work->fence) { | ||
| 53 | fence = to_amdgpu_fence(work->fence); | ||
| 54 | if (fence) { | ||
| 55 | r = amdgpu_fence_wait(fence, false); | ||
| 56 | if (r == -EDEADLK) { | ||
| 57 | up_read(&adev->exclusive_lock); | ||
| 58 | r = amdgpu_gpu_reset(adev); | ||
| 59 | down_read(&adev->exclusive_lock); | ||
| 60 | } | ||
| 61 | } else | ||
| 62 | r = fence_wait(work->fence, false); | ||
| 63 | |||
| 64 | if (r) | ||
| 65 | DRM_ERROR("failed to wait on page flip fence (%d)!\n", r); | ||
| 66 | |||
| 67 | /* We continue with the page flip even if we failed to wait on | ||
| 68 | * the fence, otherwise the DRM core and userspace will be | ||
| 69 | * confused about which BO the CRTC is scanning out | ||
| 70 | */ | ||
| 71 | |||
| 72 | fence_put(work->fence); | ||
| 73 | work->fence = NULL; | ||
| 74 | } | ||
| 75 | |||
| 76 | /* We borrow the event spin lock for protecting flip_status */ | ||
| 77 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | ||
| 78 | |||
| 79 | /* set the proper interrupt */ | ||
| 80 | amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id); | ||
| 81 | /* do the flip (mmio) */ | ||
| 82 | adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); | ||
| 83 | /* set the flip status */ | ||
| 84 | amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; | ||
| 85 | |||
| 86 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | ||
| 87 | up_read(&adev->exclusive_lock); | ||
| 88 | } | ||
| 89 | |||
| 90 | /* | ||
| 91 | * Handle unpin events outside the interrupt handler proper. | ||
| 92 | */ | ||
| 93 | static void amdgpu_unpin_work_func(struct work_struct *__work) | ||
| 94 | { | ||
| 95 | struct amdgpu_flip_work *work = | ||
| 96 | container_of(__work, struct amdgpu_flip_work, unpin_work); | ||
| 97 | int r; | ||
| 98 | |||
| 99 | /* unpin of the old buffer */ | ||
| 100 | r = amdgpu_bo_reserve(work->old_rbo, false); | ||
| 101 | if (likely(r == 0)) { | ||
| 102 | r = amdgpu_bo_unpin(work->old_rbo); | ||
| 103 | if (unlikely(r != 0)) { | ||
| 104 | DRM_ERROR("failed to unpin buffer after flip\n"); | ||
| 105 | } | ||
| 106 | amdgpu_bo_unreserve(work->old_rbo); | ||
| 107 | } else | ||
| 108 | DRM_ERROR("failed to reserve buffer after flip\n"); | ||
| 109 | |||
| 110 | drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); | ||
| 111 | kfree(work); | ||
| 112 | } | ||
| 113 | |||
| 114 | int amdgpu_crtc_page_flip(struct drm_crtc *crtc, | ||
| 115 | struct drm_framebuffer *fb, | ||
| 116 | struct drm_pending_vblank_event *event, | ||
| 117 | uint32_t page_flip_flags) | ||
| 118 | { | ||
| 119 | struct drm_device *dev = crtc->dev; | ||
| 120 | struct amdgpu_device *adev = dev->dev_private; | ||
| 121 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 122 | struct amdgpu_framebuffer *old_amdgpu_fb; | ||
| 123 | struct amdgpu_framebuffer *new_amdgpu_fb; | ||
| 124 | struct drm_gem_object *obj; | ||
| 125 | struct amdgpu_flip_work *work; | ||
| 126 | struct amdgpu_bo *new_rbo; | ||
| 127 | unsigned long flags; | ||
| 128 | u64 tiling_flags; | ||
| 129 | u64 base; | ||
| 130 | int r; | ||
| 131 | |||
| 132 | work = kzalloc(sizeof *work, GFP_KERNEL); | ||
| 133 | if (work == NULL) | ||
| 134 | return -ENOMEM; | ||
| 135 | |||
| 136 | INIT_WORK(&work->flip_work, amdgpu_flip_work_func); | ||
| 137 | INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func); | ||
| 138 | |||
| 139 | work->event = event; | ||
| 140 | work->adev = adev; | ||
| 141 | work->crtc_id = amdgpu_crtc->crtc_id; | ||
| 142 | |||
| 143 | /* schedule unpin of the old buffer */ | ||
| 144 | old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); | ||
| 145 | obj = old_amdgpu_fb->obj; | ||
| 146 | |||
| 147 | /* take a reference to the old object */ | ||
| 148 | drm_gem_object_reference(obj); | ||
| 149 | work->old_rbo = gem_to_amdgpu_bo(obj); | ||
| 150 | |||
| 151 | new_amdgpu_fb = to_amdgpu_framebuffer(fb); | ||
| 152 | obj = new_amdgpu_fb->obj; | ||
| 153 | new_rbo = gem_to_amdgpu_bo(obj); | ||
| 154 | |||
| 155 | /* pin the new buffer */ | ||
| 156 | r = amdgpu_bo_reserve(new_rbo, false); | ||
| 157 | if (unlikely(r != 0)) { | ||
| 158 | DRM_ERROR("failed to reserve new rbo buffer before flip\n"); | ||
| 159 | goto cleanup; | ||
| 160 | } | ||
| 161 | |||
| 162 | r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, &base); | ||
| 163 | if (unlikely(r != 0)) { | ||
| 164 | amdgpu_bo_unreserve(new_rbo); | ||
| 165 | r = -EINVAL; | ||
| 166 | DRM_ERROR("failed to pin new rbo buffer before flip\n"); | ||
| 167 | goto cleanup; | ||
| 168 | } | ||
| 169 | |||
| 170 | work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); | ||
| 171 | amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags); | ||
| 172 | amdgpu_bo_unreserve(new_rbo); | ||
| 173 | |||
| 174 | work->base = base; | ||
| 175 | |||
| 176 | r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id); | ||
| 177 | if (r) { | ||
| 178 | DRM_ERROR("failed to get vblank before flip\n"); | ||
| 179 | goto pflip_cleanup; | ||
| 180 | } | ||
| 181 | |||
| 182 | /* we borrow the event spin lock for protecting flip_work */ | ||
| 183 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | ||
| 184 | if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) { | ||
| 185 | DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); | ||
| 186 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | ||
| 187 | r = -EBUSY; | ||
| 188 | goto vblank_cleanup; | ||
| 189 | } | ||
| 190 | |||
| 191 | amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING; | ||
| 192 | amdgpu_crtc->pflip_works = work; | ||
| 193 | |||
| 194 | /* update crtc fb */ | ||
| 195 | crtc->primary->fb = fb; | ||
| 196 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | ||
| 197 | queue_work(amdgpu_crtc->pflip_queue, &work->flip_work); | ||
| 198 | return 0; | ||
| 199 | |||
| 200 | vblank_cleanup: | ||
| 201 | drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id); | ||
| 202 | |||
| 203 | pflip_cleanup: | ||
| 204 | if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) { | ||
| 205 | DRM_ERROR("failed to reserve new rbo in error path\n"); | ||
| 206 | goto cleanup; | ||
| 207 | } | ||
| 208 | if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) { | ||
| 209 | DRM_ERROR("failed to unpin new rbo in error path\n"); | ||
| 210 | } | ||
| 211 | amdgpu_bo_unreserve(new_rbo); | ||
| 212 | |||
| 213 | cleanup: | ||
| 214 | drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); | ||
| 215 | fence_put(work->fence); | ||
| 216 | kfree(work); | ||
| 217 | |||
| 218 | return r; | ||
| 219 | } | ||
| 220 | |||
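The flip path above defers the real work: it pins the new buffer, snapshots the exclusive fence, and only then, under the event spinlock, checks that no other flip is outstanding before marking the CRTC AMDGPU_FLIP_PENDING and queuing the work item. Below is a standalone userspace sketch of that single-flip-in-flight gate, with a pthread mutex standing in for dev->event_lock; all names are illustrative, not the driver's.

```c
/* Sketch of the flip gate: a flip may only be queued while the CRTC is
 * in the NONE state, and the same lock that protects the vblank event
 * list guards the state transition. */
#include <pthread.h>
#include <stdio.h>

enum flip_status { FLIP_NONE, FLIP_PENDING, FLIP_SUBMITTED };

struct fake_crtc {
	pthread_mutex_t event_lock;	/* stands in for dev->event_lock */
	enum flip_status pflip_status;
};

static int queue_flip(struct fake_crtc *crtc)
{
	int ret = 0;

	pthread_mutex_lock(&crtc->event_lock);
	if (crtc->pflip_status != FLIP_NONE)
		ret = -16;	/* -EBUSY: a flip is still outstanding */
	else
		crtc->pflip_status = FLIP_PENDING;
	pthread_mutex_unlock(&crtc->event_lock);
	return ret;
}

int main(void)
{
	struct fake_crtc crtc = {
		.event_lock = PTHREAD_MUTEX_INITIALIZER,
		.pflip_status = FLIP_NONE,
	};

	printf("first flip:  %d\n", queue_flip(&crtc));	/* 0 */
	printf("second flip: %d\n", queue_flip(&crtc));	/* -16 (-EBUSY) */
	return 0;
}
```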
| 221 | int amdgpu_crtc_set_config(struct drm_mode_set *set) | ||
| 222 | { | ||
| 223 | struct drm_device *dev; | ||
| 224 | struct amdgpu_device *adev; | ||
| 225 | struct drm_crtc *crtc; | ||
| 226 | bool active = false; | ||
| 227 | int ret; | ||
| 228 | |||
| 229 | if (!set || !set->crtc) | ||
| 230 | return -EINVAL; | ||
| 231 | |||
| 232 | dev = set->crtc->dev; | ||
| 233 | |||
| 234 | ret = pm_runtime_get_sync(dev->dev); | ||
| 235 | if (ret < 0) | ||
| 236 | return ret; | ||
| 237 | |||
| 238 | ret = drm_crtc_helper_set_config(set); | ||
| 239 | |||
| 240 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | ||
| 241 | if (crtc->enabled) | ||
| 242 | active = true; | ||
| 243 | |||
| 244 | pm_runtime_mark_last_busy(dev->dev); | ||
| 245 | |||
| 246 | adev = dev->dev_private; | ||
| 247 | /* if we have active crtcs and we don't have a power ref, | ||
| 248 | take the current one */ | ||
| 249 | if (active && !adev->have_disp_power_ref) { | ||
| 250 | adev->have_disp_power_ref = true; | ||
| 251 | return ret; | ||
| 252 | } | ||
| 253 | /* if we have no active crtcs, then drop the power ref | ||
| 254 | we got before */ | ||
| 255 | if (!active && adev->have_disp_power_ref) { | ||
| 256 | pm_runtime_put_autosuspend(dev->dev); | ||
| 257 | adev->have_disp_power_ref = false; | ||
| 258 | } | ||
| 259 | |||
| 260 | /* drop the power reference we got coming in here */ | ||
| 261 | pm_runtime_put_autosuspend(dev->dev); | ||
| 262 | return ret; | ||
| 263 | } | ||
| 264 | |||
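amdgpu_crtc_set_config() balances runtime-PM references so the device stays powered exactly while any CRTC is enabled: every call takes a reference, the call that first leaves a display enabled keeps its reference as adev->have_disp_power_ref, and the call that disables the last display drops that extra reference along with its own. A minimal userspace sketch of the same counting, with a plain integer in place of the runtime-PM core (pm_get()/pm_put() are stand-ins, not kernel APIs):

```c
#include <stdbool.h>
#include <stdio.h>

static int pm_refcount;
static bool have_disp_power_ref;

static void pm_get(void) { pm_refcount++; }	/* ~pm_runtime_get_sync() */
static void pm_put(void) { pm_refcount--; }	/* ~pm_runtime_put_autosuspend() */

static void set_config(bool any_crtc_active)
{
	pm_get();			/* reference for this call */

	if (any_crtc_active && !have_disp_power_ref) {
		have_disp_power_ref = true;
		return;			/* keep the ref for the displays */
	}
	if (!any_crtc_active && have_disp_power_ref) {
		pm_put();		/* drop the display reference */
		have_disp_power_ref = false;
	}
	pm_put();			/* drop this call's reference */
}

int main(void)
{
	set_config(true);	/* enable a CRTC */
	printf("after enable:  refs=%d\n", pm_refcount);	/* 1: held for display */
	set_config(false);	/* disable the last CRTC */
	printf("after disable: refs=%d\n", pm_refcount);	/* 0: fully idle */
	return 0;
}
```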
| 265 | static const char *encoder_names[38] = { | ||
| 266 | "NONE", | ||
| 267 | "INTERNAL_LVDS", | ||
| 268 | "INTERNAL_TMDS1", | ||
| 269 | "INTERNAL_TMDS2", | ||
| 270 | "INTERNAL_DAC1", | ||
| 271 | "INTERNAL_DAC2", | ||
| 272 | "INTERNAL_SDVOA", | ||
| 273 | "INTERNAL_SDVOB", | ||
| 274 | "SI170B", | ||
| 275 | "CH7303", | ||
| 276 | "CH7301", | ||
| 277 | "INTERNAL_DVO1", | ||
| 278 | "EXTERNAL_SDVOA", | ||
| 279 | "EXTERNAL_SDVOB", | ||
| 280 | "TITFP513", | ||
| 281 | "INTERNAL_LVTM1", | ||
| 282 | "VT1623", | ||
| 283 | "HDMI_SI1930", | ||
| 284 | "HDMI_INTERNAL", | ||
| 285 | "INTERNAL_KLDSCP_TMDS1", | ||
| 286 | "INTERNAL_KLDSCP_DVO1", | ||
| 287 | "INTERNAL_KLDSCP_DAC1", | ||
| 288 | "INTERNAL_KLDSCP_DAC2", | ||
| 289 | "SI178", | ||
| 290 | "MVPU_FPGA", | ||
| 291 | "INTERNAL_DDI", | ||
| 292 | "VT1625", | ||
| 293 | "HDMI_SI1932", | ||
| 294 | "DP_AN9801", | ||
| 295 | "DP_DP501", | ||
| 296 | "INTERNAL_UNIPHY", | ||
| 297 | "INTERNAL_KLDSCP_LVTMA", | ||
| 298 | "INTERNAL_UNIPHY1", | ||
| 299 | "INTERNAL_UNIPHY2", | ||
| 300 | "NUTMEG", | ||
| 301 | "TRAVIS", | ||
| 302 | "INTERNAL_VCE", | ||
| 303 | "INTERNAL_UNIPHY3", | ||
| 304 | }; | ||
| 305 | |||
| 306 | static const char *hpd_names[6] = { | ||
| 307 | "HPD1", | ||
| 308 | "HPD2", | ||
| 309 | "HPD3", | ||
| 310 | "HPD4", | ||
| 311 | "HPD5", | ||
| 312 | "HPD6", | ||
| 313 | }; | ||
| 314 | |||
| 315 | void amdgpu_print_display_setup(struct drm_device *dev) | ||
| 316 | { | ||
| 317 | struct drm_connector *connector; | ||
| 318 | struct amdgpu_connector *amdgpu_connector; | ||
| 319 | struct drm_encoder *encoder; | ||
| 320 | struct amdgpu_encoder *amdgpu_encoder; | ||
| 321 | uint32_t devices; | ||
| 322 | int i = 0; | ||
| 323 | |||
| 324 | DRM_INFO("AMDGPU Display Connectors\n"); | ||
| 325 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 326 | amdgpu_connector = to_amdgpu_connector(connector); | ||
| 327 | DRM_INFO("Connector %d:\n", i); | ||
| 328 | DRM_INFO(" %s\n", connector->name); | ||
| 329 | if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE) | ||
| 330 | DRM_INFO(" %s\n", hpd_names[amdgpu_connector->hpd.hpd]); | ||
| 331 | if (amdgpu_connector->ddc_bus) { | ||
| 332 | DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", | ||
| 333 | amdgpu_connector->ddc_bus->rec.mask_clk_reg, | ||
| 334 | amdgpu_connector->ddc_bus->rec.mask_data_reg, | ||
| 335 | amdgpu_connector->ddc_bus->rec.a_clk_reg, | ||
| 336 | amdgpu_connector->ddc_bus->rec.a_data_reg, | ||
| 337 | amdgpu_connector->ddc_bus->rec.en_clk_reg, | ||
| 338 | amdgpu_connector->ddc_bus->rec.en_data_reg, | ||
| 339 | amdgpu_connector->ddc_bus->rec.y_clk_reg, | ||
| 340 | amdgpu_connector->ddc_bus->rec.y_data_reg); | ||
| 341 | if (amdgpu_connector->router.ddc_valid) | ||
| 342 | DRM_INFO(" DDC Router 0x%x/0x%x\n", | ||
| 343 | amdgpu_connector->router.ddc_mux_control_pin, | ||
| 344 | amdgpu_connector->router.ddc_mux_state); | ||
| 345 | if (amdgpu_connector->router.cd_valid) | ||
| 346 | DRM_INFO(" Clock/Data Router 0x%x/0x%x\n", | ||
| 347 | amdgpu_connector->router.cd_mux_control_pin, | ||
| 348 | amdgpu_connector->router.cd_mux_state); | ||
| 349 | } else { | ||
| 350 | if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || | ||
| 351 | connector->connector_type == DRM_MODE_CONNECTOR_DVII || | ||
| 352 | connector->connector_type == DRM_MODE_CONNECTOR_DVID || | ||
| 353 | connector->connector_type == DRM_MODE_CONNECTOR_DVIA || | ||
| 354 | connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || | ||
| 355 | connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) | ||
| 356 | DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n"); | ||
| 357 | } | ||
| 358 | DRM_INFO(" Encoders:\n"); | ||
| 359 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
| 360 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 361 | devices = amdgpu_encoder->devices & amdgpu_connector->devices; | ||
| 362 | if (devices) { | ||
| 363 | if (devices & ATOM_DEVICE_CRT1_SUPPORT) | ||
| 364 | DRM_INFO(" CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]); | ||
| 365 | if (devices & ATOM_DEVICE_CRT2_SUPPORT) | ||
| 366 | DRM_INFO(" CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]); | ||
| 367 | if (devices & ATOM_DEVICE_LCD1_SUPPORT) | ||
| 368 | DRM_INFO(" LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]); | ||
| 369 | if (devices & ATOM_DEVICE_DFP1_SUPPORT) | ||
| 370 | DRM_INFO(" DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]); | ||
| 371 | if (devices & ATOM_DEVICE_DFP2_SUPPORT) | ||
| 372 | DRM_INFO(" DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]); | ||
| 373 | if (devices & ATOM_DEVICE_DFP3_SUPPORT) | ||
| 374 | DRM_INFO(" DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]); | ||
| 375 | if (devices & ATOM_DEVICE_DFP4_SUPPORT) | ||
| 376 | DRM_INFO(" DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]); | ||
| 377 | if (devices & ATOM_DEVICE_DFP5_SUPPORT) | ||
| 378 | DRM_INFO(" DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]); | ||
| 379 | if (devices & ATOM_DEVICE_DFP6_SUPPORT) | ||
| 380 | DRM_INFO(" DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]); | ||
| 381 | if (devices & ATOM_DEVICE_TV1_SUPPORT) | ||
| 382 | DRM_INFO(" TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]); | ||
| 383 | if (devices & ATOM_DEVICE_CV_SUPPORT) | ||
| 384 | DRM_INFO(" CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]); | ||
| 385 | } | ||
| 386 | } | ||
| 387 | i++; | ||
| 388 | } | ||
| 389 | } | ||
| 390 | |||
| 391 | /** | ||
| 392 | * amdgpu_ddc_probe - check whether a display is attached by probing | ||
| 393 | * the DDC (or DP AUX) bus for a valid EDID header | ||
| 394 | */ | ||
| 395 | bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, | ||
| 396 | bool use_aux) | ||
| 397 | { | ||
| 398 | u8 out = 0x0; | ||
| 399 | u8 buf[8]; | ||
| 400 | int ret; | ||
| 401 | struct i2c_msg msgs[] = { | ||
| 402 | { | ||
| 403 | .addr = DDC_ADDR, | ||
| 404 | .flags = 0, | ||
| 405 | .len = 1, | ||
| 406 | .buf = &out, | ||
| 407 | }, | ||
| 408 | { | ||
| 409 | .addr = DDC_ADDR, | ||
| 410 | .flags = I2C_M_RD, | ||
| 411 | .len = 8, | ||
| 412 | .buf = buf, | ||
| 413 | } | ||
| 414 | }; | ||
| 415 | |||
| 416 | /* on hw with routers, select right port */ | ||
| 417 | if (amdgpu_connector->router.ddc_valid) | ||
| 418 | amdgpu_i2c_router_select_ddc_port(amdgpu_connector); | ||
| 419 | |||
| 420 | if (use_aux) { | ||
| 421 | ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2); | ||
| 422 | } else { | ||
| 423 | ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2); | ||
| 424 | } | ||
| 425 | |||
| 426 | if (ret != 2) | ||
| 427 | /* Couldn't find an accessible DDC on this connector */ | ||
| 428 | return false; | ||
| 429 | /* Probe also for valid EDID header | ||
| 430 | * EDID header starts with: | ||
| 431 | * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00. | ||
| 432 | * Only the first 6 bytes must be valid as | ||
| 433 | * drm_edid_block_valid() can fix the last 2 bytes */ | ||
| 434 | if (drm_edid_header_is_valid(buf) < 6) { | ||
| 435 | /* Couldn't find an accessible EDID on this | ||
| 436 | * connector */ | ||
| 437 | return false; | ||
| 438 | } | ||
| 439 | return true; | ||
| 440 | } | ||
| 441 | |||
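The probe above issues a one-byte offset write followed by an eight-byte read at DDC_ADDR, then scores the reply against the fixed EDID signature 00 FF FF FF FF FF FF 00: drm_edid_header_is_valid() returns the number of matching bytes, and six is enough because drm_edid_block_valid() can repair the final two. A standalone sketch of that scoring, where edid_header_score() is a local stand-in assumed to mirror the DRM helper:

```c
#include <stdio.h>

static const unsigned char edid_header[8] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

/* count how many leading bytes match the fixed EDID signature */
static int edid_header_score(const unsigned char *buf)
{
	int i, score = 0;

	for (i = 0; i < 8; i++)
		if (buf[i] == edid_header[i])
			score++;
	return score;
}

int main(void)
{
	unsigned char good[8] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
	unsigned char worn[8] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00 };

	/* the >= 6 threshold is the same one the driver applies */
	printf("good: score=%d accepted=%d\n", edid_header_score(good),
	       edid_header_score(good) >= 6);	/* 8, accepted */
	printf("worn: score=%d accepted=%d\n", edid_header_score(worn),
	       edid_header_score(worn) >= 6);	/* 7, still accepted */
	return 0;
}
```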
| 442 | static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb) | ||
| 443 | { | ||
| 444 | struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); | ||
| 445 | |||
| 446 | if (amdgpu_fb->obj) { | ||
| 447 | drm_gem_object_unreference_unlocked(amdgpu_fb->obj); | ||
| 448 | } | ||
| 449 | drm_framebuffer_cleanup(fb); | ||
| 450 | kfree(amdgpu_fb); | ||
| 451 | } | ||
| 452 | |||
| 453 | static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb, | ||
| 454 | struct drm_file *file_priv, | ||
| 455 | unsigned int *handle) | ||
| 456 | { | ||
| 457 | struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); | ||
| 458 | |||
| 459 | return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle); | ||
| 460 | } | ||
| 461 | |||
| 462 | static const struct drm_framebuffer_funcs amdgpu_fb_funcs = { | ||
| 463 | .destroy = amdgpu_user_framebuffer_destroy, | ||
| 464 | .create_handle = amdgpu_user_framebuffer_create_handle, | ||
| 465 | }; | ||
| 466 | |||
| 467 | int | ||
| 468 | amdgpu_framebuffer_init(struct drm_device *dev, | ||
| 469 | struct amdgpu_framebuffer *rfb, | ||
| 470 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
| 471 | struct drm_gem_object *obj) | ||
| 472 | { | ||
| 473 | int ret; | ||
| 474 | rfb->obj = obj; | ||
| 475 | drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd); | ||
| 476 | ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs); | ||
| 477 | if (ret) { | ||
| 478 | rfb->obj = NULL; | ||
| 479 | return ret; | ||
| 480 | } | ||
| 481 | return 0; | ||
| 482 | } | ||
| 483 | |||
| 484 | static struct drm_framebuffer * | ||
| 485 | amdgpu_user_framebuffer_create(struct drm_device *dev, | ||
| 486 | struct drm_file *file_priv, | ||
| 487 | struct drm_mode_fb_cmd2 *mode_cmd) | ||
| 488 | { | ||
| 489 | struct drm_gem_object *obj; | ||
| 490 | struct amdgpu_framebuffer *amdgpu_fb; | ||
| 491 | int ret; | ||
| 492 | |||
| 493 | obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); | ||
| 494 | if (obj == NULL) { | ||
| 495 | dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, " | ||
| 496 | "can't create framebuffer\n", mode_cmd->handles[0]); | ||
| 497 | return ERR_PTR(-ENOENT); | ||
| 498 | } | ||
| 499 | |||
| 500 | amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); | ||
| 501 | if (amdgpu_fb == NULL) { | ||
| 502 | drm_gem_object_unreference_unlocked(obj); | ||
| 503 | return ERR_PTR(-ENOMEM); | ||
| 504 | } | ||
| 505 | |||
| 506 | ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj); | ||
| 507 | if (ret) { | ||
| 508 | kfree(amdgpu_fb); | ||
| 509 | drm_gem_object_unreference_unlocked(obj); | ||
| 510 | return ERR_PTR(ret); | ||
| 511 | } | ||
| 512 | |||
| 513 | return &amdgpu_fb->base; | ||
| 514 | } | ||
| 515 | |||
| 516 | static void amdgpu_output_poll_changed(struct drm_device *dev) | ||
| 517 | { | ||
| 518 | struct amdgpu_device *adev = dev->dev_private; | ||
| 519 | amdgpu_fb_output_poll_changed(adev); | ||
| 520 | } | ||
| 521 | |||
| 522 | const struct drm_mode_config_funcs amdgpu_mode_funcs = { | ||
| 523 | .fb_create = amdgpu_user_framebuffer_create, | ||
| 524 | .output_poll_changed = amdgpu_output_poll_changed | ||
| 525 | }; | ||
| 526 | |||
| 527 | static struct drm_prop_enum_list amdgpu_underscan_enum_list[] = | ||
| 528 | { { UNDERSCAN_OFF, "off" }, | ||
| 529 | { UNDERSCAN_ON, "on" }, | ||
| 530 | { UNDERSCAN_AUTO, "auto" }, | ||
| 531 | }; | ||
| 532 | |||
| 533 | static struct drm_prop_enum_list amdgpu_audio_enum_list[] = | ||
| 534 | { { AMDGPU_AUDIO_DISABLE, "off" }, | ||
| 535 | { AMDGPU_AUDIO_ENABLE, "on" }, | ||
| 536 | { AMDGPU_AUDIO_AUTO, "auto" }, | ||
| 537 | }; | ||
| 538 | |||
| 539 | /* XXX support different dither options? spatial, temporal, both, etc. */ | ||
| 540 | static struct drm_prop_enum_list amdgpu_dither_enum_list[] = | ||
| 541 | { { AMDGPU_FMT_DITHER_DISABLE, "off" }, | ||
| 542 | { AMDGPU_FMT_DITHER_ENABLE, "on" }, | ||
| 543 | }; | ||
| 544 | |||
| 545 | int amdgpu_modeset_create_props(struct amdgpu_device *adev) | ||
| 546 | { | ||
| 547 | int sz; | ||
| 548 | |||
| 549 | if (adev->is_atom_bios) { | ||
| 550 | adev->mode_info.coherent_mode_property = | ||
| 551 | drm_property_create_range(adev->ddev, 0, "coherent", 0, 1); | ||
| 552 | if (!adev->mode_info.coherent_mode_property) | ||
| 553 | return -ENOMEM; | ||
| 554 | } | ||
| 555 | |||
| 556 | adev->mode_info.load_detect_property = | ||
| 557 | drm_property_create_range(adev->ddev, 0, "load detection", 0, 1); | ||
| 558 | if (!adev->mode_info.load_detect_property) | ||
| 559 | return -ENOMEM; | ||
| 560 | |||
| 561 | drm_mode_create_scaling_mode_property(adev->ddev); | ||
| 562 | |||
| 563 | sz = ARRAY_SIZE(amdgpu_underscan_enum_list); | ||
| 564 | adev->mode_info.underscan_property = | ||
| 565 | drm_property_create_enum(adev->ddev, 0, | ||
| 566 | "underscan", | ||
| 567 | amdgpu_underscan_enum_list, sz); | ||
| 568 | |||
| 569 | adev->mode_info.underscan_hborder_property = | ||
| 570 | drm_property_create_range(adev->ddev, 0, | ||
| 571 | "underscan hborder", 0, 128); | ||
| 572 | if (!adev->mode_info.underscan_hborder_property) | ||
| 573 | return -ENOMEM; | ||
| 574 | |||
| 575 | adev->mode_info.underscan_vborder_property = | ||
| 576 | drm_property_create_range(adev->ddev, 0, | ||
| 577 | "underscan vborder", 0, 128); | ||
| 578 | if (!adev->mode_info.underscan_vborder_property) | ||
| 579 | return -ENOMEM; | ||
| 580 | |||
| 581 | sz = ARRAY_SIZE(amdgpu_audio_enum_list); | ||
| 582 | adev->mode_info.audio_property = | ||
| 583 | drm_property_create_enum(adev->ddev, 0, | ||
| 584 | "audio", | ||
| 585 | amdgpu_audio_enum_list, sz); | ||
| 586 | |||
| 587 | sz = ARRAY_SIZE(amdgpu_dither_enum_list); | ||
| 588 | adev->mode_info.dither_property = | ||
| 589 | drm_property_create_enum(adev->ddev, 0, | ||
| 590 | "dither", | ||
| 591 | amdgpu_dither_enum_list, sz); | ||
| 592 | |||
| 593 | return 0; | ||
| 594 | } | ||
| 595 | |||
| 596 | void amdgpu_update_display_priority(struct amdgpu_device *adev) | ||
| 597 | { | ||
| 598 | /* adjustment options for the display watermarks */ | ||
| 599 | if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2)) | ||
| 600 | adev->mode_info.disp_priority = 0; | ||
| 601 | else | ||
| 602 | adev->mode_info.disp_priority = amdgpu_disp_priority; | ||
| 603 | |||
| 604 | } | ||
| 605 | |||
| 606 | static bool is_hdtv_mode(const struct drm_display_mode *mode) | ||
| 607 | { | ||
| 608 | /* try to guess whether this is a TV or a monitor */ | ||
| 609 | if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */ | ||
| 610 | (mode->vdisplay == 576) || /* 576p */ | ||
| 611 | (mode->vdisplay == 720) || /* 720p */ | ||
| 612 | (mode->vdisplay == 1080)) /* 1080p */ | ||
| 613 | return true; | ||
| 614 | else | ||
| 615 | return false; | ||
| 616 | } | ||
| 617 | |||
| 618 | bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | ||
| 619 | const struct drm_display_mode *mode, | ||
| 620 | struct drm_display_mode *adjusted_mode) | ||
| 621 | { | ||
| 622 | struct drm_device *dev = crtc->dev; | ||
| 623 | struct drm_encoder *encoder; | ||
| 624 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 625 | struct amdgpu_encoder *amdgpu_encoder; | ||
| 626 | struct drm_connector *connector; | ||
| 627 | struct amdgpu_connector *amdgpu_connector; | ||
| 628 | u32 src_v = 1, dst_v = 1; | ||
| 629 | u32 src_h = 1, dst_h = 1; | ||
| 630 | |||
| 631 | amdgpu_crtc->h_border = 0; | ||
| 632 | amdgpu_crtc->v_border = 0; | ||
| 633 | |||
| 634 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
| 635 | if (encoder->crtc != crtc) | ||
| 636 | continue; | ||
| 637 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 638 | connector = amdgpu_get_connector_for_encoder(encoder); | ||
| 639 | amdgpu_connector = to_amdgpu_connector(connector); | ||
| 640 | |||
| 641 | /* set scaling */ | ||
| 642 | if (amdgpu_encoder->rmx_type == RMX_OFF) | ||
| 643 | amdgpu_crtc->rmx_type = RMX_OFF; | ||
| 644 | else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay || | ||
| 645 | mode->vdisplay < amdgpu_encoder->native_mode.vdisplay) | ||
| 646 | amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type; | ||
| 647 | else | ||
| 648 | amdgpu_crtc->rmx_type = RMX_OFF; | ||
| 649 | /* copy native mode */ | ||
| 650 | memcpy(&amdgpu_crtc->native_mode, | ||
| 651 | &amdgpu_encoder->native_mode, | ||
| 652 | sizeof(struct drm_display_mode)); | ||
| 653 | src_v = crtc->mode.vdisplay; | ||
| 654 | dst_v = amdgpu_crtc->native_mode.vdisplay; | ||
| 655 | src_h = crtc->mode.hdisplay; | ||
| 656 | dst_h = amdgpu_crtc->native_mode.hdisplay; | ||
| 657 | |||
| 658 | /* fix up for overscan on hdmi */ | ||
| 659 | if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && | ||
| 660 | ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) || | ||
| 661 | ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) && | ||
| 662 | drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) && | ||
| 663 | is_hdtv_mode(mode)))) { | ||
| 664 | if (amdgpu_encoder->underscan_hborder != 0) | ||
| 665 | amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder; | ||
| 666 | else | ||
| 667 | amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16; | ||
| 668 | if (amdgpu_encoder->underscan_vborder != 0) | ||
| 669 | amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder; | ||
| 670 | else | ||
| 671 | amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16; | ||
| 672 | amdgpu_crtc->rmx_type = RMX_FULL; | ||
| 673 | src_v = crtc->mode.vdisplay; | ||
| 674 | dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2); | ||
| 675 | src_h = crtc->mode.hdisplay; | ||
| 676 | dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2); | ||
| 677 | } | ||
| 678 | } | ||
| 679 | if (amdgpu_crtc->rmx_type != RMX_OFF) { | ||
| 680 | fixed20_12 a, b; | ||
| 681 | a.full = dfixed_const(src_v); | ||
| 682 | b.full = dfixed_const(dst_v); | ||
| 683 | amdgpu_crtc->vsc.full = dfixed_div(a, b); | ||
| 684 | a.full = dfixed_const(src_h); | ||
| 685 | b.full = dfixed_const(dst_h); | ||
| 686 | amdgpu_crtc->hsc.full = dfixed_div(a, b); | ||
| 687 | } else { | ||
| 688 | amdgpu_crtc->vsc.full = dfixed_const(1); | ||
| 689 | amdgpu_crtc->hsc.full = dfixed_const(1); | ||
| 690 | } | ||
| 691 | return true; | ||
| 692 | } | ||
| 693 | |||
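The scale factors computed above are 20.12 fixed-point ratios of source size to native size, and the default HDMI underscan border is display_size/32 + 16 pixels per edge. Here is a standalone sketch using simplified, truncating versions of dfixed_const()/dfixed_div() (the kernel helpers round); the 1920-to-1280 scaling example is made up for illustration:

```c
#include <stdint.h>
#include <stdio.h>

#define FIXED_FRAC_BITS 12	/* 20.12 fixed point, as in fixed20_12 */

static uint32_t dfixed_const(uint32_t v) { return v << FIXED_FRAC_BITS; }

/* truncating variant of the kernel's rounding dfixed_div() */
static uint32_t dfixed_div(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a << FIXED_FRAC_BITS) / b);
}

int main(void)
{
	uint32_t src_h = 1920, dst_h = 1280;	/* scale a 1920-wide mode onto 1280 */
	uint32_t hsc = dfixed_div(dfixed_const(src_h), dfixed_const(dst_h));

	printf("hsc = 0x%x (%.3f)\n", hsc, hsc / 4096.0);	/* 0x1800, 1.500 */

	/* default HDMI underscan border for a 1920-wide mode */
	printf("h_border = %d\n", (1920 >> 5) + 16);		/* 76 pixels */
	return 0;
}
```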
| 694 | /* | ||
| 695 | * Retrieve current video scanout position of crtc on a given gpu, and | ||
| 696 | * an optional accurate timestamp of when query happened. | ||
| 697 | * | ||
| 698 | * \param dev Device to query. | ||
| 699 | * \param crtc Crtc to query. | ||
| 700 | * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). | ||
| 701 | * \param *vpos Location where vertical scanout position should be stored. | ||
| 702 | * \param *hpos Location where horizontal scanout position should go. | ||
| 703 | * \param *stime Target location for timestamp taken immediately before | ||
| 704 | * scanout position query. Can be NULL to skip timestamp. | ||
| 705 | * \param *etime Target location for timestamp taken immediately after | ||
| 706 | * scanout position query. Can be NULL to skip timestamp. | ||
| 707 | * | ||
| 708 | * Returns vpos as a positive number while in active scanout area. | ||
| 709 | * Returns vpos as a negative number inside vblank, counting the number | ||
| 710 | * of scanlines to go until end of vblank, e.g., -1 means "one scanline | ||
| 711 | * until start of active scanout / end of vblank." | ||
| 712 | * | ||
| 713 | * \return Flags, or'ed together as follows: | ||
| 714 | * | ||
| 715 | * DRM_SCANOUTPOS_VALID = Query successful. | ||
| 716 | * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank. | ||
| 717 | * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of | ||
| 718 | * this flag means that returned position may be offset by a constant but | ||
| 719 | * unknown small number of scanlines wrt. real scanout position. | ||
| 720 | * | ||
| 721 | */ | ||
| 722 | int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags, | ||
| 723 | int *vpos, int *hpos, ktime_t *stime, ktime_t *etime) | ||
| 724 | { | ||
| 725 | u32 vbl = 0, position = 0; | ||
| 726 | int vbl_start, vbl_end, vtotal, ret = 0; | ||
| 727 | bool in_vbl = true; | ||
| 728 | |||
| 729 | struct amdgpu_device *adev = dev->dev_private; | ||
| 730 | |||
| 731 | /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ | ||
| 732 | |||
| 733 | /* Get optional system timestamp before query. */ | ||
| 734 | if (stime) | ||
| 735 | *stime = ktime_get(); | ||
| 736 | |||
| 737 | if (amdgpu_display_page_flip_get_scanoutpos(adev, crtc, &vbl, &position) == 0) | ||
| 738 | ret |= DRM_SCANOUTPOS_VALID; | ||
| 739 | |||
| 740 | /* Get optional system timestamp after query. */ | ||
| 741 | if (etime) | ||
| 742 | *etime = ktime_get(); | ||
| 743 | |||
| 744 | /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ | ||
| 745 | |||
| 746 | /* Decode into vertical and horizontal scanout position. */ | ||
| 747 | *vpos = position & 0x1fff; | ||
| 748 | *hpos = (position >> 16) & 0x1fff; | ||
| 749 | |||
| 750 | /* Valid vblank area boundaries from gpu retrieved? */ | ||
| 751 | if (vbl > 0) { | ||
| 752 | /* Yes: Decode. */ | ||
| 753 | ret |= DRM_SCANOUTPOS_ACCURATE; | ||
| 754 | vbl_start = vbl & 0x1fff; | ||
| 755 | vbl_end = (vbl >> 16) & 0x1fff; | ||
| 756 | } | ||
| 757 | else { | ||
| 758 | /* No: Fake something reasonable which gives at least ok results. */ | ||
| 759 | vbl_start = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; | ||
| 760 | vbl_end = 0; | ||
| 761 | } | ||
| 762 | |||
| 763 | /* Test scanout position against vblank region. */ | ||
| 764 | if ((*vpos < vbl_start) && (*vpos >= vbl_end)) | ||
| 765 | in_vbl = false; | ||
| 766 | |||
| 767 | /* Check if inside vblank area and apply corrective offsets: | ||
| 768 | * vpos will then be >=0 in video scanout area, but negative | ||
| 769 | * within vblank area, counting down the number of lines until | ||
| 770 | * start of scanout. | ||
| 771 | */ | ||
| 772 | |||
| 773 | /* Inside "upper part" of vblank area? Apply corrective offset if so: */ | ||
| 774 | if (in_vbl && (*vpos >= vbl_start)) { | ||
| 775 | vtotal = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; | ||
| 776 | *vpos = *vpos - vtotal; | ||
| 777 | } | ||
| 778 | |||
| 779 | /* Correct for shifted end of vbl at vbl_end. */ | ||
| 780 | *vpos = *vpos - vbl_end; | ||
| 781 | |||
| 782 | /* In vblank? */ | ||
| 783 | if (in_vbl) | ||
| 784 | ret |= DRM_SCANOUTPOS_IN_VBLANK; | ||
| 785 | |||
| 786 | /* Is vpos outside nominal vblank area, but less than | ||
| 787 | * 1/100 of a frame height away from start of vblank? | ||
| 788 | * If so, assume this isn't a massively delayed vblank | ||
| 789 | * interrupt, but a vblank interrupt that fired a few | ||
| 790 | * microseconds before true start of vblank. Compensate | ||
| 791 | * by adding a full frame duration to the final timestamp. | ||
| 792 | * Happens, e.g., on ATI R500, R600. | ||
| 793 | * | ||
| 794 | * We only do this if DRM_CALLED_FROM_VBLIRQ. | ||
| 795 | */ | ||
| 796 | if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) { | ||
| 797 | vbl_start = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; | ||
| 798 | vtotal = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; | ||
| 799 | |||
| 800 | if (vbl_start - *vpos < vtotal / 100) { | ||
| 801 | *vpos -= vtotal; | ||
| 802 | |||
| 803 | /* Signal this correction as "applied". */ | ||
| 804 | ret |= 0x8; | ||
| 805 | } | ||
| 806 | } | ||
| 807 | |||
| 808 | return ret; | ||
| 809 | } | ||
| 810 | |||
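The normalisation above is the part callers rely on: raw positions in the "upper" vblank region (>= vbl_start) are wrapped by -vtotal, then everything is shifted by -vbl_end, so vpos >= 0 means active scanout while a negative vpos counts the lines remaining until scanout resumes. A standalone sketch of just that arithmetic, with plausible 1080p-style timings (vbl_start 1080, vtotal 1125, vbl_end 0; not taken from real hardware):

```c
#include <stdio.h>

static int normalize_vpos(int raw, int vbl_start, int vbl_end, int vtotal)
{
	int vpos = raw;

	if (vpos >= vbl_start)	/* upper part of the vblank region */
		vpos -= vtotal;
	return vpos - vbl_end;	/* correct for shifted end of vblank */
}

int main(void)
{
	int vbl_start = 1080, vbl_end = 0, vtotal = 1125;

	/* mid-frame: still scanning out line 500 */
	printf("%d\n", normalize_vpos(500, vbl_start, vbl_end, vtotal));   /* 500 */
	/* first vblank line: 45 lines until scanout resumes */
	printf("%d\n", normalize_vpos(1080, vbl_start, vbl_end, vtotal));  /* -45 */
	/* last vblank line: one line to go */
	printf("%d\n", normalize_vpos(1124, vbl_start, vbl_end, vtotal));  /* -1 */
	return 0;
}
```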
| 811 | int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc) | ||
| 812 | { | ||
| 813 | if (crtc < 0 || crtc >= adev->mode_info.num_crtc) | ||
| 814 | return AMDGPU_CRTC_IRQ_NONE; | ||
| 815 | |||
| 816 | switch (crtc) { | ||
| 817 | case 0: | ||
| 818 | return AMDGPU_CRTC_IRQ_VBLANK1; | ||
| 819 | case 1: | ||
| 820 | return AMDGPU_CRTC_IRQ_VBLANK2; | ||
| 821 | case 2: | ||
| 822 | return AMDGPU_CRTC_IRQ_VBLANK3; | ||
| 823 | case 3: | ||
| 824 | return AMDGPU_CRTC_IRQ_VBLANK4; | ||
| 825 | case 4: | ||
| 826 | return AMDGPU_CRTC_IRQ_VBLANK5; | ||
| 827 | case 5: | ||
| 828 | return AMDGPU_CRTC_IRQ_VBLANK6; | ||
| 829 | default: | ||
| 830 | return AMDGPU_CRTC_IRQ_NONE; | ||
| 831 | } | ||
| 832 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c new file mode 100644 index 000000000000..7b7f4aba60c0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | |||
| @@ -0,0 +1,955 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2011 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Alex Deucher | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include "drmP.h" | ||
| 26 | #include "amdgpu.h" | ||
| 27 | #include "amdgpu_atombios.h" | ||
| 28 | #include "amdgpu_i2c.h" | ||
| 29 | #include "amdgpu_dpm.h" | ||
| 30 | #include "atom.h" | ||
| 31 | |||
| 32 | void amdgpu_dpm_print_class_info(u32 class, u32 class2) | ||
| 33 | { | ||
| 34 | printk("\tui class: "); | ||
| 35 | switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { | ||
| 36 | case ATOM_PPLIB_CLASSIFICATION_UI_NONE: | ||
| 37 | default: | ||
| 38 | printk("none\n"); | ||
| 39 | break; | ||
| 40 | case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: | ||
| 41 | printk("battery\n"); | ||
| 42 | break; | ||
| 43 | case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: | ||
| 44 | printk("balanced\n"); | ||
| 45 | break; | ||
| 46 | case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: | ||
| 47 | printk("performance\n"); | ||
| 48 | break; | ||
| 49 | } | ||
| 50 | printk("\tinternal class: "); | ||
| 51 | if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && | ||
| 52 | (class2 == 0)) | ||
| 53 | printk("none"); | ||
| 54 | else { | ||
| 55 | if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) | ||
| 56 | printk("boot "); | ||
| 57 | if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) | ||
| 58 | printk("thermal "); | ||
| 59 | if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) | ||
| 60 | printk("limited_pwr "); | ||
| 61 | if (class & ATOM_PPLIB_CLASSIFICATION_REST) | ||
| 62 | printk("rest "); | ||
| 63 | if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) | ||
| 64 | printk("forced "); | ||
| 65 | if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) | ||
| 66 | printk("3d_perf "); | ||
| 67 | if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) | ||
| 68 | printk("ovrdrv "); | ||
| 69 | if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) | ||
| 70 | printk("uvd "); | ||
| 71 | if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) | ||
| 72 | printk("3d_low "); | ||
| 73 | if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) | ||
| 74 | printk("acpi "); | ||
| 75 | if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) | ||
| 76 | printk("uvd_hd2 "); | ||
| 77 | if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) | ||
| 78 | printk("uvd_hd "); | ||
| 79 | if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) | ||
| 80 | printk("uvd_sd "); | ||
| 81 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) | ||
| 82 | printk("limited_pwr2 "); | ||
| 83 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) | ||
| 84 | printk("ulv "); | ||
| 85 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) | ||
| 86 | printk("uvd_mvc "); | ||
| 87 | } | ||
| 88 | printk("\n"); | ||
| 89 | } | ||
| 90 | |||
| 91 | void amdgpu_dpm_print_cap_info(u32 caps) | ||
| 92 | { | ||
| 93 | printk("\tcaps: "); | ||
| 94 | if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) | ||
| 95 | printk("single_disp "); | ||
| 96 | if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) | ||
| 97 | printk("video "); | ||
| 98 | if (caps & ATOM_PPLIB_DISALLOW_ON_DC) | ||
| 99 | printk("no_dc "); | ||
| 100 | printk("\n"); | ||
| 101 | } | ||
| 102 | |||
| 103 | void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, | ||
| 104 | struct amdgpu_ps *rps) | ||
| 105 | { | ||
| 106 | printk("\tstatus: "); | ||
| 107 | if (rps == adev->pm.dpm.current_ps) | ||
| 108 | printk("c "); | ||
| 109 | if (rps == adev->pm.dpm.requested_ps) | ||
| 110 | printk("r "); | ||
| 111 | if (rps == adev->pm.dpm.boot_ps) | ||
| 112 | printk("b "); | ||
| 113 | printk("\n"); | ||
| 114 | } | ||
| 115 | |||
| 116 | u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) | ||
| 117 | { | ||
| 118 | struct drm_device *dev = adev->ddev; | ||
| 119 | struct drm_crtc *crtc; | ||
| 120 | struct amdgpu_crtc *amdgpu_crtc; | ||
| 121 | u32 line_time_us, vblank_lines; | ||
| 122 | u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ | ||
| 123 | |||
| 124 | if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { | ||
| 125 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 126 | amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 127 | if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { | ||
| 128 | line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) / | ||
| 129 | amdgpu_crtc->hw_mode.clock; | ||
| 130 | vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end - | ||
| 131 | amdgpu_crtc->hw_mode.crtc_vdisplay + | ||
| 132 | (amdgpu_crtc->v_border * 2); | ||
| 133 | vblank_time_us = vblank_lines * line_time_us; | ||
| 134 | break; | ||
| 135 | } | ||
| 136 | } | ||
| 137 | } | ||
| 138 | |||
| 139 | return vblank_time_us; | ||
| 140 | } | ||
| 141 | |||
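The vblank time above is simply the line time (htotal in pixels over the pixel clock in kHz, scaled to microseconds) multiplied by the number of vblank lines, including any underscan borders. A worked example with the standard CEA 1920x1080@60 timing:

```c
#include <stdio.h>

int main(void)
{
	unsigned int clock_khz = 148500;	/* hw_mode.clock is in kHz */
	unsigned int crtc_htotal = 2200;
	unsigned int crtc_vblank_end = 1125;	/* equals vtotal for this mode */
	unsigned int crtc_vdisplay = 1080;
	unsigned int v_border = 0;

	/* 2200 * 1000 / 148500 = 14 us per line (integer math, as in the driver) */
	unsigned int line_time_us = (crtc_htotal * 1000) / clock_khz;
	unsigned int vblank_lines = crtc_vblank_end - crtc_vdisplay +
				    2 * v_border;	/* 45 lines */

	printf("vblank_time_us = %u\n", vblank_lines * line_time_us);	/* 630 */
	return 0;
}
```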
| 142 | u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) | ||
| 143 | { | ||
| 144 | struct drm_device *dev = adev->ddev; | ||
| 145 | struct drm_crtc *crtc; | ||
| 146 | struct amdgpu_crtc *amdgpu_crtc; | ||
| 147 | u32 vrefresh = 0; | ||
| 148 | |||
| 149 | if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { | ||
| 150 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 151 | amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 152 | if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { | ||
| 153 | vrefresh = amdgpu_crtc->hw_mode.vrefresh; | ||
| 154 | break; | ||
| 155 | } | ||
| 156 | } | ||
| 157 | } | ||
| 158 | |||
| 159 | return vrefresh; | ||
| 160 | } | ||
| 161 | |||
| 162 | void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, | ||
| 163 | u32 *p, u32 *u) | ||
| 164 | { | ||
| 165 | u32 b_c = 0; | ||
| 166 | u32 i_c; | ||
| 167 | u32 tmp; | ||
| 168 | |||
| 169 | i_c = (i * r_c) / 100; | ||
| 170 | tmp = i_c >> p_b; | ||
| 171 | |||
| 172 | while (tmp) { | ||
| 173 | b_c++; | ||
| 174 | tmp >>= 1; | ||
| 175 | } | ||
| 176 | |||
| 177 | *u = (b_c + 1) / 2; | ||
| 178 | *p = i_c / (1 << (2 * (*u))); | ||
| 179 | } | ||
| 180 | |||
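amdgpu_calculate_u_and_p() factors the scaled count i_c = i * r_c / 100 into a mantissa p and exponent u with p ≈ i_c / 4^u, where u grows with the number of significant bits of i_c above the p_b shift. A standalone replica with arbitrary sample numbers:

```c
#include <stdio.h>

static void calculate_u_and_p(unsigned int i, unsigned int r_c,
			      unsigned int p_b, unsigned int *p,
			      unsigned int *u)
{
	unsigned int i_c = (i * r_c) / 100;
	unsigned int tmp = i_c >> p_b;
	unsigned int b_c = 0;

	while (tmp) {		/* count significant bits above the p_b shift */
		b_c++;
		tmp >>= 1;
	}
	*u = (b_c + 1) / 2;
	*p = i_c / (1u << (2 * *u));
}

int main(void)
{
	unsigned int p, u;

	calculate_u_and_p(30, 1000, 4, &p, &u);
	/* i_c = 300; 300 >> 4 = 18 -> 5 bits -> u = 3, p = 300 / 64 = 4 */
	printf("u=%u p=%u\n", u, p);
	return 0;
}
```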
| 181 | int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th) | ||
| 182 | { | ||
| 183 | u32 k, a, ah, al; | ||
| 184 | u32 t1; | ||
| 185 | |||
| 186 | if ((fl == 0) || (fh == 0) || (fl > fh)) | ||
| 187 | return -EINVAL; | ||
| 188 | |||
| 189 | k = (100 * fh) / fl; | ||
| 190 | t1 = (t * (k - 100)); | ||
| 191 | a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100)); | ||
| 192 | a = (a + 5) / 10; | ||
| 193 | ah = ((a * t) + 5000) / 10000; | ||
| 194 | al = a - ah; | ||
| 195 | |||
| 196 | *th = t - ah; | ||
| 197 | *tl = t + al; | ||
| 198 | |||
| 199 | return 0; | ||
| 200 | } | ||
| 201 | |||
| 202 | bool amdgpu_is_uvd_state(u32 class, u32 class2) | ||
| 203 | { | ||
| 204 | if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) | ||
| 205 | return true; | ||
| 206 | if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) | ||
| 207 | return true; | ||
| 208 | if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) | ||
| 209 | return true; | ||
| 210 | if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) | ||
| 211 | return true; | ||
| 212 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) | ||
| 213 | return true; | ||
| 214 | return false; | ||
| 215 | } | ||
| 216 | |||
| 217 | bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) | ||
| 218 | { | ||
| 219 | switch (sensor) { | ||
| 220 | case THERMAL_TYPE_RV6XX: | ||
| 221 | case THERMAL_TYPE_RV770: | ||
| 222 | case THERMAL_TYPE_EVERGREEN: | ||
| 223 | case THERMAL_TYPE_SUMO: | ||
| 224 | case THERMAL_TYPE_NI: | ||
| 225 | case THERMAL_TYPE_SI: | ||
| 226 | case THERMAL_TYPE_CI: | ||
| 227 | case THERMAL_TYPE_KV: | ||
| 228 | return true; | ||
| 229 | case THERMAL_TYPE_ADT7473_WITH_INTERNAL: | ||
| 230 | case THERMAL_TYPE_EMC2103_WITH_INTERNAL: | ||
| 231 | return false; /* need special handling */ | ||
| 232 | case THERMAL_TYPE_NONE: | ||
| 233 | case THERMAL_TYPE_EXTERNAL: | ||
| 234 | case THERMAL_TYPE_EXTERNAL_GPIO: | ||
| 235 | default: | ||
| 236 | return false; | ||
| 237 | } | ||
| 238 | } | ||
| 239 | |||
| 240 | union power_info { | ||
| 241 | struct _ATOM_POWERPLAY_INFO info; | ||
| 242 | struct _ATOM_POWERPLAY_INFO_V2 info_2; | ||
| 243 | struct _ATOM_POWERPLAY_INFO_V3 info_3; | ||
| 244 | struct _ATOM_PPLIB_POWERPLAYTABLE pplib; | ||
| 245 | struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; | ||
| 246 | struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; | ||
| 247 | struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; | ||
| 248 | struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; | ||
| 249 | }; | ||
| 250 | |||
| 251 | union fan_info { | ||
| 252 | struct _ATOM_PPLIB_FANTABLE fan; | ||
| 253 | struct _ATOM_PPLIB_FANTABLE2 fan2; | ||
| 254 | struct _ATOM_PPLIB_FANTABLE3 fan3; | ||
| 255 | }; | ||
| 256 | |||
| 257 | static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table, | ||
| 258 | ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) | ||
| 259 | { | ||
| 260 | u32 size = atom_table->ucNumEntries * | ||
| 261 | sizeof(struct amdgpu_clock_voltage_dependency_entry); | ||
| 262 | int i; | ||
| 263 | ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; | ||
| 264 | |||
| 265 | amdgpu_table->entries = kzalloc(size, GFP_KERNEL); | ||
| 266 | if (!amdgpu_table->entries) | ||
| 267 | return -ENOMEM; | ||
| 268 | |||
| 269 | entry = &atom_table->entries[0]; | ||
| 270 | for (i = 0; i < atom_table->ucNumEntries; i++) { | ||
| 271 | amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | | ||
| 272 | (entry->ucClockHigh << 16); | ||
| 273 | amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage); | ||
| 274 | entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *) | ||
| 275 | ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record)); | ||
| 276 | } | ||
| 277 | amdgpu_table->count = atom_table->ucNumEntries; | ||
| 278 | |||
| 279 | return 0; | ||
| 280 | } | ||
| 281 | |||
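The parser above shows the recurring ATOM idiom used throughout this file: a 24-bit clock stored as a little-endian 16-bit low word plus an 8-bit high byte, with records walked by raw byte stride because the tables are packed. A standalone sketch of the unpacking; the struct layout is illustrative rather than the real ATOM_PPLIB record, and these tables conventionally store clocks in 10 kHz units:

```c
#include <stdint.h>
#include <stdio.h>

struct dep_record {
	uint8_t clock_low[2];	/* like usClockLow: little-endian 16 bits */
	uint8_t clock_high;	/* like ucClockHigh: top 8 bits */
	uint8_t voltage[2];	/* like usVoltage: little-endian 16 bits */
} __attribute__((packed));

static uint16_t le16(const uint8_t *b) { return b[0] | (b[1] << 8); }

int main(void)
{
	/* 0x0186A0 = 100000 -> 1 GHz if the unit is 10 kHz */
	struct dep_record rec = {
		.clock_low  = { 0xa0, 0x86 },
		.clock_high = 0x01,
		.voltage    = { 0xb0, 0x04 },	/* 0x04b0 = 1200 mV */
	};
	uint32_t clk = le16(rec.clock_low) | (rec.clock_high << 16);

	printf("clk=%u v=%u\n", clk, le16(rec.voltage));	/* 100000, 1200 */
	return 0;
}
```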
| 282 | int amdgpu_get_platform_caps(struct amdgpu_device *adev) | ||
| 283 | { | ||
| 284 | struct amdgpu_mode_info *mode_info = &adev->mode_info; | ||
| 285 | union power_info *power_info; | ||
| 286 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | ||
| 287 | u16 data_offset; | ||
| 288 | u8 frev, crev; | ||
| 289 | |||
| 290 | if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
| 291 | &frev, &crev, &data_offset)) | ||
| 292 | return -EINVAL; | ||
| 293 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | ||
| 294 | |||
| 295 | adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); | ||
| 296 | adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); | ||
| 297 | adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); | ||
| 298 | |||
| 299 | return 0; | ||
| 300 | } | ||
| 301 | |||
| 302 | /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ | ||
| 303 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 | ||
| 304 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 | ||
| 305 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 | ||
| 306 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 | ||
| 307 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 | ||
| 308 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 | ||
| 309 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24 | ||
| 310 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26 | ||
| 311 | |||
| 312 | int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) | ||
| 313 | { | ||
| 314 | struct amdgpu_mode_info *mode_info = &adev->mode_info; | ||
| 315 | union power_info *power_info; | ||
| 316 | union fan_info *fan_info; | ||
| 317 | ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table; | ||
| 318 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | ||
| 319 | u16 data_offset; | ||
| 320 | u8 frev, crev; | ||
| 321 | int ret, i; | ||
| 322 | |||
| 323 | if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
| 324 | &frev, &crev, &data_offset)) | ||
| 325 | return -EINVAL; | ||
| 326 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | ||
| 327 | |||
| 328 | /* fan table */ | ||
| 329 | if (le16_to_cpu(power_info->pplib.usTableSize) >= | ||
| 330 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { | ||
| 331 | if (power_info->pplib3.usFanTableOffset) { | ||
| 332 | fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset + | ||
| 333 | le16_to_cpu(power_info->pplib3.usFanTableOffset)); | ||
| 334 | adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst; | ||
| 335 | adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin); | ||
| 336 | adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed); | ||
| 337 | adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh); | ||
| 338 | adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin); | ||
| 339 | adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed); | ||
| 340 | adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh); | ||
| 341 | if (fan_info->fan.ucFanTableFormat >= 2) | ||
| 342 | adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax); | ||
| 343 | else | ||
| 344 | adev->pm.dpm.fan.t_max = 10900; | ||
| 345 | adev->pm.dpm.fan.cycle_delay = 100000; | ||
| 346 | if (fan_info->fan.ucFanTableFormat >= 3) { | ||
| 347 | adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode; | ||
| 348 | adev->pm.dpm.fan.default_max_fan_pwm = | ||
| 349 | le16_to_cpu(fan_info->fan3.usFanPWMMax); | ||
| 350 | adev->pm.dpm.fan.default_fan_output_sensitivity = 4836; | ||
| 351 | adev->pm.dpm.fan.fan_output_sensitivity = | ||
| 352 | le16_to_cpu(fan_info->fan3.usFanOutputSensitivity); | ||
| 353 | } | ||
| 354 | adev->pm.dpm.fan.ucode_fan_control = true; | ||
| 355 | } | ||
| 356 | } | ||
| 357 | |||
| 358 | /* clock dependency tables, shedding tables */ | ||
| 359 | if (le16_to_cpu(power_info->pplib.usTableSize) >= | ||
| 360 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) { | ||
| 361 | if (power_info->pplib4.usVddcDependencyOnSCLKOffset) { | ||
| 362 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) | ||
| 363 | (mode_info->atom_context->bios + data_offset + | ||
| 364 | le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset)); | ||
| 365 | ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, | ||
| 366 | dep_table); | ||
| 367 | if (ret) { | ||
| 368 | amdgpu_free_extended_power_table(adev); | ||
| 369 | return ret; | ||
| 370 | } | ||
| 371 | } | ||
| 372 | if (power_info->pplib4.usVddciDependencyOnMCLKOffset) { | ||
| 373 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) | ||
| 374 | (mode_info->atom_context->bios + data_offset + | ||
| 375 | le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset)); | ||
| 376 | ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, | ||
| 377 | dep_table); | ||
| 378 | if (ret) { | ||
| 379 | amdgpu_free_extended_power_table(adev); | ||
| 380 | return ret; | ||
| 381 | } | ||
| 382 | } | ||
| 383 | if (power_info->pplib4.usVddcDependencyOnMCLKOffset) { | ||
| 384 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) | ||
| 385 | (mode_info->atom_context->bios + data_offset + | ||
| 386 | le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset)); | ||
| 387 | ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, | ||
| 388 | dep_table); | ||
| 389 | if (ret) { | ||
| 390 | amdgpu_free_extended_power_table(adev); | ||
| 391 | return ret; | ||
| 392 | } | ||
| 393 | } | ||
| 394 | if (power_info->pplib4.usMvddDependencyOnMCLKOffset) { | ||
| 395 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) | ||
| 396 | (mode_info->atom_context->bios + data_offset + | ||
| 397 | le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset)); | ||
| 398 | ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, | ||
| 399 | dep_table); | ||
| 400 | if (ret) { | ||
| 401 | amdgpu_free_extended_power_table(adev); | ||
| 402 | return ret; | ||
| 403 | } | ||
| 404 | } | ||
| 405 | if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { | ||
| 406 | ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = | ||
| 407 | (ATOM_PPLIB_Clock_Voltage_Limit_Table *) | ||
| 408 | (mode_info->atom_context->bios + data_offset + | ||
| 409 | le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset)); | ||
| 410 | if (clk_v->ucNumEntries) { | ||
| 411 | adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk = | ||
| 412 | le16_to_cpu(clk_v->entries[0].usSclkLow) | | ||
| 413 | (clk_v->entries[0].ucSclkHigh << 16); | ||
| 414 | adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk = | ||
| 415 | le16_to_cpu(clk_v->entries[0].usMclkLow) | | ||
| 416 | (clk_v->entries[0].ucMclkHigh << 16); | ||
| 417 | adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc = | ||
| 418 | le16_to_cpu(clk_v->entries[0].usVddc); | ||
| 419 | adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci = | ||
| 420 | le16_to_cpu(clk_v->entries[0].usVddci); | ||
| 421 | } | ||
| 422 | } | ||
| 423 | if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) { | ||
| 424 | ATOM_PPLIB_PhaseSheddingLimits_Table *psl = | ||
| 425 | (ATOM_PPLIB_PhaseSheddingLimits_Table *) | ||
| 426 | (mode_info->atom_context->bios + data_offset + | ||
| 427 | le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); | ||
| 428 | ATOM_PPLIB_PhaseSheddingLimits_Record *entry; | ||
| 429 | |||
| 430 | adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = | ||
| 431 | kzalloc(psl->ucNumEntries * | ||
| 432 | sizeof(struct amdgpu_phase_shedding_limits_entry), | ||
| 433 | GFP_KERNEL); | ||
| 434 | if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { | ||
| 435 | amdgpu_free_extended_power_table(adev); | ||
| 436 | return -ENOMEM; | ||
| 437 | } | ||
| 438 | |||
| 439 | entry = &psl->entries[0]; | ||
| 440 | for (i = 0; i < psl->ucNumEntries; i++) { | ||
| 441 | adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = | ||
| 442 | le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16); | ||
| 443 | adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = | ||
| 444 | le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16); | ||
| 445 | adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = | ||
| 446 | le16_to_cpu(entry->usVoltage); | ||
| 447 | entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *) | ||
| 448 | ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record)); | ||
| 449 | } | ||
| 450 | adev->pm.dpm.dyn_state.phase_shedding_limits_table.count = | ||
| 451 | psl->ucNumEntries; | ||
| 452 | } | ||
| 453 | } | ||
| 454 | |||
| 455 | /* cac data */ | ||
| 456 | if (le16_to_cpu(power_info->pplib.usTableSize) >= | ||
| 457 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) { | ||
| 458 | adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit); | ||
| 459 | adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit); | ||
| 460 | adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit; | ||
| 461 | adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit); | ||
| 462 | if (adev->pm.dpm.tdp_od_limit) | ||
| 463 | adev->pm.dpm.power_control = true; | ||
| 464 | else | ||
| 465 | adev->pm.dpm.power_control = false; | ||
| 466 | adev->pm.dpm.tdp_adjustment = 0; | ||
| 467 | adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold); | ||
| 468 | adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage); | ||
| 469 | adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope); | ||
| 470 | if (power_info->pplib5.usCACLeakageTableOffset) { | ||
| 471 | ATOM_PPLIB_CAC_Leakage_Table *cac_table = | ||
| 472 | (ATOM_PPLIB_CAC_Leakage_Table *) | ||
| 473 | (mode_info->atom_context->bios + data_offset + | ||
| 474 | le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); | ||
| 475 | ATOM_PPLIB_CAC_Leakage_Record *entry; | ||
| 476 | u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table); | ||
| 477 | adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); | ||
| 478 | if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) { | ||
| 479 | amdgpu_free_extended_power_table(adev); | ||
| 480 | return -ENOMEM; | ||
| 481 | } | ||
| 482 | entry = &cac_table->entries[0]; | ||
| 483 | for (i = 0; i < cac_table->ucNumEntries; i++) { | ||
| 484 | if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { | ||
| 485 | adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = | ||
| 486 | le16_to_cpu(entry->usVddc1); | ||
| 487 | adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = | ||
| 488 | le16_to_cpu(entry->usVddc2); | ||
| 489 | adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = | ||
| 490 | le16_to_cpu(entry->usVddc3); | ||
| 491 | } else { | ||
| 492 | adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = | ||
| 493 | le16_to_cpu(entry->usVddc); | ||
| 494 | adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = | ||
| 495 | le32_to_cpu(entry->ulLeakageValue); | ||
| 496 | } | ||
| 497 | entry = (ATOM_PPLIB_CAC_Leakage_Record *) | ||
| 498 | ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record)); | ||
| 499 | } | ||
| 500 | adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; | ||
| 501 | } | ||
| 502 | } | ||
| 503 | |||
| 504 | /* ext tables */ | ||
| 505 | if (le16_to_cpu(power_info->pplib.usTableSize) >= | ||
| 506 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { | ||
| 507 | ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) | ||
| 508 | (mode_info->atom_context->bios + data_offset + | ||
| 509 | le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); | ||
| 510 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) && | ||
| 511 | ext_hdr->usVCETableOffset) { | ||
| 512 | VCEClockInfoArray *array = (VCEClockInfoArray *) | ||
| 513 | (mode_info->atom_context->bios + data_offset + | ||
| 514 | le16_to_cpu(ext_hdr->usVCETableOffset) + 1); | ||
| 515 | ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits = | ||
| 516 | (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) | ||
| 517 | (mode_info->atom_context->bios + data_offset + | ||
| 518 | le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + | ||
| 519 | 1 + array->ucNumEntries * sizeof(VCEClockInfo)); | ||
| 520 | ATOM_PPLIB_VCE_State_Table *states = | ||
| 521 | (ATOM_PPLIB_VCE_State_Table *) | ||
| 522 | (mode_info->atom_context->bios + data_offset + | ||
| 523 | le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + | ||
| 524 | 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) + | ||
| 525 | 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record))); | ||
| 526 | ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; | ||
| 527 | ATOM_PPLIB_VCE_State_Record *state_entry; | ||
| 528 | VCEClockInfo *vce_clk; | ||
| 529 | u32 size = limits->numEntries * | ||
| 530 | sizeof(struct amdgpu_vce_clock_voltage_dependency_entry); | ||
| 531 | adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = | ||
| 532 | kzalloc(size, GFP_KERNEL); | ||
| 533 | if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { | ||
| 534 | amdgpu_free_extended_power_table(adev); | ||
| 535 | return -ENOMEM; | ||
| 536 | } | ||
| 537 | adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = | ||
| 538 | limits->numEntries; | ||
| 539 | entry = &limits->entries[0]; | ||
| 540 | state_entry = &states->entries[0]; | ||
| 541 | for (i = 0; i < limits->numEntries; i++) { | ||
| 542 | vce_clk = (VCEClockInfo *) | ||
| 543 | ((u8 *)&array->entries[0] + | ||
| 544 | (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); | ||
| 545 | adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = | ||
| 546 | le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); | ||
| 547 | adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = | ||
| 548 | le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); | ||
| 549 | adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = | ||
| 550 | le16_to_cpu(entry->usVoltage); | ||
| 551 | entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) | ||
| 552 | ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); | ||
| 553 | } | ||
| 554 | for (i = 0; i < states->numEntries; i++) { | ||
| 555 | if (i >= AMDGPU_MAX_VCE_LEVELS) | ||
| 556 | break; | ||
| 557 | vce_clk = (VCEClockInfo *) | ||
| 558 | ((u8 *)&array->entries[0] + | ||
| 559 | (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); | ||
| 560 | adev->pm.dpm.vce_states[i].evclk = | ||
| 561 | le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); | ||
| 562 | adev->pm.dpm.vce_states[i].ecclk = | ||
| 563 | le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); | ||
| 564 | adev->pm.dpm.vce_states[i].clk_idx = | ||
| 565 | state_entry->ucClockInfoIndex & 0x3f; | ||
| 566 | adev->pm.dpm.vce_states[i].pstate = | ||
| 567 | (state_entry->ucClockInfoIndex & 0xc0) >> 6; | ||
| 568 | state_entry = (ATOM_PPLIB_VCE_State_Record *) | ||
| 569 | ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record)); | ||
| 570 | } | ||
| 571 | } | ||
| 572 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && | ||
| 573 | ext_hdr->usUVDTableOffset) { | ||
| 574 | UVDClockInfoArray *array = (UVDClockInfoArray *) | ||
| 575 | (mode_info->atom_context->bios + data_offset + | ||
| 576 | le16_to_cpu(ext_hdr->usUVDTableOffset) + 1); | ||
| 577 | ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits = | ||
| 578 | (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *) | ||
| 579 | (mode_info->atom_context->bios + data_offset + | ||
| 580 | le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 + | ||
| 581 | 1 + (array->ucNumEntries * sizeof (UVDClockInfo))); | ||
| 582 | ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry; | ||
| 583 | u32 size = limits->numEntries * | ||
| 584 | sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry); | ||
| 585 | adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = | ||
| 586 | kzalloc(size, GFP_KERNEL); | ||
| 587 | if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { | ||
| 588 | amdgpu_free_extended_power_table(adev); | ||
| 589 | return -ENOMEM; | ||
| 590 | } | ||
| 591 | adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = | ||
| 592 | limits->numEntries; | ||
| 593 | entry = &limits->entries[0]; | ||
| 594 | for (i = 0; i < limits->numEntries; i++) { | ||
| 595 | UVDClockInfo *uvd_clk = (UVDClockInfo *) | ||
| 596 | ((u8 *)&array->entries[0] + | ||
| 597 | (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo))); | ||
| 598 | adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = | ||
| 599 | le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16); | ||
| 600 | adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = | ||
| 601 | le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); | ||
| 602 | adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = | ||
| 603 | le16_to_cpu(entry->usVoltage); | ||
| 604 | entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) | ||
| 605 | ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); | ||
| 606 | } | ||
| 607 | } | ||
| 608 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) && | ||
| 609 | ext_hdr->usSAMUTableOffset) { | ||
| 610 | ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits = | ||
| 611 | (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) | ||
| 612 | (mode_info->atom_context->bios + data_offset + | ||
| 613 | le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1); | ||
| 614 | ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry; | ||
| 615 | u32 size = limits->numEntries * | ||
| 616 | sizeof(struct amdgpu_clock_voltage_dependency_entry); | ||
| 617 | adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = | ||
| 618 | kzalloc(size, GFP_KERNEL); | ||
| 619 | if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { | ||
| 620 | amdgpu_free_extended_power_table(adev); | ||
| 621 | return -ENOMEM; | ||
| 622 | } | ||
| 623 | adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = | ||
| 624 | limits->numEntries; | ||
| 625 | entry = &limits->entries[0]; | ||
| 626 | for (i = 0; i < limits->numEntries; i++) { | ||
| 627 | adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = | ||
| 628 | le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16); | ||
| 629 | adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = | ||
| 630 | le16_to_cpu(entry->usVoltage); | ||
| 631 | entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *) | ||
| 632 | ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record)); | ||
| 633 | } | ||
| 634 | } | ||
| 635 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && | ||
| 636 | ext_hdr->usPPMTableOffset) { | ||
| 637 | ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) | ||
| 638 | (mode_info->atom_context->bios + data_offset + | ||
| 639 | le16_to_cpu(ext_hdr->usPPMTableOffset)); | ||
| 640 | adev->pm.dpm.dyn_state.ppm_table = | ||
| 641 | kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL); | ||
| 642 | if (!adev->pm.dpm.dyn_state.ppm_table) { | ||
| 643 | amdgpu_free_extended_power_table(adev); | ||
| 644 | return -ENOMEM; | ||
| 645 | } | ||
| 646 | adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; | ||
| 647 | adev->pm.dpm.dyn_state.ppm_table->cpu_core_number = | ||
| 648 | le16_to_cpu(ppm->usCpuCoreNumber); | ||
| 649 | adev->pm.dpm.dyn_state.ppm_table->platform_tdp = | ||
| 650 | le32_to_cpu(ppm->ulPlatformTDP); | ||
| 651 | adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp = | ||
| 652 | le32_to_cpu(ppm->ulSmallACPlatformTDP); | ||
| 653 | adev->pm.dpm.dyn_state.ppm_table->platform_tdc = | ||
| 654 | le32_to_cpu(ppm->ulPlatformTDC); | ||
| 655 | adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc = | ||
| 656 | le32_to_cpu(ppm->ulSmallACPlatformTDC); | ||
| 657 | adev->pm.dpm.dyn_state.ppm_table->apu_tdp = | ||
| 658 | le32_to_cpu(ppm->ulApuTDP); | ||
| 659 | adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp = | ||
| 660 | le32_to_cpu(ppm->ulDGpuTDP); | ||
| 661 | adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power = | ||
| 662 | le32_to_cpu(ppm->ulDGpuUlvPower); | ||
| 663 | adev->pm.dpm.dyn_state.ppm_table->tj_max = | ||
| 664 | le32_to_cpu(ppm->ulTjmax); | ||
| 665 | } | ||
| 666 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) && | ||
| 667 | ext_hdr->usACPTableOffset) { | ||
| 668 | ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits = | ||
| 669 | (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) | ||
| 670 | (mode_info->atom_context->bios + data_offset + | ||
| 671 | le16_to_cpu(ext_hdr->usACPTableOffset) + 1); | ||
| 672 | ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry; | ||
| 673 | u32 size = limits->numEntries * | ||
| 674 | sizeof(struct amdgpu_clock_voltage_dependency_entry); | ||
| 675 | adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = | ||
| 676 | kzalloc(size, GFP_KERNEL); | ||
| 677 | if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { | ||
| 678 | amdgpu_free_extended_power_table(adev); | ||
| 679 | return -ENOMEM; | ||
| 680 | } | ||
| 681 | adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = | ||
| 682 | limits->numEntries; | ||
| 683 | entry = &limits->entries[0]; | ||
| 684 | for (i = 0; i < limits->numEntries; i++) { | ||
| 685 | adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = | ||
| 686 | le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16); | ||
| 687 | adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = | ||
| 688 | le16_to_cpu(entry->usVoltage); | ||
| 689 | entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *) | ||
| 690 | ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record)); | ||
| 691 | } | ||
| 692 | } | ||
| 693 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && | ||
| 694 | ext_hdr->usPowerTuneTableOffset) { | ||
| 695 | u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset + | ||
| 696 | le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); | ||
| 697 | ATOM_PowerTune_Table *pt; | ||
| 698 | adev->pm.dpm.dyn_state.cac_tdp_table = | ||
| 699 | kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL); | ||
| 700 | if (!adev->pm.dpm.dyn_state.cac_tdp_table) { | ||
| 701 | amdgpu_free_extended_power_table(adev); | ||
| 702 | return -ENOMEM; | ||
| 703 | } | ||
| 704 | if (rev > 0) { | ||
| 705 | ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *) | ||
| 706 | (mode_info->atom_context->bios + data_offset + | ||
| 707 | le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); | ||
| 708 | adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = | ||
| 709 | ppt->usMaximumPowerDeliveryLimit; | ||
| 710 | pt = &ppt->power_tune_table; | ||
| 711 | } else { | ||
| 712 | ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) | ||
| 713 | (mode_info->atom_context->bios + data_offset + | ||
| 714 | le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); | ||
| 715 | adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255; | ||
| 716 | pt = &ppt->power_tune_table; | ||
| 717 | } | ||
| 718 | adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP); | ||
| 719 | adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp = | ||
| 720 | le16_to_cpu(pt->usConfigurableTDP); | ||
| 721 | adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC); | ||
| 722 | adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit = | ||
| 723 | le16_to_cpu(pt->usBatteryPowerLimit); | ||
| 724 | adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit = | ||
| 725 | le16_to_cpu(pt->usSmallPowerLimit); | ||
| 726 | adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage = | ||
| 727 | le16_to_cpu(pt->usLowCACLeakage); | ||
| 728 | adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage = | ||
| 729 | le16_to_cpu(pt->usHighCACLeakage); | ||
| 730 | } | ||
| 731 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) && | ||
| 732 | ext_hdr->usSclkVddgfxTableOffset) { | ||
| 733 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) | ||
| 734 | (mode_info->atom_context->bios + data_offset + | ||
| 735 | le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset)); | ||
| 736 | ret = amdgpu_parse_clk_voltage_dep_table( | ||
| 737 | &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk, | ||
| 738 | dep_table); | ||
| 739 | if (ret) { | ||
| 740 | kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries); | ||
| 741 | return ret; | ||
| 742 | } | ||
| 743 | } | ||
| 744 | } | ||
| 745 | |||
| 746 | return 0; | ||
| 747 | } | ||
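
Every parser loop above reassembles a 24-bit clock value from a little-endian 16-bit low word and an 8-bit high byte (`le16_to_cpu(low) | (high << 16)`). A minimal user-space sketch of that decode — the struct and field names are hypothetical stand-ins for the ATOM pplib record layout:

```c
/* Illustrative sketch (user-space): reassembling the 24-bit clocks that
 * ATOM pplib records split into a little-endian 16-bit low word and an
 * 8-bit high byte. Struct and field names here are hypothetical. */
#include <stdint.h>
#include <stdio.h>

struct pplib_clk_record {
	uint16_t clk_low;	/* stored little-endian in the BIOS image */
	uint8_t  clk_high;
	uint16_t voltage;
};

static uint32_t decode_clk(const struct pplib_clk_record *r)
{
	/* on a little-endian host le16_to_cpu() is a no-op; shown literally */
	return (uint32_t)r->clk_low | ((uint32_t)r->clk_high << 16);
}

int main(void)
{
	struct pplib_clk_record r = { .clk_low = 0x5F90, .clk_high = 0x01 };
	unsigned clk = decode_clk(&r);

	/* 0x015F90 = 90000; 900 MHz if the units are 10 kHz (an assumption) */
	printf("clock = %u (0x%06x)\n", clk, clk);
	return 0;
}
```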
| 748 | |||
| 749 | void amdgpu_free_extended_power_table(struct amdgpu_device *adev) | ||
| 750 | { | ||
| 751 | struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state; | ||
| 752 | |||
| 753 | kfree(dyn_state->vddc_dependency_on_sclk.entries); | ||
| 754 | kfree(dyn_state->vddci_dependency_on_mclk.entries); | ||
| 755 | kfree(dyn_state->vddc_dependency_on_mclk.entries); | ||
| 756 | kfree(dyn_state->mvdd_dependency_on_mclk.entries); | ||
| 757 | kfree(dyn_state->cac_leakage_table.entries); | ||
| 758 | kfree(dyn_state->phase_shedding_limits_table.entries); | ||
| 759 | kfree(dyn_state->ppm_table); | ||
| 760 | kfree(dyn_state->cac_tdp_table); | ||
| 761 | kfree(dyn_state->vce_clock_voltage_dependency_table.entries); | ||
| 762 | kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); | ||
| 763 | kfree(dyn_state->samu_clock_voltage_dependency_table.entries); | ||
| 764 | kfree(dyn_state->acp_clock_voltage_dependency_table.entries); | ||
| 765 | kfree(dyn_state->vddgfx_dependency_on_sclk.entries); | ||
| 766 | } | ||
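
This teardown frees every dynamic-state table unconditionally, which is safe because `kfree(NULL)` is a no-op; that is why the error paths in the parser above can call `amdgpu_free_extended_power_table()` after any failed allocation without tracking how far parsing got. A small user-space sketch of the same pattern (`free(NULL)` is likewise defined to do nothing):

```c
/* Sketch of the "free everything unconditionally" teardown pattern. */
#include <stdlib.h>

struct dyn_state { int *tbl_a; int *tbl_b; int *tbl_c; };

static void free_all(struct dyn_state *s)
{
	free(s->tbl_a);		/* any of these may still be NULL */
	free(s->tbl_b);
	free(s->tbl_c);
}

static int setup(struct dyn_state *s)	/* assumes *s was zero-initialized */
{
	if (!(s->tbl_a = calloc(4, sizeof(int))) ||
	    !(s->tbl_b = calloc(4, sizeof(int))) ||
	    !(s->tbl_c = calloc(4, sizeof(int)))) {
		free_all(s);	/* mirrors amdgpu_free_extended_power_table() */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct dyn_state s = {0};

	if (setup(&s))
		return 1;
	free_all(&s);
	return 0;
}
```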
| 767 | |||
| 768 | static const char *pp_lib_thermal_controller_names[] = { | ||
| 769 | "NONE", | ||
| 770 | "lm63", | ||
| 771 | "adm1032", | ||
| 772 | "adm1030", | ||
| 773 | "max6649", | ||
| 774 | "lm64", | ||
| 775 | "f75375", | ||
| 776 | "RV6xx", | ||
| 777 | "RV770", | ||
| 778 | "adt7473", | ||
| 779 | "NONE", | ||
| 780 | "External GPIO", | ||
| 781 | "Evergreen", | ||
| 782 | "emc2103", | ||
| 783 | "Sumo", | ||
| 784 | "Northern Islands", | ||
| 785 | "Southern Islands", | ||
| 786 | "lm96163", | ||
| 787 | "Sea Islands", | ||
| 788 | "Kaveri/Kabini", | ||
| 789 | }; | ||
| 790 | |||
| 791 | void amdgpu_add_thermal_controller(struct amdgpu_device *adev) | ||
| 792 | { | ||
| 793 | struct amdgpu_mode_info *mode_info = &adev->mode_info; | ||
| 794 | ATOM_PPLIB_POWERPLAYTABLE *power_table; | ||
| 795 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | ||
| 796 | ATOM_PPLIB_THERMALCONTROLLER *controller; | ||
| 797 | struct amdgpu_i2c_bus_rec i2c_bus; | ||
| 798 | u16 data_offset; | ||
| 799 | u8 frev, crev; | ||
| 800 | |||
| 801 | if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
| 802 | &frev, &crev, &data_offset)) | ||
| 803 | return; | ||
| 804 | power_table = (ATOM_PPLIB_POWERPLAYTABLE *) | ||
| 805 | (mode_info->atom_context->bios + data_offset); | ||
| 806 | controller = &power_table->sThermalController; | ||
| 807 | |||
| 808 | /* add the i2c bus for thermal/fan chip */ | ||
| 809 | if (controller->ucType > 0) { | ||
| 810 | if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) | ||
| 811 | adev->pm.no_fan = true; | ||
| 812 | adev->pm.fan_pulses_per_revolution = | ||
| 813 | controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; | ||
| 814 | if (adev->pm.fan_pulses_per_revolution) { | ||
| 815 | adev->pm.fan_min_rpm = controller->ucFanMinRPM; | ||
| 816 | adev->pm.fan_max_rpm = controller->ucFanMaxRPM; | ||
| 817 | } | ||
| 818 | if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { | ||
| 819 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
| 820 | (controller->ucFanParameters & | ||
| 821 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
| 822 | adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; | ||
| 823 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { | ||
| 824 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
| 825 | (controller->ucFanParameters & | ||
| 826 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
| 827 | adev->pm.int_thermal_type = THERMAL_TYPE_RV770; | ||
| 828 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { | ||
| 829 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
| 830 | (controller->ucFanParameters & | ||
| 831 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
| 832 | adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; | ||
| 833 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { | ||
| 834 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
| 835 | (controller->ucFanParameters & | ||
| 836 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
| 837 | adev->pm.int_thermal_type = THERMAL_TYPE_SUMO; | ||
| 838 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { | ||
| 839 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
| 840 | (controller->ucFanParameters & | ||
| 841 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
| 842 | adev->pm.int_thermal_type = THERMAL_TYPE_NI; | ||
| 843 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) { | ||
| 844 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
| 845 | (controller->ucFanParameters & | ||
| 846 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
| 847 | adev->pm.int_thermal_type = THERMAL_TYPE_SI; | ||
| 848 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) { | ||
| 849 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
| 850 | (controller->ucFanParameters & | ||
| 851 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
| 852 | adev->pm.int_thermal_type = THERMAL_TYPE_CI; | ||
| 853 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) { | ||
| 854 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
| 855 | (controller->ucFanParameters & | ||
| 856 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
| 857 | adev->pm.int_thermal_type = THERMAL_TYPE_KV; | ||
| 858 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { | ||
| 859 | DRM_INFO("External GPIO thermal controller %s fan control\n", | ||
| 860 | (controller->ucFanParameters & | ||
| 861 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
| 862 | adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; | ||
| 863 | } else if (controller->ucType == | ||
| 864 | ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { | ||
| 865 | DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", | ||
| 866 | (controller->ucFanParameters & | ||
| 867 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
| 868 | adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; | ||
| 869 | } else if (controller->ucType == | ||
| 870 | ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { | ||
| 871 | DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", | ||
| 872 | (controller->ucFanParameters & | ||
| 873 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
| 874 | adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; | ||
| 875 | } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { | ||
| 876 | DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", | ||
| 877 | pp_lib_thermal_controller_names[controller->ucType], | ||
| 878 | controller->ucI2cAddress >> 1, | ||
| 879 | (controller->ucFanParameters & | ||
| 880 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
| 881 | adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL; | ||
| 882 | i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine); | ||
| 883 | adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus); | ||
| 884 | if (adev->pm.i2c_bus) { | ||
| 885 | struct i2c_board_info info = { }; | ||
| 886 | const char *name = pp_lib_thermal_controller_names[controller->ucType]; | ||
| 887 | info.addr = controller->ucI2cAddress >> 1; | ||
| 888 | strlcpy(info.type, name, sizeof(info.type)); | ||
| 889 | i2c_new_device(&adev->pm.i2c_bus->adapter, &info); | ||
| 890 | } | ||
| 891 | } else { | ||
| 892 | DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", | ||
| 893 | controller->ucType, | ||
| 894 | controller->ucI2cAddress >> 1, | ||
| 895 | (controller->ucFanParameters & | ||
| 896 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
| 897 | } | ||
| 898 | } | ||
| 899 | } | ||
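
Two small decodes do most of the work here: `ucFanParameters` packs the tachometer pulses-per-revolution in its low bits next to a no-fan flag, and `ucI2cAddress` is an 8-bit bus address that is shifted right once to get the 7-bit address `i2c_board_info` expects. A user-space sketch with illustrative mask values (the real `ATOM_PP_FANPARAMETERS_*` constants live in the ATOM headers, so treat these as assumptions):

```c
/* Sketch of the fan-parameter bitfield decode and the 8-bit -> 7-bit
 * I2C address conversion used above. Mask values are illustrative. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FANPARAM_TACH_PPR_MASK	0x0f	/* pulses per revolution */
#define FANPARAM_NOFAN		0x80	/* no fan attached */

int main(void)
{
	uint8_t fan_params = 0x04;	/* fan present, 4 pulses/rev */
	uint8_t i2c_addr_8bit = 0x98;	/* as stored in the BIOS table */

	bool no_fan = fan_params & FANPARAM_NOFAN;
	unsigned ppr = fan_params & FANPARAM_TACH_PPR_MASK;
	unsigned addr7 = i2c_addr_8bit >> 1;	/* 0x4c, a typical lm63 address */

	printf("no_fan=%d ppr=%u i2c addr=0x%02x\n", no_fan, ppr, addr7);
	return 0;
}
```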
| 900 | |||
| 901 | enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev, | ||
| 902 | u32 sys_mask, | ||
| 903 | enum amdgpu_pcie_gen asic_gen, | ||
| 904 | enum amdgpu_pcie_gen default_gen) | ||
| 905 | { | ||
| 906 | switch (asic_gen) { | ||
| 907 | case AMDGPU_PCIE_GEN1: | ||
| 908 | return AMDGPU_PCIE_GEN1; | ||
| 909 | case AMDGPU_PCIE_GEN2: | ||
| 910 | return AMDGPU_PCIE_GEN2; | ||
| 911 | case AMDGPU_PCIE_GEN3: | ||
| 912 | return AMDGPU_PCIE_GEN3; | ||
| 913 | default: | ||
| 914 | if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3)) | ||
| 915 | return AMDGPU_PCIE_GEN3; | ||
| 916 | else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2)) | ||
| 917 | return AMDGPU_PCIE_GEN2; | ||
| 918 | else | ||
| 919 | return AMDGPU_PCIE_GEN1; | ||
| 920 | } | ||
| 921 | return AMDGPU_PCIE_GEN1; | ||
| 922 | } | ||
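
An explicit ASIC request is honored verbatim; only in the auto case is the requested default clamped by the system speed mask. Note that the fallback degrades straight to Gen1: a Gen3 default on a link that only reports 5.0 GT/s yields Gen1, not Gen2. A user-space sketch of exactly that decision, with illustrative mask values:

```c
/* Sketch of the PCIe gen clamp above. Enum and mask values are
 * illustrative, not the DRM_PCIE_SPEED_* definitions. */
#include <stdio.h>

enum pcie_gen { GEN_AUTO = 0, GEN1 = 1, GEN2 = 2, GEN3 = 3 };
#define SPEED_50	0x2	/* 5.0 GT/s supported -> Gen2 capable */
#define SPEED_80	0x4	/* 8.0 GT/s supported -> Gen3 capable */

static enum pcie_gen pick_gen(unsigned sys_mask, enum pcie_gen asic,
			      enum pcie_gen dflt)
{
	if (asic != GEN_AUTO)
		return asic;		/* explicit request wins */
	if ((sys_mask & SPEED_80) && dflt == GEN3)
		return GEN3;
	if ((sys_mask & SPEED_50) && dflt == GEN2)
		return GEN2;
	return GEN1;
}

int main(void)
{
	printf("%d\n", pick_gen(SPEED_80, GEN_AUTO, GEN3));	/* 3 */
	printf("%d\n", pick_gen(SPEED_50, GEN_AUTO, GEN3));	/* 1, not 2 */
	return 0;
}
```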
| 923 | |||
| 924 | u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev, | ||
| 925 | u16 asic_lanes, | ||
| 926 | u16 default_lanes) | ||
| 927 | { | ||
| 928 | switch (asic_lanes) { | ||
| 929 | case 0: | ||
| 930 | default: | ||
| 931 | return default_lanes; | ||
| 932 | case 1: | ||
| 933 | return 1; | ||
| 934 | case 2: | ||
| 935 | return 2; | ||
| 936 | case 4: | ||
| 937 | return 4; | ||
| 938 | case 8: | ||
| 939 | return 8; | ||
| 940 | case 12: | ||
| 941 | return 12; | ||
| 942 | case 16: | ||
| 943 | return 16; | ||
| 944 | } | ||
| 945 | } | ||
| 946 | |||
| 947 | u8 amdgpu_encode_pci_lane_width(u32 lanes) | ||
| 948 | { | ||
| 949 | u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 }; | ||
| 950 | |||
| 951 | if (lanes > 16) | ||
| 952 | return 0; | ||
| 953 | |||
| 954 | return encoded_lanes[lanes]; | ||
| 955 | } | ||
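
`encoded_lanes[]` is indexed by lane count and yields the hardware field encoding 1 through 6 for the valid widths x1/x2/x4/x8/x12/x16; every other index, and anything above 16, maps to 0. A quick round-trip sketch:

```c
/* Sketch: round-trip of the lane-width encoding above. The array index
 * is the lane count; the value is the field encoding (0 = invalid). */
#include <stdint.h>
#include <stdio.h>

static uint8_t encode_lanes(uint32_t lanes)
{
	static const uint8_t enc[] = { 0, 1, 2, 0, 3, 0, 0, 0,
				       4, 0, 0, 0, 5, 0, 0, 0, 6 };
	return lanes > 16 ? 0 : enc[lanes];
}

int main(void)
{
	static const uint32_t widths[] = { 1, 2, 4, 8, 12, 16, 3 };

	for (unsigned i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
		printf("x%-2u -> %u\n", widths[i], encode_lanes(widths[i]));
	return 0;	/* x3 -> 0: not a valid PCIe width */
}
```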
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h new file mode 100644 index 000000000000..3738a96c2619 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | |||
| @@ -0,0 +1,85 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | #ifndef __AMDGPU_DPM_H__ | ||
| 24 | #define __AMDGPU_DPM_H__ | ||
| 25 | |||
| 26 | #define R600_SSTU_DFLT 0 | ||
| 27 | #define R600_SST_DFLT 0x00C8 | ||
| 28 | |||
| 29 | /* XXX are these ok? */ | ||
| 30 | #define R600_TEMP_RANGE_MIN (90 * 1000) | ||
| 31 | #define R600_TEMP_RANGE_MAX (120 * 1000) | ||
| 32 | |||
| 33 | #define FDO_PWM_MODE_STATIC 1 | ||
| 34 | #define FDO_PWM_MODE_STATIC_RPM 5 | ||
| 35 | |||
| 36 | enum amdgpu_td { | ||
| 37 | AMDGPU_TD_AUTO, | ||
| 38 | AMDGPU_TD_UP, | ||
| 39 | AMDGPU_TD_DOWN, | ||
| 40 | }; | ||
| 41 | |||
| 42 | enum amdgpu_display_watermark { | ||
| 43 | AMDGPU_DISPLAY_WATERMARK_LOW = 0, | ||
| 44 | AMDGPU_DISPLAY_WATERMARK_HIGH = 1, | ||
| 45 | }; | ||
| 46 | |||
| 47 | enum amdgpu_display_gap | ||
| 48 | { | ||
| 49 | AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, | ||
| 50 | AMDGPU_PM_DISPLAY_GAP_VBLANK = 1, | ||
| 51 | AMDGPU_PM_DISPLAY_GAP_WATERMARK = 2, | ||
| 52 | AMDGPU_PM_DISPLAY_GAP_IGNORE = 3, | ||
| 53 | }; | ||
| 54 | |||
| 55 | void amdgpu_dpm_print_class_info(u32 class, u32 class2); | ||
| 56 | void amdgpu_dpm_print_cap_info(u32 caps); | ||
| 57 | void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, | ||
| 58 | struct amdgpu_ps *rps); | ||
| 59 | u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev); | ||
| 60 | u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev); | ||
| 61 | bool amdgpu_is_uvd_state(u32 class, u32 class2); | ||
| 62 | void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, | ||
| 63 | u32 *p, u32 *u); | ||
| 64 | int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th); | ||
| 65 | |||
| 66 | bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor); | ||
| 67 | |||
| 68 | int amdgpu_get_platform_caps(struct amdgpu_device *adev); | ||
| 69 | |||
| 70 | int amdgpu_parse_extended_power_table(struct amdgpu_device *adev); | ||
| 71 | void amdgpu_free_extended_power_table(struct amdgpu_device *adev); | ||
| 72 | |||
| 73 | void amdgpu_add_thermal_controller(struct amdgpu_device *adev); | ||
| 74 | |||
| 75 | enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev, | ||
| 76 | u32 sys_mask, | ||
| 77 | enum amdgpu_pcie_gen asic_gen, | ||
| 78 | enum amdgpu_pcie_gen default_gen); | ||
| 79 | |||
| 80 | u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev, | ||
| 81 | u16 asic_lanes, | ||
| 82 | u16 default_lanes); | ||
| 83 | u8 amdgpu_encode_pci_lane_width(u32 lanes); | ||
| 84 | |||
| 85 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c new file mode 100644 index 000000000000..d1af448795f3 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
| @@ -0,0 +1,439 @@ | |||
| 1 | /** | ||
| 2 | * \file amdgpu_drv.c | ||
| 3 | * AMD Amdgpu driver | ||
| 4 | * | ||
| 5 | * \author Gareth Hughes <gareth@valinux.com> | ||
| 6 | */ | ||
| 7 | |||
| 8 | /* | ||
| 9 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
| 10 | * All Rights Reserved. | ||
| 11 | * | ||
| 12 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 13 | * copy of this software and associated documentation files (the "Software"), | ||
| 14 | * to deal in the Software without restriction, including without limitation | ||
| 15 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 16 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 17 | * Software is furnished to do so, subject to the following conditions: | ||
| 18 | * | ||
| 19 | * The above copyright notice and this permission notice (including the next | ||
| 20 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 21 | * Software. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 24 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 25 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 26 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 27 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 28 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 29 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 30 | */ | ||
| 31 | |||
| 32 | #include <drm/drmP.h> | ||
| 33 | #include <drm/amdgpu_drm.h> | ||
| 34 | #include <drm/drm_gem.h> | ||
| 35 | #include "amdgpu_drv.h" | ||
| 36 | |||
| 37 | #include <drm/drm_pciids.h> | ||
| 38 | #include <linux/console.h> | ||
| 39 | #include <linux/module.h> | ||
| 40 | #include <linux/pm_runtime.h> | ||
| 41 | #include <linux/vga_switcheroo.h> | ||
| 42 | #include "drm_crtc_helper.h" | ||
| 43 | |||
| 44 | #include "amdgpu.h" | ||
| 45 | #include "amdgpu_irq.h" | ||
| 46 | |||
| 47 | /* | ||
| 48 | * KMS wrapper. | ||
| 49 | * - 3.0.0 - initial driver | ||
| 50 | */ | ||
| 51 | #define KMS_DRIVER_MAJOR 3 | ||
| 52 | #define KMS_DRIVER_MINOR 0 | ||
| 53 | #define KMS_DRIVER_PATCHLEVEL 0 | ||
| 54 | |||
| 55 | int amdgpu_vram_limit = 0; | ||
| 56 | int amdgpu_gart_size = -1; /* auto */ | ||
| 57 | int amdgpu_benchmarking = 0; | ||
| 58 | int amdgpu_testing = 0; | ||
| 59 | int amdgpu_audio = -1; | ||
| 60 | int amdgpu_disp_priority = 0; | ||
| 61 | int amdgpu_hw_i2c = 0; | ||
| 62 | int amdgpu_pcie_gen2 = -1; | ||
| 63 | int amdgpu_msi = -1; | ||
| 64 | int amdgpu_lockup_timeout = 10000; | ||
| 65 | int amdgpu_dpm = -1; | ||
| 66 | int amdgpu_smc_load_fw = 1; | ||
| 67 | int amdgpu_aspm = -1; | ||
| 68 | int amdgpu_runtime_pm = -1; | ||
| 69 | int amdgpu_hard_reset = 0; | ||
| 70 | unsigned amdgpu_ip_block_mask = 0xffffffff; | ||
| 71 | int amdgpu_bapm = -1; | ||
| 72 | int amdgpu_deep_color = 0; | ||
| 73 | int amdgpu_vm_size = 8; | ||
| 74 | int amdgpu_vm_block_size = -1; | ||
| 75 | int amdgpu_exp_hw_support = 0; | ||
| 76 | |||
| 77 | MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); | ||
| 78 | module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); | ||
| 79 | |||
| 80 | MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to set up in megabytes (32, 64, etc., -1 = auto)"); | ||
| 81 | module_param_named(gartsize, amdgpu_gart_size, int, 0600); | ||
| 82 | |||
| 83 | MODULE_PARM_DESC(benchmark, "Run benchmark"); | ||
| 84 | module_param_named(benchmark, amdgpu_benchmarking, int, 0444); | ||
| 85 | |||
| 86 | MODULE_PARM_DESC(test, "Run tests"); | ||
| 87 | module_param_named(test, amdgpu_testing, int, 0444); | ||
| 88 | |||
| 89 | MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)"); | ||
| 90 | module_param_named(audio, amdgpu_audio, int, 0444); | ||
| 91 | |||
| 92 | MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)"); | ||
| 93 | module_param_named(disp_priority, amdgpu_disp_priority, int, 0444); | ||
| 94 | |||
| 95 | MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); | ||
| 96 | module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444); | ||
| 97 | |||
| 98 | MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)"); | ||
| 99 | module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444); | ||
| 100 | |||
| 101 | MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)"); | ||
| 102 | module_param_named(msi, amdgpu_msi, int, 0444); | ||
| 103 | |||
| 104 | MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)"); | ||
| 105 | module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444); | ||
| 106 | |||
| 107 | MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)"); | ||
| 108 | module_param_named(dpm, amdgpu_dpm, int, 0444); | ||
| 109 | |||
| 110 | MODULE_PARM_DESC(smc_load_fw, "SMC firmware loading (1 = enable, 0 = disable)"); | ||
| 111 | module_param_named(smc_load_fw, amdgpu_smc_load_fw, int, 0444); | ||
| 112 | |||
| 113 | MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)"); | ||
| 114 | module_param_named(aspm, amdgpu_aspm, int, 0444); | ||
| 115 | |||
| 116 | MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)"); | ||
| 117 | module_param_named(runpm, amdgpu_runtime_pm, int, 0444); | ||
| 118 | |||
| 119 | MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); | ||
| 120 | module_param_named(hard_reset, amdgpu_hard_reset, int, 0444); | ||
| 121 | |||
| 122 | MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))"); | ||
| 123 | module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444); | ||
| 124 | |||
| 125 | MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)"); | ||
| 126 | module_param_named(bapm, amdgpu_bapm, int, 0444); | ||
| 127 | |||
| 128 | MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))"); | ||
| 129 | module_param_named(deep_color, amdgpu_deep_color, int, 0444); | ||
| 130 | |||
| 131 | MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 8GB)"); | ||
| 132 | module_param_named(vm_size, amdgpu_vm_size, int, 0444); | ||
| 133 | |||
| 134 | MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)"); | ||
| 135 | module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444); | ||
| 136 | |||
| 137 | MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); | ||
| 138 | module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); | ||
| 139 | |||
| 140 | static struct pci_device_id pciidlist[] = { | ||
| 141 | |||
| 142 | {0, 0, 0} | ||
| 143 | }; | ||
| 144 | |||
| 145 | MODULE_DEVICE_TABLE(pci, pciidlist); | ||
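
The PCI ID table is intentionally empty in this core patch; the ASIC-specific patches that follow populate it. `MODULE_DEVICE_TABLE` exports whatever ends up in the table so udev/modprobe can autoload the module when a matching device is found.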
| 146 | |||
| 147 | static struct drm_driver kms_driver; | ||
| 148 | |||
| 149 | static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev) | ||
| 150 | { | ||
| 151 | struct apertures_struct *ap; | ||
| 152 | bool primary = false; | ||
| 153 | |||
| 154 | ap = alloc_apertures(1); | ||
| 155 | if (!ap) | ||
| 156 | return -ENOMEM; | ||
| 157 | |||
| 158 | ap->ranges[0].base = pci_resource_start(pdev, 0); | ||
| 159 | ap->ranges[0].size = pci_resource_len(pdev, 0); | ||
| 160 | |||
| 161 | #ifdef CONFIG_X86 | ||
| 162 | primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; | ||
| 163 | #endif | ||
| 164 | remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary); | ||
| 165 | kfree(ap); | ||
| 166 | |||
| 167 | return 0; | ||
| 168 | } | ||
| 169 | |||
| 170 | static int amdgpu_pci_probe(struct pci_dev *pdev, | ||
| 171 | const struct pci_device_id *ent) | ||
| 172 | { | ||
| 173 | unsigned long flags = ent->driver_data; | ||
| 174 | int ret; | ||
| 175 | |||
| 176 | if ((flags & AMDGPU_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { | ||
| 177 | DRM_INFO("This hardware requires experimental hardware support.\n" | ||
| 178 | "See modparam exp_hw_support\n"); | ||
| 179 | return -ENODEV; | ||
| 180 | } | ||
| 181 | |||
| 182 | /* Get rid of things like offb */ | ||
| 183 | ret = amdgpu_kick_out_firmware_fb(pdev); | ||
| 184 | if (ret) | ||
| 185 | return ret; | ||
| 186 | |||
| 187 | return drm_get_pci_dev(pdev, ent, &kms_driver); | ||
| 188 | } | ||
| 189 | |||
| 190 | static void | ||
| 191 | amdgpu_pci_remove(struct pci_dev *pdev) | ||
| 192 | { | ||
| 193 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
| 194 | |||
| 195 | drm_put_dev(dev); | ||
| 196 | } | ||
| 197 | |||
| 198 | static int amdgpu_pmops_suspend(struct device *dev) | ||
| 199 | { | ||
| 200 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 201 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
| 202 | return amdgpu_suspend_kms(drm_dev, true, true); | ||
| 203 | } | ||
| 204 | |||
| 205 | static int amdgpu_pmops_resume(struct device *dev) | ||
| 206 | { | ||
| 207 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 208 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
| 209 | return amdgpu_resume_kms(drm_dev, true, true); | ||
| 210 | } | ||
| 211 | |||
| 212 | static int amdgpu_pmops_freeze(struct device *dev) | ||
| 213 | { | ||
| 214 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 215 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
| 216 | return amdgpu_suspend_kms(drm_dev, false, true); | ||
| 217 | } | ||
| 218 | |||
| 219 | static int amdgpu_pmops_thaw(struct device *dev) | ||
| 220 | { | ||
| 221 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 222 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
| 223 | return amdgpu_resume_kms(drm_dev, false, true); | ||
| 224 | } | ||
| 225 | |||
| 226 | static int amdgpu_pmops_runtime_suspend(struct device *dev) | ||
| 227 | { | ||
| 228 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 229 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
| 230 | int ret; | ||
| 231 | |||
| 232 | if (!amdgpu_device_is_px(drm_dev)) { | ||
| 233 | pm_runtime_forbid(dev); | ||
| 234 | return -EBUSY; | ||
| 235 | } | ||
| 236 | |||
| 237 | drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | ||
| 238 | drm_kms_helper_poll_disable(drm_dev); | ||
| 239 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); | ||
| 240 | |||
| 241 | ret = amdgpu_suspend_kms(drm_dev, false, false); | ||
| 242 | pci_save_state(pdev); | ||
| 243 | pci_disable_device(pdev); | ||
| 244 | pci_ignore_hotplug(pdev); | ||
| 245 | pci_set_power_state(pdev, PCI_D3cold); | ||
| 246 | drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; | ||
| 247 | |||
| 248 | return 0; | ||
| 249 | } | ||
| 250 | |||
| 251 | static int amdgpu_pmops_runtime_resume(struct device *dev) | ||
| 252 | { | ||
| 253 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 254 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
| 255 | int ret; | ||
| 256 | |||
| 257 | if (!amdgpu_device_is_px(drm_dev)) | ||
| 258 | return -EINVAL; | ||
| 259 | |||
| 260 | drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | ||
| 261 | |||
| 262 | pci_set_power_state(pdev, PCI_D0); | ||
| 263 | pci_restore_state(pdev); | ||
| 264 | ret = pci_enable_device(pdev); | ||
| 265 | if (ret) | ||
| 266 | return ret; | ||
| 267 | pci_set_master(pdev); | ||
| 268 | |||
| 269 | ret = amdgpu_resume_kms(drm_dev, false, false); | ||
| 270 | drm_kms_helper_poll_enable(drm_dev); | ||
| 271 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); | ||
| 272 | drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; | ||
| 273 | return 0; | ||
| 274 | } | ||
| 275 | |||
| 276 | static int amdgpu_pmops_runtime_idle(struct device *dev) | ||
| 277 | { | ||
| 278 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 279 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
| 280 | struct drm_crtc *crtc; | ||
| 281 | |||
| 282 | if (!amdgpu_device_is_px(drm_dev)) { | ||
| 283 | pm_runtime_forbid(dev); | ||
| 284 | return -EBUSY; | ||
| 285 | } | ||
| 286 | |||
| 287 | list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) { | ||
| 288 | if (crtc->enabled) { | ||
| 289 | DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); | ||
| 290 | return -EBUSY; | ||
| 291 | } | ||
| 292 | } | ||
| 293 | |||
| 294 | pm_runtime_mark_last_busy(dev); | ||
| 295 | pm_runtime_autosuspend(dev); | ||
| 296 | /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ | ||
| 297 | return 1; | ||
| 298 | } | ||
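
Returning 1 from the idle callback tells the runtime-PM core that the callback handled idling itself, so the device is powered down by the autosuspend timer rather than immediately. A hedged sketch of the setup that typically pairs with this — amdgpu performs it at driver load time, outside this hunk, so treat the exact call site and delay as assumptions:

```c
/* Hedged sketch: runtime-PM setup that pairs with the idle callback
 * above. The call site and 5-second delay are assumptions, not taken
 * from this patch. */
#include <linux/pm_runtime.h>

static void example_runtime_pm_setup(struct device *dev)
{
	pm_runtime_use_autosuspend(dev);		/* idle -> autosuspend */
	pm_runtime_set_autosuspend_delay(dev, 5000);	/* 5 s of idle first */
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);				/* permit runtime PM */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);		/* drop load-time ref */
}
```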
| 299 | |||
| 300 | long amdgpu_drm_ioctl(struct file *filp, | ||
| 301 | unsigned int cmd, unsigned long arg) | ||
| 302 | { | ||
| 303 | struct drm_file *file_priv = filp->private_data; | ||
| 304 | struct drm_device *dev; | ||
| 305 | long ret; | ||
| 306 | dev = file_priv->minor->dev; | ||
| 307 | ret = pm_runtime_get_sync(dev->dev); | ||
| 308 | if (ret < 0) | ||
| 309 | return ret; | ||
| 310 | |||
| 311 | ret = drm_ioctl(filp, cmd, arg); | ||
| 312 | |||
| 313 | pm_runtime_mark_last_busy(dev->dev); | ||
| 314 | pm_runtime_put_autosuspend(dev->dev); | ||
| 315 | return ret; | ||
| 316 | } | ||
| 317 | |||
| 318 | static const struct dev_pm_ops amdgpu_pm_ops = { | ||
| 319 | .suspend = amdgpu_pmops_suspend, | ||
| 320 | .resume = amdgpu_pmops_resume, | ||
| 321 | .freeze = amdgpu_pmops_freeze, | ||
| 322 | .thaw = amdgpu_pmops_thaw, | ||
| 323 | .poweroff = amdgpu_pmops_freeze, | ||
| 324 | .restore = amdgpu_pmops_resume, | ||
| 325 | .runtime_suspend = amdgpu_pmops_runtime_suspend, | ||
| 326 | .runtime_resume = amdgpu_pmops_runtime_resume, | ||
| 327 | .runtime_idle = amdgpu_pmops_runtime_idle, | ||
| 328 | }; | ||
| 329 | |||
| 330 | static const struct file_operations amdgpu_driver_kms_fops = { | ||
| 331 | .owner = THIS_MODULE, | ||
| 332 | .open = drm_open, | ||
| 333 | .release = drm_release, | ||
| 334 | .unlocked_ioctl = amdgpu_drm_ioctl, | ||
| 335 | .mmap = amdgpu_mmap, | ||
| 336 | .poll = drm_poll, | ||
| 337 | .read = drm_read, | ||
| 338 | #ifdef CONFIG_COMPAT | ||
| 339 | .compat_ioctl = amdgpu_kms_compat_ioctl, | ||
| 340 | #endif | ||
| 341 | }; | ||
| 342 | |||
| 343 | static struct drm_driver kms_driver = { | ||
| 344 | .driver_features = | ||
| 345 | DRIVER_USE_AGP | | ||
| 346 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | | ||
| 347 | DRIVER_PRIME | DRIVER_RENDER, | ||
| 348 | .dev_priv_size = 0, | ||
| 349 | .load = amdgpu_driver_load_kms, | ||
| 350 | .open = amdgpu_driver_open_kms, | ||
| 351 | .preclose = amdgpu_driver_preclose_kms, | ||
| 352 | .postclose = amdgpu_driver_postclose_kms, | ||
| 353 | .lastclose = amdgpu_driver_lastclose_kms, | ||
| 354 | .set_busid = drm_pci_set_busid, | ||
| 355 | .unload = amdgpu_driver_unload_kms, | ||
| 356 | .get_vblank_counter = amdgpu_get_vblank_counter_kms, | ||
| 357 | .enable_vblank = amdgpu_enable_vblank_kms, | ||
| 358 | .disable_vblank = amdgpu_disable_vblank_kms, | ||
| 359 | .get_vblank_timestamp = amdgpu_get_vblank_timestamp_kms, | ||
| 360 | .get_scanout_position = amdgpu_get_crtc_scanoutpos, | ||
| 361 | #if defined(CONFIG_DEBUG_FS) | ||
| 362 | .debugfs_init = amdgpu_debugfs_init, | ||
| 363 | .debugfs_cleanup = amdgpu_debugfs_cleanup, | ||
| 364 | #endif | ||
| 365 | .irq_preinstall = amdgpu_irq_preinstall, | ||
| 366 | .irq_postinstall = amdgpu_irq_postinstall, | ||
| 367 | .irq_uninstall = amdgpu_irq_uninstall, | ||
| 368 | .irq_handler = amdgpu_irq_handler, | ||
| 369 | .ioctls = amdgpu_ioctls_kms, | ||
| 370 | .gem_free_object = amdgpu_gem_object_free, | ||
| 371 | .gem_open_object = amdgpu_gem_object_open, | ||
| 372 | .gem_close_object = amdgpu_gem_object_close, | ||
| 373 | .dumb_create = amdgpu_mode_dumb_create, | ||
| 374 | .dumb_map_offset = amdgpu_mode_dumb_mmap, | ||
| 375 | .dumb_destroy = drm_gem_dumb_destroy, | ||
| 376 | .fops = &amdgpu_driver_kms_fops, | ||
| 377 | |||
| 378 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
| 379 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
| 380 | .gem_prime_export = amdgpu_gem_prime_export, | ||
| 381 | .gem_prime_import = drm_gem_prime_import, | ||
| 382 | .gem_prime_pin = amdgpu_gem_prime_pin, | ||
| 383 | .gem_prime_unpin = amdgpu_gem_prime_unpin, | ||
| 384 | .gem_prime_res_obj = amdgpu_gem_prime_res_obj, | ||
| 385 | .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table, | ||
| 386 | .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table, | ||
| 387 | .gem_prime_vmap = amdgpu_gem_prime_vmap, | ||
| 388 | .gem_prime_vunmap = amdgpu_gem_prime_vunmap, | ||
| 389 | |||
| 390 | .name = DRIVER_NAME, | ||
| 391 | .desc = DRIVER_DESC, | ||
| 392 | .date = DRIVER_DATE, | ||
| 393 | .major = KMS_DRIVER_MAJOR, | ||
| 394 | .minor = KMS_DRIVER_MINOR, | ||
| 395 | .patchlevel = KMS_DRIVER_PATCHLEVEL, | ||
| 396 | }; | ||
| 397 | |||
| 398 | static struct drm_driver *driver; | ||
| 399 | static struct pci_driver *pdriver; | ||
| 400 | |||
| 401 | static struct pci_driver amdgpu_kms_pci_driver = { | ||
| 402 | .name = DRIVER_NAME, | ||
| 403 | .id_table = pciidlist, | ||
| 404 | .probe = amdgpu_pci_probe, | ||
| 405 | .remove = amdgpu_pci_remove, | ||
| 406 | .driver.pm = &amdgpu_pm_ops, | ||
| 407 | }; | ||
| 408 | |||
| 409 | static int __init amdgpu_init(void) | ||
| 410 | { | ||
| 411 | #ifdef CONFIG_VGA_CONSOLE | ||
| 412 | if (vgacon_text_force()) { | ||
| 413 | DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); | ||
| 414 | return -EINVAL; | ||
| 415 | } | ||
| 416 | #endif | ||
| 417 | DRM_INFO("amdgpu kernel modesetting enabled.\n"); | ||
| 418 | driver = &kms_driver; | ||
| 419 | pdriver = &amdgpu_kms_pci_driver; | ||
| 420 | driver->driver_features |= DRIVER_MODESET; | ||
| 421 | driver->num_ioctls = amdgpu_max_kms_ioctl; | ||
| 422 | amdgpu_register_atpx_handler(); | ||
| 423 | |||
| 424 | /* let modprobe override vga console setting */ | ||
| 425 | return drm_pci_init(driver, pdriver); | ||
| 426 | } | ||
| 427 | |||
| 428 | static void __exit amdgpu_exit(void) | ||
| 429 | { | ||
| 430 | drm_pci_exit(driver, pdriver); | ||
| 431 | amdgpu_unregister_atpx_handler(); | ||
| 432 | } | ||
| 433 | |||
| 434 | module_init(amdgpu_init); | ||
| 435 | module_exit(amdgpu_exit); | ||
| 436 | |||
| 437 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
| 438 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
| 439 | MODULE_LICENSE("GPL and additional rights"); | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h new file mode 100644 index 000000000000..cceeb33c447a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h | |||
| @@ -0,0 +1,48 @@ | |||
| 1 | /* amdgpu_drv.h -- Private header for amdgpu driver -*- linux-c -*- | ||
| 2 | * | ||
| 3 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
| 4 | * Copyright 2000 VA Linux Systems, Inc., Fremont, California. | ||
| 5 | * All rights reserved. | ||
| 6 | * | ||
| 7 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 8 | * copy of this software and associated documentation files (the "Software"), | ||
| 9 | * to deal in the Software without restriction, including without limitation | ||
| 10 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 11 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 12 | * Software is furnished to do so, subject to the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the next | ||
| 15 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 16 | * Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 22 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 23 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 24 | * DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | */ | ||
| 27 | |||
| 28 | #ifndef __AMDGPU_DRV_H__ | ||
| 29 | #define __AMDGPU_DRV_H__ | ||
| 30 | |||
| 31 | #include <linux/firmware.h> | ||
| 32 | #include <linux/platform_device.h> | ||
| 33 | |||
| 34 | #include "amdgpu_family.h" | ||
| 35 | |||
| 36 | /* General customization: | ||
| 37 | */ | ||
| 38 | |||
| 39 | #define DRIVER_AUTHOR "AMD linux driver team" | ||
| 40 | |||
| 41 | #define DRIVER_NAME "amdgpu" | ||
| 42 | #define DRIVER_DESC "AMD GPU" | ||
| 43 | #define DRIVER_DATE "20150101" | ||
| 44 | |||
| 45 | long amdgpu_drm_ioctl(struct file *filp, | ||
| 46 | unsigned int cmd, unsigned long arg); | ||
| 47 | |||
| 48 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c new file mode 100644 index 000000000000..94138abe093b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c | |||
| @@ -0,0 +1,245 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2007-8 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the "Software"), | ||
| 7 | * to deal in the Software without restriction, including without limitation | ||
| 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 10 | * Software is furnished to do so, subject to the following conditions: | ||
| 11 | * | ||
| 12 | * The above copyright notice and this permission notice shall be included in | ||
| 13 | * all copies or substantial portions of the Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 22 | * | ||
| 23 | * Authors: Dave Airlie | ||
| 24 | * Alex Deucher | ||
| 25 | */ | ||
| 26 | #include <drm/drmP.h> | ||
| 27 | #include <drm/drm_crtc_helper.h> | ||
| 28 | #include <drm/amdgpu_drm.h> | ||
| 29 | #include "amdgpu.h" | ||
| 30 | #include "amdgpu_connectors.h" | ||
| 31 | #include "atom.h" | ||
| 32 | #include "atombios_encoders.h" | ||
| 33 | |||
| 34 | void | ||
| 35 | amdgpu_link_encoder_connector(struct drm_device *dev) | ||
| 36 | { | ||
| 37 | struct amdgpu_device *adev = dev->dev_private; | ||
| 38 | struct drm_connector *connector; | ||
| 39 | struct amdgpu_connector *amdgpu_connector; | ||
| 40 | struct drm_encoder *encoder; | ||
| 41 | struct amdgpu_encoder *amdgpu_encoder; | ||
| 42 | |||
| 43 | /* walk the list and link encoders to connectors */ | ||
| 44 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 45 | amdgpu_connector = to_amdgpu_connector(connector); | ||
| 46 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
| 47 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 48 | if (amdgpu_encoder->devices & amdgpu_connector->devices) { | ||
| 49 | drm_mode_connector_attach_encoder(connector, encoder); | ||
| 50 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
| 51 | amdgpu_atombios_encoder_init_backlight(amdgpu_encoder, connector); | ||
| 52 | adev->mode_info.bl_encoder = amdgpu_encoder; | ||
| 53 | } | ||
| 54 | } | ||
| 55 | } | ||
| 56 | } | ||
| 57 | } | ||
| 58 | |||
| 59 | void amdgpu_encoder_set_active_device(struct drm_encoder *encoder) | ||
| 60 | { | ||
| 61 | struct drm_device *dev = encoder->dev; | ||
| 62 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 63 | struct drm_connector *connector; | ||
| 64 | |||
| 65 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 66 | if (connector->encoder == encoder) { | ||
| 67 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 68 | amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices; | ||
| 69 | DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n", | ||
| 70 | amdgpu_encoder->active_device, amdgpu_encoder->devices, | ||
| 71 | amdgpu_connector->devices, encoder->encoder_type); | ||
| 72 | } | ||
| 73 | } | ||
| 74 | } | ||
| 75 | |||
| 76 | struct drm_connector * | ||
| 77 | amdgpu_get_connector_for_encoder(struct drm_encoder *encoder) | ||
| 78 | { | ||
| 79 | struct drm_device *dev = encoder->dev; | ||
| 80 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 81 | struct drm_connector *connector; | ||
| 82 | struct amdgpu_connector *amdgpu_connector; | ||
| 83 | |||
| 84 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 85 | amdgpu_connector = to_amdgpu_connector(connector); | ||
| 86 | if (amdgpu_encoder->active_device & amdgpu_connector->devices) | ||
| 87 | return connector; | ||
| 88 | } | ||
| 89 | return NULL; | ||
| 90 | } | ||
| 91 | |||
| 92 | struct drm_connector * | ||
| 93 | amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder) | ||
| 94 | { | ||
| 95 | struct drm_device *dev = encoder->dev; | ||
| 96 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 97 | struct drm_connector *connector; | ||
| 98 | struct amdgpu_connector *amdgpu_connector; | ||
| 99 | |||
| 100 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 101 | amdgpu_connector = to_amdgpu_connector(connector); | ||
| 102 | if (amdgpu_encoder->devices & amdgpu_connector->devices) | ||
| 103 | return connector; | ||
| 104 | } | ||
| 105 | return NULL; | ||
| 106 | } | ||
| 107 | |||
| 108 | struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder) | ||
| 109 | { | ||
| 110 | struct drm_device *dev = encoder->dev; | ||
| 111 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 112 | struct drm_encoder *other_encoder; | ||
| 113 | struct amdgpu_encoder *other_amdgpu_encoder; | ||
| 114 | |||
| 115 | if (amdgpu_encoder->is_ext_encoder) | ||
| 116 | return NULL; | ||
| 117 | |||
| 118 | list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { | ||
| 119 | if (other_encoder == encoder) | ||
| 120 | continue; | ||
| 121 | other_amdgpu_encoder = to_amdgpu_encoder(other_encoder); | ||
| 122 | if (other_amdgpu_encoder->is_ext_encoder && | ||
| 123 | (amdgpu_encoder->devices & other_amdgpu_encoder->devices)) | ||
| 124 | return other_encoder; | ||
| 125 | } | ||
| 126 | return NULL; | ||
| 127 | } | ||
| 128 | |||
| 129 | u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder) | ||
| 130 | { | ||
| 131 | struct drm_encoder *other_encoder = amdgpu_get_external_encoder(encoder); | ||
| 132 | |||
| 133 | if (other_encoder) { | ||
| 134 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(other_encoder); | ||
| 135 | |||
| 136 | switch (amdgpu_encoder->encoder_id) { | ||
| 137 | case ENCODER_OBJECT_ID_TRAVIS: | ||
| 138 | case ENCODER_OBJECT_ID_NUTMEG: | ||
| 139 | return amdgpu_encoder->encoder_id; | ||
| 140 | default: | ||
| 141 | return ENCODER_OBJECT_ID_NONE; | ||
| 142 | } | ||
| 143 | } | ||
| 144 | return ENCODER_OBJECT_ID_NONE; | ||
| 145 | } | ||
| 146 | |||
| 147 | void amdgpu_panel_mode_fixup(struct drm_encoder *encoder, | ||
| 148 | struct drm_display_mode *adjusted_mode) | ||
| 149 | { | ||
| 150 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 151 | struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; | ||
| 152 | unsigned hblank = native_mode->htotal - native_mode->hdisplay; | ||
| 153 | unsigned vblank = native_mode->vtotal - native_mode->vdisplay; | ||
| 154 | unsigned hover = native_mode->hsync_start - native_mode->hdisplay; | ||
| 155 | unsigned vover = native_mode->vsync_start - native_mode->vdisplay; | ||
| 156 | unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start; | ||
| 157 | unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start; | ||
| 158 | |||
| 159 | adjusted_mode->clock = native_mode->clock; | ||
| 160 | adjusted_mode->flags = native_mode->flags; | ||
| 161 | |||
| 162 | adjusted_mode->hdisplay = native_mode->hdisplay; | ||
| 163 | adjusted_mode->vdisplay = native_mode->vdisplay; | ||
| 164 | |||
| 165 | adjusted_mode->htotal = native_mode->hdisplay + hblank; | ||
| 166 | adjusted_mode->hsync_start = native_mode->hdisplay + hover; | ||
| 167 | adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width; | ||
| 168 | |||
| 169 | adjusted_mode->vtotal = native_mode->vdisplay + vblank; | ||
| 170 | adjusted_mode->vsync_start = native_mode->vdisplay + vover; | ||
| 171 | adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width; | ||
| 172 | |||
| 173 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
| 174 | |||
| 175 | adjusted_mode->crtc_hdisplay = native_mode->hdisplay; | ||
| 176 | adjusted_mode->crtc_vdisplay = native_mode->vdisplay; | ||
| 177 | |||
| 178 | adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank; | ||
| 179 | adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover; | ||
| 180 | adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width; | ||
| 181 | |||
| 182 | adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank; | ||
| 183 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover; | ||
| 184 | adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width; | ||
| 185 | |||
| 186 | } | ||
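
The fixup forces the panel's native pixel clock, resolution, and blanking geometry, so a smaller requested mode still drives the panel on its own timings (scaling to the requested size is handled elsewhere). Worked numbers for the standard 1920x1080@60 timing set:

```c
/* Sketch: re-deriving blanking geometry from a native mode, as the
 * fixup above does. Numbers are the CEA 1920x1080@60 timings. */
#include <stdio.h>

int main(void)
{
	/* native: htotal 2200, hsync_start 2008, hsync_end 2052 */
	unsigned hdisplay = 1920, htotal = 2200, hss = 2008, hse = 2052;

	unsigned hblank = htotal - hdisplay;	/* 280 */
	unsigned hover = hss - hdisplay;	/* 88: front porch */
	unsigned hsync_width = hse - hss;	/* 44 */

	printf("adjusted: htotal=%u hsync=[%u,%u]\n",
	       hdisplay + hblank,		/* 2200 */
	       hdisplay + hover,		/* 2008 */
	       hdisplay + hover + hsync_width);	/* 2052 */
	return 0;
}
```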
| 187 | |||
| 188 | bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder, | ||
| 189 | u32 pixel_clock) | ||
| 190 | { | ||
| 191 | struct drm_connector *connector; | ||
| 192 | struct amdgpu_connector *amdgpu_connector; | ||
| 193 | struct amdgpu_connector_atom_dig *dig_connector; | ||
| 194 | |||
| 195 | connector = amdgpu_get_connector_for_encoder(encoder); | ||
| 196 | /* if we don't have an active device yet, just use one of | ||
| 197 | * the connectors tied to the encoder. | ||
| 198 | */ | ||
| 199 | if (!connector) | ||
| 200 | connector = amdgpu_get_connector_for_encoder_init(encoder); | ||
| 201 | amdgpu_connector = to_amdgpu_connector(connector); | ||
| 202 | |||
| 203 | switch (connector->connector_type) { | ||
| 204 | case DRM_MODE_CONNECTOR_DVII: | ||
| 205 | case DRM_MODE_CONNECTOR_HDMIB: | ||
| 206 | if (amdgpu_connector->use_digital) { | ||
| 207 | /* HDMI 1.3 supports up to 340 MHz over single link */ | ||
| 208 | if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { | ||
| 209 | if (pixel_clock > 340000) | ||
| 210 | return true; | ||
| 211 | else | ||
| 212 | return false; | ||
| 213 | } else { | ||
| 214 | if (pixel_clock > 165000) | ||
| 215 | return true; | ||
| 216 | else | ||
| 217 | return false; | ||
| 218 | } | ||
| 219 | } else | ||
| 220 | return false; | ||
| 221 | case DRM_MODE_CONNECTOR_DVID: | ||
| 222 | case DRM_MODE_CONNECTOR_HDMIA: | ||
| 223 | case DRM_MODE_CONNECTOR_DisplayPort: | ||
| 224 | dig_connector = amdgpu_connector->con_priv; | ||
| 225 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | ||
| 226 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) | ||
| 227 | return false; | ||
| 228 | else { | ||
| 229 | /* HDMI 1.3 supports up to 340 MHz over single link */ | ||
| 230 | if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { | ||
| 231 | if (pixel_clock > 340000) | ||
| 232 | return true; | ||
| 233 | else | ||
| 234 | return false; | ||
| 235 | } else { | ||
| 236 | if (pixel_clock > 165000) | ||
| 237 | return true; | ||
| 238 | else | ||
| 239 | return false; | ||
| 240 | } | ||
| 241 | } | ||
| 242 | default: | ||
| 243 | return false; | ||
| 244 | } | ||
| 245 | } | ||
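
Once the sink type is known, the whole decision collapses to a single threshold: DP and eDP sinks never use dual-link, and TMDS sinks go dual-link above 340 MHz for HDMI (the HDMI 1.3 single-link ceiling) or 165 MHz for plain DVI. A condensed sketch, with pixel clocks in kHz to match the driver's units:

```c
/* Sketch: the dual-link decision above reduced to one comparison. */
#include <stdbool.h>
#include <stdio.h>

static bool needs_dual_link(bool is_dp, bool hdmi_sink, unsigned pclk_khz)
{
	if (is_dp)
		return false;			/* DP/eDP: never dual-link */
	return pclk_khz > (hdmi_sink ? 340000u	/* HDMI 1.3 single link */
				     : 165000u);/* single-link TMDS/DVI */
}

int main(void)
{
	printf("%d\n", needs_dual_link(false, false, 269000)); /* 1: 2560x1600 DVI */
	printf("%d\n", needs_dual_link(false, true, 297000));  /* 0: 4K30 HDMI */
	return 0;
}
```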
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c new file mode 100644 index 000000000000..2b1735d2efd6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | |||
| @@ -0,0 +1,432 @@ | |||
| 1 | /* | ||
| 2 | * Copyright © 2007 David Airlie | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice (including the next | ||
| 12 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 13 | * Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 21 | * DEALINGS IN THE SOFTWARE. | ||
| 22 | * | ||
| 23 | * Authors: | ||
| 24 | * David Airlie | ||
| 25 | */ | ||
| 26 | #include <linux/module.h> | ||
| 27 | #include <linux/slab.h> | ||
| 28 | #include <linux/fb.h> | ||
| 29 | |||
| 30 | #include <drm/drmP.h> | ||
| 31 | #include <drm/drm_crtc.h> | ||
| 32 | #include <drm/drm_crtc_helper.h> | ||
| 33 | #include <drm/amdgpu_drm.h> | ||
| 34 | #include "amdgpu.h" | ||
| 35 | |||
| 36 | #include <drm/drm_fb_helper.h> | ||
| 37 | |||
| 38 | #include <linux/vga_switcheroo.h> | ||
| 39 | |||
| 40 | /* object hierarchy - | ||
| 41 | this contains a helper + an amdgpu fb; | ||
| 42 | the helper contains a pointer to the amdgpu framebuffer base class. | ||
| 43 | */ | ||
| 44 | struct amdgpu_fbdev { | ||
| 45 | struct drm_fb_helper helper; | ||
| 46 | struct amdgpu_framebuffer rfb; | ||
| 47 | struct list_head fbdev_list; | ||
| 48 | struct amdgpu_device *adev; | ||
| 49 | }; | ||
| 50 | |||
| 51 | static struct fb_ops amdgpufb_ops = { | ||
| 52 | .owner = THIS_MODULE, | ||
| 53 | .fb_check_var = drm_fb_helper_check_var, | ||
| 54 | .fb_set_par = drm_fb_helper_set_par, | ||
| 55 | .fb_fillrect = cfb_fillrect, | ||
| 56 | .fb_copyarea = cfb_copyarea, | ||
| 57 | .fb_imageblit = cfb_imageblit, | ||
| 58 | .fb_pan_display = drm_fb_helper_pan_display, | ||
| 59 | .fb_blank = drm_fb_helper_blank, | ||
| 60 | .fb_setcmap = drm_fb_helper_setcmap, | ||
| 61 | .fb_debug_enter = drm_fb_helper_debug_enter, | ||
| 62 | .fb_debug_leave = drm_fb_helper_debug_leave, | ||
| 63 | }; | ||
| 64 | |||
| 65 | |||
| 66 | int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled) | ||
| 67 | { | ||
| 68 | int aligned = width; | ||
| 69 | int pitch_mask = 0; | ||
| 70 | |||
| 71 | switch (bpp / 8) { | ||
| 72 | case 1: | ||
| 73 | pitch_mask = 255; | ||
| 74 | break; | ||
| 75 | case 2: | ||
| 76 | pitch_mask = 127; | ||
| 77 | break; | ||
| 78 | case 3: | ||
| 79 | case 4: | ||
| 80 | pitch_mask = 63; | ||
| 81 | break; | ||
| 82 | } | ||
| 83 | |||
| 84 | aligned += pitch_mask; | ||
| 85 | aligned &= ~pitch_mask; | ||
| 86 | return aligned; | ||
| 87 | } | ||
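
The pitch mask is chosen per bytes-per-pixel — 256-pixel alignment at 8 bpp, 128 at 16 bpp, 64 at 24/32 bpp — and the classic add-and-mask idiom rounds the width up. A worked sketch:

```c
/* Sketch of the pitch alignment above, with worked examples: a
 * 1366-pixel-wide 32 bpp surface rounds up to 1408 pixels. */
#include <assert.h>
#include <stdio.h>

static int align_pitch(int width, int bpp)
{
	int mask = 0;

	switch (bpp / 8) {
	case 1:  mask = 255; break;	/* 8 bpp:  256-pixel alignment */
	case 2:  mask = 127; break;	/* 16 bpp: 128-pixel alignment */
	case 3:
	case 4:  mask = 63;  break;	/* 24/32 bpp: 64-pixel alignment */
	}
	return (width + mask) & ~mask;
}

int main(void)
{
	assert(align_pitch(1366, 32) == 1408);
	assert(align_pitch(1920, 32) == 1920);	/* already aligned */
	printf("1366 -> %d pixels\n", align_pitch(1366, 32));
	return 0;
}
```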
| 88 | |||
| 89 | static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj) | ||
| 90 | { | ||
| 91 | struct amdgpu_bo *rbo = gem_to_amdgpu_bo(gobj); | ||
| 92 | int ret; | ||
| 93 | |||
| 94 | ret = amdgpu_bo_reserve(rbo, false); | ||
| 95 | if (likely(ret == 0)) { | ||
| 96 | amdgpu_bo_kunmap(rbo); | ||
| 97 | amdgpu_bo_unpin(rbo); | ||
| 98 | amdgpu_bo_unreserve(rbo); | ||
| 99 | } | ||
| 100 | drm_gem_object_unreference_unlocked(gobj); | ||
| 101 | } | ||
| 102 | |||
| 103 | static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, | ||
| 104 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
| 105 | struct drm_gem_object **gobj_p) | ||
| 106 | { | ||
| 107 | struct amdgpu_device *adev = rfbdev->adev; | ||
| 108 | struct drm_gem_object *gobj = NULL; | ||
| 109 | struct amdgpu_bo *rbo = NULL; | ||
| 110 | bool fb_tiled = false; /* useful for testing */ | ||
| 111 | u32 tiling_flags = 0; | ||
| 112 | int ret; | ||
| 113 | int aligned_size, size; | ||
| 114 | int height = mode_cmd->height; | ||
| 115 | u32 bpp, depth; | ||
| 116 | |||
| 117 | drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp); | ||
| 118 | |||
| 119 | /* need to align pitch with crtc limits */ | ||
| 120 | mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, bpp, | ||
| 121 | fb_tiled) * ((bpp + 1) / 8); | ||
| 122 | |||
| 123 | height = ALIGN(mode_cmd->height, 8); | ||
| 124 | size = mode_cmd->pitches[0] * height; | ||
| 125 | aligned_size = ALIGN(size, PAGE_SIZE); | ||
| 126 | ret = amdgpu_gem_object_create(adev, aligned_size, 0, | ||
| 127 | AMDGPU_GEM_DOMAIN_VRAM, | ||
| 128 | 0, true, | ||
| 129 | &gobj); | ||
| 130 | if (ret) { | ||
| 131 | printk(KERN_ERR "failed to allocate framebuffer (%d bytes)\n", | ||
| 132 | aligned_size); | ||
| 133 | return -ENOMEM; | ||
| 134 | } | ||
| 135 | rbo = gem_to_amdgpu_bo(gobj); | ||
| 136 | |||
| 137 | if (fb_tiled) | ||
| 138 | tiling_flags = AMDGPU_TILING_MACRO; | ||
| 139 | |||
| 140 | #ifdef __BIG_ENDIAN | ||
| 141 | switch (bpp) { | ||
| 142 | case 32: | ||
| 143 | tiling_flags |= AMDGPU_TILING_SWAP_32BIT; | ||
| 144 | break; | ||
| 145 | case 16: | ||
| 146 | tiling_flags |= AMDGPU_TILING_SWAP_16BIT; | ||
| 147 | default: | ||
| 148 | break; | ||
| 149 | } | ||
| 150 | #endif | ||
| 151 | |||
| 152 | ret = amdgpu_bo_reserve(rbo, false); | ||
| 153 | if (unlikely(ret != 0)) | ||
| 154 | goto out_unref; | ||
| 155 | |||
| 156 | if (tiling_flags) { | ||
| 157 | ret = amdgpu_bo_set_tiling_flags(rbo, | ||
| 158 | tiling_flags | AMDGPU_TILING_SURFACE); | ||
| 159 | if (ret) | ||
| 160 | dev_err(adev->dev, "FB failed to set tiling flags\n"); | ||
| 161 | } | ||
| 162 | |||
| 163 | |||
| 164 | ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL); | ||
| 165 | if (ret) { | ||
| 166 | amdgpu_bo_unreserve(rbo); | ||
| 167 | goto out_unref; | ||
| 168 | } | ||
| 169 | ret = amdgpu_bo_kmap(rbo, NULL); | ||
| 170 | amdgpu_bo_unreserve(rbo); | ||
| 171 | if (ret) { | ||
| 172 | goto out_unref; | ||
| 173 | } | ||
| 174 | |||
| 175 | *gobj_p = gobj; | ||
| 176 | return 0; | ||
| 177 | out_unref: | ||
| 178 | amdgpufb_destroy_pinned_object(gobj); | ||
| 179 | *gobj_p = NULL; | ||
| 180 | return ret; | ||
| 181 | } | ||
| 182 | |||
| 183 | static int amdgpufb_create(struct drm_fb_helper *helper, | ||
| 184 | struct drm_fb_helper_surface_size *sizes) | ||
| 185 | { | ||
| 186 | struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper; | ||
| 187 | struct amdgpu_device *adev = rfbdev->adev; | ||
| 188 | struct fb_info *info; | ||
| 189 | struct drm_framebuffer *fb = NULL; | ||
| 190 | struct drm_mode_fb_cmd2 mode_cmd; | ||
| 191 | struct drm_gem_object *gobj = NULL; | ||
| 192 | struct amdgpu_bo *rbo = NULL; | ||
| 193 | struct device *device = &adev->pdev->dev; | ||
| 194 | int ret; | ||
| 195 | unsigned long tmp; | ||
| 196 | |||
| 197 | mode_cmd.width = sizes->surface_width; | ||
| 198 | mode_cmd.height = sizes->surface_height; | ||
| 199 | |||
| 200 | if (sizes->surface_bpp == 24) | ||
| 201 | sizes->surface_bpp = 32; | ||
| 202 | |||
| 203 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, | ||
| 204 | sizes->surface_depth); | ||
| 205 | |||
| 206 | ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj); | ||
| 207 | if (ret) { | ||
| 208 | DRM_ERROR("failed to create fbcon object %d\n", ret); | ||
| 209 | return ret; | ||
| 210 | } | ||
| 211 | |||
| 212 | rbo = gem_to_amdgpu_bo(gobj); | ||
| 213 | |||
| 214 | /* okay we have an object now allocate the framebuffer */ | ||
| 215 | info = framebuffer_alloc(0, device); | ||
| 216 | if (info == NULL) { | ||
| 217 | ret = -ENOMEM; | ||
| 218 | goto out_unref; | ||
| 219 | } | ||
| 220 | |||
| 221 | info->par = rfbdev; | ||
| 222 | |||
| 223 | ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj); | ||
| 224 | if (ret) { | ||
| 225 | DRM_ERROR("failed to initialize framebuffer %d\n", ret); | ||
| 226 | goto out_unref; | ||
| 227 | } | ||
| 228 | |||
| 229 | fb = &rfbdev->rfb.base; | ||
| 230 | |||
| 231 | /* setup helper */ | ||
| 232 | rfbdev->helper.fb = fb; | ||
| 233 | rfbdev->helper.fbdev = info; | ||
| 234 | |||
| 235 | memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo)); | ||
| 236 | |||
| 237 | strcpy(info->fix.id, "amdgpudrmfb"); | ||
| 238 | |||
| 239 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); | ||
| 240 | |||
| 241 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; | ||
| 242 | info->fbops = &amdgpufb_ops; | ||
| 243 | |||
| 244 | tmp = amdgpu_bo_gpu_offset(rbo) - adev->mc.vram_start; | ||
| 245 | info->fix.smem_start = adev->mc.aper_base + tmp; | ||
| 246 | info->fix.smem_len = amdgpu_bo_size(rbo); | ||
| 247 | info->screen_base = rbo->kptr; | ||
| 248 | info->screen_size = amdgpu_bo_size(rbo); | ||
| 249 | |||
| 250 | drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); | ||
| 251 | |||
| 252 | /* setup aperture base/size for vesafb takeover */ | ||
| 253 | info->apertures = alloc_apertures(1); | ||
| 254 | if (!info->apertures) { | ||
| 255 | ret = -ENOMEM; | ||
| 256 | goto out_unref; | ||
| 257 | } | ||
| 258 | info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base; | ||
| 259 | info->apertures->ranges[0].size = adev->mc.aper_size; | ||
| 260 | |||
| 261 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ | ||
| 262 | |||
| 263 | if (info->screen_base == NULL) { | ||
| 264 | ret = -ENOSPC; | ||
| 265 | goto out_unref; | ||
| 266 | } | ||
| 267 | |||
| 268 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | ||
| 269 | if (ret) { | ||
| 270 | ret = -ENOMEM; | ||
| 271 | goto out_unref; | ||
| 272 | } | ||
| 273 | |||
| 274 | DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); | ||
| 275 | DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base); | ||
| 276 | DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(rbo)); | ||
| 277 | DRM_INFO("fb depth is %d\n", fb->depth); | ||
| 278 | DRM_INFO(" pitch is %d\n", fb->pitches[0]); | ||
| 279 | |||
| 280 | vga_switcheroo_client_fb_set(adev->ddev->pdev, info); | ||
| 281 | return 0; | ||
| 282 | |||
| 283 | out_unref: | ||
| 287 | if (fb && ret) { | ||
| 288 | drm_gem_object_unreference(gobj); | ||
| 289 | drm_framebuffer_unregister_private(fb); | ||
| 290 | drm_framebuffer_cleanup(fb); | ||
| 291 | kfree(fb); | ||
| 292 | } | ||
| 293 | return ret; | ||
| 294 | } | ||
| 295 | |||
| 296 | void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev) | ||
| 297 | { | ||
| 298 | if (adev->mode_info.rfbdev) | ||
| 299 | drm_fb_helper_hotplug_event(&adev->mode_info.rfbdev->helper); | ||
| 300 | } | ||
| 301 | |||
| 302 | static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev) | ||
| 303 | { | ||
| 304 | struct fb_info *info; | ||
| 305 | struct amdgpu_framebuffer *rfb = &rfbdev->rfb; | ||
| 306 | |||
| 307 | if (rfbdev->helper.fbdev) { | ||
| 308 | info = rfbdev->helper.fbdev; | ||
| 309 | |||
| 310 | unregister_framebuffer(info); | ||
| 311 | if (info->cmap.len) | ||
| 312 | fb_dealloc_cmap(&info->cmap); | ||
| 313 | framebuffer_release(info); | ||
| 314 | } | ||
| 315 | |||
| 316 | if (rfb->obj) { | ||
| 317 | amdgpufb_destroy_pinned_object(rfb->obj); | ||
| 318 | rfb->obj = NULL; | ||
| 319 | } | ||
| 320 | drm_fb_helper_fini(&rfbdev->helper); | ||
| 321 | drm_framebuffer_unregister_private(&rfb->base); | ||
| 322 | drm_framebuffer_cleanup(&rfb->base); | ||
| 323 | |||
| 324 | return 0; | ||
| 325 | } | ||
| 326 | |||
| 327 | /** Sets the color ramps on behalf of fbcon */ | ||
| 328 | static void amdgpu_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | ||
| 329 | u16 blue, int regno) | ||
| 330 | { | ||
| 331 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 332 | |||
| 333 | amdgpu_crtc->lut_r[regno] = red >> 6; | ||
| 334 | amdgpu_crtc->lut_g[regno] = green >> 6; | ||
| 335 | amdgpu_crtc->lut_b[regno] = blue >> 6; | ||
| 336 | } | ||
| 337 | |||
| 338 | /** Gets the color ramps on behalf of fbcon */ | ||
| 339 | static void amdgpu_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
| 340 | u16 *blue, int regno) | ||
| 341 | { | ||
| 342 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 343 | |||
| 344 | *red = amdgpu_crtc->lut_r[regno] << 6; | ||
| 345 | *green = amdgpu_crtc->lut_g[regno] << 6; | ||
| 346 | *blue = amdgpu_crtc->lut_b[regno] << 6; | ||
| 347 | } | ||
| 348 | |||
| 349 | static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = { | ||
| 350 | .gamma_set = amdgpu_crtc_fb_gamma_set, | ||
| 351 | .gamma_get = amdgpu_crtc_fb_gamma_get, | ||
| 352 | .fb_probe = amdgpufb_create, | ||
| 353 | }; | ||
| 354 | |||
| 355 | int amdgpu_fbdev_init(struct amdgpu_device *adev) | ||
| 356 | { | ||
| 357 | struct amdgpu_fbdev *rfbdev; | ||
| 358 | int bpp_sel = 32; | ||
| 359 | int ret; | ||
| 360 | |||
| 361 | /* don't init fbdev on hw without DCE */ | ||
| 362 | if (!adev->mode_info.mode_config_initialized) | ||
| 363 | return 0; | ||
| 364 | |||
| 365 | /* select 8 bpp console on low vram cards */ | ||
| 366 | if (adev->mc.real_vram_size <= (32*1024*1024)) | ||
| 367 | bpp_sel = 8; | ||
| 368 | |||
| 369 | rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL); | ||
| 370 | if (!rfbdev) | ||
| 371 | return -ENOMEM; | ||
| 372 | |||
| 373 | rfbdev->adev = adev; | ||
| 374 | adev->mode_info.rfbdev = rfbdev; | ||
| 375 | |||
| 376 | drm_fb_helper_prepare(adev->ddev, &rfbdev->helper, | ||
| 377 | &amdgpu_fb_helper_funcs); | ||
| 378 | |||
| 379 | ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper, | ||
| 380 | adev->mode_info.num_crtc, | ||
| 381 | AMDGPUFB_CONN_LIMIT); | ||
| 382 | if (ret) { | ||
| 383 | kfree(rfbdev); | ||
| 384 | return ret; | ||
| 385 | } | ||
| 386 | |||
| 387 | drm_fb_helper_single_add_all_connectors(&rfbdev->helper); | ||
| 388 | |||
| 389 | /* disable all the possible outputs/crtcs before entering KMS mode */ | ||
| 390 | drm_helper_disable_unused_functions(adev->ddev); | ||
| 391 | |||
| 392 | drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); | ||
| 393 | return 0; | ||
| 394 | } | ||
| 395 | |||
| 396 | void amdgpu_fbdev_fini(struct amdgpu_device *adev) | ||
| 397 | { | ||
| 398 | if (!adev->mode_info.rfbdev) | ||
| 399 | return; | ||
| 400 | |||
| 401 | amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev); | ||
| 402 | kfree(adev->mode_info.rfbdev); | ||
| 403 | adev->mode_info.rfbdev = NULL; | ||
| 404 | } | ||
| 405 | |||
| 406 | void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state) | ||
| 407 | { | ||
| 408 | if (adev->mode_info.rfbdev) | ||
| 409 | fb_set_suspend(adev->mode_info.rfbdev->helper.fbdev, state); | ||
| 410 | } | ||
| 411 | |||
| 412 | int amdgpu_fbdev_total_size(struct amdgpu_device *adev) | ||
| 413 | { | ||
| 414 | struct amdgpu_bo *robj; | ||
| 415 | int size = 0; | ||
| 416 | |||
| 417 | if (!adev->mode_info.rfbdev) | ||
| 418 | return 0; | ||
| 419 | |||
| 420 | robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj); | ||
| 421 | size += amdgpu_bo_size(robj); | ||
| 422 | return size; | ||
| 423 | } | ||
| 424 | |||
| 425 | bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) | ||
| 426 | { | ||
| 427 | if (!adev->mode_info.rfbdev) | ||
| 428 | return false; | ||
| 429 | if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj)) | ||
| 430 | return true; | ||
| 431 | return false; | ||
| 432 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c new file mode 100644 index 000000000000..fc63855ed517 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |||
| @@ -0,0 +1,1139 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2009 Jerome Glisse. | ||
| 3 | * All Rights Reserved. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the | ||
| 7 | * "Software"), to deal in the Software without restriction, including | ||
| 8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 10 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 11 | * the following conditions: | ||
| 12 | * | ||
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 20 | * | ||
| 21 | * The above copyright notice and this permission notice (including the | ||
| 22 | * next paragraph) shall be included in all copies or substantial portions | ||
| 23 | * of the Software. | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | /* | ||
| 27 | * Authors: | ||
| 28 | * Jerome Glisse <glisse@freedesktop.org> | ||
| 29 | * Dave Airlie | ||
| 30 | */ | ||
| 31 | #include <linux/seq_file.h> | ||
| 32 | #include <linux/atomic.h> | ||
| 33 | #include <linux/wait.h> | ||
| 34 | #include <linux/kref.h> | ||
| 35 | #include <linux/slab.h> | ||
| 36 | #include <linux/firmware.h> | ||
| 37 | #include <drm/drmP.h> | ||
| 38 | #include "amdgpu.h" | ||
| 39 | #include "amdgpu_trace.h" | ||
| 40 | |||
| 41 | /* | ||
| 42 | * Fences | ||
| 43 | * Fences mark an event in the GPU's pipeline and are used | ||
| 44 | * for GPU/CPU synchronization. When the fence is written, | ||
| 45 | * it is expected that all buffers associated with that fence | ||
| 46 | * are no longer in use by the associated ring on the GPU and | ||
| 47 | * that the relevant GPU caches have been flushed. | ||
| 48 | */ | ||
| 49 | |||
| 50 | /** | ||
| 51 | * amdgpu_fence_write - write a fence value | ||
| 52 | * | ||
| 53 | * @ring: ring the fence is associated with | ||
| 54 | * @seq: sequence number to write | ||
| 55 | * | ||
| 56 | * Writes a fence value to memory (all asics). | ||
| 57 | */ | ||
| 58 | static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq) | ||
| 59 | { | ||
| 60 | struct amdgpu_fence_driver *drv = &ring->fence_drv; | ||
| 61 | |||
| 62 | if (drv->cpu_addr) | ||
| 63 | *drv->cpu_addr = cpu_to_le32(seq); | ||
| 64 | } | ||
| 65 | |||
| 66 | /** | ||
| 67 | * amdgpu_fence_read - read a fence value | ||
| 68 | * | ||
| 69 | * @ring: ring the fence is associated with | ||
| 70 | * | ||
| 71 | * Reads a fence value from memory (all asics). | ||
| 72 | * Returns the value of the fence read from memory. | ||
| 73 | */ | ||
| 74 | static u32 amdgpu_fence_read(struct amdgpu_ring *ring) | ||
| 75 | { | ||
| 76 | struct amdgpu_fence_driver *drv = &ring->fence_drv; | ||
| 77 | u32 seq = 0; | ||
| 78 | |||
| 79 | if (drv->cpu_addr) | ||
| 80 | seq = le32_to_cpu(*drv->cpu_addr); | ||
| 81 | else | ||
| 82 | seq = lower_32_bits(atomic64_read(&drv->last_seq)); | ||
| 83 | |||
| 84 | return seq; | ||
| 85 | } | ||
| 86 | |||
| 87 | /** | ||
| 88 | * amdgpu_fence_schedule_check - schedule lockup check | ||
| 89 | * | ||
| 90 | * @ring: pointer to struct amdgpu_ring | ||
| 91 | * | ||
| 92 | * Queues a delayed work item to check for lockups. | ||
| 93 | */ | ||
| 94 | static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring) | ||
| 95 | { | ||
| 96 | /* | ||
| 97 | * Do not reset the timer here with mod_delayed_work, | ||
| 98 | * this can livelock in an interaction with TTM delayed destroy. | ||
| 99 | */ | ||
| 100 | queue_delayed_work(system_power_efficient_wq, | ||
| 101 | &ring->fence_drv.lockup_work, | ||
| 102 | AMDGPU_FENCE_JIFFIES_TIMEOUT); | ||
| 103 | } | ||
| 104 | |||
| 105 | /** | ||
| 106 | * amdgpu_fence_emit - emit a fence on the requested ring | ||
| 107 | * | ||
| 108 | * @ring: ring the fence is associated with | ||
| 109 | * @owner: creator of the fence | ||
| 110 | * @fence: amdgpu fence object | ||
| 111 | * | ||
| 112 | * Emits a fence command on the requested ring (all asics). | ||
| 113 | * Returns 0 on success, -ENOMEM on failure. | ||
| 114 | */ | ||
| 115 | int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, | ||
| 116 | struct amdgpu_fence **fence) | ||
| 117 | { | ||
| 118 | struct amdgpu_device *adev = ring->adev; | ||
| 119 | |||
| 120 | /* we are protected by the ring emission mutex */ | ||
| 121 | *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); | ||
| 122 | if ((*fence) == NULL) { | ||
| 123 | return -ENOMEM; | ||
| 124 | } | ||
| 125 | (*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx]; | ||
| 126 | (*fence)->ring = ring; | ||
| 127 | (*fence)->owner = owner; | ||
| 128 | fence_init(&(*fence)->base, &amdgpu_fence_ops, | ||
| 129 | &adev->fence_queue.lock, adev->fence_context + ring->idx, | ||
| 130 | (*fence)->seq); | ||
| 131 | amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, (*fence)->seq, false); | ||
| 132 | trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq); | ||
| 133 | return 0; | ||
| 134 | } | ||
| 135 | |||
| 136 | /** | ||
| 137 | * amdgpu_fence_check_signaled - callback from fence_queue | ||
| 138 | * | ||
| 139 | * this function is called with fence_queue lock held, which is also used | ||
| 140 | * for the fence locking itself, so unlocked variants are used for | ||
| 141 | * fence_signal and remove_wait_queue. | ||
| 142 | */ | ||
| 143 | static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key) | ||
| 144 | { | ||
| 145 | struct amdgpu_fence *fence; | ||
| 146 | struct amdgpu_device *adev; | ||
| 147 | u64 seq; | ||
| 148 | int ret; | ||
| 149 | |||
| 150 | fence = container_of(wait, struct amdgpu_fence, fence_wake); | ||
| 151 | adev = fence->ring->adev; | ||
| 152 | |||
| 153 | /* | ||
| 154 | * We cannot use amdgpu_fence_process here because we're already | ||
| 155 | * in the waitqueue, in a call from wake_up_all. | ||
| 156 | */ | ||
| 157 | seq = atomic64_read(&fence->ring->fence_drv.last_seq); | ||
| 158 | if (seq >= fence->seq) { | ||
| 159 | ret = fence_signal_locked(&fence->base); | ||
| 160 | if (!ret) | ||
| 161 | FENCE_TRACE(&fence->base, "signaled from irq context\n"); | ||
| 162 | else | ||
| 163 | FENCE_TRACE(&fence->base, "was already signaled\n"); | ||
| 164 | |||
| 165 | amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src, | ||
| 166 | fence->ring->fence_drv.irq_type); | ||
| 167 | __remove_wait_queue(&adev->fence_queue, &fence->fence_wake); | ||
| 168 | fence_put(&fence->base); | ||
| 169 | } else | ||
| 170 | FENCE_TRACE(&fence->base, "pending\n"); | ||
| 171 | return 0; | ||
| 172 | } | ||
| 173 | |||
| 174 | /** | ||
| 175 | * amdgpu_fence_activity - check for fence activity | ||
| 176 | * | ||
| 177 | * @ring: pointer to struct amdgpu_ring | ||
| 178 | * | ||
| 179 | * Checks the current fence value and calculates the last | ||
| 180 | * signaled fence value. Returns true if activity occurred | ||
| 181 | * on the ring and the fence_queue should be woken up. | ||
| 182 | */ | ||
| 183 | static bool amdgpu_fence_activity(struct amdgpu_ring *ring) | ||
| 184 | { | ||
| 185 | uint64_t seq, last_seq, last_emitted; | ||
| 186 | unsigned count_loop = 0; | ||
| 187 | bool wake = false; | ||
| 188 | |||
| 189 | /* Note there is a scenario here for an infinite loop, but it's | ||
| 190 | * very unlikely to happen. For it to happen, the current polling | ||
| 191 | * process needs to be interrupted by another process, and that | ||
| 192 | * other process needs to update last_seq between the atomic read | ||
| 193 | * and xchg of the current process. | ||
| 194 | * | ||
| 195 | * Moreover, for this to become an infinite loop, new fences need | ||
| 196 | * to be signaled continuously, i.e. amdgpu_fence_read needs to | ||
| 197 | * return a different value each time for both the currently | ||
| 198 | * polling process and the other process that updates last_seq | ||
| 199 | * between the atomic read and xchg of the current process. And | ||
| 200 | * the value the other process sets as last_seq must be higher | ||
| 201 | * than the seq value we just read, which means the current process | ||
| 202 | * needs to be interrupted after amdgpu_fence_read and before the | ||
| 203 | * atomic xchg. | ||
| 204 | * | ||
| 205 | * To be even safer, we count the number of times we loop and | ||
| 206 | * bail out after 10 iterations, accepting that we might have | ||
| 207 | * temporarily set last_seq not to the actual last signaled | ||
| 208 | * seq but to an older one. | ||
| 209 | */ | ||
| 210 | last_seq = atomic64_read(&ring->fence_drv.last_seq); | ||
| 211 | do { | ||
| 212 | last_emitted = ring->fence_drv.sync_seq[ring->idx]; | ||
| 213 | seq = amdgpu_fence_read(ring); | ||
| 214 | seq |= last_seq & 0xffffffff00000000LL; | ||
| 215 | if (seq < last_seq) { | ||
| 216 | seq &= 0xffffffff; | ||
| 217 | seq |= last_emitted & 0xffffffff00000000LL; | ||
| 218 | } | ||
| 219 | |||
| 220 | if (seq <= last_seq || seq > last_emitted) { | ||
| 221 | break; | ||
| 222 | } | ||
| 223 | /* If we loop again we don't want to return without | ||
| 224 | * checking if a fence is signaled, since it means the | ||
| 225 | * seq we just read is different from the previous one. | ||
| 226 | */ | ||
| 227 | wake = true; | ||
| 228 | last_seq = seq; | ||
| 229 | if ((count_loop++) > 10) { | ||
| 230 | /* We looped too many times; leave with the | ||
| 231 | * fact that we might have set an older fence | ||
| 232 | * seq than the current real last seq signaled | ||
| 233 | * by the hw. | ||
| 234 | */ | ||
| 235 | break; | ||
| 236 | } | ||
| 237 | } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); | ||
| 238 | |||
| 239 | if (seq < last_emitted) | ||
| 240 | amdgpu_fence_schedule_check(ring); | ||
| 241 | |||
| 242 | return wake; | ||
| 243 | } | ||
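The loop above is effectively 64-bit sequence reconstruction: the hardware only writes back 32 bits, so the upper half is borrowed from the last known 64-bit value, and a backwards jump in the low half is treated as a wrap, in which case the upper half of the last emitted sequence is used instead. A standalone model of just that step (the sample values are hypothetical):

```c
#include <stdint.h>
#include <stdio.h>

/* Extend a 32-bit hardware fence value to 64 bits using the last
 * known 64-bit sequence and the last emitted 64-bit sequence. */
static uint64_t extend_seq(uint32_t hw_seq, uint64_t last_seq,
			   uint64_t last_emitted)
{
	uint64_t seq = hw_seq | (last_seq & 0xffffffff00000000ULL);

	if (seq < last_seq) {
		/* low half went backwards: it wrapped, so take the
		 * upper bits from the last emitted sequence instead */
		seq = hw_seq | (last_emitted & 0xffffffff00000000ULL);
	}
	return seq;
}

int main(void)
{
	/* hardware wrapped from 0x1fffffffe past the 32-bit boundary */
	printf("0x%llx\n", (unsigned long long)
	       extend_seq(0x5, 0x1fffffffeULL, 0x200000007ULL));
	return 0;
}
```

This prints 0x200000005, i.e. the wrap is absorbed into the upper half.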
| 244 | |||
| 245 | /** | ||
| 246 | * amdgpu_fence_check_lockup - check for hardware lockup | ||
| 247 | * | ||
| 248 | * @work: delayed work item | ||
| 249 | * | ||
| 250 | * Checks for fence activity and, if there is none, probes | ||
| 251 | * the hardware to see if a lockup occurred. | ||
| 252 | */ | ||
| 253 | static void amdgpu_fence_check_lockup(struct work_struct *work) | ||
| 254 | { | ||
| 255 | struct amdgpu_fence_driver *fence_drv; | ||
| 256 | struct amdgpu_ring *ring; | ||
| 257 | |||
| 258 | fence_drv = container_of(work, struct amdgpu_fence_driver, | ||
| 259 | lockup_work.work); | ||
| 260 | ring = fence_drv->ring; | ||
| 261 | |||
| 262 | if (!down_read_trylock(&ring->adev->exclusive_lock)) { | ||
| 263 | /* just reschedule the check if a reset is going on */ | ||
| 264 | amdgpu_fence_schedule_check(ring); | ||
| 265 | return; | ||
| 266 | } | ||
| 267 | |||
| 268 | if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) { | ||
| 269 | fence_drv->delayed_irq = false; | ||
| 270 | amdgpu_irq_update(ring->adev, fence_drv->irq_src, | ||
| 271 | fence_drv->irq_type); | ||
| 272 | } | ||
| 273 | |||
| 274 | if (amdgpu_fence_activity(ring)) | ||
| 275 | wake_up_all(&ring->adev->fence_queue); | ||
| 276 | else if (amdgpu_ring_is_lockup(ring)) { | ||
| 277 | /* good news, we believe it's a lockup */ | ||
| 278 | dev_warn(ring->adev->dev, "GPU lockup (current fence id " | ||
| 279 | "0x%016llx last fence id 0x%016llx on ring %d)\n", | ||
| 280 | (uint64_t)atomic64_read(&fence_drv->last_seq), | ||
| 281 | fence_drv->sync_seq[ring->idx], ring->idx); | ||
| 282 | |||
| 283 | /* remember that we need a reset */ | ||
| 284 | ring->adev->needs_reset = true; | ||
| 285 | wake_up_all(&ring->adev->fence_queue); | ||
| 286 | } | ||
| 287 | up_read(&ring->adev->exclusive_lock); | ||
| 288 | } | ||
| 289 | |||
| 290 | /** | ||
| 291 | * amdgpu_fence_process - process a fence | ||
| 292 | * | ||
| 293 | * @ring: ring the fence is associated with | ||
| 295 | * | ||
| 296 | * Checks the current fence value and wakes the fence queue | ||
| 297 | * if the sequence number has increased (all asics). | ||
| 298 | */ | ||
| 299 | void amdgpu_fence_process(struct amdgpu_ring *ring) | ||
| 300 | { | ||
| 301 | uint64_t seq, last_seq, last_emitted; | ||
| 302 | unsigned count_loop = 0; | ||
| 303 | bool wake = false; | ||
| 304 | |||
| 305 | /* Note there is a scenario here for an infinite loop, but it's | ||
| 306 | * very unlikely to happen. For it to happen, the current polling | ||
| 307 | * process needs to be interrupted by another process, and that | ||
| 308 | * other process needs to update last_seq between the atomic read | ||
| 309 | * and xchg of the current process. | ||
| 310 | * | ||
| 311 | * Moreover, for this to become an infinite loop, new fences need | ||
| 312 | * to be signaled continuously, i.e. amdgpu_fence_read needs to | ||
| 313 | * return a different value each time for both the currently | ||
| 314 | * polling process and the other process that updates last_seq | ||
| 315 | * between the atomic read and xchg of the current process. And | ||
| 316 | * the value the other process sets as last_seq must be higher | ||
| 317 | * than the seq value we just read, which means the current process | ||
| 318 | * needs to be interrupted after amdgpu_fence_read and before the | ||
| 319 | * atomic xchg. | ||
| 320 | * | ||
| 321 | * To be even safer, we count the number of times we loop and | ||
| 322 | * bail out after 10 iterations, accepting that we might have | ||
| 323 | * temporarily set last_seq not to the actual last signaled | ||
| 324 | * seq but to an older one. | ||
| 325 | */ | ||
| 326 | last_seq = atomic64_read(&ring->fence_drv.last_seq); | ||
| 327 | do { | ||
| 328 | last_emitted = ring->fence_drv.sync_seq[ring->idx]; | ||
| 329 | seq = amdgpu_fence_read(ring); | ||
| 330 | seq |= last_seq & 0xffffffff00000000LL; | ||
| 331 | if (seq < last_seq) { | ||
| 332 | seq &= 0xffffffff; | ||
| 333 | seq |= last_emitted & 0xffffffff00000000LL; | ||
| 334 | } | ||
| 335 | |||
| 336 | if (seq <= last_seq || seq > last_emitted) { | ||
| 337 | break; | ||
| 338 | } | ||
| 339 | /* If we loop again we don't want to return without | ||
| 340 | * checking if a fence is signaled, since it means the | ||
| 341 | * seq we just read is different from the previous one. | ||
| 342 | */ | ||
| 343 | wake = true; | ||
| 344 | last_seq = seq; | ||
| 345 | if ((count_loop++) > 10) { | ||
| 346 | /* We looped too many times; leave with the | ||
| 347 | * fact that we might have set an older fence | ||
| 348 | * seq than the current real last seq signaled | ||
| 349 | * by the hw. | ||
| 350 | */ | ||
| 351 | break; | ||
| 352 | } | ||
| 353 | } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); | ||
| 354 | |||
| 355 | if (wake) | ||
| 356 | wake_up_all(&ring->adev->fence_queue); | ||
| 357 | } | ||
| 358 | |||
| 359 | /** | ||
| 360 | * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled | ||
| 361 | * | ||
| 362 | * @ring: ring the fence is associated with | ||
| 363 | * @seq: sequence number | ||
| 364 | * | ||
| 365 | * Check if the last signaled fence sequence number is >= the requested | ||
| 366 | * sequence number (all asics). | ||
| 367 | * Returns true if the fence has signaled (current fence value | ||
| 368 | * is >= requested value) or false if it has not (current fence | ||
| 369 | * value is < the requested value). Helper function for | ||
| 370 | * amdgpu_fence_signaled(). | ||
| 371 | */ | ||
| 372 | static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq) | ||
| 373 | { | ||
| 374 | if (atomic64_read(&ring->fence_drv.last_seq) >= seq) | ||
| 375 | return true; | ||
| 376 | |||
| 377 | /* poll new last sequence at least once */ | ||
| 378 | amdgpu_fence_process(ring); | ||
| 379 | if (atomic64_read(&ring->fence_drv.last_seq) >= seq) | ||
| 380 | return true; | ||
| 381 | |||
| 382 | return false; | ||
| 383 | } | ||
| 384 | |||
| 385 | static bool amdgpu_fence_is_signaled(struct fence *f) | ||
| 386 | { | ||
| 387 | struct amdgpu_fence *fence = to_amdgpu_fence(f); | ||
| 388 | struct amdgpu_ring *ring = fence->ring; | ||
| 389 | struct amdgpu_device *adev = ring->adev; | ||
| 390 | |||
| 391 | if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) | ||
| 392 | return true; | ||
| 393 | |||
| 394 | if (down_read_trylock(&adev->exclusive_lock)) { | ||
| 395 | amdgpu_fence_process(ring); | ||
| 396 | up_read(&adev->exclusive_lock); | ||
| 397 | |||
| 398 | if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) | ||
| 399 | return true; | ||
| 400 | } | ||
| 401 | return false; | ||
| 402 | } | ||
| 403 | |||
| 404 | /** | ||
| 405 | * amdgpu_fence_enable_signaling - enable signaling on a fence | ||
| 406 | * @fence: fence | ||
| 407 | * | ||
| 408 | * This function is called with fence_queue lock held, and adds a callback | ||
| 409 | * to fence_queue that checks if this fence is signaled, and if so it | ||
| 410 | * signals the fence and removes itself. | ||
| 411 | */ | ||
| 412 | static bool amdgpu_fence_enable_signaling(struct fence *f) | ||
| 413 | { | ||
| 414 | struct amdgpu_fence *fence = to_amdgpu_fence(f); | ||
| 415 | struct amdgpu_ring *ring = fence->ring; | ||
| 416 | struct amdgpu_device *adev = ring->adev; | ||
| 417 | |||
| 418 | if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) | ||
| 419 | return false; | ||
| 420 | |||
| 421 | if (down_read_trylock(&adev->exclusive_lock)) { | ||
| 422 | amdgpu_irq_get(adev, ring->fence_drv.irq_src, | ||
| 423 | ring->fence_drv.irq_type); | ||
| 424 | if (amdgpu_fence_activity(ring)) | ||
| 425 | wake_up_all_locked(&adev->fence_queue); | ||
| 426 | |||
| 427 | /* did fence get signaled after we enabled the sw irq? */ | ||
| 428 | if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) { | ||
| 429 | amdgpu_irq_put(adev, ring->fence_drv.irq_src, | ||
| 430 | ring->fence_drv.irq_type); | ||
| 431 | up_read(&adev->exclusive_lock); | ||
| 432 | return false; | ||
| 433 | } | ||
| 434 | |||
| 435 | up_read(&adev->exclusive_lock); | ||
| 436 | } else { | ||
| 437 | /* we're probably in a lockup, let's not fiddle too much */ | ||
| 438 | if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src, | ||
| 439 | ring->fence_drv.irq_type)) | ||
| 440 | ring->fence_drv.delayed_irq = true; | ||
| 441 | amdgpu_fence_schedule_check(ring); | ||
| 442 | } | ||
| 443 | |||
| 444 | fence->fence_wake.flags = 0; | ||
| 445 | fence->fence_wake.private = NULL; | ||
| 446 | fence->fence_wake.func = amdgpu_fence_check_signaled; | ||
| 447 | __add_wait_queue(&adev->fence_queue, &fence->fence_wake); | ||
| 448 | fence_get(f); | ||
| 449 | FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); | ||
| 450 | return true; | ||
| 451 | } | ||
| 452 | |||
| 453 | /** | ||
| 454 | * amdgpu_fence_signaled - check if a fence has signaled | ||
| 455 | * | ||
| 456 | * @fence: amdgpu fence object | ||
| 457 | * | ||
| 458 | * Check if the requested fence has signaled (all asics). | ||
| 459 | * Returns true if the fence has signaled or false if it has not. | ||
| 460 | */ | ||
| 461 | bool amdgpu_fence_signaled(struct amdgpu_fence *fence) | ||
| 462 | { | ||
| 463 | if (!fence) | ||
| 464 | return true; | ||
| 465 | |||
| 466 | if (fence->seq == AMDGPU_FENCE_SIGNALED_SEQ) | ||
| 467 | return true; | ||
| 468 | |||
| 469 | if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) { | ||
| 470 | fence->seq = AMDGPU_FENCE_SIGNALED_SEQ; | ||
| 471 | if (!fence_signal(&fence->base)) | ||
| 472 | FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n"); | ||
| 473 | return true; | ||
| 474 | } | ||
| 475 | |||
| 476 | return false; | ||
| 477 | } | ||
| 478 | |||
| 479 | /** | ||
| 480 | * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled | ||
| 481 | * | ||
| 482 | * @adev: amdgpu device pointer | ||
| 483 | * @seq: sequence numbers | ||
| 484 | * | ||
| 485 | * Check if the last signaled fence sequence number is >= the requested | ||
| 486 | * sequence number (all asics). | ||
| 487 | * Returns true if any has signaled (current value is >= requested value) | ||
| 488 | * or false if it has not. Helper function for amdgpu_fence_wait_seq_timeout. | ||
| 489 | */ | ||
| 490 | static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq) | ||
| 491 | { | ||
| 492 | unsigned i; | ||
| 493 | |||
| 494 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 495 | if (!adev->rings[i] || !seq[i]) | ||
| 496 | continue; | ||
| 497 | |||
| 498 | if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i])) | ||
| 499 | return true; | ||
| 500 | } | ||
| 501 | |||
| 502 | return false; | ||
| 503 | } | ||
| 504 | |||
| 505 | /** | ||
| 506 | * amdgpu_fence_wait_seq_timeout - wait for a specific sequence numbers | ||
| 507 | * | ||
| 508 | * @adev: amdgpu device pointer | ||
| 509 | * @target_seq: sequence number(s) we want to wait for | ||
| 510 | * @intr: use interruptible sleep | ||
| 511 | * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait | ||
| 512 | * | ||
| 513 | * Wait for the requested sequence number(s) to be written by any ring | ||
| 514 | * (all asics). Sequence number array is indexed by ring id. | ||
| 515 | * @intr selects whether to use interruptible (true) or non-interruptible | ||
| 516 | * (false) sleep when waiting for the sequence number. Helper function | ||
| 517 | * for amdgpu_fence_wait_*(). | ||
| 518 | * Returns remaining time if the sequence number has passed, 0 when | ||
| 519 | * the wait times out, or an error for all other cases. | ||
| 520 | * -EDEADLK is returned when a GPU lockup has been detected. | ||
| 521 | */ | ||
| 522 | long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, u64 *target_seq, | ||
| 523 | bool intr, long timeout) | ||
| 524 | { | ||
| 525 | uint64_t last_seq[AMDGPU_MAX_RINGS]; | ||
| 526 | bool signaled; | ||
| 527 | int i, r; | ||
| 528 | |||
| 529 | while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) { | ||
| 530 | |||
| 531 | /* Save current sequence values, used to check for GPU lockups */ | ||
| 532 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 533 | struct amdgpu_ring *ring = adev->rings[i]; | ||
| 534 | |||
| 535 | if (!ring || !target_seq[i]) | ||
| 536 | continue; | ||
| 537 | |||
| 538 | last_seq[i] = atomic64_read(&ring->fence_drv.last_seq); | ||
| 539 | trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]); | ||
| 540 | amdgpu_irq_get(adev, ring->fence_drv.irq_src, | ||
| 541 | ring->fence_drv.irq_type); | ||
| 542 | } | ||
| 543 | |||
| 544 | if (intr) { | ||
| 545 | r = wait_event_interruptible_timeout(adev->fence_queue, ( | ||
| 546 | (signaled = amdgpu_fence_any_seq_signaled(adev, target_seq)) | ||
| 547 | || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT); | ||
| 548 | } else { | ||
| 549 | r = wait_event_timeout(adev->fence_queue, ( | ||
| 550 | (signaled = amdgpu_fence_any_seq_signaled(adev, target_seq)) | ||
| 551 | || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT); | ||
| 552 | } | ||
| 553 | |||
| 554 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 555 | struct amdgpu_ring *ring = adev->rings[i]; | ||
| 556 | |||
| 557 | if (!ring || !target_seq[i]) | ||
| 558 | continue; | ||
| 559 | |||
| 560 | amdgpu_irq_put(adev, ring->fence_drv.irq_src, | ||
| 561 | ring->fence_drv.irq_type); | ||
| 562 | trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]); | ||
| 563 | } | ||
| 564 | |||
| 565 | if (unlikely(r < 0)) | ||
| 566 | return r; | ||
| 567 | |||
| 568 | if (unlikely(!signaled)) { | ||
| 569 | |||
| 570 | if (adev->needs_reset) | ||
| 571 | return -EDEADLK; | ||
| 572 | |||
| 573 | /* we were interrupted for some reason and the | ||
| 574 | * fence isn't signaled yet, resume waiting */ | ||
| 575 | if (r) | ||
| 576 | continue; | ||
| 577 | |||
| 578 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 579 | struct amdgpu_ring *ring = adev->rings[i]; | ||
| 580 | |||
| 581 | if (!ring || !target_seq[i]) | ||
| 582 | continue; | ||
| 583 | |||
| 584 | if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq)) | ||
| 585 | break; | ||
| 586 | } | ||
| 587 | |||
| 588 | if (i != AMDGPU_MAX_RINGS) | ||
| 589 | continue; | ||
| 590 | |||
| 591 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 592 | if (!adev->rings[i] || !target_seq[i]) | ||
| 593 | continue; | ||
| 594 | |||
| 595 | if (amdgpu_ring_is_lockup(adev->rings[i])) | ||
| 596 | break; | ||
| 597 | } | ||
| 598 | |||
| 599 | if (i < AMDGPU_MAX_RINGS) { | ||
| 600 | /* good news, we believe it's a lockup */ | ||
| 601 | dev_warn(adev->dev, "GPU lockup (waiting for " | ||
| 602 | "0x%016llx last fence id 0x%016llx on" | ||
| 603 | " ring %d)\n", | ||
| 604 | target_seq[i], last_seq[i], i); | ||
| 605 | |||
| 606 | /* remember that we need a reset */ | ||
| 607 | adev->needs_reset = true; | ||
| 608 | wake_up_all(&adev->fence_queue); | ||
| 609 | return -EDEADLK; | ||
| 610 | } | ||
| 611 | |||
| 612 | if (timeout < MAX_SCHEDULE_TIMEOUT) { | ||
| 613 | timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT; | ||
| 614 | if (timeout <= 0) { | ||
| 615 | return 0; | ||
| 616 | } | ||
| 617 | } | ||
| 618 | } | ||
| 619 | } | ||
| 620 | return timeout; | ||
| 621 | } | ||
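Stripped of the interrupt bookkeeping and lockup probes, the control flow above is a wait loop that spends its budget in fixed slices and returns either the leftover budget or 0 on expiry. A toy model of just that accounting (SLICE, poll_signaled and the numbers are made up for the sketch):

```c
#include <stdbool.h>
#include <stdio.h>

#define SLICE 10 /* per-iteration wait, in arbitrary "jiffies" */

/* Stand-in for re-checking the fence: signals on the 4th poll. */
static bool poll_signaled(int iteration)
{
	return iteration >= 3;
}

/* Wait in bounded slices; return remaining budget on success, 0 on timeout. */
static long wait_with_budget(long timeout)
{
	int i = 0;

	while (!poll_signaled(i++)) {
		timeout -= SLICE;
		if (timeout <= 0)
			return 0;
	}
	return timeout;
}

int main(void)
{
	printf("%ld\n", wait_with_budget(100)); /* prints 70 */
	return 0;
}
```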
| 622 | |||
| 623 | /** | ||
| 624 | * amdgpu_fence_wait - wait for a fence to signal | ||
| 625 | * | ||
| 626 | * @fence: amdgpu fence object | ||
| 627 | * @intr: use interruptible sleep | ||
| 628 | * | ||
| 629 | * Wait for the requested fence to signal (all asics). | ||
| 630 | * @intr selects whether to use interruptible (true) or non-interruptible | ||
| 631 | * (false) sleep when waiting for the fence. | ||
| 632 | * Returns 0 if the fence has passed, error for all other cases. | ||
| 633 | */ | ||
| 634 | int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr) | ||
| 635 | { | ||
| 636 | uint64_t seq[AMDGPU_MAX_RINGS] = {}; | ||
| 637 | long r; | ||
| 638 | |||
| 639 | seq[fence->ring->idx] = fence->seq; | ||
| 640 | if (seq[fence->ring->idx] == AMDGPU_FENCE_SIGNALED_SEQ) | ||
| 641 | return 0; | ||
| 642 | |||
| 643 | r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr, MAX_SCHEDULE_TIMEOUT); | ||
| 644 | if (r < 0) { | ||
| 645 | return r; | ||
| 646 | } | ||
| 647 | |||
| 648 | fence->seq = AMDGPU_FENCE_SIGNALED_SEQ; | ||
| 649 | r = fence_signal(&fence->base); | ||
| 650 | if (!r) | ||
| 651 | FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); | ||
| 652 | return 0; | ||
| 653 | } | ||
| 654 | |||
| 655 | /** | ||
| 656 | * amdgpu_fence_wait_any - wait for a fence to signal on any ring | ||
| 657 | * | ||
| 658 | * @adev: amdgpu device pointer | ||
| 659 | * @fences: amdgpu fence object(s) | ||
| 660 | * @intr: use interruptible sleep | ||
| 661 | * | ||
| 662 | * Wait for any requested fence to signal (all asics). Fence | ||
| 663 | * array is indexed by ring id. @intr selects whether to use | ||
| 664 | * interruptible (true) or non-interruptible (false) sleep when | ||
| 665 | * waiting for the fences. Used by the suballocator. | ||
| 666 | * Returns 0 if any fence has passed, error for all other cases. | ||
| 667 | */ | ||
| 668 | int amdgpu_fence_wait_any(struct amdgpu_device *adev, | ||
| 669 | struct amdgpu_fence **fences, | ||
| 670 | bool intr) | ||
| 671 | { | ||
| 672 | uint64_t seq[AMDGPU_MAX_RINGS]; | ||
| 673 | unsigned i, num_rings = 0; | ||
| 674 | long r; | ||
| 675 | |||
| 676 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 677 | seq[i] = 0; | ||
| 678 | |||
| 679 | if (!fences[i]) { | ||
| 680 | continue; | ||
| 681 | } | ||
| 682 | |||
| 683 | seq[i] = fences[i]->seq; | ||
| 684 | ++num_rings; | ||
| 685 | |||
| 686 | /* test if something was already signaled */ | ||
| 687 | if (seq[i] == AMDGPU_FENCE_SIGNALED_SEQ) | ||
| 688 | return 0; | ||
| 689 | } | ||
| 690 | |||
| 691 | /* nothing to wait for? */ | ||
| 692 | if (num_rings == 0) | ||
| 693 | return -ENOENT; | ||
| 694 | |||
| 695 | r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT); | ||
| 696 | if (r < 0) { | ||
| 697 | return r; | ||
| 698 | } | ||
| 699 | return 0; | ||
| 700 | } | ||
| 701 | |||
| 702 | /** | ||
| 703 | * amdgpu_fence_wait_next - wait for the next fence to signal | ||
| 704 | * | ||
| 705 | * @ring: ring to wait the next fence on | ||
| 707 | * | ||
| 708 | * Wait for the next fence on the requested ring to signal (all asics). | ||
| 709 | * Returns 0 if the next fence has passed, error for all other cases. | ||
| 710 | * Caller must hold ring lock. | ||
| 711 | */ | ||
| 712 | int amdgpu_fence_wait_next(struct amdgpu_ring *ring) | ||
| 713 | { | ||
| 714 | uint64_t seq[AMDGPU_MAX_RINGS] = {}; | ||
| 715 | long r; | ||
| 716 | |||
| 717 | seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL; | ||
| 718 | if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) { | ||
| 719 | /* nothing to wait for, last_seq is | ||
| 720 | already the last emitted fence */ | ||
| 721 | return -ENOENT; | ||
| 722 | } | ||
| 723 | r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT); | ||
| 724 | if (r < 0) | ||
| 725 | return r; | ||
| 726 | return 0; | ||
| 727 | } | ||
| 728 | |||
| 729 | /** | ||
| 730 | * amdgpu_fence_wait_empty - wait for all fences to signal | ||
| 731 | * | ||
| 732 | * @ring: ring to wait on | ||
| 734 | * | ||
| 735 | * Wait for all fences on the requested ring to signal (all asics). | ||
| 736 | * Returns 0 if the fences have passed, error for all other cases. | ||
| 737 | * Caller must hold ring lock. | ||
| 738 | */ | ||
| 739 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) | ||
| 740 | { | ||
| 741 | struct amdgpu_device *adev = ring->adev; | ||
| 742 | uint64_t seq[AMDGPU_MAX_RINGS] = {}; | ||
| 743 | long r; | ||
| 744 | |||
| 745 | seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx]; | ||
| 746 | if (!seq[ring->idx]) | ||
| 747 | return 0; | ||
| 748 | |||
| 749 | r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT); | ||
| 750 | if (r < 0) { | ||
| 751 | if (r == -EDEADLK) | ||
| 752 | return -EDEADLK; | ||
| 753 | |||
| 754 | dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n", | ||
| 755 | ring->idx, r); | ||
| 756 | } | ||
| 757 | return 0; | ||
| 758 | } | ||
| 759 | |||
| 760 | /** | ||
| 761 | * amdgpu_fence_ref - take a ref on a fence | ||
| 762 | * | ||
| 763 | * @fence: amdgpu fence object | ||
| 764 | * | ||
| 765 | * Take a reference on a fence (all asics). | ||
| 766 | * Returns the fence. | ||
| 767 | */ | ||
| 768 | struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence) | ||
| 769 | { | ||
| 770 | fence_get(&fence->base); | ||
| 771 | return fence; | ||
| 772 | } | ||
| 773 | |||
| 774 | /** | ||
| 775 | * amdgpu_fence_unref - remove a ref on a fence | ||
| 776 | * | ||
| 777 | * @fence: amdgpu fence object | ||
| 778 | * | ||
| 779 | * Remove a reference on a fence (all asics). | ||
| 780 | */ | ||
| 781 | void amdgpu_fence_unref(struct amdgpu_fence **fence) | ||
| 782 | { | ||
| 783 | struct amdgpu_fence *tmp = *fence; | ||
| 784 | |||
| 785 | *fence = NULL; | ||
| 786 | if (tmp) | ||
| 787 | fence_put(&tmp->base); | ||
| 788 | } | ||
| 789 | |||
| 790 | /** | ||
| 791 | * amdgpu_fence_count_emitted - get the count of emitted fences | ||
| 792 | * | ||
| 793 | * @ring: ring the fence is associated with | ||
| 794 | * | ||
| 795 | * Get the number of fences emitted on the requested ring (all asics). | ||
| 796 | * Returns the number of emitted fences on the ring. Used by the | ||
| 797 | * dynpm code to track ring activity. | ||
| 798 | */ | ||
| 799 | unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring) | ||
| 800 | { | ||
| 801 | uint64_t emitted; | ||
| 802 | |||
| 803 | /* We are not protected by the ring lock when reading the last sequence | ||
| 804 | * but it's ok to report slightly wrong fence count here. | ||
| 805 | */ | ||
| 806 | amdgpu_fence_process(ring); | ||
| 807 | emitted = ring->fence_drv.sync_seq[ring->idx] | ||
| 808 | - atomic64_read(&ring->fence_drv.last_seq); | ||
| 809 | /* to avoid 32-bit wraparound */ | ||
| 810 | if (emitted > 0x10000000) | ||
| 811 | emitted = 0x10000000; | ||
| 812 | |||
| 813 | return (unsigned)emitted; | ||
| 814 | } | ||
| 815 | |||
| 816 | /** | ||
| 817 | * amdgpu_fence_need_sync - do we need a semaphore | ||
| 818 | * | ||
| 819 | * @fence: amdgpu fence object | ||
| 820 | * @dst_ring: which ring to check against | ||
| 821 | * | ||
| 822 | * Check if the fence needs to be synced against another ring | ||
| 823 | * (all asics). If so, we need to emit a semaphore. | ||
| 824 | * Returns true if we need to sync with another ring, false if | ||
| 825 | * not. | ||
| 826 | */ | ||
| 827 | bool amdgpu_fence_need_sync(struct amdgpu_fence *fence, | ||
| 828 | struct amdgpu_ring *dst_ring) | ||
| 829 | { | ||
| 830 | struct amdgpu_fence_driver *fdrv; | ||
| 831 | |||
| 832 | if (!fence) | ||
| 833 | return false; | ||
| 834 | |||
| 835 | if (fence->ring == dst_ring) | ||
| 836 | return false; | ||
| 837 | |||
| 838 | /* we are protected by the ring mutex */ | ||
| 839 | fdrv = &dst_ring->fence_drv; | ||
| 840 | if (fence->seq <= fdrv->sync_seq[fence->ring->idx]) | ||
| 841 | return false; | ||
| 842 | |||
| 843 | return true; | ||
| 844 | } | ||
| 845 | |||
| 846 | /** | ||
| 847 | * amdgpu_fence_note_sync - record the sync point | ||
| 848 | * | ||
| 849 | * @fence: amdgpu fence object | ||
| 850 | * @dst_ring: which ring to check against | ||
| 851 | * | ||
| 852 | * Note the sequence number at which point the fence will | ||
| 853 | * be synced with the requested ring (all asics). | ||
| 854 | */ | ||
| 855 | void amdgpu_fence_note_sync(struct amdgpu_fence *fence, | ||
| 856 | struct amdgpu_ring *dst_ring) | ||
| 857 | { | ||
| 858 | struct amdgpu_fence_driver *dst, *src; | ||
| 859 | unsigned i; | ||
| 860 | |||
| 861 | if (!fence) | ||
| 862 | return; | ||
| 863 | |||
| 864 | if (fence->ring == dst_ring) | ||
| 865 | return; | ||
| 866 | |||
| 867 | /* we are protected by the ring mutex */ | ||
| 868 | src = &fence->ring->fence_drv; | ||
| 869 | dst = &dst_ring->fence_drv; | ||
| 870 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 871 | if (i == dst_ring->idx) | ||
| 872 | continue; | ||
| 873 | |||
| 874 | dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]); | ||
| 875 | } | ||
| 876 | } | ||
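The merge above keeps, for every source ring, the highest sequence number the destination ring is known to have synced against, so a later amdgpu_fence_need_sync() can skip redundant semaphores. A small standalone model of that per-ring max merge (ring count and values are arbitrary):

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_RINGS 4

/* Fold src's per-ring sync watermarks into dst's: each entry
 * becomes the max of the two, skipping dst's own slot. */
static void note_sync(uint64_t dst[MAX_RINGS],
		      const uint64_t src[MAX_RINGS], int dst_idx)
{
	for (int i = 0; i < MAX_RINGS; i++) {
		if (i == dst_idx)
			continue;
		if (src[i] > dst[i])
			dst[i] = src[i];
	}
}

int main(void)
{
	uint64_t dst[MAX_RINGS] = { 10, 4, 7, 0 };
	const uint64_t src[MAX_RINGS] = { 2, 9, 5, 3 };

	note_sync(dst, src, 0); /* ring 0 is the destination ring */
	for (int i = 0; i < MAX_RINGS; i++)
		printf("%llu ", (unsigned long long)dst[i]);
	printf("\n"); /* prints: 10 9 7 3 */
	return 0;
}
```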
| 877 | |||
| 878 | /** | ||
| 879 | * amdgpu_fence_driver_start_ring - make the fence driver | ||
| 880 | * ready for use on the requested ring. | ||
| 881 | * | ||
| 882 | * @ring: ring to start the fence driver on | ||
| 883 | * @irq_src: interrupt source to use for this ring | ||
| 884 | * @irq_type: interrupt type to use for this ring | ||
| 885 | * | ||
| 886 | * Make the fence driver ready for processing (all asics). | ||
| 887 | * Not all asics have all rings, so each asic will only | ||
| 888 | * start the fence driver on the rings it has. | ||
| 889 | * Returns 0 for success, errors for failure. | ||
| 890 | */ | ||
| 891 | int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, | ||
| 892 | struct amdgpu_irq_src *irq_src, | ||
| 893 | unsigned irq_type) | ||
| 894 | { | ||
| 895 | struct amdgpu_device *adev = ring->adev; | ||
| 896 | uint64_t index; | ||
| 897 | |||
| 898 | if (ring != &adev->uvd.ring) { | ||
| 899 | ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs]; | ||
| 900 | ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4); | ||
| 901 | } else { | ||
| 902 | /* put fence directly behind firmware */ | ||
| 903 | index = ALIGN(adev->uvd.fw->size, 8); | ||
| 904 | ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index; | ||
| 905 | ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index; | ||
| 906 | } | ||
| 907 | amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq)); | ||
| 908 | ring->fence_drv.initialized = true; | ||
| 909 | ring->fence_drv.irq_src = irq_src; | ||
| 910 | ring->fence_drv.irq_type = irq_type; | ||
| 911 | dev_info(adev->dev, "fence driver on ring %d uses gpu addr 0x%016llx, " | ||
| 912 | "cpu addr 0x%p\n", ring->idx, | ||
| 913 | ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr); | ||
| 914 | return 0; | ||
| 915 | } | ||
| 916 | |||
| 917 | /** | ||
| 918 | * amdgpu_fence_driver_init_ring - init the fence driver | ||
| 919 | * for the requested ring. | ||
| 920 | * | ||
| 921 | * @ring: ring to init the fence driver on | ||
| 922 | * | ||
| 923 | * Init the fence driver for the requested ring (all asics). | ||
| 924 | * Helper function for amdgpu_fence_driver_init(). | ||
| 925 | */ | ||
| 926 | void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) | ||
| 927 | { | ||
| 928 | int i; | ||
| 929 | |||
| 930 | ring->fence_drv.cpu_addr = NULL; | ||
| 931 | ring->fence_drv.gpu_addr = 0; | ||
| 932 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) | ||
| 933 | ring->fence_drv.sync_seq[i] = 0; | ||
| 934 | |||
| 935 | atomic64_set(&ring->fence_drv.last_seq, 0); | ||
| 936 | ring->fence_drv.initialized = false; | ||
| 937 | |||
| 938 | INIT_DELAYED_WORK(&ring->fence_drv.lockup_work, | ||
| 939 | amdgpu_fence_check_lockup); | ||
| 940 | ring->fence_drv.ring = ring; | ||
| 941 | } | ||
| 942 | |||
| 943 | /** | ||
| 944 | * amdgpu_fence_driver_init - init the fence driver | ||
| 945 | * for all possible rings. | ||
| 946 | * | ||
| 947 | * @adev: amdgpu device pointer | ||
| 948 | * | ||
| 949 | * Init the fence driver for all possible rings (all asics). | ||
| 950 | * Not all asics have all rings, so each asic will only | ||
| 951 | * start the fence driver on the rings it has using | ||
| 952 | * amdgpu_fence_driver_start_ring(). | ||
| 953 | * Returns 0 for success. | ||
| 954 | */ | ||
| 955 | int amdgpu_fence_driver_init(struct amdgpu_device *adev) | ||
| 956 | { | ||
| 957 | init_waitqueue_head(&adev->fence_queue); | ||
| 958 | if (amdgpu_debugfs_fence_init(adev)) | ||
| 959 | dev_err(adev->dev, "fence debugfs file creation failed\n"); | ||
| 960 | |||
| 961 | return 0; | ||
| 962 | } | ||
| 963 | |||
| 964 | /** | ||
| 965 | * amdgpu_fence_driver_fini - tear down the fence driver | ||
| 966 | * for all possible rings. | ||
| 967 | * | ||
| 968 | * @adev: amdgpu device pointer | ||
| 969 | * | ||
| 970 | * Tear down the fence driver for all possible rings (all asics). | ||
| 971 | */ | ||
| 972 | void amdgpu_fence_driver_fini(struct amdgpu_device *adev) | ||
| 973 | { | ||
| 974 | int i, r; | ||
| 975 | |||
| 976 | mutex_lock(&adev->ring_lock); | ||
| 977 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | ||
| 978 | struct amdgpu_ring *ring = adev->rings[i]; | ||
| 979 | if (!ring || !ring->fence_drv.initialized) | ||
| 980 | continue; | ||
| 981 | r = amdgpu_fence_wait_empty(ring); | ||
| 982 | if (r) { | ||
| 983 | /* no need to trigger GPU reset as we are unloading */ | ||
| 984 | amdgpu_fence_driver_force_completion(adev); | ||
| 985 | } | ||
| 986 | wake_up_all(&adev->fence_queue); | ||
| 987 | ring->fence_drv.initialized = false; | ||
| 988 | } | ||
| 989 | mutex_unlock(&adev->ring_lock); | ||
| 990 | } | ||
| 991 | |||
| 992 | /** | ||
| 993 | * amdgpu_fence_driver_force_completion - force all fence waiters to complete | ||
| 994 | * | ||
| 995 | * @adev: amdgpu device pointer | ||
| 996 | * | ||
| 997 | * In case of GPU reset failure make sure no process keeps waiting on a fence | ||
| 998 | * that will never complete. | ||
| 999 | */ | ||
| 1000 | void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev) | ||
| 1001 | { | ||
| 1002 | int i; | ||
| 1003 | |||
| 1004 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | ||
| 1005 | struct amdgpu_ring *ring = adev->rings[i]; | ||
| 1006 | if (!ring || !ring->fence_drv.initialized) | ||
| 1007 | continue; | ||
| 1008 | |||
| 1009 | amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]); | ||
| 1010 | } | ||
| 1011 | } | ||
| 1012 | |||
| 1013 | |||
| 1014 | /* | ||
| 1015 | * Fence debugfs | ||
| 1016 | */ | ||
| 1017 | #if defined(CONFIG_DEBUG_FS) | ||
| 1018 | static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data) | ||
| 1019 | { | ||
| 1020 | struct drm_info_node *node = (struct drm_info_node *)m->private; | ||
| 1021 | struct drm_device *dev = node->minor->dev; | ||
| 1022 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1023 | int i, j; | ||
| 1024 | |||
| 1025 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 1026 | struct amdgpu_ring *ring = adev->rings[i]; | ||
| 1027 | if (!ring || !ring->fence_drv.initialized) | ||
| 1028 | continue; | ||
| 1029 | |||
| 1030 | amdgpu_fence_process(ring); | ||
| 1031 | |||
| 1032 | seq_printf(m, "--- ring %d ---\n", i); | ||
| 1033 | seq_printf(m, "Last signaled fence 0x%016llx\n", | ||
| 1034 | (unsigned long long)atomic64_read(&ring->fence_drv.last_seq)); | ||
| 1035 | seq_printf(m, "Last emitted 0x%016llx\n", | ||
| 1036 | ring->fence_drv.sync_seq[i]); | ||
| 1037 | |||
| 1038 | for (j = 0; j < AMDGPU_MAX_RINGS; ++j) { | ||
| 1039 | struct amdgpu_ring *other = adev->rings[j]; | ||
| 1040 | if (i != j && other && other->fence_drv.initialized) | ||
| 1041 | seq_printf(m, "Last sync to ring %d 0x%016llx\n", | ||
| 1042 | j, ring->fence_drv.sync_seq[j]); | ||
| 1043 | } | ||
| 1044 | } | ||
| 1045 | return 0; | ||
| 1046 | } | ||
| 1047 | |||
| 1048 | static struct drm_info_list amdgpu_debugfs_fence_list[] = { | ||
| 1049 | {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL}, | ||
| 1050 | }; | ||
| 1051 | #endif | ||
| 1052 | |||
| 1053 | int amdgpu_debugfs_fence_init(struct amdgpu_device *adev) | ||
| 1054 | { | ||
| 1055 | #if defined(CONFIG_DEBUG_FS) | ||
| 1056 | return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1); | ||
| 1057 | #else | ||
| 1058 | return 0; | ||
| 1059 | #endif | ||
| 1060 | } | ||
| 1061 | |||
| 1062 | static const char *amdgpu_fence_get_driver_name(struct fence *fence) | ||
| 1063 | { | ||
| 1064 | return "amdgpu"; | ||
| 1065 | } | ||
| 1066 | |||
| 1067 | static const char *amdgpu_fence_get_timeline_name(struct fence *f) | ||
| 1068 | { | ||
| 1069 | struct amdgpu_fence *fence = to_amdgpu_fence(f); | ||
| 1070 | return (const char *)fence->ring->name; | ||
| 1071 | } | ||
| 1072 | |||
| 1073 | static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence) | ||
| 1074 | { | ||
| 1075 | return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); | ||
| 1076 | } | ||
| 1077 | |||
| 1078 | struct amdgpu_wait_cb { | ||
| 1079 | struct fence_cb base; | ||
| 1080 | struct task_struct *task; | ||
| 1081 | }; | ||
| 1082 | |||
| 1083 | static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb) | ||
| 1084 | { | ||
| 1085 | struct amdgpu_wait_cb *wait = | ||
| 1086 | container_of(cb, struct amdgpu_wait_cb, base); | ||
| 1087 | wake_up_process(wait->task); | ||
| 1088 | } | ||
| 1089 | |||
| 1090 | static signed long amdgpu_fence_default_wait(struct fence *f, bool intr, | ||
| 1091 | signed long t) | ||
| 1092 | { | ||
| 1093 | struct amdgpu_fence *fence = to_amdgpu_fence(f); | ||
| 1094 | struct amdgpu_device *adev = fence->ring->adev; | ||
| 1095 | struct amdgpu_wait_cb cb; | ||
| 1096 | |||
| 1097 | cb.task = current; | ||
| 1098 | |||
| 1099 | if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb)) | ||
| 1100 | return t; | ||
| 1101 | |||
| 1102 | while (t > 0) { | ||
| 1103 | if (intr) | ||
| 1104 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 1105 | else | ||
| 1106 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
| 1107 | |||
| 1108 | /* | ||
| 1109 | * amdgpu_test_signaled must be called after | ||
| 1110 | * set_current_state to prevent a race with wake_up_process | ||
| 1111 | */ | ||
| 1112 | if (amdgpu_test_signaled(fence)) | ||
| 1113 | break; | ||
| 1114 | |||
| 1115 | if (adev->needs_reset) { | ||
| 1116 | t = -EDEADLK; | ||
| 1117 | break; | ||
| 1118 | } | ||
| 1119 | |||
| 1120 | t = schedule_timeout(t); | ||
| 1121 | |||
| 1122 | if (t > 0 && intr && signal_pending(current)) | ||
| 1123 | t = -ERESTARTSYS; | ||
| 1124 | } | ||
| 1125 | |||
| 1126 | __set_current_state(TASK_RUNNING); | ||
| 1127 | fence_remove_callback(f, &cb.base); | ||
| 1128 | |||
| 1129 | return t; | ||
| 1130 | } | ||
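The comment inside the loop above is the kernel form of the classic missed-wakeup guard: the task marks itself sleeping before testing the condition, so a wake_up_process() racing in between still takes effect. The userspace analogue of the same rule is to test the predicate under the lock the waker also holds, as in this pthread sketch (just the pattern, not amdgpu code):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool signaled;

/* Waiter: the predicate is tested with the lock held, so a wakeup
 * that fires between the test and the wait cannot be lost. */
static void *waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!signaled)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("fence observed\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	pthread_mutex_lock(&lock);
	signaled = true; /* "emit" the fence */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
```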
| 1131 | |||
| 1132 | const struct fence_ops amdgpu_fence_ops = { | ||
| 1133 | .get_driver_name = amdgpu_fence_get_driver_name, | ||
| 1134 | .get_timeline_name = amdgpu_fence_get_timeline_name, | ||
| 1135 | .enable_signaling = amdgpu_fence_enable_signaling, | ||
| 1136 | .signaled = amdgpu_fence_is_signaled, | ||
| 1137 | .wait = amdgpu_fence_default_wait, | ||
| 1138 | .release = NULL, | ||
| 1139 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c new file mode 100644 index 000000000000..e02db0b2e839 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | |||
| @@ -0,0 +1,371 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #include <drm/drmP.h> | ||
| 29 | #include <drm/amdgpu_drm.h> | ||
| 30 | #include "amdgpu.h" | ||
| 31 | |||
| 32 | /* | ||
| 33 | * GART | ||
| 34 | * The GART (Graphics Aperture Remapping Table) is an aperture | ||
| 35 | * in the GPU's address space. System pages can be mapped into | ||
| 36 | * the aperture and look like contiguous pages from the GPU's | ||
| 37 | * perspective. A page table maps the pages in the aperture | ||
| 38 | * to the actual backing pages in system memory. | ||
| 39 | * | ||
| 40 | * Radeon GPUs support both an internal GART, as described above, | ||
| 41 | * and AGP. AGP works similarly, but the GART table is configured | ||
| 42 | * and maintained by the northbridge rather than the driver. | ||
| 43 | * Radeon hw has a separate AGP aperture that is programmed to | ||
| 44 | * point to the AGP aperture provided by the northbridge and the | ||
| 45 | * requests are passed through to the northbridge aperture. | ||
| 46 | * Both AGP and internal GART can be used at the same time, however | ||
| 47 | * that is not currently supported by the driver. | ||
| 48 | * | ||
| 49 | * This file handles the common internal GART management. | ||
| 50 | */ | ||
| 51 | |||
| 52 | /* | ||
| 53 | * Common GART table functions. | ||
| 54 | */ | ||
| 55 | /** | ||
| 56 | * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table | ||
| 57 | * | ||
| 58 | * @adev: amdgpu_device pointer | ||
| 59 | * | ||
| 60 | * Allocate system memory for GART page table | ||
| 61 | * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the | ||
| 62 | * gart table to be in system memory. | ||
| 63 | * Returns 0 for success, -ENOMEM for failure. | ||
| 64 | */ | ||
| 65 | int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev) | ||
| 66 | { | ||
| 67 | void *ptr; | ||
| 68 | |||
| 69 | ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size, | ||
| 70 | &adev->gart.table_addr); | ||
| 71 | if (ptr == NULL) { | ||
| 72 | return -ENOMEM; | ||
| 73 | } | ||
| 74 | #ifdef CONFIG_X86 | ||
| 75 | if (0) { | ||
| 76 | set_memory_uc((unsigned long)ptr, | ||
| 77 | adev->gart.table_size >> PAGE_SHIFT); | ||
| 78 | } | ||
| 79 | #endif | ||
| 80 | adev->gart.ptr = ptr; | ||
| 81 | memset((void *)adev->gart.ptr, 0, adev->gart.table_size); | ||
| 82 | return 0; | ||
| 83 | } | ||
| 84 | |||
| 85 | /** | ||
| 86 | * amdgpu_gart_table_ram_free - free system ram for gart page table | ||
| 87 | * | ||
| 88 | * @adev: amdgpu_device pointer | ||
| 89 | * | ||
| 90 | * Free system memory for GART page table | ||
| 91 | * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the | ||
| 92 | * gart table to be in system memory. | ||
| 93 | */ | ||
| 94 | void amdgpu_gart_table_ram_free(struct amdgpu_device *adev) | ||
| 95 | { | ||
| 96 | if (adev->gart.ptr == NULL) { | ||
| 97 | return; | ||
| 98 | } | ||
| 99 | #ifdef CONFIG_X86 | ||
| 100 | if (0) { | ||
| 101 | set_memory_wb((unsigned long)adev->gart.ptr, | ||
| 102 | adev->gart.table_size >> PAGE_SHIFT); | ||
| 103 | } | ||
| 104 | #endif | ||
| 105 | pci_free_consistent(adev->pdev, adev->gart.table_size, | ||
| 106 | (void *)adev->gart.ptr, | ||
| 107 | adev->gart.table_addr); | ||
| 108 | adev->gart.ptr = NULL; | ||
| 109 | adev->gart.table_addr = 0; | ||
| 110 | } | ||
| 111 | |||
| 112 | /** | ||
| 113 | * amdgpu_gart_table_vram_alloc - allocate vram for gart page table | ||
| 114 | * | ||
| 115 | * @adev: amdgpu_device pointer | ||
| 116 | * | ||
| 117 | * Allocate video memory for GART page table | ||
| 118 | * (pcie r4xx, r5xx+). These asics require the | ||
| 119 | * gart table to be in video memory. | ||
| 120 | * Returns 0 for success, error for failure. | ||
| 121 | */ | ||
| 122 | int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) | ||
| 123 | { | ||
| 124 | int r; | ||
| 125 | |||
| 126 | if (adev->gart.robj == NULL) { | ||
| 127 | r = amdgpu_bo_create(adev, adev->gart.table_size, | ||
| 128 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, | ||
| 129 | NULL, &adev->gart.robj); | ||
| 130 | if (r) { | ||
| 131 | return r; | ||
| 132 | } | ||
| 133 | } | ||
| 134 | return 0; | ||
| 135 | } | ||
| 136 | |||
| 137 | /** | ||
| 138 | * amdgpu_gart_table_vram_pin - pin gart page table in vram | ||
| 139 | * | ||
| 140 | * @adev: amdgpu_device pointer | ||
| 141 | * | ||
| 142 | * Pin the GART page table in vram so it will not be moved | ||
| 143 | * by the memory manager (pcie r4xx, r5xx+). These asics require the | ||
| 144 | * gart table to be in video memory. | ||
| 145 | * Returns 0 for success, error for failure. | ||
| 146 | */ | ||
| 147 | int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev) | ||
| 148 | { | ||
| 149 | uint64_t gpu_addr; | ||
| 150 | int r; | ||
| 151 | |||
| 152 | r = amdgpu_bo_reserve(adev->gart.robj, false); | ||
| 153 | if (unlikely(r != 0)) | ||
| 154 | return r; | ||
| 155 | r = amdgpu_bo_pin(adev->gart.robj, | ||
| 156 | AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr); | ||
| 157 | if (r) { | ||
| 158 | amdgpu_bo_unreserve(adev->gart.robj); | ||
| 159 | return r; | ||
| 160 | } | ||
| 161 | r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr); | ||
| 162 | if (r) | ||
| 163 | amdgpu_bo_unpin(adev->gart.robj); | ||
| 164 | amdgpu_bo_unreserve(adev->gart.robj); | ||
| 165 | adev->gart.table_addr = gpu_addr; | ||
| 166 | return r; | ||
| 167 | } | ||
| 168 | |||
| 169 | /** | ||
| 170 | * amdgpu_gart_table_vram_unpin - unpin gart page table in vram | ||
| 171 | * | ||
| 172 | * @adev: amdgpu_device pointer | ||
| 173 | * | ||
| 174 | * Unpin the GART page table in vram (pcie r4xx, r5xx+). | ||
| 175 | * These asics require the gart table to be in video memory. | ||
| 176 | */ | ||
| 177 | void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev) | ||
| 178 | { | ||
| 179 | int r; | ||
| 180 | |||
| 181 | if (adev->gart.robj == NULL) { | ||
| 182 | return; | ||
| 183 | } | ||
| 184 | r = amdgpu_bo_reserve(adev->gart.robj, false); | ||
| 185 | if (likely(r == 0)) { | ||
| 186 | amdgpu_bo_kunmap(adev->gart.robj); | ||
| 187 | amdgpu_bo_unpin(adev->gart.robj); | ||
| 188 | amdgpu_bo_unreserve(adev->gart.robj); | ||
| 189 | adev->gart.ptr = NULL; | ||
| 190 | } | ||
| 191 | } | ||
| 192 | |||
| 193 | /** | ||
| 194 | * amdgpu_gart_table_vram_free - free gart page table vram | ||
| 195 | * | ||
| 196 | * @adev: amdgpu_device pointer | ||
| 197 | * | ||
| 198 | * Free the video memory used for the GART page table | ||
| 199 | * (pcie r4xx, r5xx+). These asics require the gart table to | ||
| 200 | * be in video memory. | ||
| 201 | */ | ||
| 202 | void amdgpu_gart_table_vram_free(struct amdgpu_device *adev) | ||
| 203 | { | ||
| 204 | if (adev->gart.robj == NULL) { | ||
| 205 | return; | ||
| 206 | } | ||
| 207 | amdgpu_bo_unref(&adev->gart.robj); | ||
| 208 | } | ||
| 209 | |||
| 210 | /* | ||
| 211 | * Common gart functions. | ||
| 212 | */ | ||
| 213 | /** | ||
| 214 | * amdgpu_gart_unbind - unbind pages from the gart page table | ||
| 215 | * | ||
| 216 | * @adev: amdgpu_device pointer | ||
| 217 | * @offset: offset into the GPU's gart aperture | ||
| 218 | * @pages: number of pages to unbind | ||
| 219 | * | ||
| 220 | * Unbinds the requested pages from the gart page table and | ||
| 221 | * replaces them with the dummy page (all asics). | ||
| 222 | */ | ||
| 223 | void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, | ||
| 224 | int pages) | ||
| 225 | { | ||
| 226 | unsigned t; | ||
| 227 | unsigned p; | ||
| 228 | int i, j; | ||
| 229 | u64 page_base; | ||
| 230 | uint32_t flags = AMDGPU_PTE_SYSTEM; | ||
| 231 | |||
| 232 | if (!adev->gart.ready) { | ||
| 233 | WARN(1, "trying to unbind memory from uninitialized GART !\n"); | ||
| 234 | return; | ||
| 235 | } | ||
| 236 | |||
| 237 | t = offset / AMDGPU_GPU_PAGE_SIZE; | ||
| 238 | p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); | ||
| 239 | for (i = 0; i < pages; i++, p++) { | ||
| 240 | if (adev->gart.pages[p]) { | ||
| 241 | adev->gart.pages[p] = NULL; | ||
| 242 | adev->gart.pages_addr[p] = adev->dummy_page.addr; | ||
| 243 | page_base = adev->gart.pages_addr[p]; | ||
| 244 | if (!adev->gart.ptr) | ||
| 245 | continue; | ||
| 246 | |||
| 247 | for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { | ||
| 248 | amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, | ||
| 249 | t, page_base, flags); | ||
| 250 | page_base += AMDGPU_GPU_PAGE_SIZE; | ||
| 251 | } | ||
| 252 | } | ||
| 253 | } | ||
| 254 | mb(); | ||
| 255 | amdgpu_gart_flush_gpu_tlb(adev, 0); | ||
| 256 | } | ||
| 257 | |||
| 258 | /** | ||
| 259 | * amdgpu_gart_bind - bind pages into the gart page table | ||
| 260 | * | ||
| 261 | * @adev: amdgpu_device pointer | ||
| 262 | * @offset: offset into the GPU's gart aperture | ||
| 263 | * @pages: number of pages to bind | ||
| 264 | * @pagelist: pages to bind | ||
| 265 | * @dma_addr: DMA addresses of pages | ||
| 266 | * | ||
| 267 | * Binds the requested pages to the gart page table | ||
| 268 | * (all asics). | ||
| 269 | * Returns 0 for success, -EINVAL for failure. | ||
| 270 | */ | ||
| 271 | int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset, | ||
| 272 | int pages, struct page **pagelist, dma_addr_t *dma_addr, | ||
| 273 | uint32_t flags) | ||
| 274 | { | ||
| 275 | unsigned t; | ||
| 276 | unsigned p; | ||
| 277 | uint64_t page_base; | ||
| 278 | int i, j; | ||
| 279 | |||
| 280 | if (!adev->gart.ready) { | ||
| 281 | WARN(1, "trying to bind memory to uninitialized GART !\n"); | ||
| 282 | return -EINVAL; | ||
| 283 | } | ||
| 284 | |||
| 285 | t = offset / AMDGPU_GPU_PAGE_SIZE; | ||
| 286 | p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); | ||
| 287 | |||
| 288 | for (i = 0; i < pages; i++, p++) { | ||
| 289 | adev->gart.pages_addr[p] = dma_addr[i]; | ||
| 290 | adev->gart.pages[p] = pagelist[i]; | ||
| 291 | if (adev->gart.ptr) { | ||
| 292 | page_base = adev->gart.pages_addr[p]; | ||
| 293 | for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { | ||
| 294 | amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags); | ||
| 295 | page_base += AMDGPU_GPU_PAGE_SIZE; | ||
| 296 | } | ||
| 297 | } | ||
| 298 | } | ||
| 299 | mb(); | ||
| 300 | amdgpu_gart_flush_gpu_tlb(adev, 0); | ||
| 301 | return 0; | ||
| 302 | } | ||
| 303 | |||
| 304 | /** | ||
| 305 | * amdgpu_gart_init - init the driver info for managing the gart | ||
| 306 | * | ||
| 307 | * @adev: amdgpu_device pointer | ||
| 308 | * | ||
| 309 | * Allocate the dummy page and init the gart driver info (all asics). | ||
| 310 | * Returns 0 for success, error for failure. | ||
| 311 | */ | ||
| 312 | int amdgpu_gart_init(struct amdgpu_device *adev) | ||
| 313 | { | ||
| 314 | int r, i; | ||
| 315 | |||
| 316 | if (adev->gart.pages) { | ||
| 317 | return 0; | ||
| 318 | } | ||
| 319 | /* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */ | ||
| 320 | if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) { | ||
| 321 | DRM_ERROR("Page size is smaller than GPU page size!\n"); | ||
| 322 | return -EINVAL; | ||
| 323 | } | ||
| 324 | r = amdgpu_dummy_page_init(adev); | ||
| 325 | if (r) | ||
| 326 | return r; | ||
| 327 | /* Compute table size */ | ||
| 328 | adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE; | ||
| 329 | adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE; | ||
| 330 | DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", | ||
| 331 | adev->gart.num_cpu_pages, adev->gart.num_gpu_pages); | ||
| 332 | /* Allocate pages table */ | ||
| 333 | adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages); | ||
| 334 | if (adev->gart.pages == NULL) { | ||
| 335 | amdgpu_gart_fini(adev); | ||
| 336 | return -ENOMEM; | ||
| 337 | } | ||
| 338 | adev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) * | ||
| 339 | adev->gart.num_cpu_pages); | ||
| 340 | if (adev->gart.pages_addr == NULL) { | ||
| 341 | amdgpu_gart_fini(adev); | ||
| 342 | return -ENOMEM; | ||
| 343 | } | ||
| 344 | /* set GART entry to point to the dummy page by default */ | ||
| 345 | for (i = 0; i < adev->gart.num_cpu_pages; i++) { | ||
| 346 | adev->gart.pages_addr[i] = adev->dummy_page.addr; | ||
| 347 | } | ||
| 348 | return 0; | ||
| 349 | } | ||
| 350 | |||
| 351 | /** | ||
| 352 | * amdgpu_gart_fini - tear down the driver info for managing the gart | ||
| 353 | * | ||
| 354 | * @adev: amdgpu_device pointer | ||
| 355 | * | ||
| 356 | * Tear down the gart driver info and free the dummy page (all asics). | ||
| 357 | */ | ||
| 358 | void amdgpu_gart_fini(struct amdgpu_device *adev) | ||
| 359 | { | ||
| 360 | if (adev->gart.pages && adev->gart.pages_addr && adev->gart.ready) { | ||
| 361 | /* unbind pages */ | ||
| 362 | amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages); | ||
| 363 | } | ||
| 364 | adev->gart.ready = false; | ||
| 365 | vfree(adev->gart.pages); | ||
| 366 | vfree(adev->gart.pages_addr); | ||
| 367 | adev->gart.pages = NULL; | ||
| 368 | adev->gart.pages_addr = NULL; | ||
| 369 | |||
| 370 | amdgpu_dummy_page_fini(adev); | ||
| 371 | } | ||
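
The bind/unbind index arithmetic in this file deserves a worked example: t counts GPU-sized pages from the start of the aperture and p counts CPU-sized pages, so each bound CPU page expands into PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE consecutive GPU page-table entries. The standalone sketch below replays that math with purely illustrative page sizes (a 4 KiB CPU page and a 1 KiB GPU page; the real constants live elsewhere in the driver).

```c
/* Sketch of the GART index math from amdgpu_gart_bind()/unbind().
 * Page sizes are chosen purely for illustration. */
#include <stdio.h>

#define CPU_PAGE_SIZE 4096u	/* stands in for PAGE_SIZE */
#define GPU_PAGE_SIZE 1024u	/* stands in for AMDGPU_GPU_PAGE_SIZE */

int main(void)
{
	unsigned offset = 3 * CPU_PAGE_SIZE;	/* byte offset into the aperture */
	unsigned t = offset / GPU_PAGE_SIZE;	/* first GPU page-table entry */
	unsigned p = t / (CPU_PAGE_SIZE / GPU_PAGE_SIZE);	/* CPU page index */
	unsigned j;

	printf("offset %u -> gpu pte %u, cpu page %u\n", offset, t, p);

	/* one CPU page covers this many consecutive GPU PTEs */
	for (j = 0; j < CPU_PAGE_SIZE / GPU_PAGE_SIZE; j++, t++)
		printf("  pte %u -> dma_addr(page %u) + %u\n",
		       t, p, j * GPU_PAGE_SIZE);
	return 0;
}
```
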
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h new file mode 100644 index 000000000000..c3f4e85594ff --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h | |||
| @@ -0,0 +1,72 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __AMDGPU_GDS_H__ | ||
| 25 | #define __AMDGPU_GDS_H__ | ||
| 26 | |||
| 27 | /* Because TTM requests that allocated buffers be PAGE_SIZE aligned, | ||
| 28 | * we report the GDS/GWS/OA sizes as PAGE_SIZE aligned. | ||
| 29 | */ | ||
| 30 | #define AMDGPU_GDS_SHIFT 2 | ||
| 31 | #define AMDGPU_GWS_SHIFT PAGE_SHIFT | ||
| 32 | #define AMDGPU_OA_SHIFT PAGE_SHIFT | ||
| 33 | |||
| 34 | #define AMDGPU_PL_GDS TTM_PL_PRIV0 | ||
| 35 | #define AMDGPU_PL_GWS TTM_PL_PRIV1 | ||
| 36 | #define AMDGPU_PL_OA TTM_PL_PRIV2 | ||
| 37 | |||
| 38 | #define AMDGPU_PL_FLAG_GDS TTM_PL_FLAG_PRIV0 | ||
| 39 | #define AMDGPU_PL_FLAG_GWS TTM_PL_FLAG_PRIV1 | ||
| 40 | #define AMDGPU_PL_FLAG_OA TTM_PL_FLAG_PRIV2 | ||
| 41 | |||
| 42 | struct amdgpu_ring; | ||
| 43 | struct amdgpu_bo; | ||
| 44 | |||
| 45 | struct amdgpu_gds_asic_info { | ||
| 46 | uint32_t total_size; | ||
| 47 | uint32_t gfx_partition_size; | ||
| 48 | uint32_t cs_partition_size; | ||
| 49 | }; | ||
| 50 | |||
| 51 | struct amdgpu_gds { | ||
| 52 | struct amdgpu_gds_asic_info mem; | ||
| 53 | struct amdgpu_gds_asic_info gws; | ||
| 54 | struct amdgpu_gds_asic_info oa; | ||
| 55 | /* At present, the GDS, GWS and OA resources for gfx (graphics) | ||
| 56 | * are always pre-allocated and available for graphics operation. | ||
| 57 | * These resources are shared between all gfx clients. | ||
| 58 | * TODO: move this operation to user space | ||
| 59 | */ | ||
| 60 | struct amdgpu_bo* gds_gfx_bo; | ||
| 61 | struct amdgpu_bo* gws_gfx_bo; | ||
| 62 | struct amdgpu_bo* oa_gfx_bo; | ||
| 63 | }; | ||
| 64 | |||
| 65 | struct amdgpu_gds_reg_offset { | ||
| 66 | uint32_t mem_base; | ||
| 67 | uint32_t mem_size; | ||
| 68 | uint32_t gws; | ||
| 69 | uint32_t oa; | ||
| 70 | }; | ||
| 71 | |||
| 72 | #endif /* __AMDGPU_GDS_H__ */ | ||
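
The shift constants above are applied when a GDS/GWS/OA buffer is created: amdgpu_gem_create_ioctl (later in this patch) scales the user-supplied bo_size by the matching shift and then rounds the result up to PAGE_SIZE, per the comment at the top of this header. Here is a small sketch of that conversion, with the GDS shift copied from this header and an assumed 4 KiB page:

```c
/* Sketch of the GDS size conversion done in the create ioctl.
 * PAGE_SHIFT/PAGE_SIZE values are illustrative (4 KiB pages). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1u << PAGE_SHIFT)
#define AMDGPU_GDS_SHIFT 2	/* from amdgpu_gds.h above */

static uint64_t roundup_page(uint64_t size)
{
	return (size + PAGE_SIZE - 1) & ~(uint64_t)(PAGE_SIZE - 1);
}

int main(void)
{
	uint64_t units = 100;				/* user-supplied bo_size */
	uint64_t bytes = units << AMDGPU_GDS_SHIFT;	/* 400 bytes of GDS */

	printf("%llu units -> %llu bytes -> %llu after page roundup\n",
	       (unsigned long long)units,
	       (unsigned long long)bytes,
	       (unsigned long long)roundup_page(bytes));
	return 0;
}
```
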
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c new file mode 100644 index 000000000000..5fd0bc73b302 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
| @@ -0,0 +1,735 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #include <linux/ktime.h> | ||
| 29 | #include <drm/drmP.h> | ||
| 30 | #include <drm/amdgpu_drm.h> | ||
| 31 | #include "amdgpu.h" | ||
| 32 | |||
| 33 | void amdgpu_gem_object_free(struct drm_gem_object *gobj) | ||
| 34 | { | ||
| 35 | struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); | ||
| 36 | |||
| 37 | if (robj) { | ||
| 38 | if (robj->gem_base.import_attach) | ||
| 39 | drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); | ||
| 40 | amdgpu_bo_unref(&robj); | ||
| 41 | } | ||
| 42 | } | ||
| 43 | |||
| 44 | int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, | ||
| 45 | int alignment, u32 initial_domain, | ||
| 46 | u64 flags, bool kernel, | ||
| 47 | struct drm_gem_object **obj) | ||
| 48 | { | ||
| 49 | struct amdgpu_bo *robj; | ||
| 50 | unsigned long max_size; | ||
| 51 | int r; | ||
| 52 | |||
| 53 | *obj = NULL; | ||
| 54 | /* At least align on page size */ | ||
| 55 | if (alignment < PAGE_SIZE) { | ||
| 56 | alignment = PAGE_SIZE; | ||
| 57 | } | ||
| 58 | |||
| 59 | if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) { | ||
| 60 | /* Maximum bo size is the unpinned gtt size since we use the gtt to | ||
| 61 | * handle vram to system pool migrations. | ||
| 62 | */ | ||
| 63 | max_size = adev->mc.gtt_size - adev->gart_pin_size; | ||
| 64 | if (size > max_size) { | ||
| 65 | DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n", | ||
| 66 | size >> 20, max_size >> 20); | ||
| 67 | return -ENOMEM; | ||
| 68 | } | ||
| 69 | } | ||
| 70 | retry: | ||
| 71 | r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj); | ||
| 72 | if (r) { | ||
| 73 | if (r != -ERESTARTSYS) { | ||
| 74 | if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { | ||
| 75 | initial_domain |= AMDGPU_GEM_DOMAIN_GTT; | ||
| 76 | goto retry; | ||
| 77 | } | ||
| 78 | DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n", | ||
| 79 | size, initial_domain, alignment, r); | ||
| 80 | } | ||
| 81 | return r; | ||
| 82 | } | ||
| 83 | *obj = &robj->gem_base; | ||
| 84 | robj->pid = task_pid_nr(current); | ||
| 85 | |||
| 86 | mutex_lock(&adev->gem.mutex); | ||
| 87 | list_add_tail(&robj->list, &adev->gem.objects); | ||
| 88 | mutex_unlock(&adev->gem.mutex); | ||
| 89 | |||
| 90 | return 0; | ||
| 91 | } | ||
| 92 | |||
| 93 | int amdgpu_gem_init(struct amdgpu_device *adev) | ||
| 94 | { | ||
| 95 | INIT_LIST_HEAD(&adev->gem.objects); | ||
| 96 | return 0; | ||
| 97 | } | ||
| 98 | |||
| 99 | void amdgpu_gem_fini(struct amdgpu_device *adev) | ||
| 100 | { | ||
| 101 | amdgpu_bo_force_delete(adev); | ||
| 102 | } | ||
| 103 | |||
| 104 | /* | ||
| 105 | * Called from drm_gem_handle_create, which appears in both the new and | ||
| 106 | * open ioctl cases. | ||
| 107 | */ | ||
| 108 | int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) | ||
| 109 | { | ||
| 110 | struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj); | ||
| 111 | struct amdgpu_device *adev = rbo->adev; | ||
| 112 | struct amdgpu_fpriv *fpriv = file_priv->driver_priv; | ||
| 113 | struct amdgpu_vm *vm = &fpriv->vm; | ||
| 114 | struct amdgpu_bo_va *bo_va; | ||
| 115 | int r; | ||
| 116 | |||
| 117 | r = amdgpu_bo_reserve(rbo, false); | ||
| 118 | if (r) { | ||
| 119 | return r; | ||
| 120 | } | ||
| 121 | |||
| 122 | bo_va = amdgpu_vm_bo_find(vm, rbo); | ||
| 123 | if (!bo_va) { | ||
| 124 | bo_va = amdgpu_vm_bo_add(adev, vm, rbo); | ||
| 125 | } else { | ||
| 126 | ++bo_va->ref_count; | ||
| 127 | } | ||
| 128 | amdgpu_bo_unreserve(rbo); | ||
| 129 | |||
| 130 | return 0; | ||
| 131 | } | ||
| 132 | |||
| 133 | void amdgpu_gem_object_close(struct drm_gem_object *obj, | ||
| 134 | struct drm_file *file_priv) | ||
| 135 | { | ||
| 136 | struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj); | ||
| 137 | struct amdgpu_device *adev = rbo->adev; | ||
| 138 | struct amdgpu_fpriv *fpriv = file_priv->driver_priv; | ||
| 139 | struct amdgpu_vm *vm = &fpriv->vm; | ||
| 140 | struct amdgpu_bo_va *bo_va; | ||
| 141 | int r; | ||
| 142 | |||
| 143 | r = amdgpu_bo_reserve(rbo, true); | ||
| 144 | if (r) { | ||
| 145 | dev_err(adev->dev, "leaking bo va because " | ||
| 146 | "we fail to reserve bo (%d)\n", r); | ||
| 147 | return; | ||
| 148 | } | ||
| 149 | bo_va = amdgpu_vm_bo_find(vm, rbo); | ||
| 150 | if (bo_va) { | ||
| 151 | if (--bo_va->ref_count == 0) { | ||
| 152 | amdgpu_vm_bo_rmv(adev, bo_va); | ||
| 153 | } | ||
| 154 | } | ||
| 155 | amdgpu_bo_unreserve(rbo); | ||
| 156 | } | ||
| 157 | |||
| 158 | static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r) | ||
| 159 | { | ||
| 160 | if (r == -EDEADLK) { | ||
| 161 | r = amdgpu_gpu_reset(adev); | ||
| 162 | if (!r) | ||
| 163 | r = -EAGAIN; | ||
| 164 | } | ||
| 165 | return r; | ||
| 166 | } | ||
| 167 | |||
| 168 | /* | ||
| 169 | * GEM ioctls. | ||
| 170 | */ | ||
| 171 | int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, | ||
| 172 | struct drm_file *filp) | ||
| 173 | { | ||
| 174 | struct amdgpu_device *adev = dev->dev_private; | ||
| 175 | union drm_amdgpu_gem_create *args = data; | ||
| 176 | uint64_t size = args->in.bo_size; | ||
| 177 | struct drm_gem_object *gobj; | ||
| 178 | uint32_t handle; | ||
| 179 | bool kernel = false; | ||
| 180 | int r; | ||
| 181 | |||
| 182 | down_read(&adev->exclusive_lock); | ||
| 183 | /* create a gem object to contain this object in */ | ||
| 184 | if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | | ||
| 185 | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { | ||
| 186 | kernel = true; | ||
| 187 | if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS) | ||
| 188 | size = size << AMDGPU_GDS_SHIFT; | ||
| 189 | else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS) | ||
| 190 | size = size << AMDGPU_GWS_SHIFT; | ||
| 191 | else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA) | ||
| 192 | size = size << AMDGPU_OA_SHIFT; | ||
| 193 | else { | ||
| 194 | r = -EINVAL; | ||
| 195 | goto error_unlock; | ||
| 196 | } | ||
| 197 | } | ||
| 198 | size = roundup(size, PAGE_SIZE); | ||
| 199 | |||
| 200 | r = amdgpu_gem_object_create(adev, size, args->in.alignment, | ||
| 201 | (u32)(0xffffffff & args->in.domains), | ||
| 202 | args->in.domain_flags, | ||
| 203 | kernel, &gobj); | ||
| 204 | if (r) | ||
| 205 | goto error_unlock; | ||
| 206 | |||
| 207 | r = drm_gem_handle_create(filp, gobj, &handle); | ||
| 208 | /* drop reference from allocate - handle holds it now */ | ||
| 209 | drm_gem_object_unreference_unlocked(gobj); | ||
| 210 | if (r) | ||
| 211 | goto error_unlock; | ||
| 212 | |||
| 213 | memset(args, 0, sizeof(*args)); | ||
| 214 | args->out.handle = handle; | ||
| 215 | up_read(&adev->exclusive_lock); | ||
| 216 | return 0; | ||
| 217 | |||
| 218 | error_unlock: | ||
| 219 | up_read(&adev->exclusive_lock); | ||
| 220 | r = amdgpu_gem_handle_lockup(adev, r); | ||
| 221 | return r; | ||
| 222 | } | ||
| 223 | |||
| 224 | int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, | ||
| 225 | struct drm_file *filp) | ||
| 226 | { | ||
| 227 | struct amdgpu_device *adev = dev->dev_private; | ||
| 228 | struct drm_amdgpu_gem_userptr *args = data; | ||
| 229 | struct drm_gem_object *gobj; | ||
| 230 | struct amdgpu_bo *bo; | ||
| 231 | uint32_t handle; | ||
| 232 | int r; | ||
| 233 | |||
| 234 | if (offset_in_page(args->addr | args->size)) | ||
| 235 | return -EINVAL; | ||
| 236 | |||
| 237 | /* reject unknown flag values */ | ||
| 238 | if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY | | ||
| 239 | AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE | | ||
| 240 | AMDGPU_GEM_USERPTR_REGISTER)) | ||
| 241 | return -EINVAL; | ||
| 242 | |||
| 243 | if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) || | ||
| 244 | !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) { | ||
| 245 | |||
| 246 | /* if we want to write to it we must require anonymous | ||
| 247 | memory and install an MMU notifier */ | ||
| 248 | return -EACCES; | ||
| 249 | } | ||
| 250 | |||
| 251 | down_read(&adev->exclusive_lock); | ||
| 252 | |||
| 253 | /* create a gem object to contain this object in */ | ||
| 254 | r = amdgpu_gem_object_create(adev, args->size, 0, | ||
| 255 | AMDGPU_GEM_DOMAIN_CPU, 0, | ||
| 256 | 0, &gobj); | ||
| 257 | if (r) | ||
| 258 | goto handle_lockup; | ||
| 259 | |||
| 260 | bo = gem_to_amdgpu_bo(gobj); | ||
| 261 | r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); | ||
| 262 | if (r) | ||
| 263 | goto release_object; | ||
| 264 | |||
| 265 | if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) { | ||
| 266 | r = amdgpu_mn_register(bo, args->addr); | ||
| 267 | if (r) | ||
| 268 | goto release_object; | ||
| 269 | } | ||
| 270 | |||
| 271 | if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { | ||
| 272 | down_read(¤t->mm->mmap_sem); | ||
| 273 | r = amdgpu_bo_reserve(bo, true); | ||
| 274 | if (r) { | ||
| 275 | up_read(¤t->mm->mmap_sem); | ||
| 276 | goto release_object; | ||
| 277 | } | ||
| 278 | |||
| 279 | amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); | ||
| 280 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); | ||
| 281 | amdgpu_bo_unreserve(bo); | ||
| 282 | up_read(¤t->mm->mmap_sem); | ||
| 283 | if (r) | ||
| 284 | goto release_object; | ||
| 285 | } | ||
| 286 | |||
| 287 | r = drm_gem_handle_create(filp, gobj, &handle); | ||
| 288 | /* drop reference from allocate - handle holds it now */ | ||
| 289 | drm_gem_object_unreference_unlocked(gobj); | ||
| 290 | if (r) | ||
| 291 | goto handle_lockup; | ||
| 292 | |||
| 293 | args->handle = handle; | ||
| 294 | up_read(&adev->exclusive_lock); | ||
| 295 | return 0; | ||
| 296 | |||
| 297 | release_object: | ||
| 298 | drm_gem_object_unreference_unlocked(gobj); | ||
| 299 | |||
| 300 | handle_lockup: | ||
| 301 | up_read(&adev->exclusive_lock); | ||
| 302 | r = amdgpu_gem_handle_lockup(adev, r); | ||
| 303 | |||
| 304 | return r; | ||
| 305 | } | ||
| 306 | |||
| 307 | int amdgpu_mode_dumb_mmap(struct drm_file *filp, | ||
| 308 | struct drm_device *dev, | ||
| 309 | uint32_t handle, uint64_t *offset_p) | ||
| 310 | { | ||
| 311 | struct drm_gem_object *gobj; | ||
| 312 | struct amdgpu_bo *robj; | ||
| 313 | |||
| 314 | gobj = drm_gem_object_lookup(dev, filp, handle); | ||
| 315 | if (gobj == NULL) { | ||
| 316 | return -ENOENT; | ||
| 317 | } | ||
| 318 | robj = gem_to_amdgpu_bo(gobj); | ||
| 319 | if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) { | ||
| 320 | drm_gem_object_unreference_unlocked(gobj); | ||
| 321 | return -EPERM; | ||
| 322 | } | ||
| 323 | *offset_p = amdgpu_bo_mmap_offset(robj); | ||
| 324 | drm_gem_object_unreference_unlocked(gobj); | ||
| 325 | return 0; | ||
| 326 | } | ||
| 327 | |||
| 328 | int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, | ||
| 329 | struct drm_file *filp) | ||
| 330 | { | ||
| 331 | union drm_amdgpu_gem_mmap *args = data; | ||
| 332 | uint32_t handle = args->in.handle; | ||
| 333 | memset(args, 0, sizeof(*args)); | ||
| 334 | return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr); | ||
| 335 | } | ||
| 336 | |||
| 337 | /** | ||
| 338 | * amdgpu_gem_timeout - calculate jiffies timeout from absolute value | ||
| 339 | * | ||
| 340 | * @timeout_ns: timeout in ns | ||
| 341 | * | ||
| 342 | * Calculate the timeout in jiffies from an absolute timeout in ns. | ||
| 343 | */ | ||
| 344 | unsigned long amdgpu_gem_timeout(uint64_t timeout_ns) | ||
| 345 | { | ||
| 346 | unsigned long timeout_jiffies; | ||
| 347 | ktime_t timeout; | ||
| 348 | |||
| 349 | /* clamp timeout if it's too large */ | ||
| 350 | if (((int64_t)timeout_ns) < 0) | ||
| 351 | return MAX_SCHEDULE_TIMEOUT; | ||
| 352 | |||
| 353 | timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get()); | ||
| 354 | if (ktime_to_ns(timeout) < 0) | ||
| 355 | return 0; | ||
| 356 | |||
| 357 | timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout)); | ||
| 358 | /* clamp timeout to avoid unsigned -> signed overflow */ | ||
| 359 | if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT) | ||
| 360 | return MAX_SCHEDULE_TIMEOUT - 1; | ||
| 361 | |||
| 362 | return timeout_jiffies; | ||
| 363 | } | ||
| 364 | |||
| 365 | int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | ||
| 366 | struct drm_file *filp) | ||
| 367 | { | ||
| 368 | struct amdgpu_device *adev = dev->dev_private; | ||
| 369 | union drm_amdgpu_gem_wait_idle *args = data; | ||
| 370 | struct drm_gem_object *gobj; | ||
| 371 | struct amdgpu_bo *robj; | ||
| 372 | uint32_t handle = args->in.handle; | ||
| 373 | unsigned long timeout = amdgpu_gem_timeout(args->in.timeout); | ||
| 374 | int r = 0; | ||
| 375 | long ret; | ||
| 376 | |||
| 377 | gobj = drm_gem_object_lookup(dev, filp, handle); | ||
| 378 | if (gobj == NULL) { | ||
| 379 | return -ENOENT; | ||
| 380 | } | ||
| 381 | robj = gem_to_amdgpu_bo(gobj); | ||
| 382 | if (timeout == 0) | ||
| 383 | ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true); | ||
| 384 | else | ||
| 385 | ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout); | ||
| 386 | |||
| 387 | /* ret == 0 means not signaled, | ||
| 388 | * ret > 0 means signaled | ||
| 389 | * ret < 0 means interrupted before timeout | ||
| 390 | */ | ||
| 391 | if (ret >= 0) { | ||
| 392 | memset(args, 0, sizeof(*args)); | ||
| 393 | args->out.status = (ret == 0); | ||
| 394 | } else | ||
| 395 | r = ret; | ||
| 396 | |||
| 397 | drm_gem_object_unreference_unlocked(gobj); | ||
| 398 | r = amdgpu_gem_handle_lockup(adev, r); | ||
| 399 | return r; | ||
| 400 | } | ||
| 401 | |||
| 402 | int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, | ||
| 403 | struct drm_file *filp) | ||
| 404 | { | ||
| 405 | struct drm_amdgpu_gem_metadata *args = data; | ||
| 406 | struct drm_gem_object *gobj; | ||
| 407 | struct amdgpu_bo *robj; | ||
| 408 | int r = -1; | ||
| 409 | |||
| 410 | DRM_DEBUG("%d\n", args->handle); | ||
| 411 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | ||
| 412 | if (gobj == NULL) | ||
| 413 | return -ENOENT; | ||
| 414 | robj = gem_to_amdgpu_bo(gobj); | ||
| 415 | |||
| 416 | r = amdgpu_bo_reserve(robj, false); | ||
| 417 | if (unlikely(r != 0)) | ||
| 418 | goto out; | ||
| 419 | |||
| 420 | if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) { | ||
| 421 | amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info); | ||
| 422 | r = amdgpu_bo_get_metadata(robj, args->data.data, | ||
| 423 | sizeof(args->data.data), | ||
| 424 | &args->data.data_size_bytes, | ||
| 425 | &args->data.flags); | ||
| 426 | } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) { | ||
| 427 | r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info); | ||
| 428 | if (!r) | ||
| 429 | r = amdgpu_bo_set_metadata(robj, args->data.data, | ||
| 430 | args->data.data_size_bytes, | ||
| 431 | args->data.flags); | ||
| 432 | } | ||
| 433 | |||
| 434 | amdgpu_bo_unreserve(robj); | ||
| 435 | out: | ||
| 436 | drm_gem_object_unreference_unlocked(gobj); | ||
| 437 | return r; | ||
| 438 | } | ||
| 439 | |||
| 440 | /** | ||
| 441 | * amdgpu_gem_va_update_vm - update the bo_va in its VM | ||
| 442 | * | ||
| 443 | * @adev: amdgpu_device pointer | ||
| 444 | * @bo_va: bo_va to update | ||
| 445 | * | ||
| 446 | * Update the bo_va directly after setting its address. Errors are not | ||
| 447 | * vital here, so they are not reported back to userspace. | ||
| 448 | */ | ||
| 449 | static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | ||
| 450 | struct amdgpu_bo_va *bo_va) | ||
| 451 | { | ||
| 452 | struct ttm_validate_buffer tv, *entry; | ||
| 453 | struct amdgpu_bo_list_entry *vm_bos; | ||
| 454 | struct ww_acquire_ctx ticket; | ||
| 455 | struct list_head list; | ||
| 456 | unsigned domain; | ||
| 457 | int r; | ||
| 458 | |||
| 459 | INIT_LIST_HEAD(&list); | ||
| 460 | |||
| 461 | tv.bo = &bo_va->bo->tbo; | ||
| 462 | tv.shared = true; | ||
| 463 | list_add(&tv.head, &list); | ||
| 464 | |||
| 465 | vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list); | ||
| 466 | if (!vm_bos) | ||
| 467 | return; | ||
| 468 | |||
| 469 | r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); | ||
| 470 | if (r) | ||
| 471 | goto error_free; | ||
| 472 | |||
| 473 | list_for_each_entry(entry, &list, head) { | ||
| 474 | domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); | ||
| 475 | /* if anything is swapped out don't swap it in here, | ||
| 476 | just abort and wait for the next CS */ | ||
| 477 | if (domain == AMDGPU_GEM_DOMAIN_CPU) | ||
| 478 | goto error_unreserve; | ||
| 479 | } | ||
| 480 | |||
| 481 | mutex_lock(&bo_va->vm->mutex); | ||
| 482 | r = amdgpu_vm_clear_freed(adev, bo_va->vm); | ||
| 483 | if (r) | ||
| 484 | goto error_unlock; | ||
| 485 | |||
| 486 | r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem); | ||
| 487 | |||
| 488 | error_unlock: | ||
| 489 | mutex_unlock(&bo_va->vm->mutex); | ||
| 490 | |||
| 491 | error_unreserve: | ||
| 492 | ttm_eu_backoff_reservation(&ticket, &list); | ||
| 493 | |||
| 494 | error_free: | ||
| 495 | drm_free_large(vm_bos); | ||
| 496 | |||
| 497 | if (r) | ||
| 498 | DRM_ERROR("Couldn't update BO_VA (%d)\n", r); | ||
| 499 | } | ||
| 500 | |||
| 501 | |||
| 502 | |||
| 503 | int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | ||
| 504 | struct drm_file *filp) | ||
| 505 | { | ||
| 506 | union drm_amdgpu_gem_va *args = data; | ||
| 507 | struct drm_gem_object *gobj; | ||
| 508 | struct amdgpu_device *adev = dev->dev_private; | ||
| 509 | struct amdgpu_fpriv *fpriv = filp->driver_priv; | ||
| 510 | struct amdgpu_bo *rbo; | ||
| 511 | struct amdgpu_bo_va *bo_va; | ||
| 512 | uint32_t invalid_flags, va_flags = 0; | ||
| 513 | int r = 0; | ||
| 514 | |||
| 515 | if (!adev->vm_manager.enabled) { | ||
| 516 | memset(args, 0, sizeof(*args)); | ||
| 517 | args->out.result = AMDGPU_VA_RESULT_ERROR; | ||
| 518 | return -ENOTTY; | ||
| 519 | } | ||
| 520 | |||
| 521 | if (args->in.va_address < AMDGPU_VA_RESERVED_SIZE) { | ||
| 522 | dev_err(&dev->pdev->dev, | ||
| 523 | "va_address 0x%lX is in reserved area 0x%X\n", | ||
| 524 | (unsigned long)args->in.va_address, | ||
| 525 | AMDGPU_VA_RESERVED_SIZE); | ||
| 526 | memset(args, 0, sizeof(*args)); | ||
| 527 | args->out.result = AMDGPU_VA_RESULT_ERROR; | ||
| 528 | return -EINVAL; | ||
| 529 | } | ||
| 530 | |||
| 531 | invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | | ||
| 532 | AMDGPU_VM_PAGE_EXECUTABLE); | ||
| 533 | if ((args->in.flags & invalid_flags)) { | ||
| 534 | dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n", | ||
| 535 | args->in.flags, invalid_flags); | ||
| 536 | memset(args, 0, sizeof(*args)); | ||
| 537 | args->out.result = AMDGPU_VA_RESULT_ERROR; | ||
| 538 | return -EINVAL; | ||
| 539 | } | ||
| 540 | |||
| 541 | switch (args->in.operation) { | ||
| 542 | case AMDGPU_VA_OP_MAP: | ||
| 543 | case AMDGPU_VA_OP_UNMAP: | ||
| 544 | break; | ||
| 545 | default: | ||
| 546 | dev_err(&dev->pdev->dev, "unsupported operation %d\n", | ||
| 547 | args->in.operation); | ||
| 548 | memset(args, 0, sizeof(*args)); | ||
| 549 | args->out.result = AMDGPU_VA_RESULT_ERROR; | ||
| 550 | return -EINVAL; | ||
| 551 | } | ||
| 552 | |||
| 553 | gobj = drm_gem_object_lookup(dev, filp, args->in.handle); | ||
| 554 | if (gobj == NULL) { | ||
| 555 | memset(args, 0, sizeof(*args)); | ||
| 556 | args->out.result = AMDGPU_VA_RESULT_ERROR; | ||
| 557 | return -ENOENT; | ||
| 558 | } | ||
| 559 | rbo = gem_to_amdgpu_bo(gobj); | ||
| 560 | r = amdgpu_bo_reserve(rbo, false); | ||
| 561 | if (r) { | ||
| 562 | if (r != -ERESTARTSYS) { | ||
| 563 | memset(args, 0, sizeof(*args)); | ||
| 564 | args->out.result = AMDGPU_VA_RESULT_ERROR; | ||
| 565 | } | ||
| 566 | drm_gem_object_unreference_unlocked(gobj); | ||
| 567 | return r; | ||
| 568 | } | ||
| 569 | bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo); | ||
| 570 | if (!bo_va) { | ||
| 571 | memset(args, 0, sizeof(*args)); | ||
| 572 | args->out.result = AMDGPU_VA_RESULT_ERROR; | ||
| 573 | drm_gem_object_unreference_unlocked(gobj); | ||
| 574 | return -ENOENT; | ||
| 575 | } | ||
| 576 | |||
| 577 | switch (args->in.operation) { | ||
| 578 | case AMDGPU_VA_OP_MAP: | ||
| 579 | if (args->in.flags & AMDGPU_VM_PAGE_READABLE) | ||
| 580 | va_flags |= AMDGPU_PTE_READABLE; | ||
| 581 | if (args->in.flags & AMDGPU_VM_PAGE_WRITEABLE) | ||
| 582 | va_flags |= AMDGPU_PTE_WRITEABLE; | ||
| 583 | if (args->in.flags & AMDGPU_VM_PAGE_EXECUTABLE) | ||
| 584 | va_flags |= AMDGPU_PTE_EXECUTABLE; | ||
| 585 | r = amdgpu_vm_bo_map(adev, bo_va, args->in.va_address, 0, | ||
| 586 | amdgpu_bo_size(bo_va->bo), va_flags); | ||
| 587 | break; | ||
| 588 | case AMDGPU_VA_OP_UNMAP: | ||
| 589 | r = amdgpu_vm_bo_unmap(adev, bo_va, args->in.va_address); | ||
| 590 | break; | ||
| 591 | default: | ||
| 592 | break; | ||
| 593 | } | ||
| 594 | |||
| 595 | if (!r) { | ||
| 596 | amdgpu_gem_va_update_vm(adev, bo_va); | ||
| 597 | memset(args, 0, sizeof(*args)); | ||
| 598 | args->out.result = AMDGPU_VA_RESULT_OK; | ||
| 599 | } else { | ||
| 600 | memset(args, 0, sizeof(*args)); | ||
| 601 | args->out.result = AMDGPU_VA_RESULT_ERROR; | ||
| 602 | } | ||
| 603 | |||
| 604 | drm_gem_object_unreference_unlocked(gobj); | ||
| 605 | return r; | ||
| 606 | } | ||
| 607 | |||
| 608 | int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, | ||
| 609 | struct drm_file *filp) | ||
| 610 | { | ||
| 611 | struct drm_amdgpu_gem_op *args = data; | ||
| 612 | struct drm_gem_object *gobj; | ||
| 613 | struct amdgpu_bo *robj; | ||
| 614 | int r; | ||
| 615 | |||
| 616 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | ||
| 617 | if (gobj == NULL) { | ||
| 618 | return -ENOENT; | ||
| 619 | } | ||
| 620 | robj = gem_to_amdgpu_bo(gobj); | ||
| 621 | |||
| 622 | r = amdgpu_bo_reserve(robj, false); | ||
| 623 | if (unlikely(r)) | ||
| 624 | goto out; | ||
| 625 | |||
| 626 | switch (args->op) { | ||
| 627 | case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: { | ||
| 628 | struct drm_amdgpu_gem_create_in info; | ||
| 629 | void __user *out = (void __user *)(long)args->value; | ||
| 630 | |||
| 631 | info.bo_size = robj->gem_base.size; | ||
| 632 | info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; | ||
| 633 | info.domains = robj->initial_domain; | ||
| 634 | info.domain_flags = robj->flags; | ||
| 635 | if (copy_to_user(out, &info, sizeof(info))) | ||
| 636 | r = -EFAULT; | ||
| 637 | break; | ||
| 638 | } | ||
| 639 | case AMDGPU_GEM_OP_SET_INITIAL_DOMAIN: | ||
| 640 | if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) { | ||
| 641 | r = -EPERM; | ||
| 642 | break; | ||
| 643 | } | ||
| 644 | robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM | | ||
| 645 | AMDGPU_GEM_DOMAIN_GTT | | ||
| 646 | AMDGPU_GEM_DOMAIN_CPU); | ||
| 647 | break; | ||
| 648 | default: | ||
| 649 | r = -EINVAL; | ||
| 650 | } | ||
| 651 | |||
| 652 | amdgpu_bo_unreserve(robj); | ||
| 653 | out: | ||
| 654 | drm_gem_object_unreference_unlocked(gobj); | ||
| 655 | return r; | ||
| 656 | } | ||
| 657 | |||
| 658 | int amdgpu_mode_dumb_create(struct drm_file *file_priv, | ||
| 659 | struct drm_device *dev, | ||
| 660 | struct drm_mode_create_dumb *args) | ||
| 661 | { | ||
| 662 | struct amdgpu_device *adev = dev->dev_private; | ||
| 663 | struct drm_gem_object *gobj; | ||
| 664 | uint32_t handle; | ||
| 665 | int r; | ||
| 666 | |||
| 667 | args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8); | ||
| 668 | args->size = args->pitch * args->height; | ||
| 669 | args->size = ALIGN(args->size, PAGE_SIZE); | ||
| 670 | |||
| 671 | r = amdgpu_gem_object_create(adev, args->size, 0, | ||
| 672 | AMDGPU_GEM_DOMAIN_VRAM, | ||
| 673 | 0, ttm_bo_type_device, | ||
| 674 | &gobj); | ||
| 675 | if (r) | ||
| 676 | return -ENOMEM; | ||
| 677 | |||
| 678 | r = drm_gem_handle_create(file_priv, gobj, &handle); | ||
| 679 | /* drop reference from allocate - handle holds it now */ | ||
| 680 | drm_gem_object_unreference_unlocked(gobj); | ||
| 681 | if (r) { | ||
| 682 | return r; | ||
| 683 | } | ||
| 684 | args->handle = handle; | ||
| 685 | return 0; | ||
| 686 | } | ||
| 687 | |||
| 688 | #if defined(CONFIG_DEBUG_FS) | ||
| 689 | static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data) | ||
| 690 | { | ||
| 691 | struct drm_info_node *node = (struct drm_info_node *)m->private; | ||
| 692 | struct drm_device *dev = node->minor->dev; | ||
| 693 | struct amdgpu_device *adev = dev->dev_private; | ||
| 694 | struct amdgpu_bo *rbo; | ||
| 695 | unsigned i = 0; | ||
| 696 | |||
| 697 | mutex_lock(&adev->gem.mutex); | ||
| 698 | list_for_each_entry(rbo, &adev->gem.objects, list) { | ||
| 699 | unsigned domain; | ||
| 700 | const char *placement; | ||
| 701 | |||
| 702 | domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type); | ||
| 703 | switch (domain) { | ||
| 704 | case AMDGPU_GEM_DOMAIN_VRAM: | ||
| 705 | placement = "VRAM"; | ||
| 706 | break; | ||
| 707 | case AMDGPU_GEM_DOMAIN_GTT: | ||
| 708 | placement = " GTT"; | ||
| 709 | break; | ||
| 710 | case AMDGPU_GEM_DOMAIN_CPU: | ||
| 711 | default: | ||
| 712 | placement = " CPU"; | ||
| 713 | break; | ||
| 714 | } | ||
| 715 | seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n", | ||
| 716 | i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20, | ||
| 717 | placement, (unsigned long)rbo->pid); | ||
| 718 | i++; | ||
| 719 | } | ||
| 720 | mutex_unlock(&adev->gem.mutex); | ||
| 721 | return 0; | ||
| 722 | } | ||
| 723 | |||
| 724 | static struct drm_info_list amdgpu_debugfs_gem_list[] = { | ||
| 725 | {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL}, | ||
| 726 | }; | ||
| 727 | #endif | ||
| 728 | |||
| 729 | int amdgpu_gem_debugfs_init(struct amdgpu_device *adev) | ||
| 730 | { | ||
| 731 | #if defined(CONFIG_DEBUG_FS) | ||
| 732 | return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1); | ||
| 733 | #endif | ||
| 734 | return 0; | ||
| 735 | } | ||
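
Of the helpers above, amdgpu_gem_timeout() is the one worth replaying in isolation: a negative (top-bit-set) input means wait forever, a deadline already in the past waits not at all, and anything that overflows the jiffies range is clamped just under MAX_SCHEDULE_TIMEOUT. The sketch below approximates nsecs_to_jiffies() with a plain division and assumes HZ=250 and a stand-in MAX_SCHEDULE_TIMEOUT; both are illustrative, not the kernel's definitions.

```c
/* Standalone sketch of the amdgpu_gem_timeout() clamp logic.
 * HZ, MAX_SCHEDULE_TIMEOUT and the ns->jiffies division are
 * illustrative stand-ins for the kernel's versions. */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define HZ 250
#define MAX_SCHEDULE_TIMEOUT LONG_MAX

static unsigned long toy_gem_timeout(uint64_t timeout_ns, uint64_t now_ns)
{
	uint64_t jiffies;

	/* negative (top bit set) means wait indefinitely */
	if ((int64_t)timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	/* deadline already passed: don't wait at all */
	if (timeout_ns <= now_ns)
		return 0;

	jiffies = (timeout_ns - now_ns) / (1000000000ull / HZ);
	if (jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;
	return (unsigned long)jiffies;
}

int main(void)
{
	uint64_t now = 1000000000ull;	/* pretend "now" is t = 1s */

	/* 1.5s in the future at HZ=250 -> 375 jiffies */
	printf("%lu\n", toy_gem_timeout(now + 1500000000ull, now));
	/* already expired -> 0 */
	printf("%lu\n", toy_gem_timeout(now - 1, now));
	return 0;
}
```
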
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c new file mode 100644 index 000000000000..9f95da4f0536 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | |||
| @@ -0,0 +1,72 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | */ | ||
| 25 | #include <drm/drmP.h> | ||
| 26 | #include "amdgpu.h" | ||
| 27 | |||
| 28 | /* | ||
| 29 | * GPU scratch register helper functions. | ||
| 30 | */ | ||
| 31 | /** | ||
| 32 | * amdgpu_gfx_scratch_get - Allocate a scratch register | ||
| 33 | * | ||
| 34 | * @adev: amdgpu_device pointer | ||
| 35 | * @reg: scratch register mmio offset | ||
| 36 | * | ||
| 37 | * Allocate a CP scratch register for use by the driver (all asics). | ||
| 38 | * Returns 0 on success or -EINVAL on failure. | ||
| 39 | */ | ||
| 40 | int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg) | ||
| 41 | { | ||
| 42 | int i; | ||
| 43 | |||
| 44 | for (i = 0; i < adev->gfx.scratch.num_reg; i++) { | ||
| 45 | if (adev->gfx.scratch.free[i]) { | ||
| 46 | adev->gfx.scratch.free[i] = false; | ||
| 47 | *reg = adev->gfx.scratch.reg[i]; | ||
| 48 | return 0; | ||
| 49 | } | ||
| 50 | } | ||
| 51 | return -EINVAL; | ||
| 52 | } | ||
| 53 | |||
| 54 | /** | ||
| 55 | * amdgpu_gfx_scratch_free - Free a scratch register | ||
| 56 | * | ||
| 57 | * @adev: amdgpu_device pointer | ||
| 58 | * @reg: scratch register mmio offset | ||
| 59 | * | ||
| 60 | * Free a CP scratch register allocated for use by the driver (all asics) | ||
| 61 | */ | ||
| 62 | void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg) | ||
| 63 | { | ||
| 64 | int i; | ||
| 65 | |||
| 66 | for (i = 0; i < adev->gfx.scratch.num_reg; i++) { | ||
| 67 | if (adev->gfx.scratch.reg[i] == reg) { | ||
| 68 | adev->gfx.scratch.free[i] = true; | ||
| 69 | return; | ||
| 70 | } | ||
| 71 | } | ||
| 72 | } | ||
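
The two helpers above form a classic fixed-pool allocator: a parallel boolean array records which of a handful of scratch MMIO offsets are free, get() linearly scans for a free slot, and free() matches by offset. A self-contained sketch of the same shape, with an invented register list:

```c
/* Sketch of the scratch-register allocator pattern used above.
 * Offsets and count are invented for illustration. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_SCRATCH 4

static uint32_t scratch_reg[NUM_SCRATCH] = { 0x30, 0x34, 0x38, 0x3c };
static bool scratch_free[NUM_SCRATCH] = { true, true, true, true };

static int scratch_get(uint32_t *reg)
{
	int i;

	for (i = 0; i < NUM_SCRATCH; i++) {
		if (scratch_free[i]) {
			scratch_free[i] = false;
			*reg = scratch_reg[i];
			return 0;
		}
	}
	return -1;	/* all registers in use (-EINVAL in the driver) */
}

static void scratch_put(uint32_t reg)
{
	int i;

	for (i = 0; i < NUM_SCRATCH; i++) {
		if (scratch_reg[i] == reg) {
			scratch_free[i] = true;
			return;
		}
	}
}

int main(void)
{
	uint32_t reg;

	if (scratch_get(&reg) == 0) {
		printf("got scratch reg 0x%x\n", reg);
		scratch_put(reg);
	}
	return 0;
}
```
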
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h new file mode 100644 index 000000000000..dc06cbda7be6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | |||
| @@ -0,0 +1,30 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __AMDGPU_GFX_H__ | ||
| 25 | #define __AMDGPU_GFX_H__ | ||
| 26 | |||
| 27 | int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg); | ||
| 28 | void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg); | ||
| 29 | |||
| 30 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c new file mode 100644 index 000000000000..31a676376d73 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | |||
| @@ -0,0 +1,395 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2007-8 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the "Software"), | ||
| 7 | * to deal in the Software without restriction, including without limitation | ||
| 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 10 | * Software is furnished to do so, subject to the following conditions: | ||
| 11 | * | ||
| 12 | * The above copyright notice and this permission notice shall be included in | ||
| 13 | * all copies or substantial portions of the Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 22 | * | ||
| 23 | * Authors: Dave Airlie | ||
| 24 | * Alex Deucher | ||
| 25 | */ | ||
| 26 | #include <linux/export.h> | ||
| 27 | |||
| 28 | #include <drm/drmP.h> | ||
| 29 | #include <drm/drm_edid.h> | ||
| 30 | #include <drm/amdgpu_drm.h> | ||
| 31 | #include "amdgpu.h" | ||
| 32 | #include "amdgpu_i2c.h" | ||
| 33 | #include "amdgpu_atombios.h" | ||
| 34 | #include "atom.h" | ||
| 35 | #include "atombios_dp.h" | ||
| 36 | #include "atombios_i2c.h" | ||
| 37 | |||
| 38 | /* bit banging i2c */ | ||
| 39 | static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap) | ||
| 40 | { | ||
| 41 | struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | ||
| 42 | struct amdgpu_device *adev = i2c->dev->dev_private; | ||
| 43 | struct amdgpu_i2c_bus_rec *rec = &i2c->rec; | ||
| 44 | uint32_t temp; | ||
| 45 | |||
| 46 | mutex_lock(&i2c->mutex); | ||
| 47 | |||
| 48 | /* switch the pads to ddc mode */ | ||
| 49 | if (rec->hw_capable) { | ||
| 50 | temp = RREG32(rec->mask_clk_reg); | ||
| 51 | temp &= ~(1 << 16); | ||
| 52 | WREG32(rec->mask_clk_reg, temp); | ||
| 53 | } | ||
| 54 | |||
| 55 | /* clear the output pin values */ | ||
| 56 | temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask; | ||
| 57 | WREG32(rec->a_clk_reg, temp); | ||
| 58 | |||
| 59 | temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask; | ||
| 60 | WREG32(rec->a_data_reg, temp); | ||
| 61 | |||
| 62 | /* set the pins to input */ | ||
| 63 | temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask; | ||
| 64 | WREG32(rec->en_clk_reg, temp); | ||
| 65 | |||
| 66 | temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask; | ||
| 67 | WREG32(rec->en_data_reg, temp); | ||
| 68 | |||
| 69 | /* mask the gpio pins for software use */ | ||
| 70 | temp = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask; | ||
| 71 | WREG32(rec->mask_clk_reg, temp); | ||
| 72 | temp = RREG32(rec->mask_clk_reg); | ||
| 73 | |||
| 74 | temp = RREG32(rec->mask_data_reg) | rec->mask_data_mask; | ||
| 75 | WREG32(rec->mask_data_reg, temp); | ||
| 76 | temp = RREG32(rec->mask_data_reg); | ||
| 77 | |||
| 78 | return 0; | ||
| 79 | } | ||
| 80 | |||
| 81 | static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap) | ||
| 82 | { | ||
| 83 | struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | ||
| 84 | struct amdgpu_device *adev = i2c->dev->dev_private; | ||
| 85 | struct amdgpu_i2c_bus_rec *rec = &i2c->rec; | ||
| 86 | uint32_t temp; | ||
| 87 | |||
| 88 | /* unmask the gpio pins for software use */ | ||
| 89 | temp = RREG32(rec->mask_clk_reg) & ~rec->mask_clk_mask; | ||
| 90 | WREG32(rec->mask_clk_reg, temp); | ||
| 91 | temp = RREG32(rec->mask_clk_reg); | ||
| 92 | |||
| 93 | temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask; | ||
| 94 | WREG32(rec->mask_data_reg, temp); | ||
| 95 | temp = RREG32(rec->mask_data_reg); | ||
| 96 | |||
| 97 | mutex_unlock(&i2c->mutex); | ||
| 98 | } | ||
| 99 | |||
| 100 | static int amdgpu_i2c_get_clock(void *i2c_priv) | ||
| 101 | { | ||
| 102 | struct amdgpu_i2c_chan *i2c = i2c_priv; | ||
| 103 | struct amdgpu_device *adev = i2c->dev->dev_private; | ||
| 104 | struct amdgpu_i2c_bus_rec *rec = &i2c->rec; | ||
| 105 | uint32_t val; | ||
| 106 | |||
| 107 | /* read the value off the pin */ | ||
| 108 | val = RREG32(rec->y_clk_reg); | ||
| 109 | val &= rec->y_clk_mask; | ||
| 110 | |||
| 111 | return (val != 0); | ||
| 112 | } | ||
| 113 | |||
| 114 | |||
| 115 | static int amdgpu_i2c_get_data(void *i2c_priv) | ||
| 116 | { | ||
| 117 | struct amdgpu_i2c_chan *i2c = i2c_priv; | ||
| 118 | struct amdgpu_device *adev = i2c->dev->dev_private; | ||
| 119 | struct amdgpu_i2c_bus_rec *rec = &i2c->rec; | ||
| 120 | uint32_t val; | ||
| 121 | |||
| 122 | /* read the value off the pin */ | ||
| 123 | val = RREG32(rec->y_data_reg); | ||
| 124 | val &= rec->y_data_mask; | ||
| 125 | |||
| 126 | return (val != 0); | ||
| 127 | } | ||
| 128 | |||
| 129 | static void amdgpu_i2c_set_clock(void *i2c_priv, int clock) | ||
| 130 | { | ||
| 131 | struct amdgpu_i2c_chan *i2c = i2c_priv; | ||
| 132 | struct amdgpu_device *adev = i2c->dev->dev_private; | ||
| 133 | struct amdgpu_i2c_bus_rec *rec = &i2c->rec; | ||
| 134 | uint32_t val; | ||
| 135 | |||
| 136 | /* set pin direction */ | ||
| 137 | val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask; | ||
| 138 | val |= clock ? 0 : rec->en_clk_mask; | ||
| 139 | WREG32(rec->en_clk_reg, val); | ||
| 140 | } | ||
| 141 | |||
| 142 | static void amdgpu_i2c_set_data(void *i2c_priv, int data) | ||
| 143 | { | ||
| 144 | struct amdgpu_i2c_chan *i2c = i2c_priv; | ||
| 145 | struct amdgpu_device *adev = i2c->dev->dev_private; | ||
| 146 | struct amdgpu_i2c_bus_rec *rec = &i2c->rec; | ||
| 147 | uint32_t val; | ||
| 148 | |||
| 149 | /* set pin direction */ | ||
| 150 | val = RREG32(rec->en_data_reg) & ~rec->en_data_mask; | ||
| 151 | val |= data ? 0 : rec->en_data_mask; | ||
| 152 | WREG32(rec->en_data_reg, val); | ||
| 153 | } | ||
| 154 | |||
| 155 | static const struct i2c_algorithm amdgpu_atombios_i2c_algo = { | ||
| 156 | .master_xfer = amdgpu_atombios_i2c_xfer, | ||
| 157 | .functionality = amdgpu_atombios_i2c_func, | ||
| 158 | }; | ||
| 159 | |||
| 160 | struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, | ||
| 161 | struct amdgpu_i2c_bus_rec *rec, | ||
| 162 | const char *name) | ||
| 163 | { | ||
| 164 | struct amdgpu_i2c_chan *i2c; | ||
| 165 | int ret; | ||
| 166 | |||
| 167 | /* don't add the mm_i2c bus unless hw_i2c is enabled */ | ||
| 168 | if (rec->mm_i2c && (amdgpu_hw_i2c == 0)) | ||
| 169 | return NULL; | ||
| 170 | |||
| 171 | i2c = kzalloc(sizeof(struct amdgpu_i2c_chan), GFP_KERNEL); | ||
| 172 | if (i2c == NULL) | ||
| 173 | return NULL; | ||
| 174 | |||
| 175 | i2c->rec = *rec; | ||
| 176 | i2c->adapter.owner = THIS_MODULE; | ||
| 177 | i2c->adapter.class = I2C_CLASS_DDC; | ||
| 178 | i2c->adapter.dev.parent = &dev->pdev->dev; | ||
| 179 | i2c->dev = dev; | ||
| 180 | i2c_set_adapdata(&i2c->adapter, i2c); | ||
| 181 | mutex_init(&i2c->mutex); | ||
| 182 | if (rec->hw_capable && | ||
| 183 | amdgpu_hw_i2c) { | ||
| 184 | /* hw i2c using atom */ | ||
| 185 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), | ||
| 186 | "AMDGPU i2c hw bus %s", name); | ||
| 187 | i2c->adapter.algo = &amdgpu_atombios_i2c_algo; | ||
| 188 | ret = i2c_add_adapter(&i2c->adapter); | ||
| 189 | if (ret) { | ||
| 190 | DRM_ERROR("Failed to register hw i2c %s\n", name); | ||
| 191 | goto out_free; | ||
| 192 | } | ||
| 193 | } else { | ||
| 194 | /* set the amdgpu bit adapter */ | ||
| 195 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), | ||
| 196 | "AMDGPU i2c bit bus %s", name); | ||
| 197 | i2c->adapter.algo_data = &i2c->bit; | ||
| 198 | i2c->bit.pre_xfer = amdgpu_i2c_pre_xfer; | ||
| 199 | i2c->bit.post_xfer = amdgpu_i2c_post_xfer; | ||
| 200 | i2c->bit.setsda = amdgpu_i2c_set_data; | ||
| 201 | i2c->bit.setscl = amdgpu_i2c_set_clock; | ||
| 202 | i2c->bit.getsda = amdgpu_i2c_get_data; | ||
| 203 | i2c->bit.getscl = amdgpu_i2c_get_clock; | ||
| 204 | i2c->bit.udelay = 10; | ||
| 205 | i2c->bit.timeout = usecs_to_jiffies(2200); /* from VESA */ | ||
| 206 | i2c->bit.data = i2c; | ||
| 207 | ret = i2c_bit_add_bus(&i2c->adapter); | ||
| 208 | if (ret) { | ||
| 209 | DRM_ERROR("Failed to register bit i2c %s\n", name); | ||
| 210 | goto out_free; | ||
| 211 | } | ||
| 212 | } | ||
| 213 | |||
| 214 | return i2c; | ||
| 215 | out_free: | ||
| 216 | kfree(i2c); | ||
| 217 | return NULL; | ||
| 218 | |||
| 219 | } | ||
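A hedged usage sketch of amdgpu_i2c_create(): in practice the bus record is parsed out of the ATOM BIOS GPIO tables rather than built by hand, so every field value and the bus name below are invented placeholders, not the driver's real initialization path.

    /* Sketch only: a real amdgpu_i2c_bus_rec is filled in from the
     * ATOM BIOS GPIO_I2C info; these values are placeholders.
     */
    struct amdgpu_i2c_bus_rec rec = {
            .valid = true,
            .hw_capable = false,    /* forces the bit-banged path above */
            .mm_i2c = false,
            .i2c_id = 0x90,         /* hypothetical bus id */
            /* clk/data reg and mask fields elided */
    };
    struct amdgpu_i2c_chan *bus;

    bus = amdgpu_i2c_create(adev->ddev, &rec, "DDC1");
    if (!bus)
            DRM_ERROR("failed to create DDC1 i2c bus\n");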
| 220 | |||
| 221 | void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c) | ||
| 222 | { | ||
| 223 | if (!i2c) | ||
| 224 | return; | ||
| 225 | i2c_del_adapter(&i2c->adapter); | ||
| 226 | kfree(i2c); | ||
| 227 | } | ||
| 228 | |||
| 229 | /* Add the default buses */ | ||
| 230 | void amdgpu_i2c_init(struct amdgpu_device *adev) | ||
| 231 | { | ||
| 232 | if (amdgpu_hw_i2c) | ||
| 233 | DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n"); | ||
| 234 | |||
| 235 | if (adev->is_atom_bios) | ||
| 236 | amdgpu_atombios_i2c_init(adev); | ||
| 237 | } | ||
| 238 | |||
| 239 | /* Remove all the buses */ | ||
| 240 | void amdgpu_i2c_fini(struct amdgpu_device *adev) | ||
| 241 | { | ||
| 242 | int i; | ||
| 243 | |||
| 244 | for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) { | ||
| 245 | if (adev->i2c_bus[i]) { | ||
| 246 | amdgpu_i2c_destroy(adev->i2c_bus[i]); | ||
| 247 | adev->i2c_bus[i] = NULL; | ||
| 248 | } | ||
| 249 | } | ||
| 250 | } | ||
| 251 | |||
| 252 | /* Add additional buses */ | ||
| 253 | void amdgpu_i2c_add(struct amdgpu_device *adev, | ||
| 254 | struct amdgpu_i2c_bus_rec *rec, | ||
| 255 | const char *name) | ||
| 256 | { | ||
| 257 | struct drm_device *dev = adev->ddev; | ||
| 258 | int i; | ||
| 259 | |||
| 260 | for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) { | ||
| 261 | if (!adev->i2c_bus[i]) { | ||
| 262 | adev->i2c_bus[i] = amdgpu_i2c_create(dev, rec, name); | ||
| 263 | return; | ||
| 264 | } | ||
| 265 | } | ||
| 266 | } | ||
| 267 | |||
| 268 | /* Look up a bus based on its i2c id */ | ||
| 269 | struct amdgpu_i2c_chan * | ||
| 270 | amdgpu_i2c_lookup(struct amdgpu_device *adev, | ||
| 271 | struct amdgpu_i2c_bus_rec *i2c_bus) | ||
| 272 | { | ||
| 273 | int i; | ||
| 274 | |||
| 275 | for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) { | ||
| 276 | if (adev->i2c_bus[i] && | ||
| 277 | (adev->i2c_bus[i]->rec.i2c_id == i2c_bus->i2c_id)) { | ||
| 278 | return adev->i2c_bus[i]; | ||
| 279 | } | ||
| 280 | } | ||
| 281 | return NULL; | ||
| 282 | } | ||
| 283 | |||
| 284 | static void amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus, | ||
| 285 | u8 slave_addr, | ||
| 286 | u8 addr, | ||
| 287 | u8 *val) | ||
| 288 | { | ||
| 289 | u8 out_buf[2]; | ||
| 290 | u8 in_buf[2]; | ||
| 291 | struct i2c_msg msgs[] = { | ||
| 292 | { | ||
| 293 | .addr = slave_addr, | ||
| 294 | .flags = 0, | ||
| 295 | .len = 1, | ||
| 296 | .buf = out_buf, | ||
| 297 | }, | ||
| 298 | { | ||
| 299 | .addr = slave_addr, | ||
| 300 | .flags = I2C_M_RD, | ||
| 301 | .len = 1, | ||
| 302 | .buf = in_buf, | ||
| 303 | } | ||
| 304 | }; | ||
| 305 | |||
| 306 | out_buf[0] = addr; | ||
| 307 | out_buf[1] = 0; | ||
| 308 | |||
| 309 | if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) { | ||
| 310 | *val = in_buf[0]; | ||
| 311 | DRM_DEBUG("val = 0x%02x\n", *val); | ||
| 312 | } else { | ||
| 313 | DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n", | ||
| 314 | slave_addr, addr); | ||
| 315 | } | ||
| 316 | } | ||
| 317 | |||
| 318 | static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus, | ||
| 319 | u8 slave_addr, | ||
| 320 | u8 addr, | ||
| 321 | u8 val) | ||
| 322 | { | ||
| 323 | uint8_t out_buf[2]; | ||
| 324 | struct i2c_msg msg = { | ||
| 325 | .addr = slave_addr, | ||
| 326 | .flags = 0, | ||
| 327 | .len = 2, | ||
| 328 | .buf = out_buf, | ||
| 329 | }; | ||
| 330 | |||
| 331 | out_buf[0] = addr; | ||
| 332 | out_buf[1] = val; | ||
| 333 | |||
| 334 | if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) | ||
| 335 | DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n", | ||
| 336 | addr, val); | ||
| 337 | } | ||
| 338 | |||
| 339 | /* ddc router switching */ | ||
| 340 | void | ||
| 341 | amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector) | ||
| 342 | { | ||
| 343 | u8 val; | ||
| 344 | |||
| 345 | if (!amdgpu_connector->router.ddc_valid) | ||
| 346 | return; | ||
| 347 | |||
| 348 | if (!amdgpu_connector->router_bus) | ||
| 349 | return; | ||
| 350 | |||
| 351 | amdgpu_i2c_get_byte(amdgpu_connector->router_bus, | ||
| 352 | amdgpu_connector->router.i2c_addr, | ||
| 353 | 0x3, &val); | ||
| 354 | val &= ~amdgpu_connector->router.ddc_mux_control_pin; | ||
| 355 | amdgpu_i2c_put_byte(amdgpu_connector->router_bus, | ||
| 356 | amdgpu_connector->router.i2c_addr, | ||
| 357 | 0x3, val); | ||
| 358 | amdgpu_i2c_get_byte(amdgpu_connector->router_bus, | ||
| 359 | amdgpu_connector->router.i2c_addr, | ||
| 360 | 0x1, &val); | ||
| 361 | val &= ~amdgpu_connector->router.ddc_mux_control_pin; | ||
| 362 | val |= amdgpu_connector->router.ddc_mux_state; | ||
| 363 | amdgpu_i2c_put_byte(amdgpu_connector->router_bus, | ||
| 364 | amdgpu_connector->router.i2c_addr, | ||
| 365 | 0x1, val); | ||
| 366 | } | ||
| 367 | |||
| 368 | /* clock/data router switching */ | ||
| 369 | void | ||
| 370 | amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector) | ||
| 371 | { | ||
| 372 | u8 val; | ||
| 373 | |||
| 374 | if (!amdgpu_connector->router.cd_valid) | ||
| 375 | return; | ||
| 376 | |||
| 377 | if (!amdgpu_connector->router_bus) | ||
| 378 | return; | ||
| 379 | |||
| 380 | amdgpu_i2c_get_byte(amdgpu_connector->router_bus, | ||
| 381 | amdgpu_connector->router.i2c_addr, | ||
| 382 | 0x3, &val); | ||
| 383 | val &= ~amdgpu_connector->router.cd_mux_control_pin; | ||
| 384 | amdgpu_i2c_put_byte(amdgpu_connector->router_bus, | ||
| 385 | amdgpu_connector->router.i2c_addr, | ||
| 386 | 0x3, val); | ||
| 387 | amdgpu_i2c_get_byte(amdgpu_connector->router_bus, | ||
| 388 | amdgpu_connector->router.i2c_addr, | ||
| 389 | 0x1, &val); | ||
| 390 | val &= ~amdgpu_connector->router.cd_mux_control_pin; | ||
| 391 | val |= amdgpu_connector->router.cd_mux_state; | ||
| 392 | amdgpu_i2c_put_byte(amdgpu_connector->router_bus, | ||
| 393 | amdgpu_connector->router.i2c_addr, | ||
| 394 | 0x1, val); | ||
| 395 | } | ||
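The two router helpers above share a read-modify-write shape over amdgpu_i2c_get_byte()/amdgpu_i2c_put_byte(); a minimal sketch of that pattern, with hypothetical mux parameters:

    /* Sketch of the mux update pattern used by the router functions;
     * mux_addr, control_pin and mux_state are hypothetical values.
     */
    static void demo_select_port(struct amdgpu_i2c_chan *router_bus,
                                 u8 mux_addr, u8 control_pin, u8 mux_state)
    {
            u8 val;

            amdgpu_i2c_get_byte(router_bus, mux_addr, 0x1, &val); /* read */
            val &= ~control_pin;      /* clear the routed bits */
            val |= mux_state;         /* select the new port */
            amdgpu_i2c_put_byte(router_bus, mux_addr, 0x1, val);  /* write back */
    }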
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h new file mode 100644 index 000000000000..d81e19b53973 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h | |||
| @@ -0,0 +1,44 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __AMDGPU_I2C_H__ | ||
| 25 | #define __AMDGPU_I2C_H__ | ||
| 26 | |||
| 27 | struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, | ||
| 28 | struct amdgpu_i2c_bus_rec *rec, | ||
| 29 | const char *name); | ||
| 30 | void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c); | ||
| 31 | void amdgpu_i2c_init(struct amdgpu_device *adev); | ||
| 32 | void amdgpu_i2c_fini(struct amdgpu_device *adev); | ||
| 33 | void amdgpu_i2c_add(struct amdgpu_device *adev, | ||
| 34 | struct amdgpu_i2c_bus_rec *rec, | ||
| 35 | const char *name); | ||
| 36 | struct amdgpu_i2c_chan * | ||
| 37 | amdgpu_i2c_lookup(struct amdgpu_device *adev, | ||
| 38 | struct amdgpu_i2c_bus_rec *i2c_bus); | ||
| 39 | void | ||
| 40 | amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector); | ||
| 41 | void | ||
| 42 | amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector); | ||
| 43 | |||
| 44 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c new file mode 100644 index 000000000000..847cab2b3fff --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
| @@ -0,0 +1,345 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | * Christian König | ||
| 28 | */ | ||
| 29 | #include <linux/seq_file.h> | ||
| 30 | #include <linux/slab.h> | ||
| 31 | #include <drm/drmP.h> | ||
| 32 | #include <drm/amdgpu_drm.h> | ||
| 33 | #include "amdgpu.h" | ||
| 34 | #include "atom.h" | ||
| 35 | |||
| 36 | /* | ||
| 37 | * IB | ||
| 38 | * IBs (Indirect Buffers) are areas of GPU accessible memory where | ||
| 39 | * commands are stored. You can put a pointer to the IB in the | ||
| 40 | * command ring and the hw will fetch the commands from the IB | ||
| 41 | * and execute them. Generally userspace acceleration drivers | ||
| 42 | * produce command buffers which are sent to the kernel and | ||
| 43 | * put in IBs for execution by the requested ring. | ||
| 44 | */ | ||
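A hedged sketch of the IB lifecycle described above, using the helpers defined later in this file. The fragment assumes it runs inside a function with `ring` and `adev` in scope, error handling is trimmed, and the command word is a meaningless placeholder:

    /* Sketch: request an IB, fill it with commands, schedule it, free it.
     * The 0xdeadbeef packet is a placeholder, not a real command.
     */
    struct amdgpu_ib ib;
    int r;

    r = amdgpu_ib_get(ring, NULL, 256, &ib);    /* kernel IB, no VM */
    if (r)
            return r;

    ib.ptr[0] = 0xdeadbeef;                     /* placeholder packet */
    ib.length_dw = 1;

    r = amdgpu_ib_schedule(adev, 1, &ib, NULL); /* NULL owner: kernel */
    if (r) {
            amdgpu_ib_free(adev, &ib);
            return r;
    }
    /* ib.fence can be waited on here before freeing, if needed */
    amdgpu_ib_free(adev, &ib);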
| 45 | static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev); | ||
| 46 | |||
| 47 | /** | ||
| 48 | * amdgpu_ib_get - request an IB (Indirect Buffer) | ||
| 49 | * | ||
| 50 | * @ring: ring the IB is associated with | ||
| 50 | * @vm: requesting vm (NULL for kernel-owned IBs) | ||
| 51 | * @size: requested IB size | ||
| 52 | * @ib: IB object returned | ||
| 53 | * | ||
| 54 | * Request an IB (all asics). IBs are allocated using the | ||
| 55 | * suballocator. | ||
| 56 | * Returns 0 on success, error on failure. | ||
| 57 | */ | ||
| 58 | int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, | ||
| 59 | unsigned size, struct amdgpu_ib *ib) | ||
| 60 | { | ||
| 61 | struct amdgpu_device *adev = ring->adev; | ||
| 62 | int r; | ||
| 63 | |||
| 64 | if (size) { | ||
| 65 | r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, | ||
| 66 | &ib->sa_bo, size, 256); | ||
| 67 | if (r) { | ||
| 68 | dev_err(adev->dev, "failed to get a new IB (%d)\n", r); | ||
| 69 | return r; | ||
| 70 | } | ||
| 71 | |||
| 72 | ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo); | ||
| 73 | |||
| 74 | if (!vm) | ||
| 75 | ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); | ||
| 76 | else | ||
| 77 | ib->gpu_addr = 0; | ||
| 78 | |||
| 79 | } else { | ||
| 80 | ib->sa_bo = NULL; | ||
| 81 | ib->ptr = NULL; | ||
| 82 | ib->gpu_addr = 0; | ||
| 83 | } | ||
| 84 | |||
| 85 | amdgpu_sync_create(&ib->sync); | ||
| 86 | |||
| 87 | ib->ring = ring; | ||
| 88 | ib->fence = NULL; | ||
| 89 | ib->user = NULL; | ||
| 90 | ib->vm = vm; | ||
| 91 | ib->is_const_ib = false; | ||
| 92 | ib->gds_base = 0; | ||
| 93 | ib->gds_size = 0; | ||
| 94 | ib->gws_base = 0; | ||
| 95 | ib->gws_size = 0; | ||
| 96 | ib->oa_base = 0; | ||
| 97 | ib->oa_size = 0; | ||
| 98 | |||
| 99 | return 0; | ||
| 100 | } | ||
| 101 | |||
| 102 | /** | ||
| 103 | * amdgpu_ib_free - free an IB (Indirect Buffer) | ||
| 104 | * | ||
| 105 | * @adev: amdgpu_device pointer | ||
| 106 | * @ib: IB object to free | ||
| 107 | * | ||
| 108 | * Free an IB (all asics). | ||
| 109 | */ | ||
| 110 | void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib) | ||
| 111 | { | ||
| 112 | amdgpu_sync_free(adev, &ib->sync, ib->fence); | ||
| 113 | amdgpu_sa_bo_free(adev, &ib->sa_bo, ib->fence); | ||
| 114 | amdgpu_fence_unref(&ib->fence); | ||
| 115 | } | ||
| 116 | |||
| 117 | /** | ||
| 118 | * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring | ||
| 119 | * | ||
| 120 | * @adev: amdgpu_device pointer | ||
| 121 | * @num_ibs: number of IBs to schedule | ||
| 122 | * @ibs: IB objects to schedule | ||
| 123 | * @owner: owner for creating the fences | ||
| 124 | * | ||
| 125 | * Schedule an IB on the associated ring (all asics). | ||
| 126 | * Returns 0 on success, error on failure. | ||
| 127 | * | ||
| 128 | * On SI, there are two parallel engines fed from the primary ring, | ||
| 129 | * the CE (Constant Engine) and the DE (Drawing Engine). Since | ||
| 130 | * resource descriptors have moved to memory, the CE allows you to | ||
| 131 | * prime the caches while the DE is updating register state so that | ||
| 132 | * the resource descriptors will be already in cache when the draw is | ||
| 133 | * processed. To accomplish this, the userspace driver submits two | ||
| 134 | * IBs, one for the CE and one for the DE. If there is a CE IB (called | ||
| 135 | * a CONST_IB), it will be put on the ring prior to the DE IB. Prior | ||
| 136 | * to SI there was just a DE IB. | ||
| 137 | */ | ||
| 138 | int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, | ||
| 139 | struct amdgpu_ib *ibs, void *owner) | ||
| 140 | { | ||
| 141 | struct amdgpu_ring *ring; | ||
| 142 | struct amdgpu_vm *vm = ibs->vm; | ||
| 143 | struct amdgpu_ib *ib = &ibs[0]; | ||
| 144 | unsigned i; | ||
| 145 | int r = 0; | ||
| 146 | bool flush_hdp = true; | ||
| 147 | |||
| 148 | if (num_ibs == 0) | ||
| 149 | return -EINVAL; | ||
| 150 | |||
| 151 | ring = ibs->ring; | ||
| 152 | if (!ring->ready) { | ||
| 153 | dev_err(adev->dev, "couldn't schedule ib\n"); | ||
| 154 | return -EINVAL; | ||
| 155 | } | ||
| 156 | |||
| 157 | r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs); | ||
| 158 | if (r) { | ||
| 159 | dev_err(adev->dev, "scheduling IB failed (%d).\n", r); | ||
| 160 | return r; | ||
| 161 | } | ||
| 162 | |||
| 163 | if (vm) { | ||
| 164 | /* grab a vm id if necessary */ | ||
| 165 | struct amdgpu_fence *vm_id_fence = NULL; | ||
| 166 | vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm); | ||
| 167 | amdgpu_sync_fence(&ibs->sync, vm_id_fence); | ||
| 168 | } | ||
| 169 | |||
| 170 | r = amdgpu_sync_rings(&ibs->sync, ring); | ||
| 171 | if (r) { | ||
| 172 | amdgpu_ring_unlock_undo(ring); | ||
| 173 | dev_err(adev->dev, "failed to sync rings (%d)\n", r); | ||
| 174 | return r; | ||
| 175 | } | ||
| 176 | |||
| 177 | if (vm) { | ||
| 178 | /* do context switch */ | ||
| 179 | amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update); | ||
| 180 | } | ||
| 181 | |||
| 182 | if (ring->funcs->emit_gds_switch && ib->vm && ib->gds_needed) | ||
| 183 | amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id, | ||
| 184 | ib->gds_base, ib->gds_size, | ||
| 185 | ib->gws_base, ib->gws_size, | ||
| 186 | ib->oa_base, ib->oa_size); | ||
| 187 | |||
| 188 | for (i = 0; i < num_ibs; ++i) { | ||
| 189 | ib = &ibs[i]; | ||
| 190 | |||
| 191 | if (ib->ring != ring) { | ||
| 192 | amdgpu_ring_unlock_undo(ring); | ||
| 193 | return -EINVAL; | ||
| 194 | } | ||
| 195 | ib->flush_hdp_writefifo = flush_hdp; | ||
| 196 | flush_hdp = false; | ||
| 197 | amdgpu_ring_emit_ib(ring, ib); | ||
| 198 | } | ||
| 199 | |||
| 200 | r = amdgpu_fence_emit(ring, owner, &ib->fence); | ||
| 201 | if (r) { | ||
| 202 | dev_err(adev->dev, "failed to emit fence (%d)\n", r); | ||
| 203 | amdgpu_ring_unlock_undo(ring); | ||
| 204 | return r; | ||
| 205 | } | ||
| 206 | |||
| 207 | /* wrap the last IB with fence */ | ||
| 208 | if (ib->user) { | ||
| 209 | uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo); | ||
| 210 | addr += ib->user->offset; | ||
| 211 | amdgpu_ring_emit_fence(ring, addr, ib->fence->seq, true); | ||
| 212 | } | ||
| 213 | |||
| 214 | if (ib->vm) | ||
| 215 | amdgpu_vm_fence(adev, ib->vm, ib->fence); | ||
| 216 | |||
| 217 | amdgpu_ring_unlock_commit(ring); | ||
| 218 | return 0; | ||
| 219 | } | ||
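For the CE/DE split described in the comment above, the submission reduces to scheduling two IBs in one call, with the const IB flagged so it lands on the ring first. Schematically (a fragment: IB setup, `r` and `owner` are assumed to exist in the surrounding function):

    /* Sketch: one submission carrying a CE (const) IB and a DE IB.
     * Both must target the same ring; amdgpu_ib_schedule() emits them
     * back to back and attaches the fence to the last one.
     */
    struct amdgpu_ib ibs[2];

    /* amdgpu_ib_get() and command emission for both IBs elided */
    ibs[0].is_const_ib = true;   /* CE IB, goes on the ring first */
    ibs[1].is_const_ib = false;  /* DE IB follows */

    r = amdgpu_ib_schedule(adev, 2, ibs, owner);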
| 220 | |||
| 221 | /** | ||
| 222 | * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool | ||
| 223 | * | ||
| 224 | * @adev: amdgpu_device pointer | ||
| 225 | * | ||
| 226 | * Initialize the suballocator to manage a pool of memory | ||
| 227 | * for use as IBs (all asics). | ||
| 228 | * Returns 0 on success, error on failure. | ||
| 229 | */ | ||
| 230 | int amdgpu_ib_pool_init(struct amdgpu_device *adev) | ||
| 231 | { | ||
| 232 | int r; | ||
| 233 | |||
| 234 | if (adev->ib_pool_ready) { | ||
| 235 | return 0; | ||
| 236 | } | ||
| 237 | r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo, | ||
| 238 | AMDGPU_IB_POOL_SIZE*64*1024, | ||
| 239 | AMDGPU_GPU_PAGE_SIZE, | ||
| 240 | AMDGPU_GEM_DOMAIN_GTT); | ||
| 241 | if (r) { | ||
| 242 | return r; | ||
| 243 | } | ||
| 244 | |||
| 245 | r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo); | ||
| 246 | if (r) { | ||
| 247 | return r; | ||
| 248 | } | ||
| 249 | |||
| 250 | adev->ib_pool_ready = true; | ||
| 251 | if (amdgpu_debugfs_sa_init(adev)) { | ||
| 252 | dev_err(adev->dev, "failed to register debugfs file for SA\n"); | ||
| 253 | } | ||
| 254 | return 0; | ||
| 255 | } | ||
| 256 | |||
| 257 | /** | ||
| 258 | * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool | ||
| 259 | * | ||
| 260 | * @adev: amdgpu_device pointer | ||
| 261 | * | ||
| 262 | * Tear down the suballocator managing the pool of memory | ||
| 263 | * for use as IBs (all asics). | ||
| 264 | */ | ||
| 265 | void amdgpu_ib_pool_fini(struct amdgpu_device *adev) | ||
| 266 | { | ||
| 267 | if (adev->ib_pool_ready) { | ||
| 268 | amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo); | ||
| 269 | amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo); | ||
| 270 | adev->ib_pool_ready = false; | ||
| 271 | } | ||
| 272 | } | ||
| 273 | |||
| 274 | /** | ||
| 275 | * amdgpu_ib_ring_tests - test IBs on the rings | ||
| 276 | * | ||
| 277 | * @adev: amdgpu_device pointer | ||
| 278 | * | ||
| 279 | * Test an IB (Indirect Buffer) on each ring. | ||
| 280 | * If the test fails, disable the ring. | ||
| 281 | * Returns 0 on success, error if the primary GFX ring | ||
| 282 | * IB test fails. | ||
| 283 | */ | ||
| 284 | int amdgpu_ib_ring_tests(struct amdgpu_device *adev) | ||
| 285 | { | ||
| 286 | unsigned i; | ||
| 287 | int r; | ||
| 288 | |||
| 289 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 290 | struct amdgpu_ring *ring = adev->rings[i]; | ||
| 291 | |||
| 292 | if (!ring || !ring->ready) | ||
| 293 | continue; | ||
| 294 | |||
| 295 | r = amdgpu_ring_test_ib(ring); | ||
| 296 | if (r) { | ||
| 297 | ring->ready = false; | ||
| 298 | adev->needs_reset = false; | ||
| 299 | |||
| 300 | if (ring == &adev->gfx.gfx_ring[0]) { | ||
| 301 | /* oh, oh, that's really bad */ | ||
| 302 | DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r); | ||
| 303 | adev->accel_working = false; | ||
| 304 | return r; | ||
| 305 | |||
| 306 | } else { | ||
| 307 | /* still not good, but we can live with it */ | ||
| 308 | DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r); | ||
| 309 | } | ||
| 310 | } | ||
| 311 | } | ||
| 312 | return 0; | ||
| 313 | } | ||
| 314 | |||
| 315 | /* | ||
| 316 | * Debugfs info | ||
| 317 | */ | ||
| 318 | #if defined(CONFIG_DEBUG_FS) | ||
| 319 | |||
| 320 | static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data) | ||
| 321 | { | ||
| 322 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
| 323 | struct drm_device *dev = node->minor->dev; | ||
| 324 | struct amdgpu_device *adev = dev->dev_private; | ||
| 325 | |||
| 326 | amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m); | ||
| 327 | |||
| 328 | return 0; | ||
| 329 | |||
| 330 | } | ||
| 331 | |||
| 332 | static struct drm_info_list amdgpu_debugfs_sa_list[] = { | ||
| 333 | {"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL}, | ||
| 334 | }; | ||
| 335 | |||
| 336 | #endif | ||
| 337 | |||
| 338 | static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev) | ||
| 339 | { | ||
| 340 | #if defined(CONFIG_DEBUG_FS) | ||
| 341 | return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1); | ||
| 342 | #else | ||
| 343 | return 0; | ||
| 344 | #endif | ||
| 345 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c new file mode 100644 index 000000000000..db5422e65ec5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | |||
| @@ -0,0 +1,216 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <drm/drmP.h> | ||
| 25 | #include "amdgpu.h" | ||
| 26 | #include "amdgpu_ih.h" | ||
| 27 | |||
| 28 | /** | ||
| 29 | * amdgpu_ih_ring_alloc - allocate memory for the IH ring | ||
| 30 | * | ||
| 31 | * @adev: amdgpu_device pointer | ||
| 32 | * | ||
| 33 | * Allocate a ring buffer for the interrupt controller. | ||
| 34 | * Returns 0 for success, errors for failure. | ||
| 35 | */ | ||
| 36 | static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev) | ||
| 37 | { | ||
| 38 | int r; | ||
| 39 | |||
| 40 | /* Allocate ring buffer */ | ||
| 41 | if (adev->irq.ih.ring_obj == NULL) { | ||
| 42 | r = amdgpu_bo_create(adev, adev->irq.ih.ring_size, | ||
| 43 | PAGE_SIZE, true, | ||
| 44 | AMDGPU_GEM_DOMAIN_GTT, 0, | ||
| 45 | NULL, &adev->irq.ih.ring_obj); | ||
| 46 | if (r) { | ||
| 47 | DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r); | ||
| 48 | return r; | ||
| 49 | } | ||
| 50 | r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false); | ||
| 51 | if (unlikely(r != 0)) | ||
| 52 | return r; | ||
| 53 | r = amdgpu_bo_pin(adev->irq.ih.ring_obj, | ||
| 54 | AMDGPU_GEM_DOMAIN_GTT, | ||
| 55 | &adev->irq.ih.gpu_addr); | ||
| 56 | if (r) { | ||
| 57 | amdgpu_bo_unreserve(adev->irq.ih.ring_obj); | ||
| 58 | DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r); | ||
| 59 | return r; | ||
| 60 | } | ||
| 61 | r = amdgpu_bo_kmap(adev->irq.ih.ring_obj, | ||
| 62 | (void **)&adev->irq.ih.ring); | ||
| 63 | amdgpu_bo_unreserve(adev->irq.ih.ring_obj); | ||
| 64 | if (r) { | ||
| 65 | DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r); | ||
| 66 | return r; | ||
| 67 | } | ||
| 68 | } | ||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | |||
| 72 | /** | ||
| 73 | * amdgpu_ih_ring_init - initialize the IH state | ||
| 74 | * | ||
| 75 | * @adev: amdgpu_device pointer | ||
| 75 | * @ring_size: requested ring buffer size in bytes | ||
| 75 | * @use_bus_addr: allocate the ring in system memory instead of a GTT BO | ||
| 76 | * | ||
| 77 | * Initializes the IH state and allocates a buffer | ||
| 78 | * for the IH ring buffer. | ||
| 79 | * Returns 0 for success, errors for failure. | ||
| 80 | */ | ||
| 81 | int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size, | ||
| 82 | bool use_bus_addr) | ||
| 83 | { | ||
| 84 | u32 rb_bufsz; | ||
| 85 | int r; | ||
| 86 | |||
| 87 | /* Align ring size */ | ||
| 88 | rb_bufsz = order_base_2(ring_size / 4); | ||
| 89 | ring_size = (1 << rb_bufsz) * 4; | ||
| 90 | adev->irq.ih.ring_size = ring_size; | ||
| 91 | adev->irq.ih.ptr_mask = adev->irq.ih.ring_size - 1; | ||
| 92 | adev->irq.ih.rptr = 0; | ||
| 93 | adev->irq.ih.use_bus_addr = use_bus_addr; | ||
| 94 | |||
| 95 | if (adev->irq.ih.use_bus_addr) { | ||
| 96 | if (!adev->irq.ih.ring) { | ||
| 97 | /* add 8 bytes for the rptr/wptr shadows and | ||
| 98 | * add them to the end of the ring allocation. | ||
| 99 | */ | ||
| 100 | adev->irq.ih.ring = kzalloc(adev->irq.ih.ring_size + 8, GFP_KERNEL); | ||
| 101 | if (adev->irq.ih.ring == NULL) | ||
| 102 | return -ENOMEM; | ||
| 103 | adev->irq.ih.rb_dma_addr = pci_map_single(adev->pdev, | ||
| 104 | (void *)adev->irq.ih.ring, | ||
| 105 | adev->irq.ih.ring_size, | ||
| 106 | PCI_DMA_BIDIRECTIONAL); | ||
| 107 | if (pci_dma_mapping_error(adev->pdev, adev->irq.ih.rb_dma_addr)) { | ||
| 108 | dev_err(&adev->pdev->dev, "Failed to DMA MAP the IH RB page\n"); | ||
| 109 | kfree((void *)adev->irq.ih.ring); | ||
| 110 | return -ENOMEM; | ||
| 111 | } | ||
| 112 | adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0; | ||
| 113 | adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1; | ||
| 114 | } | ||
| 115 | return 0; | ||
| 116 | } else { | ||
| 117 | r = amdgpu_wb_get(adev, &adev->irq.ih.wptr_offs); | ||
| 118 | if (r) { | ||
| 119 | dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r); | ||
| 120 | return r; | ||
| 121 | } | ||
| 122 | |||
| 123 | r = amdgpu_wb_get(adev, &adev->irq.ih.rptr_offs); | ||
| 124 | if (r) { | ||
| 125 | amdgpu_wb_free(adev, adev->irq.ih.wptr_offs); | ||
| 126 | dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r); | ||
| 127 | return r; | ||
| 128 | } | ||
| 129 | |||
| 130 | return amdgpu_ih_ring_alloc(adev); | ||
| 131 | } | ||
| 132 | } | ||
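The size rounding at the top of amdgpu_ih_ring_init() exists so the read pointer can wrap with a single mask. A standalone, runnable userspace sketch of the same arithmetic (order_base_2_demo() mimics the kernel's order_base_2()):

    #include <stdio.h>

    /* Demo of the rounding in amdgpu_ih_ring_init(): the size is
     * rounded up to a power-of-two count of 4-byte entries so that
     * "rptr &= ptr_mask" wraps the ring.
     */
    static unsigned order_base_2_demo(unsigned n)
    {
            unsigned order = 0;

            while ((1u << order) < n)
                    ++order;
            return order;
    }

    int main(void)
    {
            unsigned ring_size = 5000;  /* requested size in bytes */
            unsigned rb_bufsz = order_base_2_demo(ring_size / 4);

            ring_size = (1u << rb_bufsz) * 4;  /* rounds up to 8192 */
            printf("ring_size %u, ptr_mask 0x%x\n", ring_size, ring_size - 1);
            return 0;
    }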
| 133 | |||
| 134 | /** | ||
| 135 | * amdgpu_ih_ring_fini - tear down the IH state | ||
| 136 | * | ||
| 137 | * @adev: amdgpu_device pointer | ||
| 138 | * | ||
| 139 | * Tears down the IH state and frees buffer | ||
| 140 | * used for the IH ring buffer. | ||
| 141 | */ | ||
| 142 | void amdgpu_ih_ring_fini(struct amdgpu_device *adev) | ||
| 143 | { | ||
| 144 | int r; | ||
| 145 | |||
| 146 | if (adev->irq.ih.use_bus_addr) { | ||
| 147 | if (adev->irq.ih.ring) { | ||
| 148 | /* add 8 bytes for the rptr/wptr shadows and | ||
| 149 | * add them to the end of the ring allocation. | ||
| 150 | */ | ||
| 151 | pci_unmap_single(adev->pdev, adev->irq.ih.rb_dma_addr, | ||
| 152 | adev->irq.ih.ring_size + 8, PCI_DMA_BIDIRECTIONAL); | ||
| 153 | kfree((void *)adev->irq.ih.ring); | ||
| 154 | adev->irq.ih.ring = NULL; | ||
| 155 | } | ||
| 156 | } else { | ||
| 157 | if (adev->irq.ih.ring_obj) { | ||
| 158 | r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false); | ||
| 159 | if (likely(r == 0)) { | ||
| 160 | amdgpu_bo_kunmap(adev->irq.ih.ring_obj); | ||
| 161 | amdgpu_bo_unpin(adev->irq.ih.ring_obj); | ||
| 162 | amdgpu_bo_unreserve(adev->irq.ih.ring_obj); | ||
| 163 | } | ||
| 164 | amdgpu_bo_unref(&adev->irq.ih.ring_obj); | ||
| 165 | adev->irq.ih.ring = NULL; | ||
| 166 | adev->irq.ih.ring_obj = NULL; | ||
| 167 | } | ||
| 168 | amdgpu_wb_free(adev, adev->irq.ih.wptr_offs); | ||
| 169 | amdgpu_wb_free(adev, adev->irq.ih.rptr_offs); | ||
| 170 | } | ||
| 171 | } | ||
| 172 | |||
| 173 | /** | ||
| 174 | * amdgpu_ih_process - interrupt handler | ||
| 175 | * | ||
| 176 | * @adev: amdgpu_device pointer | ||
| 177 | * | ||
| 178 | * Interrupt handler (VI), walks the IH ring. | ||
| 179 | * Returns irq process return code. | ||
| 180 | */ | ||
| 181 | int amdgpu_ih_process(struct amdgpu_device *adev) | ||
| 182 | { | ||
| 183 | struct amdgpu_iv_entry entry; | ||
| 184 | u32 wptr; | ||
| 185 | |||
| 186 | if (!adev->irq.ih.enabled || adev->shutdown) | ||
| 187 | return IRQ_NONE; | ||
| 188 | |||
| 189 | wptr = amdgpu_ih_get_wptr(adev); | ||
| 190 | |||
| 191 | restart_ih: | ||
| 192 | /* is somebody else already processing irqs? */ | ||
| 193 | if (atomic_xchg(&adev->irq.ih.lock, 1)) | ||
| 194 | return IRQ_NONE; | ||
| 195 | |||
| 196 | DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, adev->irq.ih.rptr, wptr); | ||
| 197 | |||
| 198 | /* Order reading of wptr vs. reading of IH ring data */ | ||
| 199 | rmb(); | ||
| 200 | |||
| 201 | while (adev->irq.ih.rptr != wptr) { | ||
| 202 | amdgpu_ih_decode_iv(adev, &entry); | ||
| 203 | adev->irq.ih.rptr &= adev->irq.ih.ptr_mask; | ||
| 204 | |||
| 205 | amdgpu_irq_dispatch(adev, &entry); | ||
| 206 | } | ||
| 207 | amdgpu_ih_set_rptr(adev); | ||
| 208 | atomic_set(&adev->irq.ih.lock, 0); | ||
| 209 | |||
| 210 | /* make sure wptr hasn't changed while processing */ | ||
| 211 | wptr = amdgpu_ih_get_wptr(adev); | ||
| 212 | if (wptr != adev->irq.ih.rptr) | ||
| 213 | goto restart_ih; | ||
| 214 | |||
| 215 | return IRQ_HANDLED; | ||
| 216 | } | ||
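Stripped of the hardware accessors, amdgpu_ih_process() is a single-consumer ring walk guarded by an atomic flag, with a wptr re-check to catch entries that arrived during processing. A skeleton of that structure (read_hw_wptr() and dispatch_one() are hypothetical stand-ins for the asic-specific callbacks used above):

    /* Skeleton of the loop above. read_hw_wptr() and dispatch_one()
     * stand in for amdgpu_ih_get_wptr()/amdgpu_ih_decode_iv() plus
     * amdgpu_irq_dispatch() and are hypothetical names.
     */
    static irqreturn_t ih_walk(struct amdgpu_ih_ring *ih)
    {
            u32 wptr = read_hw_wptr();

    restart:
            if (atomic_xchg(&ih->lock, 1))   /* one consumer at a time */
                    return IRQ_NONE;

            while (ih->rptr != wptr) {
                    dispatch_one(ih);        /* decodes and advances rptr */
                    ih->rptr &= ih->ptr_mask;
            }
            atomic_set(&ih->lock, 0);

            wptr = read_hw_wptr();           /* catch late arrivals */
            if (wptr != ih->rptr)
                    goto restart;

            return IRQ_HANDLED;
    }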
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h new file mode 100644 index 000000000000..c62b09e555d6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | |||
| @@ -0,0 +1,62 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __AMDGPU_IH_H__ | ||
| 25 | #define __AMDGPU_IH_H__ | ||
| 26 | |||
| 27 | struct amdgpu_device; | ||
| 28 | |||
| 29 | /* | ||
| 30 | * R6xx+ IH ring | ||
| 31 | */ | ||
| 32 | struct amdgpu_ih_ring { | ||
| 33 | struct amdgpu_bo *ring_obj; | ||
| 34 | volatile uint32_t *ring; | ||
| 35 | unsigned rptr; | ||
| 36 | unsigned ring_size; | ||
| 37 | uint64_t gpu_addr; | ||
| 38 | uint32_t ptr_mask; | ||
| 39 | atomic_t lock; | ||
| 40 | bool enabled; | ||
| 41 | unsigned wptr_offs; | ||
| 42 | unsigned rptr_offs; | ||
| 43 | u32 doorbell_index; | ||
| 44 | bool use_doorbell; | ||
| 45 | bool use_bus_addr; | ||
| 46 | dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */ | ||
| 47 | }; | ||
| 48 | |||
| 49 | struct amdgpu_iv_entry { | ||
| 50 | unsigned src_id; | ||
| 51 | unsigned src_data; | ||
| 52 | unsigned ring_id; | ||
| 53 | unsigned vm_id; | ||
| 54 | unsigned pas_id; | ||
| 55 | }; | ||
| 56 | |||
| 57 | int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size, | ||
| 58 | bool use_bus_addr); | ||
| 59 | void amdgpu_ih_ring_fini(struct amdgpu_device *adev); | ||
| 60 | int amdgpu_ih_process(struct amdgpu_device *adev); | ||
| 61 | |||
| 62 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c new file mode 100644 index 000000000000..26482914dc4b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c | |||
| @@ -0,0 +1,47 @@ | |||
| 1 | /** | ||
| 2 | * \file amdgpu_ioc32.c | ||
| 3 | * | ||
| 4 | * 32-bit ioctl compatibility routines for the AMDGPU DRM. | ||
| 5 | * | ||
| 6 | * \author Paul Mackerras <paulus@samba.org> | ||
| 7 | * | ||
| 8 | * Copyright (C) Paul Mackerras 2005 | ||
| 9 | * All Rights Reserved. | ||
| 10 | * | ||
| 11 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 12 | * copy of this software and associated documentation files (the "Software"), | ||
| 13 | * to deal in the Software without restriction, including without limitation | ||
| 14 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 15 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 16 | * Software is furnished to do so, subject to the following conditions: | ||
| 17 | * | ||
| 18 | * The above copyright notice and this permission notice (including the next | ||
| 19 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 20 | * Software. | ||
| 21 | * | ||
| 22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 23 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 24 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 25 | * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, | ||
| 26 | * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
| 27 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
| 28 | * IN THE SOFTWARE. | ||
| 29 | */ | ||
| 30 | #include <linux/compat.h> | ||
| 31 | |||
| 32 | #include <drm/drmP.h> | ||
| 33 | #include <drm/amdgpu_drm.h> | ||
| 34 | #include "amdgpu_drv.h" | ||
| 35 | |||
| 36 | long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
| 37 | { | ||
| 38 | unsigned int nr = DRM_IOCTL_NR(cmd); | ||
| 39 | int ret; | ||
| 40 | |||
| 41 | if (nr < DRM_COMMAND_BASE) | ||
| 42 | return drm_compat_ioctl(filp, cmd, arg); | ||
| 43 | |||
| 44 | ret = amdgpu_drm_ioctl(filp, cmd, arg); | ||
| 45 | |||
| 46 | return ret; | ||
| 47 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c new file mode 100644 index 000000000000..2187960baf7c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | |||
| @@ -0,0 +1,456 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #include <drm/drmP.h> | ||
| 29 | #include <drm/drm_crtc_helper.h> | ||
| 30 | #include <drm/amdgpu_drm.h> | ||
| 31 | #include "amdgpu.h" | ||
| 32 | #include "amdgpu_ih.h" | ||
| 33 | #include "atom.h" | ||
| 34 | #include "amdgpu_connectors.h" | ||
| 35 | |||
| 36 | #include <linux/pm_runtime.h> | ||
| 37 | |||
| 38 | #define AMDGPU_WAIT_IDLE_TIMEOUT 200 | ||
| 39 | |||
| 40 | /* | ||
| 41 | * Handle hotplug events outside the interrupt handler proper. | ||
| 42 | */ | ||
| 43 | /** | ||
| 44 | * amdgpu_hotplug_work_func - display hotplug work handler | ||
| 45 | * | ||
| 46 | * @work: work struct | ||
| 47 | * | ||
| 48 | * This is the hot plug event work handler (all asics). | ||
| 49 | * The work gets scheduled from the irq handler if there | ||
| 50 | * was a hot plug interrupt. It walks the connector table | ||
| 51 | * and calls the hotplug handler for each one, then sends | ||
| 52 | * a drm hotplug event to alert userspace. | ||
| 53 | */ | ||
| 54 | static void amdgpu_hotplug_work_func(struct work_struct *work) | ||
| 55 | { | ||
| 56 | struct amdgpu_device *adev = container_of(work, struct amdgpu_device, | ||
| 57 | hotplug_work); | ||
| 58 | struct drm_device *dev = adev->ddev; | ||
| 59 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
| 60 | struct drm_connector *connector; | ||
| 61 | |||
| 62 | if (mode_config->num_connector) { | ||
| 63 | list_for_each_entry(connector, &mode_config->connector_list, head) | ||
| 64 | amdgpu_connector_hotplug(connector); | ||
| 65 | } | ||
| 66 | /* Just fire off a uevent and let userspace tell us what to do */ | ||
| 67 | drm_helper_hpd_irq_event(dev); | ||
| 68 | } | ||
| 69 | |||
| 70 | /** | ||
| 71 | * amdgpu_irq_reset_work_func - execute gpu reset | ||
| 72 | * | ||
| 73 | * @work: work struct | ||
| 74 | * | ||
| 75 | * Execute scheduled gpu reset (cayman+). | ||
| 76 | * This function is called when the irq handler | ||
| 77 | * thinks we need a gpu reset. | ||
| 78 | */ | ||
| 79 | static void amdgpu_irq_reset_work_func(struct work_struct *work) | ||
| 80 | { | ||
| 81 | struct amdgpu_device *adev = container_of(work, struct amdgpu_device, | ||
| 82 | reset_work); | ||
| 83 | |||
| 84 | amdgpu_gpu_reset(adev); | ||
| 85 | } | ||
| 86 | |||
| 87 | /* Disable *all* interrupts */ | ||
| 88 | static void amdgpu_irq_disable_all(struct amdgpu_device *adev) | ||
| 89 | { | ||
| 90 | unsigned long irqflags; | ||
| 91 | unsigned i, j; | ||
| 92 | int r; | ||
| 93 | |||
| 94 | spin_lock_irqsave(&adev->irq.lock, irqflags); | ||
| 95 | for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) { | ||
| 96 | struct amdgpu_irq_src *src = adev->irq.sources[i]; | ||
| 97 | |||
| 98 | if (!src || !src->funcs->set || !src->num_types) | ||
| 99 | continue; | ||
| 100 | |||
| 101 | for (j = 0; j < src->num_types; ++j) { | ||
| 102 | atomic_set(&src->enabled_types[j], 0); | ||
| 103 | r = src->funcs->set(adev, src, j, | ||
| 104 | AMDGPU_IRQ_STATE_DISABLE); | ||
| 105 | if (r) | ||
| 106 | DRM_ERROR("error disabling interrupt (%d)\n", | ||
| 107 | r); | ||
| 108 | } | ||
| 109 | } | ||
| 110 | spin_unlock_irqrestore(&adev->irq.lock, irqflags); | ||
| 111 | } | ||
| 112 | |||
| 113 | /** | ||
| 114 | * amdgpu_irq_preinstall - drm irq preinstall callback | ||
| 115 | * | ||
| 116 | * @dev: drm dev pointer | ||
| 117 | * | ||
| 118 | * Gets the hw ready to enable irqs (all asics). | ||
| 119 | * This function disables all interrupt sources on the GPU. | ||
| 120 | */ | ||
| 121 | void amdgpu_irq_preinstall(struct drm_device *dev) | ||
| 122 | { | ||
| 123 | struct amdgpu_device *adev = dev->dev_private; | ||
| 124 | |||
| 125 | /* Disable *all* interrupts */ | ||
| 126 | amdgpu_irq_disable_all(adev); | ||
| 127 | /* Clear bits */ | ||
| 128 | amdgpu_ih_process(adev); | ||
| 129 | } | ||
| 130 | |||
| 131 | /** | ||
| 132 | * amdgpu_irq_postinstall - drm irq postinstall callback | ||
| 133 | * | ||
| 134 | * @dev: drm dev pointer | ||
| 135 | * | ||
| 136 | * Handles stuff to be done after enabling irqs (all asics). | ||
| 137 | * Returns 0 on success. | ||
| 138 | */ | ||
| 139 | int amdgpu_irq_postinstall(struct drm_device *dev) | ||
| 140 | { | ||
| 141 | dev->max_vblank_count = 0x001fffff; | ||
| 142 | return 0; | ||
| 143 | } | ||
| 144 | |||
| 145 | /** | ||
| 146 | * amdgpu_irq_uninstall - drm irq uninstall callback | ||
| 147 | * | ||
| 148 | * @dev: drm dev pointer | ||
| 149 | * | ||
| 150 | * This function disables all interrupt sources on the GPU (all asics). | ||
| 151 | */ | ||
| 152 | void amdgpu_irq_uninstall(struct drm_device *dev) | ||
| 153 | { | ||
| 154 | struct amdgpu_device *adev = dev->dev_private; | ||
| 155 | |||
| 156 | if (adev == NULL) { | ||
| 157 | return; | ||
| 158 | } | ||
| 159 | amdgpu_irq_disable_all(adev); | ||
| 160 | } | ||
| 161 | |||
| 162 | /** | ||
| 163 | * amdgpu_irq_handler - irq handler | ||
| 164 | * | ||
| 165 | * @irq: the irq number | ||
| 165 | * @arg: the drm device pointer | ||
| 166 | * | ||
| 167 | * This is the irq handler for the amdgpu driver (all asics). | ||
| 168 | */ | ||
| 169 | irqreturn_t amdgpu_irq_handler(int irq, void *arg) | ||
| 170 | { | ||
| 171 | struct drm_device *dev = (struct drm_device *) arg; | ||
| 172 | struct amdgpu_device *adev = dev->dev_private; | ||
| 173 | irqreturn_t ret; | ||
| 174 | |||
| 175 | ret = amdgpu_ih_process(adev); | ||
| 176 | if (ret == IRQ_HANDLED) | ||
| 177 | pm_runtime_mark_last_busy(dev->dev); | ||
| 178 | return ret; | ||
| 179 | } | ||
| 180 | |||
| 181 | /** | ||
| 182 | * amdgpu_msi_ok - asic specific msi checks | ||
| 183 | * | ||
| 184 | * @adev: amdgpu device pointer | ||
| 185 | * | ||
| 186 | * Handles asic specific MSI checks to determine if | ||
| 187 | * MSIs should be enabled on a particular chip (all asics). | ||
| 188 | * Returns true if MSIs should be enabled, false if MSIs | ||
| 189 | * should not be enabled. | ||
| 190 | */ | ||
| 191 | static bool amdgpu_msi_ok(struct amdgpu_device *adev) | ||
| 192 | { | ||
| 193 | /* force MSI on */ | ||
| 194 | if (amdgpu_msi == 1) | ||
| 195 | return true; | ||
| 196 | else if (amdgpu_msi == 0) | ||
| 197 | return false; | ||
| 198 | |||
| 199 | return true; | ||
| 200 | } | ||
| 201 | |||
| 202 | /** | ||
| 203 | * amdgpu_irq_init - init driver interrupt info | ||
| 204 | * | ||
| 205 | * @adev: amdgpu device pointer | ||
| 206 | * | ||
| 207 | * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics). | ||
| 208 | * Returns 0 for success, error for failure. | ||
| 209 | */ | ||
| 210 | int amdgpu_irq_init(struct amdgpu_device *adev) | ||
| 211 | { | ||
| 212 | int r = 0; | ||
| 213 | |||
| 214 | spin_lock_init(&adev->irq.lock); | ||
| 215 | r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc); | ||
| 216 | if (r) { | ||
| 217 | return r; | ||
| 218 | } | ||
| 219 | /* enable msi */ | ||
| 220 | adev->irq.msi_enabled = false; | ||
| 221 | |||
| 222 | if (amdgpu_msi_ok(adev)) { | ||
| 223 | int ret = pci_enable_msi(adev->pdev); | ||
| 224 | if (!ret) { | ||
| 225 | adev->irq.msi_enabled = true; | ||
| 226 | dev_info(adev->dev, "amdgpu: using MSI.\n"); | ||
| 227 | } | ||
| 228 | } | ||
| 229 | |||
| 230 | INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func); | ||
| 231 | INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func); | ||
| 232 | |||
| 233 | adev->irq.installed = true; | ||
| 234 | r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); | ||
| 235 | if (r) { | ||
| 236 | adev->irq.installed = false; | ||
| 237 | flush_work(&adev->hotplug_work); | ||
| 238 | return r; | ||
| 239 | } | ||
| 240 | |||
| 241 | DRM_INFO("amdgpu: irq initialized.\n"); | ||
| 242 | return 0; | ||
| 243 | } | ||
| 244 | |||
| 245 | /** | ||
| 246 | * amdgpu_irq_fini - tear down driver interrupt info | ||
| 247 | * | ||
| 248 | * @adev: amdgpu device pointer | ||
| 249 | * | ||
| 250 | * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics). | ||
| 251 | */ | ||
| 252 | void amdgpu_irq_fini(struct amdgpu_device *adev) | ||
| 253 | { | ||
| 254 | unsigned i; | ||
| 255 | |||
| 256 | drm_vblank_cleanup(adev->ddev); | ||
| 257 | if (adev->irq.installed) { | ||
| 258 | drm_irq_uninstall(adev->ddev); | ||
| 259 | adev->irq.installed = false; | ||
| 260 | if (adev->irq.msi_enabled) | ||
| 261 | pci_disable_msi(adev->pdev); | ||
| 262 | flush_work(&adev->hotplug_work); | ||
| 263 | } | ||
| 264 | |||
| 265 | for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) { | ||
| 266 | struct amdgpu_irq_src *src = adev->irq.sources[i]; | ||
| 267 | |||
| 268 | if (!src) | ||
| 269 | continue; | ||
| 270 | |||
| 271 | kfree(src->enabled_types); | ||
| 272 | src->enabled_types = NULL; | ||
| 273 | } | ||
| 274 | } | ||
| 275 | |||
| 276 | /** | ||
| 277 | * amdgpu_irq_add_id - register irq source | ||
| 278 | * | ||
| 279 | * @adev: amdgpu device pointer | ||
| 280 | * @src_id: source id for this source | ||
| 281 | * @source: irq source | ||
| 282 | * | ||
| 282 | * Registers an irq source for a given source id (all asics). | ||
| 282 | * Returns 0 on success or a negative error code on failure. | ||
| 283 | */ | ||
| 284 | int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id, | ||
| 285 | struct amdgpu_irq_src *source) | ||
| 286 | { | ||
| 287 | if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) | ||
| 288 | return -EINVAL; | ||
| 289 | |||
| 290 | if (adev->irq.sources[src_id] != NULL) | ||
| 291 | return -EINVAL; | ||
| 292 | |||
| 293 | if (!source->funcs) | ||
| 294 | return -EINVAL; | ||
| 295 | |||
| 296 | if (source->num_types && !source->enabled_types) { | ||
| 297 | atomic_t *types; | ||
| 298 | |||
| 299 | types = kcalloc(source->num_types, sizeof(atomic_t), | ||
| 300 | GFP_KERNEL); | ||
| 301 | if (!types) | ||
| 302 | return -ENOMEM; | ||
| 303 | |||
| 304 | source->enabled_types = types; | ||
| 305 | } | ||
| 306 | |||
| 307 | adev->irq.sources[src_id] = source; | ||
| 308 | return 0; | ||
| 309 | } | ||
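A hedged sketch of how an interrupt-generating IP block plugs into this registry; the src_id, the callback bodies, and all the demo_* names are invented for illustration:

    /* Sketch of an IP block registering an interrupt source. Real
     * blocks program their enable registers in .set and ack/route the
     * event in .process; src_id 42 is a made-up value.
     */
    static int demo_irq_set(struct amdgpu_device *adev,
                            struct amdgpu_irq_src *src, unsigned type,
                            enum amdgpu_interrupt_state state)
    {
            /* write the hw enable bit for 'type' here */
            return 0;
    }

    static int demo_irq_process(struct amdgpu_device *adev,
                                struct amdgpu_irq_src *src,
                                struct amdgpu_iv_entry *entry)
    {
            DRM_DEBUG("demo irq: src_data %u\n", entry->src_data);
            return 0;
    }

    static const struct amdgpu_irq_src_funcs demo_irq_funcs = {
            .set = demo_irq_set,
            .process = demo_irq_process,
    };

    static struct amdgpu_irq_src demo_irq = {
            .num_types = 1,
            .funcs = &demo_irq_funcs,
    };

    /* in the block's sw_init callback: */
    r = amdgpu_irq_add_id(adev, 42, &demo_irq);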
| 310 | |||
| 311 | /** | ||
| 312 | * amdgpu_irq_dispatch - dispatch irq to IP blocks | ||
| 313 | * | ||
| 314 | * @adev: amdgpu device pointer | ||
| 315 | * @entry: interrupt vector | ||
| 316 | * | ||
| 317 | * Dispatches the irq to the different IP blocks | ||
| 318 | */ | ||
| 319 | void amdgpu_irq_dispatch(struct amdgpu_device *adev, | ||
| 320 | struct amdgpu_iv_entry *entry) | ||
| 321 | { | ||
| 322 | unsigned src_id = entry->src_id; | ||
| 323 | struct amdgpu_irq_src *src; | ||
| 324 | int r; | ||
| 325 | |||
| 326 | if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { | ||
| 327 | DRM_DEBUG("Invalid src_id in IV: %d\n", src_id); | ||
| 328 | return; | ||
| 329 | } | ||
| 330 | |||
| 331 | src = adev->irq.sources[src_id]; | ||
| 332 | if (!src) { | ||
| 333 | DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id); | ||
| 334 | return; | ||
| 335 | } | ||
| 336 | |||
| 337 | r = src->funcs->process(adev, src, entry); | ||
| 338 | if (r) | ||
| 339 | DRM_ERROR("error processing interrupt (%d)\n", r); | ||
| 340 | } | ||
| 341 | |||
| 342 | /** | ||
| 343 | * amdgpu_irq_update - update hw interrupt state | ||
| 344 | * | ||
| 345 | * @adev: amdgpu device pointer | ||
| 346 | * @src: interrupt src you want to enable | ||
| 347 | * @type: type of interrupt you want to update | ||
| 348 | * | ||
| 349 | * Updates the interrupt state for a specific src (all asics). | ||
| 350 | */ | ||
| 351 | int amdgpu_irq_update(struct amdgpu_device *adev, | ||
| 352 | struct amdgpu_irq_src *src, unsigned type) | ||
| 353 | { | ||
| 354 | unsigned long irqflags; | ||
| 355 | enum amdgpu_interrupt_state state; | ||
| 356 | int r; | ||
| 357 | |||
| 358 | spin_lock_irqsave(&adev->irq.lock, irqflags); | ||
| 359 | |||
| 360 | /* We need to determine the state after taking the lock; otherwise | ||
| 361 | we might disable an interrupt that was just enabled */ | ||
| 362 | if (amdgpu_irq_enabled(adev, src, type)) | ||
| 363 | state = AMDGPU_IRQ_STATE_ENABLE; | ||
| 364 | else | ||
| 365 | state = AMDGPU_IRQ_STATE_DISABLE; | ||
| 366 | |||
| 367 | r = src->funcs->set(adev, src, type, state); | ||
| 368 | spin_unlock_irqrestore(&adev->irq.lock, irqflags); | ||
| 369 | return r; | ||
| 370 | } | ||
| 371 | |||
| 372 | /** | ||
| 373 | * amdgpu_irq_get - enable interrupt | ||
| 374 | * | ||
| 375 | * @adev: amdgpu device pointer | ||
| 376 | * @src: interrupt src you want to enable | ||
| 377 | * @type: type of interrupt you want to enable | ||
| 378 | * | ||
| 379 | * Enables the interrupt type for a specific src (all asics). | ||
| 380 | */ | ||
| 381 | int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, | ||
| 382 | unsigned type) | ||
| 383 | { | ||
| 384 | if (!adev->ddev->irq_enabled) | ||
| 385 | return -ENOENT; | ||
| 386 | |||
| 387 | if (type >= src->num_types) | ||
| 388 | return -EINVAL; | ||
| 389 | |||
| 390 | if (!src->enabled_types || !src->funcs->set) | ||
| 391 | return -EINVAL; | ||
| 392 | |||
| 393 | if (atomic_inc_return(&src->enabled_types[type]) == 1) | ||
| 394 | return amdgpu_irq_update(adev, src, type); | ||
| 395 | |||
| 396 | return 0; | ||
| 397 | } | ||
| 398 | |||
| 399 | bool amdgpu_irq_get_delayed(struct amdgpu_device *adev, | ||
| 400 | struct amdgpu_irq_src *src, | ||
| 401 | unsigned type) | ||
| 402 | { | ||
| 403 | if ((type >= src->num_types) || !src->enabled_types) | ||
| 404 | return false; | ||
| 405 | return atomic_inc_return(&src->enabled_types[type]) == 1; | ||
| 406 | } | ||
| 407 | |||
| 408 | /** | ||
| 409 | * amdgpu_irq_put - disable interrupt | ||
| 410 | * | ||
| 411 | * @adev: amdgpu device pointer | ||
| 412 | * @src: interrupt src you want to disable | ||
| 413 | * @type: type of interrupt you want to disable | ||
| 414 | * | ||
| 415 | * Disables the interrupt type for a specific src (all asics). | ||
| 416 | */ | ||
| 417 | int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, | ||
| 418 | unsigned type) | ||
| 419 | { | ||
| 420 | if (!adev->ddev->irq_enabled) | ||
| 421 | return -ENOENT; | ||
| 422 | |||
| 423 | if (type >= src->num_types) | ||
| 424 | return -EINVAL; | ||
| 425 | |||
| 426 | if (!src->enabled_types || !src->funcs->set) | ||
| 427 | return -EINVAL; | ||
| 428 | |||
| 429 | if (atomic_dec_and_test(&src->enabled_types[type])) | ||
| 430 | return amdgpu_irq_update(adev, src, type); | ||
| 431 | |||
| 432 | return 0; | ||
| 433 | } | ||
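Enable/disable is reference counted per interrupt type, so the hardware .set callback only fires on the 0->1 and 1->0 transitions. A usage sketch, reusing the hypothetical demo_irq source from the registration sketch above (`r` and `adev` assumed in scope):

    /* Sketch: callers pair amdgpu_irq_get() with amdgpu_irq_put().
     * Nested gets are cheap; only the first get and last put touch hw.
     */
    r = amdgpu_irq_get(adev, &demo_irq, 0);  /* refcount 0->1: hw enable */
    if (r)
            return r;
    /* ... interrupts of this type are now delivered ... */
    amdgpu_irq_put(adev, &demo_irq, 0);      /* refcount 1->0: hw disable */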
| 434 | |||
| 435 | /** | ||
| 436 | * amdgpu_irq_enabled - test if irq is enabled or not | ||
| 437 | * | ||
| 438 | * @adev: amdgpu device pointer | ||
| 439 | * @src: interrupt source you want to test | ||
| 439 | * @type: type of interrupt you want to test | ||
| 440 | * | ||
| 441 | * Tests if the given interrupt source is enabled or not | ||
| 442 | */ | ||
| 443 | bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, | ||
| 444 | unsigned type) | ||
| 445 | { | ||
| 446 | if (!adev->ddev->irq_enabled) | ||
| 447 | return false; | ||
| 448 | |||
| 449 | if (type >= src->num_types) | ||
| 450 | return false; | ||
| 451 | |||
| 452 | if (!src->enabled_types || !src->funcs->set) | ||
| 453 | return false; | ||
| 454 | |||
| 455 | return !!atomic_read(&src->enabled_types[type]); | ||
| 456 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h new file mode 100644 index 000000000000..8299795f2b2d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | |||
| @@ -0,0 +1,92 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __AMDGPU_IRQ_H__ | ||
| 25 | #define __AMDGPU_IRQ_H__ | ||
| 26 | |||
| 27 | #include "amdgpu_ih.h" | ||
| 28 | |||
| 29 | #define AMDGPU_MAX_IRQ_SRC_ID 0x100 | ||
| 30 | |||
| 31 | struct amdgpu_device; | ||
| 32 | struct amdgpu_iv_entry; | ||
| 33 | |||
| 34 | enum amdgpu_interrupt_state { | ||
| 35 | AMDGPU_IRQ_STATE_DISABLE, | ||
| 36 | AMDGPU_IRQ_STATE_ENABLE, | ||
| 37 | }; | ||
| 38 | |||
| 39 | struct amdgpu_irq_src { | ||
| 40 | unsigned num_types; | ||
| 41 | atomic_t *enabled_types; | ||
| 42 | const struct amdgpu_irq_src_funcs *funcs; | ||
| 43 | }; | ||
| 44 | |||
| 45 | /* provided by interrupt generating IP blocks */ | ||
| 46 | struct amdgpu_irq_src_funcs { | ||
| 47 | int (*set)(struct amdgpu_device *adev, struct amdgpu_irq_src *source, | ||
| 48 | unsigned type, enum amdgpu_interrupt_state state); | ||
| 49 | |||
| 50 | int (*process)(struct amdgpu_device *adev, | ||
| 51 | struct amdgpu_irq_src *source, | ||
| 52 | struct amdgpu_iv_entry *entry); | ||
| 53 | }; | ||
| 54 | |||
| 55 | struct amdgpu_irq { | ||
| 56 | bool installed; | ||
| 57 | spinlock_t lock; | ||
| 58 | /* interrupt sources */ | ||
| 59 | struct amdgpu_irq_src *sources[AMDGPU_MAX_IRQ_SRC_ID]; | ||
| 60 | |||
| 61 | /* status, etc. */ | ||
| 62 | bool msi_enabled; /* msi enabled */ | ||
| 63 | |||
| 64 | /* interrupt ring */ | ||
| 65 | struct amdgpu_ih_ring ih; | ||
| 66 | const struct amdgpu_ih_funcs *ih_funcs; | ||
| 67 | }; | ||
| 68 | |||
| 69 | void amdgpu_irq_preinstall(struct drm_device *dev); | ||
| 70 | int amdgpu_irq_postinstall(struct drm_device *dev); | ||
| 71 | void amdgpu_irq_uninstall(struct drm_device *dev); | ||
| 72 | irqreturn_t amdgpu_irq_handler(int irq, void *arg); | ||
| 73 | |||
| 74 | int amdgpu_irq_init(struct amdgpu_device *adev); | ||
| 75 | void amdgpu_irq_fini(struct amdgpu_device *adev); | ||
| 76 | int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id, | ||
| 77 | struct amdgpu_irq_src *source); | ||
| 78 | void amdgpu_irq_dispatch(struct amdgpu_device *adev, | ||
| 79 | struct amdgpu_iv_entry *entry); | ||
| 80 | int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src, | ||
| 81 | unsigned type); | ||
| 82 | int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, | ||
| 83 | unsigned type); | ||
| 84 | bool amdgpu_irq_get_delayed(struct amdgpu_device *adev, | ||
| 85 | struct amdgpu_irq_src *src, | ||
| 86 | unsigned type); | ||
| 87 | int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, | ||
| 88 | unsigned type); | ||
| 89 | bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, | ||
| 90 | unsigned type); | ||
| 91 | |||
| 92 | #endif | ||
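The header expects every interrupt-generating IP block to supply the two `amdgpu_irq_src_funcs` callbacks: `set()` programs the enable state for one interrupt type, and `process()` handles a decoded IV ring entry. A hedged sketch of such an implementation; the `my_block_*()` helpers and `MY_BLOCK_SRC_ID` are placeholders for real register programming:

```c
/* Sketch only: my_block_*() and MY_BLOCK_SRC_ID are hypothetical; the
 * callback signatures match struct amdgpu_irq_src_funcs above. */
static int my_block_set_irq_state(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  unsigned type,
				  enum amdgpu_interrupt_state state)
{
	/* program this block's interrupt enable bit for the given type */
	my_block_write_irq_enable(adev, type,
				  state == AMDGPU_IRQ_STATE_ENABLE);
	return 0;
}

static int my_block_process_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	/* decode the IV ring entry and acknowledge the event */
	my_block_handle_event(adev, entry);
	return 0;
}

static const struct amdgpu_irq_src_funcs my_block_irq_funcs = {
	.set = my_block_set_irq_state,
	.process = my_block_process_irq,
};
```

After filling in `num_types`, `enabled_types` and `funcs` on its `struct amdgpu_irq_src`, the block would hook the source into the dispatcher with `amdgpu_irq_add_id(adev, MY_BLOCK_SRC_ID, &my_block_irq_src)`.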
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c new file mode 100644 index 000000000000..c271da34998d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
| @@ -0,0 +1,674 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #include <drm/drmP.h> | ||
| 29 | #include "amdgpu.h" | ||
| 30 | #include <drm/amdgpu_drm.h> | ||
| 31 | #include "amdgpu_uvd.h" | ||
| 32 | #include "amdgpu_vce.h" | ||
| 33 | |||
| 34 | #include <linux/vga_switcheroo.h> | ||
| 35 | #include <linux/slab.h> | ||
| 36 | #include <linux/pm_runtime.h> | ||
| 37 | |||
| 38 | #if defined(CONFIG_VGA_SWITCHEROO) | ||
| 39 | bool amdgpu_has_atpx(void); | ||
| 40 | #else | ||
| 41 | static inline bool amdgpu_has_atpx(void) { return false; } | ||
| 42 | #endif | ||
| 43 | |||
| 44 | /** | ||
| 45 | * amdgpu_driver_unload_kms - Main unload function for KMS. | ||
| 46 | * | ||
| 47 | * @dev: drm dev pointer | ||
| 48 | * | ||
| 49 | * This is the main unload function for KMS (all asics). | ||
| 50 | * Returns 0 on success. | ||
| 51 | */ | ||
| 52 | int amdgpu_driver_unload_kms(struct drm_device *dev) | ||
| 53 | { | ||
| 54 | struct amdgpu_device *adev = dev->dev_private; | ||
| 55 | |||
| 56 | if (adev == NULL) | ||
| 57 | return 0; | ||
| 58 | |||
| 59 | if (adev->rmmio == NULL) | ||
| 60 | goto done_free; | ||
| 61 | |||
| 62 | pm_runtime_get_sync(dev->dev); | ||
| 63 | |||
| 64 | amdgpu_acpi_fini(adev); | ||
| 65 | |||
| 66 | amdgpu_device_fini(adev); | ||
| 67 | |||
| 68 | done_free: | ||
| 69 | kfree(adev); | ||
| 70 | dev->dev_private = NULL; | ||
| 71 | return 0; | ||
| 72 | } | ||
| 73 | |||
| 74 | /** | ||
| 75 | * amdgpu_driver_load_kms - Main load function for KMS. | ||
| 76 | * | ||
| 77 | * @dev: drm dev pointer | ||
| 78 | * @flags: device flags | ||
| 79 | * | ||
| 80 | * This is the main load function for KMS (all asics). | ||
| 81 | * Returns 0 on success, error on failure. | ||
| 82 | */ | ||
| 83 | int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) | ||
| 84 | { | ||
| 85 | struct amdgpu_device *adev; | ||
| 86 | int r, acpi_status; | ||
| 87 | |||
| 88 | adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL); | ||
| 89 | if (adev == NULL) { | ||
| 90 | return -ENOMEM; | ||
| 91 | } | ||
| 92 | dev->dev_private = (void *)adev; | ||
| 93 | |||
| 94 | if ((amdgpu_runtime_pm != 0) && | ||
| 95 | amdgpu_has_atpx() && | ||
| 96 | ((flags & AMDGPU_IS_APU) == 0)) | ||
| 97 | flags |= AMDGPU_IS_PX; | ||
| 98 | |||
| 99 | /* amdgpu_device_init() should report only fatal errors | ||
| 100 | * (memory allocation, iomapping or memory manager | ||
| 101 | * initialization failures); on success it must have | ||
| 102 | * properly initialized the GPU MC controller and permit | ||
| 103 | * VRAM allocation. | ||
| 104 | */ | ||
| 105 | r = amdgpu_device_init(adev, dev, dev->pdev, flags); | ||
| 106 | if (r) { | ||
| 107 | dev_err(&dev->pdev->dev, "Fatal error during GPU init\n"); | ||
| 108 | goto out; | ||
| 109 | } | ||
| 110 | |||
| 111 | /* Call ACPI methods: these require modeset init, | ||
| 112 | * but failure here is not fatal | ||
| 113 | */ | ||
| 114 | if (!r) { | ||
| 115 | acpi_status = amdgpu_acpi_init(adev); | ||
| 116 | if (acpi_status) | ||
| 117 | dev_dbg(&dev->pdev->dev, | ||
| 118 | "Error during ACPI methods call\n"); | ||
| 119 | } | ||
| 120 | |||
| 121 | if (amdgpu_device_is_px(dev)) { | ||
| 122 | pm_runtime_use_autosuspend(dev->dev); | ||
| 123 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); | ||
| 124 | pm_runtime_set_active(dev->dev); | ||
| 125 | pm_runtime_allow(dev->dev); | ||
| 126 | pm_runtime_mark_last_busy(dev->dev); | ||
| 127 | pm_runtime_put_autosuspend(dev->dev); | ||
| 128 | } | ||
| 129 | |||
| 130 | out: | ||
| 131 | if (r) | ||
| 132 | amdgpu_driver_unload_kms(dev); | ||
| 133 | |||
| 134 | |||
| 135 | return r; | ||
| 136 | } | ||
| 137 | |||
| 138 | /* | ||
| 139 | * Userspace get information ioctl | ||
| 140 | */ | ||
| 141 | /** | ||
| 142 | * amdgpu_info_ioctl - answer a device specific request. | ||
| 143 | * | ||
| 144 | * @dev: drm device pointer | ||
| 145 | * @data: request object | ||
| 146 | * @filp: drm filp | ||
| 147 | * | ||
| 148 | * This function is used to pass device specific parameters to the userspace | ||
| 149 | * drivers. Examples include: pci device id, pipeline params, tiling params, | ||
| 150 | * etc. (all asics). | ||
| 151 | * Returns 0 on success, -EINVAL on failure. | ||
| 152 | */ | ||
| 153 | static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | ||
| 154 | { | ||
| 155 | struct amdgpu_device *adev = dev->dev_private; | ||
| 156 | struct drm_amdgpu_info *info = data; | ||
| 157 | struct amdgpu_mode_info *minfo = &adev->mode_info; | ||
| 158 | void __user *out = (void __user *)(long)info->return_pointer; | ||
| 159 | uint32_t size = info->return_size; | ||
| 160 | struct drm_crtc *crtc; | ||
| 161 | uint32_t ui32 = 0; | ||
| 162 | uint64_t ui64 = 0; | ||
| 163 | int i, found; | ||
| 164 | |||
| 165 | if (!info->return_size || !info->return_pointer) | ||
| 166 | return -EINVAL; | ||
| 167 | |||
| 168 | switch (info->query) { | ||
| 169 | case AMDGPU_INFO_ACCEL_WORKING: | ||
| 170 | ui32 = adev->accel_working; | ||
| 171 | return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0; | ||
| 172 | case AMDGPU_INFO_CRTC_FROM_ID: | ||
| 173 | for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) { | ||
| 174 | crtc = (struct drm_crtc *)minfo->crtcs[i]; | ||
| 175 | if (crtc && crtc->base.id == info->mode_crtc.id) { | ||
| 176 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 177 | ui32 = amdgpu_crtc->crtc_id; | ||
| 178 | found = 1; | ||
| 179 | break; | ||
| 180 | } | ||
| 181 | } | ||
| 182 | if (!found) { | ||
| 183 | DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id); | ||
| 184 | return -EINVAL; | ||
| 185 | } | ||
| 186 | return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0; | ||
| 187 | case AMDGPU_INFO_HW_IP_INFO: { | ||
| 188 | struct drm_amdgpu_info_hw_ip ip = {}; | ||
| 189 | enum amdgpu_ip_block_type type; | ||
| 190 | uint32_t ring_mask = 0; | ||
| 191 | |||
| 192 | if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT) | ||
| 193 | return -EINVAL; | ||
| 194 | |||
| 195 | switch (info->query_hw_ip.type) { | ||
| 196 | case AMDGPU_HW_IP_GFX: | ||
| 197 | type = AMDGPU_IP_BLOCK_TYPE_GFX; | ||
| 198 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) | ||
| 199 | ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i); | ||
| 200 | break; | ||
| 201 | case AMDGPU_HW_IP_COMPUTE: | ||
| 202 | type = AMDGPU_IP_BLOCK_TYPE_GFX; | ||
| 203 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | ||
| 204 | ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i); | ||
| 205 | break; | ||
| 206 | case AMDGPU_HW_IP_DMA: | ||
| 207 | type = AMDGPU_IP_BLOCK_TYPE_SDMA; | ||
| 208 | ring_mask = adev->sdma[0].ring.ready ? 1 : 0; | ||
| 209 | ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1); | ||
| 210 | break; | ||
| 211 | case AMDGPU_HW_IP_UVD: | ||
| 212 | type = AMDGPU_IP_BLOCK_TYPE_UVD; | ||
| 213 | ring_mask = adev->uvd.ring.ready ? 1 : 0; | ||
| 214 | break; | ||
| 215 | case AMDGPU_HW_IP_VCE: | ||
| 216 | type = AMDGPU_IP_BLOCK_TYPE_VCE; | ||
| 217 | for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++) | ||
| 218 | ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i); | ||
| 219 | break; | ||
| 220 | default: | ||
| 221 | return -EINVAL; | ||
| 222 | } | ||
| 223 | |||
| 224 | for (i = 0; i < adev->num_ip_blocks; i++) { | ||
| 225 | if (adev->ip_blocks[i].type == type && | ||
| 226 | adev->ip_block_enabled[i]) { | ||
| 227 | ip.hw_ip_version_major = adev->ip_blocks[i].major; | ||
| 228 | ip.hw_ip_version_minor = adev->ip_blocks[i].minor; | ||
| 229 | ip.capabilities_flags = 0; | ||
| 230 | ip.available_rings = ring_mask; | ||
| 231 | break; | ||
| 232 | } | ||
| 233 | } | ||
| 234 | return copy_to_user(out, &ip, | ||
| 235 | min((size_t)size, sizeof(ip))) ? -EFAULT : 0; | ||
| 236 | } | ||
| 237 | case AMDGPU_INFO_HW_IP_COUNT: { | ||
| 238 | enum amdgpu_ip_block_type type; | ||
| 239 | uint32_t count = 0; | ||
| 240 | |||
| 241 | switch (info->query_hw_ip.type) { | ||
| 242 | case AMDGPU_HW_IP_GFX: | ||
| 243 | type = AMDGPU_IP_BLOCK_TYPE_GFX; | ||
| 244 | break; | ||
| 245 | case AMDGPU_HW_IP_COMPUTE: | ||
| 246 | type = AMDGPU_IP_BLOCK_TYPE_GFX; | ||
| 247 | break; | ||
| 248 | case AMDGPU_HW_IP_DMA: | ||
| 249 | type = AMDGPU_IP_BLOCK_TYPE_SDMA; | ||
| 250 | break; | ||
| 251 | case AMDGPU_HW_IP_UVD: | ||
| 252 | type = AMDGPU_IP_BLOCK_TYPE_UVD; | ||
| 253 | break; | ||
| 254 | case AMDGPU_HW_IP_VCE: | ||
| 255 | type = AMDGPU_IP_BLOCK_TYPE_VCE; | ||
| 256 | break; | ||
| 257 | default: | ||
| 258 | return -EINVAL; | ||
| 259 | } | ||
| 260 | |||
| 261 | for (i = 0; i < adev->num_ip_blocks; i++) | ||
| 262 | if (adev->ip_blocks[i].type == type && | ||
| 263 | adev->ip_block_enabled[i] && | ||
| 264 | count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) | ||
| 265 | count++; | ||
| 266 | |||
| 267 | return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0; | ||
| 268 | } | ||
| 269 | case AMDGPU_INFO_TIMESTAMP: | ||
| 270 | ui64 = amdgpu_asic_get_gpu_clock_counter(adev); | ||
| 271 | return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; | ||
| 272 | case AMDGPU_INFO_FW_VERSION: { | ||
| 273 | struct drm_amdgpu_info_firmware fw_info; | ||
| 274 | |||
| 275 | /* We only support one instance of each IP block right now. */ | ||
| 276 | if (info->query_fw.ip_instance != 0) | ||
| 277 | return -EINVAL; | ||
| 278 | |||
| 279 | switch (info->query_fw.fw_type) { | ||
| 280 | case AMDGPU_INFO_FW_VCE: | ||
| 281 | fw_info.ver = adev->vce.fw_version; | ||
| 282 | fw_info.feature = adev->vce.fb_version; | ||
| 283 | break; | ||
| 284 | case AMDGPU_INFO_FW_UVD: | ||
| 285 | fw_info.ver = 0; | ||
| 286 | fw_info.feature = 0; | ||
| 287 | break; | ||
| 288 | case AMDGPU_INFO_FW_GMC: | ||
| 289 | fw_info.ver = adev->mc.fw_version; | ||
| 290 | fw_info.feature = 0; | ||
| 291 | break; | ||
| 292 | case AMDGPU_INFO_FW_GFX_ME: | ||
| 293 | fw_info.ver = adev->gfx.me_fw_version; | ||
| 294 | fw_info.feature = 0; | ||
| 295 | break; | ||
| 296 | case AMDGPU_INFO_FW_GFX_PFP: | ||
| 297 | fw_info.ver = adev->gfx.pfp_fw_version; | ||
| 298 | fw_info.feature = 0; | ||
| 299 | break; | ||
| 300 | case AMDGPU_INFO_FW_GFX_CE: | ||
| 301 | fw_info.ver = adev->gfx.ce_fw_version; | ||
| 302 | fw_info.feature = 0; | ||
| 303 | break; | ||
| 304 | case AMDGPU_INFO_FW_GFX_RLC: | ||
| 305 | fw_info.ver = adev->gfx.rlc_fw_version; | ||
| 306 | fw_info.feature = 0; | ||
| 307 | break; | ||
| 308 | case AMDGPU_INFO_FW_GFX_MEC: | ||
| 309 | if (info->query_fw.index == 0) | ||
| 310 | fw_info.ver = adev->gfx.mec_fw_version; | ||
| 311 | else if (info->query_fw.index == 1) | ||
| 312 | fw_info.ver = adev->gfx.mec2_fw_version; | ||
| 313 | else | ||
| 314 | return -EINVAL; | ||
| 315 | fw_info.feature = 0; | ||
| 316 | break; | ||
| 317 | case AMDGPU_INFO_FW_SMC: | ||
| 318 | fw_info.ver = adev->pm.fw_version; | ||
| 319 | fw_info.feature = 0; | ||
| 320 | break; | ||
| 321 | case AMDGPU_INFO_FW_SDMA: | ||
| 322 | if (info->query_fw.index >= 2) | ||
| 323 | return -EINVAL; | ||
| 324 | fw_info.ver = adev->sdma[info->query_fw.index].fw_version; | ||
| 325 | fw_info.feature = 0; | ||
| 326 | break; | ||
| 327 | default: | ||
| 328 | return -EINVAL; | ||
| 329 | } | ||
| 330 | return copy_to_user(out, &fw_info, | ||
| 331 | min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0; | ||
| 332 | } | ||
| 333 | case AMDGPU_INFO_NUM_BYTES_MOVED: | ||
| 334 | ui64 = atomic64_read(&adev->num_bytes_moved); | ||
| 335 | return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; | ||
| 336 | case AMDGPU_INFO_VRAM_USAGE: | ||
| 337 | ui64 = atomic64_read(&adev->vram_usage); | ||
| 338 | return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; | ||
| 339 | case AMDGPU_INFO_VIS_VRAM_USAGE: | ||
| 340 | ui64 = atomic64_read(&adev->vram_vis_usage); | ||
| 341 | return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; | ||
| 342 | case AMDGPU_INFO_GTT_USAGE: | ||
| 343 | ui64 = atomic64_read(&adev->gtt_usage); | ||
| 344 | return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; | ||
| 345 | case AMDGPU_INFO_GDS_CONFIG: { | ||
| 346 | struct drm_amdgpu_info_gds gds_info; | ||
| 347 | |||
| 348 | gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT; | ||
| 349 | gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT; | ||
| 350 | gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT; | ||
| 351 | gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT; | ||
| 352 | gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT; | ||
| 353 | gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT; | ||
| 354 | gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT; | ||
| 355 | return copy_to_user(out, &gds_info, | ||
| 356 | min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0; | ||
| 357 | } | ||
| 358 | case AMDGPU_INFO_VRAM_GTT: { | ||
| 359 | struct drm_amdgpu_info_vram_gtt vram_gtt; | ||
| 360 | |||
| 361 | vram_gtt.vram_size = adev->mc.real_vram_size; | ||
| 362 | vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size; | ||
| 363 | vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size; | ||
| 364 | vram_gtt.gtt_size = adev->mc.gtt_size; | ||
| 365 | vram_gtt.gtt_size -= adev->gart_pin_size; | ||
| 366 | return copy_to_user(out, &vram_gtt, | ||
| 367 | min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; | ||
| 368 | } | ||
| 369 | case AMDGPU_INFO_READ_MMR_REG: { | ||
| 370 | unsigned n, alloc_size = info->read_mmr_reg.count * 4; | ||
| 371 | uint32_t *regs; | ||
| 372 | unsigned se_num = (info->read_mmr_reg.instance >> | ||
| 373 | AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & | ||
| 374 | AMDGPU_INFO_MMR_SE_INDEX_MASK; | ||
| 375 | unsigned sh_num = (info->read_mmr_reg.instance >> | ||
| 376 | AMDGPU_INFO_MMR_SH_INDEX_SHIFT) & | ||
| 377 | AMDGPU_INFO_MMR_SH_INDEX_MASK; | ||
| 378 | |||
| 379 | /* set full masks if userspace set all bits | ||
| 380 | * in the bitfields */ | ||
| 381 | if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) | ||
| 382 | se_num = 0xffffffff; | ||
| 383 | if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) | ||
| 384 | sh_num = 0xffffffff; | ||
| 385 | |||
| 386 | regs = kmalloc(alloc_size, GFP_KERNEL); | ||
| 387 | if (!regs) | ||
| 388 | return -ENOMEM; | ||
| 389 | |||
| 390 | for (i = 0; i < info->read_mmr_reg.count; i++) | ||
| 391 | if (amdgpu_asic_read_register(adev, se_num, sh_num, | ||
| 392 | info->read_mmr_reg.dword_offset + i, | ||
| 393 | ®s[i])) { | ||
| 394 | DRM_DEBUG_KMS("unallowed offset %#x\n", | ||
| 395 | info->read_mmr_reg.dword_offset + i); | ||
| 396 | kfree(regs); | ||
| 397 | return -EFAULT; | ||
| 398 | } | ||
| 399 | n = copy_to_user(out, regs, min(size, alloc_size)); | ||
| 400 | kfree(regs); | ||
| 401 | return n ? -EFAULT : 0; | ||
| 402 | } | ||
| 403 | case AMDGPU_INFO_DEV_INFO: { | ||
| 404 | struct drm_amdgpu_info_device dev_info = {}; | ||
| 405 | struct amdgpu_cu_info cu_info; | ||
| 406 | |||
| 407 | dev_info.device_id = dev->pdev->device; | ||
| 408 | dev_info.chip_rev = adev->rev_id; | ||
| 409 | dev_info.external_rev = adev->external_rev_id; | ||
| 410 | dev_info.pci_rev = dev->pdev->revision; | ||
| 411 | dev_info.family = adev->family; | ||
| 412 | dev_info.num_shader_engines = adev->gfx.config.max_shader_engines; | ||
| 413 | dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se; | ||
| 414 | /* return all clocks in KHz */ | ||
| 415 | dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10; | ||
| 416 | if (adev->pm.dpm_enabled) | ||
| 417 | dev_info.max_engine_clock = | ||
| 418 | adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; | ||
| 419 | else | ||
| 420 | dev_info.max_engine_clock = adev->pm.default_sclk * 10; | ||
| 421 | dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; | ||
| 422 | dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se * | ||
| 423 | adev->gfx.config.max_shader_engines; | ||
| 424 | dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; | ||
| 425 | dev_info._pad = 0; | ||
| 426 | dev_info.ids_flags = 0; | ||
| 427 | if (adev->flags & AMDGPU_IS_APU) | ||
| 428 | dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION; | ||
| 429 | dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; | ||
| 430 | dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL); | ||
| 431 | dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) * | ||
| 432 | AMDGPU_GPU_PAGE_SIZE; | ||
| 433 | dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; | ||
| 434 | |||
| 435 | amdgpu_asic_get_cu_info(adev, &cu_info); | ||
| 436 | dev_info.cu_active_number = cu_info.number; | ||
| 437 | dev_info.cu_ao_mask = cu_info.ao_cu_mask; | ||
| 438 | memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); | ||
| 439 | |||
| 440 | return copy_to_user(out, &dev_info, | ||
| 441 | min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; | ||
| 442 | } | ||
| 443 | default: | ||
| 444 | DRM_DEBUG_KMS("Invalid request %d\n", info->query); | ||
| 445 | return -EINVAL; | ||
| 446 | } | ||
| 447 | return 0; | ||
| 448 | } | ||
| 449 | |||
| 450 | |||
| 451 | /* | ||
| 452 | * Outdated mess for old drm with Xorg being in charge (void function now). | ||
| 453 | */ | ||
| 454 | /** | ||
| 455 | * amdgpu_driver_lastclose_kms - drm callback for last close | ||
| 456 | * | ||
| 457 | * @dev: drm dev pointer | ||
| 458 | * | ||
| 459 | * Switch vga switcheroo state after last close (all asics). | ||
| 460 | */ | ||
| 461 | void amdgpu_driver_lastclose_kms(struct drm_device *dev) | ||
| 462 | { | ||
| 463 | vga_switcheroo_process_delayed_switch(); | ||
| 464 | } | ||
| 465 | |||
| 466 | /** | ||
| 467 | * amdgpu_driver_open_kms - drm callback for open | ||
| 468 | * | ||
| 469 | * @dev: drm dev pointer | ||
| 470 | * @file_priv: drm file | ||
| 471 | * | ||
| 472 | * On device open, init the per-file vm and context manager (all asics). | ||
| 473 | * Returns 0 on success, error on failure. | ||
| 474 | */ | ||
| 475 | int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) | ||
| 476 | { | ||
| 477 | struct amdgpu_device *adev = dev->dev_private; | ||
| 478 | struct amdgpu_fpriv *fpriv; | ||
| 479 | int r; | ||
| 480 | |||
| 481 | file_priv->driver_priv = NULL; | ||
| 482 | |||
| 483 | r = pm_runtime_get_sync(dev->dev); | ||
| 484 | if (r < 0) | ||
| 485 | return r; | ||
| 486 | |||
| 487 | fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); | ||
| 488 | if (unlikely(!fpriv)) | ||
| 489 | return -ENOMEM; | ||
| 490 | |||
| 491 | r = amdgpu_vm_init(adev, &fpriv->vm); | ||
| 492 | if (r) | ||
| 493 | goto error_free; | ||
| 494 | |||
| 495 | mutex_init(&fpriv->bo_list_lock); | ||
| 496 | idr_init(&fpriv->bo_list_handles); | ||
| 497 | |||
| 498 | /* init context manager */ | ||
| 499 | mutex_init(&fpriv->ctx_mgr.hlock); | ||
| 500 | idr_init(&fpriv->ctx_mgr.ctx_handles); | ||
| 501 | fpriv->ctx_mgr.adev = adev; | ||
| 502 | |||
| 503 | file_priv->driver_priv = fpriv; | ||
| 504 | |||
| 505 | pm_runtime_mark_last_busy(dev->dev); | ||
| 506 | pm_runtime_put_autosuspend(dev->dev); | ||
| 507 | return 0; | ||
| 508 | |||
| 509 | error_free: | ||
| 510 | kfree(fpriv); | ||
| 511 | |||
| 512 | return r; | ||
| 513 | } | ||
| 514 | |||
| 515 | /** | ||
| 516 | * amdgpu_driver_postclose_kms - drm callback for post close | ||
| 517 | * | ||
| 518 | * @dev: drm dev pointer | ||
| 519 | * @file_priv: drm file | ||
| 520 | * | ||
| 521 | * On device post close, tear down the per-file vm and bo lists (all asics). | ||
| 522 | */ | ||
| 523 | void amdgpu_driver_postclose_kms(struct drm_device *dev, | ||
| 524 | struct drm_file *file_priv) | ||
| 525 | { | ||
| 526 | struct amdgpu_device *adev = dev->dev_private; | ||
| 527 | struct amdgpu_fpriv *fpriv = file_priv->driver_priv; | ||
| 528 | struct amdgpu_bo_list *list; | ||
| 529 | int handle; | ||
| 530 | |||
| 531 | if (!fpriv) | ||
| 532 | return; | ||
| 533 | |||
| 534 | amdgpu_vm_fini(adev, &fpriv->vm); | ||
| 535 | |||
| 536 | idr_for_each_entry(&fpriv->bo_list_handles, list, handle) | ||
| 537 | amdgpu_bo_list_free(list); | ||
| 538 | |||
| 539 | idr_destroy(&fpriv->bo_list_handles); | ||
| 540 | mutex_destroy(&fpriv->bo_list_lock); | ||
| 541 | |||
| 542 | /* release context */ | ||
| 543 | amdgpu_ctx_fini(fpriv); | ||
| 544 | |||
| 545 | kfree(fpriv); | ||
| 546 | file_priv->driver_priv = NULL; | ||
| 547 | } | ||
| 548 | |||
| 549 | /** | ||
| 550 | * amdgpu_driver_preclose_kms - drm callback for pre close | ||
| 551 | * | ||
| 552 | * @dev: drm dev pointer | ||
| 553 | * @file_priv: drm file | ||
| 554 | * | ||
| 555 | * On device pre close, free the UVD and VCE handles owned by | ||
| 556 | * the drm file (all asics). | ||
| 557 | */ | ||
| 558 | void amdgpu_driver_preclose_kms(struct drm_device *dev, | ||
| 559 | struct drm_file *file_priv) | ||
| 560 | { | ||
| 561 | struct amdgpu_device *adev = dev->dev_private; | ||
| 562 | |||
| 563 | amdgpu_uvd_free_handles(adev, file_priv); | ||
| 564 | amdgpu_vce_free_handles(adev, file_priv); | ||
| 565 | } | ||
| 566 | |||
| 567 | /* | ||
| 568 | * VBlank related functions. | ||
| 569 | */ | ||
| 570 | /** | ||
| 571 | * amdgpu_get_vblank_counter_kms - get frame count | ||
| 572 | * | ||
| 573 | * @dev: drm dev pointer | ||
| 574 | * @crtc: crtc to get the frame count from | ||
| 575 | * | ||
| 576 | * Gets the frame count on the requested crtc (all asics). | ||
| 577 | * Returns frame count on success, -EINVAL on failure. | ||
| 578 | */ | ||
| 579 | u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc) | ||
| 580 | { | ||
| 581 | struct amdgpu_device *adev = dev->dev_private; | ||
| 582 | |||
| 583 | if (crtc < 0 || crtc >= adev->mode_info.num_crtc) { | ||
| 584 | DRM_ERROR("Invalid crtc %d\n", crtc); | ||
| 585 | return -EINVAL; | ||
| 586 | } | ||
| 587 | |||
| 588 | return amdgpu_display_vblank_get_counter(adev, crtc); | ||
| 589 | } | ||
| 590 | |||
| 591 | /** | ||
| 592 | * amdgpu_enable_vblank_kms - enable vblank interrupt | ||
| 593 | * | ||
| 594 | * @dev: drm dev pointer | ||
| 595 | * @crtc: crtc to enable vblank interrupt for | ||
| 596 | * | ||
| 597 | * Enable the interrupt on the requested crtc (all asics). | ||
| 598 | * Returns 0 on success, -EINVAL on failure. | ||
| 599 | */ | ||
| 600 | int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc) | ||
| 601 | { | ||
| 602 | struct amdgpu_device *adev = dev->dev_private; | ||
| 603 | int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc); | ||
| 604 | |||
| 605 | return amdgpu_irq_get(adev, &adev->crtc_irq, idx); | ||
| 606 | } | ||
| 607 | |||
| 608 | /** | ||
| 609 | * amdgpu_disable_vblank_kms - disable vblank interrupt | ||
| 610 | * | ||
| 611 | * @dev: drm dev pointer | ||
| 612 | * @crtc: crtc to disable vblank interrupt for | ||
| 613 | * | ||
| 614 | * Disable the interrupt on the requested crtc (all asics). | ||
| 615 | */ | ||
| 616 | void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc) | ||
| 617 | { | ||
| 618 | struct amdgpu_device *adev = dev->dev_private; | ||
| 619 | int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc); | ||
| 620 | |||
| 621 | amdgpu_irq_put(adev, &adev->crtc_irq, idx); | ||
| 622 | } | ||
| 623 | |||
| 624 | /** | ||
| 625 | * amdgpu_get_vblank_timestamp_kms - get vblank timestamp | ||
| 626 | * | ||
| 627 | * @dev: drm dev pointer | ||
| 628 | * @crtc: crtc to get the timestamp for | ||
| 629 | * @max_error: max error | ||
| 630 | * @vblank_time: time value | ||
| 631 | * @flags: flags passed to the driver | ||
| 632 | * | ||
| 633 | * Gets the timestamp on the requested crtc based on the | ||
| 634 | * scanout position (all asics). | ||
| 635 | * Returns positive status flags on success, negative error on failure. | ||
| 636 | */ | ||
| 637 | int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, | ||
| 638 | int *max_error, | ||
| 639 | struct timeval *vblank_time, | ||
| 640 | unsigned flags) | ||
| 641 | { | ||
| 642 | struct drm_crtc *drmcrtc; | ||
| 643 | struct amdgpu_device *adev = dev->dev_private; | ||
| 644 | |||
| 645 | if (crtc < 0 || crtc >= dev->num_crtcs) { | ||
| 646 | DRM_ERROR("Invalid crtc %d\n", crtc); | ||
| 647 | return -EINVAL; | ||
| 648 | } | ||
| 649 | |||
| 650 | /* Get associated drm_crtc: */ | ||
| 651 | drmcrtc = &adev->mode_info.crtcs[crtc]->base; | ||
| 652 | |||
| 653 | /* Helper routine in DRM core does all the work: */ | ||
| 654 | return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, | ||
| 655 | vblank_time, flags, | ||
| 656 | drmcrtc, &drmcrtc->hwmode); | ||
| 657 | } | ||
| 658 | |||
| 659 | const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { | ||
| 660 | DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
| 661 | DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
| 662 | DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
| 663 | /* KMS */ | ||
| 664 | DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
| 665 | DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
| 666 | DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
| 667 | DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
| 668 | DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
| 669 | DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
| 670 | DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
| 671 | DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
| 672 | DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
| 673 | }; | ||
| 674 | int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms); | ||
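As the info ioctl above shows, results travel back through a user-supplied `return_pointer`/`return_size` pair, and the kernel clamps each copy to `min(return_size, sizeof(result))`. A hedged userspace sketch of the simplest query, `AMDGPU_INFO_ACCEL_WORKING`, assuming an already-open device fd and the uapi header added alongside this driver:

```c
/* Userspace sketch (not part of this commit): query whether GPU
 * acceleration is working via DRM_IOCTL_AMDGPU_INFO. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int query_accel_working(int fd, uint32_t *accel)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)accel;  /* copy_to_user() target */
	request.return_size = sizeof(*accel);
	request.query = AMDGPU_INFO_ACCEL_WORKING;

	return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
}
```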
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c new file mode 100644 index 000000000000..e94429185660 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | |||
| @@ -0,0 +1,319 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * All Rights Reserved. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the | ||
| 7 | * "Software"), to deal in the Software without restriction, including | ||
| 8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 10 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 11 | * the following conditions: | ||
| 12 | * | ||
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 20 | * | ||
| 21 | * The above copyright notice and this permission notice (including the | ||
| 22 | * next paragraph) shall be included in all copies or substantial portions | ||
| 23 | * of the Software. | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | /* | ||
| 27 | * Authors: | ||
| 28 | * Christian König <christian.koenig@amd.com> | ||
| 29 | */ | ||
| 30 | |||
| 31 | #include <linux/firmware.h> | ||
| 32 | #include <linux/module.h> | ||
| 33 | #include <linux/mmu_notifier.h> | ||
| 34 | #include <drm/drmP.h> | ||
| 35 | #include <drm/drm.h> | ||
| 36 | |||
| 37 | #include "amdgpu.h" | ||
| 38 | |||
| 39 | struct amdgpu_mn { | ||
| 40 | /* constant after initialisation */ | ||
| 41 | struct amdgpu_device *adev; | ||
| 42 | struct mm_struct *mm; | ||
| 43 | struct mmu_notifier mn; | ||
| 44 | |||
| 45 | /* only used on destruction */ | ||
| 46 | struct work_struct work; | ||
| 47 | |||
| 48 | /* protected by adev->mn_lock */ | ||
| 49 | struct hlist_node node; | ||
| 50 | |||
| 51 | /* objects protected by lock */ | ||
| 52 | struct mutex lock; | ||
| 53 | struct rb_root objects; | ||
| 54 | }; | ||
| 55 | |||
| 56 | struct amdgpu_mn_node { | ||
| 57 | struct interval_tree_node it; | ||
| 58 | struct list_head bos; | ||
| 59 | }; | ||
| 60 | |||
| 61 | /** | ||
| 62 | * amdgpu_mn_destroy - destroy the rmn | ||
| 63 | * | ||
| 64 | * @work: previously scheduled work item | ||
| 65 | * | ||
| 66 | * Lazily destroys the notifier from a work item. | ||
| 67 | */ | ||
| 68 | static void amdgpu_mn_destroy(struct work_struct *work) | ||
| 69 | { | ||
| 70 | struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work); | ||
| 71 | struct amdgpu_device *adev = rmn->adev; | ||
| 72 | struct amdgpu_mn_node *node, *next_node; | ||
| 73 | struct amdgpu_bo *bo, *next_bo; | ||
| 74 | |||
| 75 | mutex_lock(&adev->mn_lock); | ||
| 76 | mutex_lock(&rmn->lock); | ||
| 77 | hash_del(&rmn->node); | ||
| 78 | rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects, | ||
| 79 | it.rb) { | ||
| 80 | |||
| 81 | interval_tree_remove(&node->it, &rmn->objects); | ||
| 82 | list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) { | ||
| 83 | bo->mn = NULL; | ||
| 84 | list_del_init(&bo->mn_list); | ||
| 85 | } | ||
| 86 | kfree(node); | ||
| 87 | } | ||
| 88 | mutex_unlock(&rmn->lock); | ||
| 89 | mutex_unlock(&adev->mn_lock); | ||
| 90 | mmu_notifier_unregister(&rmn->mn, rmn->mm); | ||
| 91 | kfree(rmn); | ||
| 92 | } | ||
| 93 | |||
| 94 | /** | ||
| 95 | * amdgpu_mn_release - callback to notify about mm destruction | ||
| 96 | * | ||
| 97 | * @mn: our notifier | ||
| 98 | * @mm: the mm this callback is about | ||
| 99 | * | ||
| 100 | * Schedule a work item to lazily destroy our notifier. | ||
| 101 | */ | ||
| 102 | static void amdgpu_mn_release(struct mmu_notifier *mn, | ||
| 103 | struct mm_struct *mm) | ||
| 104 | { | ||
| 105 | struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn); | ||
| 106 | INIT_WORK(&rmn->work, amdgpu_mn_destroy); | ||
| 107 | schedule_work(&rmn->work); | ||
| 108 | } | ||
| 109 | |||
| 110 | /** | ||
| 111 | * amdgpu_mn_invalidate_range_start - callback to notify about mm change | ||
| 112 | * | ||
| 113 | * @mn: our notifier | ||
| 114 | * @mm: the mm this callback is about | ||
| 115 | * @start: start of updated range | ||
| 116 | * @end: end of updated range | ||
| 117 | * | ||
| 118 | * We wait for all BOs between start and end to become idle and | ||
| 119 | * unmap them by moving them into the system domain again. | ||
| 120 | */ | ||
| 121 | static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, | ||
| 122 | struct mm_struct *mm, | ||
| 123 | unsigned long start, | ||
| 124 | unsigned long end) | ||
| 125 | { | ||
| 126 | struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn); | ||
| 127 | struct interval_tree_node *it; | ||
| 128 | |||
| 129 | /* notification is exclusive, but interval is inclusive */ | ||
| 130 | end -= 1; | ||
| 131 | |||
| 132 | mutex_lock(&rmn->lock); | ||
| 133 | |||
| 134 | it = interval_tree_iter_first(&rmn->objects, start, end); | ||
| 135 | while (it) { | ||
| 136 | struct amdgpu_mn_node *node; | ||
| 137 | struct amdgpu_bo *bo; | ||
| 138 | int r; | ||
| 139 | |||
| 140 | node = container_of(it, struct amdgpu_mn_node, it); | ||
| 141 | it = interval_tree_iter_next(it, start, end); | ||
| 142 | |||
| 143 | list_for_each_entry(bo, &node->bos, mn_list) { | ||
| 144 | |||
| 145 | r = amdgpu_bo_reserve(bo, true); | ||
| 146 | if (r) { | ||
| 147 | DRM_ERROR("(%d) failed to reserve user bo\n", r); | ||
| 148 | continue; | ||
| 149 | } | ||
| 150 | |||
| 151 | r = reservation_object_wait_timeout_rcu(bo->tbo.resv, | ||
| 152 | true, false, MAX_SCHEDULE_TIMEOUT); | ||
| 153 | if (r) | ||
| 154 | DRM_ERROR("(%d) failed to wait for user bo\n", r); | ||
| 155 | |||
| 156 | amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); | ||
| 157 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | ||
| 158 | if (r) | ||
| 159 | DRM_ERROR("(%d) failed to validate user bo\n", r); | ||
| 160 | |||
| 161 | amdgpu_bo_unreserve(bo); | ||
| 162 | } | ||
| 163 | } | ||
| 164 | |||
| 165 | mutex_unlock(&rmn->lock); | ||
| 166 | } | ||
| 167 | |||
| 168 | static const struct mmu_notifier_ops amdgpu_mn_ops = { | ||
| 169 | .release = amdgpu_mn_release, | ||
| 170 | .invalidate_range_start = amdgpu_mn_invalidate_range_start, | ||
| 171 | }; | ||
| 172 | |||
| 173 | /** | ||
| 174 | * amdgpu_mn_get - create notifier context | ||
| 175 | * | ||
| 176 | * @adev: amdgpu device pointer | ||
| 177 | * | ||
| 178 | * Creates a notifier context for current->mm. | ||
| 179 | */ | ||
| 180 | static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) | ||
| 181 | { | ||
| 182 | struct mm_struct *mm = current->mm; | ||
| 183 | struct amdgpu_mn *rmn; | ||
| 184 | int r; | ||
| 185 | |||
| 186 | down_write(&mm->mmap_sem); | ||
| 187 | mutex_lock(&adev->mn_lock); | ||
| 188 | |||
| 189 | hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm) | ||
| 190 | if (rmn->mm == mm) | ||
| 191 | goto release_locks; | ||
| 192 | |||
| 193 | rmn = kzalloc(sizeof(*rmn), GFP_KERNEL); | ||
| 194 | if (!rmn) { | ||
| 195 | rmn = ERR_PTR(-ENOMEM); | ||
| 196 | goto release_locks; | ||
| 197 | } | ||
| 198 | |||
| 199 | rmn->adev = adev; | ||
| 200 | rmn->mm = mm; | ||
| 201 | rmn->mn.ops = &amdgpu_mn_ops; | ||
| 202 | mutex_init(&rmn->lock); | ||
| 203 | rmn->objects = RB_ROOT; | ||
| 204 | |||
| 205 | r = __mmu_notifier_register(&rmn->mn, mm); | ||
| 206 | if (r) | ||
| 207 | goto free_rmn; | ||
| 208 | |||
| 209 | hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm); | ||
| 210 | |||
| 211 | release_locks: | ||
| 212 | mutex_unlock(&adev->mn_lock); | ||
| 213 | up_write(&mm->mmap_sem); | ||
| 214 | |||
| 215 | return rmn; | ||
| 216 | |||
| 217 | free_rmn: | ||
| 218 | mutex_unlock(&adev->mn_lock); | ||
| 219 | up_write(&mm->mmap_sem); | ||
| 220 | kfree(rmn); | ||
| 221 | |||
| 222 | return ERR_PTR(r); | ||
| 223 | } | ||
| 224 | |||
| 225 | /** | ||
| 226 | * amdgpu_mn_register - register a BO for notifier updates | ||
| 227 | * | ||
| 228 | * @bo: amdgpu buffer object | ||
| 229 | * @addr: userptr addr we should monitor | ||
| 230 | * | ||
| 231 | * Registers an MMU notifier for the given BO at the specified address. | ||
| 232 | * Returns 0 on success, -ERRNO if anything goes wrong. | ||
| 233 | */ | ||
| 234 | int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) | ||
| 235 | { | ||
| 236 | unsigned long end = addr + amdgpu_bo_size(bo) - 1; | ||
| 237 | struct amdgpu_device *adev = bo->adev; | ||
| 238 | struct amdgpu_mn *rmn; | ||
| 239 | struct amdgpu_mn_node *node = NULL; | ||
| 240 | struct list_head bos; | ||
| 241 | struct interval_tree_node *it; | ||
| 242 | |||
| 243 | rmn = amdgpu_mn_get(adev); | ||
| 244 | if (IS_ERR(rmn)) | ||
| 245 | return PTR_ERR(rmn); | ||
| 246 | |||
| 247 | INIT_LIST_HEAD(&bos); | ||
| 248 | |||
| 249 | mutex_lock(&rmn->lock); | ||
| 250 | |||
| 251 | while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) { | ||
| 252 | kfree(node); | ||
| 253 | node = container_of(it, struct amdgpu_mn_node, it); | ||
| 254 | interval_tree_remove(&node->it, &rmn->objects); | ||
| 255 | addr = min(it->start, addr); | ||
| 256 | end = max(it->last, end); | ||
| 257 | list_splice(&node->bos, &bos); | ||
| 258 | } | ||
| 259 | |||
| 260 | if (!node) { | ||
| 261 | node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL); | ||
| 262 | if (!node) { | ||
| 263 | mutex_unlock(&rmn->lock); | ||
| 264 | return -ENOMEM; | ||
| 265 | } | ||
| 266 | } | ||
| 267 | |||
| 268 | bo->mn = rmn; | ||
| 269 | |||
| 270 | node->it.start = addr; | ||
| 271 | node->it.last = end; | ||
| 272 | INIT_LIST_HEAD(&node->bos); | ||
| 273 | list_splice(&bos, &node->bos); | ||
| 274 | list_add(&bo->mn_list, &node->bos); | ||
| 275 | |||
| 276 | interval_tree_insert(&node->it, &rmn->objects); | ||
| 277 | |||
| 278 | mutex_unlock(&rmn->lock); | ||
| 279 | |||
| 280 | return 0; | ||
| 281 | } | ||
| 282 | |||
| 283 | /** | ||
| 284 | * amdgpu_mn_unregister - unregister a BO for notifier updates | ||
| 285 | * | ||
| 286 | * @bo: amdgpu buffer object | ||
| 287 | * | ||
| 288 | * Remove any registration of MMU notifier updates from the buffer object. | ||
| 289 | */ | ||
| 290 | void amdgpu_mn_unregister(struct amdgpu_bo *bo) | ||
| 291 | { | ||
| 292 | struct amdgpu_device *adev = bo->adev; | ||
| 293 | struct amdgpu_mn *rmn; | ||
| 294 | struct list_head *head; | ||
| 295 | |||
| 296 | mutex_lock(&adev->mn_lock); | ||
| 297 | rmn = bo->mn; | ||
| 298 | if (rmn == NULL) { | ||
| 299 | mutex_unlock(&adev->mn_lock); | ||
| 300 | return; | ||
| 301 | } | ||
| 302 | |||
| 303 | mutex_lock(&rmn->lock); | ||
| 304 | /* save the next list entry for later */ | ||
| 305 | head = bo->mn_list.next; | ||
| 306 | |||
| 307 | bo->mn = NULL; | ||
| 308 | list_del(&bo->mn_list); | ||
| 309 | |||
| 310 | if (list_empty(head)) { | ||
| 311 | struct amdgpu_mn_node *node; | ||
| 312 | node = container_of(head, struct amdgpu_mn_node, bos); | ||
| 313 | interval_tree_remove(&node->it, &rmn->objects); | ||
| 314 | kfree(node); | ||
| 315 | } | ||
| 316 | |||
| 317 | mutex_unlock(&rmn->lock); | ||
| 318 | mutex_unlock(&adev->mn_lock); | ||
| 319 | } | ||
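Users of this notifier pair the two exported entry points around the lifetime of a userptr BO: register before the GPU starts using the pages, unregister before the BO is freed. Note that `amdgpu_mn_register()` merges all interval tree nodes overlapping the new range into one node, so a single invalidation walk covers every BO in the merged range. A minimal sketch; the `example_*` wrappers are illustrative only:

```c
/* Sketch only: how a userptr path is expected to drive the notifier API. */
static int example_userptr_setup(struct amdgpu_bo *bo, unsigned long addr)
{
	/* watch the CPU address range backing this BO for invalidations */
	return amdgpu_mn_register(bo, addr);
}

static void example_userptr_teardown(struct amdgpu_bo *bo)
{
	/* stop invalidation callbacks before the BO goes away */
	amdgpu_mn_unregister(bo);
}
```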
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h new file mode 100644 index 000000000000..64efe5b52e65 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | |||
| @@ -0,0 +1,586 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and | ||
| 3 | * VA Linux Systems Inc., Fremont, California. | ||
| 4 | * Copyright 2008 Red Hat Inc. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Original Authors: | ||
| 25 | * Kevin E. Martin, Rickard E. Faith, Alan Hourihane | ||
| 26 | * | ||
| 27 | * Kernel port Author: Dave Airlie | ||
| 28 | */ | ||
| 29 | |||
| 30 | #ifndef AMDGPU_MODE_H | ||
| 31 | #define AMDGPU_MODE_H | ||
| 32 | |||
| 33 | #include <drm/drm_crtc.h> | ||
| 34 | #include <drm/drm_edid.h> | ||
| 35 | #include <drm/drm_dp_helper.h> | ||
| 36 | #include <drm/drm_fixed.h> | ||
| 37 | #include <drm/drm_crtc_helper.h> | ||
| 38 | #include <drm/drm_plane_helper.h> | ||
| 39 | #include <linux/i2c.h> | ||
| 40 | #include <linux/i2c-algo-bit.h> | ||
| 41 | |||
| 42 | struct amdgpu_bo; | ||
| 43 | struct amdgpu_device; | ||
| 44 | struct amdgpu_encoder; | ||
| 45 | struct amdgpu_router; | ||
| 46 | struct amdgpu_hpd; | ||
| 47 | |||
| 48 | #define to_amdgpu_crtc(x) container_of(x, struct amdgpu_crtc, base) | ||
| 49 | #define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base) | ||
| 50 | #define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base) | ||
| 51 | #define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base) | ||
| 52 | |||
| 53 | #define AMDGPU_MAX_HPD_PINS 6 | ||
| 54 | #define AMDGPU_MAX_CRTCS 6 | ||
| 55 | #define AMDGPU_MAX_AFMT_BLOCKS 7 | ||
| 56 | |||
| 57 | enum amdgpu_rmx_type { | ||
| 58 | RMX_OFF, | ||
| 59 | RMX_FULL, | ||
| 60 | RMX_CENTER, | ||
| 61 | RMX_ASPECT | ||
| 62 | }; | ||
| 63 | |||
| 64 | enum amdgpu_underscan_type { | ||
| 65 | UNDERSCAN_OFF, | ||
| 66 | UNDERSCAN_ON, | ||
| 67 | UNDERSCAN_AUTO, | ||
| 68 | }; | ||
| 69 | |||
| 70 | #define AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS 50 | ||
| 71 | #define AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS 10 | ||
| 72 | |||
| 73 | enum amdgpu_hpd_id { | ||
| 74 | AMDGPU_HPD_1 = 0, | ||
| 75 | AMDGPU_HPD_2, | ||
| 76 | AMDGPU_HPD_3, | ||
| 77 | AMDGPU_HPD_4, | ||
| 78 | AMDGPU_HPD_5, | ||
| 79 | AMDGPU_HPD_6, | ||
| 80 | AMDGPU_HPD_LAST, | ||
| 81 | AMDGPU_HPD_NONE = 0xff, | ||
| 82 | }; | ||
| 83 | |||
| 84 | enum amdgpu_crtc_irq { | ||
| 85 | AMDGPU_CRTC_IRQ_VBLANK1 = 0, | ||
| 86 | AMDGPU_CRTC_IRQ_VBLANK2, | ||
| 87 | AMDGPU_CRTC_IRQ_VBLANK3, | ||
| 88 | AMDGPU_CRTC_IRQ_VBLANK4, | ||
| 89 | AMDGPU_CRTC_IRQ_VBLANK5, | ||
| 90 | AMDGPU_CRTC_IRQ_VBLANK6, | ||
| 91 | AMDGPU_CRTC_IRQ_VLINE1, | ||
| 92 | AMDGPU_CRTC_IRQ_VLINE2, | ||
| 93 | AMDGPU_CRTC_IRQ_VLINE3, | ||
| 94 | AMDGPU_CRTC_IRQ_VLINE4, | ||
| 95 | AMDGPU_CRTC_IRQ_VLINE5, | ||
| 96 | AMDGPU_CRTC_IRQ_VLINE6, | ||
| 97 | AMDGPU_CRTC_IRQ_LAST, | ||
| 98 | AMDGPU_CRTC_IRQ_NONE = 0xff | ||
| 99 | }; | ||
| 100 | |||
| 101 | enum amdgpu_pageflip_irq { | ||
| 102 | AMDGPU_PAGEFLIP_IRQ_D1 = 0, | ||
| 103 | AMDGPU_PAGEFLIP_IRQ_D2, | ||
| 104 | AMDGPU_PAGEFLIP_IRQ_D3, | ||
| 105 | AMDGPU_PAGEFLIP_IRQ_D4, | ||
| 106 | AMDGPU_PAGEFLIP_IRQ_D5, | ||
| 107 | AMDGPU_PAGEFLIP_IRQ_D6, | ||
| 108 | AMDGPU_PAGEFLIP_IRQ_LAST, | ||
| 109 | AMDGPU_PAGEFLIP_IRQ_NONE = 0xff | ||
| 110 | }; | ||
| 111 | |||
| 112 | enum amdgpu_flip_status { | ||
| 113 | AMDGPU_FLIP_NONE, | ||
| 114 | AMDGPU_FLIP_PENDING, | ||
| 115 | AMDGPU_FLIP_SUBMITTED | ||
| 116 | }; | ||
| 117 | |||
| 118 | #define AMDGPU_MAX_I2C_BUS 16 | ||
| 119 | |||
| 120 | /* amdgpu gpio-based i2c | ||
| 121 | * 1. "mask" reg and bits | ||
| 122 | * grabs the gpio pins for software use | ||
| 123 | * 0=not held 1=held | ||
| 124 | * 2. "a" reg and bits | ||
| 125 | * output pin value | ||
| 126 | * 0=low 1=high | ||
| 127 | * 3. "en" reg and bits | ||
| 128 | * sets the pin direction | ||
| 129 | * 0=input 1=output | ||
| 130 | * 4. "y" reg and bits | ||
| 131 | * input pin value | ||
| 132 | * 0=low 1=high | ||
| 133 | */ | ||
| 134 | struct amdgpu_i2c_bus_rec { | ||
| 135 | bool valid; | ||
| 136 | /* id used by atom */ | ||
| 137 | uint8_t i2c_id; | ||
| 138 | /* hpd id used by atom */ | ||
| 139 | enum amdgpu_hpd_id hpd; | ||
| 140 | /* can be used with hw i2c engine */ | ||
| 141 | bool hw_capable; | ||
| 142 | /* uses multi-media i2c engine */ | ||
| 143 | bool mm_i2c; | ||
| 144 | /* regs and bits */ | ||
| 145 | uint32_t mask_clk_reg; | ||
| 146 | uint32_t mask_data_reg; | ||
| 147 | uint32_t a_clk_reg; | ||
| 148 | uint32_t a_data_reg; | ||
| 149 | uint32_t en_clk_reg; | ||
| 150 | uint32_t en_data_reg; | ||
| 151 | uint32_t y_clk_reg; | ||
| 152 | uint32_t y_data_reg; | ||
| 153 | uint32_t mask_clk_mask; | ||
| 154 | uint32_t mask_data_mask; | ||
| 155 | uint32_t a_clk_mask; | ||
| 156 | uint32_t a_data_mask; | ||
| 157 | uint32_t en_clk_mask; | ||
| 158 | uint32_t en_data_mask; | ||
| 159 | uint32_t y_clk_mask; | ||
| 160 | uint32_t y_data_mask; | ||
| 161 | }; | ||
| 162 | |||
| 163 | #define AMDGPU_MAX_BIOS_CONNECTOR 16 | ||
| 164 | |||
| 165 | /* pll flags */ | ||
| 166 | #define AMDGPU_PLL_USE_BIOS_DIVS (1 << 0) | ||
| 167 | #define AMDGPU_PLL_NO_ODD_POST_DIV (1 << 1) | ||
| 168 | #define AMDGPU_PLL_USE_REF_DIV (1 << 2) | ||
| 169 | #define AMDGPU_PLL_LEGACY (1 << 3) | ||
| 170 | #define AMDGPU_PLL_PREFER_LOW_REF_DIV (1 << 4) | ||
| 171 | #define AMDGPU_PLL_PREFER_HIGH_REF_DIV (1 << 5) | ||
| 172 | #define AMDGPU_PLL_PREFER_LOW_FB_DIV (1 << 6) | ||
| 173 | #define AMDGPU_PLL_PREFER_HIGH_FB_DIV (1 << 7) | ||
| 174 | #define AMDGPU_PLL_PREFER_LOW_POST_DIV (1 << 8) | ||
| 175 | #define AMDGPU_PLL_PREFER_HIGH_POST_DIV (1 << 9) | ||
| 176 | #define AMDGPU_PLL_USE_FRAC_FB_DIV (1 << 10) | ||
| 177 | #define AMDGPU_PLL_PREFER_CLOSEST_LOWER (1 << 11) | ||
| 178 | #define AMDGPU_PLL_USE_POST_DIV (1 << 12) | ||
| 179 | #define AMDGPU_PLL_IS_LCD (1 << 13) | ||
| 180 | #define AMDGPU_PLL_PREFER_MINM_OVER_MAXP (1 << 14) | ||
| 181 | |||
| 182 | struct amdgpu_pll { | ||
| 183 | /* reference frequency */ | ||
| 184 | uint32_t reference_freq; | ||
| 185 | |||
| 186 | /* fixed dividers */ | ||
| 187 | uint32_t reference_div; | ||
| 188 | uint32_t post_div; | ||
| 189 | |||
| 190 | /* pll in/out limits */ | ||
| 191 | uint32_t pll_in_min; | ||
| 192 | uint32_t pll_in_max; | ||
| 193 | uint32_t pll_out_min; | ||
| 194 | uint32_t pll_out_max; | ||
| 195 | uint32_t lcd_pll_out_min; | ||
| 196 | uint32_t lcd_pll_out_max; | ||
| 197 | uint32_t best_vco; | ||
| 198 | |||
| 199 | /* divider limits */ | ||
| 200 | uint32_t min_ref_div; | ||
| 201 | uint32_t max_ref_div; | ||
| 202 | uint32_t min_post_div; | ||
| 203 | uint32_t max_post_div; | ||
| 204 | uint32_t min_feedback_div; | ||
| 205 | uint32_t max_feedback_div; | ||
| 206 | uint32_t min_frac_feedback_div; | ||
| 207 | uint32_t max_frac_feedback_div; | ||
| 208 | |||
| 209 | /* flags for the current clock */ | ||
| 210 | uint32_t flags; | ||
| 211 | |||
| 212 | /* pll id */ | ||
| 213 | uint32_t id; | ||
| 214 | }; | ||
| 215 | |||
| 216 | struct amdgpu_i2c_chan { | ||
| 217 | struct i2c_adapter adapter; | ||
| 218 | struct drm_device *dev; | ||
| 219 | struct i2c_algo_bit_data bit; | ||
| 220 | struct amdgpu_i2c_bus_rec rec; | ||
| 221 | struct drm_dp_aux aux; | ||
| 222 | bool has_aux; | ||
| 223 | struct mutex mutex; | ||
| 224 | }; | ||
| 225 | |||
| 226 | struct amdgpu_fbdev; | ||
| 227 | |||
| 228 | struct amdgpu_afmt { | ||
| 229 | bool enabled; | ||
| 230 | int offset; | ||
| 231 | bool last_buffer_filled_status; | ||
| 232 | int id; | ||
| 233 | struct amdgpu_audio_pin *pin; | ||
| 234 | }; | ||
| 235 | |||
| 236 | /* | ||
| 237 | * Audio | ||
| 238 | */ | ||
| 239 | struct amdgpu_audio_pin { | ||
| 240 | int channels; | ||
| 241 | int rate; | ||
| 242 | int bits_per_sample; | ||
| 243 | u8 status_bits; | ||
| 244 | u8 category_code; | ||
| 245 | u32 offset; | ||
| 246 | bool connected; | ||
| 247 | u32 id; | ||
| 248 | }; | ||
| 249 | |||
| 250 | struct amdgpu_audio { | ||
| 251 | bool enabled; | ||
| 252 | struct amdgpu_audio_pin pin[AMDGPU_MAX_AFMT_BLOCKS]; | ||
| 253 | int num_pins; | ||
| 254 | }; | ||
| 255 | |||
| 256 | struct amdgpu_mode_mc_save { | ||
| 257 | u32 vga_render_control; | ||
| 258 | u32 vga_hdp_control; | ||
| 259 | bool crtc_enabled[AMDGPU_MAX_CRTCS]; | ||
| 260 | }; | ||
| 261 | |||
| 262 | struct amdgpu_display_funcs { | ||
| 263 | /* vga render */ | ||
| 264 | void (*set_vga_render_state)(struct amdgpu_device *adev, bool render); | ||
| 265 | /* display watermarks */ | ||
| 266 | void (*bandwidth_update)(struct amdgpu_device *adev); | ||
| 267 | /* get frame count */ | ||
| 268 | u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc); | ||
| 269 | /* wait for vblank */ | ||
| 270 | void (*vblank_wait)(struct amdgpu_device *adev, int crtc); | ||
| 271 | /* is dce hung */ | ||
| 272 | bool (*is_display_hung)(struct amdgpu_device *adev); | ||
| 273 | /* set backlight level */ | ||
| 274 | void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder, | ||
| 275 | u8 level); | ||
| 276 | /* get backlight level */ | ||
| 277 | u8 (*backlight_get_level)(struct amdgpu_encoder *amdgpu_encoder); | ||
| 278 | /* hotplug detect */ | ||
| 279 | bool (*hpd_sense)(struct amdgpu_device *adev, enum amdgpu_hpd_id hpd); | ||
| 280 | void (*hpd_set_polarity)(struct amdgpu_device *adev, | ||
| 281 | enum amdgpu_hpd_id hpd); | ||
| 282 | u32 (*hpd_get_gpio_reg)(struct amdgpu_device *adev); | ||
| 283 | /* pageflipping */ | ||
| 284 | void (*page_flip)(struct amdgpu_device *adev, | ||
| 285 | int crtc_id, u64 crtc_base); | ||
| 286 | int (*page_flip_get_scanoutpos)(struct amdgpu_device *adev, int crtc, | ||
| 287 | u32 *vbl, u32 *position); | ||
| 288 | /* display topology setup */ | ||
| 289 | void (*add_encoder)(struct amdgpu_device *adev, | ||
| 290 | uint32_t encoder_enum, | ||
| 291 | uint32_t supported_device, | ||
| 292 | u16 caps); | ||
| 293 | void (*add_connector)(struct amdgpu_device *adev, | ||
| 294 | uint32_t connector_id, | ||
| 295 | uint32_t supported_device, | ||
| 296 | int connector_type, | ||
| 297 | struct amdgpu_i2c_bus_rec *i2c_bus, | ||
| 298 | uint16_t connector_object_id, | ||
| 299 | struct amdgpu_hpd *hpd, | ||
| 300 | struct amdgpu_router *router); | ||
| 301 | void (*stop_mc_access)(struct amdgpu_device *adev, | ||
| 302 | struct amdgpu_mode_mc_save *save); | ||
| 303 | void (*resume_mc_access)(struct amdgpu_device *adev, | ||
| 304 | struct amdgpu_mode_mc_save *save); | ||
| 305 | }; | ||
| 306 | |||
| 307 | struct amdgpu_mode_info { | ||
| 308 | struct atom_context *atom_context; | ||
| 309 | struct card_info *atom_card_info; | ||
| 310 | bool mode_config_initialized; | ||
| 311 | struct amdgpu_crtc *crtcs[6]; | ||
| 312 | struct amdgpu_afmt *afmt[7]; | ||
| 313 | /* DVI-I properties */ | ||
| 314 | struct drm_property *coherent_mode_property; | ||
| 315 | /* DAC enable load detect */ | ||
| 316 | struct drm_property *load_detect_property; | ||
| 317 | /* underscan */ | ||
| 318 | struct drm_property *underscan_property; | ||
| 319 | struct drm_property *underscan_hborder_property; | ||
| 320 | struct drm_property *underscan_vborder_property; | ||
| 321 | /* audio */ | ||
| 322 | struct drm_property *audio_property; | ||
| 323 | /* FMT dithering */ | ||
| 324 | struct drm_property *dither_property; | ||
| 325 | /* hardcoded DFP edid from BIOS */ | ||
| 326 | struct edid *bios_hardcoded_edid; | ||
| 327 | int bios_hardcoded_edid_size; | ||
| 328 | |||
| 329 | /* pointer to fbdev info structure */ | ||
| 330 | struct amdgpu_fbdev *rfbdev; | ||
| 331 | /* firmware flags */ | ||
| 332 | u16 firmware_flags; | ||
| 333 | /* pointer to backlight encoder */ | ||
| 334 | struct amdgpu_encoder *bl_encoder; | ||
| 335 | struct amdgpu_audio audio; /* audio stuff */ | ||
| 336 | int num_crtc; /* number of crtcs */ | ||
| 337 | int num_hpd; /* number of hpd pins */ | ||
| 338 | int num_dig; /* number of dig blocks */ | ||
| 339 | int disp_priority; | ||
| 340 | const struct amdgpu_display_funcs *funcs; | ||
| 341 | }; | ||
| 342 | |||
| 343 | #define AMDGPU_MAX_BL_LEVEL 0xFF | ||
| 344 | |||
| 345 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) | ||
| 346 | |||
| 347 | struct amdgpu_backlight_privdata { | ||
| 348 | struct amdgpu_encoder *encoder; | ||
| 349 | uint8_t negative; | ||
| 350 | }; | ||
| 351 | |||
| 352 | #endif | ||
| 353 | |||
| 354 | struct amdgpu_atom_ss { | ||
| 355 | uint16_t percentage; | ||
| 356 | uint16_t percentage_divider; | ||
| 357 | uint8_t type; | ||
| 358 | uint16_t step; | ||
| 359 | uint8_t delay; | ||
| 360 | uint8_t range; | ||
| 361 | uint8_t refdiv; | ||
| 362 | /* asic_ss */ | ||
| 363 | uint16_t rate; | ||
| 364 | uint16_t amount; | ||
| 365 | }; | ||
| 366 | |||
| 367 | struct amdgpu_crtc { | ||
| 368 | struct drm_crtc base; | ||
| 369 | int crtc_id; | ||
| 370 | u16 lut_r[256], lut_g[256], lut_b[256]; | ||
| 371 | bool enabled; | ||
| 372 | bool can_tile; | ||
| 373 | uint32_t crtc_offset; | ||
| 374 | struct drm_gem_object *cursor_bo; | ||
| 375 | uint64_t cursor_addr; | ||
| 376 | int cursor_width; | ||
| 377 | int cursor_height; | ||
| 378 | int max_cursor_width; | ||
| 379 | int max_cursor_height; | ||
| 380 | enum amdgpu_rmx_type rmx_type; | ||
| 381 | u8 h_border; | ||
| 382 | u8 v_border; | ||
| 383 | fixed20_12 vsc; | ||
| 384 | fixed20_12 hsc; | ||
| 385 | struct drm_display_mode native_mode; | ||
| 386 | u32 pll_id; | ||
| 387 | /* page flipping */ | ||
| 388 | struct workqueue_struct *pflip_queue; | ||
| 389 | struct amdgpu_flip_work *pflip_works; | ||
| 390 | enum amdgpu_flip_status pflip_status; | ||
| 391 | int deferred_flip_completion; | ||
| 392 | /* pll sharing */ | ||
| 393 | struct amdgpu_atom_ss ss; | ||
| 394 | bool ss_enabled; | ||
| 395 | u32 adjusted_clock; | ||
| 396 | int bpc; | ||
| 397 | u32 pll_reference_div; | ||
| 398 | u32 pll_post_div; | ||
| 399 | u32 pll_flags; | ||
| 400 | struct drm_encoder *encoder; | ||
| 401 | struct drm_connector *connector; | ||
| 402 | /* for dpm */ | ||
| 403 | u32 line_time; | ||
| 404 | u32 wm_low; | ||
| 405 | u32 wm_high; | ||
| 406 | struct drm_display_mode hw_mode; | ||
| 407 | }; | ||
| 408 | |||
| 409 | struct amdgpu_encoder_atom_dig { | ||
| 410 | bool linkb; | ||
| 411 | /* atom dig */ | ||
| 412 | bool coherent_mode; | ||
| 413 | int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */ | ||
| 414 | /* atom lvds/edp */ | ||
| 415 | uint32_t lcd_misc; | ||
| 416 | uint16_t panel_pwr_delay; | ||
| 417 | uint32_t lcd_ss_id; | ||
| 418 | /* panel mode */ | ||
| 419 | struct drm_display_mode native_mode; | ||
| 420 | struct backlight_device *bl_dev; | ||
| 421 | int dpms_mode; | ||
| 422 | uint8_t backlight_level; | ||
| 423 | int panel_mode; | ||
| 424 | struct amdgpu_afmt *afmt; | ||
| 425 | }; | ||
| 426 | |||
| 427 | struct amdgpu_encoder { | ||
| 428 | struct drm_encoder base; | ||
| 429 | uint32_t encoder_enum; | ||
| 430 | uint32_t encoder_id; | ||
| 431 | uint32_t devices; | ||
| 432 | uint32_t active_device; | ||
| 433 | uint32_t flags; | ||
| 434 | uint32_t pixel_clock; | ||
| 435 | enum amdgpu_rmx_type rmx_type; | ||
| 436 | enum amdgpu_underscan_type underscan_type; | ||
| 437 | uint32_t underscan_hborder; | ||
| 438 | uint32_t underscan_vborder; | ||
| 439 | struct drm_display_mode native_mode; | ||
| 440 | void *enc_priv; | ||
| 441 | int audio_polling_active; | ||
| 442 | bool is_ext_encoder; | ||
| 443 | u16 caps; | ||
| 444 | }; | ||
| 445 | |||
| 446 | struct amdgpu_connector_atom_dig { | ||
| 447 | /* displayport */ | ||
| 448 | u8 dpcd[DP_RECEIVER_CAP_SIZE]; | ||
| 449 | u8 dp_sink_type; | ||
| 450 | int dp_clock; | ||
| 451 | int dp_lane_count; | ||
| 452 | bool edp_on; | ||
| 453 | }; | ||
| 454 | |||
| 455 | struct amdgpu_gpio_rec { | ||
| 456 | bool valid; | ||
| 457 | u8 id; | ||
| 458 | u32 reg; | ||
| 459 | u32 mask; | ||
| 460 | u32 shift; | ||
| 461 | }; | ||
| 462 | |||
| 463 | struct amdgpu_hpd { | ||
| 464 | enum amdgpu_hpd_id hpd; | ||
| 465 | u8 plugged_state; | ||
| 466 | struct amdgpu_gpio_rec gpio; | ||
| 467 | }; | ||
| 468 | |||
| 469 | struct amdgpu_router { | ||
| 470 | u32 router_id; | ||
| 471 | struct amdgpu_i2c_bus_rec i2c_info; | ||
| 472 | u8 i2c_addr; | ||
| 473 | /* i2c mux */ | ||
| 474 | bool ddc_valid; | ||
| 475 | u8 ddc_mux_type; | ||
| 476 | u8 ddc_mux_control_pin; | ||
| 477 | u8 ddc_mux_state; | ||
| 478 | /* clock/data mux */ | ||
| 479 | bool cd_valid; | ||
| 480 | u8 cd_mux_type; | ||
| 481 | u8 cd_mux_control_pin; | ||
| 482 | u8 cd_mux_state; | ||
| 483 | }; | ||
| 484 | |||
| 485 | enum amdgpu_connector_audio { | ||
| 486 | AMDGPU_AUDIO_DISABLE = 0, | ||
| 487 | AMDGPU_AUDIO_ENABLE = 1, | ||
| 488 | AMDGPU_AUDIO_AUTO = 2 | ||
| 489 | }; | ||
| 490 | |||
| 491 | enum amdgpu_connector_dither { | ||
| 492 | AMDGPU_FMT_DITHER_DISABLE = 0, | ||
| 493 | AMDGPU_FMT_DITHER_ENABLE = 1, | ||
| 494 | }; | ||
| 495 | |||
| 496 | struct amdgpu_connector { | ||
| 497 | struct drm_connector base; | ||
| 498 | uint32_t connector_id; | ||
| 499 | uint32_t devices; | ||
| 500 | struct amdgpu_i2c_chan *ddc_bus; | ||
| 501 | /* some systems have an hdmi and vga port with a shared ddc line */ | ||
| 502 | bool shared_ddc; | ||
| 503 | bool use_digital; | ||
| 504 | /* we need to keep the EDID between detect | ||
| 505 | and get_modes because of analog/digital/TV encoder switching */ | ||
| 506 | struct edid *edid; | ||
| 507 | void *con_priv; | ||
| 508 | bool dac_load_detect; | ||
| 509 | bool detected_by_load; /* if the connection status was determined by load */ | ||
| 510 | uint16_t connector_object_id; | ||
| 511 | struct amdgpu_hpd hpd; | ||
| 512 | struct amdgpu_router router; | ||
| 513 | struct amdgpu_i2c_chan *router_bus; | ||
| 514 | enum amdgpu_connector_audio audio; | ||
| 515 | enum amdgpu_connector_dither dither; | ||
| 516 | unsigned pixelclock_for_modeset; | ||
| 517 | }; | ||
| 518 | |||
| 519 | struct amdgpu_framebuffer { | ||
| 520 | struct drm_framebuffer base; | ||
| 521 | struct drm_gem_object *obj; | ||
| 522 | }; | ||
| 523 | |||
| 524 | #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ | ||
| 525 | ((em) == ATOM_ENCODER_MODE_DP_MST)) | ||
| 526 | |||
| 527 | void amdgpu_link_encoder_connector(struct drm_device *dev); | ||
| 528 | |||
| 529 | struct drm_connector * | ||
| 530 | amdgpu_get_connector_for_encoder(struct drm_encoder *encoder); | ||
| 531 | struct drm_connector * | ||
| 532 | amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder); | ||
| 533 | bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder, | ||
| 534 | u32 pixel_clock); | ||
| 535 | |||
| 536 | u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder); | ||
| 537 | struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder); | ||
| 538 | |||
| 539 | bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, bool use_aux); | ||
| 540 | |||
| 541 | void amdgpu_encoder_set_active_device(struct drm_encoder *encoder); | ||
| 542 | |||
| 543 | int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, | ||
| 544 | unsigned int flags, | ||
| 545 | int *vpos, int *hpos, ktime_t *stime, | ||
| 546 | ktime_t *etime); | ||
| 547 | |||
| 548 | int amdgpu_framebuffer_init(struct drm_device *dev, | ||
| 549 | struct amdgpu_framebuffer *rfb, | ||
| 550 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
| 551 | struct drm_gem_object *obj); | ||
| 552 | |||
| 553 | int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb); | ||
| 554 | |||
| 555 | void amdgpu_enc_destroy(struct drm_encoder *encoder); | ||
| 556 | void amdgpu_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); | ||
| 557 | bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | ||
| 558 | const struct drm_display_mode *mode, | ||
| 559 | struct drm_display_mode *adjusted_mode); | ||
| 560 | void amdgpu_panel_mode_fixup(struct drm_encoder *encoder, | ||
| 561 | struct drm_display_mode *adjusted_mode); | ||
| 562 | int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc); | ||
| 563 | |||
| 564 | /* fbdev layer */ | ||
| 565 | int amdgpu_fbdev_init(struct amdgpu_device *adev); | ||
| 566 | void amdgpu_fbdev_fini(struct amdgpu_device *adev); | ||
| 567 | void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state); | ||
| 568 | int amdgpu_fbdev_total_size(struct amdgpu_device *adev); | ||
| 569 | bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj); | ||
| 570 | |||
| 571 | void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev); | ||
| 572 | |||
| 573 | |||
| 574 | int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled); | ||
| 575 | |||
| 576 | /* amdgpu_display.c */ | ||
| 577 | void amdgpu_print_display_setup(struct drm_device *dev); | ||
| 578 | int amdgpu_modeset_create_props(struct amdgpu_device *adev); | ||
| 579 | int amdgpu_crtc_set_config(struct drm_mode_set *set); | ||
| 580 | int amdgpu_crtc_page_flip(struct drm_crtc *crtc, | ||
| 581 | struct drm_framebuffer *fb, | ||
| 582 | struct drm_pending_vblank_event *event, | ||
| 583 | uint32_t page_flip_flags); | ||
| 584 | extern const struct drm_mode_config_funcs amdgpu_mode_funcs; | ||
| 585 | |||
| 586 | #endif | ||
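The wrapper structures above (amdgpu_crtc, amdgpu_encoder, amdgpu_connector, amdgpu_framebuffer) each embed their DRM core counterpart as the first member, which is what lets later code in this patch (e.g. amdgpu_pll.c) call to_amdgpu_crtc() on a plain struct drm_crtc. As a hedged sketch, that helper is presumably the standard container_of() wrapper; its actual definition sits earlier in this header, outside this excerpt:

static inline struct amdgpu_crtc *to_amdgpu_crtc(struct drm_crtc *crtc)
{
	/* valid because 'base' is embedded, not pointed to, in amdgpu_crtc */
	return container_of(crtc, struct amdgpu_crtc, base);
}

The same pattern would recover the other wrappers, e.g. container_of(connector, struct amdgpu_connector, base).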
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c new file mode 100644 index 000000000000..b51582714c21 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |||
| @@ -0,0 +1,646 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2009 Jerome Glisse. | ||
| 3 | * All Rights Reserved. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the | ||
| 7 | * "Software"), to deal in the Software without restriction, including | ||
| 8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 10 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 11 | * the following conditions: | ||
| 12 | * | ||
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 20 | * | ||
| 21 | * The above copyright notice and this permission notice (including the | ||
| 22 | * next paragraph) shall be included in all copies or substantial portions | ||
| 23 | * of the Software. | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | /* | ||
| 27 | * Authors: | ||
| 28 | * Jerome Glisse <glisse@freedesktop.org> | ||
| 29 | * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> | ||
| 30 | * Dave Airlie | ||
| 31 | */ | ||
| 32 | #include <linux/list.h> | ||
| 33 | #include <linux/slab.h> | ||
| 34 | #include <drm/drmP.h> | ||
| 35 | #include <drm/amdgpu_drm.h> | ||
| 36 | #include "amdgpu.h" | ||
| 37 | #include "amdgpu_trace.h" | ||
| 38 | |||
| 39 | |||
| 40 | int amdgpu_ttm_init(struct amdgpu_device *adev); | ||
| 41 | void amdgpu_ttm_fini(struct amdgpu_device *adev); | ||
| 42 | |||
| 43 | static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev, | ||
| 44 | struct ttm_mem_reg *mem) | ||
| 45 | { | ||
| 46 | u64 ret = 0; | ||
| 47 | if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) { | ||
| 48 | ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) > | ||
| 49 | adev->mc.visible_vram_size ? | ||
| 50 | adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT): | ||
| 51 | mem->size; | ||
| 52 | } | ||
| 53 | return ret; | ||
| 54 | } | ||
| 55 | |||
| 56 | static void amdgpu_update_memory_usage(struct amdgpu_device *adev, | ||
| 57 | struct ttm_mem_reg *old_mem, | ||
| 58 | struct ttm_mem_reg *new_mem) | ||
| 59 | { | ||
| 60 | u64 vis_size; | ||
| 61 | if (!adev) | ||
| 62 | return; | ||
| 63 | |||
| 64 | if (new_mem) { | ||
| 65 | switch (new_mem->mem_type) { | ||
| 66 | case TTM_PL_TT: | ||
| 67 | atomic64_add(new_mem->size, &adev->gtt_usage); | ||
| 68 | break; | ||
| 69 | case TTM_PL_VRAM: | ||
| 70 | atomic64_add(new_mem->size, &adev->vram_usage); | ||
| 71 | vis_size = amdgpu_get_vis_part_size(adev, new_mem); | ||
| 72 | atomic64_add(vis_size, &adev->vram_vis_usage); | ||
| 73 | break; | ||
| 74 | } | ||
| 75 | } | ||
| 76 | |||
| 77 | if (old_mem) { | ||
| 78 | switch (old_mem->mem_type) { | ||
| 79 | case TTM_PL_TT: | ||
| 80 | atomic64_sub(old_mem->size, &adev->gtt_usage); | ||
| 81 | break; | ||
| 82 | case TTM_PL_VRAM: | ||
| 83 | atomic64_sub(old_mem->size, &adev->vram_usage); | ||
| 84 | vis_size = amdgpu_get_vis_part_size(adev, old_mem); | ||
| 85 | atomic64_sub(vis_size, &adev->vram_vis_usage); | ||
| 86 | break; | ||
| 87 | } | ||
| 88 | } | ||
| 89 | } | ||
| 90 | |||
| 91 | static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) | ||
| 92 | { | ||
| 93 | struct amdgpu_bo *bo; | ||
| 94 | |||
| 95 | bo = container_of(tbo, struct amdgpu_bo, tbo); | ||
| 96 | |||
| 97 | amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL); | ||
| 98 | amdgpu_mn_unregister(bo); | ||
| 99 | |||
| 100 | mutex_lock(&bo->adev->gem.mutex); | ||
| 101 | list_del_init(&bo->list); | ||
| 102 | mutex_unlock(&bo->adev->gem.mutex); | ||
| 103 | drm_gem_object_release(&bo->gem_base); | ||
| 104 | kfree(bo->metadata); | ||
| 105 | kfree(bo); | ||
| 106 | } | ||
| 107 | |||
| 108 | bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo) | ||
| 109 | { | ||
| 110 | if (bo->destroy == &amdgpu_ttm_bo_destroy) | ||
| 111 | return true; | ||
| 112 | return false; | ||
| 113 | } | ||
| 114 | |||
| 115 | void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain) | ||
| 116 | { | ||
| 117 | u32 c = 0, i; | ||
| 118 | rbo->placement.placement = rbo->placements; | ||
| 119 | rbo->placement.busy_placement = rbo->placements; | ||
| 120 | |||
| 121 | if (domain & AMDGPU_GEM_DOMAIN_VRAM) { | ||
| 122 | if (rbo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS && | ||
| 123 | rbo->adev->mc.visible_vram_size < rbo->adev->mc.real_vram_size) { | ||
| 124 | rbo->placements[c].fpfn = | ||
| 125 | rbo->adev->mc.visible_vram_size >> PAGE_SHIFT; | ||
| 126 | rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | | ||
| 127 | TTM_PL_FLAG_VRAM; | ||
| 128 | } | ||
| 129 | rbo->placements[c].fpfn = 0; | ||
| 130 | rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | | ||
| 131 | TTM_PL_FLAG_VRAM; | ||
| 132 | } | ||
| 133 | |||
| 134 | if (domain & AMDGPU_GEM_DOMAIN_GTT) { | ||
| 135 | if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_UC) { | ||
| 136 | rbo->placements[c].fpfn = 0; | ||
| 137 | rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT; | ||
| 138 | } else if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_WC) { | ||
| 139 | rbo->placements[c].fpfn = 0; | ||
| 140 | rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT | | ||
| 141 | TTM_PL_FLAG_UNCACHED; | ||
| 142 | } else { | ||
| 143 | rbo->placements[c].fpfn = 0; | ||
| 144 | rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; | ||
| 145 | } | ||
| 146 | } | ||
| 147 | |||
| 148 | if (domain & AMDGPU_GEM_DOMAIN_CPU) { | ||
| 149 | if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_UC) { | ||
| 150 | rbo->placements[c].fpfn = 0; | ||
| 151 | rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM; | ||
| 152 | } else if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_WC) { | ||
| 153 | rbo->placements[c].fpfn = 0; | ||
| 154 | rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM | | ||
| 155 | TTM_PL_FLAG_UNCACHED; | ||
| 156 | } else { | ||
| 157 | rbo->placements[c].fpfn = 0; | ||
| 158 | rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; | ||
| 159 | } | ||
| 160 | } | ||
| 161 | |||
| 162 | if (domain & AMDGPU_GEM_DOMAIN_GDS) { | ||
| 163 | rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | | ||
| 164 | AMDGPU_PL_FLAG_GDS; | ||
| 165 | } | ||
| 166 | if (domain & AMDGPU_GEM_DOMAIN_GWS) { | ||
| 167 | rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | | ||
| 168 | AMDGPU_PL_FLAG_GWS; | ||
| 169 | } | ||
| 170 | if (domain & AMDGPU_GEM_DOMAIN_OA) { | ||
| 171 | rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | | ||
| 172 | AMDGPU_PL_FLAG_OA; | ||
| 173 | } | ||
| 174 | |||
| 175 | if (!c) { | ||
| 176 | rbo->placements[c].fpfn = 0; | ||
| 177 | rbo->placements[c++].flags = TTM_PL_MASK_CACHING | | ||
| 178 | TTM_PL_FLAG_SYSTEM; | ||
| 179 | } | ||
| 180 | rbo->placement.num_placement = c; | ||
| 181 | rbo->placement.num_busy_placement = c; | ||
| 182 | |||
| 183 | for (i = 0; i < c; i++) { | ||
| 184 | if ((rbo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && | ||
| 185 | (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) && | ||
| 186 | !rbo->placements[i].fpfn) | ||
| 187 | rbo->placements[i].lpfn = | ||
| 188 | rbo->adev->mc.visible_vram_size >> PAGE_SHIFT; | ||
| 189 | else | ||
| 190 | rbo->placements[i].lpfn = 0; | ||
| 191 | } | ||
| 192 | |||
| 193 | if (rbo->tbo.mem.size > 512 * 1024) { | ||
| 194 | for (i = 0; i < c; i++) { | ||
| 195 | rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN; | ||
| 196 | } | ||
| 197 | } | ||
| 198 | } | ||
| 199 | |||
| 200 | int amdgpu_bo_create(struct amdgpu_device *adev, | ||
| 201 | unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, | ||
| 202 | struct sg_table *sg, struct amdgpu_bo **bo_ptr) | ||
| 203 | { | ||
| 204 | struct amdgpu_bo *bo; | ||
| 205 | enum ttm_bo_type type; | ||
| 206 | unsigned long page_align; | ||
| 207 | size_t acc_size; | ||
| 208 | int r; | ||
| 209 | |||
| 210 | /* VI has a hw bug where VM PTEs have to be allocated in groups of 8. | ||
| 211 | * Do this as a temporary workaround. | ||
| 212 | */ | ||
| 213 | if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) { | ||
| 214 | if (adev->asic_type >= CHIP_TOPAZ) { | ||
| 215 | if (byte_align & 0x7fff) | ||
| 216 | byte_align = ALIGN(byte_align, 0x8000); | ||
| 217 | if (size & 0x7fff) | ||
| 218 | size = ALIGN(size, 0x8000); | ||
| 219 | } | ||
| 220 | } | ||
| 221 | |||
| 222 | page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; | ||
| 223 | size = ALIGN(size, PAGE_SIZE); | ||
| 224 | |||
| 225 | if (kernel) { | ||
| 226 | type = ttm_bo_type_kernel; | ||
| 227 | } else if (sg) { | ||
| 228 | type = ttm_bo_type_sg; | ||
| 229 | } else { | ||
| 230 | type = ttm_bo_type_device; | ||
| 231 | } | ||
| 232 | *bo_ptr = NULL; | ||
| 233 | |||
| 234 | acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, | ||
| 235 | sizeof(struct amdgpu_bo)); | ||
| 236 | |||
| 237 | bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL); | ||
| 238 | if (bo == NULL) | ||
| 239 | return -ENOMEM; | ||
| 240 | r = drm_gem_object_init(adev->ddev, &bo->gem_base, size); | ||
| 241 | if (unlikely(r)) { | ||
| 242 | kfree(bo); | ||
| 243 | return r; | ||
| 244 | } | ||
| 245 | bo->adev = adev; | ||
| 246 | INIT_LIST_HEAD(&bo->list); | ||
| 247 | INIT_LIST_HEAD(&bo->va); | ||
| 248 | bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM | | ||
| 249 | AMDGPU_GEM_DOMAIN_GTT | | ||
| 250 | AMDGPU_GEM_DOMAIN_CPU | | ||
| 251 | AMDGPU_GEM_DOMAIN_GDS | | ||
| 252 | AMDGPU_GEM_DOMAIN_GWS | | ||
| 253 | AMDGPU_GEM_DOMAIN_OA); | ||
| 254 | |||
| 255 | bo->flags = flags; | ||
| 256 | amdgpu_ttm_placement_from_domain(bo, domain); | ||
| 257 | /* Kernel allocations are uninterruptible */ | ||
| 258 | down_read(&adev->pm.mclk_lock); | ||
| 259 | r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, | ||
| 260 | &bo->placement, page_align, !kernel, NULL, | ||
| 261 | acc_size, sg, NULL, &amdgpu_ttm_bo_destroy); | ||
| 262 | up_read(&adev->pm.mclk_lock); | ||
| 263 | if (unlikely(r != 0)) { | ||
| 264 | return r; | ||
| 265 | } | ||
| 266 | *bo_ptr = bo; | ||
| 267 | |||
| 268 | trace_amdgpu_bo_create(bo); | ||
| 269 | |||
| 270 | return 0; | ||
| 271 | } | ||
| 272 | |||
| 273 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) | ||
| 274 | { | ||
| 275 | bool is_iomem; | ||
| 276 | int r; | ||
| 277 | |||
| 278 | if (bo->kptr) { | ||
| 279 | if (ptr) { | ||
| 280 | *ptr = bo->kptr; | ||
| 281 | } | ||
| 282 | return 0; | ||
| 283 | } | ||
| 284 | r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); | ||
| 285 | if (r) { | ||
| 286 | return r; | ||
| 287 | } | ||
| 288 | bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); | ||
| 289 | if (ptr) { | ||
| 290 | *ptr = bo->kptr; | ||
| 291 | } | ||
| 292 | return 0; | ||
| 293 | } | ||
| 294 | |||
| 295 | void amdgpu_bo_kunmap(struct amdgpu_bo *bo) | ||
| 296 | { | ||
| 297 | if (bo->kptr == NULL) | ||
| 298 | return; | ||
| 299 | bo->kptr = NULL; | ||
| 300 | ttm_bo_kunmap(&bo->kmap); | ||
| 301 | } | ||
| 302 | |||
| 303 | struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo) | ||
| 304 | { | ||
| 305 | if (bo == NULL) | ||
| 306 | return NULL; | ||
| 307 | |||
| 308 | ttm_bo_reference(&bo->tbo); | ||
| 309 | return bo; | ||
| 310 | } | ||
| 311 | |||
| 312 | void amdgpu_bo_unref(struct amdgpu_bo **bo) | ||
| 313 | { | ||
| 314 | struct ttm_buffer_object *tbo; | ||
| 315 | |||
| 316 | if ((*bo) == NULL) | ||
| 317 | return; | ||
| 318 | |||
| 319 | tbo = &((*bo)->tbo); | ||
| 320 | ttm_bo_unref(&tbo); | ||
| 321 | if (tbo == NULL) | ||
| 322 | *bo = NULL; | ||
| 323 | } | ||
| 324 | |||
| 325 | int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, u64 max_offset, | ||
| 326 | u64 *gpu_addr) | ||
| 327 | { | ||
| 328 | int r, i; | ||
| 329 | |||
| 330 | if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm)) | ||
| 331 | return -EPERM; | ||
| 332 | |||
| 333 | if (bo->pin_count) { | ||
| 334 | bo->pin_count++; | ||
| 335 | if (gpu_addr) | ||
| 336 | *gpu_addr = amdgpu_bo_gpu_offset(bo); | ||
| 337 | |||
| 338 | if (max_offset != 0) { | ||
| 339 | u64 domain_start; | ||
| 340 | |||
| 341 | if (domain == AMDGPU_GEM_DOMAIN_VRAM) | ||
| 342 | domain_start = bo->adev->mc.vram_start; | ||
| 343 | else | ||
| 344 | domain_start = bo->adev->mc.gtt_start; | ||
| 345 | WARN_ON_ONCE(max_offset < | ||
| 346 | (amdgpu_bo_gpu_offset(bo) - domain_start)); | ||
| 347 | } | ||
| 348 | |||
| 349 | return 0; | ||
| 350 | } | ||
| 351 | amdgpu_ttm_placement_from_domain(bo, domain); | ||
| 352 | for (i = 0; i < bo->placement.num_placement; i++) { | ||
| 353 | /* force to pin into visible video ram */ | ||
| 354 | if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && | ||
| 355 | !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && | ||
| 356 | (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) | ||
| 357 | bo->placements[i].lpfn = | ||
| 358 | bo->adev->mc.visible_vram_size >> PAGE_SHIFT; | ||
| 359 | else | ||
| 360 | bo->placements[i].lpfn = max_offset >> PAGE_SHIFT; | ||
| 361 | |||
| 362 | bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; | ||
| 363 | } | ||
| 364 | |||
| 365 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | ||
| 366 | if (likely(r == 0)) { | ||
| 367 | bo->pin_count = 1; | ||
| 368 | if (gpu_addr != NULL) | ||
| 369 | *gpu_addr = amdgpu_bo_gpu_offset(bo); | ||
| 370 | if (domain == AMDGPU_GEM_DOMAIN_VRAM) | ||
| 371 | bo->adev->vram_pin_size += amdgpu_bo_size(bo); | ||
| 372 | else | ||
| 373 | bo->adev->gart_pin_size += amdgpu_bo_size(bo); | ||
| 374 | } else { | ||
| 375 | dev_err(bo->adev->dev, "%p pin failed\n", bo); | ||
| 376 | } | ||
| 377 | return r; | ||
| 378 | } | ||
| 379 | |||
| 380 | int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr) | ||
| 381 | { | ||
| 382 | return amdgpu_bo_pin_restricted(bo, domain, 0, gpu_addr); | ||
| 383 | } | ||
| 384 | |||
| 385 | int amdgpu_bo_unpin(struct amdgpu_bo *bo) | ||
| 386 | { | ||
| 387 | int r, i; | ||
| 388 | |||
| 389 | if (!bo->pin_count) { | ||
| 390 | dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo); | ||
| 391 | return 0; | ||
| 392 | } | ||
| 393 | bo->pin_count--; | ||
| 394 | if (bo->pin_count) | ||
| 395 | return 0; | ||
| 396 | for (i = 0; i < bo->placement.num_placement; i++) { | ||
| 397 | bo->placements[i].lpfn = 0; | ||
| 398 | bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; | ||
| 399 | } | ||
| 400 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | ||
| 401 | if (likely(r == 0)) { | ||
| 402 | if (bo->tbo.mem.mem_type == TTM_PL_VRAM) | ||
| 403 | bo->adev->vram_pin_size -= amdgpu_bo_size(bo); | ||
| 404 | else | ||
| 405 | bo->adev->gart_pin_size -= amdgpu_bo_size(bo); | ||
| 406 | } else { | ||
| 407 | dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo); | ||
| 408 | } | ||
| 409 | return r; | ||
| 410 | } | ||
| 411 | |||
| 412 | int amdgpu_bo_evict_vram(struct amdgpu_device *adev) | ||
| 413 | { | ||
| 414 | /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */ | ||
| 415 | if (0 && (adev->flags & AMDGPU_IS_APU)) { | ||
| 416 | /* Useless to evict on IGP chips */ | ||
| 417 | return 0; | ||
| 418 | } | ||
| 419 | return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM); | ||
| 420 | } | ||
| 421 | |||
| 422 | void amdgpu_bo_force_delete(struct amdgpu_device *adev) | ||
| 423 | { | ||
| 424 | struct amdgpu_bo *bo, *n; | ||
| 425 | |||
| 426 | if (list_empty(&adev->gem.objects)) { | ||
| 427 | return; | ||
| 428 | } | ||
| 429 | dev_err(adev->dev, "Userspace still has active objects!\n"); | ||
| 430 | list_for_each_entry_safe(bo, n, &adev->gem.objects, list) { | ||
| 431 | mutex_lock(&adev->ddev->struct_mutex); | ||
| 432 | dev_err(adev->dev, "%p %p %lu %lu force free\n", | ||
| 433 | &bo->gem_base, bo, (unsigned long)bo->gem_base.size, | ||
| 434 | *((unsigned long *)&bo->gem_base.refcount)); | ||
| 435 | mutex_lock(&bo->adev->gem.mutex); | ||
| 436 | list_del_init(&bo->list); | ||
| 437 | mutex_unlock(&bo->adev->gem.mutex); | ||
| 438 | /* this should unref the ttm bo */ | ||
| 439 | drm_gem_object_unreference(&bo->gem_base); | ||
| 440 | mutex_unlock(&adev->ddev->struct_mutex); | ||
| 441 | } | ||
| 442 | } | ||
| 443 | |||
| 444 | int amdgpu_bo_init(struct amdgpu_device *adev) | ||
| 445 | { | ||
| 446 | /* Add an MTRR for the VRAM */ | ||
| 447 | adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base, | ||
| 448 | adev->mc.aper_size); | ||
| 449 | DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", | ||
| 450 | adev->mc.mc_vram_size >> 20, | ||
| 451 | (unsigned long long)adev->mc.aper_size >> 20); | ||
| 452 | DRM_INFO("RAM width %dbits DDR\n", | ||
| 453 | adev->mc.vram_width); | ||
| 454 | return amdgpu_ttm_init(adev); | ||
| 455 | } | ||
| 456 | |||
| 457 | void amdgpu_bo_fini(struct amdgpu_device *adev) | ||
| 458 | { | ||
| 459 | amdgpu_ttm_fini(adev); | ||
| 460 | arch_phys_wc_del(adev->mc.vram_mtrr); | ||
| 461 | } | ||
| 462 | |||
| 463 | int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, | ||
| 464 | struct vm_area_struct *vma) | ||
| 465 | { | ||
| 466 | return ttm_fbdev_mmap(vma, &bo->tbo); | ||
| 467 | } | ||
| 468 | |||
| 469 | int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags) | ||
| 470 | { | ||
| 471 | unsigned bankw, bankh, mtaspect, tilesplit, stilesplit; | ||
| 472 | |||
| 473 | bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK; | ||
| 474 | bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK; | ||
| 475 | mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK; | ||
| 476 | tilesplit = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK; | ||
| 477 | stilesplit = (tiling_flags >> AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_MASK; | ||
| 478 | switch (bankw) { | ||
| 479 | case 0: | ||
| 480 | case 1: | ||
| 481 | case 2: | ||
| 482 | case 4: | ||
| 483 | case 8: | ||
| 484 | break; | ||
| 485 | default: | ||
| 486 | return -EINVAL; | ||
| 487 | } | ||
| 488 | switch (bankh) { | ||
| 489 | case 0: | ||
| 490 | case 1: | ||
| 491 | case 2: | ||
| 492 | case 4: | ||
| 493 | case 8: | ||
| 494 | break; | ||
| 495 | default: | ||
| 496 | return -EINVAL; | ||
| 497 | } | ||
| 498 | switch (mtaspect) { | ||
| 499 | case 0: | ||
| 500 | case 1: | ||
| 501 | case 2: | ||
| 502 | case 4: | ||
| 503 | case 8: | ||
| 504 | break; | ||
| 505 | default: | ||
| 506 | return -EINVAL; | ||
| 507 | } | ||
| 508 | if (tilesplit > 6) { | ||
| 509 | return -EINVAL; | ||
| 510 | } | ||
| 511 | if (stilesplit > 6) { | ||
| 512 | return -EINVAL; | ||
| 513 | } | ||
| 514 | |||
| 515 | bo->tiling_flags = tiling_flags; | ||
| 516 | return 0; | ||
| 517 | } | ||
| 518 | |||
| 519 | void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags) | ||
| 520 | { | ||
| 521 | lockdep_assert_held(&bo->tbo.resv->lock.base); | ||
| 522 | |||
| 523 | if (tiling_flags) | ||
| 524 | *tiling_flags = bo->tiling_flags; | ||
| 525 | } | ||
| 526 | |||
| 527 | int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata, | ||
| 528 | uint32_t metadata_size, uint64_t flags) | ||
| 529 | { | ||
| 530 | void *buffer; | ||
| 531 | |||
| 532 | if (!metadata_size) { | ||
| 533 | if (bo->metadata_size) { | ||
| 534 | kfree(bo->metadata); | ||
| 535 | bo->metadata_size = 0; | ||
| 536 | } | ||
| 537 | return 0; | ||
| 538 | } | ||
| 539 | |||
| 540 | if (metadata == NULL) | ||
| 541 | return -EINVAL; | ||
| 542 | |||
| 543 | buffer = kzalloc(metadata_size, GFP_KERNEL); | ||
| 544 | if (buffer == NULL) | ||
| 545 | return -ENOMEM; | ||
| 546 | |||
| 547 | memcpy(buffer, metadata, metadata_size); | ||
| 548 | |||
| 549 | kfree(bo->metadata); | ||
| 550 | bo->metadata_flags = flags; | ||
| 551 | bo->metadata = buffer; | ||
| 552 | bo->metadata_size = metadata_size; | ||
| 553 | |||
| 554 | return 0; | ||
| 555 | } | ||
| 556 | |||
| 557 | int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, | ||
| 558 | size_t buffer_size, uint32_t *metadata_size, | ||
| 559 | uint64_t *flags) | ||
| 560 | { | ||
| 561 | if (!buffer && !metadata_size) | ||
| 562 | return -EINVAL; | ||
| 563 | |||
| 564 | if (buffer) { | ||
| 565 | if (buffer_size < bo->metadata_size) | ||
| 566 | return -EINVAL; | ||
| 567 | |||
| 568 | if (bo->metadata_size) | ||
| 569 | memcpy(buffer, bo->metadata, bo->metadata_size); | ||
| 570 | } | ||
| 571 | |||
| 572 | if (metadata_size) | ||
| 573 | *metadata_size = bo->metadata_size; | ||
| 574 | if (flags) | ||
| 575 | *flags = bo->metadata_flags; | ||
| 576 | |||
| 577 | return 0; | ||
| 578 | } | ||
| 579 | |||
| 580 | void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, | ||
| 581 | struct ttm_mem_reg *new_mem) | ||
| 582 | { | ||
| 583 | struct amdgpu_bo *rbo; | ||
| 584 | |||
| 585 | if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) | ||
| 586 | return; | ||
| 587 | |||
| 588 | rbo = container_of(bo, struct amdgpu_bo, tbo); | ||
| 589 | amdgpu_vm_bo_invalidate(rbo->adev, rbo); | ||
| 590 | |||
| 591 | /* update statistics */ | ||
| 592 | if (!new_mem) | ||
| 593 | return; | ||
| 594 | |||
| 595 | /* move_notify is called before move happens */ | ||
| 596 | amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem); | ||
| 597 | } | ||
| 598 | |||
| 599 | int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | ||
| 600 | { | ||
| 601 | struct amdgpu_device *adev; | ||
| 602 | struct amdgpu_bo *rbo; | ||
| 603 | unsigned long offset, size; | ||
| 604 | int r; | ||
| 605 | |||
| 606 | if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) | ||
| 607 | return 0; | ||
| 608 | rbo = container_of(bo, struct amdgpu_bo, tbo); | ||
| 609 | adev = rbo->adev; | ||
| 610 | if (bo->mem.mem_type == TTM_PL_VRAM) { | ||
| 611 | size = bo->mem.num_pages << PAGE_SHIFT; | ||
| 612 | offset = bo->mem.start << PAGE_SHIFT; | ||
| 613 | if ((offset + size) > adev->mc.visible_vram_size) { | ||
| 614 | /* hurrah, the memory is not visible! */ | ||
| 615 | amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_VRAM); | ||
| 616 | rbo->placements[0].lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; | ||
| 617 | r = ttm_bo_validate(bo, &rbo->placement, false, false); | ||
| 618 | if (unlikely(r != 0)) | ||
| 619 | return r; | ||
| 620 | offset = bo->mem.start << PAGE_SHIFT; | ||
| 621 | /* this should not happen */ | ||
| 622 | if ((offset + size) > adev->mc.visible_vram_size) | ||
| 623 | return -EINVAL; | ||
| 624 | } | ||
| 625 | } | ||
| 626 | return 0; | ||
| 627 | } | ||
| 628 | |||
| 629 | /** | ||
| 630 | * amdgpu_bo_fence - add fence to buffer object | ||
| 631 | * | ||
| 632 | * @bo: buffer object in question | ||
| 633 | * @fence: fence to add | ||
| 634 | * @shared: true if fence should be added shared | ||
| 635 | * | ||
| 636 | */ | ||
| 637 | void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence, | ||
| 638 | bool shared) | ||
| 639 | { | ||
| 640 | struct reservation_object *resv = bo->tbo.resv; | ||
| 641 | |||
| 642 | if (shared) | ||
| 643 | reservation_object_add_shared_fence(resv, &fence->base); | ||
| 644 | else | ||
| 645 | reservation_object_add_excl_fence(resv, &fence->base); | ||
| 646 | } | ||
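To make the buffer-object API above concrete, here is a minimal usage sketch for a kernel-internal allocation, using only helpers defined in this file and in amdgpu_object.h below. It is illustrative, not part of the patch; the function name is hypothetical and error handling is abbreviated:

/* Illustrative only: allocate a page of VRAM, pin it for the GPU,
 * map it for the CPU, then tear everything down again. */
static int example_bo_roundtrip(struct amdgpu_device *adev)
{
	struct amdgpu_bo *bo;
	void *cpu_ptr;
	u64 gpu_addr;
	int r;

	/* one page of VRAM, page aligned, kernel owned (type ttm_bo_type_kernel) */
	r = amdgpu_bo_create(adev, PAGE_SIZE, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);	/* interruptible reserve */
	if (r)
		goto out_unref;

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r)
		goto out_unreserve;
	/* gpu_addr now holds the pinned GPU offset of the BO */

	r = amdgpu_bo_kmap(bo, &cpu_ptr);	/* CPU mapping of the BO */
	if (!r) {
		memset(cpu_ptr, 0, PAGE_SIZE);
		amdgpu_bo_kunmap(bo);
	}

	amdgpu_bo_unpin(bo);
out_unreserve:
	amdgpu_bo_unreserve(bo);
out_unref:
	amdgpu_bo_unref(&bo);	/* drops the reference taken at creation */
	return r;
}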
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h new file mode 100644 index 000000000000..b1e0a03c1d78 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | |||
| @@ -0,0 +1,196 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __AMDGPU_OBJECT_H__ | ||
| 29 | #define __AMDGPU_OBJECT_H__ | ||
| 30 | |||
| 31 | #include <drm/amdgpu_drm.h> | ||
| 32 | #include "amdgpu.h" | ||
| 33 | |||
| 34 | /** | ||
| 35 | * amdgpu_mem_type_to_domain - return domain corresponding to mem_type | ||
| 36 | * @mem_type: ttm memory type | ||
| 37 | * | ||
| 38 | * Returns corresponding domain of the ttm mem_type | ||
| 39 | */ | ||
| 40 | static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type) | ||
| 41 | { | ||
| 42 | switch (mem_type) { | ||
| 43 | case TTM_PL_VRAM: | ||
| 44 | return AMDGPU_GEM_DOMAIN_VRAM; | ||
| 45 | case TTM_PL_TT: | ||
| 46 | return AMDGPU_GEM_DOMAIN_GTT; | ||
| 47 | case TTM_PL_SYSTEM: | ||
| 48 | return AMDGPU_GEM_DOMAIN_CPU; | ||
| 49 | case AMDGPU_PL_GDS: | ||
| 50 | return AMDGPU_GEM_DOMAIN_GDS; | ||
| 51 | case AMDGPU_PL_GWS: | ||
| 52 | return AMDGPU_GEM_DOMAIN_GWS; | ||
| 53 | case AMDGPU_PL_OA: | ||
| 54 | return AMDGPU_GEM_DOMAIN_OA; | ||
| 55 | default: | ||
| 56 | break; | ||
| 57 | } | ||
| 58 | return 0; | ||
| 59 | } | ||
| 60 | |||
| 61 | /** | ||
| 62 | * amdgpu_bo_reserve - reserve bo | ||
| 63 | * @bo: bo structure | ||
| 64 | * @no_intr: don't return -ERESTARTSYS on pending signal | ||
| 65 | * | ||
| 66 | * Returns: | ||
| 67 | * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by | ||
| 68 | * a signal. Release all buffer reservations and return to user-space. | ||
| 69 | */ | ||
| 70 | static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr) | ||
| 71 | { | ||
| 72 | int r; | ||
| 73 | |||
| 74 | r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0); | ||
| 75 | if (unlikely(r != 0)) { | ||
| 76 | if (r != -ERESTARTSYS) | ||
| 77 | dev_err(bo->adev->dev, "%p reserve failed\n", bo); | ||
| 78 | return r; | ||
| 79 | } | ||
| 80 | return 0; | ||
| 81 | } | ||
| 82 | |||
| 83 | static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo) | ||
| 84 | { | ||
| 85 | ttm_bo_unreserve(&bo->tbo); | ||
| 86 | } | ||
| 87 | |||
| 88 | /** | ||
| 89 | * amdgpu_bo_gpu_offset - return GPU offset of bo | ||
| 90 | * @bo: amdgpu object for which we query the offset | ||
| 91 | * | ||
| 92 | * Returns current GPU offset of the object. | ||
| 93 | * | ||
| 94 | * Note: object should either be pinned or reserved when calling this | ||
| 95 | * function; it might be useful to add a check for this for debugging. | ||
| 96 | */ | ||
| 97 | static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) | ||
| 98 | { | ||
| 99 | return bo->tbo.offset; | ||
| 100 | } | ||
| 101 | |||
| 102 | static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo) | ||
| 103 | { | ||
| 104 | return bo->tbo.num_pages << PAGE_SHIFT; | ||
| 105 | } | ||
| 106 | |||
| 107 | static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo) | ||
| 108 | { | ||
| 109 | return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE; | ||
| 110 | } | ||
| 111 | |||
| 112 | static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo) | ||
| 113 | { | ||
| 114 | return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE; | ||
| 115 | } | ||
| 116 | |||
| 117 | /** | ||
| 118 | * amdgpu_bo_mmap_offset - return mmap offset of bo | ||
| 119 | * @bo: amdgpu object for which we query the offset | ||
| 120 | * | ||
| 121 | * Returns mmap offset of the object. | ||
| 122 | */ | ||
| 123 | static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo) | ||
| 124 | { | ||
| 125 | return drm_vma_node_offset_addr(&bo->tbo.vma_node); | ||
| 126 | } | ||
| 127 | |||
| 128 | int amdgpu_bo_create(struct amdgpu_device *adev, | ||
| 129 | unsigned long size, int byte_align, | ||
| 130 | bool kernel, u32 domain, u64 flags, | ||
| 131 | struct sg_table *sg, | ||
| 132 | struct amdgpu_bo **bo_ptr); | ||
| 133 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); | ||
| 134 | void amdgpu_bo_kunmap(struct amdgpu_bo *bo); | ||
| 135 | struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); | ||
| 136 | void amdgpu_bo_unref(struct amdgpu_bo **bo); | ||
| 137 | int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr); | ||
| 138 | int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, | ||
| 139 | u64 max_offset, u64 *gpu_addr); | ||
| 140 | int amdgpu_bo_unpin(struct amdgpu_bo *bo); | ||
| 141 | int amdgpu_bo_evict_vram(struct amdgpu_device *adev); | ||
| 142 | void amdgpu_bo_force_delete(struct amdgpu_device *adev); | ||
| 143 | int amdgpu_bo_init(struct amdgpu_device *adev); | ||
| 144 | void amdgpu_bo_fini(struct amdgpu_device *adev); | ||
| 145 | int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, | ||
| 146 | struct vm_area_struct *vma); | ||
| 147 | int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags); | ||
| 148 | void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags); | ||
| 149 | int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata, | ||
| 150 | uint32_t metadata_size, uint64_t flags); | ||
| 151 | int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, | ||
| 152 | size_t buffer_size, uint32_t *metadata_size, | ||
| 153 | uint64_t *flags); | ||
| 154 | void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, | ||
| 155 | struct ttm_mem_reg *new_mem); | ||
| 156 | int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); | ||
| 157 | void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence, | ||
| 158 | bool shared); | ||
| 159 | |||
| 160 | /* | ||
| 161 | * sub allocation | ||
| 162 | */ | ||
| 163 | |||
| 164 | static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo) | ||
| 165 | { | ||
| 166 | return sa_bo->manager->gpu_addr + sa_bo->soffset; | ||
| 167 | } | ||
| 168 | |||
| 169 | static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo) | ||
| 170 | { | ||
| 171 | return sa_bo->manager->cpu_ptr + sa_bo->soffset; | ||
| 172 | } | ||
| 173 | |||
| 174 | int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, | ||
| 175 | struct amdgpu_sa_manager *sa_manager, | ||
| 176 | unsigned size, u32 align, u32 domain); | ||
| 177 | void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev, | ||
| 178 | struct amdgpu_sa_manager *sa_manager); | ||
| 179 | int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, | ||
| 180 | struct amdgpu_sa_manager *sa_manager); | ||
| 181 | int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev, | ||
| 182 | struct amdgpu_sa_manager *sa_manager); | ||
| 183 | int amdgpu_sa_bo_new(struct amdgpu_device *adev, | ||
| 184 | struct amdgpu_sa_manager *sa_manager, | ||
| 185 | struct amdgpu_sa_bo **sa_bo, | ||
| 186 | unsigned size, unsigned align); | ||
| 187 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, | ||
| 188 | struct amdgpu_sa_bo **sa_bo, | ||
| 189 | struct amdgpu_fence *fence); | ||
| 190 | #if defined(CONFIG_DEBUG_FS) | ||
| 191 | void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, | ||
| 192 | struct seq_file *m); | ||
| 193 | #endif | ||
| 194 | |||
| 195 | |||
| 196 | #endif | ||
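One non-obvious property of amdgpu_bo_get_metadata() above: passing a NULL buffer with a non-NULL metadata_size is valid and only queries the stored size, which enables a query-then-fetch pattern. A hedged sketch (the helper name is hypothetical; it assumes a GFP_KERNEL context and that the metadata is not replaced between the two calls):

/* Illustrative only - not part of the patch. */
static void *example_fetch_metadata(struct amdgpu_bo *bo, uint32_t *size_out)
{
	uint64_t flags;
	uint32_t size;
	void *buf;

	/* first call: query only the size (and flags) of the metadata blob */
	if (amdgpu_bo_get_metadata(bo, NULL, 0, &size, &flags) || !size)
		return NULL;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return NULL;

	/* second call: copy the blob; returns -EINVAL if the buffer is too small */
	if (amdgpu_bo_get_metadata(bo, buf, size, NULL, NULL)) {
		kfree(buf);
		return NULL;
	}

	*size_out = size;
	return buf;
}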
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c new file mode 100644 index 000000000000..d15314957732 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c | |||
| @@ -0,0 +1,350 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | #include <drm/drmP.h> | ||
| 24 | #include <drm/amdgpu_drm.h> | ||
| 25 | #include "amdgpu.h" | ||
| 26 | #include "atom.h" | ||
| 27 | #include "atombios_encoders.h" | ||
| 28 | #include <asm/div64.h> | ||
| 29 | #include <linux/gcd.h> | ||
| 30 | |||
| 31 | /** | ||
| 32 | * amdgpu_pll_reduce_ratio - fractional number reduction | ||
| 33 | * | ||
| 34 | * @nom: numerator | ||
| 35 | * @den: denominator | ||
| 36 | * @nom_min: minimum value for numerator | ||
| 37 | * @den_min: minimum value for denominator | ||
| 38 | * | ||
| 39 | * Find the greatest common divisor and apply it to both numerator and | ||
| 40 | * denominator, but make sure the numerator and denominator are at least | ||
| 41 | * as large as their minimum values. | ||
| 42 | */ | ||
| 43 | static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den, | ||
| 44 | unsigned nom_min, unsigned den_min) | ||
| 45 | { | ||
| 46 | unsigned tmp; | ||
| 47 | |||
| 48 | /* reduce the numbers to a simpler ratio */ | ||
| 49 | tmp = gcd(*nom, *den); | ||
| 50 | *nom /= tmp; | ||
| 51 | *den /= tmp; | ||
| 52 | |||
| 53 | /* make sure the numerator is large enough */ | ||
| 54 | if (*nom < nom_min) { | ||
| 55 | tmp = DIV_ROUND_UP(nom_min, *nom); | ||
| 56 | *nom *= tmp; | ||
| 57 | *den *= tmp; | ||
| 58 | } | ||
| 59 | |||
| 60 | /* make sure the denominator is large enough */ | ||
| 61 | if (*den < den_min) { | ||
| 62 | tmp = DIV_ROUND_UP(den_min, *den); | ||
| 63 | *nom *= tmp; | ||
| 64 | *den *= tmp; | ||
| 65 | } | ||
| 66 | } | ||
| 67 | |||
| 68 | /** | ||
| 69 | * amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation | ||
| 70 | * | ||
| 71 | * @nom: numerator | ||
| 72 | * @den: denominator | ||
| 73 | * @post_div: post divider | ||
| 74 | * @fb_div_max: feedback divider maximum | ||
| 75 | * @ref_div_max: reference divider maximum | ||
| 76 | * @fb_div: resulting feedback divider | ||
| 77 | * @ref_div: resulting reference divider | ||
| 78 | * | ||
| 79 | * Calculate feedback and reference divider for a given post divider. Makes | ||
| 80 | * sure we stay within the limits. | ||
| 81 | */ | ||
| 82 | static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, | ||
| 83 | unsigned fb_div_max, unsigned ref_div_max, | ||
| 84 | unsigned *fb_div, unsigned *ref_div) | ||
| 85 | { | ||
| 86 | /* limit reference * post divider to a maximum */ | ||
| 87 | ref_div_max = min(128 / post_div, ref_div_max); | ||
| 88 | |||
| 89 | /* get matching reference and feedback divider */ | ||
| 90 | *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); | ||
| 91 | *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den); | ||
| 92 | |||
| 93 | /* limit fb divider to its maximum */ | ||
| 94 | if (*fb_div > fb_div_max) { | ||
| 95 | *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div); | ||
| 96 | *fb_div = fb_div_max; | ||
| 97 | } | ||
| 98 | } | ||
| 99 | |||
| 100 | /** | ||
| 101 | * amdgpu_pll_compute - compute PLL parameters | ||
| 102 | * | ||
| 103 | * @pll: information about the PLL | ||
| 104 | * @dot_clock_p: resulting pixel clock | ||
| 105 | * @fb_div_p: resulting feedback divider | ||
| 106 | * @frac_fb_div_p: fractional part of the feedback divider | ||
| 107 | * @ref_div_p: resulting reference divider | ||
| 108 | * @post_div_p: resulting post divider | ||
| 109 | * | ||
| 110 | * Try to calculate the PLL parameters to generate the given frequency: | ||
| 111 | * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div) | ||
| 112 | */ | ||
| 113 | void amdgpu_pll_compute(struct amdgpu_pll *pll, | ||
| 114 | u32 freq, | ||
| 115 | u32 *dot_clock_p, | ||
| 116 | u32 *fb_div_p, | ||
| 117 | u32 *frac_fb_div_p, | ||
| 118 | u32 *ref_div_p, | ||
| 119 | u32 *post_div_p) | ||
| 120 | { | ||
| 121 | unsigned target_clock = pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV ? | ||
| 122 | freq : freq / 10; | ||
| 123 | |||
| 124 | unsigned fb_div_min, fb_div_max, fb_div; | ||
| 125 | unsigned post_div_min, post_div_max, post_div; | ||
| 126 | unsigned ref_div_min, ref_div_max, ref_div; | ||
| 127 | unsigned post_div_best, diff_best; | ||
| 128 | unsigned nom, den; | ||
| 129 | |||
| 130 | /* determine allowed feedback divider range */ | ||
| 131 | fb_div_min = pll->min_feedback_div; | ||
| 132 | fb_div_max = pll->max_feedback_div; | ||
| 133 | |||
| 134 | if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) { | ||
| 135 | fb_div_min *= 10; | ||
| 136 | fb_div_max *= 10; | ||
| 137 | } | ||
| 138 | |||
| 139 | /* determine allowed ref divider range */ | ||
| 140 | if (pll->flags & AMDGPU_PLL_USE_REF_DIV) | ||
| 141 | ref_div_min = pll->reference_div; | ||
| 142 | else | ||
| 143 | ref_div_min = pll->min_ref_div; | ||
| 144 | |||
| 145 | if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && | ||
| 146 | pll->flags & AMDGPU_PLL_USE_REF_DIV) | ||
| 147 | ref_div_max = pll->reference_div; | ||
| 148 | else | ||
| 149 | ref_div_max = pll->max_ref_div; | ||
| 150 | |||
| 151 | /* determine allowed post divider range */ | ||
| 152 | if (pll->flags & AMDGPU_PLL_USE_POST_DIV) { | ||
| 153 | post_div_min = pll->post_div; | ||
| 154 | post_div_max = pll->post_div; | ||
| 155 | } else { | ||
| 156 | unsigned vco_min, vco_max; | ||
| 157 | |||
| 158 | if (pll->flags & AMDGPU_PLL_IS_LCD) { | ||
| 159 | vco_min = pll->lcd_pll_out_min; | ||
| 160 | vco_max = pll->lcd_pll_out_max; | ||
| 161 | } else { | ||
| 162 | vco_min = pll->pll_out_min; | ||
| 163 | vco_max = pll->pll_out_max; | ||
| 164 | } | ||
| 165 | |||
| 166 | if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) { | ||
| 167 | vco_min *= 10; | ||
| 168 | vco_max *= 10; | ||
| 169 | } | ||
| 170 | |||
| 171 | post_div_min = vco_min / target_clock; | ||
| 172 | if ((target_clock * post_div_min) < vco_min) | ||
| 173 | ++post_div_min; | ||
| 174 | if (post_div_min < pll->min_post_div) | ||
| 175 | post_div_min = pll->min_post_div; | ||
| 176 | |||
| 177 | post_div_max = vco_max / target_clock; | ||
| 178 | if ((target_clock * post_div_max) > vco_max) | ||
| 179 | --post_div_max; | ||
| 180 | if (post_div_max > pll->max_post_div) | ||
| 181 | post_div_max = pll->max_post_div; | ||
| 182 | } | ||
| 183 | |||
| 184 | /* represent the searched ratio as fractional number */ | ||
| 185 | nom = target_clock; | ||
| 186 | den = pll->reference_freq; | ||
| 187 | |||
| 188 | /* reduce the numbers to a simpler ratio */ | ||
| 189 | amdgpu_pll_reduce_ratio(&nom, &den, fb_div_min, post_div_min); | ||
| 190 | |||
| 191 | /* now search for a post divider */ | ||
| 192 | if (pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP) | ||
| 193 | post_div_best = post_div_min; | ||
| 194 | else | ||
| 195 | post_div_best = post_div_max; | ||
| 196 | diff_best = ~0; | ||
| 197 | |||
| 198 | for (post_div = post_div_min; post_div <= post_div_max; ++post_div) { | ||
| 199 | unsigned diff; | ||
| 200 | amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, | ||
| 201 | ref_div_max, &fb_div, &ref_div); | ||
| 202 | diff = abs(target_clock - (pll->reference_freq * fb_div) / | ||
| 203 | (ref_div * post_div)); | ||
| 204 | |||
| 205 | if (diff < diff_best || (diff == diff_best && | ||
| 206 | !(pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP))) { | ||
| 207 | |||
| 208 | post_div_best = post_div; | ||
| 209 | diff_best = diff; | ||
| 210 | } | ||
| 211 | } | ||
| 212 | post_div = post_div_best; | ||
| 213 | |||
| 214 | /* get the feedback and reference divider for the optimal value */ | ||
| 215 | amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max, | ||
| 216 | &fb_div, &ref_div); | ||
| 217 | |||
| 218 | /* reduce the numbers to a simpler ratio once more */ | ||
| 219 | /* this also makes sure that the reference divider is large enough */ | ||
| 220 | amdgpu_pll_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min); | ||
| 221 | |||
| 222 | /* avoid high jitter with small fractional dividers */ | ||
| 223 | if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) { | ||
| 224 | fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60); | ||
| 225 | if (fb_div < fb_div_min) { | ||
| 226 | unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div); | ||
| 227 | fb_div *= tmp; | ||
| 228 | ref_div *= tmp; | ||
| 229 | } | ||
| 230 | } | ||
| 231 | |||
| 232 | /* and finally save the result */ | ||
| 233 | if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) { | ||
| 234 | *fb_div_p = fb_div / 10; | ||
| 235 | *frac_fb_div_p = fb_div % 10; | ||
| 236 | } else { | ||
| 237 | *fb_div_p = fb_div; | ||
| 238 | *frac_fb_div_p = 0; | ||
| 239 | } | ||
| 240 | |||
| 241 | *dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) + | ||
| 242 | (pll->reference_freq * *frac_fb_div_p)) / | ||
| 243 | (ref_div * post_div * 10); | ||
| 244 | *ref_div_p = ref_div; | ||
| 245 | *post_div_p = post_div; | ||
| 246 | |||
| 247 | DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n", | ||
| 248 | freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, | ||
| 249 | ref_div, post_div); | ||
| 250 | } | ||
| 251 | |||
| 252 | /** | ||
| 253 | * amdgpu_pll_get_use_mask - look up a mask of which PPLLs are in use | ||
| 254 | * | ||
| 255 | * @crtc: drm crtc | ||
| 256 | * | ||
| 257 | * Returns the mask of which PPLLs (Pixel PLLs) are in use. | ||
| 258 | */ | ||
| 259 | u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc) | ||
| 260 | { | ||
| 261 | struct drm_device *dev = crtc->dev; | ||
| 262 | struct drm_crtc *test_crtc; | ||
| 263 | struct amdgpu_crtc *test_amdgpu_crtc; | ||
| 264 | u32 pll_in_use = 0; | ||
| 265 | |||
| 266 | list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { | ||
| 267 | if (crtc == test_crtc) | ||
| 268 | continue; | ||
| 269 | |||
| 270 | test_amdgpu_crtc = to_amdgpu_crtc(test_crtc); | ||
| 271 | if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID) | ||
| 272 | pll_in_use |= (1 << test_amdgpu_crtc->pll_id); | ||
| 273 | } | ||
| 274 | return pll_in_use; | ||
| 275 | } | ||
| 276 | |||
| 277 | /** | ||
| 278 | * amdgpu_pll_get_shared_dp_ppll - return the PPLL used by another crtc for DP | ||
| 279 | * | ||
| 280 | * @crtc: drm crtc | ||
| 281 | * | ||
| 282 | * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is | ||
| 283 | * also in DP mode. For DP, a single PPLL can be used for all DP | ||
| 284 | * crtcs/encoders. | ||
| 285 | */ | ||
| 286 | int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc) | ||
| 287 | { | ||
| 288 | struct drm_device *dev = crtc->dev; | ||
| 289 | struct drm_crtc *test_crtc; | ||
| 290 | struct amdgpu_crtc *test_amdgpu_crtc; | ||
| 291 | |||
| 292 | list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { | ||
| 293 | if (crtc == test_crtc) | ||
| 294 | continue; | ||
| 295 | test_amdgpu_crtc = to_amdgpu_crtc(test_crtc); | ||
| 296 | if (test_amdgpu_crtc->encoder && | ||
| 297 | ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) { | ||
| 298 | /* for DP use the same PLL for all */ | ||
| 299 | if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID) | ||
| 300 | return test_amdgpu_crtc->pll_id; | ||
| 301 | } | ||
| 302 | } | ||
| 303 | return ATOM_PPLL_INVALID; | ||
| 304 | } | ||
| 305 | |||
| 306 | /** | ||
| 307 | * amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc | ||
| 308 | * | ||
| 309 | * @crtc: drm crtc | ||
| 310 | * @encoder: drm encoder | ||
| 311 | * | ||
| 312 | * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can | ||
| 313 | * be shared (i.e., same clock). | ||
| 314 | */ | ||
| 315 | int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc) | ||
| 316 | { | ||
| 317 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 318 | struct drm_device *dev = crtc->dev; | ||
| 319 | struct drm_crtc *test_crtc; | ||
| 320 | struct amdgpu_crtc *test_amdgpu_crtc; | ||
| 321 | u32 adjusted_clock, test_adjusted_clock; | ||
| 322 | |||
| 323 | adjusted_clock = amdgpu_crtc->adjusted_clock; | ||
| 324 | |||
| 325 | if (adjusted_clock == 0) | ||
| 326 | return ATOM_PPLL_INVALID; | ||
| 327 | |||
| 328 | list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { | ||
| 329 | if (crtc == test_crtc) | ||
| 330 | continue; | ||
| 331 | test_amdgpu_crtc = to_amdgpu_crtc(test_crtc); | ||
| 332 | if (test_amdgpu_crtc->encoder && | ||
| 333 | !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) { | ||
| 334 | /* check if we are already driving this connector with another crtc */ | ||
| 335 | if (test_amdgpu_crtc->connector == amdgpu_crtc->connector) { | ||
| 336 | /* if we are, return that pll */ | ||
| 337 | if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID) | ||
| 338 | return test_amdgpu_crtc->pll_id; | ||
| 339 | } | ||
| 340 | /* for non-DP check the clock */ | ||
| 341 | test_adjusted_clock = test_amdgpu_crtc->adjusted_clock; | ||
| 342 | if ((crtc->mode.clock == test_crtc->mode.clock) && | ||
| 343 | (adjusted_clock == test_adjusted_clock) && | ||
| 344 | (amdgpu_crtc->ss_enabled == test_amdgpu_crtc->ss_enabled) && | ||
| 345 | (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)) | ||
| 346 | return test_amdgpu_crtc->pll_id; | ||
| 347 | } | ||
| 348 | } | ||
| 349 | return ATOM_PPLL_INVALID; | ||
| 350 | } | ||
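To sanity-check the divider relation documented for amdgpu_pll_compute(), dot_clock = (ref_freq * feedback_div) / (ref_div * post_div), here is a worked example; the values and units are purely illustrative, not taken from this patch:

/* illustrative check, not part of the patch:
 * ref_freq = 2700, fb_div = 110, ref_div = 3, post_div = 6
 * dot_clock = 2700 * 110 / (3 * 6) = 297000 / 18 = 16500
 */

Note also that amdgpu_pll_get_fb_ref_div() clamps ref_div_max so that ref_div * post_div stays at or below 128, so the post-divider search loop in amdgpu_pll_compute() only ever evaluates divider pairs within that limit.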
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h new file mode 100644 index 000000000000..db6136f68b82 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h | |||
| @@ -0,0 +1,38 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __AMDGPU_PLL_H__ | ||
| 25 | #define __AMDGPU_PLL_H__ | ||
| 26 | |||
| 27 | void amdgpu_pll_compute(struct amdgpu_pll *pll, | ||
| 28 | u32 freq, | ||
| 29 | u32 *dot_clock_p, | ||
| 30 | u32 *fb_div_p, | ||
| 31 | u32 *frac_fb_div_p, | ||
| 32 | u32 *ref_div_p, | ||
| 33 | u32 *post_div_p); | ||
| 34 | u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc); | ||
| 35 | int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc); | ||
| 36 | int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc); | ||
| 37 | |||
| 38 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c new file mode 100644 index 000000000000..89782543f854 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
| @@ -0,0 +1,801 @@ | |||
| 1 | /* | ||
| 2 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 3 | * copy of this software and associated documentation files (the "Software"), | ||
| 4 | * to deal in the Software without restriction, including without limitation | ||
| 5 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 6 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 7 | * Software is furnished to do so, subject to the following conditions: | ||
| 8 | * | ||
| 9 | * The above copyright notice and this permission notice shall be included in | ||
| 10 | * all copies or substantial portions of the Software. | ||
| 11 | * | ||
| 12 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 13 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 15 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 16 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 17 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 18 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 19 | * | ||
| 20 | * Authors: Rafał Miłecki <zajec5@gmail.com> | ||
| 21 | * Alex Deucher <alexdeucher@gmail.com> | ||
| 22 | */ | ||
| 23 | #include <drm/drmP.h> | ||
| 24 | #include "amdgpu.h" | ||
| 25 | #include "amdgpu_drv.h" | ||
| 26 | #include "amdgpu_pm.h" | ||
| 27 | #include "amdgpu_dpm.h" | ||
| 28 | #include "atom.h" | ||
| 29 | #include <linux/power_supply.h> | ||
| 30 | #include <linux/hwmon.h> | ||
| 31 | #include <linux/hwmon-sysfs.h> | ||
| 32 | |||
| 33 | static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); | ||
| 34 | |||
| 35 | void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) | ||
| 36 | { | ||
| 37 | if (adev->pm.dpm_enabled) { | ||
| 38 | mutex_lock(&adev->pm.mutex); | ||
| 39 | if (power_supply_is_system_supplied() > 0) | ||
| 40 | adev->pm.dpm.ac_power = true; | ||
| 41 | else | ||
| 42 | adev->pm.dpm.ac_power = false; | ||
| 43 | if (adev->pm.funcs->enable_bapm) | ||
| 44 | amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power); | ||
| 45 | mutex_unlock(&adev->pm.mutex); | ||
| 46 | } | ||
| 47 | } | ||
| 48 | |||
| 49 | static ssize_t amdgpu_get_dpm_state(struct device *dev, | ||
| 50 | struct device_attribute *attr, | ||
| 51 | char *buf) | ||
| 52 | { | ||
| 53 | struct drm_device *ddev = dev_get_drvdata(dev); | ||
| 54 | struct amdgpu_device *adev = ddev->dev_private; | ||
| 55 | enum amdgpu_pm_state_type pm = adev->pm.dpm.user_state; | ||
| 56 | |||
| 57 | return snprintf(buf, PAGE_SIZE, "%s\n", | ||
| 58 | (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : | ||
| 59 | (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); | ||
| 60 | } | ||
| 61 | |||
| 62 | static ssize_t amdgpu_set_dpm_state(struct device *dev, | ||
| 63 | struct device_attribute *attr, | ||
| 64 | const char *buf, | ||
| 65 | size_t count) | ||
| 66 | { | ||
| 67 | struct drm_device *ddev = dev_get_drvdata(dev); | ||
| 68 | struct amdgpu_device *adev = ddev->dev_private; | ||
| 69 | |||
| 70 | mutex_lock(&adev->pm.mutex); | ||
| 71 | if (strncmp("battery", buf, strlen("battery")) == 0) | ||
| 72 | adev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; | ||
| 73 | else if (strncmp("balanced", buf, strlen("balanced")) == 0) | ||
| 74 | adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; | ||
| 75 | else if (strncmp("performance", buf, strlen("performance")) == 0) | ||
| 76 | adev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE; | ||
| 77 | else { | ||
| 78 | mutex_unlock(&adev->pm.mutex); | ||
| 79 | count = -EINVAL; | ||
| 80 | goto fail; | ||
| 81 | } | ||
| 82 | mutex_unlock(&adev->pm.mutex); | ||
| 83 | |||
| 84 | /* Can't set dpm state when the card is off */ | ||
| 85 | if (!(adev->flags & AMDGPU_IS_PX) || | ||
| 86 | (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) | ||
| 87 | amdgpu_pm_compute_clocks(adev); | ||
| 88 | fail: | ||
| 89 | return count; | ||
| 90 | } | ||
| 91 | |||
| 92 | static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, | ||
| 93 | struct device_attribute *attr, | ||
| 94 | char *buf) | ||
| 95 | { | ||
| 96 | struct drm_device *ddev = dev_get_drvdata(dev); | ||
| 97 | struct amdgpu_device *adev = ddev->dev_private; | ||
| 98 | enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level; | ||
| 99 | |||
| 100 | return snprintf(buf, PAGE_SIZE, "%s\n", | ||
| 101 | (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" : | ||
| 102 | (level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); | ||
| 103 | } | ||
| 104 | |||
| 105 | static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, | ||
| 106 | struct device_attribute *attr, | ||
| 107 | const char *buf, | ||
| 108 | size_t count) | ||
| 109 | { | ||
| 110 | struct drm_device *ddev = dev_get_drvdata(dev); | ||
| 111 | struct amdgpu_device *adev = ddev->dev_private; | ||
| 112 | enum amdgpu_dpm_forced_level level; | ||
| 113 | int ret = 0; | ||
| 114 | |||
| 115 | mutex_lock(&adev->pm.mutex); | ||
| 116 | if (strncmp("low", buf, strlen("low")) == 0) { | ||
| 117 | level = AMDGPU_DPM_FORCED_LEVEL_LOW; | ||
| 118 | } else if (strncmp("high", buf, strlen("high")) == 0) { | ||
| 119 | level = AMDGPU_DPM_FORCED_LEVEL_HIGH; | ||
| 120 | } else if (strncmp("auto", buf, strlen("auto")) == 0) { | ||
| 121 | level = AMDGPU_DPM_FORCED_LEVEL_AUTO; | ||
| 122 | } else { | ||
| 123 | count = -EINVAL; | ||
| 124 | goto fail; | ||
| 125 | } | ||
| 126 | if (adev->pm.funcs->force_performance_level) { | ||
| 127 | if (adev->pm.dpm.thermal_active) { | ||
| 128 | count = -EINVAL; | ||
| 129 | goto fail; | ||
| 130 | } | ||
| 131 | ret = amdgpu_dpm_force_performance_level(adev, level); | ||
| 132 | if (ret) | ||
| 133 | count = -EINVAL; | ||
| 134 | } | ||
| 135 | fail: | ||
| 136 | mutex_unlock(&adev->pm.mutex); | ||
| 137 | |||
| 138 | return count; | ||
| 139 | } | ||
| 140 | |||
| 141 | static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state); | ||
| 142 | static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, | ||
| 143 | amdgpu_get_dpm_forced_performance_level, | ||
| 144 | amdgpu_set_dpm_forced_performance_level); | ||
| 145 | |||
| 146 | static ssize_t amdgpu_hwmon_show_temp(struct device *dev, | ||
| 147 | struct device_attribute *attr, | ||
| 148 | char *buf) | ||
| 149 | { | ||
| 150 | struct amdgpu_device *adev = dev_get_drvdata(dev); | ||
| 151 | int temp; | ||
| 152 | |||
| 153 | if (adev->pm.funcs->get_temperature) | ||
| 154 | temp = amdgpu_dpm_get_temperature(adev); | ||
| 155 | else | ||
| 156 | temp = 0; | ||
| 157 | |||
| 158 | return snprintf(buf, PAGE_SIZE, "%d\n", temp); | ||
| 159 | } | ||
| 160 | |||
| 161 | static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev, | ||
| 162 | struct device_attribute *attr, | ||
| 163 | char *buf) | ||
| 164 | { | ||
| 165 | struct amdgpu_device *adev = dev_get_drvdata(dev); | ||
| 166 | int hyst = to_sensor_dev_attr(attr)->index; | ||
| 167 | int temp; | ||
| 168 | |||
| 169 | if (hyst) | ||
| 170 | temp = adev->pm.dpm.thermal.min_temp; | ||
| 171 | else | ||
| 172 | temp = adev->pm.dpm.thermal.max_temp; | ||
| 173 | |||
| 174 | return snprintf(buf, PAGE_SIZE, "%d\n", temp); | ||
| 175 | } | ||
| 176 | |||
| 177 | static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, | ||
| 178 | struct device_attribute *attr, | ||
| 179 | char *buf) | ||
| 180 | { | ||
| 181 | struct amdgpu_device *adev = dev_get_drvdata(dev); | ||
| 182 | u32 pwm_mode = 0; | ||
| 183 | |||
| 184 | if (adev->pm.funcs->get_fan_control_mode) | ||
| 185 | pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); | ||
| 186 | |||
| 187 | /* never 0 (full-speed); the fan is always fuse- or smc-controlled */ | ||
| 188 | return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2); | ||
| 189 | } | ||
| 190 | |||
| 191 | static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, | ||
| 192 | struct device_attribute *attr, | ||
| 193 | const char *buf, | ||
| 194 | size_t count) | ||
| 195 | { | ||
| 196 | struct amdgpu_device *adev = dev_get_drvdata(dev); | ||
| 197 | int err; | ||
| 198 | int value; | ||
| 199 | |||
| 200 | if(!adev->pm.funcs->set_fan_control_mode) | ||
| 201 | return -EINVAL; | ||
| 202 | |||
| 203 | err = kstrtoint(buf, 10, &value); | ||
| 204 | if (err) | ||
| 205 | return err; | ||
| 206 | |||
| 207 | switch (value) { | ||
| 208 | case 1: /* manual, percent-based */ | ||
| 209 | amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC); | ||
| 210 | break; | ||
| 211 | default: /* disable */ | ||
| 212 | amdgpu_dpm_set_fan_control_mode(adev, 0); | ||
| 213 | break; | ||
| 214 | } | ||
| 215 | |||
| 216 | return count; | ||
| 217 | } | ||
| 218 | |||
| 219 | static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev, | ||
| 220 | struct device_attribute *attr, | ||
| 221 | char *buf) | ||
| 222 | { | ||
| 223 | return sprintf(buf, "%i\n", 0); | ||
| 224 | } | ||
| 225 | |||
| 226 | static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev, | ||
| 227 | struct device_attribute *attr, | ||
| 228 | char *buf) | ||
| 229 | { | ||
| 230 | return sprintf(buf, "%i\n", 255); | ||
| 231 | } | ||
| 232 | |||
| 233 | static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, | ||
| 234 | struct device_attribute *attr, | ||
| 235 | const char *buf, size_t count) | ||
| 236 | { | ||
| 237 | struct amdgpu_device *adev = dev_get_drvdata(dev); | ||
| 238 | int err; | ||
| 239 | u32 value; | ||
| 240 | |||
| 241 | err = kstrtou32(buf, 10, &value); | ||
| 242 | if (err) | ||
| 243 | return err; | ||
| 244 | |||
| 245 | value = (value * 100) / 255; | ||
| 246 | |||
| 247 | err = amdgpu_dpm_set_fan_speed_percent(adev, value); | ||
| 248 | if (err) | ||
| 249 | return err; | ||
| 250 | |||
| 251 | return count; | ||
| 252 | } | ||
| 253 | |||
| 254 | static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, | ||
| 255 | struct device_attribute *attr, | ||
| 256 | char *buf) | ||
| 257 | { | ||
| 258 | struct amdgpu_device *adev = dev_get_drvdata(dev); | ||
| 259 | int err; | ||
| 260 | u32 speed; | ||
| 261 | |||
| 262 | err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); | ||
| 263 | if (err) | ||
| 264 | return err; | ||
| 265 | |||
| 266 | speed = (speed * 255) / 100; | ||
| 267 | |||
| 268 | return sprintf(buf, "%i\n", speed); | ||
| 269 | } | ||
| 270 | |||
| 271 | static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0); | ||
| 272 | static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); | ||
| 273 | static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); | ||
| 274 | static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0); | ||
| 275 | static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0); | ||
| 276 | static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0); | ||
| 277 | static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0); | ||
| 278 | |||
| 279 | static struct attribute *hwmon_attributes[] = { | ||
| 280 | &sensor_dev_attr_temp1_input.dev_attr.attr, | ||
| 281 | &sensor_dev_attr_temp1_crit.dev_attr.attr, | ||
| 282 | &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, | ||
| 283 | &sensor_dev_attr_pwm1.dev_attr.attr, | ||
| 284 | &sensor_dev_attr_pwm1_enable.dev_attr.attr, | ||
| 285 | &sensor_dev_attr_pwm1_min.dev_attr.attr, | ||
| 286 | &sensor_dev_attr_pwm1_max.dev_attr.attr, | ||
| 287 | NULL | ||
| 288 | }; | ||
| 289 | |||
| 290 | static umode_t hwmon_attributes_visible(struct kobject *kobj, | ||
| 291 | struct attribute *attr, int index) | ||
| 292 | { | ||
| 293 | struct device *dev = container_of(kobj, struct device, kobj); | ||
| 294 | struct amdgpu_device *adev = dev_get_drvdata(dev); | ||
| 295 | umode_t effective_mode = attr->mode; | ||
| 296 | |||
| 297 | /* Skip limit attributes if DPM is not enabled */ | ||
| 298 | if (!adev->pm.dpm_enabled && | ||
| 299 | (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || | ||
| 300 | attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) | ||
| 301 | return 0; | ||
| 302 | |||
| 303 | /* Skip fan attributes if fan is not present */ | ||
| 304 | if (adev->pm.no_fan && | ||
| 305 | (attr == &sensor_dev_attr_pwm1.dev_attr.attr || | ||
| 306 | attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || | ||
| 307 | attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || | ||
| 308 | attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) | ||
| 309 | return 0; | ||
| 310 | |||
| 311 | /* mask fan attributes if we have no bindings for this asic to expose */ | ||
| 312 | if ((!adev->pm.funcs->get_fan_speed_percent && | ||
| 313 | attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ | ||
| 314 | (!adev->pm.funcs->get_fan_control_mode && | ||
| 315 | attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ | ||
| 316 | effective_mode &= ~S_IRUGO; | ||
| 317 | |||
| 318 | if ((!adev->pm.funcs->set_fan_speed_percent && | ||
| 319 | attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ | ||
| 320 | (!adev->pm.funcs->set_fan_control_mode && | ||
| 321 | attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ | ||
| 322 | effective_mode &= ~S_IWUSR; | ||
| 323 | |||
| 324 | /* hide max/min values if we can't both query and manage the fan */ | ||
| 325 | if ((!adev->pm.funcs->set_fan_speed_percent && | ||
| 326 | !adev->pm.funcs->get_fan_speed_percent) && | ||
| 327 | (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || | ||
| 328 | attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) | ||
| 329 | return 0; | ||
| 330 | |||
| 331 | return effective_mode; | ||
| 332 | } | ||
| 333 | |||
| 334 | static const struct attribute_group hwmon_attrgroup = { | ||
| 335 | .attrs = hwmon_attributes, | ||
| 336 | .is_visible = hwmon_attributes_visible, | ||
| 337 | }; | ||
| 338 | |||
| 339 | static const struct attribute_group *hwmon_groups[] = { | ||
| 340 | &hwmon_attrgroup, | ||
| 341 | NULL | ||
| 342 | }; | ||
| 343 | |||
| 344 | void amdgpu_dpm_thermal_work_handler(struct work_struct *work) | ||
| 345 | { | ||
| 346 | struct amdgpu_device *adev = | ||
| 347 | container_of(work, struct amdgpu_device, | ||
| 348 | pm.dpm.thermal.work); | ||
| 349 | /* switch to the thermal state */ | ||
| 350 | enum amdgpu_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; | ||
| 351 | |||
| 352 | if (!adev->pm.dpm_enabled) | ||
| 353 | return; | ||
| 354 | |||
| 355 | if (adev->pm.funcs->get_temperature) { | ||
| 356 | int temp = amdgpu_dpm_get_temperature(adev); | ||
| 357 | |||
| 358 | if (temp < adev->pm.dpm.thermal.min_temp) | ||
| 359 | /* switch back to the user state */ | ||
| 360 | dpm_state = adev->pm.dpm.user_state; | ||
| 361 | } else { | ||
| 362 | if (adev->pm.dpm.thermal.high_to_low) | ||
| 363 | /* switch back to the user state */ | ||
| 364 | dpm_state = adev->pm.dpm.user_state; | ||
| 365 | } | ||
| 366 | mutex_lock(&adev->pm.mutex); | ||
| 367 | if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) | ||
| 368 | adev->pm.dpm.thermal_active = true; | ||
| 369 | else | ||
| 370 | adev->pm.dpm.thermal_active = false; | ||
| 371 | adev->pm.dpm.state = dpm_state; | ||
| 372 | mutex_unlock(&adev->pm.mutex); | ||
| 373 | |||
| 374 | amdgpu_pm_compute_clocks(adev); | ||
| 375 | } | ||
| 376 | |||
| 377 | static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, | ||
| 378 | enum amdgpu_pm_state_type dpm_state) | ||
| 379 | { | ||
| 380 | int i; | ||
| 381 | struct amdgpu_ps *ps; | ||
| 382 | u32 ui_class; | ||
| 383 | bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? | ||
| 384 | true : false; | ||
| 385 | |||
| 386 | /* check if the vblank period is too short to adjust the mclk */ | ||
| 387 | if (single_display && adev->pm.funcs->vblank_too_short) { | ||
| 388 | if (amdgpu_dpm_vblank_too_short(adev)) | ||
| 389 | single_display = false; | ||
| 390 | } | ||
| 391 | |||
| 392 | /* certain older asics have a separate 3D performance state, | ||
| 393 | * so try that first if the user selected performance | ||
| 394 | */ | ||
| 395 | if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) | ||
| 396 | dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; | ||
| 397 | /* balanced states don't exist at the moment */ | ||
| 398 | if (dpm_state == POWER_STATE_TYPE_BALANCED) | ||
| 399 | dpm_state = POWER_STATE_TYPE_PERFORMANCE; | ||
| 400 | |||
| 401 | restart_search: | ||
| 402 | /* Pick the best power state based on current conditions */ | ||
| 403 | for (i = 0; i < adev->pm.dpm.num_ps; i++) { | ||
| 404 | ps = &adev->pm.dpm.ps[i]; | ||
| 405 | ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; | ||
| 406 | switch (dpm_state) { | ||
| 407 | /* user states */ | ||
| 408 | case POWER_STATE_TYPE_BATTERY: | ||
| 409 | if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { | ||
| 410 | if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { | ||
| 411 | if (single_display) | ||
| 412 | return ps; | ||
| 413 | } else | ||
| 414 | return ps; | ||
| 415 | } | ||
| 416 | break; | ||
| 417 | case POWER_STATE_TYPE_BALANCED: | ||
| 418 | if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { | ||
| 419 | if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { | ||
| 420 | if (single_display) | ||
| 421 | return ps; | ||
| 422 | } else | ||
| 423 | return ps; | ||
| 424 | } | ||
| 425 | break; | ||
| 426 | case POWER_STATE_TYPE_PERFORMANCE: | ||
| 427 | if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { | ||
| 428 | if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { | ||
| 429 | if (single_display) | ||
| 430 | return ps; | ||
| 431 | } else | ||
| 432 | return ps; | ||
| 433 | } | ||
| 434 | break; | ||
| 435 | /* internal states */ | ||
| 436 | case POWER_STATE_TYPE_INTERNAL_UVD: | ||
| 437 | if (adev->pm.dpm.uvd_ps) | ||
| 438 | return adev->pm.dpm.uvd_ps; | ||
| 439 | else | ||
| 440 | break; | ||
| 441 | case POWER_STATE_TYPE_INTERNAL_UVD_SD: | ||
| 442 | if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) | ||
| 443 | return ps; | ||
| 444 | break; | ||
| 445 | case POWER_STATE_TYPE_INTERNAL_UVD_HD: | ||
| 446 | if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) | ||
| 447 | return ps; | ||
| 448 | break; | ||
| 449 | case POWER_STATE_TYPE_INTERNAL_UVD_HD2: | ||
| 450 | if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) | ||
| 451 | return ps; | ||
| 452 | break; | ||
| 453 | case POWER_STATE_TYPE_INTERNAL_UVD_MVC: | ||
| 454 | if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) | ||
| 455 | return ps; | ||
| 456 | break; | ||
| 457 | case POWER_STATE_TYPE_INTERNAL_BOOT: | ||
| 458 | return adev->pm.dpm.boot_ps; | ||
| 459 | case POWER_STATE_TYPE_INTERNAL_THERMAL: | ||
| 460 | if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) | ||
| 461 | return ps; | ||
| 462 | break; | ||
| 463 | case POWER_STATE_TYPE_INTERNAL_ACPI: | ||
| 464 | if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) | ||
| 465 | return ps; | ||
| 466 | break; | ||
| 467 | case POWER_STATE_TYPE_INTERNAL_ULV: | ||
| 468 | if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) | ||
| 469 | return ps; | ||
| 470 | break; | ||
| 471 | case POWER_STATE_TYPE_INTERNAL_3DPERF: | ||
| 472 | if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) | ||
| 473 | return ps; | ||
| 474 | break; | ||
| 475 | default: | ||
| 476 | break; | ||
| 477 | } | ||
| 478 | } | ||
| 479 | /* use a fallback state if we didn't match */ | ||
| 480 | switch (dpm_state) { | ||
| 481 | case POWER_STATE_TYPE_INTERNAL_UVD_SD: | ||
| 482 | dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; | ||
| 483 | goto restart_search; | ||
| 484 | case POWER_STATE_TYPE_INTERNAL_UVD_HD: | ||
| 485 | case POWER_STATE_TYPE_INTERNAL_UVD_HD2: | ||
| 486 | case POWER_STATE_TYPE_INTERNAL_UVD_MVC: | ||
| 487 | if (adev->pm.dpm.uvd_ps) { | ||
| 488 | return adev->pm.dpm.uvd_ps; | ||
| 489 | } else { | ||
| 490 | dpm_state = POWER_STATE_TYPE_PERFORMANCE; | ||
| 491 | goto restart_search; | ||
| 492 | } | ||
| 493 | case POWER_STATE_TYPE_INTERNAL_THERMAL: | ||
| 494 | dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; | ||
| 495 | goto restart_search; | ||
| 496 | case POWER_STATE_TYPE_INTERNAL_ACPI: | ||
| 497 | dpm_state = POWER_STATE_TYPE_BATTERY; | ||
| 498 | goto restart_search; | ||
| 499 | case POWER_STATE_TYPE_BATTERY: | ||
| 500 | case POWER_STATE_TYPE_BALANCED: | ||
| 501 | case POWER_STATE_TYPE_INTERNAL_3DPERF: | ||
| 502 | dpm_state = POWER_STATE_TYPE_PERFORMANCE; | ||
| 503 | goto restart_search; | ||
| 504 | default: | ||
| 505 | break; | ||
| 506 | } | ||
| 507 | |||
| 508 | return NULL; | ||
| 509 | } | ||
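The fallback ladder at the end of amdgpu_dpm_pick_power_state() is easier to read flattened out; summarizing the switch above:

    /* Fallback order when no state matched on the first pass:
     *
     *   UVD_SD                      -> UVD_HD
     *   UVD_HD / UVD_HD2 / UVD_MVC  -> uvd_ps if present, else PERFORMANCE
     *   THERMAL                     -> ACPI
     *   ACPI                        -> BATTERY
     *   BATTERY / BALANCED / 3DPERF -> PERFORMANCE
     *
     * PERFORMANCE has no further fallback, so the search may return NULL. */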
| 510 | |||
| 511 | static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) | ||
| 512 | { | ||
| 513 | int i; | ||
| 514 | struct amdgpu_ps *ps; | ||
| 515 | enum amdgpu_pm_state_type dpm_state; | ||
| 516 | int ret; | ||
| 517 | |||
| 518 | /* if dpm init failed */ | ||
| 519 | if (!adev->pm.dpm_enabled) | ||
| 520 | return; | ||
| 521 | |||
| 522 | if (adev->pm.dpm.user_state != adev->pm.dpm.state) { | ||
| 523 | /* add other state override checks here */ | ||
| 524 | if ((!adev->pm.dpm.thermal_active) && | ||
| 525 | (!adev->pm.dpm.uvd_active)) | ||
| 526 | adev->pm.dpm.state = adev->pm.dpm.user_state; | ||
| 527 | } | ||
| 528 | dpm_state = adev->pm.dpm.state; | ||
| 529 | |||
| 530 | ps = amdgpu_dpm_pick_power_state(adev, dpm_state); | ||
| 531 | if (ps) | ||
| 532 | adev->pm.dpm.requested_ps = ps; | ||
| 533 | else | ||
| 534 | return; | ||
| 535 | |||
| 536 | /* no need to reprogram if nothing changed unless we are on BTC+ */ | ||
| 537 | if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) { | ||
| 538 | /* vce just modifies an existing state so force a change */ | ||
| 539 | if (ps->vce_active != adev->pm.dpm.vce_active) | ||
| 540 | goto force; | ||
| 541 | if (adev->flags & AMDGPU_IS_APU) { | ||
| 542 | /* for APUs if the num crtcs changed but state is the same, | ||
| 543 | * all we need to do is update the display configuration. | ||
| 544 | */ | ||
| 545 | if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) { | ||
| 546 | /* update display watermarks based on new power state */ | ||
| 547 | amdgpu_display_bandwidth_update(adev); | ||
| 548 | /* update displays */ | ||
| 549 | amdgpu_dpm_display_configuration_changed(adev); | ||
| 550 | adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; | ||
| 551 | adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; | ||
| 552 | } | ||
| 553 | return; | ||
| 554 | } else { | ||
| 555 | /* for BTC+ if the num crtcs hasn't changed and state is the same, | ||
| 556 | * nothing to do, if the num crtcs is > 1 and state is the same, | ||
| 557 | * update display configuration. | ||
| 558 | */ | ||
| 559 | if (adev->pm.dpm.new_active_crtcs == | ||
| 560 | adev->pm.dpm.current_active_crtcs) { | ||
| 561 | return; | ||
| 562 | } else if ((adev->pm.dpm.current_active_crtc_count > 1) && | ||
| 563 | (adev->pm.dpm.new_active_crtc_count > 1)) { | ||
| 564 | /* update display watermarks based on new power state */ | ||
| 565 | amdgpu_display_bandwidth_update(adev); | ||
| 566 | /* update displays */ | ||
| 567 | amdgpu_dpm_display_configuration_changed(adev); | ||
| 568 | adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; | ||
| 569 | adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; | ||
| 570 | return; | ||
| 571 | } | ||
| 572 | } | ||
| 573 | } | ||
| 574 | |||
| 575 | force: | ||
| 576 | if (amdgpu_dpm == 1) { | ||
| 577 | printk("switching from power state:\n"); | ||
| 578 | amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); | ||
| 579 | printk("switching to power state:\n"); | ||
| 580 | amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); | ||
| 581 | } | ||
| 582 | |||
| 583 | mutex_lock(&adev->ddev->struct_mutex); | ||
| 584 | down_write(&adev->pm.mclk_lock); | ||
| 585 | mutex_lock(&adev->ring_lock); | ||
| 586 | |||
| 587 | /* update whether vce is active */ | ||
| 588 | ps->vce_active = adev->pm.dpm.vce_active; | ||
| 589 | |||
| 590 | ret = amdgpu_dpm_pre_set_power_state(adev); | ||
| 591 | if (ret) | ||
| 592 | goto done; | ||
| 593 | |||
| 594 | /* update display watermarks based on new power state */ | ||
| 595 | amdgpu_display_bandwidth_update(adev); | ||
| 596 | /* update displays */ | ||
| 597 | amdgpu_dpm_display_configuration_changed(adev); | ||
| 598 | |||
| 599 | adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; | ||
| 600 | adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; | ||
| 601 | |||
| 602 | /* wait for the rings to drain */ | ||
| 603 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | ||
| 604 | struct amdgpu_ring *ring = adev->rings[i]; | ||
| 605 | if (ring && ring->ready) | ||
| 606 | amdgpu_fence_wait_empty(ring); | ||
| 607 | } | ||
| 608 | |||
| 609 | /* program the new power state */ | ||
| 610 | amdgpu_dpm_set_power_state(adev); | ||
| 611 | |||
| 612 | /* update current power state */ | ||
| 613 | adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps; | ||
| 614 | |||
| 615 | amdgpu_dpm_post_set_power_state(adev); | ||
| 616 | |||
| 617 | if (adev->pm.funcs->force_performance_level) { | ||
| 618 | if (adev->pm.dpm.thermal_active) { | ||
| 619 | enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level; | ||
| 620 | /* force low perf level for thermal */ | ||
| 621 | amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW); | ||
| 622 | /* save the user's level */ | ||
| 623 | adev->pm.dpm.forced_level = level; | ||
| 624 | } else { | ||
| 625 | /* otherwise, user selected level */ | ||
| 626 | amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); | ||
| 627 | } | ||
| 628 | } | ||
| 629 | |||
| 630 | done: | ||
| 631 | mutex_unlock(&adev->ring_lock); | ||
| 632 | up_write(&adev->pm.mclk_lock); | ||
| 633 | mutex_unlock(&adev->ddev->struct_mutex); | ||
| 634 | } | ||
| 635 | |||
| 636 | void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) | ||
| 637 | { | ||
| 638 | if (adev->pm.funcs->powergate_uvd) { | ||
| 639 | mutex_lock(&adev->pm.mutex); | ||
| 640 | /* enable/disable UVD */ | ||
| 641 | amdgpu_dpm_powergate_uvd(adev, !enable); | ||
| 642 | mutex_unlock(&adev->pm.mutex); | ||
| 643 | } else { | ||
| 644 | if (enable) { | ||
| 645 | mutex_lock(&adev->pm.mutex); | ||
| 646 | adev->pm.dpm.uvd_active = true; | ||
| 647 | adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; | ||
| 648 | mutex_unlock(&adev->pm.mutex); | ||
| 649 | } else { | ||
| 650 | mutex_lock(&adev->pm.mutex); | ||
| 651 | adev->pm.dpm.uvd_active = false; | ||
| 652 | mutex_unlock(&adev->pm.mutex); | ||
| 653 | } | ||
| 654 | |||
| 655 | amdgpu_pm_compute_clocks(adev); | ||
| 656 | } | ||
| 657 | } | ||
| 658 | |||
| 659 | void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) | ||
| 660 | { | ||
| 661 | if (enable) { | ||
| 662 | mutex_lock(&adev->pm.mutex); | ||
| 663 | adev->pm.dpm.vce_active = true; | ||
| 664 | /* XXX select vce level based on ring/task */ | ||
| 665 | adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL; | ||
| 666 | mutex_unlock(&adev->pm.mutex); | ||
| 667 | } else { | ||
| 668 | mutex_lock(&adev->pm.mutex); | ||
| 669 | adev->pm.dpm.vce_active = false; | ||
| 670 | mutex_unlock(&adev->pm.mutex); | ||
| 671 | } | ||
| 672 | |||
| 673 | amdgpu_pm_compute_clocks(adev); | ||
| 674 | } | ||
| 675 | |||
| 676 | void amdgpu_pm_print_power_states(struct amdgpu_device *adev) | ||
| 677 | { | ||
| 678 | int i; | ||
| 679 | |||
| 680 | for (i = 0; i < adev->pm.dpm.num_ps; i++) { | ||
| 681 | printk("== power state %d ==\n", i); | ||
| 682 | amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); | ||
| 683 | } | ||
| 684 | } | ||
| 685 | |||
| 686 | int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) | ||
| 687 | { | ||
| 688 | int ret; | ||
| 689 | |||
| 690 | if (adev->pm.funcs->get_temperature == NULL) | ||
| 691 | return 0; | ||
| 692 | adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, | ||
| 693 | DRIVER_NAME, adev, | ||
| 694 | hwmon_groups); | ||
| 695 | if (IS_ERR(adev->pm.int_hwmon_dev)) { | ||
| 696 | ret = PTR_ERR(adev->pm.int_hwmon_dev); | ||
| 697 | dev_err(adev->dev, | ||
| 698 | "Unable to register hwmon device: %d\n", ret); | ||
| 699 | return ret; | ||
| 700 | } | ||
| 701 | |||
| 702 | ret = device_create_file(adev->dev, &dev_attr_power_dpm_state); | ||
| 703 | if (ret) { | ||
| 704 | DRM_ERROR("failed to create device file for dpm state\n"); | ||
| 705 | return ret; | ||
| 706 | } | ||
| 707 | ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); | ||
| 708 | if (ret) { | ||
| 709 | DRM_ERROR("failed to create device file for dpm force performance level\n"); | ||
| 710 | return ret; | ||
| 711 | } | ||
| 712 | ret = amdgpu_debugfs_pm_init(adev); | ||
| 713 | if (ret) { | ||
| 714 | DRM_ERROR("Failed to register debugfs file for dpm!\n"); | ||
| 715 | return ret; | ||
| 716 | } | ||
| 717 | |||
| 718 | return 0; | ||
| 719 | } | ||
| 720 | |||
| 721 | void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) | ||
| 722 | { | ||
| 723 | if (adev->pm.int_hwmon_dev) | ||
| 724 | hwmon_device_unregister(adev->pm.int_hwmon_dev); | ||
| 725 | device_remove_file(adev->dev, &dev_attr_power_dpm_state); | ||
| 726 | device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); | ||
| 727 | } | ||
| 728 | |||
| 729 | void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) | ||
| 730 | { | ||
| 731 | struct drm_device *ddev = adev->ddev; | ||
| 732 | struct drm_crtc *crtc; | ||
| 733 | struct amdgpu_crtc *amdgpu_crtc; | ||
| 734 | |||
| 735 | if (!adev->pm.dpm_enabled) | ||
| 736 | return; | ||
| 737 | |||
| 738 | mutex_lock(&adev->pm.mutex); | ||
| 739 | |||
| 740 | /* update active crtc counts */ | ||
| 741 | adev->pm.dpm.new_active_crtcs = 0; | ||
| 742 | adev->pm.dpm.new_active_crtc_count = 0; | ||
| 743 | if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { | ||
| 744 | list_for_each_entry(crtc, | ||
| 745 | &ddev->mode_config.crtc_list, head) { | ||
| 746 | amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 747 | if (crtc->enabled) { | ||
| 748 | adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); | ||
| 749 | adev->pm.dpm.new_active_crtc_count++; | ||
| 750 | } | ||
| 751 | } | ||
| 752 | } | ||
| 753 | |||
| 754 | /* update battery/ac status */ | ||
| 755 | if (power_supply_is_system_supplied() > 0) | ||
| 756 | adev->pm.dpm.ac_power = true; | ||
| 757 | else | ||
| 758 | adev->pm.dpm.ac_power = false; | ||
| 759 | |||
| 760 | amdgpu_dpm_change_power_state_locked(adev); | ||
| 761 | |||
| 762 | mutex_unlock(&adev->pm.mutex); | ||
| 763 | |||
| 764 | } | ||
| 765 | |||
| 766 | /* | ||
| 767 | * Debugfs info | ||
| 768 | */ | ||
| 769 | #if defined(CONFIG_DEBUG_FS) | ||
| 770 | |||
| 771 | static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) | ||
| 772 | { | ||
| 773 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
| 774 | struct drm_device *dev = node->minor->dev; | ||
| 775 | struct amdgpu_device *adev = dev->dev_private; | ||
| 776 | |||
| 777 | if (adev->pm.dpm_enabled) { | ||
| 778 | mutex_lock(&adev->pm.mutex); | ||
| 779 | if (adev->pm.funcs->debugfs_print_current_performance_level) | ||
| 780 | amdgpu_dpm_debugfs_print_current_performance_level(adev, m); | ||
| 781 | else | ||
| 782 | seq_printf(m, "Debugfs support not implemented for this asic\n"); | ||
| 783 | mutex_unlock(&adev->pm.mutex); | ||
| 784 | } | ||
| 785 | |||
| 786 | return 0; | ||
| 787 | } | ||
| 788 | |||
| 789 | static struct drm_info_list amdgpu_pm_info_list[] = { | ||
| 790 | {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL}, | ||
| 791 | }; | ||
| 792 | #endif | ||
| 793 | |||
| 794 | static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev) | ||
| 795 | { | ||
| 796 | #if defined(CONFIG_DEBUG_FS) | ||
| 797 | return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list)); | ||
| 798 | #else | ||
| 799 | return 0; | ||
| 800 | #endif | ||
| 801 | } | ||
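Taken together, amdgpu_pm_sysfs_init() exposes two device attributes (power_dpm_state and power_dpm_force_performance_level) plus the hwmon group registered earlier in the file. A hedged userspace sketch of driving them; the card0/hwmon0 paths are assumptions that vary per system, and selecting manual fan mode first (pwm1_enable = 1) is the usual prerequisite for a pwm1 write to stick:

    /* Hedged userspace sketch; sysfs paths are system-dependent guesses. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f;

            /* power_dpm_state accepts "battery", "balanced" or "performance" */
            f = fopen("/sys/class/drm/card0/device/power_dpm_state", "w");
            if (f) {
                    fputs("performance\n", f);
                    fclose(f);
            }

            /* select manual fan control (maps to FDO_PWM_MODE_STATIC) ... */
            f = fopen("/sys/class/hwmon/hwmon0/pwm1_enable", "w");
            if (f) {
                    fputs("1\n", f);
                    fclose(f);
            }

            /* ... then pwm1 takes 0-255; the driver rescales it to a
             * percentage via (value * 100) / 255 before programming */
            f = fopen("/sys/class/hwmon/hwmon0/pwm1", "w");
            if (f) {
                    fputs("128\n", f);      /* roughly 50% duty */
                    fclose(f);
            }
            return 0;
    }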
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h new file mode 100644 index 000000000000..5fd7734f15ca --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h | |||
| @@ -0,0 +1,35 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __AMDGPU_PM_H__ | ||
| 25 | #define __AMDGPU_PM_H__ | ||
| 26 | |||
| 27 | int amdgpu_pm_sysfs_init(struct amdgpu_device *adev); | ||
| 28 | void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev); | ||
| 29 | void amdgpu_pm_print_power_states(struct amdgpu_device *adev); | ||
| 30 | void amdgpu_pm_compute_clocks(struct amdgpu_device *adev); | ||
| 31 | void amdgpu_dpm_thermal_work_handler(struct work_struct *work); | ||
| 32 | void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); | ||
| 33 | void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); | ||
| 34 | |||
| 35 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c new file mode 100644 index 000000000000..d9652fe32d6a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | |||
| @@ -0,0 +1,125 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2012 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * based on nouveau_prime.c | ||
| 23 | * | ||
| 24 | * Authors: Alex Deucher | ||
| 25 | */ | ||
| 26 | #include <drm/drmP.h> | ||
| 27 | |||
| 28 | #include "amdgpu.h" | ||
| 29 | #include <drm/amdgpu_drm.h> | ||
| 30 | #include <linux/dma-buf.h> | ||
| 31 | |||
| 32 | struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) | ||
| 33 | { | ||
| 34 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | ||
| 35 | int npages = bo->tbo.num_pages; | ||
| 36 | |||
| 37 | return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); | ||
| 38 | } | ||
| 39 | |||
| 40 | void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj) | ||
| 41 | { | ||
| 42 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | ||
| 43 | int ret; | ||
| 44 | |||
| 45 | ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, | ||
| 46 | &bo->dma_buf_vmap); | ||
| 47 | if (ret) | ||
| 48 | return ERR_PTR(ret); | ||
| 49 | |||
| 50 | return bo->dma_buf_vmap.virtual; | ||
| 51 | } | ||
| 52 | |||
| 53 | void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) | ||
| 54 | { | ||
| 55 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | ||
| 56 | |||
| 57 | ttm_bo_kunmap(&bo->dma_buf_vmap); | ||
| 58 | } | ||
| 59 | |||
| 60 | struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev, | ||
| 61 | struct dma_buf_attachment *attach, | ||
| 62 | struct sg_table *sg) | ||
| 63 | { | ||
| 64 | struct amdgpu_device *adev = dev->dev_private; | ||
| 65 | struct amdgpu_bo *bo; | ||
| 66 | int ret; | ||
| 67 | |||
| 68 | ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, | ||
| 69 | AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo); | ||
| 70 | if (ret) | ||
| 71 | return ERR_PTR(ret); | ||
| 72 | |||
| 73 | mutex_lock(&adev->gem.mutex); | ||
| 74 | list_add_tail(&bo->list, &adev->gem.objects); | ||
| 75 | mutex_unlock(&adev->gem.mutex); | ||
| 76 | |||
| 77 | return &bo->gem_base; | ||
| 78 | } | ||
| 79 | |||
| 80 | int amdgpu_gem_prime_pin(struct drm_gem_object *obj) | ||
| 81 | { | ||
| 82 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | ||
| 83 | int ret = 0; | ||
| 84 | |||
| 85 | ret = amdgpu_bo_reserve(bo, false); | ||
| 86 | if (unlikely(ret != 0)) | ||
| 87 | return ret; | ||
| 88 | |||
| 89 | /* pin buffer into GTT */ | ||
| 90 | ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); | ||
| 91 | amdgpu_bo_unreserve(bo); | ||
| 92 | return ret; | ||
| 93 | } | ||
| 94 | |||
| 95 | void amdgpu_gem_prime_unpin(struct drm_gem_object *obj) | ||
| 96 | { | ||
| 97 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | ||
| 98 | int ret = 0; | ||
| 99 | |||
| 100 | ret = amdgpu_bo_reserve(bo, false); | ||
| 101 | if (unlikely(ret != 0)) | ||
| 102 | return; | ||
| 103 | |||
| 104 | amdgpu_bo_unpin(bo); | ||
| 105 | amdgpu_bo_unreserve(bo); | ||
| 106 | } | ||
| 107 | |||
| 108 | struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj) | ||
| 109 | { | ||
| 110 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | ||
| 111 | |||
| 112 | return bo->tbo.resv; | ||
| 113 | } | ||
| 114 | |||
| 115 | struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, | ||
| 116 | struct drm_gem_object *gobj, | ||
| 117 | int flags) | ||
| 118 | { | ||
| 119 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); | ||
| 120 | |||
| 121 | if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm)) | ||
| 122 | return ERR_PTR(-EPERM); | ||
| 123 | |||
| 124 | return drm_gem_prime_export(dev, gobj, flags); | ||
| 125 | } | ||
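The export hook above is also where userptr BOs are rejected with -EPERM, since pages pinned from arbitrary user memory must not be handed to other devices. From userspace this path is reached through the generic PRIME ioctl; a minimal hedged sketch using only standard uapi definitions (error handling trimmed):

    /* Hedged sketch: turn a GEM handle into a shareable dma-buf fd. */
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    int export_bo(int drm_fd, unsigned int gem_handle)
    {
            struct drm_prime_handle args = {
                    .handle = gem_handle,
                    .flags  = DRM_CLOEXEC,
                    .fd     = -1,
            };

            /* lands in amdgpu_gem_prime_export(); userptr BOs get -EPERM */
            if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
                    return -1;
            return args.fd;         /* importable by another device/driver */
    }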
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c new file mode 100644 index 000000000000..855e2196657a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | |||
| @@ -0,0 +1,561 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | * Christian König | ||
| 28 | */ | ||
| 29 | #include <linux/seq_file.h> | ||
| 30 | #include <linux/slab.h> | ||
| 31 | #include <drm/drmP.h> | ||
| 32 | #include <drm/amdgpu_drm.h> | ||
| 33 | #include "amdgpu.h" | ||
| 34 | #include "atom.h" | ||
| 35 | |||
| 36 | /* | ||
| 37 | * Rings | ||
| 38 | * Most engines on the GPU are fed via ring buffers. Ring | ||
| 39 | * buffers are areas of GPU accessible memory that the host | ||
| 40 | * writes commands into and the GPU reads commands out of. | ||
| 41 | * There is a rptr (read pointer) that determines where the | ||
| 42 | * GPU is currently reading, and a wptr (write pointer) | ||
| 43 | * which determines where the host has written. When the | ||
| 44 | * pointers are equal, the ring is idle. When the host | ||
| 45 | * writes commands to the ring buffer, it increments the | ||
| 46 | * wptr. The GPU then starts fetching commands and executes | ||
| 47 | * them until the pointers are equal again. | ||
| 48 | */ | ||
| 49 | static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring); | ||
| 50 | |||
| 51 | /** | ||
| 52 | * amdgpu_ring_free_size - update the free size | ||
| 53 | * | ||
| 54 | * @adev: amdgpu_device pointer | ||
| 55 | * @ring: amdgpu_ring structure holding ring information | ||
| 56 | * | ||
| 57 | * Update the free dw slots in the ring buffer (all asics). | ||
| 58 | */ | ||
| 59 | void amdgpu_ring_free_size(struct amdgpu_ring *ring) | ||
| 60 | { | ||
| 61 | uint32_t rptr = amdgpu_ring_get_rptr(ring); | ||
| 62 | |||
| 63 | /* This works because ring_size is a power of 2 */ | ||
| 64 | ring->ring_free_dw = rptr + (ring->ring_size / 4); | ||
| 65 | ring->ring_free_dw -= ring->wptr; | ||
| 66 | ring->ring_free_dw &= ring->ptr_mask; | ||
| 67 | if (!ring->ring_free_dw) { | ||
| 68 | /* this is an empty ring */ | ||
| 69 | ring->ring_free_dw = ring->ring_size / 4; | ||
| 70 | /* update lockup info to avoid false positive */ | ||
| 71 | amdgpu_ring_lockup_update(ring); | ||
| 72 | } | ||
| 73 | } | ||
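The mask trick above relies on ring_size being a power of two, so the subtraction wraps correctly even when wptr has numerically overtaken rptr. A worked host-side restatement (illustrative only, not the kernel code):

    /* Illustrative restatement: with 1024 dwords (ptr_mask = 1023),
     * rptr = 100 and wptr = 900 leave (100 + 1024 - 900) & 1023 = 224
     * dwords free; a result of 0 means the ring is empty, not full. */
    #include <stdint.h>

    static uint32_t ring_free_dw(uint32_t rptr, uint32_t wptr,
                                 uint32_t size_dw /* power of two */)
    {
            uint32_t free = (rptr + size_dw - wptr) & (size_dw - 1);

            return free ? free : size_dw;   /* empty ring: all slots free */
    }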
| 74 | |||
| 75 | /** | ||
| 76 | * amdgpu_ring_alloc - allocate space on the ring buffer | ||
| 77 | * | ||
| 78 | * @adev: amdgpu_device pointer | ||
| 79 | * @ring: amdgpu_ring structure holding ring information | ||
| 80 | * @ndw: number of dwords to allocate in the ring buffer | ||
| 81 | * | ||
| 82 | * Allocate @ndw dwords in the ring buffer (all asics). | ||
| 83 | * Returns 0 on success, error on failure. | ||
| 84 | */ | ||
| 85 | int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) | ||
| 86 | { | ||
| 87 | int r; | ||
| 88 | |||
| 89 | /* make sure we aren't trying to allocate more space than there is on the ring */ | ||
| 90 | if (ndw > (ring->ring_size / 4)) | ||
| 91 | return -ENOMEM; | ||
| 92 | /* Align requested size with padding so unlock_commit can | ||
| 93 | * pad safely */ | ||
| 94 | amdgpu_ring_free_size(ring); | ||
| 95 | ndw = (ndw + ring->align_mask) & ~ring->align_mask; | ||
| 96 | while (ndw > (ring->ring_free_dw - 1)) { | ||
| 97 | amdgpu_ring_free_size(ring); | ||
| 98 | if (ndw < ring->ring_free_dw) { | ||
| 99 | break; | ||
| 100 | } | ||
| 101 | r = amdgpu_fence_wait_next(ring); | ||
| 102 | if (r) | ||
| 103 | return r; | ||
| 104 | } | ||
| 105 | ring->count_dw = ndw; | ||
| 106 | ring->wptr_old = ring->wptr; | ||
| 107 | return 0; | ||
| 108 | } | ||
| 109 | |||
| 110 | /** | ||
| 111 | * amdgpu_ring_lock - lock the ring and allocate space on it | ||
| 112 | * | ||
| 113 | * @adev: amdgpu_device pointer | ||
| 114 | * @ring: amdgpu_ring structure holding ring information | ||
| 115 | * @ndw: number of dwords to allocate in the ring buffer | ||
| 116 | * | ||
| 117 | * Lock the ring and allocate @ndw dwords in the ring buffer | ||
| 118 | * (all asics). | ||
| 119 | * Returns 0 on success, error on failure. | ||
| 120 | */ | ||
| 121 | int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw) | ||
| 122 | { | ||
| 123 | int r; | ||
| 124 | |||
| 125 | mutex_lock(ring->ring_lock); | ||
| 126 | r = amdgpu_ring_alloc(ring, ndw); | ||
| 127 | if (r) { | ||
| 128 | mutex_unlock(ring->ring_lock); | ||
| 129 | return r; | ||
| 130 | } | ||
| 131 | return 0; | ||
| 132 | } | ||
| 133 | |||
| 134 | /** | ||
| 135 | * amdgpu_ring_commit - tell the GPU to execute the new | ||
| 136 | * commands on the ring buffer | ||
| 137 | * | ||
| 138 | * @adev: amdgpu_device pointer | ||
| 139 | * @ring: amdgpu_ring structure holding ring information | ||
| 140 | * | ||
| 141 | * Update the wptr (write pointer) to tell the GPU to | ||
| 142 | * execute new commands on the ring buffer (all asics). | ||
| 143 | */ | ||
| 144 | void amdgpu_ring_commit(struct amdgpu_ring *ring) | ||
| 145 | { | ||
| 146 | /* We pad to match fetch size */ | ||
| 147 | while (ring->wptr & ring->align_mask) { | ||
| 148 | amdgpu_ring_write(ring, ring->nop); | ||
| 149 | } | ||
| 150 | mb(); | ||
| 151 | amdgpu_ring_set_wptr(ring); | ||
| 152 | } | ||
| 153 | |||
| 154 | /** | ||
| 155 | * amdgpu_ring_unlock_commit - tell the GPU to execute the new | ||
| 156 | * commands on the ring buffer and unlock it | ||
| 157 | * | ||
| 158 | * @ring: amdgpu_ring structure holding ring information | ||
| 159 | * | ||
| 160 | * Call amdgpu_ring_commit() then unlock the ring (all asics). | ||
| 161 | */ | ||
| 162 | void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring) | ||
| 163 | { | ||
| 164 | amdgpu_ring_commit(ring); | ||
| 165 | mutex_unlock(ring->ring_lock); | ||
| 166 | } | ||
| 167 | |||
| 168 | /** | ||
| 169 | * amdgpu_ring_undo - reset the wptr | ||
| 170 | * | ||
| 171 | * @ring: amdgpu_ring structure holding ring information | ||
| 172 | * | ||
| 173 | * Reset the driver's copy of the wptr (all asics). | ||
| 174 | */ | ||
| 175 | void amdgpu_ring_undo(struct amdgpu_ring *ring) | ||
| 176 | { | ||
| 177 | ring->wptr = ring->wptr_old; | ||
| 178 | } | ||
| 179 | |||
| 180 | /** | ||
| 181 | * amdgpu_ring_unlock_undo - reset the wptr and unlock the ring | ||
| 182 | * | ||
| 183 | * @ring: amdgpu_ring structure holding ring information | ||
| 184 | * | ||
| 185 | * Call amdgpu_ring_undo() then unlock the ring (all asics). | ||
| 186 | */ | ||
| 187 | void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring) | ||
| 188 | { | ||
| 189 | amdgpu_ring_undo(ring); | ||
| 190 | mutex_unlock(ring->ring_lock); | ||
| 191 | } | ||
| 192 | |||
| 193 | /** | ||
| 194 | * amdgpu_ring_lockup_update - update lockup variables | ||
| 195 | * | ||
| 196 | * @ring: amdgpu_ring structure holding ring information | ||
| 197 | * | ||
| 198 | * Update the last rptr value and timestamp (all asics). | ||
| 199 | */ | ||
| 200 | void amdgpu_ring_lockup_update(struct amdgpu_ring *ring) | ||
| 201 | { | ||
| 202 | atomic_set(&ring->last_rptr, amdgpu_ring_get_rptr(ring)); | ||
| 203 | atomic64_set(&ring->last_activity, jiffies_64); | ||
| 204 | } | ||
| 205 | |||
| 206 | /** | ||
| 207 | * amdgpu_ring_test_lockup() - check if the ring is locked up by comparing against the last recorded rptr | ||
| 208 | * @ring: amdgpu_ring structure holding ring information | ||
| 209 | * | ||
| 210 | */ | ||
| 211 | bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring) | ||
| 212 | { | ||
| 213 | uint32_t rptr = amdgpu_ring_get_rptr(ring); | ||
| 214 | uint64_t last = atomic64_read(&ring->last_activity); | ||
| 215 | uint64_t elapsed; | ||
| 216 | |||
| 217 | if (rptr != atomic_read(&ring->last_rptr)) { | ||
| 218 | /* ring is still working, no lockup */ | ||
| 219 | amdgpu_ring_lockup_update(ring); | ||
| 220 | return false; | ||
| 221 | } | ||
| 222 | |||
| 223 | elapsed = jiffies_to_msecs(jiffies_64 - last); | ||
| 224 | if (amdgpu_lockup_timeout && elapsed >= amdgpu_lockup_timeout) { | ||
| 225 | dev_err(ring->adev->dev, "ring %d stalled for more than %llu msec\n", | ||
| 226 | ring->idx, elapsed); | ||
| 227 | return true; | ||
| 228 | } | ||
| 229 | /* give a chance to the GPU ... */ | ||
| 230 | return false; | ||
| 231 | } | ||
| 232 | |||
| 233 | /** | ||
| 234 | * amdgpu_ring_backup - Back up the content of a ring | ||
| 235 | * | ||
| 236 | * @ring: the ring we want to back up | ||
| 237 | * | ||
| 238 | * Saves all unprocessed commits from a ring; returns the number of dwords saved. | ||
| 239 | */ | ||
| 240 | unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, | ||
| 241 | uint32_t **data) | ||
| 242 | { | ||
| 243 | unsigned size, ptr, i; | ||
| 244 | |||
| 245 | /* just in case lock the ring */ | ||
| 246 | mutex_lock(ring->ring_lock); | ||
| 247 | *data = NULL; | ||
| 248 | |||
| 249 | if (ring->ring_obj == NULL) { | ||
| 250 | mutex_unlock(ring->ring_lock); | ||
| 251 | return 0; | ||
| 252 | } | ||
| 253 | |||
| 254 | /* it doesn't make sense to save anything if all fences are signaled */ | ||
| 255 | if (!amdgpu_fence_count_emitted(ring)) { | ||
| 256 | mutex_unlock(ring->ring_lock); | ||
| 257 | return 0; | ||
| 258 | } | ||
| 259 | |||
| 260 | ptr = le32_to_cpu(*ring->next_rptr_cpu_addr); | ||
| 261 | |||
| 262 | size = ring->wptr + (ring->ring_size / 4); | ||
| 263 | size -= ptr; | ||
| 264 | size &= ring->ptr_mask; | ||
| 265 | if (size == 0) { | ||
| 266 | mutex_unlock(ring->ring_lock); | ||
| 267 | return 0; | ||
| 268 | } | ||
| 269 | |||
| 270 | /* and then save the content of the ring */ | ||
| 271 | *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); | ||
| 272 | if (!*data) { | ||
| 273 | mutex_unlock(ring->ring_lock); | ||
| 274 | return 0; | ||
| 275 | } | ||
| 276 | for (i = 0; i < size; ++i) { | ||
| 277 | (*data)[i] = ring->ring[ptr++]; | ||
| 278 | ptr &= ring->ptr_mask; | ||
| 279 | } | ||
| 280 | |||
| 281 | mutex_unlock(ring->ring_lock); | ||
| 282 | return size; | ||
| 283 | } | ||
| 284 | |||
| 285 | /** | ||
| 286 | * amdgpu_ring_restore - append saved commands to the ring again | ||
| 287 | * | ||
| 288 | * @ring: ring to append commands to | ||
| 289 | * @size: number of dwords we want to write | ||
| 290 | * @data: saved commands | ||
| 291 | * | ||
| 292 | * Allocates space on the ring and restores the previously saved commands. | ||
| 293 | */ | ||
| 294 | int amdgpu_ring_restore(struct amdgpu_ring *ring, | ||
| 295 | unsigned size, uint32_t *data) | ||
| 296 | { | ||
| 297 | int i, r; | ||
| 298 | |||
| 299 | if (!size || !data) | ||
| 300 | return 0; | ||
| 301 | |||
| 302 | /* restore the saved ring content */ | ||
| 303 | r = amdgpu_ring_lock(ring, size); | ||
| 304 | if (r) | ||
| 305 | return r; | ||
| 306 | |||
| 307 | for (i = 0; i < size; ++i) { | ||
| 308 | amdgpu_ring_write(ring, data[i]); | ||
| 309 | } | ||
| 310 | |||
| 311 | amdgpu_ring_unlock_commit(ring); | ||
| 312 | kfree(data); | ||
| 313 | return 0; | ||
| 314 | } | ||
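Backup and restore bracket a GPU reset: amdgpu_ring_backup() snapshots everything between the last position the GPU reported fetching (next_rptr) and the host's wptr, and amdgpu_ring_restore() replays it once the ring is reinitialized. The span computation reuses the same wraparound arithmetic as the free-size helper:

    /* Span saved by amdgpu_ring_backup():
     *
     *   size = (wptr + ring_size/4 - next_rptr) & ptr_mask
     *
     * i.e. every dword the host wrote that the GPU has not yet
     * fetched; a zero span, or no unsignaled fences, means there
     * is nothing worth replaying. */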
| 315 | |||
| 316 | /** | ||
| 317 | * amdgpu_ring_init - init driver ring struct. | ||
| 318 | * | ||
| 319 | * @adev: amdgpu_device pointer | ||
| 320 | * @ring: amdgpu_ring structure holding ring information | ||
| 321 | * @ring_size: size of the ring | ||
| 322 | * @nop: nop packet for this ring | ||
| 323 | * | ||
| 324 | * Initialize the driver information for the selected ring (all asics). | ||
| 325 | * Returns 0 on success, error on failure. | ||
| 326 | */ | ||
| 327 | int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | ||
| 328 | unsigned ring_size, u32 nop, u32 align_mask, | ||
| 329 | struct amdgpu_irq_src *irq_src, unsigned irq_type, | ||
| 330 | enum amdgpu_ring_type ring_type) | ||
| 331 | { | ||
| 332 | u32 rb_bufsz; | ||
| 333 | int r; | ||
| 334 | |||
| 335 | if (ring->adev == NULL) { | ||
| 336 | if (adev->num_rings >= AMDGPU_MAX_RINGS) | ||
| 337 | return -EINVAL; | ||
| 338 | |||
| 339 | ring->adev = adev; | ||
| 340 | ring->idx = adev->num_rings++; | ||
| 341 | adev->rings[ring->idx] = ring; | ||
| 342 | amdgpu_fence_driver_init_ring(ring); | ||
| 343 | } | ||
| 344 | |||
| 345 | r = amdgpu_wb_get(adev, &ring->rptr_offs); | ||
| 346 | if (r) { | ||
| 347 | dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); | ||
| 348 | return r; | ||
| 349 | } | ||
| 350 | |||
| 351 | r = amdgpu_wb_get(adev, &ring->wptr_offs); | ||
| 352 | if (r) { | ||
| 353 | dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); | ||
| 354 | return r; | ||
| 355 | } | ||
| 356 | |||
| 357 | r = amdgpu_wb_get(adev, &ring->fence_offs); | ||
| 358 | if (r) { | ||
| 359 | dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); | ||
| 360 | return r; | ||
| 361 | } | ||
| 362 | |||
| 363 | r = amdgpu_wb_get(adev, &ring->next_rptr_offs); | ||
| 364 | if (r) { | ||
| 365 | dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r); | ||
| 366 | return r; | ||
| 367 | } | ||
| 368 | ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4); | ||
| 369 | ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs]; | ||
| 370 | |||
| 371 | r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type); | ||
| 372 | if (r) { | ||
| 373 | dev_err(adev->dev, "failed initializing fences (%d).\n", r); | ||
| 374 | return r; | ||
| 375 | } | ||
| 376 | |||
| 377 | ring->ring_lock = &adev->ring_lock; | ||
| 378 | /* Align ring size */ | ||
| 379 | rb_bufsz = order_base_2(ring_size / 8); | ||
| 380 | ring_size = (1 << (rb_bufsz + 1)) * 4; | ||
| 381 | ring->ring_size = ring_size; | ||
| 382 | ring->align_mask = align_mask; | ||
| 383 | ring->nop = nop; | ||
| 384 | ring->type = ring_type; | ||
| 385 | |||
| 386 | /* Allocate ring buffer */ | ||
| 387 | if (ring->ring_obj == NULL) { | ||
| 388 | r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true, | ||
| 389 | AMDGPU_GEM_DOMAIN_GTT, 0, | ||
| 390 | NULL, &ring->ring_obj); | ||
| 391 | if (r) { | ||
| 392 | dev_err(adev->dev, "(%d) ring create failed\n", r); | ||
| 393 | return r; | ||
| 394 | } | ||
| 395 | r = amdgpu_bo_reserve(ring->ring_obj, false); | ||
| 396 | if (unlikely(r != 0)) | ||
| 397 | return r; | ||
| 398 | r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT, | ||
| 399 | &ring->gpu_addr); | ||
| 400 | if (r) { | ||
| 401 | amdgpu_bo_unreserve(ring->ring_obj); | ||
| 402 | dev_err(adev->dev, "(%d) ring pin failed\n", r); | ||
| 403 | return r; | ||
| 404 | } | ||
| 405 | r = amdgpu_bo_kmap(ring->ring_obj, | ||
| 406 | (void **)&ring->ring); | ||
| 407 | amdgpu_bo_unreserve(ring->ring_obj); | ||
| 408 | if (r) { | ||
| 409 | dev_err(adev->dev, "(%d) ring map failed\n", r); | ||
| 410 | return r; | ||
| 411 | } | ||
| 412 | } | ||
| 413 | ring->ptr_mask = (ring->ring_size / 4) - 1; | ||
| 414 | ring->ring_free_dw = ring->ring_size / 4; | ||
| 415 | |||
| 416 | if (amdgpu_debugfs_ring_init(adev, ring)) { | ||
| 417 | DRM_ERROR("Failed to register debugfs file for rings!\n"); | ||
| 418 | } | ||
| 419 | amdgpu_ring_lockup_update(ring); | ||
| 420 | return 0; | ||
| 421 | } | ||
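The sizing arithmetic in amdgpu_ring_init() deserves a second look: order_base_2() is a ceiling log2, so the two lines together round the requested byte size up to a power of two, which is what makes ptr_mask = (ring_size / 4) - 1 a valid wraparound mask. A small host-side restatement with worked numbers (assumed equivalent, not the kernel helper itself):

    /* Host-side restatement of the alignment above (illustrative):
     *   requested 65536  -> 65536/8 = 8192,  order 13, (1<<14)*4 = 65536
     *   requested 100000 -> 100000/8 = 12500, order 14, (1<<15)*4 = 131072 */
    #include <stdint.h>

    static uint32_t align_ring_size(uint32_t bytes)
    {
            uint32_t order = 0;

            while ((1u << order) < bytes / 8)   /* order_base_2(bytes / 8) */
                    order++;
            return (1u << (order + 1)) * 4;
    }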
| 422 | |||
| 423 | /** | ||
| 424 | * amdgpu_ring_fini - tear down the driver ring struct. | ||
| 425 | * | ||
| 426 | * @adev: amdgpu_device pointer | ||
| 427 | * @ring: amdgpu_ring structure holding ring information | ||
| 428 | * | ||
| 429 | * Tear down the driver information for the selected ring (all asics). | ||
| 430 | */ | ||
| 431 | void amdgpu_ring_fini(struct amdgpu_ring *ring) | ||
| 432 | { | ||
| 433 | int r; | ||
| 434 | struct amdgpu_bo *ring_obj; | ||
| 435 | |||
| 436 | if (ring->ring_lock == NULL) | ||
| 437 | return; | ||
| 438 | |||
| 439 | mutex_lock(ring->ring_lock); | ||
| 440 | ring_obj = ring->ring_obj; | ||
| 441 | ring->ready = false; | ||
| 442 | ring->ring = NULL; | ||
| 443 | ring->ring_obj = NULL; | ||
| 444 | mutex_unlock(ring->ring_lock); | ||
| 445 | |||
| 446 | amdgpu_wb_free(ring->adev, ring->fence_offs); | ||
| 447 | amdgpu_wb_free(ring->adev, ring->rptr_offs); | ||
| 448 | amdgpu_wb_free(ring->adev, ring->wptr_offs); | ||
| 449 | amdgpu_wb_free(ring->adev, ring->next_rptr_offs); | ||
| 450 | |||
| 451 | if (ring_obj) { | ||
| 452 | r = amdgpu_bo_reserve(ring_obj, false); | ||
| 453 | if (likely(r == 0)) { | ||
| 454 | amdgpu_bo_kunmap(ring_obj); | ||
| 455 | amdgpu_bo_unpin(ring_obj); | ||
| 456 | amdgpu_bo_unreserve(ring_obj); | ||
| 457 | } | ||
| 458 | amdgpu_bo_unref(&ring_obj); | ||
| 459 | } | ||
| 460 | } | ||
| 461 | |||
| 462 | /* | ||
| 463 | * Debugfs info | ||
| 464 | */ | ||
| 465 | #if defined(CONFIG_DEBUG_FS) | ||
| 466 | |||
| 467 | static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data) | ||
| 468 | { | ||
| 469 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
| 470 | struct drm_device *dev = node->minor->dev; | ||
| 471 | struct amdgpu_device *adev = dev->dev_private; | ||
| 472 | int roffset = *(int*)node->info_ent->data; | ||
| 473 | struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset); | ||
| 474 | |||
| 475 | uint32_t rptr, wptr, rptr_next; | ||
| 476 | unsigned count, i, j; | ||
| 477 | |||
| 478 | amdgpu_ring_free_size(ring); | ||
| 479 | count = (ring->ring_size / 4) - ring->ring_free_dw; | ||
| 480 | |||
| 481 | wptr = amdgpu_ring_get_wptr(ring); | ||
| 482 | seq_printf(m, "wptr: 0x%08x [%5d]\n", | ||
| 483 | wptr, wptr); | ||
| 484 | |||
| 485 | rptr = amdgpu_ring_get_rptr(ring); | ||
| 486 | seq_printf(m, "rptr: 0x%08x [%5d]\n", | ||
| 487 | rptr, rptr); | ||
| 488 | |||
| 489 | rptr_next = ~0; | ||
| 490 | |||
| 491 | seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", | ||
| 492 | ring->wptr, ring->wptr); | ||
| 493 | seq_printf(m, "last semaphore signal addr : 0x%016llx\n", | ||
| 494 | ring->last_semaphore_signal_addr); | ||
| 495 | seq_printf(m, "last semaphore wait addr : 0x%016llx\n", | ||
| 496 | ring->last_semaphore_wait_addr); | ||
| 497 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); | ||
| 498 | seq_printf(m, "%u dwords in ring\n", count); | ||
| 499 | |||
| 500 | if (!ring->ready) | ||
| 501 | return 0; | ||
| 502 | |||
| 503 | /* print 32 dw before current rptr as often it's the last executed | ||
| 504 | * packet that is the root issue | ||
| 505 | */ | ||
| 506 | i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; | ||
| 507 | for (j = 0; j <= (count + 32); j++) { | ||
| 508 | seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); | ||
| 509 | if (rptr == i) | ||
| 510 | seq_puts(m, " *"); | ||
| 511 | if (rptr_next == i) | ||
| 512 | seq_puts(m, " #"); | ||
| 513 | seq_puts(m, "\n"); | ||
| 514 | i = (i + 1) & ring->ptr_mask; | ||
| 515 | } | ||
| 516 | return 0; | ||
| 517 | } | ||
| 518 | |||
| 519 | /* TODO: clean this up! */ | ||
| 520 | static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]); | ||
| 521 | static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]); | ||
| 522 | static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]); | ||
| 523 | static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma[0].ring); | ||
| 524 | static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma[1].ring); | ||
| 525 | static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring); | ||
| 526 | static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]); | ||
| 527 | static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]); | ||
| 528 | |||
| 529 | static struct drm_info_list amdgpu_debugfs_ring_info_list[] = { | ||
| 530 | {"amdgpu_ring_gfx", amdgpu_debugfs_ring_info, 0, &amdgpu_gfx_index}, | ||
| 531 | {"amdgpu_ring_cp1", amdgpu_debugfs_ring_info, 0, &cayman_cp1_index}, | ||
| 532 | {"amdgpu_ring_cp2", amdgpu_debugfs_ring_info, 0, &cayman_cp2_index}, | ||
| 533 | {"amdgpu_ring_dma1", amdgpu_debugfs_ring_info, 0, &amdgpu_dma1_index}, | ||
| 534 | {"amdgpu_ring_dma2", amdgpu_debugfs_ring_info, 0, &amdgpu_dma2_index}, | ||
| 535 | {"amdgpu_ring_uvd", amdgpu_debugfs_ring_info, 0, &r600_uvd_index}, | ||
| 536 | {"amdgpu_ring_vce1", amdgpu_debugfs_ring_info, 0, &si_vce1_index}, | ||
| 537 | {"amdgpu_ring_vce2", amdgpu_debugfs_ring_info, 0, &si_vce2_index}, | ||
| 538 | }; | ||
| 539 | |||
| 540 | #endif | ||
| 541 | |||
| 542 | static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring) | ||
| 543 | { | ||
| 544 | #if defined(CONFIG_DEBUG_FS) | ||
| 545 | unsigned i; | ||
| 546 | for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) { | ||
| 547 | struct drm_info_list *info = &amdgpu_debugfs_ring_info_list[i]; | ||
| 548 | int roffset = *(int*)amdgpu_debugfs_ring_info_list[i].data; | ||
| 549 | struct amdgpu_ring *other = (void *)(((uint8_t*)adev) + roffset); | ||
| 550 | unsigned r; | ||
| 551 | |||
| 552 | if (other != ring) | ||
| 553 | continue; | ||
| 554 | |||
| 555 | r = amdgpu_debugfs_add_files(adev, info, 1); | ||
| 556 | if (r) | ||
| 557 | return r; | ||
| 558 | } | ||
| 559 | #endif | ||
| 560 | return 0; | ||
| 561 | } | ||
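For reference, the sizing math in amdgpu_ring_init() above rounds the requested ring up to a power of two so that dword indices wrap with a simple mask instead of a compare. A minimal userspace model of that arithmetic (illustrative only, not driver code; the kernel's order_base_2() is approximated here with a loop):

#include <stdio.h>

/* ceil(log2(n)), standing in for the kernel's order_base_2() */
static unsigned order_base_2(unsigned n)
{
        unsigned o = 0;
        while ((1u << o) < n)
                ++o;
        return o;
}

int main(void)
{
        unsigned ring_size = 100000;            /* requested bytes (made up) */
        unsigned rb_bufsz = order_base_2(ring_size / 8);
        unsigned ptr_mask;

        ring_size = (1u << (rb_bufsz + 1)) * 4; /* power-of-two byte size */
        ptr_mask = ring_size / 4 - 1;           /* mask for dword indices */
        printf("aligned to %u bytes, ptr_mask 0x%x\n", ring_size, ptr_mask);

        /* advancing an index wraps for free, as in the debugfs dump above */
        printf("wrap: %u -> %u\n", ptr_mask, (ptr_mask + 1) & ptr_mask);
        return 0;
}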
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c new file mode 100644 index 000000000000..eb20987ce18d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | |||
| @@ -0,0 +1,419 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2011 Red Hat Inc. | ||
| 3 | * All Rights Reserved. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the | ||
| 7 | * "Software"), to deal in the Software without restriction, including | ||
| 8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 10 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 11 | * the following conditions: | ||
| 12 | * | ||
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 20 | * | ||
| 21 | * The above copyright notice and this permission notice (including the | ||
| 22 | * next paragraph) shall be included in all copies or substantial portions | ||
| 23 | * of the Software. | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | /* | ||
| 27 | * Authors: | ||
| 28 | * Jerome Glisse <glisse@freedesktop.org> | ||
| 29 | */ | ||
| 30 | /* Algorithm: | ||
| 31 | * | ||
| 32 | * We store the last allocated bo in "hole"; we always try to allocate | ||
| 33 | * after the last allocated bo. The principle is that in a linear GPU ring | ||
| 34 | * progression, what is after "last" is the oldest bo we allocated and thus | ||
| 35 | * the first one that should no longer be in use by the GPU. | ||
| 36 | * | ||
| 37 | * If that is not the case, we skip over the bo after "last" to the closest | ||
| 38 | * completed bo, if one exists. If none exists and we are not asked to | ||
| 39 | * block, we report failure to allocate. | ||
| 40 | * | ||
| 41 | * If we are asked to block, we wait on the oldest fence of each ring | ||
| 42 | * and return as soon as any one of them completes. | ||
| 43 | */ | ||
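A minimal userspace model of the fit test this allocator applies to the current hole, mirroring the arithmetic of amdgpu_sa_bo_try_alloc() below (the offsets in main() are made-up values):

#include <stdbool.h>
#include <stdio.h>

/* does [soffset, eoffset) hold `size' bytes at `align' alignment? */
static bool hole_fits(unsigned soffset, unsigned eoffset,
                      unsigned size, unsigned align)
{
        unsigned wasted = (align - (soffset % align)) % align;

        return (eoffset - soffset) >= (size + wasted);
}

int main(void)
{
        /* hole [100, 356), 240 bytes: aligned start 128 -> 368 > 356 */
        printf("align 64: %s\n", hole_fits(100, 356, 240, 64) ? "fits" : "no");
        /* aligned start 112 -> 352 <= 356 */
        printf("align 16: %s\n", hole_fits(100, 356, 240, 16) ? "fits" : "no");
        return 0;
}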
| 44 | #include <drm/drmP.h> | ||
| 45 | #include "amdgpu.h" | ||
| 46 | |||
| 47 | static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo); | ||
| 48 | static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager); | ||
| 49 | |||
| 50 | int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, | ||
| 51 | struct amdgpu_sa_manager *sa_manager, | ||
| 52 | unsigned size, u32 align, u32 domain) | ||
| 53 | { | ||
| 54 | int i, r; | ||
| 55 | |||
| 56 | init_waitqueue_head(&sa_manager->wq); | ||
| 57 | sa_manager->bo = NULL; | ||
| 58 | sa_manager->size = size; | ||
| 59 | sa_manager->domain = domain; | ||
| 60 | sa_manager->align = align; | ||
| 61 | sa_manager->hole = &sa_manager->olist; | ||
| 62 | INIT_LIST_HEAD(&sa_manager->olist); | ||
| 63 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 64 | INIT_LIST_HEAD(&sa_manager->flist[i]); | ||
| 65 | } | ||
| 66 | |||
| 67 | r = amdgpu_bo_create(adev, size, align, true, | ||
| 68 | domain, 0, NULL, &sa_manager->bo); | ||
| 69 | if (r) { | ||
| 70 | dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); | ||
| 71 | return r; | ||
| 72 | } | ||
| 73 | |||
| 74 | return r; | ||
| 75 | } | ||
| 76 | |||
| 77 | void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev, | ||
| 78 | struct amdgpu_sa_manager *sa_manager) | ||
| 79 | { | ||
| 80 | struct amdgpu_sa_bo *sa_bo, *tmp; | ||
| 81 | |||
| 82 | if (!list_empty(&sa_manager->olist)) { | ||
| 83 | sa_manager->hole = &sa_manager->olist; | ||
| 84 | amdgpu_sa_bo_try_free(sa_manager); | ||
| 85 | if (!list_empty(&sa_manager->olist)) { | ||
| 86 | dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n"); | ||
| 87 | } | ||
| 88 | } | ||
| 89 | list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) { | ||
| 90 | amdgpu_sa_bo_remove_locked(sa_bo); | ||
| 91 | } | ||
| 92 | amdgpu_bo_unref(&sa_manager->bo); | ||
| 93 | sa_manager->size = 0; | ||
| 94 | } | ||
| 95 | |||
| 96 | int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, | ||
| 97 | struct amdgpu_sa_manager *sa_manager) | ||
| 98 | { | ||
| 99 | int r; | ||
| 100 | |||
| 101 | if (sa_manager->bo == NULL) { | ||
| 102 | dev_err(adev->dev, "no bo for sa manager\n"); | ||
| 103 | return -EINVAL; | ||
| 104 | } | ||
| 105 | |||
| 106 | /* map the buffer */ | ||
| 107 | r = amdgpu_bo_reserve(sa_manager->bo, false); | ||
| 108 | if (r) { | ||
| 109 | dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r); | ||
| 110 | return r; | ||
| 111 | } | ||
| 112 | r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr); | ||
| 113 | if (r) { | ||
| 114 | amdgpu_bo_unreserve(sa_manager->bo); | ||
| 115 | dev_err(adev->dev, "(%d) failed to pin manager bo\n", r); | ||
| 116 | return r; | ||
| 117 | } | ||
| 118 | r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); | ||
| 119 | amdgpu_bo_unreserve(sa_manager->bo); | ||
| 120 | return r; | ||
| 121 | } | ||
| 122 | |||
| 123 | int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev, | ||
| 124 | struct amdgpu_sa_manager *sa_manager) | ||
| 125 | { | ||
| 126 | int r; | ||
| 127 | |||
| 128 | if (sa_manager->bo == NULL) { | ||
| 129 | dev_err(adev->dev, "no bo for sa manager\n"); | ||
| 130 | return -EINVAL; | ||
| 131 | } | ||
| 132 | |||
| 133 | r = amdgpu_bo_reserve(sa_manager->bo, false); | ||
| 134 | if (!r) { | ||
| 135 | amdgpu_bo_kunmap(sa_manager->bo); | ||
| 136 | amdgpu_bo_unpin(sa_manager->bo); | ||
| 137 | amdgpu_bo_unreserve(sa_manager->bo); | ||
| 138 | } | ||
| 139 | return r; | ||
| 140 | } | ||
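The start/suspend pair above follows the standard bo lifecycle idiom used throughout this patch; a hedged outline of the pairing (error handling elided, identifiers as in the functions above):

        int r;

        /* bring-up: lock, fix GPU placement, map for the CPU, unlock */
        r = amdgpu_bo_reserve(bo, false);
        r = amdgpu_bo_pin(bo, domain, &gpu_addr);
        r = amdgpu_bo_kmap(bo, &cpu_ptr);
        amdgpu_bo_unreserve(bo);

        /* teardown mirrors it in reverse, again under the reservation */
        r = amdgpu_bo_reserve(bo, false);
        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unpin(bo);
        amdgpu_bo_unreserve(bo);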
| 141 | |||
| 142 | static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo) | ||
| 143 | { | ||
| 144 | struct amdgpu_sa_manager *sa_manager = sa_bo->manager; | ||
| 145 | if (sa_manager->hole == &sa_bo->olist) { | ||
| 146 | sa_manager->hole = sa_bo->olist.prev; | ||
| 147 | } | ||
| 148 | list_del_init(&sa_bo->olist); | ||
| 149 | list_del_init(&sa_bo->flist); | ||
| 150 | amdgpu_fence_unref(&sa_bo->fence); | ||
| 151 | kfree(sa_bo); | ||
| 152 | } | ||
| 153 | |||
| 154 | static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager) | ||
| 155 | { | ||
| 156 | struct amdgpu_sa_bo *sa_bo, *tmp; | ||
| 157 | |||
| 158 | if (sa_manager->hole->next == &sa_manager->olist) | ||
| 159 | return; | ||
| 160 | |||
| 161 | sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist); | ||
| 162 | list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) { | ||
| 163 | if (sa_bo->fence == NULL || !amdgpu_fence_signaled(sa_bo->fence)) { | ||
| 164 | return; | ||
| 165 | } | ||
| 166 | amdgpu_sa_bo_remove_locked(sa_bo); | ||
| 167 | } | ||
| 168 | } | ||
| 169 | |||
| 170 | static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager) | ||
| 171 | { | ||
| 172 | struct list_head *hole = sa_manager->hole; | ||
| 173 | |||
| 174 | if (hole != &sa_manager->olist) { | ||
| 175 | return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset; | ||
| 176 | } | ||
| 177 | return 0; | ||
| 178 | } | ||
| 179 | |||
| 180 | static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager) | ||
| 181 | { | ||
| 182 | struct list_head *hole = sa_manager->hole; | ||
| 183 | |||
| 184 | if (hole->next != &sa_manager->olist) { | ||
| 185 | return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset; | ||
| 186 | } | ||
| 187 | return sa_manager->size; | ||
| 188 | } | ||
| 189 | |||
| 190 | static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager, | ||
| 191 | struct amdgpu_sa_bo *sa_bo, | ||
| 192 | unsigned size, unsigned align) | ||
| 193 | { | ||
| 194 | unsigned soffset, eoffset, wasted; | ||
| 195 | |||
| 196 | soffset = amdgpu_sa_bo_hole_soffset(sa_manager); | ||
| 197 | eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager); | ||
| 198 | wasted = (align - (soffset % align)) % align; | ||
| 199 | |||
| 200 | if ((eoffset - soffset) >= (size + wasted)) { | ||
| 201 | soffset += wasted; | ||
| 202 | |||
| 203 | sa_bo->manager = sa_manager; | ||
| 204 | sa_bo->soffset = soffset; | ||
| 205 | sa_bo->eoffset = soffset + size; | ||
| 206 | list_add(&sa_bo->olist, sa_manager->hole); | ||
| 207 | INIT_LIST_HEAD(&sa_bo->flist); | ||
| 208 | sa_manager->hole = &sa_bo->olist; | ||
| 209 | return true; | ||
| 210 | } | ||
| 211 | return false; | ||
| 212 | } | ||
| 213 | |||
| 214 | /** | ||
| 215 | * amdgpu_sa_event - Check if we can stop waiting | ||
| 216 | * | ||
| 217 | * @sa_manager: pointer to the sa_manager | ||
| 218 | * @size: number of bytes we want to allocate | ||
| 219 | * @align: alignment we need to match | ||
| 220 | * | ||
| 221 | * Check if either there is a fence we can wait for or | ||
| 222 | * enough free memory to satisfy the allocation directly | ||
| 223 | */ | ||
| 224 | static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager, | ||
| 225 | unsigned size, unsigned align) | ||
| 226 | { | ||
| 227 | unsigned soffset, eoffset, wasted; | ||
| 228 | int i; | ||
| 229 | |||
| 230 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 231 | if (!list_empty(&sa_manager->flist[i])) { | ||
| 232 | return true; | ||
| 233 | } | ||
| 234 | } | ||
| 235 | |||
| 236 | soffset = amdgpu_sa_bo_hole_soffset(sa_manager); | ||
| 237 | eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager); | ||
| 238 | wasted = (align - (soffset % align)) % align; | ||
| 239 | |||
| 240 | if ((eoffset - soffset) >= (size + wasted)) { | ||
| 241 | return true; | ||
| 242 | } | ||
| 243 | |||
| 244 | return false; | ||
| 245 | } | ||
| 246 | |||
| 247 | static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, | ||
| 248 | struct amdgpu_fence **fences, | ||
| 249 | unsigned *tries) | ||
| 250 | { | ||
| 251 | struct amdgpu_sa_bo *best_bo = NULL; | ||
| 252 | unsigned i, soffset, best, tmp; | ||
| 253 | |||
| 254 | /* if hole points to the end of the buffer */ | ||
| 255 | if (sa_manager->hole->next == &sa_manager->olist) { | ||
| 256 | /* try again with its beginning */ | ||
| 257 | sa_manager->hole = &sa_manager->olist; | ||
| 258 | return true; | ||
| 259 | } | ||
| 260 | |||
| 261 | soffset = amdgpu_sa_bo_hole_soffset(sa_manager); | ||
| 262 | /* to handle wrap around we add sa_manager->size */ | ||
| 263 | best = sa_manager->size * 2; | ||
| 264 | /* go over all fence lists and try to find the closest sa_bo | ||
| 265 | * after the current hole | ||
| 266 | */ | ||
| 267 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 268 | struct amdgpu_sa_bo *sa_bo; | ||
| 269 | |||
| 270 | if (list_empty(&sa_manager->flist[i])) { | ||
| 271 | continue; | ||
| 272 | } | ||
| 273 | |||
| 274 | sa_bo = list_first_entry(&sa_manager->flist[i], | ||
| 275 | struct amdgpu_sa_bo, flist); | ||
| 276 | |||
| 277 | if (!amdgpu_fence_signaled(sa_bo->fence)) { | ||
| 278 | fences[i] = sa_bo->fence; | ||
| 279 | continue; | ||
| 280 | } | ||
| 281 | |||
| 282 | /* limit the number of tries each ring gets */ | ||
| 283 | if (tries[i] > 2) { | ||
| 284 | continue; | ||
| 285 | } | ||
| 286 | |||
| 287 | tmp = sa_bo->soffset; | ||
| 288 | if (tmp < soffset) { | ||
| 289 | /* wrap around, pretend it's after */ | ||
| 290 | tmp += sa_manager->size; | ||
| 291 | } | ||
| 292 | tmp -= soffset; | ||
| 293 | if (tmp < best) { | ||
| 294 | /* this sa bo is the closest one */ | ||
| 295 | best = tmp; | ||
| 296 | best_bo = sa_bo; | ||
| 297 | } | ||
| 298 | } | ||
| 299 | |||
| 300 | if (best_bo) { | ||
| 301 | ++tries[best_bo->fence->ring->idx]; | ||
| 302 | sa_manager->hole = best_bo->olist.prev; | ||
| 303 | |||
| 304 | /* we know that this one is signaled, | ||
| 305 | so it's safe to remove it */ | ||
| 306 | amdgpu_sa_bo_remove_locked(best_bo); | ||
| 307 | return true; | ||
| 308 | } | ||
| 309 | return false; | ||
| 310 | } | ||
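A userspace model of the wrap-around distance computed in the loop above: the buffer is treated as a circle of `size' bytes, and a candidate that starts before the hole is pushed one lap forward before measuring (values in main() are illustrative):

#include <stdio.h>

static unsigned ring_distance(unsigned from, unsigned to, unsigned size)
{
        if (to < from)
                to += size;     /* wrapped around: pretend it's after */
        return to - from;
}

int main(void)
{
        printf("%u\n", ring_distance(3000, 3500, 4096));  /* 500  */
        printf("%u\n", ring_distance(3000,  200, 4096));  /* 1296 */
        return 0;
}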
| 311 | |||
| 312 | int amdgpu_sa_bo_new(struct amdgpu_device *adev, | ||
| 313 | struct amdgpu_sa_manager *sa_manager, | ||
| 314 | struct amdgpu_sa_bo **sa_bo, | ||
| 315 | unsigned size, unsigned align) | ||
| 316 | { | ||
| 317 | struct amdgpu_fence *fences[AMDGPU_MAX_RINGS]; | ||
| 318 | unsigned tries[AMDGPU_MAX_RINGS]; | ||
| 319 | int i, r; | ||
| 320 | |||
| 321 | BUG_ON(align > sa_manager->align); | ||
| 322 | BUG_ON(size > sa_manager->size); | ||
| 323 | |||
| 324 | *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL); | ||
| 325 | if ((*sa_bo) == NULL) { | ||
| 326 | return -ENOMEM; | ||
| 327 | } | ||
| 328 | (*sa_bo)->manager = sa_manager; | ||
| 329 | (*sa_bo)->fence = NULL; | ||
| 330 | INIT_LIST_HEAD(&(*sa_bo)->olist); | ||
| 331 | INIT_LIST_HEAD(&(*sa_bo)->flist); | ||
| 332 | |||
| 333 | spin_lock(&sa_manager->wq.lock); | ||
| 334 | do { | ||
| 335 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 336 | fences[i] = NULL; | ||
| 337 | tries[i] = 0; | ||
| 338 | } | ||
| 339 | |||
| 340 | do { | ||
| 341 | amdgpu_sa_bo_try_free(sa_manager); | ||
| 342 | |||
| 343 | if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo, | ||
| 344 | size, align)) { | ||
| 345 | spin_unlock(&sa_manager->wq.lock); | ||
| 346 | return 0; | ||
| 347 | } | ||
| 348 | |||
| 349 | /* see if we can skip over some allocations */ | ||
| 350 | } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries)); | ||
| 351 | |||
| 352 | spin_unlock(&sa_manager->wq.lock); | ||
| 353 | r = amdgpu_fence_wait_any(adev, fences, false); | ||
| 354 | spin_lock(&sa_manager->wq.lock); | ||
| 355 | /* if we have nothing to wait for, block */ | ||
| 356 | if (r == -ENOENT) { | ||
| 357 | r = wait_event_interruptible_locked( | ||
| 358 | sa_manager->wq, | ||
| 359 | amdgpu_sa_event(sa_manager, size, align) | ||
| 360 | ); | ||
| 361 | } | ||
| 362 | |||
| 363 | } while (!r); | ||
| 364 | |||
| 365 | spin_unlock(&sa_manager->wq.lock); | ||
| 366 | kfree(*sa_bo); | ||
| 367 | *sa_bo = NULL; | ||
| 368 | return r; | ||
| 369 | } | ||
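A hedged sketch of the call pattern a user of this allocator follows (the manager and fence are assumed to exist; amdgpu_semaphore_create() in amdgpu_semaphore.c is the in-tree example of exactly this sequence):

        struct amdgpu_sa_bo *sa_bo;
        int r;

        r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, &sa_bo, 8, 8);
        if (r)
                return r;       /* -ENOMEM, or an interrupted wait */

        /* ... use amdgpu_sa_bo_gpu_addr(sa_bo) / amdgpu_sa_bo_cpu_addr(sa_bo) ... */

        amdgpu_sa_bo_free(adev, &sa_bo, fence);  /* reclaimed once fence signals */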
| 370 | |||
| 371 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, | ||
| 372 | struct amdgpu_fence *fence) | ||
| 373 | { | ||
| 374 | struct amdgpu_sa_manager *sa_manager; | ||
| 375 | |||
| 376 | if (sa_bo == NULL || *sa_bo == NULL) { | ||
| 377 | return; | ||
| 378 | } | ||
| 379 | |||
| 380 | sa_manager = (*sa_bo)->manager; | ||
| 381 | spin_lock(&sa_manager->wq.lock); | ||
| 382 | if (fence && !amdgpu_fence_signaled(fence)) { | ||
| 383 | (*sa_bo)->fence = amdgpu_fence_ref(fence); | ||
| 384 | list_add_tail(&(*sa_bo)->flist, | ||
| 385 | &sa_manager->flist[fence->ring->idx]); | ||
| 386 | } else { | ||
| 387 | amdgpu_sa_bo_remove_locked(*sa_bo); | ||
| 388 | } | ||
| 389 | wake_up_all_locked(&sa_manager->wq); | ||
| 390 | spin_unlock(&sa_manager->wq.lock); | ||
| 391 | *sa_bo = NULL; | ||
| 392 | } | ||
| 393 | |||
| 394 | #if defined(CONFIG_DEBUG_FS) | ||
| 395 | void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, | ||
| 396 | struct seq_file *m) | ||
| 397 | { | ||
| 398 | struct amdgpu_sa_bo *i; | ||
| 399 | |||
| 400 | spin_lock(&sa_manager->wq.lock); | ||
| 401 | list_for_each_entry(i, &sa_manager->olist, olist) { | ||
| 402 | uint64_t soffset = i->soffset + sa_manager->gpu_addr; | ||
| 403 | uint64_t eoffset = i->eoffset + sa_manager->gpu_addr; | ||
| 404 | if (&i->olist == sa_manager->hole) { | ||
| 405 | seq_printf(m, ">"); | ||
| 406 | } else { | ||
| 407 | seq_printf(m, " "); | ||
| 408 | } | ||
| 409 | seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", | ||
| 410 | soffset, eoffset, eoffset - soffset); | ||
| 411 | if (i->fence) { | ||
| 412 | seq_printf(m, " protected by 0x%016llx on ring %d", | ||
| 413 | i->fence->seq, i->fence->ring->idx); | ||
| 414 | } | ||
| 415 | seq_printf(m, "\n"); | ||
| 416 | } | ||
| 417 | spin_unlock(&sa_manager->wq.lock); | ||
| 418 | } | ||
| 419 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c new file mode 100644 index 000000000000..d6d41a42ab65 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c | |||
| @@ -0,0 +1,102 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2011 Christian König. | ||
| 3 | * All Rights Reserved. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the | ||
| 7 | * "Software"), to deal in the Software without restriction, including | ||
| 8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 10 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 11 | * the following conditions: | ||
| 12 | * | ||
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 20 | * | ||
| 21 | * The above copyright notice and this permission notice (including the | ||
| 22 | * next paragraph) shall be included in all copies or substantial portions | ||
| 23 | * of the Software. | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | /* | ||
| 27 | * Authors: | ||
| 28 | * Christian König <deathsimple@vodafone.de> | ||
| 29 | */ | ||
| 30 | #include <drm/drmP.h> | ||
| 31 | #include "amdgpu.h" | ||
| 32 | #include "amdgpu_trace.h" | ||
| 33 | |||
| 34 | int amdgpu_semaphore_create(struct amdgpu_device *adev, | ||
| 35 | struct amdgpu_semaphore **semaphore) | ||
| 36 | { | ||
| 37 | int r; | ||
| 38 | |||
| 39 | *semaphore = kmalloc(sizeof(struct amdgpu_semaphore), GFP_KERNEL); | ||
| 40 | if (*semaphore == NULL) { | ||
| 41 | return -ENOMEM; | ||
| 42 | } | ||
| 43 | r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, | ||
| 44 | &(*semaphore)->sa_bo, 8, 8); | ||
| 45 | if (r) { | ||
| 46 | kfree(*semaphore); | ||
| 47 | *semaphore = NULL; | ||
| 48 | return r; | ||
| 49 | } | ||
| 50 | (*semaphore)->waiters = 0; | ||
| 51 | (*semaphore)->gpu_addr = amdgpu_sa_bo_gpu_addr((*semaphore)->sa_bo); | ||
| 52 | |||
| 53 | *((uint64_t *)amdgpu_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; | ||
| 54 | |||
| 55 | return 0; | ||
| 56 | } | ||
| 57 | |||
| 58 | bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring, | ||
| 59 | struct amdgpu_semaphore *semaphore) | ||
| 60 | { | ||
| 61 | trace_amdgpu_semaphore_signale(ring->idx, semaphore); | ||
| 62 | |||
| 63 | if (amdgpu_ring_emit_semaphore(ring, semaphore, false)) { | ||
| 64 | --semaphore->waiters; | ||
| 65 | |||
| 66 | /* for debugging lockup only, used by debugfs ring info files */ | ||
| 67 | ring->last_semaphore_signal_addr = semaphore->gpu_addr; | ||
| 68 | return true; | ||
| 69 | } | ||
| 70 | return false; | ||
| 71 | } | ||
| 72 | |||
| 73 | bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring, | ||
| 74 | struct amdgpu_semaphore *semaphore) | ||
| 75 | { | ||
| 76 | trace_amdgpu_semaphore_wait(ring->idx, semaphore); | ||
| 77 | |||
| 78 | if (amdgpu_ring_emit_semaphore(ring, semaphore, true)) { | ||
| 79 | ++semaphore->waiters; | ||
| 80 | |||
| 81 | /* for debugging lockup only, used by debugfs ring info files */ | ||
| 82 | ring->last_semaphore_wait_addr = semaphore->gpu_addr; | ||
| 83 | return true; | ||
| 84 | } | ||
| 85 | return false; | ||
| 86 | } | ||
| 87 | |||
| 88 | void amdgpu_semaphore_free(struct amdgpu_device *adev, | ||
| 89 | struct amdgpu_semaphore **semaphore, | ||
| 90 | struct amdgpu_fence *fence) | ||
| 91 | { | ||
| 92 | if (semaphore == NULL || *semaphore == NULL) { | ||
| 93 | return; | ||
| 94 | } | ||
| 95 | if ((*semaphore)->waiters > 0) { | ||
| 96 | dev_err(adev->dev, "semaphore %p has more waiters than signalers," | ||
| 97 | " hardware lockup imminent!\n", *semaphore); | ||
| 98 | } | ||
| 99 | amdgpu_sa_bo_free(adev, &(*semaphore)->sa_bo, fence); | ||
| 100 | kfree(*semaphore); | ||
| 101 | *semaphore = NULL; | ||
| 102 | } | ||
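The contract these helpers enforce: every emitted wait must eventually be matched by an emitted signal on another ring, or `waiters' stays positive and amdgpu_semaphore_free() warns of an imminent lockup. A hedged sketch of a balanced pairing (ring-space allocation and error handling elided; amdgpu_sync_rings() in amdgpu_sync.c below shows the complete sequence):

        struct amdgpu_semaphore *sem;

        amdgpu_semaphore_create(adev, &sem);
        amdgpu_semaphore_emit_wait(waiter_ring, sem);      /* ++waiters */
        amdgpu_semaphore_emit_signal(signaler_ring, sem);  /* --waiters */
        amdgpu_semaphore_free(adev, &sem, fence);          /* after fence */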
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c new file mode 100644 index 000000000000..855d56ac7115 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | |||
| @@ -0,0 +1,231 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * All Rights Reserved. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the | ||
| 7 | * "Software"), to deal in the Software without restriction, including | ||
| 8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 10 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 11 | * the following conditions: | ||
| 12 | * | ||
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 20 | * | ||
| 21 | * The above copyright notice and this permission notice (including the | ||
| 22 | * next paragraph) shall be included in all copies or substantial portions | ||
| 23 | * of the Software. | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | /* | ||
| 27 | * Authors: | ||
| 28 | * Christian König <christian.koenig@amd.com> | ||
| 29 | */ | ||
| 30 | |||
| 31 | #include <drm/drmP.h> | ||
| 32 | #include "amdgpu.h" | ||
| 33 | #include "amdgpu_trace.h" | ||
| 34 | |||
| 35 | /** | ||
| 36 | * amdgpu_sync_create - zero init sync object | ||
| 37 | * | ||
| 38 | * @sync: sync object to initialize | ||
| 39 | * | ||
| 40 | * Just clear the sync object for now. | ||
| 41 | */ | ||
| 42 | void amdgpu_sync_create(struct amdgpu_sync *sync) | ||
| 43 | { | ||
| 44 | unsigned i; | ||
| 45 | |||
| 46 | for (i = 0; i < AMDGPU_NUM_SYNCS; ++i) | ||
| 47 | sync->semaphores[i] = NULL; | ||
| 48 | |||
| 49 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) | ||
| 50 | sync->sync_to[i] = NULL; | ||
| 51 | |||
| 52 | sync->last_vm_update = NULL; | ||
| 53 | } | ||
| 54 | |||
| 55 | /** | ||
| 56 | * amdgpu_sync_fence - use the semaphore to sync to a fence | ||
| 57 | * | ||
| 58 | * @sync: sync object to add fence to | ||
| 59 | * @fence: fence to sync to | ||
| 60 | * | ||
| 61 | * Sync to the fence using the semaphore objects | ||
| 62 | */ | ||
| 63 | void amdgpu_sync_fence(struct amdgpu_sync *sync, | ||
| 64 | struct amdgpu_fence *fence) | ||
| 65 | { | ||
| 66 | struct amdgpu_fence *other; | ||
| 67 | |||
| 68 | if (!fence) | ||
| 69 | return; | ||
| 70 | |||
| 71 | other = sync->sync_to[fence->ring->idx]; | ||
| 72 | sync->sync_to[fence->ring->idx] = amdgpu_fence_ref( | ||
| 73 | amdgpu_fence_later(fence, other)); | ||
| 74 | amdgpu_fence_unref(&other); | ||
| 75 | |||
| 76 | if (fence->owner == AMDGPU_FENCE_OWNER_VM) { | ||
| 77 | other = sync->last_vm_update; | ||
| 78 | sync->last_vm_update = amdgpu_fence_ref( | ||
| 79 | amdgpu_fence_later(fence, other)); | ||
| 80 | amdgpu_fence_unref(&other); | ||
| 81 | } | ||
| 82 | } | ||
| 83 | |||
| 84 | /** | ||
| 85 | * amdgpu_sync_resv - use the semaphores to sync to a reservation object | ||
| 86 | * | ||
| 87 | * @sync: sync object to add fences from reservation object to | ||
| 88 | * @resv: reservation object with embedded fence | ||
| 89 | * @owner: fence owner whose fences we do not need to sync to | ||
| 90 | * | ||
| 91 | * Sync to the fence using the semaphore objects | ||
| 92 | */ | ||
| 93 | int amdgpu_sync_resv(struct amdgpu_device *adev, | ||
| 94 | struct amdgpu_sync *sync, | ||
| 95 | struct reservation_object *resv, | ||
| 96 | void *owner) | ||
| 97 | { | ||
| 98 | struct reservation_object_list *flist; | ||
| 99 | struct fence *f; | ||
| 100 | struct amdgpu_fence *fence; | ||
| 101 | unsigned i; | ||
| 102 | int r = 0; | ||
| 103 | |||
| 104 | /* always sync to the exclusive fence */ | ||
| 105 | f = reservation_object_get_excl(resv); | ||
| 106 | fence = f ? to_amdgpu_fence(f) : NULL; | ||
| 107 | if (fence && fence->ring->adev == adev) | ||
| 108 | amdgpu_sync_fence(sync, fence); | ||
| 109 | else if (f) | ||
| 110 | r = fence_wait(f, true); | ||
| 111 | |||
| 112 | flist = reservation_object_get_list(resv); | ||
| 113 | if (!flist || r) | ||
| 114 | return r; | ||
| 115 | |||
| 116 | for (i = 0; i < flist->shared_count; ++i) { | ||
| 117 | f = rcu_dereference_protected(flist->shared[i], | ||
| 118 | reservation_object_held(resv)); | ||
| 119 | fence = to_amdgpu_fence(f); | ||
| 120 | if (fence && fence->ring->adev == adev) { | ||
| 121 | if (fence->owner != owner || | ||
| 122 | fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED) | ||
| 123 | amdgpu_sync_fence(sync, fence); | ||
| 124 | } else { | ||
| 125 | r = fence_wait(f, true); | ||
| 126 | if (r) | ||
| 127 | break; | ||
| 128 | } | ||
| 129 | } | ||
| 130 | return r; | ||
| 131 | } | ||
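A userspace model of the per-fence decision made in the shared-fence loop above (illustrative only): fences from this device but another owner, or with an undefined owner, get a ring-level sync; same-owner fences are already ordered by the ring; foreign fences must be waited on by the CPU:

#include <stdbool.h>
#include <stdio.h>

enum action { SKIP, SEMAPHORE_SYNC, CPU_WAIT };

static enum action classify(bool same_device, bool same_owner, bool undefined)
{
        if (!same_device)
                return CPU_WAIT;        /* fence from another device */
        if (!same_owner || undefined)
                return SEMAPHORE_SYNC;  /* needs an explicit ring sync */
        return SKIP;                    /* ring ordering already suffices */
}

int main(void)
{
        printf("%d %d %d\n",
               classify(false, false, false),  /* 2: CPU_WAIT       */
               classify(true, false, false),   /* 1: SEMAPHORE_SYNC */
               classify(true, true, false));   /* 0: SKIP           */
        return 0;
}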
| 132 | |||
| 133 | /** | ||
| 134 | * amdgpu_sync_rings - sync ring to all registered fences | ||
| 135 | * | ||
| 136 | * @sync: sync object to use | ||
| 137 | * @ring: ring that needs sync | ||
| 138 | * | ||
| 139 | * Ensure that all registered fences are signaled before letting | ||
| 140 | * the ring continue. The caller must hold the ring lock. | ||
| 141 | */ | ||
| 142 | int amdgpu_sync_rings(struct amdgpu_sync *sync, | ||
| 143 | struct amdgpu_ring *ring) | ||
| 144 | { | ||
| 145 | struct amdgpu_device *adev = ring->adev; | ||
| 146 | unsigned count = 0; | ||
| 147 | int i, r; | ||
| 148 | |||
| 149 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 150 | struct amdgpu_fence *fence = sync->sync_to[i]; | ||
| 151 | struct amdgpu_semaphore *semaphore; | ||
| 152 | struct amdgpu_ring *other = adev->rings[i]; | ||
| 153 | |||
| 154 | /* check if we really need to sync */ | ||
| 155 | if (!amdgpu_fence_need_sync(fence, ring)) | ||
| 156 | continue; | ||
| 157 | |||
| 158 | /* prevent GPU deadlocks */ | ||
| 159 | if (!other->ready) { | ||
| 160 | dev_err(adev->dev, "Syncing to a disabled ring!"); | ||
| 161 | return -EINVAL; | ||
| 162 | } | ||
| 163 | |||
| 164 | if (count >= AMDGPU_NUM_SYNCS) { | ||
| 165 | /* not enough room, wait manually */ | ||
| 166 | r = amdgpu_fence_wait(fence, false); | ||
| 167 | if (r) | ||
| 168 | return r; | ||
| 169 | continue; | ||
| 170 | } | ||
| 171 | r = amdgpu_semaphore_create(adev, &semaphore); | ||
| 172 | if (r) | ||
| 173 | return r; | ||
| 174 | |||
| 175 | sync->semaphores[count++] = semaphore; | ||
| 176 | |||
| 177 | /* allocate enough space for sync command */ | ||
| 178 | r = amdgpu_ring_alloc(other, 16); | ||
| 179 | if (r) | ||
| 180 | return r; | ||
| 181 | |||
| 182 | /* emit the signal semaphore */ | ||
| 183 | if (!amdgpu_semaphore_emit_signal(other, semaphore)) { | ||
| 184 | /* signaling wasn't successful, wait manually */ | ||
| 185 | amdgpu_ring_undo(other); | ||
| 186 | r = amdgpu_fence_wait(fence, false); | ||
| 187 | if (r) | ||
| 188 | return r; | ||
| 189 | continue; | ||
| 190 | } | ||
| 191 | |||
| 192 | /* we assume the caller has already allocated space on the waiter's ring */ | ||
| 193 | if (!amdgpu_semaphore_emit_wait(ring, semaphore)) { | ||
| 194 | /* waiting wasn't successful, wait manually */ | ||
| 195 | amdgpu_ring_undo(other); | ||
| 196 | r = amdgpu_fence_wait(fence, false); | ||
| 197 | if (r) | ||
| 198 | return r; | ||
| 199 | continue; | ||
| 200 | } | ||
| 201 | |||
| 202 | amdgpu_ring_commit(other); | ||
| 203 | amdgpu_fence_note_sync(fence, ring); | ||
| 204 | } | ||
| 205 | |||
| 206 | return 0; | ||
| 207 | } | ||
| 208 | |||
| 209 | /** | ||
| 210 | * amdgpu_sync_free - free the sync object | ||
| 211 | * | ||
| 212 | * @adev: amdgpu_device pointer | ||
| 213 | * @sync: sync object to use | ||
| 214 | * @fence: fence to use for the free | ||
| 215 | * | ||
| 216 | * Free the sync object by freeing all semaphores in it. | ||
| 217 | */ | ||
| 218 | void amdgpu_sync_free(struct amdgpu_device *adev, | ||
| 219 | struct amdgpu_sync *sync, | ||
| 220 | struct amdgpu_fence *fence) | ||
| 221 | { | ||
| 222 | unsigned i; | ||
| 223 | |||
| 224 | for (i = 0; i < AMDGPU_NUM_SYNCS; ++i) | ||
| 225 | amdgpu_semaphore_free(adev, &sync->semaphores[i], fence); | ||
| 226 | |||
| 227 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) | ||
| 228 | amdgpu_fence_unref(&sync->sync_to[i]); | ||
| 229 | |||
| 230 | amdgpu_fence_unref(&sync->last_vm_update); | ||
| 231 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c new file mode 100644 index 000000000000..df202999fbfe --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | |||
| @@ -0,0 +1,552 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2009 VMware, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Michel Dänzer | ||
| 23 | */ | ||
| 24 | #include <drm/drmP.h> | ||
| 25 | #include <drm/amdgpu_drm.h> | ||
| 26 | #include "amdgpu.h" | ||
| 27 | #include "amdgpu_uvd.h" | ||
| 28 | #include "amdgpu_vce.h" | ||
| 29 | |||
| 30 | /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ | ||
| 31 | static void amdgpu_do_test_moves(struct amdgpu_device *adev) | ||
| 32 | { | ||
| 33 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; | ||
| 34 | struct amdgpu_bo *vram_obj = NULL; | ||
| 35 | struct amdgpu_bo **gtt_obj = NULL; | ||
| 36 | uint64_t gtt_addr, vram_addr; | ||
| 37 | unsigned n, size; | ||
| 38 | int i, r; | ||
| 39 | |||
| 40 | size = 1024 * 1024; | ||
| 41 | |||
| 42 | /* Number of tests = | ||
| 43 | * (Total GTT - IB pool - writeback page - ring buffers - IH ring) / test size | ||
| 44 | */ | ||
| 45 | n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024; | ||
| 46 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) | ||
| 47 | if (adev->rings[i]) | ||
| 48 | n -= adev->rings[i]->ring_size; | ||
| 49 | if (adev->wb.wb_obj) | ||
| 50 | n -= AMDGPU_GPU_PAGE_SIZE; | ||
| 51 | if (adev->irq.ih.ring_obj) | ||
| 52 | n -= adev->irq.ih.ring_size; | ||
| 53 | n /= size; | ||
| 54 | |||
| 55 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); | ||
| 56 | if (!gtt_obj) { | ||
| 57 | DRM_ERROR("Failed to allocate %d pointers\n", n); | ||
| 58 | r = 1; | ||
| 59 | goto out_cleanup; | ||
| 60 | } | ||
| 61 | |||
| 62 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, | ||
| 63 | NULL, &vram_obj); | ||
| 64 | if (r) { | ||
| 65 | DRM_ERROR("Failed to create VRAM object\n"); | ||
| 66 | goto out_cleanup; | ||
| 67 | } | ||
| 68 | r = amdgpu_bo_reserve(vram_obj, false); | ||
| 69 | if (unlikely(r != 0)) | ||
| 70 | goto out_unref; | ||
| 71 | r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr); | ||
| 72 | if (r) { | ||
| 73 | DRM_ERROR("Failed to pin VRAM object\n"); | ||
| 74 | goto out_unres; | ||
| 75 | } | ||
| 76 | for (i = 0; i < n; i++) { | ||
| 77 | void *gtt_map, *vram_map; | ||
| 78 | void **gtt_start, **gtt_end; | ||
| 79 | void **vram_start, **vram_end; | ||
| 80 | struct amdgpu_fence *fence = NULL; | ||
| 81 | |||
| 82 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, | ||
| 83 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i); | ||
| 84 | if (r) { | ||
| 85 | DRM_ERROR("Failed to create GTT object %d\n", i); | ||
| 86 | goto out_lclean; | ||
| 87 | } | ||
| 88 | |||
| 89 | r = amdgpu_bo_reserve(gtt_obj[i], false); | ||
| 90 | if (unlikely(r != 0)) | ||
| 91 | goto out_lclean_unref; | ||
| 92 | r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, >t_addr); | ||
| 93 | if (r) { | ||
| 94 | DRM_ERROR("Failed to pin GTT object %d\n", i); | ||
| 95 | goto out_lclean_unres; | ||
| 96 | } | ||
| 97 | |||
| 98 | r = amdgpu_bo_kmap(gtt_obj[i], >t_map); | ||
| 99 | if (r) { | ||
| 100 | DRM_ERROR("Failed to map GTT object %d\n", i); | ||
| 101 | goto out_lclean_unpin; | ||
| 102 | } | ||
| 103 | |||
| 104 | for (gtt_start = gtt_map, gtt_end = gtt_map + size; | ||
| 105 | gtt_start < gtt_end; | ||
| 106 | gtt_start++) | ||
| 107 | *gtt_start = gtt_start; | ||
| 108 | |||
| 109 | amdgpu_bo_kunmap(gtt_obj[i]); | ||
| 110 | |||
| 111 | r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr, | ||
| 112 | size, NULL, &fence); | ||
| 113 | |||
| 114 | if (r) { | ||
| 115 | DRM_ERROR("Failed GTT->VRAM copy %d\n", i); | ||
| 116 | goto out_lclean_unpin; | ||
| 117 | } | ||
| 118 | |||
| 119 | r = amdgpu_fence_wait(fence, false); | ||
| 120 | if (r) { | ||
| 121 | DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); | ||
| 122 | goto out_lclean_unpin; | ||
| 123 | } | ||
| 124 | |||
| 125 | amdgpu_fence_unref(&fence); | ||
| 126 | |||
| 127 | r = amdgpu_bo_kmap(vram_obj, &vram_map); | ||
| 128 | if (r) { | ||
| 129 | DRM_ERROR("Failed to map VRAM object after copy %d\n", i); | ||
| 130 | goto out_lclean_unpin; | ||
| 131 | } | ||
| 132 | |||
| 133 | for (gtt_start = gtt_map, gtt_end = gtt_map + size, | ||
| 134 | vram_start = vram_map, vram_end = vram_map + size; | ||
| 135 | vram_start < vram_end; | ||
| 136 | gtt_start++, vram_start++) { | ||
| 137 | if (*vram_start != gtt_start) { | ||
| 138 | DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " | ||
| 139 | "expected 0x%p (GTT/VRAM offset " | ||
| 140 | "0x%16llx/0x%16llx)\n", | ||
| 141 | i, *vram_start, gtt_start, | ||
| 142 | (unsigned long long) | ||
| 143 | (gtt_addr - adev->mc.gtt_start + | ||
| 144 | (void*)gtt_start - gtt_map), | ||
| 145 | (unsigned long long) | ||
| 146 | (vram_addr - adev->mc.vram_start + | ||
| 147 | (void*)gtt_start - gtt_map)); | ||
| 148 | amdgpu_bo_kunmap(vram_obj); | ||
| 149 | goto out_lclean_unpin; | ||
| 150 | } | ||
| 151 | *vram_start = vram_start; | ||
| 152 | } | ||
| 153 | |||
| 154 | amdgpu_bo_kunmap(vram_obj); | ||
| 155 | |||
| 156 | r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr, | ||
| 157 | size, NULL, &fence); | ||
| 158 | |||
| 159 | if (r) { | ||
| 160 | DRM_ERROR("Failed VRAM->GTT copy %d\n", i); | ||
| 161 | goto out_lclean_unpin; | ||
| 162 | } | ||
| 163 | |||
| 164 | r = amdgpu_fence_wait(fence, false); | ||
| 165 | if (r) { | ||
| 166 | DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); | ||
| 167 | goto out_lclean_unpin; | ||
| 168 | } | ||
| 169 | |||
| 170 | amdgpu_fence_unref(&fence); | ||
| 171 | |||
| 172 | r = amdgpu_bo_kmap(gtt_obj[i], >t_map); | ||
| 173 | if (r) { | ||
| 174 | DRM_ERROR("Failed to map GTT object after copy %d\n", i); | ||
| 175 | goto out_lclean_unpin; | ||
| 176 | } | ||
| 177 | |||
| 178 | for (gtt_start = gtt_map, gtt_end = gtt_map + size, | ||
| 179 | vram_start = vram_map, vram_end = vram_map + size; | ||
| 180 | gtt_start < gtt_end; | ||
| 181 | gtt_start++, vram_start++) { | ||
| 182 | if (*gtt_start != vram_start) { | ||
| 183 | DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " | ||
| 184 | "expected 0x%p (VRAM/GTT offset " | ||
| 185 | "0x%16llx/0x%16llx)\n", | ||
| 186 | i, *gtt_start, vram_start, | ||
| 187 | (unsigned long long) | ||
| 188 | (vram_addr - adev->mc.vram_start + | ||
| 189 | (void*)vram_start - vram_map), | ||
| 190 | (unsigned long long) | ||
| 191 | (gtt_addr - adev->mc.gtt_start + | ||
| 192 | (void*)vram_start - vram_map)); | ||
| 193 | amdgpu_bo_kunmap(gtt_obj[i]); | ||
| 194 | goto out_lclean_unpin; | ||
| 195 | } | ||
| 196 | } | ||
| 197 | |||
| 198 | amdgpu_bo_kunmap(gtt_obj[i]); | ||
| 199 | |||
| 200 | DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", | ||
| 201 | gtt_addr - adev->mc.gtt_start); | ||
| 202 | continue; | ||
| 203 | |||
| 204 | out_lclean_unpin: | ||
| 205 | amdgpu_bo_unpin(gtt_obj[i]); | ||
| 206 | out_lclean_unres: | ||
| 207 | amdgpu_bo_unreserve(gtt_obj[i]); | ||
| 208 | out_lclean_unref: | ||
| 209 | amdgpu_bo_unref(>t_obj[i]); | ||
| 210 | out_lclean: | ||
| 211 | for (--i; i >= 0; --i) { | ||
| 212 | amdgpu_bo_unpin(gtt_obj[i]); | ||
| 213 | amdgpu_bo_unreserve(gtt_obj[i]); | ||
| 214 | amdgpu_bo_unref(>t_obj[i]); | ||
| 215 | } | ||
| 216 | if (fence) | ||
| 217 | amdgpu_fence_unref(&fence); | ||
| 218 | break; | ||
| 219 | } | ||
| 220 | |||
| 221 | amdgpu_bo_unpin(vram_obj); | ||
| 222 | out_unres: | ||
| 223 | amdgpu_bo_unreserve(vram_obj); | ||
| 224 | out_unref: | ||
| 225 | amdgpu_bo_unref(&vram_obj); | ||
| 226 | out_cleanup: | ||
| 227 | kfree(gtt_obj); | ||
| 228 | if (r) { | ||
| 229 | printk(KERN_WARNING "Error while testing BO move.\n"); | ||
| 230 | } | ||
| 231 | } | ||
| 232 | |||
| 233 | void amdgpu_test_moves(struct amdgpu_device *adev) | ||
| 234 | { | ||
| 235 | if (adev->mman.buffer_funcs) | ||
| 236 | amdgpu_do_test_moves(adev); | ||
| 237 | } | ||
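The test relies on a self-referential pattern: every GTT slot is filled with its own CPU address, so after the GPU copy each destination slot must equal the address of its source counterpart, which also pins down the offset of any corruption. A userspace model of the idea (memcpy() stands in for the GPU copy; this is not the driver test itself):

#include <stdio.h>
#include <string.h>

#define N 1024

int main(void)
{
        static void *src[N], *dst[N];
        int i;

        for (i = 0; i < N; i++)
                src[i] = &src[i];       /* each slot holds its own address */

        memcpy(dst, src, sizeof(src));  /* stand-in for the GPU copy */

        for (i = 0; i < N; i++)
                if (dst[i] != &src[i]) {
                        printf("mismatch at slot %d\n", i);
                        return 1;
                }
        printf("copy verified\n");
        return 0;
}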
| 238 | |||
| 239 | static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev, | ||
| 240 | struct amdgpu_ring *ring, | ||
| 241 | struct amdgpu_fence **fence) | ||
| 242 | { | ||
| 243 | uint32_t handle = ring->idx ^ 0xdeafbeef; | ||
| 244 | int r; | ||
| 245 | |||
| 246 | if (ring == &adev->uvd.ring) { | ||
| 247 | r = amdgpu_uvd_get_create_msg(ring, handle, NULL); | ||
| 248 | if (r) { | ||
| 249 | DRM_ERROR("Failed to get dummy create msg\n"); | ||
| 250 | return r; | ||
| 251 | } | ||
| 252 | |||
| 253 | r = amdgpu_uvd_get_destroy_msg(ring, handle, fence); | ||
| 254 | if (r) { | ||
| 255 | DRM_ERROR("Failed to get dummy destroy msg\n"); | ||
| 256 | return r; | ||
| 257 | } | ||
| 258 | |||
| 259 | } else if (ring == &adev->vce.ring[0] || | ||
| 260 | ring == &adev->vce.ring[1]) { | ||
| 261 | r = amdgpu_vce_get_create_msg(ring, handle, NULL); | ||
| 262 | if (r) { | ||
| 263 | DRM_ERROR("Failed to get dummy create msg\n"); | ||
| 264 | return r; | ||
| 265 | } | ||
| 266 | |||
| 267 | r = amdgpu_vce_get_destroy_msg(ring, handle, fence); | ||
| 268 | if (r) { | ||
| 269 | DRM_ERROR("Failed to get dummy destroy msg\n"); | ||
| 270 | return r; | ||
| 271 | } | ||
| 272 | |||
| 273 | } else { | ||
| 274 | r = amdgpu_ring_lock(ring, 64); | ||
| 275 | if (r) { | ||
| 276 | DRM_ERROR("Failed to lock ring A %d\n", ring->idx); | ||
| 277 | return r; | ||
| 278 | } | ||
| 279 | amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence); | ||
| 280 | amdgpu_ring_unlock_commit(ring); | ||
| 281 | } | ||
| 282 | return 0; | ||
| 283 | } | ||
| 284 | |||
| 285 | void amdgpu_test_ring_sync(struct amdgpu_device *adev, | ||
| 286 | struct amdgpu_ring *ringA, | ||
| 287 | struct amdgpu_ring *ringB) | ||
| 288 | { | ||
| 289 | struct amdgpu_fence *fence1 = NULL, *fence2 = NULL; | ||
| 290 | struct amdgpu_semaphore *semaphore = NULL; | ||
| 291 | int r; | ||
| 292 | |||
| 293 | r = amdgpu_semaphore_create(adev, &semaphore); | ||
| 294 | if (r) { | ||
| 295 | DRM_ERROR("Failed to create semaphore\n"); | ||
| 296 | goto out_cleanup; | ||
| 297 | } | ||
| 298 | |||
| 299 | r = amdgpu_ring_lock(ringA, 64); | ||
| 300 | if (r) { | ||
| 301 | DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); | ||
| 302 | goto out_cleanup; | ||
| 303 | } | ||
| 304 | amdgpu_semaphore_emit_wait(ringA, semaphore); | ||
| 305 | amdgpu_ring_unlock_commit(ringA); | ||
| 306 | |||
| 307 | r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence1); | ||
| 308 | if (r) | ||
| 309 | goto out_cleanup; | ||
| 310 | |||
| 311 | r = amdgpu_ring_lock(ringA, 64); | ||
| 312 | if (r) { | ||
| 313 | DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); | ||
| 314 | goto out_cleanup; | ||
| 315 | } | ||
| 316 | amdgpu_semaphore_emit_wait(ringA, semaphore); | ||
| 317 | amdgpu_ring_unlock_commit(ringA); | ||
| 318 | |||
| 319 | r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence2); | ||
| 320 | if (r) | ||
| 321 | goto out_cleanup; | ||
| 322 | |||
| 323 | mdelay(1000); | ||
| 324 | |||
| 325 | if (amdgpu_fence_signaled(fence1)) { | ||
| 326 | DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n"); | ||
| 327 | goto out_cleanup; | ||
| 328 | } | ||
| 329 | |||
| 330 | r = amdgpu_ring_lock(ringB, 64); | ||
| 331 | if (r) { | ||
| 332 | DRM_ERROR("Failed to lock ring B %p\n", ringB); | ||
| 333 | goto out_cleanup; | ||
| 334 | } | ||
| 335 | amdgpu_semaphore_emit_signal(ringB, semaphore); | ||
| 336 | amdgpu_ring_unlock_commit(ringB); | ||
| 337 | |||
| 338 | r = amdgpu_fence_wait(fence1, false); | ||
| 339 | if (r) { | ||
| 340 | DRM_ERROR("Failed to wait for sync fence 1\n"); | ||
| 341 | goto out_cleanup; | ||
| 342 | } | ||
| 343 | |||
| 344 | mdelay(1000); | ||
| 345 | |||
| 346 | if (amdgpu_fence_signaled(fence2)) { | ||
| 347 | DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n"); | ||
| 348 | goto out_cleanup; | ||
| 349 | } | ||
| 350 | |||
| 351 | r = amdgpu_ring_lock(ringB, 64); | ||
| 352 | if (r) { | ||
| 353 | DRM_ERROR("Failed to lock ring B %p\n", ringB); | ||
| 354 | goto out_cleanup; | ||
| 355 | } | ||
| 356 | amdgpu_semaphore_emit_signal(ringB, semaphore); | ||
| 357 | amdgpu_ring_unlock_commit(ringB); | ||
| 358 | |||
| 359 | r = amdgpu_fence_wait(fence2, false); | ||
| 360 | if (r) { | ||
| 361 | DRM_ERROR("Failed to wait for sync fence 1\n"); | ||
| 362 | goto out_cleanup; | ||
| 363 | } | ||
| 364 | |||
| 365 | out_cleanup: | ||
| 366 | amdgpu_semaphore_free(adev, &semaphore, NULL); | ||
| 367 | |||
| 368 | if (fence1) | ||
| 369 | amdgpu_fence_unref(&fence1); | ||
| 370 | |||
| 371 | if (fence2) | ||
| 372 | amdgpu_fence_unref(&fence2); | ||
| 373 | |||
| 374 | if (r) | ||
| 375 | printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); | ||
| 376 | } | ||
| 377 | |||
| 378 | static void amdgpu_test_ring_sync2(struct amdgpu_device *adev, | ||
| 379 | struct amdgpu_ring *ringA, | ||
| 380 | struct amdgpu_ring *ringB, | ||
| 381 | struct amdgpu_ring *ringC) | ||
| 382 | { | ||
| 383 | struct amdgpu_fence *fenceA = NULL, *fenceB = NULL; | ||
| 384 | struct amdgpu_semaphore *semaphore = NULL; | ||
| 385 | bool sigA, sigB; | ||
| 386 | int i, r; | ||
| 387 | |||
| 388 | r = amdgpu_semaphore_create(adev, &semaphore); | ||
| 389 | if (r) { | ||
| 390 | DRM_ERROR("Failed to create semaphore\n"); | ||
| 391 | goto out_cleanup; | ||
| 392 | } | ||
| 393 | |||
| 394 | r = amdgpu_ring_lock(ringA, 64); | ||
| 395 | if (r) { | ||
| 396 | DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); | ||
| 397 | goto out_cleanup; | ||
| 398 | } | ||
| 399 | amdgpu_semaphore_emit_wait(ringA, semaphore); | ||
| 400 | amdgpu_ring_unlock_commit(ringA); | ||
| 401 | |||
| 402 | r = amdgpu_test_create_and_emit_fence(adev, ringA, &fenceA); | ||
| 403 | if (r) | ||
| 404 | goto out_cleanup; | ||
| 405 | |||
| 406 | r = amdgpu_ring_lock(ringB, 64); | ||
| 407 | if (r) { | ||
| 408 | DRM_ERROR("Failed to lock ring B %d\n", ringB->idx); | ||
| 409 | goto out_cleanup; | ||
| 410 | } | ||
| 411 | amdgpu_semaphore_emit_wait(ringB, semaphore); | ||
| 412 | amdgpu_ring_unlock_commit(ringB); | ||
| 413 | r = amdgpu_test_create_and_emit_fence(adev, ringB, &fenceB); | ||
| 414 | if (r) | ||
| 415 | goto out_cleanup; | ||
| 416 | |||
| 417 | mdelay(1000); | ||
| 418 | |||
| 419 | if (amdgpu_fence_signaled(fenceA)) { | ||
| 420 | DRM_ERROR("Fence A signaled without waiting for semaphore.\n"); | ||
| 421 | goto out_cleanup; | ||
| 422 | } | ||
| 423 | if (amdgpu_fence_signaled(fenceB)) { | ||
| 424 | DRM_ERROR("Fence B signaled without waiting for semaphore.\n"); | ||
| 425 | goto out_cleanup; | ||
| 426 | } | ||
| 427 | |||
| 428 | r = amdgpu_ring_lock(ringC, 64); | ||
| 429 | if (r) { | ||
| 430 | DRM_ERROR("Failed to lock ring B %p\n", ringC); | ||
| 431 | goto out_cleanup; | ||
| 432 | } | ||
| 433 | amdgpu_semaphore_emit_signal(ringC, semaphore); | ||
| 434 | amdgpu_ring_unlock_commit(ringC); | ||
| 435 | |||
| 436 | for (i = 0; i < 30; ++i) { | ||
| 437 | mdelay(100); | ||
| 438 | sigA = amdgpu_fence_signaled(fenceA); | ||
| 439 | sigB = amdgpu_fence_signaled(fenceB); | ||
| 440 | if (sigA || sigB) | ||
| 441 | break; | ||
| 442 | } | ||
| 443 | |||
| 444 | if (!sigA && !sigB) { | ||
| 445 | DRM_ERROR("Neither fence A nor B has been signaled\n"); | ||
| 446 | goto out_cleanup; | ||
| 447 | } else if (sigA && sigB) { | ||
| 448 | DRM_ERROR("Both fence A and B has been signaled\n"); | ||
| 449 | goto out_cleanup; | ||
| 450 | } | ||
| 451 | |||
| 452 | DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B'); | ||
| 453 | |||
| 454 | r = amdgpu_ring_lock(ringC, 64); | ||
| 455 | if (r) { | ||
| 456 | DRM_ERROR("Failed to lock ring B %p\n", ringC); | ||
| 457 | goto out_cleanup; | ||
| 458 | } | ||
| 459 | amdgpu_semaphore_emit_signal(ringC, semaphore); | ||
| 460 | amdgpu_ring_unlock_commit(ringC); | ||
| 461 | |||
| 462 | mdelay(1000); | ||
| 463 | |||
| 464 | r = amdgpu_fence_wait(fenceA, false); | ||
| 465 | if (r) { | ||
| 466 | DRM_ERROR("Failed to wait for sync fence A\n"); | ||
| 467 | goto out_cleanup; | ||
| 468 | } | ||
| 469 | r = amdgpu_fence_wait(fenceB, false); | ||
| 470 | if (r) { | ||
| 471 | DRM_ERROR("Failed to wait for sync fence B\n"); | ||
| 472 | goto out_cleanup; | ||
| 473 | } | ||
| 474 | |||
| 475 | out_cleanup: | ||
| 476 | amdgpu_semaphore_free(adev, &semaphore, NULL); | ||
| 477 | |||
| 478 | if (fenceA) | ||
| 479 | amdgpu_fence_unref(&fenceA); | ||
| 480 | |||
| 481 | if (fenceB) | ||
| 482 | amdgpu_fence_unref(&fenceB); | ||
| 483 | |||
| 484 | if (r) | ||
| 485 | printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); | ||
| 486 | } | ||
| 487 | |||
| 488 | static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA, | ||
| 489 | struct amdgpu_ring *ringB) | ||
| 490 | { | ||
| 491 | if (ringA == &ringA->adev->vce.ring[0] && | ||
| 492 | ringB == &ringB->adev->vce.ring[1]) | ||
| 493 | return false; | ||
| 494 | |||
| 495 | return true; | ||
| 496 | } | ||
| 497 | |||
| 498 | void amdgpu_test_syncing(struct amdgpu_device *adev) | ||
| 499 | { | ||
| 500 | int i, j, k; | ||
| 501 | |||
| 502 | for (i = 1; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 503 | struct amdgpu_ring *ringA = adev->rings[i]; | ||
| 504 | if (!ringA || !ringA->ready) | ||
| 505 | continue; | ||
| 506 | |||
| 507 | for (j = 0; j < i; ++j) { | ||
| 508 | struct amdgpu_ring *ringB = adev->rings[j]; | ||
| 509 | if (!ringB || !ringB->ready) | ||
| 510 | continue; | ||
| 511 | |||
| 512 | if (!amdgpu_test_sync_possible(ringA, ringB)) | ||
| 513 | continue; | ||
| 514 | |||
| 515 | DRM_INFO("Testing syncing between rings %d and %d...\n", i, j); | ||
| 516 | amdgpu_test_ring_sync(adev, ringA, ringB); | ||
| 517 | |||
| 518 | DRM_INFO("Testing syncing between rings %d and %d...\n", j, i); | ||
| 519 | amdgpu_test_ring_sync(adev, ringB, ringA); | ||
| 520 | |||
| 521 | for (k = 0; k < j; ++k) { | ||
| 522 | struct amdgpu_ring *ringC = adev->rings[k]; | ||
| 523 | if (!ringC || !ringC->ready) | ||
| 524 | continue; | ||
| 525 | |||
| 526 | if (!amdgpu_test_sync_possible(ringA, ringC)) | ||
| 527 | continue; | ||
| 528 | |||
| 529 | if (!amdgpu_test_sync_possible(ringB, ringC)) | ||
| 530 | continue; | ||
| 531 | |||
| 532 | DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k); | ||
| 533 | amdgpu_test_ring_sync2(adev, ringA, ringB, ringC); | ||
| 534 | |||
| 535 | DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j); | ||
| 536 | amdgpu_test_ring_sync2(adev, ringA, ringC, ringB); | ||
| 537 | |||
| 538 | DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k); | ||
| 539 | amdgpu_test_ring_sync2(adev, ringB, ringA, ringC); | ||
| 540 | |||
| 541 | DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i); | ||
| 542 | amdgpu_test_ring_sync2(adev, ringB, ringC, ringA); | ||
| 543 | |||
| 544 | DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j); | ||
| 545 | amdgpu_test_ring_sync2(adev, ringC, ringA, ringB); | ||
| 546 | |||
| 547 | DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i); | ||
| 548 | amdgpu_test_ring_sync2(adev, ringC, ringB, ringA); | ||
| 549 | } | ||
| 550 | } | ||
| 551 | } | ||
| 552 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h new file mode 100644 index 000000000000..b57647e582d4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | |||
| @@ -0,0 +1,209 @@ | |||
| 1 | #if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) | ||
| 2 | #define _AMDGPU_TRACE_H | ||
| 3 | |||
| 4 | #include <linux/stringify.h> | ||
| 5 | #include <linux/types.h> | ||
| 6 | #include <linux/tracepoint.h> | ||
| 7 | |||
| 8 | #include <drm/drmP.h> | ||
| 9 | |||
| 10 | #undef TRACE_SYSTEM | ||
| 11 | #define TRACE_SYSTEM amdgpu | ||
| 12 | #define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM) | ||
| 13 | #define TRACE_INCLUDE_FILE amdgpu_trace | ||
| 14 | |||
| 15 | TRACE_EVENT(amdgpu_bo_create, | ||
| 16 | TP_PROTO(struct amdgpu_bo *bo), | ||
| 17 | TP_ARGS(bo), | ||
| 18 | TP_STRUCT__entry( | ||
| 19 | __field(struct amdgpu_bo *, bo) | ||
| 20 | __field(u32, pages) | ||
| 21 | ), | ||
| 22 | |||
| 23 | TP_fast_assign( | ||
| 24 | __entry->bo = bo; | ||
| 25 | __entry->pages = bo->tbo.num_pages; | ||
| 26 | ), | ||
| 27 | TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) | ||
| 28 | ); | ||
| 29 | |||
| 30 | TRACE_EVENT(amdgpu_cs, | ||
| 31 | TP_PROTO(struct amdgpu_cs_parser *p, int i), | ||
| 32 | TP_ARGS(p, i), | ||
| 33 | TP_STRUCT__entry( | ||
| 34 | __field(u32, ring) | ||
| 35 | __field(u32, dw) | ||
| 36 | __field(u32, fences) | ||
| 37 | ), | ||
| 38 | |||
| 39 | TP_fast_assign( | ||
| 40 | __entry->ring = p->ibs[i].ring->idx; | ||
| 41 | __entry->dw = p->ibs[i].length_dw; | ||
| 42 | __entry->fences = amdgpu_fence_count_emitted( | ||
| 43 | p->ibs[i].ring); | ||
| 44 | ), | ||
| 45 | TP_printk("ring=%u, dw=%u, fences=%u", | ||
| 46 | __entry->ring, __entry->dw, | ||
| 47 | __entry->fences) | ||
| 48 | ); | ||
| 49 | |||
| 50 | TRACE_EVENT(amdgpu_vm_grab_id, | ||
| 51 | TP_PROTO(unsigned vmid, int ring), | ||
| 52 | TP_ARGS(vmid, ring), | ||
| 53 | TP_STRUCT__entry( | ||
| 54 | __field(u32, vmid) | ||
| 55 | __field(u32, ring) | ||
| 56 | ), | ||
| 57 | |||
| 58 | TP_fast_assign( | ||
| 59 | __entry->vmid = vmid; | ||
| 60 | __entry->ring = ring; | ||
| 61 | ), | ||
| 62 | TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring) | ||
| 63 | ); | ||
| 64 | |||
| 65 | TRACE_EVENT(amdgpu_vm_bo_update, | ||
| 66 | TP_PROTO(struct amdgpu_bo_va_mapping *mapping), | ||
| 67 | TP_ARGS(mapping), | ||
| 68 | TP_STRUCT__entry( | ||
| 69 | __field(u64, soffset) | ||
| 70 | __field(u64, eoffset) | ||
| 71 | __field(u32, flags) | ||
| 72 | ), | ||
| 73 | |||
| 74 | TP_fast_assign( | ||
| 75 | __entry->soffset = mapping->it.start; | ||
| 76 | __entry->eoffset = mapping->it.last + 1; | ||
| 77 | __entry->flags = mapping->flags; | ||
| 78 | ), | ||
| 79 | TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x", | ||
| 80 | __entry->soffset, __entry->eoffset, __entry->flags) | ||
| 81 | ); | ||
| 82 | |||
| 83 | TRACE_EVENT(amdgpu_vm_set_page, | ||
| 84 | TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, | ||
| 85 | uint32_t incr, uint32_t flags), | ||
| 86 | TP_ARGS(pe, addr, count, incr, flags), | ||
| 87 | TP_STRUCT__entry( | ||
| 88 | __field(u64, pe) | ||
| 89 | __field(u64, addr) | ||
| 90 | __field(u32, count) | ||
| 91 | __field(u32, incr) | ||
| 92 | __field(u32, flags) | ||
| 93 | ), | ||
| 94 | |||
| 95 | TP_fast_assign( | ||
| 96 | __entry->pe = pe; | ||
| 97 | __entry->addr = addr; | ||
| 98 | __entry->count = count; | ||
| 99 | __entry->incr = incr; | ||
| 100 | __entry->flags = flags; | ||
| 101 | ), | ||
| 102 | TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u", | ||
| 103 | __entry->pe, __entry->addr, __entry->incr, | ||
| 104 | __entry->flags, __entry->count) | ||
| 105 | ); | ||
| 106 | |||
| 107 | TRACE_EVENT(amdgpu_vm_flush, | ||
| 108 | TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id), | ||
| 109 | TP_ARGS(pd_addr, ring, id), | ||
| 110 | TP_STRUCT__entry( | ||
| 111 | __field(u64, pd_addr) | ||
| 112 | __field(u32, ring) | ||
| 113 | __field(u32, id) | ||
| 114 | ), | ||
| 115 | |||
| 116 | TP_fast_assign( | ||
| 117 | __entry->pd_addr = pd_addr; | ||
| 118 | __entry->ring = ring; | ||
| 119 | __entry->id = id; | ||
| 120 | ), | ||
| 121 | TP_printk("pd_addr=%010Lx, ring=%u, id=%u", | ||
| 122 | __entry->pd_addr, __entry->ring, __entry->id) | ||
| 123 | ); | ||
| 124 | |||
| 125 | DECLARE_EVENT_CLASS(amdgpu_fence_request, | ||
| 126 | |||
| 127 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), | ||
| 128 | |||
| 129 | TP_ARGS(dev, ring, seqno), | ||
| 130 | |||
| 131 | TP_STRUCT__entry( | ||
| 132 | __field(u32, dev) | ||
| 133 | __field(int, ring) | ||
| 134 | __field(u32, seqno) | ||
| 135 | ), | ||
| 136 | |||
| 137 | TP_fast_assign( | ||
| 138 | __entry->dev = dev->primary->index; | ||
| 139 | __entry->ring = ring; | ||
| 140 | __entry->seqno = seqno; | ||
| 141 | ), | ||
| 142 | |||
| 143 | TP_printk("dev=%u, ring=%d, seqno=%u", | ||
| 144 | __entry->dev, __entry->ring, __entry->seqno) | ||
| 145 | ); | ||
| 146 | |||
| 147 | DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit, | ||
| 148 | |||
| 149 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), | ||
| 150 | |||
| 151 | TP_ARGS(dev, ring, seqno) | ||
| 152 | ); | ||
| 153 | |||
| 154 | DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin, | ||
| 155 | |||
| 156 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), | ||
| 157 | |||
| 158 | TP_ARGS(dev, ring, seqno) | ||
| 159 | ); | ||
| 160 | |||
| 161 | DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end, | ||
| 162 | |||
| 163 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), | ||
| 164 | |||
| 165 | TP_ARGS(dev, ring, seqno) | ||
| 166 | ); | ||
| 167 | |||
| 168 | DECLARE_EVENT_CLASS(amdgpu_semaphore_request, | ||
| 169 | |||
| 170 | TP_PROTO(int ring, struct amdgpu_semaphore *sem), | ||
| 171 | |||
| 172 | TP_ARGS(ring, sem), | ||
| 173 | |||
| 174 | TP_STRUCT__entry( | ||
| 175 | __field(int, ring) | ||
| 176 | __field(signed, waiters) | ||
| 177 | __field(uint64_t, gpu_addr) | ||
| 178 | ), | ||
| 179 | |||
| 180 | TP_fast_assign( | ||
| 181 | __entry->ring = ring; | ||
| 182 | __entry->waiters = sem->waiters; | ||
| 183 | __entry->gpu_addr = sem->gpu_addr; | ||
| 184 | ), | ||
| 185 | |||
| 186 | TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring, | ||
| 187 | __entry->waiters, __entry->gpu_addr) | ||
| 188 | ); | ||
| 189 | |||
| 190 | DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_signale, | ||
| 191 | |||
| 192 | TP_PROTO(int ring, struct amdgpu_semaphore *sem), | ||
| 193 | |||
| 194 | TP_ARGS(ring, sem) | ||
| 195 | ); | ||
| 196 | |||
| 197 | DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_wait, | ||
| 198 | |||
| 199 | TP_PROTO(int ring, struct amdgpu_semaphore *sem), | ||
| 200 | |||
| 201 | TP_ARGS(ring, sem) | ||
| 202 | ); | ||
| 203 | |||
| 204 | #endif | ||
| 205 | |||
| 206 | /* This part must be outside protection */ | ||
| 207 | #undef TRACE_INCLUDE_PATH | ||
| 208 | #define TRACE_INCLUDE_PATH . | ||
| 209 | #include <trace/define_trace.h> | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c new file mode 100644 index 000000000000..385b7e1d72f9 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c | |||
| @@ -0,0 +1,9 @@ | |||
| 1 | /* Copyright Red Hat Inc 2010. | ||
| 2 | * Author : Dave Airlie <airlied@redhat.com> | ||
| 3 | */ | ||
| 4 | #include <drm/drmP.h> | ||
| 5 | #include <drm/amdgpu_drm.h> | ||
| 6 | #include "amdgpu.h" | ||
| 7 | |||
| 8 | #define CREATE_TRACE_POINTS | ||
| 9 | #include "amdgpu_trace.h" | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c new file mode 100644 index 000000000000..120e6e7c4647 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
| @@ -0,0 +1,1249 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2009 Jerome Glisse. | ||
| 3 | * All Rights Reserved. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the | ||
| 7 | * "Software"), to deal in the Software without restriction, including | ||
| 8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 10 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 11 | * the following conditions: | ||
| 12 | * | ||
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 20 | * | ||
| 21 | * The above copyright notice and this permission notice (including the | ||
| 22 | * next paragraph) shall be included in all copies or substantial portions | ||
| 23 | * of the Software. | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | /* | ||
| 27 | * Authors: | ||
| 28 | * Jerome Glisse <glisse@freedesktop.org> | ||
| 29 | * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> | ||
| 30 | * Dave Airlie | ||
| 31 | */ | ||
| 32 | #include <ttm/ttm_bo_api.h> | ||
| 33 | #include <ttm/ttm_bo_driver.h> | ||
| 34 | #include <ttm/ttm_placement.h> | ||
| 35 | #include <ttm/ttm_module.h> | ||
| 36 | #include <ttm/ttm_page_alloc.h> | ||
| 37 | #include <drm/drmP.h> | ||
| 38 | #include <drm/amdgpu_drm.h> | ||
| 39 | #include <linux/seq_file.h> | ||
| 40 | #include <linux/slab.h> | ||
| 41 | #include <linux/swiotlb.h> | ||
| 42 | #include <linux/swap.h> | ||
| 43 | #include <linux/pagemap.h> | ||
| 44 | #include <linux/debugfs.h> | ||
| 45 | #include "amdgpu.h" | ||
| 46 | #include "bif/bif_4_1_d.h" | ||
| 47 | |||
| 48 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) | ||
| 49 | |||
| 50 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); | ||
| 51 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); | ||
| 52 | |||
| 53 | static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev) | ||
| 54 | { | ||
| 55 | struct amdgpu_mman *mman; | ||
| 56 | struct amdgpu_device *adev; | ||
| 57 | |||
| 58 | mman = container_of(bdev, struct amdgpu_mman, bdev); | ||
| 59 | adev = container_of(mman, struct amdgpu_device, mman); | ||
| 60 | return adev; | ||
| 61 | } | ||
| 62 | |||
| 63 | |||
| 64 | /* | ||
| 65 | * Global memory. | ||
| 66 | */ | ||
| 67 | static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref) | ||
| 68 | { | ||
| 69 | return ttm_mem_global_init(ref->object); | ||
| 70 | } | ||
| 71 | |||
| 72 | static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) | ||
| 73 | { | ||
| 74 | ttm_mem_global_release(ref->object); | ||
| 75 | } | ||
| 76 | |||
| 77 | static int amdgpu_ttm_global_init(struct amdgpu_device *adev) | ||
| 78 | { | ||
| 79 | struct drm_global_reference *global_ref; | ||
| 80 | int r; | ||
| 81 | |||
| 82 | adev->mman.mem_global_referenced = false; | ||
| 83 | global_ref = &adev->mman.mem_global_ref; | ||
| 84 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | ||
| 85 | global_ref->size = sizeof(struct ttm_mem_global); | ||
| 86 | global_ref->init = &amdgpu_ttm_mem_global_init; | ||
| 87 | global_ref->release = &amdgpu_ttm_mem_global_release; | ||
| 88 | r = drm_global_item_ref(global_ref); | ||
| 89 | if (r != 0) { | ||
| 90 | DRM_ERROR("Failed setting up TTM memory accounting " | ||
| 91 | "subsystem.\n"); | ||
| 92 | return r; | ||
| 93 | } | ||
| 94 | |||
| 95 | adev->mman.bo_global_ref.mem_glob = | ||
| 96 | adev->mman.mem_global_ref.object; | ||
| 97 | global_ref = &adev->mman.bo_global_ref.ref; | ||
| 98 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | ||
| 99 | global_ref->size = sizeof(struct ttm_bo_global); | ||
| 100 | global_ref->init = &ttm_bo_global_init; | ||
| 101 | global_ref->release = &ttm_bo_global_release; | ||
| 102 | r = drm_global_item_ref(global_ref); | ||
| 103 | if (r != 0) { | ||
| 104 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | ||
| 105 | drm_global_item_unref(&adev->mman.mem_global_ref); | ||
| 106 | return r; | ||
| 107 | } | ||
| 108 | |||
| 109 | adev->mman.mem_global_referenced = true; | ||
| 110 | return 0; | ||
| 111 | } | ||
| 112 | |||
| 113 | static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) | ||
| 114 | { | ||
| 115 | if (adev->mman.mem_global_referenced) { | ||
| 116 | drm_global_item_unref(&adev->mman.bo_global_ref.ref); | ||
| 117 | drm_global_item_unref(&adev->mman.mem_global_ref); | ||
| 118 | adev->mman.mem_global_referenced = false; | ||
| 119 | } | ||
| 120 | } | ||
| 121 | |||
| 122 | static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) | ||
| 123 | { | ||
| 124 | return 0; | ||
| 125 | } | ||
| 126 | |||
| 127 | static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | ||
| 128 | struct ttm_mem_type_manager *man) | ||
| 129 | { | ||
| 130 | struct amdgpu_device *adev; | ||
| 131 | |||
| 132 | adev = amdgpu_get_adev(bdev); | ||
| 133 | |||
| 134 | switch (type) { | ||
| 135 | case TTM_PL_SYSTEM: | ||
| 136 | /* System memory */ | ||
| 137 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; | ||
| 138 | man->available_caching = TTM_PL_MASK_CACHING; | ||
| 139 | man->default_caching = TTM_PL_FLAG_CACHED; | ||
| 140 | break; | ||
| 141 | case TTM_PL_TT: | ||
| 142 | man->func = &ttm_bo_manager_func; | ||
| 143 | man->gpu_offset = adev->mc.gtt_start; | ||
| 144 | man->available_caching = TTM_PL_MASK_CACHING; | ||
| 145 | man->default_caching = TTM_PL_FLAG_CACHED; | ||
| 146 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; | ||
| 147 | break; | ||
| 148 | case TTM_PL_VRAM: | ||
| 149 | /* "On-card" video ram */ | ||
| 150 | man->func = &ttm_bo_manager_func; | ||
| 151 | man->gpu_offset = adev->mc.vram_start; | ||
| 152 | man->flags = TTM_MEMTYPE_FLAG_FIXED | | ||
| 153 | TTM_MEMTYPE_FLAG_MAPPABLE; | ||
| 154 | man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; | ||
| 155 | man->default_caching = TTM_PL_FLAG_WC; | ||
| 156 | break; | ||
| 157 | case AMDGPU_PL_GDS: | ||
| 158 | case AMDGPU_PL_GWS: | ||
| 159 | case AMDGPU_PL_OA: | ||
| 160 | /* On-chip GDS, GWS and OA memory */ | ||
| 161 | man->func = &ttm_bo_manager_func; | ||
| 162 | man->gpu_offset = 0; | ||
| 163 | man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA; | ||
| 164 | man->available_caching = TTM_PL_FLAG_UNCACHED; | ||
| 165 | man->default_caching = TTM_PL_FLAG_UNCACHED; | ||
| 166 | break; | ||
| 167 | default: | ||
| 168 | DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); | ||
| 169 | return -EINVAL; | ||
| 170 | } | ||
| 171 | return 0; | ||
| 172 | } | ||
| 173 | |||
| 174 | static void amdgpu_evict_flags(struct ttm_buffer_object *bo, | ||
| 175 | struct ttm_placement *placement) | ||
| 176 | { | ||
| 177 | struct amdgpu_bo *rbo; | ||
| 178 | static struct ttm_place placements = { | ||
| 179 | .fpfn = 0, | ||
| 180 | .lpfn = 0, | ||
| 181 | .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | ||
| 182 | }; | ||
| 183 | |||
| 184 | if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) { | ||
| 185 | placement->placement = &placements; | ||
| 186 | placement->busy_placement = &placements; | ||
| 187 | placement->num_placement = 1; | ||
| 188 | placement->num_busy_placement = 1; | ||
| 189 | return; | ||
| 190 | } | ||
| 191 | rbo = container_of(bo, struct amdgpu_bo, tbo); | ||
| 192 | switch (bo->mem.mem_type) { | ||
| 193 | case TTM_PL_VRAM: | ||
| 194 | if (rbo->adev->mman.buffer_funcs_ring->ready == false) | ||
| 195 | amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU); | ||
| 196 | else | ||
| 197 | amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT); | ||
| 198 | break; | ||
| 199 | case TTM_PL_TT: | ||
| 200 | default: | ||
| 201 | amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU); | ||
| 202 | } | ||
| 203 | *placement = rbo->placement; | ||
| 204 | } | ||
| 205 | |||
| 206 | static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) | ||
| 207 | { | ||
| 208 | struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo); | ||
| 209 | |||
| 210 | return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); | ||
| 211 | } | ||
| 212 | |||
| 213 | static void amdgpu_move_null(struct ttm_buffer_object *bo, | ||
| 214 | struct ttm_mem_reg *new_mem) | ||
| 215 | { | ||
| 216 | struct ttm_mem_reg *old_mem = &bo->mem; | ||
| 217 | |||
| 218 | BUG_ON(old_mem->mm_node != NULL); | ||
| 219 | *old_mem = *new_mem; | ||
| 220 | new_mem->mm_node = NULL; | ||
| 221 | } | ||
| 222 | |||
| 223 | static int amdgpu_move_blit(struct ttm_buffer_object *bo, | ||
| 224 | bool evict, bool no_wait_gpu, | ||
| 225 | struct ttm_mem_reg *new_mem, | ||
| 226 | struct ttm_mem_reg *old_mem) | ||
| 227 | { | ||
| 228 | struct amdgpu_device *adev; | ||
| 229 | struct amdgpu_ring *ring; | ||
| 230 | uint64_t old_start, new_start; | ||
| 231 | struct amdgpu_fence *fence; | ||
| 232 | int r; | ||
| 233 | |||
| 234 | adev = amdgpu_get_adev(bo->bdev); | ||
| 235 | ring = adev->mman.buffer_funcs_ring; | ||
| 236 | old_start = old_mem->start << PAGE_SHIFT; | ||
| 237 | new_start = new_mem->start << PAGE_SHIFT; | ||
| 238 | |||
| 239 | switch (old_mem->mem_type) { | ||
| 240 | case TTM_PL_VRAM: | ||
| 241 | old_start += adev->mc.vram_start; | ||
| 242 | break; | ||
| 243 | case TTM_PL_TT: | ||
| 244 | old_start += adev->mc.gtt_start; | ||
| 245 | break; | ||
| 246 | default: | ||
| 247 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); | ||
| 248 | return -EINVAL; | ||
| 249 | } | ||
| 250 | switch (new_mem->mem_type) { | ||
| 251 | case TTM_PL_VRAM: | ||
| 252 | new_start += adev->mc.vram_start; | ||
| 253 | break; | ||
| 254 | case TTM_PL_TT: | ||
| 255 | new_start += adev->mc.gtt_start; | ||
| 256 | break; | ||
| 257 | default: | ||
| 258 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); | ||
| 259 | return -EINVAL; | ||
| 260 | } | ||
| 261 | if (!ring->ready) { | ||
| 262 | DRM_ERROR("Trying to move memory with ring turned off.\n"); | ||
| 263 | return -EINVAL; | ||
| 264 | } | ||
| 265 | |||
| 266 | BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0); | ||
| 267 | |||
| 268 | r = amdgpu_copy_buffer(ring, old_start, new_start, | ||
| 269 | new_mem->num_pages * PAGE_SIZE, /* bytes */ | ||
| 270 | bo->resv, &fence); | ||
| 271 | /* FIXME: handle copy error */ | ||
| 272 | r = ttm_bo_move_accel_cleanup(bo, &fence->base, | ||
| 273 | evict, no_wait_gpu, new_mem); | ||
| 274 | amdgpu_fence_unref(&fence); | ||
| 275 | return r; | ||
| 276 | } | ||
| 277 | |||
| 278 | static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, | ||
| 279 | bool evict, bool interruptible, | ||
| 280 | bool no_wait_gpu, | ||
| 281 | struct ttm_mem_reg *new_mem) | ||
| 282 | { | ||
| 283 | struct amdgpu_device *adev; | ||
| 284 | struct ttm_mem_reg *old_mem = &bo->mem; | ||
| 285 | struct ttm_mem_reg tmp_mem; | ||
| 286 | struct ttm_place placements; | ||
| 287 | struct ttm_placement placement; | ||
| 288 | int r; | ||
| 289 | |||
| 290 | adev = amdgpu_get_adev(bo->bdev); | ||
| 291 | tmp_mem = *new_mem; | ||
| 292 | tmp_mem.mm_node = NULL; | ||
| 293 | placement.num_placement = 1; | ||
| 294 | placement.placement = &placements; | ||
| 295 | placement.num_busy_placement = 1; | ||
| 296 | placement.busy_placement = &placements; | ||
| 297 | placements.fpfn = 0; | ||
| 298 | placements.lpfn = 0; | ||
| 299 | placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | ||
| 300 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, | ||
| 301 | interruptible, no_wait_gpu); | ||
| 302 | if (unlikely(r)) { | ||
| 303 | return r; | ||
| 304 | } | ||
| 305 | |||
| 306 | r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); | ||
| 307 | if (unlikely(r)) { | ||
| 308 | goto out_cleanup; | ||
| 309 | } | ||
| 310 | |||
| 311 | r = ttm_tt_bind(bo->ttm, &tmp_mem); | ||
| 312 | if (unlikely(r)) { | ||
| 313 | goto out_cleanup; | ||
| 314 | } | ||
| 315 | r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem); | ||
| 316 | if (unlikely(r)) { | ||
| 317 | goto out_cleanup; | ||
| 318 | } | ||
| 319 | r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); | ||
| 320 | out_cleanup: | ||
| 321 | ttm_bo_mem_put(bo, &tmp_mem); | ||
| 322 | return r; | ||
| 323 | } | ||
| 324 | |||
| 325 | static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, | ||
| 326 | bool evict, bool interruptible, | ||
| 327 | bool no_wait_gpu, | ||
| 328 | struct ttm_mem_reg *new_mem) | ||
| 329 | { | ||
| 330 | struct amdgpu_device *adev; | ||
| 331 | struct ttm_mem_reg *old_mem = &bo->mem; | ||
| 332 | struct ttm_mem_reg tmp_mem; | ||
| 333 | struct ttm_placement placement; | ||
| 334 | struct ttm_place placements; | ||
| 335 | int r; | ||
| 336 | |||
| 337 | adev = amdgpu_get_adev(bo->bdev); | ||
| 338 | tmp_mem = *new_mem; | ||
| 339 | tmp_mem.mm_node = NULL; | ||
| 340 | placement.num_placement = 1; | ||
| 341 | placement.placement = &placements; | ||
| 342 | placement.num_busy_placement = 1; | ||
| 343 | placement.busy_placement = &placements; | ||
| 344 | placements.fpfn = 0; | ||
| 345 | placements.lpfn = 0; | ||
| 346 | placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | ||
| 347 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, | ||
| 348 | interruptible, no_wait_gpu); | ||
| 349 | if (unlikely(r)) { | ||
| 350 | return r; | ||
| 351 | } | ||
| 352 | r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); | ||
| 353 | if (unlikely(r)) { | ||
| 354 | goto out_cleanup; | ||
| 355 | } | ||
| 356 | r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem); | ||
| 357 | if (unlikely(r)) { | ||
| 358 | goto out_cleanup; | ||
| 359 | } | ||
| 360 | out_cleanup: | ||
| 361 | ttm_bo_mem_put(bo, &tmp_mem); | ||
| 362 | return r; | ||
| 363 | } | ||
| 364 | |||
| 365 | static int amdgpu_bo_move(struct ttm_buffer_object *bo, | ||
| 366 | bool evict, bool interruptible, | ||
| 367 | bool no_wait_gpu, | ||
| 368 | struct ttm_mem_reg *new_mem) | ||
| 369 | { | ||
| 370 | struct amdgpu_device *adev; | ||
| 371 | struct ttm_mem_reg *old_mem = &bo->mem; | ||
| 372 | int r; | ||
| 373 | |||
| 374 | adev = amdgpu_get_adev(bo->bdev); | ||
| 375 | if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { | ||
| 376 | amdgpu_move_null(bo, new_mem); | ||
| 377 | return 0; | ||
| 378 | } | ||
| 379 | if ((old_mem->mem_type == TTM_PL_TT && | ||
| 380 | new_mem->mem_type == TTM_PL_SYSTEM) || | ||
| 381 | (old_mem->mem_type == TTM_PL_SYSTEM && | ||
| 382 | new_mem->mem_type == TTM_PL_TT)) { | ||
| 383 | /* bind is enough */ | ||
| 384 | amdgpu_move_null(bo, new_mem); | ||
| 385 | return 0; | ||
| 386 | } | ||
| 387 | if (adev->mman.buffer_funcs == NULL || | ||
| 388 | adev->mman.buffer_funcs_ring == NULL || | ||
| 389 | !adev->mman.buffer_funcs_ring->ready) { | ||
| 390 | /* use memcpy */ | ||
| 391 | goto memcpy; | ||
| 392 | } | ||
| 393 | |||
| 394 | if (old_mem->mem_type == TTM_PL_VRAM && | ||
| 395 | new_mem->mem_type == TTM_PL_SYSTEM) { | ||
| 396 | r = amdgpu_move_vram_ram(bo, evict, interruptible, | ||
| 397 | no_wait_gpu, new_mem); | ||
| 398 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && | ||
| 399 | new_mem->mem_type == TTM_PL_VRAM) { | ||
| 400 | r = amdgpu_move_ram_vram(bo, evict, interruptible, | ||
| 401 | no_wait_gpu, new_mem); | ||
| 402 | } else { | ||
| 403 | r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem); | ||
| 404 | } | ||
| 405 | |||
| 406 | if (r) { | ||
| 407 | memcpy: | ||
| 408 | r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); | ||
| 409 | if (r) { | ||
| 410 | return r; | ||
| 411 | } | ||
| 412 | } | ||
| 413 | |||
| 414 | /* update statistics */ | ||
| 415 | atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); | ||
| 416 | return 0; | ||
| 417 | } | ||
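The dispatch above reduces to the following matrix:

	/* SYSTEM <-> TT      : page-table (un)bind only, no data copy (amdgpu_move_null)
	 * VRAM   -> SYSTEM   : blit into a temporary GTT placement, then unbind
	 *                      to system pages (amdgpu_move_vram_ram)
	 * SYSTEM -> VRAM     : bind to a temporary GTT placement, then blit into
	 *                      VRAM (amdgpu_move_ram_vram)
	 * anything else      : direct GPU blit (amdgpu_move_blit)
	 * SDMA not available : CPU memcpy (ttm_bo_move_memcpy), which is also
	 *                      the fallback when the blit path fails
	 */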
| 418 | |||
| 419 | static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | ||
| 420 | { | ||
| 421 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | ||
| 422 | struct amdgpu_device *adev = amdgpu_get_adev(bdev); | ||
| 423 | |||
| 424 | mem->bus.addr = NULL; | ||
| 425 | mem->bus.offset = 0; | ||
| 426 | mem->bus.size = mem->num_pages << PAGE_SHIFT; | ||
| 427 | mem->bus.base = 0; | ||
| 428 | mem->bus.is_iomem = false; | ||
| 429 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) | ||
| 430 | return -EINVAL; | ||
| 431 | switch (mem->mem_type) { | ||
| 432 | case TTM_PL_SYSTEM: | ||
| 433 | /* system memory */ | ||
| 434 | return 0; | ||
| 435 | case TTM_PL_TT: | ||
| 436 | break; | ||
| 437 | case TTM_PL_VRAM: | ||
| 438 | mem->bus.offset = mem->start << PAGE_SHIFT; | ||
| 439 | /* check if it's visible */ | ||
| 440 | if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size) | ||
| 441 | return -EINVAL; | ||
| 442 | mem->bus.base = adev->mc.aper_base; | ||
| 443 | mem->bus.is_iomem = true; | ||
| 444 | #ifdef __alpha__ | ||
| 445 | /* | ||
| 446 | * Alpha: use bus.addr to hold the ioremap() return, | ||
| 447 | * so we can modify bus.base below. | ||
| 448 | */ | ||
| 449 | if (mem->placement & TTM_PL_FLAG_WC) | ||
| 450 | mem->bus.addr = | ||
| 451 | ioremap_wc(mem->bus.base + mem->bus.offset, | ||
| 452 | mem->bus.size); | ||
| 453 | else | ||
| 454 | mem->bus.addr = | ||
| 455 | ioremap_nocache(mem->bus.base + mem->bus.offset, | ||
| 456 | mem->bus.size); | ||
| 457 | |||
| 458 | /* | ||
| 459 | * Alpha: Use just the bus offset plus | ||
| 460 | * the hose/domain memory base for bus.base. | ||
| 461 | * It then can be used to build PTEs for VRAM | ||
| 462 | * access, as done in ttm_bo_vm_fault(). | ||
| 463 | */ | ||
| 464 | mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + | ||
| 465 | adev->ddev->hose->dense_mem_base; | ||
| 466 | #endif | ||
| 467 | break; | ||
| 468 | default: | ||
| 469 | return -EINVAL; | ||
| 470 | } | ||
| 471 | return 0; | ||
| 472 | } | ||
| 473 | |||
| 474 | static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | ||
| 475 | { | ||
| 476 | } | ||
| 477 | |||
| 478 | /* | ||
| 479 | * TTM backend functions. | ||
| 480 | */ | ||
| 481 | struct amdgpu_ttm_tt { | ||
| 482 | struct ttm_dma_tt ttm; | ||
| 483 | struct amdgpu_device *adev; | ||
| 484 | u64 offset; | ||
| 485 | uint64_t userptr; | ||
| 486 | struct mm_struct *usermm; | ||
| 487 | uint32_t userflags; | ||
| 488 | }; | ||
| 489 | |||
| 490 | /* prepare the sg table with the user pages */ | ||
| 491 | static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) | ||
| 492 | { | ||
| 493 | struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev); | ||
| 494 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
| 495 | unsigned pinned = 0, nents; | ||
| 496 | int r; | ||
| 497 | |||
| 498 | int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); | ||
| 499 | enum dma_data_direction direction = write ? | ||
| 500 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE; | ||
| 501 | |||
| 502 | if (current->mm != gtt->usermm) | ||
| 503 | return -EPERM; | ||
| 504 | |||
| 505 | if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) { | ||
| 506 | /* check that we only pin down anonymous memory | ||
| 507 | to prevent problems with writeback */ | ||
| 508 | unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE; | ||
| 509 | struct vm_area_struct *vma; | ||
| 510 | |||
| 511 | vma = find_vma(gtt->usermm, gtt->userptr); | ||
| 512 | if (!vma || vma->vm_file || vma->vm_end < end) | ||
| 513 | return -EPERM; | ||
| 514 | } | ||
| 515 | |||
| 516 | do { | ||
| 517 | unsigned num_pages = ttm->num_pages - pinned; | ||
| 518 | uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; | ||
| 519 | struct page **pages = ttm->pages + pinned; | ||
| 520 | |||
| 521 | r = get_user_pages(current, current->mm, userptr, num_pages, | ||
| 522 | write, 0, pages, NULL); | ||
| 523 | if (r < 0) | ||
| 524 | goto release_pages; | ||
| 525 | |||
| 526 | pinned += r; | ||
| 527 | |||
| 528 | } while (pinned < ttm->num_pages); | ||
| 529 | |||
| 530 | r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, | ||
| 531 | ttm->num_pages << PAGE_SHIFT, | ||
| 532 | GFP_KERNEL); | ||
| 533 | if (r) | ||
| 534 | goto release_sg; | ||
| 535 | |||
| 536 | r = -ENOMEM; | ||
| 537 | nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); | ||
| 538 | if (nents != ttm->sg->nents) | ||
| 539 | goto release_sg; | ||
| 540 | |||
| 541 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, | ||
| 542 | gtt->ttm.dma_address, ttm->num_pages); | ||
| 543 | |||
| 544 | return 0; | ||
| 545 | |||
| 546 | release_sg: | ||
| 547 | kfree(ttm->sg); | ||
| 548 | |||
| 549 | release_pages: | ||
| 550 | release_pages(ttm->pages, pinned, 0); | ||
| 551 | return r; | ||
| 552 | } | ||
| 553 | |||
| 554 | static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) | ||
| 555 | { | ||
| 556 | struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev); | ||
| 557 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
| 558 | struct scatterlist *sg; | ||
| 559 | int i; | ||
| 560 | |||
| 561 | int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); | ||
| 562 | enum dma_data_direction direction = write ? | ||
| 563 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE; | ||
| 564 | |||
| 565 | /* double check that we don't free the table twice */ | ||
| 566 | if (!ttm->sg->sgl) | ||
| 567 | return; | ||
| 568 | |||
| 569 | /* free the sg table and pages again */ | ||
| 570 | dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); | ||
| 571 | |||
| 572 | for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) { | ||
| 573 | struct page *page = sg_page(sg); | ||
| 574 | |||
| 575 | if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY)) | ||
| 576 | set_page_dirty(page); | ||
| 577 | |||
| 578 | mark_page_accessed(page); | ||
| 579 | page_cache_release(page); | ||
| 580 | } | ||
| 581 | |||
| 582 | sg_free_table(ttm->sg); | ||
| 583 | } | ||
| 584 | |||
| 585 | static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, | ||
| 586 | struct ttm_mem_reg *bo_mem) | ||
| 587 | { | ||
| 588 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
| 589 | uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); | ||
| 590 | int r; | ||
| 591 | |||
| 592 | if (gtt->userptr) | ||
| 593 | amdgpu_ttm_tt_pin_userptr(ttm); | ||
| 594 | |||
| 595 | gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); | ||
| 596 | if (!ttm->num_pages) { | ||
| 597 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", | ||
| 598 | ttm->num_pages, bo_mem, ttm); | ||
| 599 | } | ||
| 600 | |||
| 601 | if (bo_mem->mem_type == AMDGPU_PL_GDS || | ||
| 602 | bo_mem->mem_type == AMDGPU_PL_GWS || | ||
| 603 | bo_mem->mem_type == AMDGPU_PL_OA) | ||
| 604 | return -EINVAL; | ||
| 605 | |||
| 606 | r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, | ||
| 607 | ttm->pages, gtt->ttm.dma_address, flags); | ||
| 608 | |||
| 609 | if (r) { | ||
| 610 | DRM_ERROR("failed to bind %lu pages at 0x%08X\n", | ||
| 611 | ttm->num_pages, (unsigned)gtt->offset); | ||
| 612 | return r; | ||
| 613 | } | ||
| 614 | return 0; | ||
| 615 | } | ||
| 616 | |||
| 617 | static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) | ||
| 618 | { | ||
| 619 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
| 620 | |||
| 621 | /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ | ||
| 622 | if (gtt->adev->gart.ready) | ||
| 623 | amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages); | ||
| 624 | |||
| 625 | if (gtt->userptr) | ||
| 626 | amdgpu_ttm_tt_unpin_userptr(ttm); | ||
| 627 | |||
| 628 | return 0; | ||
| 629 | } | ||
| 630 | |||
| 631 | static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm) | ||
| 632 | { | ||
| 633 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
| 634 | |||
| 635 | ttm_dma_tt_fini(&gtt->ttm); | ||
| 636 | kfree(gtt); | ||
| 637 | } | ||
| 638 | |||
| 639 | static struct ttm_backend_func amdgpu_backend_func = { | ||
| 640 | .bind = &amdgpu_ttm_backend_bind, | ||
| 641 | .unbind = &amdgpu_ttm_backend_unbind, | ||
| 642 | .destroy = &amdgpu_ttm_backend_destroy, | ||
| 643 | }; | ||
| 644 | |||
| 645 | static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev, | ||
| 646 | unsigned long size, uint32_t page_flags, | ||
| 647 | struct page *dummy_read_page) | ||
| 648 | { | ||
| 649 | struct amdgpu_device *adev; | ||
| 650 | struct amdgpu_ttm_tt *gtt; | ||
| 651 | |||
| 652 | adev = amdgpu_get_adev(bdev); | ||
| 653 | |||
| 654 | gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); | ||
| 655 | if (gtt == NULL) { | ||
| 656 | return NULL; | ||
| 657 | } | ||
| 658 | gtt->ttm.ttm.func = &amdgpu_backend_func; | ||
| 659 | gtt->adev = adev; | ||
| 660 | if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) { | ||
| 661 | kfree(gtt); | ||
| 662 | return NULL; | ||
| 663 | } | ||
| 664 | return &gtt->ttm.ttm; | ||
| 665 | } | ||
| 666 | |||
| 667 | static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) | ||
| 668 | { | ||
| 669 | struct amdgpu_device *adev; | ||
| 670 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
| 671 | unsigned i; | ||
| 672 | int r; | ||
| 673 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); | ||
| 674 | |||
| 675 | if (ttm->state != tt_unpopulated) | ||
| 676 | return 0; | ||
| 677 | |||
| 678 | if (gtt && gtt->userptr) { | ||
| 679 | ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL); | ||
| 680 | if (!ttm->sg) | ||
| 681 | return -ENOMEM; | ||
| 682 | |||
| 683 | ttm->page_flags |= TTM_PAGE_FLAG_SG; | ||
| 684 | ttm->state = tt_unbound; | ||
| 685 | return 0; | ||
| 686 | } | ||
| 687 | |||
| 688 | if (slave && ttm->sg) { | ||
| 689 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, | ||
| 690 | gtt->ttm.dma_address, ttm->num_pages); | ||
| 691 | ttm->state = tt_unbound; | ||
| 692 | return 0; | ||
| 693 | } | ||
| 694 | |||
| 695 | adev = amdgpu_get_adev(ttm->bdev); | ||
| 696 | |||
| 697 | #ifdef CONFIG_SWIOTLB | ||
| 698 | if (swiotlb_nr_tbl()) { | ||
| 699 | return ttm_dma_populate(&gtt->ttm, adev->dev); | ||
| 700 | } | ||
| 701 | #endif | ||
| 702 | |||
| 703 | r = ttm_pool_populate(ttm); | ||
| 704 | if (r) { | ||
| 705 | return r; | ||
| 706 | } | ||
| 707 | |||
| 708 | for (i = 0; i < ttm->num_pages; i++) { | ||
| 709 | gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i], | ||
| 710 | 0, PAGE_SIZE, | ||
| 711 | PCI_DMA_BIDIRECTIONAL); | ||
| 712 | if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) { | ||
| 713 | while (i--) { /* unwind all mappings, including page 0; terminates cleanly for i == 0 */ | ||
| 714 | pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i], | ||
| 715 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
| 716 | gtt->ttm.dma_address[i] = 0; | ||
| 717 | } | ||
| 718 | ttm_pool_unpopulate(ttm); | ||
| 719 | return -EFAULT; | ||
| 720 | } | ||
| 721 | } | ||
| 722 | return 0; | ||
| 723 | } | ||
| 724 | |||
| 725 | static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) | ||
| 726 | { | ||
| 727 | struct amdgpu_device *adev; | ||
| 728 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
| 729 | unsigned i; | ||
| 730 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); | ||
| 731 | |||
| 732 | if (gtt && gtt->userptr) { | ||
| 733 | kfree(ttm->sg); | ||
| 734 | ttm->page_flags &= ~TTM_PAGE_FLAG_SG; | ||
| 735 | return; | ||
| 736 | } | ||
| 737 | |||
| 738 | if (slave) | ||
| 739 | return; | ||
| 740 | |||
| 741 | adev = amdgpu_get_adev(ttm->bdev); | ||
| 742 | |||
| 743 | #ifdef CONFIG_SWIOTLB | ||
| 744 | if (swiotlb_nr_tbl()) { | ||
| 745 | ttm_dma_unpopulate(&gtt->ttm, adev->dev); | ||
| 746 | return; | ||
| 747 | } | ||
| 748 | #endif | ||
| 749 | |||
| 750 | for (i = 0; i < ttm->num_pages; i++) { | ||
| 751 | if (gtt->ttm.dma_address[i]) { | ||
| 752 | pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i], | ||
| 753 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
| 754 | } | ||
| 755 | } | ||
| 756 | |||
| 757 | ttm_pool_unpopulate(ttm); | ||
| 758 | } | ||
| 759 | |||
| 760 | int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, | ||
| 761 | uint32_t flags) | ||
| 762 | { | ||
| 763 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
| 764 | |||
| 765 | if (gtt == NULL) | ||
| 766 | return -EINVAL; | ||
| 767 | |||
| 768 | gtt->userptr = addr; | ||
| 769 | gtt->usermm = current->mm; | ||
| 770 | gtt->userflags = flags; | ||
| 771 | return 0; | ||
| 772 | } | ||
| 773 | |||
| 774 | bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm) | ||
| 775 | { | ||
| 776 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
| 777 | |||
| 778 | if (gtt == NULL) | ||
| 779 | return false; | ||
| 780 | |||
| 781 | return !!gtt->userptr; | ||
| 782 | } | ||
| 783 | |||
| 784 | bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) | ||
| 785 | { | ||
| 786 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
| 787 | |||
| 788 | if (gtt == NULL) | ||
| 789 | return false; | ||
| 790 | |||
| 791 | return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); | ||
| 792 | } | ||
| 793 | |||
| 794 | uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, | ||
| 795 | struct ttm_mem_reg *mem) | ||
| 796 | { | ||
| 797 | uint32_t flags = 0; | ||
| 798 | |||
| 799 | if (mem && mem->mem_type != TTM_PL_SYSTEM) | ||
| 800 | flags |= AMDGPU_PTE_VALID; | ||
| 801 | |||
| 802 | if (mem && mem->mem_type == TTM_PL_TT) | ||
| 803 | flags |= AMDGPU_PTE_SYSTEM; | ||
| 804 | |||
| 805 | if (!ttm || ttm->caching_state == tt_cached) | ||
| 806 | flags |= AMDGPU_PTE_SNOOPED; | ||
| 807 | |||
| 808 | if (adev->asic_type >= CHIP_TOPAZ) | ||
| 809 | flags |= AMDGPU_PTE_EXECUTABLE; | ||
| 810 | |||
| 811 | flags |= AMDGPU_PTE_READABLE; | ||
| 812 | |||
| 813 | if (!amdgpu_ttm_tt_is_readonly(ttm)) | ||
| 814 | flags |= AMDGPU_PTE_WRITEABLE; | ||
| 815 | |||
| 816 | return flags; | ||
| 817 | } | ||
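A worked example of the flag computation, assuming the AMDGPU_PTE_* bits keep their meaning from amdgpu.h: a cached, writable buffer resident in GTT on a pre-TOPAZ asic ends up with

	AMDGPU_PTE_VALID	/* mem_type != TTM_PL_SYSTEM */
	| AMDGPU_PTE_SYSTEM	/* mem_type == TTM_PL_TT */
	| AMDGPU_PTE_SNOOPED	/* caching_state == tt_cached */
	| AMDGPU_PTE_READABLE	/* always set */
	| AMDGPU_PTE_WRITEABLE	/* not a read-only userptr */

while TOPAZ and newer parts additionally OR in AMDGPU_PTE_EXECUTABLE.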
| 818 | |||
| 819 | static struct ttm_bo_driver amdgpu_bo_driver = { | ||
| 820 | .ttm_tt_create = &amdgpu_ttm_tt_create, | ||
| 821 | .ttm_tt_populate = &amdgpu_ttm_tt_populate, | ||
| 822 | .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, | ||
| 823 | .invalidate_caches = &amdgpu_invalidate_caches, | ||
| 824 | .init_mem_type = &amdgpu_init_mem_type, | ||
| 825 | .evict_flags = &amdgpu_evict_flags, | ||
| 826 | .move = &amdgpu_bo_move, | ||
| 827 | .verify_access = &amdgpu_verify_access, | ||
| 828 | .move_notify = &amdgpu_bo_move_notify, | ||
| 829 | .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, | ||
| 830 | .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, | ||
| 831 | .io_mem_free = &amdgpu_ttm_io_mem_free, | ||
| 832 | }; | ||
| 833 | |||
| 834 | int amdgpu_ttm_init(struct amdgpu_device *adev) | ||
| 835 | { | ||
| 836 | int r; | ||
| 837 | |||
| 838 | r = amdgpu_ttm_global_init(adev); | ||
| 839 | if (r) { | ||
| 840 | return r; | ||
| 841 | } | ||
| 842 | /* No other users of this address space, so set it to 0 */ | ||
| 843 | r = ttm_bo_device_init(&adev->mman.bdev, | ||
| 844 | adev->mman.bo_global_ref.ref.object, | ||
| 845 | &amdgpu_bo_driver, | ||
| 846 | adev->ddev->anon_inode->i_mapping, | ||
| 847 | DRM_FILE_PAGE_OFFSET, | ||
| 848 | adev->need_dma32); | ||
| 849 | if (r) { | ||
| 850 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); | ||
| 851 | return r; | ||
| 852 | } | ||
| 853 | adev->mman.initialized = true; | ||
| 854 | r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, | ||
| 855 | adev->mc.real_vram_size >> PAGE_SHIFT); | ||
| 856 | if (r) { | ||
| 857 | DRM_ERROR("Failed initializing VRAM heap.\n"); | ||
| 858 | return r; | ||
| 859 | } | ||
| 860 | /* Change the size here instead of the init above so only lpfn is affected */ | ||
| 861 | amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); | ||
| 862 | |||
| 863 | r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true, | ||
| 864 | AMDGPU_GEM_DOMAIN_VRAM, 0, | ||
| 865 | NULL, &adev->stollen_vga_memory); | ||
| 866 | if (r) { | ||
| 867 | return r; | ||
| 868 | } | ||
| 869 | r = amdgpu_bo_reserve(adev->stollen_vga_memory, false); | ||
| 870 | if (r) | ||
| 871 | return r; | ||
| 872 | r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL); | ||
| 873 | amdgpu_bo_unreserve(adev->stollen_vga_memory); | ||
| 874 | if (r) { | ||
| 875 | amdgpu_bo_unref(&adev->stollen_vga_memory); | ||
| 876 | return r; | ||
| 877 | } | ||
| 878 | DRM_INFO("amdgpu: %uM of VRAM memory ready\n", | ||
| 879 | (unsigned) (adev->mc.real_vram_size / (1024 * 1024))); | ||
| 880 | r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, | ||
| 881 | adev->mc.gtt_size >> PAGE_SHIFT); | ||
| 882 | if (r) { | ||
| 883 | DRM_ERROR("Failed initializing GTT heap.\n"); | ||
| 884 | return r; | ||
| 885 | } | ||
| 886 | DRM_INFO("amdgpu: %uM of GTT memory ready.\n", | ||
| 887 | (unsigned)(adev->mc.gtt_size / (1024 * 1024))); | ||
| 888 | |||
| 889 | adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT; | ||
| 890 | adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT; | ||
| 891 | adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT; | ||
| 892 | adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT; | ||
| 893 | adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT; | ||
| 894 | adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT; | ||
| 895 | adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT; | ||
| 896 | adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT; | ||
| 897 | adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT; | ||
| 898 | /* GDS Memory */ | ||
| 899 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS, | ||
| 900 | adev->gds.mem.total_size >> PAGE_SHIFT); | ||
| 901 | if (r) { | ||
| 902 | DRM_ERROR("Failed initializing GDS heap.\n"); | ||
| 903 | return r; | ||
| 904 | } | ||
| 905 | |||
| 906 | /* GWS */ | ||
| 907 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS, | ||
| 908 | adev->gds.gws.total_size >> PAGE_SHIFT); | ||
| 909 | if (r) { | ||
| 910 | DRM_ERROR("Failed initializing gws heap.\n"); | ||
| 911 | return r; | ||
| 912 | } | ||
| 913 | |||
| 914 | /* OA */ | ||
| 915 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA, | ||
| 916 | adev->gds.oa.total_size >> PAGE_SHIFT); | ||
| 917 | if (r) { | ||
| 918 | DRM_ERROR("Failed initializing oa heap.\n"); | ||
| 919 | return r; | ||
| 920 | } | ||
| 921 | |||
| 922 | r = amdgpu_ttm_debugfs_init(adev); | ||
| 923 | if (r) { | ||
| 924 | DRM_ERROR("Failed to init debugfs\n"); | ||
| 925 | return r; | ||
| 926 | } | ||
| 927 | return 0; | ||
| 928 | } | ||
| 929 | |||
| 930 | void amdgpu_ttm_fini(struct amdgpu_device *adev) | ||
| 931 | { | ||
| 932 | int r; | ||
| 933 | |||
| 934 | if (!adev->mman.initialized) | ||
| 935 | return; | ||
| 936 | amdgpu_ttm_debugfs_fini(adev); | ||
| 937 | if (adev->stollen_vga_memory) { | ||
| 938 | r = amdgpu_bo_reserve(adev->stollen_vga_memory, false); | ||
| 939 | if (r == 0) { | ||
| 940 | amdgpu_bo_unpin(adev->stollen_vga_memory); | ||
| 941 | amdgpu_bo_unreserve(adev->stollen_vga_memory); | ||
| 942 | } | ||
| 943 | amdgpu_bo_unref(&adev->stollen_vga_memory); | ||
| 944 | } | ||
| 945 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); | ||
| 946 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); | ||
| 947 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS); | ||
| 948 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS); | ||
| 949 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); | ||
| 950 | ttm_bo_device_release(&adev->mman.bdev); | ||
| 951 | amdgpu_gart_fini(adev); | ||
| 952 | amdgpu_ttm_global_fini(adev); | ||
| 953 | adev->mman.initialized = false; | ||
| 954 | DRM_INFO("amdgpu: ttm finalized\n"); | ||
| 955 | } | ||
| 956 | |||
| 957 | /* this should only be called at bootup or when userspace | ||
| 958 | * isn't running */ | ||
| 959 | void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size) | ||
| 960 | { | ||
| 961 | struct ttm_mem_type_manager *man; | ||
| 962 | |||
| 963 | if (!adev->mman.initialized) | ||
| 964 | return; | ||
| 965 | |||
| 966 | man = &adev->mman.bdev.man[TTM_PL_VRAM]; | ||
| 967 | /* this just adjusts TTM's idea of the size, which in turn clamps lpfn to the right value */ | ||
| 968 | man->size = size >> PAGE_SHIFT; | ||
| 969 | } | ||
| 970 | |||
| 971 | static struct vm_operations_struct amdgpu_ttm_vm_ops; | ||
| 972 | static const struct vm_operations_struct *ttm_vm_ops = NULL; | ||
| 973 | |||
| 974 | static int amdgpu_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
| 975 | { | ||
| 976 | struct ttm_buffer_object *bo; | ||
| 977 | struct amdgpu_device *adev; | ||
| 978 | int r; | ||
| 979 | |||
| 980 | bo = (struct ttm_buffer_object *)vma->vm_private_data; | ||
| 981 | if (bo == NULL) { | ||
| 982 | return VM_FAULT_NOPAGE; | ||
| 983 | } | ||
| 984 | adev = amdgpu_get_adev(bo->bdev); | ||
| 985 | down_read(&adev->pm.mclk_lock); | ||
| 986 | r = ttm_vm_ops->fault(vma, vmf); | ||
| 987 | up_read(&adev->pm.mclk_lock); | ||
| 988 | return r; | ||
| 989 | } | ||
| 990 | |||
| 991 | int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) | ||
| 992 | { | ||
| 993 | struct drm_file *file_priv; | ||
| 994 | struct amdgpu_device *adev; | ||
| 995 | int r; | ||
| 996 | |||
| 997 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { | ||
| 998 | return -EINVAL; | ||
| 999 | } | ||
| 1000 | |||
| 1001 | file_priv = filp->private_data; | ||
| 1002 | adev = file_priv->minor->dev->dev_private; | ||
| 1003 | if (adev == NULL) { | ||
| 1004 | return -EINVAL; | ||
| 1005 | } | ||
| 1006 | r = ttm_bo_mmap(filp, vma, &adev->mman.bdev); | ||
| 1007 | if (unlikely(r != 0)) { | ||
| 1008 | return r; | ||
| 1009 | } | ||
| 1010 | if (unlikely(ttm_vm_ops == NULL)) { | ||
| 1011 | ttm_vm_ops = vma->vm_ops; | ||
| 1012 | amdgpu_ttm_vm_ops = *ttm_vm_ops; | ||
| 1013 | amdgpu_ttm_vm_ops.fault = &amdgpu_ttm_fault; | ||
| 1014 | } | ||
| 1015 | vma->vm_ops = &amdgpu_ttm_vm_ops; | ||
| 1016 | return 0; | ||
| 1017 | } | ||
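A minimal userspace-side sketch of the mapping this path serves; bo_offset and bo_size are placeholders, with the real fake offset coming from the driver's GEM mmap query, and anything below DRM_FILE_PAGE_OFFSET is rejected above:

	#include <sys/mman.h>

	/* drm_fd: open()ed DRM device node; bo_offset: fake mmap offset the
	 * kernel returned for a GEM buffer object (assumed here). */
	void *map = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, drm_fd, bo_offset);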
| 1018 | |||
| 1019 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, | ||
| 1020 | uint64_t src_offset, | ||
| 1021 | uint64_t dst_offset, | ||
| 1022 | uint32_t byte_count, | ||
| 1023 | struct reservation_object *resv, | ||
| 1024 | struct amdgpu_fence **fence) | ||
| 1025 | { | ||
| 1026 | struct amdgpu_device *adev = ring->adev; | ||
| 1027 | struct amdgpu_sync sync; | ||
| 1028 | uint32_t max_bytes; | ||
| 1029 | unsigned num_loops, num_dw; | ||
| 1030 | unsigned i; | ||
| 1031 | int r; | ||
| 1032 | |||
| 1033 | /* sync other rings */ | ||
| 1034 | amdgpu_sync_create(&sync); | ||
| 1035 | if (resv) { | ||
| 1036 | r = amdgpu_sync_resv(adev, &sync, resv, false); | ||
| 1037 | if (r) { | ||
| 1038 | DRM_ERROR("sync failed (%d).\n", r); | ||
| 1039 | amdgpu_sync_free(adev, &sync, NULL); | ||
| 1040 | return r; | ||
| 1041 | } | ||
| 1042 | } | ||
| 1043 | |||
| 1044 | max_bytes = adev->mman.buffer_funcs->copy_max_bytes; | ||
| 1045 | num_loops = DIV_ROUND_UP(byte_count, max_bytes); | ||
| 1046 | num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw; | ||
| 1047 | |||
| 1048 | /* for fence and sync */ | ||
| 1049 | num_dw += 64 + AMDGPU_NUM_SYNCS * 8; | ||
| 1050 | |||
| 1051 | r = amdgpu_ring_lock(ring, num_dw); | ||
| 1052 | if (r) { | ||
| 1053 | DRM_ERROR("ring lock failed (%d).\n", r); | ||
| 1054 | amdgpu_sync_free(adev, &sync, NULL); | ||
| 1055 | return r; | ||
| 1056 | } | ||
| 1057 | |||
| 1058 | amdgpu_sync_rings(&sync, ring); | ||
| 1059 | |||
| 1060 | for (i = 0; i < num_loops; i++) { | ||
| 1061 | uint32_t cur_size_in_bytes = min(byte_count, max_bytes); | ||
| 1062 | |||
| 1063 | amdgpu_emit_copy_buffer(adev, ring, src_offset, dst_offset, | ||
| 1064 | cur_size_in_bytes); | ||
| 1065 | |||
| 1066 | src_offset += cur_size_in_bytes; | ||
| 1067 | dst_offset += cur_size_in_bytes; | ||
| 1068 | byte_count -= cur_size_in_bytes; | ||
| 1069 | } | ||
| 1070 | |||
| 1071 | r = amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_MOVE, fence); | ||
| 1072 | if (r) { | ||
| 1073 | amdgpu_ring_unlock_undo(ring); | ||
| 1074 | amdgpu_sync_free(adev, &sync, NULL); | ||
| 1075 | return r; | ||
| 1076 | } | ||
| 1077 | |||
| 1078 | amdgpu_ring_unlock_commit(ring); | ||
| 1079 | amdgpu_sync_free(adev, &sync, *fence); | ||
| 1080 | |||
| 1081 | return 0; | ||
| 1082 | } | ||
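amdgpu_move_blit() above is the in-tree caller; the calling convention in brief (offsets are GPU MC addresses, and the returned fence signals completion of the SDMA copy):

	struct amdgpu_fence *fence;
	int r;

	r = amdgpu_copy_buffer(adev->mman.buffer_funcs_ring,
			       old_start, new_start,
			       new_mem->num_pages * PAGE_SIZE, /* bytes */
			       bo->resv, &fence);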
| 1083 | |||
| 1084 | #if defined(CONFIG_DEBUG_FS) | ||
| 1085 | |||
| 1086 | static int amdgpu_mm_dump_table(struct seq_file *m, void *data) | ||
| 1087 | { | ||
| 1088 | struct drm_info_node *node = (struct drm_info_node *)m->private; | ||
| 1089 | unsigned ttm_pl = *(int *)node->info_ent->data; | ||
| 1090 | struct drm_device *dev = node->minor->dev; | ||
| 1091 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1092 | struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv; | ||
| 1093 | int ret; | ||
| 1094 | struct ttm_bo_global *glob = adev->mman.bdev.glob; | ||
| 1095 | |||
| 1096 | spin_lock(&glob->lru_lock); | ||
| 1097 | ret = drm_mm_dump_table(m, mm); | ||
| 1098 | spin_unlock(&glob->lru_lock); | ||
| 1099 | return ret; | ||
| 1100 | } | ||
| 1101 | |||
| 1102 | static int ttm_pl_vram = TTM_PL_VRAM; | ||
| 1103 | static int ttm_pl_tt = TTM_PL_TT; | ||
| 1104 | |||
| 1105 | static struct drm_info_list amdgpu_ttm_debugfs_list[] = { | ||
| 1106 | {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram}, | ||
| 1107 | {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt}, | ||
| 1108 | {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, | ||
| 1109 | #ifdef CONFIG_SWIOTLB | ||
| 1110 | {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL} | ||
| 1111 | #endif | ||
| 1112 | }; | ||
| 1113 | |||
| 1114 | static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, | ||
| 1115 | size_t size, loff_t *pos) | ||
| 1116 | { | ||
| 1117 | struct amdgpu_device *adev = f->f_inode->i_private; | ||
| 1118 | ssize_t result = 0; | ||
| 1119 | int r; | ||
| 1120 | |||
| 1121 | if (size & 0x3 || *pos & 0x3) | ||
| 1122 | return -EINVAL; | ||
| 1123 | |||
| 1124 | while (size) { | ||
| 1125 | unsigned long flags; | ||
| 1126 | uint32_t value; | ||
| 1127 | |||
| 1128 | if (*pos >= adev->mc.mc_vram_size) | ||
| 1129 | return result; | ||
| 1130 | |||
| 1131 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); | ||
| 1132 | WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); | ||
| 1133 | WREG32(mmMM_INDEX_HI, *pos >> 31); | ||
| 1134 | value = RREG32(mmMM_DATA); | ||
| 1135 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); | ||
| 1136 | |||
| 1137 | r = put_user(value, (uint32_t *)buf); | ||
| 1138 | if (r) | ||
| 1139 | return r; | ||
| 1140 | |||
| 1141 | result += 4; | ||
| 1142 | buf += 4; | ||
| 1143 | *pos += 4; | ||
| 1144 | size -= 4; | ||
| 1145 | } | ||
| 1146 | |||
| 1147 | return result; | ||
| 1148 | } | ||
| 1149 | |||
| 1150 | static const struct file_operations amdgpu_ttm_vram_fops = { | ||
| 1151 | .owner = THIS_MODULE, | ||
| 1152 | .read = amdgpu_ttm_vram_read, | ||
| 1153 | .llseek = default_llseek | ||
| 1154 | }; | ||
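The VRAM reader above goes through the MM_INDEX/MM_DATA indirect window rather than the CPU-visible aperture, so it can reach all of VRAM and not just the visible slice. Bit 31 of MM_INDEX selects the indirect path, which is why only 31 address bits live there and the remainder goes to MM_INDEX_HI; the byte address the hardware decodes is presumably assembled as

	/* matches WREG32(mmMM_INDEX, pos | 0x80000000) and
	 * WREG32(mmMM_INDEX_HI, pos >> 31) in amdgpu_ttm_vram_read() */
	u64 addr = ((u64)mm_index_hi << 31) | (mm_index & 0x7fffffff);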
| 1155 | |||
| 1156 | static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf, | ||
| 1157 | size_t size, loff_t *pos) | ||
| 1158 | { | ||
| 1159 | struct amdgpu_device *adev = f->f_inode->i_private; | ||
| 1160 | ssize_t result = 0; | ||
| 1161 | int r; | ||
| 1162 | |||
| 1163 | while (size) { | ||
| 1164 | loff_t p = *pos / PAGE_SIZE; | ||
| 1165 | unsigned off = *pos & ~PAGE_MASK; | ||
| 1166 | size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); | ||
| 1167 | struct page *page; | ||
| 1168 | void *ptr; | ||
| 1169 | |||
| 1170 | if (p >= adev->gart.num_cpu_pages) | ||
| 1171 | return result; | ||
| 1172 | |||
| 1173 | page = adev->gart.pages[p]; | ||
| 1174 | if (page) { | ||
| 1175 | ptr = kmap(page); | ||
| 1176 | ptr += off; | ||
| 1177 | |||
| 1178 | r = copy_to_user(buf, ptr, cur_size); | ||
| 1179 | kunmap(adev->gart.pages[p]); | ||
| 1180 | } else | ||
| 1181 | r = clear_user(buf, cur_size); | ||
| 1182 | |||
| 1183 | if (r) | ||
| 1184 | return -EFAULT; | ||
| 1185 | |||
| 1186 | result += cur_size; | ||
| 1187 | buf += cur_size; | ||
| 1188 | *pos += cur_size; | ||
| 1189 | size -= cur_size; | ||
| 1190 | } | ||
| 1191 | |||
| 1192 | return result; | ||
| 1193 | } | ||
| 1194 | |||
| 1195 | static const struct file_operations amdgpu_ttm_gtt_fops = { | ||
| 1196 | .owner = THIS_MODULE, | ||
| 1197 | .read = amdgpu_ttm_gtt_read, | ||
| 1198 | .llseek = default_llseek | ||
| 1199 | }; | ||
| 1200 | |||
| 1201 | #endif | ||
| 1202 | |||
| 1203 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) | ||
| 1204 | { | ||
| 1205 | #if defined(CONFIG_DEBUG_FS) | ||
| 1206 | unsigned count; | ||
| 1207 | |||
| 1208 | struct drm_minor *minor = adev->ddev->primary; | ||
| 1209 | struct dentry *ent, *root = minor->debugfs_root; | ||
| 1210 | |||
| 1211 | ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root, | ||
| 1212 | adev, &amdgpu_ttm_vram_fops); | ||
| 1213 | if (IS_ERR(ent)) | ||
| 1214 | return PTR_ERR(ent); | ||
| 1215 | i_size_write(ent->d_inode, adev->mc.mc_vram_size); | ||
| 1216 | adev->mman.vram = ent; | ||
| 1217 | |||
| 1218 | ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root, | ||
| 1219 | adev, &amdgpu_ttm_gtt_fops); | ||
| 1220 | if (IS_ERR(ent)) | ||
| 1221 | return PTR_ERR(ent); | ||
| 1222 | i_size_write(ent->d_inode, adev->mc.gtt_size); | ||
| 1223 | adev->mman.gtt = ent; | ||
| 1224 | |||
| 1225 | count = ARRAY_SIZE(amdgpu_ttm_debugfs_list); | ||
| 1226 | |||
| 1227 | #ifdef CONFIG_SWIOTLB | ||
| 1228 | if (!swiotlb_nr_tbl()) | ||
| 1229 | --count; | ||
| 1230 | #endif | ||
| 1231 | |||
| 1232 | return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count); | ||
| 1233 | #else | ||
| 1234 | |||
| 1235 | return 0; | ||
| 1236 | #endif | ||
| 1237 | } | ||
| 1238 | |||
| 1239 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) | ||
| 1240 | { | ||
| 1241 | #if defined(CONFIG_DEBUG_FS) | ||
| 1242 | |||
| 1243 | debugfs_remove(adev->mman.vram); | ||
| 1244 | adev->mman.vram = NULL; | ||
| 1245 | |||
| 1246 | debugfs_remove(adev->mman.gtt); | ||
| 1247 | adev->mman.gtt = NULL; | ||
| 1248 | #endif | ||
| 1249 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c new file mode 100644 index 000000000000..482e66797ae6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | |||
| @@ -0,0 +1,317 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/firmware.h> | ||
| 25 | #include <linux/slab.h> | ||
| 26 | #include <linux/module.h> | ||
| 27 | #include <drm/drmP.h> | ||
| 28 | #include "amdgpu.h" | ||
| 29 | #include "amdgpu_ucode.h" | ||
| 30 | |||
| 31 | static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr) | ||
| 32 | { | ||
| 33 | DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes)); | ||
| 34 | DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes)); | ||
| 35 | DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major)); | ||
| 36 | DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor)); | ||
| 37 | DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major)); | ||
| 38 | DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor)); | ||
| 39 | DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version)); | ||
| 40 | DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes)); | ||
| 41 | DRM_DEBUG("ucode_array_offset_bytes: %u\n", | ||
| 42 | le32_to_cpu(hdr->ucode_array_offset_bytes)); | ||
| 43 | DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32)); | ||
| 44 | } | ||
| 45 | |||
| 46 | void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr) | ||
| 47 | { | ||
| 48 | uint16_t version_major = le16_to_cpu(hdr->header_version_major); | ||
| 49 | uint16_t version_minor = le16_to_cpu(hdr->header_version_minor); | ||
| 50 | |||
| 51 | DRM_DEBUG("MC\n"); | ||
| 52 | amdgpu_ucode_print_common_hdr(hdr); | ||
| 53 | |||
| 54 | if (version_major == 1) { | ||
| 55 | const struct mc_firmware_header_v1_0 *mc_hdr = | ||
| 56 | container_of(hdr, struct mc_firmware_header_v1_0, header); | ||
| 57 | |||
| 58 | DRM_DEBUG("io_debug_size_bytes: %u\n", | ||
| 59 | le32_to_cpu(mc_hdr->io_debug_size_bytes)); | ||
| 60 | DRM_DEBUG("io_debug_array_offset_bytes: %u\n", | ||
| 61 | le32_to_cpu(mc_hdr->io_debug_array_offset_bytes)); | ||
| 62 | } else { | ||
| 63 | DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor); | ||
| 64 | } | ||
| 65 | } | ||
| 66 | |||
| 67 | void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr) | ||
| 68 | { | ||
| 69 | uint16_t version_major = le16_to_cpu(hdr->header_version_major); | ||
| 70 | uint16_t version_minor = le16_to_cpu(hdr->header_version_minor); | ||
| 71 | |||
| 72 | DRM_DEBUG("SMC\n"); | ||
| 73 | amdgpu_ucode_print_common_hdr(hdr); | ||
| 74 | |||
| 75 | if (version_major == 1) { | ||
| 76 | const struct smc_firmware_header_v1_0 *smc_hdr = | ||
| 77 | container_of(hdr, struct smc_firmware_header_v1_0, header); | ||
| 78 | |||
| 79 | DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr)); | ||
| 80 | } else { | ||
| 81 | DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor); | ||
| 82 | } | ||
| 83 | } | ||
| 84 | |||
| 85 | void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr) | ||
| 86 | { | ||
| 87 | uint16_t version_major = le16_to_cpu(hdr->header_version_major); | ||
| 88 | uint16_t version_minor = le16_to_cpu(hdr->header_version_minor); | ||
| 89 | |||
| 90 | DRM_DEBUG("GFX\n"); | ||
| 91 | amdgpu_ucode_print_common_hdr(hdr); | ||
| 92 | |||
| 93 | if (version_major == 1) { | ||
| 94 | const struct gfx_firmware_header_v1_0 *gfx_hdr = | ||
| 95 | container_of(hdr, struct gfx_firmware_header_v1_0, header); | ||
| 96 | |||
| 97 | DRM_DEBUG("ucode_feature_version: %u\n", | ||
| 98 | le32_to_cpu(gfx_hdr->ucode_feature_version)); | ||
| 99 | DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset)); | ||
| 100 | DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size)); | ||
| 101 | } else { | ||
| 102 | DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor); | ||
| 103 | } | ||
| 104 | } | ||
| 105 | |||
| 106 | void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr) | ||
| 107 | { | ||
| 108 | uint16_t version_major = le16_to_cpu(hdr->header_version_major); | ||
| 109 | uint16_t version_minor = le16_to_cpu(hdr->header_version_minor); | ||
| 110 | |||
| 111 | DRM_DEBUG("RLC\n"); | ||
| 112 | amdgpu_ucode_print_common_hdr(hdr); | ||
| 113 | |||
| 114 | if (version_major == 1) { | ||
| 115 | const struct rlc_firmware_header_v1_0 *rlc_hdr = | ||
| 116 | container_of(hdr, struct rlc_firmware_header_v1_0, header); | ||
| 117 | |||
| 118 | DRM_DEBUG("ucode_feature_version: %u\n", | ||
| 119 | le32_to_cpu(rlc_hdr->ucode_feature_version)); | ||
| 120 | DRM_DEBUG("save_and_restore_offset: %u\n", | ||
| 121 | le32_to_cpu(rlc_hdr->save_and_restore_offset)); | ||
| 122 | DRM_DEBUG("clear_state_descriptor_offset: %u\n", | ||
| 123 | le32_to_cpu(rlc_hdr->clear_state_descriptor_offset)); | ||
| 124 | DRM_DEBUG("avail_scratch_ram_locations: %u\n", | ||
| 125 | le32_to_cpu(rlc_hdr->avail_scratch_ram_locations)); | ||
| 126 | DRM_DEBUG("master_pkt_description_offset: %u\n", | ||
| 127 | le32_to_cpu(rlc_hdr->master_pkt_description_offset)); | ||
| 128 | } else if (version_major == 2) { | ||
| 129 | const struct rlc_firmware_header_v2_0 *rlc_hdr = | ||
| 130 | container_of(hdr, struct rlc_firmware_header_v2_0, header); | ||
| 131 | |||
| 132 | DRM_DEBUG("ucode_feature_version: %u\n", | ||
| 133 | le32_to_cpu(rlc_hdr->ucode_feature_version)); | ||
| 134 | DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset)); | ||
| 135 | DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size)); | ||
| 136 | DRM_DEBUG("save_and_restore_offset: %u\n", | ||
| 137 | le32_to_cpu(rlc_hdr->save_and_restore_offset)); | ||
| 138 | DRM_DEBUG("clear_state_descriptor_offset: %u\n", | ||
| 139 | le32_to_cpu(rlc_hdr->clear_state_descriptor_offset)); | ||
| 140 | DRM_DEBUG("avail_scratch_ram_locations: %u\n", | ||
| 141 | le32_to_cpu(rlc_hdr->avail_scratch_ram_locations)); | ||
| 142 | DRM_DEBUG("reg_restore_list_size: %u\n", | ||
| 143 | le32_to_cpu(rlc_hdr->reg_restore_list_size)); | ||
| 144 | DRM_DEBUG("reg_list_format_start: %u\n", | ||
| 145 | le32_to_cpu(rlc_hdr->reg_list_format_start)); | ||
| 146 | DRM_DEBUG("reg_list_format_separate_start: %u\n", | ||
| 147 | le32_to_cpu(rlc_hdr->reg_list_format_separate_start)); | ||
| 148 | DRM_DEBUG("starting_offsets_start: %u\n", | ||
| 149 | le32_to_cpu(rlc_hdr->starting_offsets_start)); | ||
| 150 | DRM_DEBUG("reg_list_format_size_bytes: %u\n", | ||
| 151 | le32_to_cpu(rlc_hdr->reg_list_format_size_bytes)); | ||
| 152 | DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n", | ||
| 153 | le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); | ||
| 154 | DRM_DEBUG("reg_list_size_bytes: %u\n", | ||
| 155 | le32_to_cpu(rlc_hdr->reg_list_size_bytes)); | ||
| 156 | DRM_DEBUG("reg_list_array_offset_bytes: %u\n", | ||
| 157 | le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); | ||
| 158 | DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n", | ||
| 159 | le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes)); | ||
| 160 | DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n", | ||
| 161 | le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes)); | ||
| 162 | DRM_DEBUG("reg_list_separate_size_bytes: %u\n", | ||
| 163 | le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes)); | ||
| 164 | DRM_DEBUG("reg_list_separate_size_bytes: %u\n", | ||
| 165 | le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes)); | ||
| 166 | } else { | ||
| 167 | DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor); | ||
| 168 | } | ||
| 169 | } | ||
| 170 | |||
| 171 | void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr) | ||
| 172 | { | ||
| 173 | uint16_t version_major = le16_to_cpu(hdr->header_version_major); | ||
| 174 | uint16_t version_minor = le16_to_cpu(hdr->header_version_minor); | ||
| 175 | |||
| 176 | DRM_DEBUG("SDMA\n"); | ||
| 177 | amdgpu_ucode_print_common_hdr(hdr); | ||
| 178 | |||
| 179 | if (version_major == 1) { | ||
| 180 | const struct sdma_firmware_header_v1_0 *sdma_hdr = | ||
| 181 | container_of(hdr, struct sdma_firmware_header_v1_0, header); | ||
| 182 | |||
| 183 | DRM_DEBUG("ucode_feature_version: %u\n", | ||
| 184 | le32_to_cpu(sdma_hdr->ucode_feature_version)); | ||
| 185 | DRM_DEBUG("ucode_change_version: %u\n", | ||
| 186 | le32_to_cpu(sdma_hdr->ucode_change_version)); | ||
| 187 | DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset)); | ||
| 188 | DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size)); | ||
| 189 | if (version_minor >= 1) { | ||
| 190 | const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr = | ||
| 191 | container_of(sdma_hdr, struct sdma_firmware_header_v1_1, v1_0); | ||
| 192 | DRM_DEBUG("digest_size: %u\n", le32_to_cpu(sdma_v1_1_hdr->digest_size)); | ||
| 193 | } | ||
| 194 | } else { | ||
| 195 | DRM_ERROR("Unknown SDMA ucode version: %u.%u\n", | ||
| 196 | version_major, version_minor); | ||
| 197 | } | ||
| 198 | } | ||
| 199 | |||
| 200 | int amdgpu_ucode_validate(const struct firmware *fw) | ||
| 201 | { | ||
| 202 | const struct common_firmware_header *hdr = | ||
| 203 | (const struct common_firmware_header *)fw->data; | ||
| 204 | |||
| 205 | if (fw->size == le32_to_cpu(hdr->size_bytes)) | ||
| 206 | return 0; | ||
| 207 | |||
| 208 | return -EINVAL; | ||
| 209 | } | ||
| 210 | |||
| | /* returns true if the blob's header version differs from the given one */ | ||
| 211 | bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr, | ||
| 212 | uint16_t hdr_major, uint16_t hdr_minor) | ||
| 213 | { | ||
| 214 | if ((le16_to_cpu(hdr->common.header_version_major) == hdr_major) && | ||
| 215 | (le16_to_cpu(hdr->common.header_version_minor) == hdr_minor)) | ||
| 216 | return false; | ||
| 217 | return true; | ||
| 218 | } | ||
| 219 | |||
| 220 | static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode, | ||
| 221 | uint64_t mc_addr, void *kptr) | ||
| 222 | { | ||
| 223 | const struct common_firmware_header *header = NULL; | ||
| 224 | |||
| 225 | if (!ucode->fw) | ||
| 226 | return 0; | ||
| 227 | |||
| 228 | ucode->mc_addr = mc_addr; | ||
| 229 | ucode->kaddr = kptr; | ||
| 230 | |||
| 231 | header = (const struct common_firmware_header *)ucode->fw->data; | ||
| 232 | memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + | ||
| 233 | le32_to_cpu(header->ucode_array_offset_bytes)), | ||
| 234 | le32_to_cpu(header->ucode_size_bytes)); | ||
| 235 | |||
| 236 | return 0; | ||
| 237 | } | ||
| 238 | |||
| 239 | int amdgpu_ucode_init_bo(struct amdgpu_device *adev) | ||
| 240 | { | ||
| 241 | struct amdgpu_bo **bo = &adev->firmware.fw_buf; | ||
| 242 | uint64_t fw_mc_addr; | ||
| 243 | void *fw_buf_ptr = NULL; | ||
| 244 | uint64_t fw_offset = 0; | ||
| 245 | int i, err; | ||
| 246 | struct amdgpu_firmware_info *ucode = NULL; | ||
| 247 | const struct common_firmware_header *header = NULL; | ||
| 248 | |||
| 249 | err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, | ||
| 250 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo); | ||
| 251 | if (err) { | ||
| 252 | dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); | ||
| 253 | err = -ENOMEM; | ||
| 254 | goto failed; | ||
| 255 | } | ||
| 256 | |||
| 257 | err = amdgpu_bo_reserve(*bo, false); | ||
| 258 | if (err) { | ||
| 259 | amdgpu_bo_unref(bo); | ||
| 260 | dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err); | ||
| 261 | goto failed; | ||
| 262 | } | ||
| 263 | |||
| 264 | err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr); | ||
| 265 | if (err) { | ||
| 266 | amdgpu_bo_unreserve(*bo); | ||
| 267 | amdgpu_bo_unref(bo); | ||
| 268 | dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err); | ||
| 269 | goto failed; | ||
| 270 | } | ||
| 271 | |||
| 272 | err = amdgpu_bo_kmap(*bo, &fw_buf_ptr); | ||
| 273 | if (err) { | ||
| 274 | dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err); | ||
| 275 | amdgpu_bo_unpin(*bo); | ||
| 276 | amdgpu_bo_unreserve(*bo); | ||
| 277 | amdgpu_bo_unref(bo); | ||
| 278 | goto failed; | ||
| 279 | } | ||
| 280 | |||
| 281 | amdgpu_bo_unreserve(*bo); | ||
| 282 | |||
| 283 | fw_offset = 0; | ||
| 284 | for (i = 0; i < AMDGPU_UCODE_ID_MAXIMUM; i++) { | ||
| 285 | ucode = &adev->firmware.ucode[i]; | ||
| 286 | if (ucode->fw) { | ||
| 287 | header = (const struct common_firmware_header *)ucode->fw->data; | ||
| 288 | amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset, | ||
| 289 | fw_buf_ptr + fw_offset); | ||
| 290 | fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | ||
| 291 | } | ||
| 292 | } | ||
| 293 | |||
| 294 | failed: | ||
| 295 | if (err) | ||
| 296 | adev->firmware.smu_load = false; | ||
| 297 | |||
| 298 | return err; | ||
| 299 | } | ||
| 300 | |||
| 301 | int amdgpu_ucode_fini_bo(struct amdgpu_device *adev) | ||
| 302 | { | ||
| 303 | int i; | ||
| 304 | struct amdgpu_firmware_info *ucode = NULL; | ||
| 305 | |||
| 306 | for (i = 0; i < AMDGPU_UCODE_ID_MAXIMUM; i++) { | ||
| 307 | ucode = &adev->firmware.ucode[i]; | ||
| 308 | if (ucode->fw) { | ||
| 309 | ucode->mc_addr = 0; | ||
| 310 | ucode->kaddr = NULL; | ||
| 311 | } | ||
| 312 | } | ||
| 313 | amdgpu_bo_unref(&adev->firmware.fw_buf); | ||
| 314 | adev->firmware.fw_buf = NULL; | ||
| 315 | |||
| 316 | return 0; | ||
| 317 | } | ||
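A note on the validation above: `amdgpu_ucode_validate()` only checks that the firmware file length matches the `size_bytes` field of the common header; it does not verify the `crc32` field or walk the payload. The sketch below performs the same length test standalone and then locates the ucode image via `ucode_array_offset_bytes`. It is illustrative only: `check_fw()` and `struct common_fw_hdr` are hypothetical names mirroring the layout declared in amdgpu_ucode.h, and for brevity it assumes a little-endian host instead of using `le32_to_cpu()` as the driver does.

```c
#include <stdint.h>
#include <stddef.h>

/* Mirrors struct common_firmware_header from amdgpu_ucode.h. */
struct common_fw_hdr {
	uint32_t size_bytes;               /* header + payload */
	uint32_t header_size_bytes;
	uint16_t header_version_major;
	uint16_t header_version_minor;
	uint16_t ip_version_major;
	uint16_t ip_version_minor;
	uint32_t ucode_version;
	uint32_t ucode_size_bytes;
	uint32_t ucode_array_offset_bytes; /* payload offset from header start */
	uint32_t crc32;
};

/* Return a pointer to the ucode image, or NULL if the blob is malformed. */
static const uint8_t *check_fw(const uint8_t *data, size_t len)
{
	const struct common_fw_hdr *hdr = (const struct common_fw_hdr *)data;

	if (len < sizeof(*hdr) || len != hdr->size_bytes)
		return NULL;   /* same length test as amdgpu_ucode_validate() */
	if ((size_t)hdr->ucode_array_offset_bytes + hdr->ucode_size_bytes > len)
		return NULL;   /* payload must lie entirely inside the blob */
	return data + hdr->ucode_array_offset_bytes;
}
```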
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h new file mode 100644 index 000000000000..e468be4e28fa --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | |||
| @@ -0,0 +1,176 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2012 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | #ifndef __AMDGPU_UCODE_H__ | ||
| 24 | #define __AMDGPU_UCODE_H__ | ||
| 25 | |||
| 26 | struct common_firmware_header { | ||
| 27 | uint32_t size_bytes; /* size of the entire header+image(s) in bytes */ | ||
| 28 | uint32_t header_size_bytes; /* size of just the header in bytes */ | ||
| 29 | uint16_t header_version_major; /* header version */ | ||
| 30 | uint16_t header_version_minor; /* header version */ | ||
| 31 | uint16_t ip_version_major; /* IP version */ | ||
| 32 | uint16_t ip_version_minor; /* IP version */ | ||
| 33 | uint32_t ucode_version; | ||
| 34 | uint32_t ucode_size_bytes; /* size of ucode in bytes */ | ||
| 35 | uint32_t ucode_array_offset_bytes; /* payload offset from the start of the header */ | ||
| 36 | uint32_t crc32; /* crc32 checksum of the payload */ | ||
| 37 | }; | ||
| 38 | |||
| 39 | /* version_major=1, version_minor=0 */ | ||
| 40 | struct mc_firmware_header_v1_0 { | ||
| 41 | struct common_firmware_header header; | ||
| 42 | uint32_t io_debug_size_bytes; /* size of debug array in dwords */ | ||
| 43 | uint32_t io_debug_array_offset_bytes; /* payload offset from the start of the header */ | ||
| 44 | }; | ||
| 45 | |||
| 46 | /* version_major=1, version_minor=0 */ | ||
| 47 | struct smc_firmware_header_v1_0 { | ||
| 48 | struct common_firmware_header header; | ||
| 49 | uint32_t ucode_start_addr; | ||
| 50 | }; | ||
| 51 | |||
| 52 | /* version_major=1, version_minor=0 */ | ||
| 53 | struct gfx_firmware_header_v1_0 { | ||
| 54 | struct common_firmware_header header; | ||
| 55 | uint32_t ucode_feature_version; | ||
| 56 | uint32_t jt_offset; /* jt location */ | ||
| 57 | uint32_t jt_size; /* size of jt */ | ||
| 58 | }; | ||
| 59 | |||
| 60 | /* version_major=1, version_minor=0 */ | ||
| 61 | struct rlc_firmware_header_v1_0 { | ||
| 62 | struct common_firmware_header header; | ||
| 63 | uint32_t ucode_feature_version; | ||
| 64 | uint32_t save_and_restore_offset; | ||
| 65 | uint32_t clear_state_descriptor_offset; | ||
| 66 | uint32_t avail_scratch_ram_locations; | ||
| 67 | uint32_t master_pkt_description_offset; | ||
| 68 | }; | ||
| 69 | |||
| 70 | /* version_major=2, version_minor=0 */ | ||
| 71 | struct rlc_firmware_header_v2_0 { | ||
| 72 | struct common_firmware_header header; | ||
| 73 | uint32_t ucode_feature_version; | ||
| 74 | uint32_t jt_offset; /* jt location */ | ||
| 75 | uint32_t jt_size; /* size of jt */ | ||
| 76 | uint32_t save_and_restore_offset; | ||
| 77 | uint32_t clear_state_descriptor_offset; | ||
| 78 | uint32_t avail_scratch_ram_locations; | ||
| 79 | uint32_t reg_restore_list_size; | ||
| 80 | uint32_t reg_list_format_start; | ||
| 81 | uint32_t reg_list_format_separate_start; | ||
| 82 | uint32_t starting_offsets_start; | ||
| 83 | uint32_t reg_list_format_size_bytes; /* size of reg list format array in bytes */ | ||
| 84 | uint32_t reg_list_format_array_offset_bytes; /* payload offset from the start of the header */ | ||
| 85 | uint32_t reg_list_size_bytes; /* size of reg list array in bytes */ | ||
| 86 | uint32_t reg_list_array_offset_bytes; /* payload offset from the start of the header */ | ||
| 87 | uint32_t reg_list_format_separate_size_bytes; /* size of reg list format array in bytes */ | ||
| 88 | uint32_t reg_list_format_separate_array_offset_bytes; /* payload offset from the start of the header */ | ||
| 89 | uint32_t reg_list_separate_size_bytes; /* size of reg list array in bytes */ | ||
| 90 | uint32_t reg_list_separate_array_offset_bytes; /* payload offset from the start of the header */ | ||
| 91 | }; | ||
| 92 | |||
| 93 | /* version_major=1, version_minor=0 */ | ||
| 94 | struct sdma_firmware_header_v1_0 { | ||
| 95 | struct common_firmware_header header; | ||
| 96 | uint32_t ucode_feature_version; | ||
| 97 | uint32_t ucode_change_version; | ||
| 98 | uint32_t jt_offset; /* jt location */ | ||
| 99 | uint32_t jt_size; /* size of jt */ | ||
| 100 | }; | ||
| 101 | |||
| 102 | /* version_major=1, version_minor=1 */ | ||
| 103 | struct sdma_firmware_header_v1_1 { | ||
| 104 | struct sdma_firmware_header_v1_0 v1_0; | ||
| 105 | uint32_t digest_size; | ||
| 106 | }; | ||
| 107 | |||
| 108 | /* header is fixed size */ | ||
| 109 | union amdgpu_firmware_header { | ||
| 110 | struct common_firmware_header common; | ||
| 111 | struct mc_firmware_header_v1_0 mc; | ||
| 112 | struct smc_firmware_header_v1_0 smc; | ||
| 113 | struct gfx_firmware_header_v1_0 gfx; | ||
| 114 | struct rlc_firmware_header_v1_0 rlc; | ||
| 115 | struct rlc_firmware_header_v2_0 rlc_v2_0; | ||
| 116 | struct sdma_firmware_header_v1_0 sdma; | ||
| 117 | struct sdma_firmware_header_v1_1 sdma_v1_1; | ||
| 118 | uint8_t raw[0x100]; | ||
| 119 | }; | ||
| 120 | |||
| 121 | /* | ||
| 122 | * fw loading support | ||
| 123 | */ | ||
| 124 | enum AMDGPU_UCODE_ID { | ||
| 125 | AMDGPU_UCODE_ID_SDMA0 = 0, | ||
| 126 | AMDGPU_UCODE_ID_SDMA1, | ||
| 127 | AMDGPU_UCODE_ID_CP_CE, | ||
| 128 | AMDGPU_UCODE_ID_CP_PFP, | ||
| 129 | AMDGPU_UCODE_ID_CP_ME, | ||
| 130 | AMDGPU_UCODE_ID_CP_MEC1, | ||
| 131 | AMDGPU_UCODE_ID_CP_MEC2, | ||
| 132 | AMDGPU_UCODE_ID_RLC_G, | ||
| 133 | AMDGPU_UCODE_ID_MAXIMUM, | ||
| 134 | }; | ||
| 135 | |||
| 136 | /* engine firmware status */ | ||
| 137 | enum AMDGPU_UCODE_STATUS { | ||
| 138 | AMDGPU_UCODE_STATUS_INVALID, | ||
| 139 | AMDGPU_UCODE_STATUS_NOT_LOADED, | ||
| 140 | AMDGPU_UCODE_STATUS_LOADED, | ||
| 141 | }; | ||
| 142 | |||
| 143 | /* conform to smu_ucode_xfer_cz.h */ | ||
| 144 | #define AMDGPU_SDMA0_UCODE_LOADED 0x00000001 | ||
| 145 | #define AMDGPU_SDMA1_UCODE_LOADED 0x00000002 | ||
| 146 | #define AMDGPU_CPCE_UCODE_LOADED 0x00000004 | ||
| 147 | #define AMDGPU_CPPFP_UCODE_LOADED 0x00000008 | ||
| 148 | #define AMDGPU_CPME_UCODE_LOADED 0x00000010 | ||
| 149 | #define AMDGPU_CPMEC1_UCODE_LOADED 0x00000020 | ||
| 150 | #define AMDGPU_CPMEC2_UCODE_LOADED 0x00000040 | ||
| 151 | #define AMDGPU_CPRLC_UCODE_LOADED 0x00000100 | ||
| 152 | |||
| 153 | /* amdgpu firmware info */ | ||
| 154 | struct amdgpu_firmware_info { | ||
| 155 | /* ucode ID */ | ||
| 156 | enum AMDGPU_UCODE_ID ucode_id; | ||
| 157 | /* request_firmware */ | ||
| 158 | const struct firmware *fw; | ||
| 159 | /* starting mc address */ | ||
| 160 | uint64_t mc_addr; | ||
| 161 | /* kernel linear address */ | ||
| 162 | void *kaddr; | ||
| 163 | }; | ||
| 164 | |||
| 165 | void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr); | ||
| 166 | void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr); | ||
| 167 | void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr); | ||
| 168 | void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr); | ||
| 169 | void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr); | ||
| 170 | int amdgpu_ucode_validate(const struct firmware *fw); | ||
| 171 | bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr, | ||
| 172 | uint16_t hdr_major, uint16_t hdr_minor); | ||
| 173 | int amdgpu_ucode_init_bo(struct amdgpu_device *adev); | ||
| 174 | int amdgpu_ucode_fini_bo(struct amdgpu_device *adev); | ||
| 175 | |||
| 176 | #endif | ||
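Every versioned header above embeds `struct common_firmware_header` as its first member, which is what lets amdgpu_ucode.c recover the specific layout with `container_of()` after checking `header_version_major`. A minimal hypothetical caller of `amdgpu_ucode_hdr_version()` might look like the sketch below; note the inverted return convention (true means the version does *not* match). `check_hdr_is_v1_0()` is an illustrative name, not driver API.

```c
/* Hypothetical sketch: reject firmware unless it uses the 1.0 header
 * layout. Assumes kernel context (le16_to_cpu, DRM_ERROR available). */
static int check_hdr_is_v1_0(const struct firmware *fw)
{
	union amdgpu_firmware_header *hdr =
		(union amdgpu_firmware_header *)fw->data;

	if (amdgpu_ucode_hdr_version(hdr, 1, 0)) {
		DRM_ERROR("unexpected fw header version %u.%u\n",
			  le16_to_cpu(hdr->common.header_version_major),
			  le16_to_cpu(hdr->common.header_version_minor));
		return -EINVAL;
	}
	return 0;
}
```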
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c new file mode 100644 index 000000000000..c03bce6f32a9 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
| @@ -0,0 +1,976 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2011 Advanced Micro Devices, Inc. | ||
| 3 | * All Rights Reserved. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the | ||
| 7 | * "Software"), to deal in the Software without restriction, including | ||
| 8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 10 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 11 | * the following conditions: | ||
| 12 | * | ||
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 20 | * | ||
| 21 | * The above copyright notice and this permission notice (including the | ||
| 22 | * next paragraph) shall be included in all copies or substantial portions | ||
| 23 | * of the Software. | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | /* | ||
| 27 | * Authors: | ||
| 28 | * Christian König <deathsimple@vodafone.de> | ||
| 29 | */ | ||
| 30 | |||
| 31 | #include <linux/firmware.h> | ||
| 32 | #include <linux/module.h> | ||
| 33 | #include <drm/drmP.h> | ||
| 34 | #include <drm/drm.h> | ||
| 35 | |||
| 36 | #include "amdgpu.h" | ||
| 37 | #include "amdgpu_pm.h" | ||
| 38 | #include "amdgpu_uvd.h" | ||
| 39 | #include "cikd.h" | ||
| 40 | #include "uvd/uvd_4_2_d.h" | ||
| 41 | |||
| 42 | /* 1 second timeout */ | ||
| 43 | #define UVD_IDLE_TIMEOUT_MS 1000 | ||
| 44 | |||
| 45 | /* Firmware Names */ | ||
| 46 | #ifdef CONFIG_DRM_AMDGPU_CIK | ||
| 47 | #define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin" | ||
| 48 | #define FIRMWARE_KABINI "radeon/kabini_uvd.bin" | ||
| 49 | #define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin" | ||
| 50 | #define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin" | ||
| 51 | #define FIRMWARE_MULLINS "radeon/mullins_uvd.bin" | ||
| 52 | #endif | ||
| 53 | #define FIRMWARE_TONGA "radeon/tonga_uvd.bin" | ||
| 54 | #define FIRMWARE_CARRIZO "radeon/carrizo_uvd.bin" | ||
| 55 | |||
| 56 | /** | ||
| 57 | * struct amdgpu_uvd_cs_ctx - Command submission parser context | ||
| 58 | * | ||
| 59 | * Used for emulating virtual memory support on UVD 4.2. | ||
| 60 | */ | ||
| 61 | struct amdgpu_uvd_cs_ctx { | ||
| 62 | struct amdgpu_cs_parser *parser; | ||
| 63 | unsigned reg, count; | ||
| 64 | unsigned data0, data1; | ||
| 65 | unsigned idx; | ||
| 66 | unsigned ib_idx; | ||
| 67 | |||
| 68 | /* does the IB have a msg command? */ | ||
| 69 | bool has_msg_cmd; | ||
| 70 | |||
| 71 | /* minimum buffer sizes */ | ||
| 72 | unsigned *buf_sizes; | ||
| 73 | }; | ||
| 74 | |||
| 75 | #ifdef CONFIG_DRM_AMDGPU_CIK | ||
| 76 | MODULE_FIRMWARE(FIRMWARE_BONAIRE); | ||
| 77 | MODULE_FIRMWARE(FIRMWARE_KABINI); | ||
| 78 | MODULE_FIRMWARE(FIRMWARE_KAVERI); | ||
| 79 | MODULE_FIRMWARE(FIRMWARE_HAWAII); | ||
| 80 | MODULE_FIRMWARE(FIRMWARE_MULLINS); | ||
| 81 | #endif | ||
| 82 | MODULE_FIRMWARE(FIRMWARE_TONGA); | ||
| 83 | MODULE_FIRMWARE(FIRMWARE_CARRIZO); | ||
| 84 | |||
| 85 | static void amdgpu_uvd_note_usage(struct amdgpu_device *adev); | ||
| 86 | static void amdgpu_uvd_idle_work_handler(struct work_struct *work); | ||
| 87 | |||
| 88 | int amdgpu_uvd_sw_init(struct amdgpu_device *adev) | ||
| 89 | { | ||
| 90 | unsigned long bo_size; | ||
| 91 | const char *fw_name; | ||
| 92 | const struct common_firmware_header *hdr; | ||
| 93 | unsigned version_major, version_minor, family_id; | ||
| 94 | int i, r; | ||
| 95 | |||
| 96 | INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); | ||
| 97 | |||
| 98 | switch (adev->asic_type) { | ||
| 99 | #ifdef CONFIG_DRM_AMDGPU_CIK | ||
| 100 | case CHIP_BONAIRE: | ||
| 101 | fw_name = FIRMWARE_BONAIRE; | ||
| 102 | break; | ||
| 103 | case CHIP_KABINI: | ||
| 104 | fw_name = FIRMWARE_KABINI; | ||
| 105 | break; | ||
| 106 | case CHIP_KAVERI: | ||
| 107 | fw_name = FIRMWARE_KAVERI; | ||
| 108 | break; | ||
| 109 | case CHIP_HAWAII: | ||
| 110 | fw_name = FIRMWARE_HAWAII; | ||
| 111 | break; | ||
| 112 | case CHIP_MULLINS: | ||
| 113 | fw_name = FIRMWARE_MULLINS; | ||
| 114 | break; | ||
| 115 | #endif | ||
| 116 | case CHIP_TONGA: | ||
| 117 | fw_name = FIRMWARE_TONGA; | ||
| 118 | break; | ||
| 119 | case CHIP_CARRIZO: | ||
| 120 | fw_name = FIRMWARE_CARRIZO; | ||
| 121 | break; | ||
| 122 | default: | ||
| 123 | return -EINVAL; | ||
| 124 | } | ||
| 125 | |||
| 126 | r = request_firmware(&adev->uvd.fw, fw_name, adev->dev); | ||
| 127 | if (r) { | ||
| 128 | dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n", | ||
| 129 | fw_name); | ||
| 130 | return r; | ||
| 131 | } | ||
| 132 | |||
| 133 | r = amdgpu_ucode_validate(adev->uvd.fw); | ||
| 134 | if (r) { | ||
| 135 | dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n", | ||
| 136 | fw_name); | ||
| 137 | release_firmware(adev->uvd.fw); | ||
| 138 | adev->uvd.fw = NULL; | ||
| 139 | return r; | ||
| 140 | } | ||
| 141 | |||
| 142 | hdr = (const struct common_firmware_header *)adev->uvd.fw->data; | ||
| 143 | family_id = le32_to_cpu(hdr->ucode_version) & 0xff; | ||
| 144 | version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; | ||
| 145 | version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; | ||
| 146 | DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n", | ||
| 147 | version_major, version_minor, family_id); | ||
| 148 | |||
| 149 | bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) | ||
| 150 | + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE; | ||
| 151 | r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, | ||
| 152 | AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->uvd.vcpu_bo); | ||
| 153 | if (r) { | ||
| 154 | dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); | ||
| 155 | return r; | ||
| 156 | } | ||
| 157 | |||
| 158 | r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); | ||
| 159 | if (r) { | ||
| 160 | amdgpu_bo_unref(&adev->uvd.vcpu_bo); | ||
| 161 | dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r); | ||
| 162 | return r; | ||
| 163 | } | ||
| 164 | |||
| 165 | r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM, | ||
| 166 | &adev->uvd.gpu_addr); | ||
| 167 | if (r) { | ||
| 168 | amdgpu_bo_unreserve(adev->uvd.vcpu_bo); | ||
| 169 | amdgpu_bo_unref(&adev->uvd.vcpu_bo); | ||
| 170 | dev_err(adev->dev, "(%d) UVD bo pin failed\n", r); | ||
| 171 | return r; | ||
| 172 | } | ||
| 173 | |||
| 174 | r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr); | ||
| 175 | if (r) { | ||
| 176 | dev_err(adev->dev, "(%d) UVD map failed\n", r); | ||
| | amdgpu_bo_unpin(adev->uvd.vcpu_bo); | ||
| | amdgpu_bo_unreserve(adev->uvd.vcpu_bo); | ||
| | amdgpu_bo_unref(&adev->uvd.vcpu_bo); | ||
| 177 | return r; | ||
| 178 | } | ||
| 179 | |||
| 180 | amdgpu_bo_unreserve(adev->uvd.vcpu_bo); | ||
| 181 | |||
| 182 | for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { | ||
| 183 | atomic_set(&adev->uvd.handles[i], 0); | ||
| 184 | adev->uvd.filp[i] = NULL; | ||
| 185 | } | ||
| 186 | |||
| 187 | /* from UVD v5.0 onward, HW addressing capacity increased to 64 bits */ | ||
| 188 | if (!amdgpu_ip_block_version_cmp(adev, AMDGPU_IP_BLOCK_TYPE_UVD, 5, 0)) | ||
| 189 | adev->uvd.address_64_bit = true; | ||
| 190 | |||
| 191 | return 0; | ||
| 192 | } | ||
| 193 | |||
| 194 | int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) | ||
| 195 | { | ||
| 196 | int r; | ||
| 197 | |||
| 198 | if (adev->uvd.vcpu_bo == NULL) | ||
| 199 | return 0; | ||
| 200 | |||
| 201 | r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); | ||
| 202 | if (!r) { | ||
| 203 | amdgpu_bo_kunmap(adev->uvd.vcpu_bo); | ||
| 204 | amdgpu_bo_unpin(adev->uvd.vcpu_bo); | ||
| 205 | amdgpu_bo_unreserve(adev->uvd.vcpu_bo); | ||
| 206 | } | ||
| 207 | |||
| 208 | amdgpu_bo_unref(&adev->uvd.vcpu_bo); | ||
| 209 | |||
| 210 | amdgpu_ring_fini(&adev->uvd.ring); | ||
| 211 | |||
| 212 | release_firmware(adev->uvd.fw); | ||
| 213 | |||
| 214 | return 0; | ||
| 215 | } | ||
| 216 | |||
| 217 | int amdgpu_uvd_suspend(struct amdgpu_device *adev) | ||
| 218 | { | ||
| 219 | unsigned size; | ||
| 220 | void *ptr; | ||
| 221 | const struct common_firmware_header *hdr; | ||
| 222 | int i; | ||
| 223 | |||
| 224 | if (adev->uvd.vcpu_bo == NULL) | ||
| 225 | return 0; | ||
| 226 | |||
| 227 | for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) | ||
| 228 | if (atomic_read(&adev->uvd.handles[i])) | ||
| 229 | break; | ||
| 230 | |||
| 231 | if (i == AMDGPU_MAX_UVD_HANDLES) | ||
| 232 | return 0; | ||
| 233 | |||
| 234 | hdr = (const struct common_firmware_header *)adev->uvd.fw->data; | ||
| 235 | |||
| 236 | size = amdgpu_bo_size(adev->uvd.vcpu_bo); | ||
| 237 | size -= le32_to_cpu(hdr->ucode_size_bytes); | ||
| 238 | |||
| 239 | ptr = adev->uvd.cpu_addr; | ||
| 240 | ptr += le32_to_cpu(hdr->ucode_size_bytes); | ||
| 241 | |||
| 242 | adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); | ||
| | if (!adev->uvd.saved_bo) | ||
| | return -ENOMEM; | ||
| 243 | memcpy(adev->uvd.saved_bo, ptr, size); | ||
| 244 | |||
| 245 | return 0; | ||
| 246 | } | ||
| 247 | |||
| 248 | int amdgpu_uvd_resume(struct amdgpu_device *adev) | ||
| 249 | { | ||
| 250 | unsigned size; | ||
| 251 | void *ptr; | ||
| 252 | const struct common_firmware_header *hdr; | ||
| 253 | unsigned offset; | ||
| 254 | |||
| 255 | if (adev->uvd.vcpu_bo == NULL) | ||
| 256 | return -EINVAL; | ||
| 257 | |||
| 258 | hdr = (const struct common_firmware_header *)adev->uvd.fw->data; | ||
| 259 | offset = le32_to_cpu(hdr->ucode_array_offset_bytes); | ||
| 260 | memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset, | ||
| 261 | (adev->uvd.fw->size) - offset); | ||
| 262 | |||
| 263 | size = amdgpu_bo_size(adev->uvd.vcpu_bo); | ||
| 264 | size -= le32_to_cpu(hdr->ucode_size_bytes); | ||
| 265 | ptr = adev->uvd.cpu_addr; | ||
| 266 | ptr += le32_to_cpu(hdr->ucode_size_bytes); | ||
| 267 | |||
| 268 | if (adev->uvd.saved_bo != NULL) { | ||
| 269 | memcpy(ptr, adev->uvd.saved_bo, size); | ||
| 270 | kfree(adev->uvd.saved_bo); | ||
| 271 | adev->uvd.saved_bo = NULL; | ||
| 272 | } else { | ||
| 273 | memset(ptr, 0, size); | ||
| | } | ||
| 274 | |||
| 275 | return 0; | ||
| 276 | } | ||
| 277 | |||
| 278 | void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) | ||
| 279 | { | ||
| 280 | struct amdgpu_ring *ring = &adev->uvd.ring; | ||
| 281 | int i, r; | ||
| 282 | |||
| 283 | for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { | ||
| 284 | uint32_t handle = atomic_read(&adev->uvd.handles[i]); | ||
| 285 | if (handle != 0 && adev->uvd.filp[i] == filp) { | ||
| 286 | struct amdgpu_fence *fence; | ||
| 287 | |||
| 288 | amdgpu_uvd_note_usage(adev); | ||
| 289 | |||
| 290 | r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence); | ||
| 291 | if (r) { | ||
| 292 | DRM_ERROR("Error destroying UVD (%d)!\n", r); | ||
| 293 | continue; | ||
| 294 | } | ||
| 295 | |||
| 296 | amdgpu_fence_wait(fence, false); | ||
| 297 | amdgpu_fence_unref(&fence); | ||
| 298 | |||
| 299 | adev->uvd.filp[i] = NULL; | ||
| 300 | atomic_set(&adev->uvd.handles[i], 0); | ||
| 301 | } | ||
| 302 | } | ||
| 303 | } | ||
| 304 | |||
| 305 | static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo) | ||
| 306 | { | ||
| 307 | int i; | ||
| 308 | for (i = 0; i < rbo->placement.num_placement; ++i) { | ||
| 309 | rbo->placements[i].fpfn = 0 >> PAGE_SHIFT; | ||
| 310 | rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; | ||
| 311 | } | ||
| 312 | } | ||
| 313 | |||
| 314 | /** | ||
| 315 | * amdgpu_uvd_cs_pass1 - first parsing round | ||
| 316 | * | ||
| 317 | * @ctx: UVD parser context | ||
| 318 | * | ||
| 319 | * Make sure UVD message and feedback buffers are in VRAM and | ||
| 320 | * that no buffer crosses a 256MB boundary. | ||
| 321 | */ | ||
| 322 | static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx) | ||
| 323 | { | ||
| 324 | struct amdgpu_bo_va_mapping *mapping; | ||
| 325 | struct amdgpu_bo *bo; | ||
| 326 | uint32_t cmd, lo, hi; | ||
| 327 | uint64_t addr; | ||
| 328 | int r = 0; | ||
| 329 | |||
| 330 | lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0); | ||
| 331 | hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1); | ||
| 332 | addr = ((uint64_t)lo) | (((uint64_t)hi) << 32); | ||
| 333 | |||
| 334 | mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo); | ||
| 335 | if (mapping == NULL) { | ||
| 336 | DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr); | ||
| 337 | return -EINVAL; | ||
| 338 | } | ||
| 339 | |||
| 340 | if (!ctx->parser->adev->uvd.address_64_bit) { | ||
| 341 | /* check if it's a message or feedback command */ | ||
| 342 | cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1; | ||
| 343 | if (cmd == 0x0 || cmd == 0x3) { | ||
| 344 | /* yes, force it into VRAM */ | ||
| 345 | uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM; | ||
| 346 | amdgpu_ttm_placement_from_domain(bo, domain); | ||
| 347 | } | ||
| 348 | amdgpu_uvd_force_into_uvd_segment(bo); | ||
| 349 | |||
| 350 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | ||
| 351 | } | ||
| 352 | |||
| 353 | return r; | ||
| 354 | } | ||
| 355 | |||
| 356 | /** | ||
| 357 | * amdgpu_uvd_cs_msg_decode - handle UVD decode message | ||
| 358 | * | ||
| 359 | * @msg: pointer to message structure | ||
| 360 | * @buf_sizes: returned buffer sizes | ||
| 361 | * | ||
| 362 | * Peek into the decode message and calculate the necessary buffer sizes. | ||
| 363 | */ | ||
| 364 | static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) | ||
| 365 | { | ||
| 366 | unsigned stream_type = msg[4]; | ||
| 367 | unsigned width = msg[6]; | ||
| 368 | unsigned height = msg[7]; | ||
| 369 | unsigned dpb_size = msg[9]; | ||
| 370 | unsigned pitch = msg[28]; | ||
| 371 | unsigned level = msg[57]; | ||
| 372 | |||
| 373 | unsigned width_in_mb = width / 16; | ||
| 374 | unsigned height_in_mb = ALIGN(height / 16, 2); | ||
| 375 | unsigned fs_in_mb = width_in_mb * height_in_mb; | ||
| 376 | |||
| 377 | unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; | ||
| 378 | |||
| 379 | image_size = width * height; | ||
| 380 | image_size += image_size / 2; | ||
| 381 | image_size = ALIGN(image_size, 1024); | ||
| 382 | |||
| 383 | switch (stream_type) { | ||
| 384 | case 0: /* H264 */ | ||
| 385 | case 7: /* H264 Perf */ | ||
| 386 | switch (level) { | ||
| 387 | case 30: | ||
| 388 | num_dpb_buffer = 8100 / fs_in_mb; | ||
| 389 | break; | ||
| 390 | case 31: | ||
| 391 | num_dpb_buffer = 18000 / fs_in_mb; | ||
| 392 | break; | ||
| 393 | case 32: | ||
| 394 | num_dpb_buffer = 20480 / fs_in_mb; | ||
| 395 | break; | ||
| 396 | case 41: | ||
| 397 | num_dpb_buffer = 32768 / fs_in_mb; | ||
| 398 | break; | ||
| 399 | case 42: | ||
| 400 | num_dpb_buffer = 34816 / fs_in_mb; | ||
| 401 | break; | ||
| 402 | case 50: | ||
| 403 | num_dpb_buffer = 110400 / fs_in_mb; | ||
| 404 | break; | ||
| 405 | case 51: | ||
| 406 | num_dpb_buffer = 184320 / fs_in_mb; | ||
| 407 | break; | ||
| 408 | default: | ||
| 409 | num_dpb_buffer = 184320 / fs_in_mb; | ||
| 410 | break; | ||
| 411 | } | ||
| 412 | num_dpb_buffer++; | ||
| 413 | if (num_dpb_buffer > 17) | ||
| 414 | num_dpb_buffer = 17; | ||
| 415 | |||
| 416 | /* reference picture buffer */ | ||
| 417 | min_dpb_size = image_size * num_dpb_buffer; | ||
| 418 | |||
| 419 | /* macroblock context buffer */ | ||
| 420 | min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192; | ||
| 421 | |||
| 422 | /* IT surface buffer */ | ||
| 423 | min_dpb_size += width_in_mb * height_in_mb * 32; | ||
| 424 | break; | ||
| 425 | |||
| 426 | case 1: /* VC1 */ | ||
| 427 | |||
| 428 | /* reference picture buffer */ | ||
| 429 | min_dpb_size = image_size * 3; | ||
| 430 | |||
| 431 | /* CONTEXT_BUFFER */ | ||
| 432 | min_dpb_size += width_in_mb * height_in_mb * 128; | ||
| 433 | |||
| 434 | /* IT surface buffer */ | ||
| 435 | min_dpb_size += width_in_mb * 64; | ||
| 436 | |||
| 437 | /* DB surface buffer */ | ||
| 438 | min_dpb_size += width_in_mb * 128; | ||
| 439 | |||
| 440 | /* BP */ | ||
| 441 | tmp = max(width_in_mb, height_in_mb); | ||
| 442 | min_dpb_size += ALIGN(tmp * 7 * 16, 64); | ||
| 443 | break; | ||
| 444 | |||
| 445 | case 3: /* MPEG2 */ | ||
| 446 | |||
| 447 | /* reference picture buffer */ | ||
| 448 | min_dpb_size = image_size * 3; | ||
| 449 | break; | ||
| 450 | |||
| 451 | case 4: /* MPEG4 */ | ||
| 452 | |||
| 453 | /* reference picture buffer */ | ||
| 454 | min_dpb_size = image_size * 3; | ||
| 455 | |||
| 456 | /* CM */ | ||
| 457 | min_dpb_size += width_in_mb * height_in_mb * 64; | ||
| 458 | |||
| 459 | /* IT surface buffer */ | ||
| 460 | min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64); | ||
| 461 | break; | ||
| 462 | |||
| 463 | default: | ||
| 464 | DRM_ERROR("UVD codec not handled %d!\n", stream_type); | ||
| 465 | return -EINVAL; | ||
| 466 | } | ||
| 467 | |||
| 468 | if (width > pitch) { | ||
| 469 | DRM_ERROR("Invalid UVD decoding target pitch!\n"); | ||
| 470 | return -EINVAL; | ||
| 471 | } | ||
| 472 | |||
| 473 | if (dpb_size < min_dpb_size) { | ||
| 474 | DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n", | ||
| 475 | dpb_size, min_dpb_size); | ||
| 476 | return -EINVAL; | ||
| 477 | } | ||
| 478 | |||
| 479 | buf_sizes[0x1] = dpb_size; | ||
| 480 | buf_sizes[0x2] = image_size; | ||
| 481 | return 0; | ||
| 482 | } | ||
| 483 | |||
| 484 | /** | ||
| 485 | * amdgpu_uvd_cs_msg - handle UVD message | ||
| 486 | * | ||
| 487 | * @ctx: UVD parser context | ||
| 488 | * @bo: buffer object containing the message | ||
| 489 | * @offset: offset into the buffer object | ||
| 490 | * | ||
| 491 | * Peek into the UVD message and extract the session id. | ||
| 492 | * Make sure that we don't open too many sessions. | ||
| 493 | */ | ||
| 494 | static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, | ||
| 495 | struct amdgpu_bo *bo, unsigned offset) | ||
| 496 | { | ||
| 497 | struct amdgpu_device *adev = ctx->parser->adev; | ||
| 498 | int32_t *msg, msg_type, handle; | ||
| 499 | struct fence *f; | ||
| 500 | void *ptr; | ||
| 501 | |||
| 502 | int i, r; | ||
| 503 | |||
| 504 | if (offset & 0x3F) { | ||
| 505 | DRM_ERROR("UVD messages must be 64 byte aligned!\n"); | ||
| 506 | return -EINVAL; | ||
| 507 | } | ||
| 508 | |||
| 509 | f = reservation_object_get_excl(bo->tbo.resv); | ||
| 510 | if (f) { | ||
| 511 | r = amdgpu_fence_wait((struct amdgpu_fence *)f, false); | ||
| 512 | if (r) { | ||
| 513 | DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); | ||
| 514 | return r; | ||
| 515 | } | ||
| 516 | } | ||
| 517 | |||
| 518 | r = amdgpu_bo_kmap(bo, &ptr); | ||
| 519 | if (r) { | ||
| 520 | DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); | ||
| 521 | return r; | ||
| 522 | } | ||
| 523 | |||
| 524 | msg = ptr + offset; | ||
| 525 | |||
| 526 | msg_type = msg[1]; | ||
| 527 | handle = msg[2]; | ||
| 528 | |||
| 529 | if (handle == 0) { | ||
| 530 | DRM_ERROR("Invalid UVD handle!\n"); | ||
| | amdgpu_bo_kunmap(bo); | ||
| 531 | return -EINVAL; | ||
| 532 | } | ||
| 533 | |||
| 534 | if (msg_type == 1) { | ||
| 535 | /* it's a decode msg, calc buffer sizes */ | ||
| 536 | r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes); | ||
| 537 | amdgpu_bo_kunmap(bo); | ||
| 538 | if (r) | ||
| 539 | return r; | ||
| 540 | |||
| 541 | } else if (msg_type == 2) { | ||
| 542 | /* it's a destroy msg, free the handle */ | ||
| 543 | for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) | ||
| 544 | atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); | ||
| 545 | amdgpu_bo_kunmap(bo); | ||
| 546 | return 0; | ||
| 547 | } else { | ||
| 548 | /* it's a create msg */ | ||
| 549 | amdgpu_bo_kunmap(bo); | ||
| 550 | |||
| 551 | if (msg_type != 0) { | ||
| 552 | DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); | ||
| 553 | return -EINVAL; | ||
| 554 | } | ||
| 555 | |||
| 556 | /* it's a create msg, no special handling needed */ | ||
| 557 | } | ||
| 558 | |||
| 559 | /* create or decode, validate the handle */ | ||
| 560 | for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { | ||
| 561 | if (atomic_read(&adev->uvd.handles[i]) == handle) | ||
| 562 | return 0; | ||
| 563 | } | ||
| 564 | |||
| 565 | /* handle not found, try to alloc a new one */ | ||
| 566 | for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { | ||
| 567 | if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { | ||
| 568 | adev->uvd.filp[i] = ctx->parser->filp; | ||
| 569 | return 0; | ||
| 570 | } | ||
| 571 | } | ||
| 572 | |||
| 573 | DRM_ERROR("No more free UVD handles!\n"); | ||
| 574 | return -EINVAL; | ||
| 575 | } | ||
| 576 | |||
| 577 | /** | ||
| 578 | * amdgpu_uvd_cs_pass2 - second parsing round | ||
| 579 | * | ||
| 580 | * @ctx: UVD parser context | ||
| 581 | * | ||
| 582 | * Patch buffer addresses, make sure buffer sizes are correct. | ||
| 583 | */ | ||
| 584 | static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) | ||
| 585 | { | ||
| 586 | struct amdgpu_bo_va_mapping *mapping; | ||
| 587 | struct amdgpu_bo *bo; | ||
| 588 | struct amdgpu_ib *ib; | ||
| 589 | uint32_t cmd, lo, hi; | ||
| 590 | uint64_t start, end; | ||
| 591 | uint64_t addr; | ||
| 592 | int r; | ||
| 593 | |||
| 594 | lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0); | ||
| 595 | hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1); | ||
| 596 | addr = ((uint64_t)lo) | (((uint64_t)hi) << 32); | ||
| 597 | |||
| 598 | mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo); | ||
| 599 | if (mapping == NULL) | ||
| 600 | return -EINVAL; | ||
| 601 | |||
| 602 | start = amdgpu_bo_gpu_offset(bo); | ||
| 603 | |||
| 604 | end = (mapping->it.last + 1 - mapping->it.start); | ||
| 605 | end = end * AMDGPU_GPU_PAGE_SIZE + start; | ||
| 606 | |||
| 607 | addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE; | ||
| 608 | start += addr; | ||
| 609 | |||
| 610 | ib = &ctx->parser->ibs[ctx->ib_idx]; | ||
| 611 | ib->ptr[ctx->data0] = start & 0xFFFFFFFF; | ||
| 612 | ib->ptr[ctx->data1] = start >> 32; | ||
| 613 | |||
| 614 | cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1; | ||
| 615 | if (cmd < 0x4) { | ||
| 616 | if ((end - start) < ctx->buf_sizes[cmd]) { | ||
| 617 | DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, | ||
| 618 | (unsigned)(end - start), | ||
| 619 | ctx->buf_sizes[cmd]); | ||
| 620 | return -EINVAL; | ||
| 621 | } | ||
| 622 | |||
| 623 | } else if ((cmd != 0x100) && (cmd != 0x204)) { | ||
| 624 | DRM_ERROR("invalid UVD command %X!\n", cmd); | ||
| 625 | return -EINVAL; | ||
| 626 | } | ||
| 627 | |||
| 628 | if (!ctx->parser->adev->uvd.address_64_bit) { | ||
| 629 | if ((start >> 28) != ((end - 1) >> 28)) { | ||
| 630 | DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n", | ||
| 631 | start, end); | ||
| 632 | return -EINVAL; | ||
| 633 | } | ||
| 634 | |||
| 635 | if ((cmd == 0 || cmd == 0x3) && | ||
| 636 | (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) { | ||
| 637 | DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", | ||
| 638 | start, end); | ||
| 639 | return -EINVAL; | ||
| 640 | } | ||
| 641 | } | ||
| 642 | |||
| 643 | if (cmd == 0) { | ||
| 644 | ctx->has_msg_cmd = true; | ||
| 645 | r = amdgpu_uvd_cs_msg(ctx, bo, addr); | ||
| 646 | if (r) | ||
| 647 | return r; | ||
| 648 | } else if (!ctx->has_msg_cmd) { | ||
| 649 | DRM_ERROR("Message needed before other commands are send!\n"); | ||
| 650 | return -EINVAL; | ||
| 651 | } | ||
| 652 | |||
| 653 | return 0; | ||
| 654 | } | ||
| 655 | |||
| 656 | /** | ||
| 657 | * amdgpu_uvd_cs_reg - parse register writes | ||
| 658 | * | ||
| 659 | * @ctx: UVD parser context | ||
| 660 | * @cb: callback function | ||
| 661 | * | ||
| 662 | * Parse the register writes, call cb on each complete command. | ||
| 663 | */ | ||
| 664 | static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx, | ||
| 665 | int (*cb)(struct amdgpu_uvd_cs_ctx *ctx)) | ||
| 666 | { | ||
| 667 | struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx]; | ||
| 668 | int i, r; | ||
| 669 | |||
| 670 | ctx->idx++; | ||
| 671 | for (i = 0; i <= ctx->count; ++i) { | ||
| 672 | unsigned reg = ctx->reg + i; | ||
| 673 | |||
| 674 | if (ctx->idx >= ib->length_dw) { | ||
| 675 | DRM_ERROR("Register command after end of CS!\n"); | ||
| 676 | return -EINVAL; | ||
| 677 | } | ||
| 678 | |||
| 679 | switch (reg) { | ||
| 680 | case mmUVD_GPCOM_VCPU_DATA0: | ||
| 681 | ctx->data0 = ctx->idx; | ||
| 682 | break; | ||
| 683 | case mmUVD_GPCOM_VCPU_DATA1: | ||
| 684 | ctx->data1 = ctx->idx; | ||
| 685 | break; | ||
| 686 | case mmUVD_GPCOM_VCPU_CMD: | ||
| 687 | r = cb(ctx); | ||
| 688 | if (r) | ||
| 689 | return r; | ||
| 690 | break; | ||
| 691 | case mmUVD_ENGINE_CNTL: | ||
| 692 | break; | ||
| 693 | default: | ||
| 694 | DRM_ERROR("Invalid reg 0x%X!\n", reg); | ||
| 695 | return -EINVAL; | ||
| 696 | } | ||
| 697 | ctx->idx++; | ||
| 698 | } | ||
| 699 | return 0; | ||
| 700 | } | ||
| 701 | |||
| 702 | /** | ||
| 703 | * amdgpu_uvd_cs_packets - parse UVD packets | ||
| 704 | * | ||
| 705 | * @ctx: UVD parser context | ||
| 706 | * @cb: callback function | ||
| 707 | * | ||
| 708 | * Parse the command stream packets. | ||
| 709 | */ | ||
| 710 | static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx, | ||
| 711 | int (*cb)(struct amdgpu_uvd_cs_ctx *ctx)) | ||
| 712 | { | ||
| 713 | struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx]; | ||
| 714 | int r; | ||
| 715 | |||
| 716 | for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) { | ||
| 717 | uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx); | ||
| 718 | unsigned type = CP_PACKET_GET_TYPE(cmd); | ||
| 719 | switch (type) { | ||
| 720 | case PACKET_TYPE0: | ||
| 721 | ctx->reg = CP_PACKET0_GET_REG(cmd); | ||
| 722 | ctx->count = CP_PACKET_GET_COUNT(cmd); | ||
| 723 | r = amdgpu_uvd_cs_reg(ctx, cb); | ||
| 724 | if (r) | ||
| 725 | return r; | ||
| 726 | break; | ||
| 727 | case PACKET_TYPE2: | ||
| 728 | ++ctx->idx; | ||
| 729 | break; | ||
| 730 | default: | ||
| 731 | DRM_ERROR("Unknown packet type %d !\n", type); | ||
| 732 | return -EINVAL; | ||
| 733 | } | ||
| 734 | } | ||
| 735 | return 0; | ||
| 736 | } | ||
| 737 | |||
| 738 | /** | ||
| 739 | * amdgpu_uvd_ring_parse_cs - UVD command submission parser | ||
| 740 | * | ||
| 741 | * @parser: Command submission parser context | ||
| | * @ib_idx: which IB inside the submission to parse | ||
| 742 | * | ||
| 743 | * Parse the command stream, patch in addresses as necessary. | ||
| 744 | */ | ||
| 745 | int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) | ||
| 746 | { | ||
| 747 | struct amdgpu_uvd_cs_ctx ctx = {}; | ||
| 748 | unsigned buf_sizes[] = { | ||
| 749 | [0x00000000] = 2048, /* msg buffer */ | ||
| 750 | [0x00000001] = 32 * 1024 * 1024, /* DPB buffer */ | ||
| 751 | [0x00000002] = 2048 * 1152 * 3, /* decode target buffer */ | ||
| 752 | [0x00000003] = 2048, /* feedback buffer */ | ||
| 753 | }; | ||
| 754 | struct amdgpu_ib *ib = &parser->ibs[ib_idx]; | ||
| 755 | int r; | ||
| 756 | |||
| 757 | if (ib->length_dw % 16) { | ||
| 758 | DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n", | ||
| 759 | ib->length_dw); | ||
| 760 | return -EINVAL; | ||
| 761 | } | ||
| 762 | |||
| 763 | ctx.parser = parser; | ||
| 764 | ctx.buf_sizes = buf_sizes; | ||
| 765 | ctx.ib_idx = ib_idx; | ||
| 766 | |||
| 767 | /* first round, make sure the buffers are actually in the UVD segment */ | ||
| 768 | r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1); | ||
| 769 | if (r) | ||
| 770 | return r; | ||
| 771 | |||
| 772 | /* second round, patch buffer addresses into the command stream */ | ||
| 773 | r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2); | ||
| 774 | if (r) | ||
| 775 | return r; | ||
| 776 | |||
| 777 | if (!ctx.has_msg_cmd) { | ||
| 778 | DRM_ERROR("UVD-IBs need a msg command!\n"); | ||
| 779 | return -EINVAL; | ||
| 780 | } | ||
| 781 | |||
| 782 | amdgpu_uvd_note_usage(ctx.parser->adev); | ||
| 783 | |||
| 784 | return 0; | ||
| 785 | } | ||
| 786 | |||
| 787 | static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, | ||
| 788 | struct amdgpu_bo *bo, | ||
| 789 | struct amdgpu_fence **fence) | ||
| 790 | { | ||
| 791 | struct ttm_validate_buffer tv; | ||
| 792 | struct ww_acquire_ctx ticket; | ||
| 793 | struct list_head head; | ||
| 794 | struct amdgpu_ib ib; | ||
| 795 | uint64_t addr; | ||
| 796 | int i, r; | ||
| 797 | |||
| 798 | memset(&tv, 0, sizeof(tv)); | ||
| 799 | tv.bo = &bo->tbo; | ||
| 800 | |||
| 801 | INIT_LIST_HEAD(&head); | ||
| 802 | list_add(&tv.head, &head); | ||
| 803 | |||
| 804 | r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL); | ||
| 805 | if (r) | ||
| 806 | return r; | ||
| 807 | |||
| 808 | if (!bo->adev->uvd.address_64_bit) { | ||
| 809 | amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); | ||
| 810 | amdgpu_uvd_force_into_uvd_segment(bo); | ||
| 811 | } | ||
| 812 | |||
| 813 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); | ||
| 814 | if (r) | ||
| 815 | goto err; | ||
| 816 | |||
| 817 | r = amdgpu_ib_get(ring, NULL, 64, &ib); | ||
| 818 | if (r) | ||
| 819 | goto err; | ||
| 820 | |||
| 821 | addr = amdgpu_bo_gpu_offset(bo); | ||
| 822 | ib.ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0); | ||
| 823 | ib.ptr[1] = addr; | ||
| 824 | ib.ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0); | ||
| 825 | ib.ptr[3] = addr >> 32; | ||
| 826 | ib.ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0); | ||
| 827 | ib.ptr[5] = 0; | ||
| 828 | for (i = 6; i < 16; ++i) | ||
| 829 | ib.ptr[i] = PACKET2(0); | ||
| 830 | ib.length_dw = 16; | ||
| 831 | |||
| 832 | r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); | ||
| 833 | if (r) | ||
| 834 | goto err; | ||
| 835 | ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base); | ||
| 836 | |||
| 837 | if (fence) | ||
| 838 | *fence = amdgpu_fence_ref(ib.fence); | ||
| 839 | |||
| 840 | amdgpu_ib_free(ring->adev, &ib); | ||
| 841 | amdgpu_bo_unref(&bo); | ||
| 842 | return 0; | ||
| 843 | |||
| 844 | err: | ||
| 845 | ttm_eu_backoff_reservation(&ticket, &head); | ||
| 846 | return r; | ||
| 847 | } | ||
| 848 | |||
| 849 | /* Multiple fence commands without any stream commands in between can | ||
| 850 | * crash the vcpu so just try to emit a dummy create/destroy msg to | ||
| 851 | * avoid this. */ | ||
| 852 | int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | ||
| 853 | struct amdgpu_fence **fence) | ||
| 854 | { | ||
| 855 | struct amdgpu_device *adev = ring->adev; | ||
| 856 | struct amdgpu_bo *bo; | ||
| 857 | uint32_t *msg; | ||
| 858 | int r, i; | ||
| 859 | |||
| 860 | r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, | ||
| 861 | AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo); | ||
| 862 | if (r) | ||
| 863 | return r; | ||
| 864 | |||
| 865 | r = amdgpu_bo_reserve(bo, false); | ||
| 866 | if (r) { | ||
| 867 | amdgpu_bo_unref(&bo); | ||
| 868 | return r; | ||
| 869 | } | ||
| 870 | |||
| 871 | r = amdgpu_bo_kmap(bo, (void **)&msg); | ||
| 872 | if (r) { | ||
| 873 | amdgpu_bo_unreserve(bo); | ||
| 874 | amdgpu_bo_unref(&bo); | ||
| 875 | return r; | ||
| 876 | } | ||
| 877 | |||
| 878 | /* stitch together a UVD create msg */ | ||
| 879 | msg[0] = cpu_to_le32(0x00000de4); | ||
| 880 | msg[1] = cpu_to_le32(0x00000000); | ||
| 881 | msg[2] = cpu_to_le32(handle); | ||
| 882 | msg[3] = cpu_to_le32(0x00000000); | ||
| 883 | msg[4] = cpu_to_le32(0x00000000); | ||
| 884 | msg[5] = cpu_to_le32(0x00000000); | ||
| 885 | msg[6] = cpu_to_le32(0x00000000); | ||
| 886 | msg[7] = cpu_to_le32(0x00000780); | ||
| 887 | msg[8] = cpu_to_le32(0x00000440); | ||
| 888 | msg[9] = cpu_to_le32(0x00000000); | ||
| 889 | msg[10] = cpu_to_le32(0x01b37000); | ||
| 890 | for (i = 11; i < 1024; ++i) | ||
| 891 | msg[i] = cpu_to_le32(0x0); | ||
| 892 | |||
| 893 | amdgpu_bo_kunmap(bo); | ||
| 894 | amdgpu_bo_unreserve(bo); | ||
| 895 | |||
| 896 | return amdgpu_uvd_send_msg(ring, bo, fence); | ||
| 897 | } | ||
| 898 | |||
| 899 | int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | ||
| 900 | struct amdgpu_fence **fence) | ||
| 901 | { | ||
| 902 | struct amdgpu_device *adev = ring->adev; | ||
| 903 | struct amdgpu_bo *bo; | ||
| 904 | uint32_t *msg; | ||
| 905 | int r, i; | ||
| 906 | |||
| 907 | r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, | ||
| 908 | AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo); | ||
| 909 | if (r) | ||
| 910 | return r; | ||
| 911 | |||
| 912 | r = amdgpu_bo_reserve(bo, false); | ||
| 913 | if (r) { | ||
| 914 | amdgpu_bo_unref(&bo); | ||
| 915 | return r; | ||
| 916 | } | ||
| 917 | |||
| 918 | r = amdgpu_bo_kmap(bo, (void **)&msg); | ||
| 919 | if (r) { | ||
| 920 | amdgpu_bo_unreserve(bo); | ||
| 921 | amdgpu_bo_unref(&bo); | ||
| 922 | return r; | ||
| 923 | } | ||
| 924 | |||
| 925 | /* stitch together a UVD destroy msg */ | ||
| 926 | msg[0] = cpu_to_le32(0x00000de4); | ||
| 927 | msg[1] = cpu_to_le32(0x00000002); | ||
| 928 | msg[2] = cpu_to_le32(handle); | ||
| 929 | msg[3] = cpu_to_le32(0x00000000); | ||
| 930 | for (i = 4; i < 1024; ++i) | ||
| 931 | msg[i] = cpu_to_le32(0x0); | ||
| 932 | |||
| 933 | amdgpu_bo_kunmap(bo); | ||
| 934 | amdgpu_bo_unreserve(bo); | ||
| 935 | |||
| 936 | return amdgpu_uvd_send_msg(ring, bo, fence); | ||
| 937 | } | ||
| 938 | |||
| 939 | static void amdgpu_uvd_idle_work_handler(struct work_struct *work) | ||
| 940 | { | ||
| 941 | struct amdgpu_device *adev = | ||
| 942 | container_of(work, struct amdgpu_device, uvd.idle_work.work); | ||
| 943 | unsigned i, fences, handles = 0; | ||
| 944 | |||
| 945 | fences = amdgpu_fence_count_emitted(&adev->uvd.ring); | ||
| 946 | |||
| 947 | for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) | ||
| 948 | if (atomic_read(&adev->uvd.handles[i])) | ||
| 949 | ++handles; | ||
| 950 | |||
| 951 | if (fences == 0 && handles == 0) { | ||
| 952 | if (adev->pm.dpm_enabled) { | ||
| 953 | amdgpu_dpm_enable_uvd(adev, false); | ||
| 954 | } else { | ||
| 955 | amdgpu_asic_set_uvd_clocks(adev, 0, 0); | ||
| 956 | } | ||
| 957 | } else { | ||
| 958 | schedule_delayed_work(&adev->uvd.idle_work, | ||
| 959 | msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); | ||
| 960 | } | ||
| 961 | } | ||
| 962 | |||
| 963 | static void amdgpu_uvd_note_usage(struct amdgpu_device *adev) | ||
| 964 | { | ||
| 965 | bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); | ||
| 966 | set_clocks &= schedule_delayed_work(&adev->uvd.idle_work, | ||
| 967 | msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); | ||
| 968 | |||
| 969 | if (set_clocks) { | ||
| 970 | if (adev->pm.dpm_enabled) { | ||
| 971 | amdgpu_dpm_enable_uvd(adev, true); | ||
| 972 | } else { | ||
| 973 | amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); | ||
| 974 | } | ||
| 975 | } | ||
| 976 | } | ||
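As a worked example of the sizing rules in `amdgpu_uvd_cs_msg_decode()` above, take a hypothetical 1920x1088 H264 level 4.1 stream: `width_in_mb` = 1920/16 = 120, `height_in_mb` = ALIGN(1088/16, 2) = 68, so `fs_in_mb` = 8160. The image size is ALIGN(1920*1088*3/2, 1024) = 3133440 bytes, and `num_dpb_buffer` = 32768/8160 + 1 = 5 (well under the cap of 17). The minimum DPB is then 5*3133440 (reference pictures) + 120*68*5*192 (macroblock context) + 120*68*32 (IT surface) = 23761920 bytes, roughly 22.7 MiB; the `dpb_size` advertised in the message (msg[9]) must meet or exceed that, and the result lands in `buf_sizes[0x1]` for the pass-2 range checks.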
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h new file mode 100644 index 000000000000..2255aa710e33 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | |||
| @@ -0,0 +1,39 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __AMDGPU_UVD_H__ | ||
| 25 | #define __AMDGPU_UVD_H__ | ||
| 26 | |||
| 27 | int amdgpu_uvd_sw_init(struct amdgpu_device *adev); | ||
| 28 | int amdgpu_uvd_sw_fini(struct amdgpu_device *adev); | ||
| 29 | int amdgpu_uvd_suspend(struct amdgpu_device *adev); | ||
| 30 | int amdgpu_uvd_resume(struct amdgpu_device *adev); | ||
| 31 | int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | ||
| 32 | struct amdgpu_fence **fence); | ||
| 33 | int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | ||
| 34 | struct amdgpu_fence **fence); | ||
| 35 | void amdgpu_uvd_free_handles(struct amdgpu_device *adev, | ||
| 36 | struct drm_file *filp); | ||
| 37 | int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); | ||
| 38 | |||
| 39 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c new file mode 100644 index 000000000000..c65d93cb540d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | |||
| @@ -0,0 +1,727 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2013 Advanced Micro Devices, Inc. | ||
| 3 | * All Rights Reserved. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the | ||
| 7 | * "Software"), to deal in the Software without restriction, including | ||
| 8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 10 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 11 | * the following conditions: | ||
| 12 | * | ||
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 20 | * | ||
| 21 | * The above copyright notice and this permission notice (including the | ||
| 22 | * next paragraph) shall be included in all copies or substantial portions | ||
| 23 | * of the Software. | ||
| 24 | * | ||
| 25 | * Authors: Christian König <christian.koenig@amd.com> | ||
| 26 | */ | ||
| 27 | |||
| 28 | #include <linux/firmware.h> | ||
| 29 | #include <linux/module.h> | ||
| 30 | #include <drm/drmP.h> | ||
| 31 | #include <drm/drm.h> | ||
| 32 | |||
| 33 | #include "amdgpu.h" | ||
| 34 | #include "amdgpu_pm.h" | ||
| 35 | #include "amdgpu_vce.h" | ||
| 36 | #include "cikd.h" | ||
| 37 | |||
| 38 | /* 1 second timeout */ | ||
| 39 | #define VCE_IDLE_TIMEOUT_MS 1000 | ||
| 40 | |||
| 41 | /* Firmware Names */ | ||
| 42 | #ifdef CONFIG_DRM_AMDGPU_CIK | ||
| 43 | #define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin" | ||
| 44 | #define FIRMWARE_KABINI "radeon/kabini_vce.bin" | ||
| 45 | #define FIRMWARE_KAVERI "radeon/kaveri_vce.bin" | ||
| 46 | #define FIRMWARE_HAWAII "radeon/hawaii_vce.bin" | ||
| 47 | #define FIRMWARE_MULLINS "radeon/mullins_vce.bin" | ||
| 48 | #endif | ||
| 49 | #define FIRMWARE_TONGA "radeon/tonga_vce.bin" | ||
| 50 | #define FIRMWARE_CARRIZO "radeon/carrizo_vce.bin" | ||
| 51 | |||
| 52 | #ifdef CONFIG_DRM_AMDGPU_CIK | ||
| 53 | MODULE_FIRMWARE(FIRMWARE_BONAIRE); | ||
| 54 | MODULE_FIRMWARE(FIRMWARE_KABINI); | ||
| 55 | MODULE_FIRMWARE(FIRMWARE_KAVERI); | ||
| 56 | MODULE_FIRMWARE(FIRMWARE_HAWAII); | ||
| 57 | MODULE_FIRMWARE(FIRMWARE_MULLINS); | ||
| 58 | #endif | ||
| 59 | MODULE_FIRMWARE(FIRMWARE_TONGA); | ||
| 60 | MODULE_FIRMWARE(FIRMWARE_CARRIZO); | ||
| 61 | |||
| 62 | static void amdgpu_vce_idle_work_handler(struct work_struct *work); | ||
| 63 | |||
| 64 | /** | ||
| 65 | * amdgpu_vce_sw_init - allocate memory, load vce firmware | ||
| 66 | * | ||
| 67 | * @adev: amdgpu_device pointer | ||
| 68 | * | ||
| 69 | * First step to get VCE online, allocate memory and load the firmware | ||
| 70 | */ | ||
| 71 | int amdgpu_vce_sw_init(struct amdgpu_device *adev) | ||
| 72 | { | ||
| 73 | unsigned long size; | ||
| 74 | const char *fw_name; | ||
| 75 | const struct common_firmware_header *hdr; | ||
| 76 | unsigned ucode_version, version_major, version_minor, binary_id; | ||
| 77 | int i, r; | ||
| 78 | |||
| 79 | INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler); | ||
| 80 | |||
| 81 | switch (adev->asic_type) { | ||
| 82 | #ifdef CONFIG_DRM_AMDGPU_CIK | ||
| 83 | case CHIP_BONAIRE: | ||
| 84 | fw_name = FIRMWARE_BONAIRE; | ||
| 85 | break; | ||
| 86 | case CHIP_KAVERI: | ||
| 87 | fw_name = FIRMWARE_KAVERI; | ||
| 88 | break; | ||
| 89 | case CHIP_KABINI: | ||
| 90 | fw_name = FIRMWARE_KABINI; | ||
| 91 | break; | ||
| 92 | case CHIP_HAWAII: | ||
| 93 | fw_name = FIRMWARE_HAWAII; | ||
| 94 | break; | ||
| 95 | case CHIP_MULLINS: | ||
| 96 | fw_name = FIRMWARE_MULLINS; | ||
| 97 | break; | ||
| 98 | #endif | ||
| 99 | case CHIP_TONGA: | ||
| 100 | fw_name = FIRMWARE_TONGA; | ||
| 101 | break; | ||
| 102 | case CHIP_CARRIZO: | ||
| 103 | fw_name = FIRMWARE_CARRIZO; | ||
| 104 | break; | ||
| 105 | |||
| 106 | default: | ||
| 107 | return -EINVAL; | ||
| 108 | } | ||
| 109 | |||
| 110 | r = request_firmware(&adev->vce.fw, fw_name, adev->dev); | ||
| 111 | if (r) { | ||
| 112 | dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n", | ||
| 113 | fw_name); | ||
| 114 | return r; | ||
| 115 | } | ||
| 116 | |||
| 117 | r = amdgpu_ucode_validate(adev->vce.fw); | ||
| 118 | if (r) { | ||
| 119 | dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n", | ||
| 120 | fw_name); | ||
| 121 | release_firmware(adev->vce.fw); | ||
| 122 | adev->vce.fw = NULL; | ||
| 123 | return r; | ||
| 124 | } | ||
| 125 | |||
| 126 | hdr = (const struct common_firmware_header *)adev->vce.fw->data; | ||
| 127 | |||
| 128 | ucode_version = le32_to_cpu(hdr->ucode_version); | ||
| 129 | version_major = (ucode_version >> 20) & 0xfff; | ||
| 130 | version_minor = (ucode_version >> 8) & 0xfff; | ||
| 131 | binary_id = ucode_version & 0xff; | ||
| 132 | DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n", | ||
| 133 | version_major, version_minor, binary_id); | ||
| 134 | adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) | | ||
| 135 | (binary_id << 8)); | ||
| 136 | |||
| 137 | /* allocate firmware, stack and heap BO */ | ||
| 138 | |||
| 139 | size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes)) + | ||
| 140 | AMDGPU_VCE_STACK_SIZE + AMDGPU_VCE_HEAP_SIZE; | ||
| 141 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, | ||
| 142 | AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->vce.vcpu_bo); | ||
| 143 | if (r) { | ||
| 144 | dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); | ||
| 145 | return r; | ||
| 146 | } | ||
| 147 | |||
| 148 | r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false); | ||
| 149 | if (r) { | ||
| 150 | amdgpu_bo_unref(&adev->vce.vcpu_bo); | ||
| 151 | dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r); | ||
| 152 | return r; | ||
| 153 | } | ||
| 154 | |||
| 155 | r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM, | ||
| 156 | &adev->vce.gpu_addr); | ||
| 157 | amdgpu_bo_unreserve(adev->vce.vcpu_bo); | ||
| 158 | if (r) { | ||
| 159 | amdgpu_bo_unref(&adev->vce.vcpu_bo); | ||
| 160 | dev_err(adev->dev, "(%d) VCE bo pin failed\n", r); | ||
| 161 | return r; | ||
| 162 | } | ||
| 163 | |||
| 164 | for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { | ||
| 165 | atomic_set(&adev->vce.handles[i], 0); | ||
| 166 | adev->vce.filp[i] = NULL; | ||
| 167 | } | ||
| 168 | |||
| 169 | return 0; | ||
| 170 | } | ||
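
To make the version decode above concrete: ucode_version carries a 12-bit major in bits 31:20, a 12-bit minor in bits 19:8 and an 8-bit binary ID in bits 7:0, and the driver repacks them into fw_version as (major << 24) | (minor << 16) | (binary_id << 8). A standalone sketch with a made-up header value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ucode_version = 0x02805009;	/* hypothetical header value */

	unsigned version_major = (ucode_version >> 20) & 0xfff;	/* 40 */
	unsigned version_minor = (ucode_version >> 8) & 0xfff;		/* 80 */
	unsigned binary_id = ucode_version & 0xff;			/* 9 */
	uint32_t fw_version = (version_major << 24) |
			      (version_minor << 16) |
			      (binary_id << 8);

	/* prints: VCE 40.80, binary 9, fw_version 0x28500900 */
	printf("VCE %u.%u, binary %u, fw_version 0x%08x\n",
	       version_major, version_minor, binary_id, fw_version);
	return 0;
}
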
| 171 | |||
| 172 | /** | ||
| 173 | * amdgpu_vce_sw_fini - free memory | ||
| 174 | * | ||
| 175 | * @adev: amdgpu_device pointer | ||
| 176 | * | ||
| 177 | * Last step on VCE teardown, free firmware memory | ||
| 178 | */ | ||
| 179 | int amdgpu_vce_sw_fini(struct amdgpu_device *adev) | ||
| 180 | { | ||
| 181 | if (adev->vce.vcpu_bo == NULL) | ||
| 182 | return 0; | ||
| 183 | |||
| 184 | amdgpu_bo_unref(&adev->vce.vcpu_bo); | ||
| 185 | |||
| 186 | amdgpu_ring_fini(&adev->vce.ring[0]); | ||
| 187 | amdgpu_ring_fini(&adev->vce.ring[1]); | ||
| 188 | |||
| 189 | release_firmware(adev->vce.fw); | ||
| 190 | |||
| 191 | return 0; | ||
| 192 | } | ||
| 193 | |||
| 194 | /** | ||
| 195 | * amdgpu_vce_suspend - unpin VCE fw memory | ||
| 196 | * | ||
| 197 | * @adev: amdgpu_device pointer | ||
| 198 | * | ||
| 199 | */ | ||
| 200 | int amdgpu_vce_suspend(struct amdgpu_device *adev) | ||
| 201 | { | ||
| 202 | int i; | ||
| 203 | |||
| 204 | if (adev->vce.vcpu_bo == NULL) | ||
| 205 | return 0; | ||
| 206 | |||
| 207 | for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) | ||
| 208 | if (atomic_read(&adev->vce.handles[i])) | ||
| 209 | break; | ||
| 210 | |||
| 211 | if (i == AMDGPU_MAX_VCE_HANDLES) | ||
| 212 | return 0; | ||
| 213 | |||
| 214 | /* TODO: suspending running encoding sessions isn't supported */ | ||
| 215 | return -EINVAL; | ||
| 216 | } | ||
| 217 | |||
| 218 | /** | ||
| 219 | * amdgpu_vce_resume - pin VCE fw memory | ||
| 220 | * | ||
| 221 | * @adev: amdgpu_device pointer | ||
| 222 | * | ||
| 223 | */ | ||
| 224 | int amdgpu_vce_resume(struct amdgpu_device *adev) | ||
| 225 | { | ||
| 226 | void *cpu_addr; | ||
| 227 | const struct common_firmware_header *hdr; | ||
| 228 | unsigned offset; | ||
| 229 | int r; | ||
| 230 | |||
| 231 | if (adev->vce.vcpu_bo == NULL) | ||
| 232 | return -EINVAL; | ||
| 233 | |||
| 234 | r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false); | ||
| 235 | if (r) { | ||
| 236 | dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r); | ||
| 237 | return r; | ||
| 238 | } | ||
| 239 | |||
| 240 | r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr); | ||
| 241 | if (r) { | ||
| 242 | amdgpu_bo_unreserve(adev->vce.vcpu_bo); | ||
| 243 | dev_err(adev->dev, "(%d) VCE map failed\n", r); | ||
| 244 | return r; | ||
| 245 | } | ||
| 246 | |||
| 247 | hdr = (const struct common_firmware_header *)adev->vce.fw->data; | ||
| 248 | offset = le32_to_cpu(hdr->ucode_array_offset_bytes); | ||
| 249 | memcpy(cpu_addr, (adev->vce.fw->data) + offset, | ||
| 250 | (adev->vce.fw->size) - offset); | ||
| 251 | |||
| 252 | amdgpu_bo_kunmap(adev->vce.vcpu_bo); | ||
| 253 | |||
| 254 | amdgpu_bo_unreserve(adev->vce.vcpu_bo); | ||
| 255 | |||
| 256 | return 0; | ||
| 257 | } | ||
| 258 | |||
| 259 | /** | ||
| 260 | * amdgpu_vce_idle_work_handler - power off VCE | ||
| 261 | * | ||
| 262 | * @work: pointer to work structure | ||
| 263 | * | ||
| 264 | * Power off VCE when it's not used anymore | ||
| 265 | */ | ||
| 266 | static void amdgpu_vce_idle_work_handler(struct work_struct *work) | ||
| 267 | { | ||
| 268 | struct amdgpu_device *adev = | ||
| 269 | container_of(work, struct amdgpu_device, vce.idle_work.work); | ||
| 270 | |||
| 271 | if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) && | ||
| 272 | (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) { | ||
| 273 | if (adev->pm.dpm_enabled) { | ||
| 274 | amdgpu_dpm_enable_vce(adev, false); | ||
| 275 | } else { | ||
| 276 | amdgpu_asic_set_vce_clocks(adev, 0, 0); | ||
| 277 | } | ||
| 278 | } else { | ||
| 279 | schedule_delayed_work(&adev->vce.idle_work, | ||
| 280 | msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); | ||
| 281 | } | ||
| 282 | } | ||
| 283 | |||
| 284 | /** | ||
| 285 | * amdgpu_vce_note_usage - power up VCE | ||
| 286 | * | ||
| 287 | * @adev: amdgpu_device pointer | ||
| 288 | * | ||
| 289 | * Make sure VCE is powered up when we want to use it | ||
| 290 | */ | ||
| 291 | static void amdgpu_vce_note_usage(struct amdgpu_device *adev) | ||
| 292 | { | ||
| 293 | bool streams_changed = false; | ||
| 294 | bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work); | ||
| 295 | set_clocks &= schedule_delayed_work(&adev->vce.idle_work, | ||
| 296 | msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); | ||
| 297 | |||
| 298 | if (adev->pm.dpm_enabled) { | ||
| 299 | /* XXX figure out if the streams changed */ | ||
| 300 | streams_changed = false; | ||
| 301 | } | ||
| 302 | |||
| 303 | if (set_clocks || streams_changed) { | ||
| 304 | if (adev->pm.dpm_enabled) { | ||
| 305 | amdgpu_dpm_enable_vce(adev, true); | ||
| 306 | } else { | ||
| 307 | amdgpu_asic_set_vce_clocks(adev, 53300, 40000); | ||
| 308 | } | ||
| 309 | } | ||
| 310 | } | ||
| 311 | |||
| 312 | /** | ||
| 313 | * amdgpu_vce_free_handles - free still open VCE handles | ||
| 314 | * | ||
| 315 | * @adev: amdgpu_device pointer | ||
| 316 | * @filp: drm file pointer | ||
| 317 | * | ||
| 318 | * Close all VCE handles still open by this file pointer | ||
| 319 | */ | ||
| 320 | void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) | ||
| 321 | { | ||
| 322 | struct amdgpu_ring *ring = &adev->vce.ring[0]; | ||
| 323 | int i, r; | ||
| 324 | for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { | ||
| 325 | uint32_t handle = atomic_read(&adev->vce.handles[i]); | ||
| 326 | if (!handle || adev->vce.filp[i] != filp) | ||
| 327 | continue; | ||
| 328 | |||
| 329 | amdgpu_vce_note_usage(adev); | ||
| 330 | |||
| 331 | r = amdgpu_vce_get_destroy_msg(ring, handle, NULL); | ||
| 332 | if (r) | ||
| 333 | DRM_ERROR("Error destroying VCE handle (%d)!\n", r); | ||
| 334 | |||
| 335 | adev->vce.filp[i] = NULL; | ||
| 336 | atomic_set(&adev->vce.handles[i], 0); | ||
| 337 | } | ||
| 338 | } | ||
| 339 | |||
| 340 | /** | ||
| 341 | * amdgpu_vce_get_create_msg - generate a VCE create msg | ||
| 342 | * | ||
| 344 | * @ring: ring we should submit the msg to | ||
| 345 | * @handle: VCE session handle to use | ||
| 346 | * @fence: optional fence to return | ||
| 347 | * | ||
| 348 | * Open up a stream for HW test | ||
| 349 | */ | ||
| 350 | int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | ||
| 351 | struct amdgpu_fence **fence) | ||
| 352 | { | ||
| 353 | const unsigned ib_size_dw = 1024; | ||
| 354 | struct amdgpu_ib ib; | ||
| 355 | uint64_t dummy; | ||
| 356 | int i, r; | ||
| 357 | |||
| 358 | r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib); | ||
| 359 | if (r) { | ||
| 360 | DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); | ||
| 361 | return r; | ||
| 362 | } | ||
| 363 | |||
| 364 | dummy = ib.gpu_addr + 1024; | ||
| 365 | |||
| 366 | /* stitch together a VCE create msg */ | ||
| 367 | ib.length_dw = 0; | ||
| 368 | ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ | ||
| 369 | ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ | ||
| 370 | ib.ptr[ib.length_dw++] = handle; | ||
| 371 | |||
| 372 | ib.ptr[ib.length_dw++] = 0x00000030; /* len */ | ||
| 373 | ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */ | ||
| 374 | ib.ptr[ib.length_dw++] = 0x00000000; | ||
| 375 | ib.ptr[ib.length_dw++] = 0x00000042; | ||
| 376 | ib.ptr[ib.length_dw++] = 0x0000000a; | ||
| 377 | ib.ptr[ib.length_dw++] = 0x00000001; | ||
| 378 | ib.ptr[ib.length_dw++] = 0x00000080; | ||
| 379 | ib.ptr[ib.length_dw++] = 0x00000060; | ||
| 380 | ib.ptr[ib.length_dw++] = 0x00000100; | ||
| 381 | ib.ptr[ib.length_dw++] = 0x00000100; | ||
| 382 | ib.ptr[ib.length_dw++] = 0x0000000c; | ||
| 383 | ib.ptr[ib.length_dw++] = 0x00000000; | ||
| 384 | |||
| 385 | ib.ptr[ib.length_dw++] = 0x00000014; /* len */ | ||
| 386 | ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ | ||
| 387 | ib.ptr[ib.length_dw++] = upper_32_bits(dummy); | ||
| 388 | ib.ptr[ib.length_dw++] = dummy; | ||
| 389 | ib.ptr[ib.length_dw++] = 0x00000001; | ||
| 390 | |||
| 391 | for (i = ib.length_dw; i < ib_size_dw; ++i) | ||
| 392 | ib.ptr[i] = 0x0; | ||
| 393 | |||
| 394 | r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); | ||
| 395 | if (r) { | ||
| 396 | DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); | ||
| 397 | } | ||
| 398 | |||
| 399 | if (fence) | ||
| 400 | *fence = amdgpu_fence_ref(ib.fence); | ||
| 401 | |||
| 402 | amdgpu_ib_free(ring->adev, &ib); | ||
| 403 | |||
| 404 | return r; | ||
| 405 | } | ||
| 406 | |||
| 407 | /** | ||
| 408 | * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg | ||
| 409 | * | ||
| 411 | * @ring: ring we should submit the msg to | ||
| 412 | * @handle: VCE session handle to use | ||
| 413 | * @fence: optional fence to return | ||
| 414 | * | ||
| 415 | * Close up a stream for HW test or if userspace failed to do so | ||
| 416 | */ | ||
| 417 | int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | ||
| 418 | struct amdgpu_fence **fence) | ||
| 419 | { | ||
| 420 | const unsigned ib_size_dw = 1024; | ||
| 421 | struct amdgpu_ib ib; | ||
| 422 | uint64_t dummy; | ||
| 423 | int i, r; | ||
| 424 | |||
| 425 | r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib); | ||
| 426 | if (r) { | ||
| 427 | DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); | ||
| 428 | return r; | ||
| 429 | } | ||
| 430 | |||
| 431 | dummy = ib.gpu_addr + 1024; | ||
| 432 | |||
| 433 | /* stitch together a VCE destroy msg */ | ||
| 434 | ib.length_dw = 0; | ||
| 435 | ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ | ||
| 436 | ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ | ||
| 437 | ib.ptr[ib.length_dw++] = handle; | ||
| 438 | |||
| 439 | ib.ptr[ib.length_dw++] = 0x00000014; /* len */ | ||
| 440 | ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ | ||
| 441 | ib.ptr[ib.length_dw++] = upper_32_bits(dummy); | ||
| 442 | ib.ptr[ib.length_dw++] = dummy; | ||
| 443 | ib.ptr[ib.length_dw++] = 0x00000001; | ||
| 444 | |||
| 445 | ib.ptr[ib.length_dw++] = 0x00000008; /* len */ | ||
| 446 | ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */ | ||
| 447 | |||
| 448 | for (i = ib.length_dw; i < ib_size_dw; ++i) | ||
| 449 | ib.ptr[i] = 0x0; | ||
| 450 | |||
| 451 | r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); | ||
| 452 | if (r) { | ||
| 453 | DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); | ||
| 454 | } | ||
| 455 | |||
| 456 | if (fence) | ||
| 457 | *fence = amdgpu_fence_ref(ib.fence); | ||
| 458 | |||
| 459 | amdgpu_ib_free(ring->adev, &ib); | ||
| 460 | |||
| 461 | return r; | ||
| 462 | } | ||
| 463 | |||
| 464 | /** | ||
| 465 | * amdgpu_vce_cs_reloc - command submission relocation | ||
| 466 | * | ||
| 467 | * @p: parser context | ||
| 468 | * @ib_idx: index of the IB the relocation lives in | ||
| 469 | * @lo: index of the dword holding the lower address bits | ||
| 470 | * @hi: index of the dword holding the upper address bits | ||
| 471 | * Patch relocation inside command stream with real buffer address | ||
| 472 | */ | ||
| 473 | int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi) | ||
| 474 | { | ||
| 475 | struct amdgpu_bo_va_mapping *mapping; | ||
| 476 | struct amdgpu_ib *ib = &p->ibs[ib_idx]; | ||
| 477 | struct amdgpu_bo *bo; | ||
| 478 | uint64_t addr; | ||
| 479 | |||
| 480 | addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) | | ||
| 481 | ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32; | ||
| 482 | |||
| 483 | mapping = amdgpu_cs_find_mapping(p, addr, &bo); | ||
| 484 | if (mapping == NULL) { | ||
| 485 | DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d\n", | ||
| 486 | addr, lo, hi); | ||
| 487 | return -EINVAL; | ||
| 488 | } | ||
| 489 | |||
| 490 | addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE; | ||
| 491 | addr += amdgpu_bo_gpu_offset(bo); | ||
| 492 | |||
| 493 | ib->ptr[lo] = addr & 0xFFFFFFFF; | ||
| 494 | ib->ptr[hi] = addr >> 32; | ||
| 495 | |||
| 496 | return 0; | ||
| 497 | } | ||
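
A standalone sketch of the patching arithmetic above, with made-up numbers: the two dwords read from the IB form the userspace virtual address, the byte offset of the interval-tree mapping (mapping->it.start, counted in GPU pages) is subtracted to get the offset into the BO, and the BO's current GPU offset is added back before the result is written out as two dwords again.

#include <stdio.h>
#include <stdint.h>

#define GPU_PAGE_SIZE 4096ULL

int main(void)
{
	uint32_t lo = 0x00003000, hi = 0x00000001;	/* dwords from the IB */
	uint64_t map_start_page = 0x100000;		/* mapping->it.start */
	uint64_t bo_gpu_offset = 0x0000000080000000ULL;	/* amdgpu_bo_gpu_offset() */

	/* virtual address as submitted by userspace */
	uint64_t addr = (uint64_t)lo | ((uint64_t)hi << 32);

	/* offset into the mapping, then rebased onto the BO's placement */
	addr -= map_start_page * GPU_PAGE_SIZE;
	addr += bo_gpu_offset;

	/* prints: patched lo 0x80003000 hi 0x00000000 */
	printf("patched lo 0x%08x hi 0x%08x\n",
	       (uint32_t)(addr & 0xffffffff), (uint32_t)(addr >> 32));
	return 0;
}
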
| 498 | |||
| 499 | /** | ||
| 500 | * amdgpu_vce_ring_parse_cs - parse and validate the command stream | ||
| 501 | * | ||
| 502 | * @p: parser context | ||
| 503 | * @ib_idx: index of the IB to parse | ||
| 504 | */ | ||
| 505 | int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) | ||
| 506 | { | ||
| 507 | uint32_t handle = 0; | ||
| 508 | bool destroy = false; | ||
| 509 | int i, r, idx = 0; | ||
| 510 | struct amdgpu_ib *ib = &p->ibs[ib_idx]; | ||
| 511 | |||
| 512 | amdgpu_vce_note_usage(p->adev); | ||
| 513 | |||
| 514 | while (idx < ib->length_dw) { | ||
| 515 | uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx); | ||
| 516 | uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1); | ||
| 517 | |||
| 518 | if ((len < 8) || (len & 3)) { | ||
| 519 | DRM_ERROR("invalid VCE command length (%d)!\n", len); | ||
| 520 | return -EINVAL; | ||
| 521 | } | ||
| 522 | |||
| 523 | switch (cmd) { | ||
| 524 | case 0x00000001: // session | ||
| 525 | handle = amdgpu_get_ib_value(p, ib_idx, idx + 2); | ||
| 526 | break; | ||
| 527 | |||
| 528 | case 0x00000002: // task info | ||
| 529 | case 0x01000001: // create | ||
| 530 | case 0x04000001: // config extension | ||
| 531 | case 0x04000002: // pic control | ||
| 532 | case 0x04000005: // rate control | ||
| 533 | case 0x04000007: // motion estimation | ||
| 534 | case 0x04000008: // rdo | ||
| 535 | case 0x04000009: // vui | ||
| 536 | case 0x05000002: // auxiliary buffer | ||
| 537 | break; | ||
| 538 | |||
| 539 | case 0x03000001: // encode | ||
| 540 | r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9); | ||
| 541 | if (r) | ||
| 542 | return r; | ||
| 543 | |||
| 544 | r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11); | ||
| 545 | if (r) | ||
| 546 | return r; | ||
| 547 | break; | ||
| 548 | |||
| 549 | case 0x02000001: // destroy | ||
| 550 | destroy = true; | ||
| 551 | break; | ||
| 552 | |||
| 553 | case 0x05000001: // context buffer | ||
| 554 | case 0x05000004: // video bitstream buffer | ||
| 555 | case 0x05000005: // feedback buffer | ||
| 556 | r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2); | ||
| 557 | if (r) | ||
| 558 | return r; | ||
| 559 | break; | ||
| 560 | |||
| 561 | default: | ||
| 562 | DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); | ||
| 563 | return -EINVAL; | ||
| 564 | } | ||
| 565 | |||
| 566 | idx += len / 4; | ||
| 567 | } | ||
| 568 | |||
| 569 | if (destroy) { | ||
| 570 | /* IB contains a destroy msg, free the handle */ | ||
| 571 | for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) | ||
| 572 | atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0); | ||
| 573 | |||
| 574 | return 0; | ||
| 575 | } | ||
| 576 | |||
| 577 | /* create or encode, validate the handle */ | ||
| 578 | for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { | ||
| 579 | if (atomic_read(&p->adev->vce.handles[i]) == handle) | ||
| 580 | return 0; | ||
| 581 | } | ||
| 582 | |||
| 583 | /* handle not found, try to allocate a new one */ | ||
| 584 | for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { | ||
| 585 | if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) { | ||
| 586 | p->adev->vce.filp[i] = p->filp; | ||
| 587 | return 0; | ||
| 588 | } | ||
| 589 | } | ||
| 590 | |||
| 591 | DRM_ERROR("No more free VCE handles!\n"); | ||
| 592 | |||
| 593 | return -EINVAL; | ||
| 594 | } | ||
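
The session bookkeeping above is lock-free: a slot value of 0 means free, and atomic_cmpxchg() either claims a free slot for a new handle or clears a slot holding a destroyed one, returning the old value either way. A runnable userspace sketch of just that table (MAX_HANDLES, cmpxchg_u32 and the helper names are illustrative; C11 atomics stand in for the kernel's atomic_t):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16	/* stands in for AMDGPU_MAX_VCE_HANDLES */

static _Atomic uint32_t handles[MAX_HANDLES];

/* mimic the kernel's atomic_cmpxchg(): swap if equal, return the old value */
static uint32_t cmpxchg_u32(_Atomic uint32_t *v, uint32_t old, uint32_t new)
{
	atomic_compare_exchange_strong(v, &old, new);
	return old;	/* holds the observed value whether or not we swapped */
}

/* create/encode path: claim a free slot (0 means free) */
static int handle_alloc(uint32_t handle)
{
	int i;

	for (i = 0; i < MAX_HANDLES; ++i)
		if (cmpxchg_u32(&handles[i], 0, handle) == 0)
			return i;	/* slot was free and is now ours */
	return -1;			/* no more free handles */
}

/* destroy path: clear only a slot actually holding this handle */
static void handle_free(uint32_t handle)
{
	int i;

	for (i = 0; i < MAX_HANDLES; ++i)
		cmpxchg_u32(&handles[i], handle, 0);
}

int main(void)
{
	uint32_t h = 0x1234;	/* hypothetical session handle */

	printf("claimed slot %d\n", handle_alloc(h));	/* claimed slot 0 */
	handle_free(h);
	printf("claimed slot %d\n", handle_alloc(h));	/* claimed slot 0 again */
	return 0;
}
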
| 595 | |||
| 596 | /** | ||
| 597 | * amdgpu_vce_ring_emit_semaphore - emit a semaphore command | ||
| 598 | * | ||
| 599 | * @ring: engine to use | ||
| 600 | * @semaphore: address of semaphore | ||
| 601 | * @emit_wait: true=emit wait, false=emit signal | ||
| 602 | * | ||
| 603 | */ | ||
| 604 | bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring, | ||
| 605 | struct amdgpu_semaphore *semaphore, | ||
| 606 | bool emit_wait) | ||
| 607 | { | ||
| 608 | uint64_t addr = semaphore->gpu_addr; | ||
| 609 | |||
| 610 | amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE); | ||
| 611 | amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); | ||
| 612 | amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); | ||
| 613 | amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0)); | ||
| 614 | if (!emit_wait) | ||
| 615 | amdgpu_ring_write(ring, VCE_CMD_END); | ||
| 616 | |||
| 617 | return true; | ||
| 618 | } | ||
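
The two writes after VCE_CMD_SEMAPHORE encode bits 3..42 of the 8-byte-aligned semaphore address as two 20-bit fields. A standalone check with a hypothetical address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x00000123456789f8ULL;	/* hypothetical, 8-byte aligned */

	uint32_t lo = (addr >> 3) & 0x000fffff;		/* bits 3..22 */
	uint32_t hi = (addr >> 23) & 0x000fffff;	/* bits 23..42 */

	/* prints: lo 0xcf13f hi 0x2468a */
	printf("lo 0x%05x hi 0x%05x\n", lo, hi);
	return 0;
}
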
| 619 | |||
| 620 | /** | ||
| 621 | * amdgpu_vce_ring_emit_ib - execute indirect buffer | ||
| 622 | * | ||
| 623 | * @ring: engine to use | ||
| 624 | * @ib: the IB to execute | ||
| 625 | * | ||
| 626 | */ | ||
| 627 | void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) | ||
| 628 | { | ||
| 629 | amdgpu_ring_write(ring, VCE_CMD_IB); | ||
| 630 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); | ||
| 631 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); | ||
| 632 | amdgpu_ring_write(ring, ib->length_dw); | ||
| 633 | } | ||
| 634 | |||
| 635 | /** | ||
| 636 | * amdgpu_vce_ring_emit_fence - add a fence command to the ring | ||
| 637 | * | ||
| 638 | * @ring: engine to use | ||
| 639 | * @addr: address to write the fence value to | ||
| 640 | * @seq: sequence number to write | ||
| 641 | * @write64bits: write the sequence as 64 bits (not supported) | ||
| 641 | */ | ||
| 642 | void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, | ||
| 643 | bool write64bits) | ||
| 644 | { | ||
| 645 | WARN_ON(write64bits); | ||
| 646 | |||
| 647 | amdgpu_ring_write(ring, VCE_CMD_FENCE); | ||
| 648 | amdgpu_ring_write(ring, addr); | ||
| 649 | amdgpu_ring_write(ring, upper_32_bits(addr)); | ||
| 650 | amdgpu_ring_write(ring, seq); | ||
| 651 | amdgpu_ring_write(ring, VCE_CMD_TRAP); | ||
| 652 | amdgpu_ring_write(ring, VCE_CMD_END); | ||
| 653 | } | ||
| 654 | |||
| 655 | /** | ||
| 656 | * amdgpu_vce_ring_test_ring - test if VCE ring is working | ||
| 657 | * | ||
| 658 | * @ring: the engine to test on | ||
| 659 | * | ||
| 660 | */ | ||
| 661 | int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) | ||
| 662 | { | ||
| 663 | struct amdgpu_device *adev = ring->adev; | ||
| 664 | uint32_t rptr = amdgpu_ring_get_rptr(ring); | ||
| 665 | unsigned i; | ||
| 666 | int r; | ||
| 667 | |||
| 668 | r = amdgpu_ring_lock(ring, 16); | ||
| 669 | if (r) { | ||
| 670 | DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n", | ||
| 671 | ring->idx, r); | ||
| 672 | return r; | ||
| 673 | } | ||
| 674 | amdgpu_ring_write(ring, VCE_CMD_END); | ||
| 675 | amdgpu_ring_unlock_commit(ring); | ||
| 676 | |||
| 677 | for (i = 0; i < adev->usec_timeout; i++) { | ||
| 678 | if (amdgpu_ring_get_rptr(ring) != rptr) | ||
| 679 | break; | ||
| 680 | DRM_UDELAY(1); | ||
| 681 | } | ||
| 682 | |||
| 683 | if (i < adev->usec_timeout) { | ||
| 684 | DRM_INFO("ring test on %d succeeded in %d usecs\n", | ||
| 685 | ring->idx, i); | ||
| 686 | } else { | ||
| 687 | DRM_ERROR("amdgpu: ring %d test failed\n", | ||
| 688 | ring->idx); | ||
| 689 | r = -ETIMEDOUT; | ||
| 690 | } | ||
| 691 | |||
| 692 | return r; | ||
| 693 | } | ||
| 694 | |||
| 695 | /** | ||
| 696 | * amdgpu_vce_ring_test_ib - test if VCE IBs are working | ||
| 697 | * | ||
| 698 | * @ring: the engine to test on | ||
| 699 | * | ||
| 700 | */ | ||
| 701 | int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) | ||
| 702 | { | ||
| 703 | struct amdgpu_fence *fence = NULL; | ||
| 704 | int r; | ||
| 705 | |||
| 706 | r = amdgpu_vce_get_create_msg(ring, 1, NULL); | ||
| 707 | if (r) { | ||
| 708 | DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); | ||
| 709 | goto error; | ||
| 710 | } | ||
| 711 | |||
| 712 | r = amdgpu_vce_get_destroy_msg(ring, 1, &fence); | ||
| 713 | if (r) { | ||
| 714 | DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); | ||
| 715 | goto error; | ||
| 716 | } | ||
| 717 | |||
| 718 | r = amdgpu_fence_wait(fence, false); | ||
| 719 | if (r) { | ||
| 720 | DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); | ||
| 721 | } else { | ||
| 722 | DRM_INFO("ib test on ring %d succeeded\n", ring->idx); | ||
| 723 | } | ||
| 724 | error: | ||
| 725 | amdgpu_fence_unref(&fence); | ||
| 726 | return r; | ||
| 727 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h new file mode 100644 index 000000000000..b9411e43db25 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | |||
| @@ -0,0 +1,47 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __AMDGPU_VCE_H__ | ||
| 25 | #define __AMDGPU_VCE_H__ | ||
| 26 | |||
| 27 | int amdgpu_vce_sw_init(struct amdgpu_device *adev); | ||
| 28 | int amdgpu_vce_sw_fini(struct amdgpu_device *adev); | ||
| 29 | int amdgpu_vce_suspend(struct amdgpu_device *adev); | ||
| 30 | int amdgpu_vce_resume(struct amdgpu_device *adev); | ||
| 31 | int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | ||
| 32 | struct amdgpu_fence **fence); | ||
| 33 | int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | ||
| 34 | struct amdgpu_fence **fence); | ||
| 35 | void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); | ||
| 36 | int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi); | ||
| 37 | int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); | ||
| 38 | bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring, | ||
| 39 | struct amdgpu_semaphore *semaphore, | ||
| 40 | bool emit_wait); | ||
| 41 | void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); | ||
| 42 | void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, | ||
| 43 | bool write64bits); | ||
| 44 | int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); | ||
| 45 | int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring); | ||
| 46 | |||
| 47 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c new file mode 100644 index 000000000000..1cc01fb409dc --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
| @@ -0,0 +1,1248 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #include <drm/drmP.h> | ||
| 29 | #include <drm/amdgpu_drm.h> | ||
| 30 | #include "amdgpu.h" | ||
| 31 | #include "amdgpu_trace.h" | ||
| 32 | |||
| 33 | /* | ||
| 34 | * GPUVM | ||
| 35 | * GPUVM is similar to the legacy gart on older asics, however | ||
| 36 | * rather than there being a single global gart table | ||
| 37 | * for the entire GPU, there are multiple VM page tables active | ||
| 38 | * at any given time. The VM page tables can contain a mix of | ||
| 39 | * vram pages and system memory pages. System memory pages | ||
| 40 | * can be mapped as snooped (cached system pages) or unsnooped | ||
| 41 | * (uncached system pages). | ||
| 42 | * Each VM has an ID associated with it and there is a page table | ||
| 43 | * associated with each VMID. When executing a command buffer, | ||
| 44 | * the kernel tells the ring what VMID to use for that command | ||
| 45 | * buffer. VMIDs are allocated dynamically as commands are submitted. | ||
| 46 | * The userspace drivers maintain their own address space and the kernel | ||
| 47 | * sets up their page tables accordingly when they submit their | ||
| 48 | * command buffers and a VMID is assigned. | ||
| 49 | * Cayman/Trinity support up to 8 active VMs at any given time; | ||
| 50 | * SI supports 16. | ||
| 51 | */ | ||
| 52 | |||
| 53 | /** | ||
| 54 | * amdgpu_vm_num_pdes - return the number of page directory entries | ||
| 55 | * | ||
| 56 | * @adev: amdgpu_device pointer | ||
| 57 | * | ||
| 58 | * Calculate the number of page directory entries (cayman+). | ||
| 59 | */ | ||
| 60 | static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev) | ||
| 61 | { | ||
| 62 | return adev->vm_manager.max_pfn >> amdgpu_vm_block_size; | ||
| 63 | } | ||
| 64 | |||
| 65 | /** | ||
| 66 | * amdgpu_vm_directory_size - returns the size of the page directory in bytes | ||
| 67 | * | ||
| 68 | * @adev: amdgpu_device pointer | ||
| 69 | * | ||
| 70 | * Calculate the size of the page directory in bytes (cayman+). | ||
| 71 | */ | ||
| 72 | static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev) | ||
| 73 | { | ||
| 74 | return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8); | ||
| 75 | } | ||
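
A worked example of the sizing math above, with hypothetical values close to the driver defaults: an 8 GB address space of 4 KB GPU pages and a block size of 9 (512 PTEs per page table) give 4096 PDEs, a 32 KB page directory, and 2 MB of address space covered per PDE.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_pfn = 8ULL * 1024 * 1024 * 1024 / 4096;	/* 0x200000 pages */
	unsigned block_size = 9;	/* log2(PTEs per page table), hypothetical */

	uint64_t num_pdes = max_pfn >> block_size;	/* 4096 entries */
	/* one 8-byte entry per PDE; 32 KB is already 4 KB aligned,
	 * so AMDGPU_GPU_PAGE_ALIGN() would be a no-op here */
	uint64_t dir_bytes = num_pdes * 8;
	uint64_t bytes_per_pde = (1ULL << block_size) * 4096;	/* 2 MB */

	/* prints: 4096 PDEs, 32768 byte directory, 2097152 bytes per PDE */
	printf("%llu PDEs, %llu byte directory, %llu bytes per PDE\n",
	       (unsigned long long)num_pdes,
	       (unsigned long long)dir_bytes,
	       (unsigned long long)bytes_per_pde);
	return 0;
}
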
| 76 | |||
| 77 | /** | ||
| 78 | * amdgpu_vm_get_bos - add the vm BOs to a validation list | ||
| 79 | * | ||
| 80 | * @vm: vm providing the BOs | ||
| 81 | * @head: head of validation list | ||
| 82 | * | ||
| 83 | * Add the page directory to the list of BOs to | ||
| 84 | * validate for command submission (cayman+). | ||
| 85 | */ | ||
| 86 | struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, | ||
| 87 | struct amdgpu_vm *vm, | ||
| 88 | struct list_head *head) | ||
| 89 | { | ||
| 90 | struct amdgpu_bo_list_entry *list; | ||
| 91 | unsigned i, idx; | ||
| 92 | |||
| 93 | list = drm_malloc_ab(vm->max_pde_used + 2, | ||
| 94 | sizeof(struct amdgpu_bo_list_entry)); | ||
| 95 | if (!list) | ||
| 96 | return NULL; | ||
| 97 | |||
| 98 | /* add the vm page table to the list */ | ||
| 99 | list[0].robj = vm->page_directory; | ||
| 100 | list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; | ||
| 101 | list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; | ||
| 102 | list[0].priority = 0; | ||
| 103 | list[0].tv.bo = &vm->page_directory->tbo; | ||
| 104 | list[0].tv.shared = true; | ||
| 105 | list_add(&list[0].tv.head, head); | ||
| 106 | |||
| 107 | for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { | ||
| 108 | if (!vm->page_tables[i].bo) | ||
| 109 | continue; | ||
| 110 | |||
| 111 | list[idx].robj = vm->page_tables[i].bo; | ||
| 112 | list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; | ||
| 113 | list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; | ||
| 114 | list[idx].priority = 0; | ||
| 115 | list[idx].tv.bo = &list[idx].robj->tbo; | ||
| 116 | list[idx].tv.shared = true; | ||
| 117 | list_add(&list[idx++].tv.head, head); | ||
| 118 | } | ||
| 119 | |||
| 120 | return list; | ||
| 121 | } | ||
| 122 | |||
| 123 | /** | ||
| 124 | * amdgpu_vm_grab_id - allocate the next free VMID | ||
| 125 | * | ||
| 126 | * @ring: ring we want to submit job to | ||
| 127 | * @vm: vm to allocate id for | ||
| 128 | * | ||
| 129 | * Allocate an id for the vm (cayman+). | ||
| 130 | * Returns the fence we need to sync to (if any). | ||
| 131 | * | ||
| 132 | * Global and local mutex must be locked! | ||
| 133 | */ | ||
| 134 | struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, | ||
| 135 | struct amdgpu_vm *vm) | ||
| 136 | { | ||
| 137 | struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {}; | ||
| 138 | struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; | ||
| 139 | struct amdgpu_device *adev = ring->adev; | ||
| 140 | |||
| 141 | unsigned choices[2] = {}; | ||
| 142 | unsigned i; | ||
| 143 | |||
| 144 | /* check if the id is still valid */ | ||
| 145 | if (vm_id->id && vm_id->last_id_use && | ||
| 146 | vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) | ||
| 147 | return NULL; | ||
| 148 | |||
| 149 | /* we definitely need to flush */ | ||
| 150 | vm_id->pd_gpu_addr = ~0ll; | ||
| 151 | |||
| 152 | /* skip over VMID 0, since it is the system VM */ | ||
| 153 | for (i = 1; i < adev->vm_manager.nvm; ++i) { | ||
| 154 | struct amdgpu_fence *fence = adev->vm_manager.active[i]; | ||
| 155 | |||
| 156 | if (fence == NULL) { | ||
| 157 | /* found a free one */ | ||
| 158 | vm_id->id = i; | ||
| 159 | trace_amdgpu_vm_grab_id(i, ring->idx); | ||
| 160 | return NULL; | ||
| 161 | } | ||
| 162 | |||
| 163 | if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) { | ||
| 164 | best[fence->ring->idx] = fence; | ||
| 165 | choices[fence->ring == ring ? 0 : 1] = i; | ||
| 166 | } | ||
| 167 | } | ||
| 168 | |||
| 169 | for (i = 0; i < 2; ++i) { | ||
| 170 | if (choices[i]) { | ||
| 171 | vm_id->id = choices[i]; | ||
| 172 | trace_amdgpu_vm_grab_id(choices[i], ring->idx); | ||
| 173 | return adev->vm_manager.active[choices[i]]; | ||
| 174 | } | ||
| 175 | } | ||
| 176 | |||
| 177 | /* should never happen */ | ||
| 178 | BUG(); | ||
| 179 | return NULL; | ||
| 180 | } | ||
| 181 | |||
| 182 | /** | ||
| 183 | * amdgpu_vm_flush - hardware flush the vm | ||
| 184 | * | ||
| 185 | * @ring: ring to use for flush | ||
| 186 | * @vm: vm we want to flush | ||
| 187 | * @updates: last vm update that we waited for | ||
| 188 | * | ||
| 189 | * Flush the vm (cayman+). | ||
| 190 | * | ||
| 191 | * Global and local mutex must be locked! | ||
| 192 | */ | ||
| 193 | void amdgpu_vm_flush(struct amdgpu_ring *ring, | ||
| 194 | struct amdgpu_vm *vm, | ||
| 195 | struct amdgpu_fence *updates) | ||
| 196 | { | ||
| 197 | uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); | ||
| 198 | struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; | ||
| 199 | |||
| 200 | if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates || | ||
| 201 | amdgpu_fence_is_earlier(vm_id->flushed_updates, updates)) { | ||
| 202 | |||
| 203 | trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); | ||
| 204 | amdgpu_fence_unref(&vm_id->flushed_updates); | ||
| 205 | vm_id->flushed_updates = amdgpu_fence_ref(updates); | ||
| 206 | vm_id->pd_gpu_addr = pd_addr; | ||
| 207 | amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); | ||
| 208 | } | ||
| 209 | } | ||
| 210 | |||
| 211 | /** | ||
| 212 | * amdgpu_vm_fence - remember fence for vm | ||
| 213 | * | ||
| 214 | * @adev: amdgpu_device pointer | ||
| 215 | * @vm: vm we want to fence | ||
| 216 | * @fence: fence to remember | ||
| 217 | * | ||
| 218 | * Fence the vm (cayman+). | ||
| 219 | * Set the fence used to protect page table and id. | ||
| 220 | * | ||
| 221 | * Global and local mutex must be locked! | ||
| 222 | */ | ||
| 223 | void amdgpu_vm_fence(struct amdgpu_device *adev, | ||
| 224 | struct amdgpu_vm *vm, | ||
| 225 | struct amdgpu_fence *fence) | ||
| 226 | { | ||
| 227 | unsigned ridx = fence->ring->idx; | ||
| 228 | unsigned vm_id = vm->ids[ridx].id; | ||
| 229 | |||
| 230 | amdgpu_fence_unref(&adev->vm_manager.active[vm_id]); | ||
| 231 | adev->vm_manager.active[vm_id] = amdgpu_fence_ref(fence); | ||
| 232 | |||
| 233 | amdgpu_fence_unref(&vm->ids[ridx].last_id_use); | ||
| 234 | vm->ids[ridx].last_id_use = amdgpu_fence_ref(fence); | ||
| 235 | } | ||
| 236 | |||
| 237 | /** | ||
| 238 | * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo | ||
| 239 | * | ||
| 240 | * @vm: requested vm | ||
| 241 | * @bo: requested buffer object | ||
| 242 | * | ||
| 243 | * Find @bo inside the requested vm (cayman+). | ||
| 244 | * Search inside the @bo's vm list for the requested vm | ||
| 245 | * Returns the found bo_va or NULL if none is found | ||
| 246 | * | ||
| 247 | * Object has to be reserved! | ||
| 248 | */ | ||
| 249 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | ||
| 250 | struct amdgpu_bo *bo) | ||
| 251 | { | ||
| 252 | struct amdgpu_bo_va *bo_va; | ||
| 253 | |||
| 254 | list_for_each_entry(bo_va, &bo->va, bo_list) { | ||
| 255 | if (bo_va->vm == vm) { | ||
| 256 | return bo_va; | ||
| 257 | } | ||
| 258 | } | ||
| 259 | return NULL; | ||
| 260 | } | ||
| 261 | |||
| 262 | /** | ||
| 263 | * amdgpu_vm_update_pages - helper to call the right asic function | ||
| 264 | * | ||
| 265 | * @adev: amdgpu_device pointer | ||
| 266 | * @ib: indirect buffer to fill with commands | ||
| 267 | * @pe: addr of the page entry | ||
| 268 | * @addr: dst addr to write into pe | ||
| 269 | * @count: number of page entries to update | ||
| 270 | * @incr: increase next addr by incr bytes | ||
| 271 | * @flags: hw access flags | ||
| 272 | * @gtt_flags: GTT hw access flags | ||
| 273 | * | ||
| 274 | * Traces the parameters and calls the right asic functions | ||
| 275 | * to setup the page table using the DMA. | ||
| 276 | */ | ||
| 277 | static void amdgpu_vm_update_pages(struct amdgpu_device *adev, | ||
| 278 | struct amdgpu_ib *ib, | ||
| 279 | uint64_t pe, uint64_t addr, | ||
| 280 | unsigned count, uint32_t incr, | ||
| 281 | uint32_t flags, uint32_t gtt_flags) | ||
| 282 | { | ||
| 283 | trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); | ||
| 284 | |||
| 285 | if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) { | ||
| 286 | uint64_t src = adev->gart.table_addr + (addr >> 12) * 8; | ||
| 287 | amdgpu_vm_copy_pte(adev, ib, pe, src, count); | ||
| 288 | |||
| 289 | } else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) { | ||
| 290 | amdgpu_vm_write_pte(adev, ib, pe, addr, | ||
| 291 | count, incr, flags); | ||
| 292 | |||
| 293 | } else { | ||
| 294 | amdgpu_vm_set_pte_pde(adev, ib, pe, addr, | ||
| 295 | count, incr, flags); | ||
| 296 | } | ||
| 297 | } | ||
| 298 | |||
| 299 | /** | ||
| 300 | * amdgpu_vm_clear_bo - initially clear the page dir/table | ||
| 301 | * | ||
| 302 | * @adev: amdgpu_device pointer | ||
| 303 | * @bo: bo to clear | ||
| 304 | */ | ||
| 305 | static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | ||
| 306 | struct amdgpu_bo *bo) | ||
| 307 | { | ||
| 308 | struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; | ||
| 309 | struct amdgpu_ib ib; | ||
| 310 | unsigned entries; | ||
| 311 | uint64_t addr; | ||
| 312 | int r; | ||
| 313 | |||
| 314 | r = amdgpu_bo_reserve(bo, false); | ||
| 315 | if (r) | ||
| 316 | return r; | ||
| 317 | |||
| 318 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); | ||
| 319 | if (r) | ||
| 320 | goto error_unreserve; | ||
| 321 | |||
| 322 | addr = amdgpu_bo_gpu_offset(bo); | ||
| 323 | entries = amdgpu_bo_size(bo) / 8; | ||
| 324 | |||
| 325 | r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib); | ||
| 326 | if (r) | ||
| 327 | goto error_unreserve; | ||
| 328 | |||
| 329 | ib.length_dw = 0; | ||
| 330 | |||
| 331 | amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0); | ||
| 332 | amdgpu_vm_pad_ib(adev, &ib); | ||
| 333 | WARN_ON(ib.length_dw > 64); | ||
| 334 | |||
| 335 | r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM); | ||
| 336 | if (r) | ||
| 337 | goto error_free; | ||
| 338 | |||
| 339 | amdgpu_bo_fence(bo, ib.fence, false); | ||
| 340 | |||
| 341 | error_free: | ||
| 342 | amdgpu_ib_free(adev, &ib); | ||
| 343 | |||
| 344 | error_unreserve: | ||
| 345 | amdgpu_bo_unreserve(bo); | ||
| 346 | return r; | ||
| 347 | } | ||
| 348 | |||
| 349 | /** | ||
| 350 | * amdgpu_vm_map_gart - get the physical address of a gart page | ||
| 351 | * | ||
| 352 | * @adev: amdgpu_device pointer | ||
| 353 | * @addr: the unmapped addr | ||
| 354 | * | ||
| 355 | * Look up the physical address of the page that the pte resolves | ||
| 356 | * to (cayman+). | ||
| 357 | * Returns the physical address of the page. | ||
| 358 | */ | ||
| 359 | uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr) | ||
| 360 | { | ||
| 361 | uint64_t result; | ||
| 362 | |||
| 363 | /* page table offset */ | ||
| 364 | result = adev->gart.pages_addr[addr >> PAGE_SHIFT]; | ||
| 365 | |||
| 366 | /* in case cpu page size != gpu page size */ | ||
| 367 | result |= addr & (~PAGE_MASK); | ||
| 368 | |||
| 369 | return result; | ||
| 370 | } | ||
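
A short standalone illustration of the lookup above with made-up numbers: the table gives the physical address of the backing CPU page, and the low bits of the original address are OR-ed back in to cover the case where CPU and GPU page sizes differ.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_MASK (~((1ULL << PAGE_SHIFT) - 1))

int main(void)
{
	/* stand-in for adev->gart.pages_addr[addr >> PAGE_SHIFT], made up */
	uint64_t pages_addr_entry = 0x00000000abc00000ULL;
	uint64_t addr = 0x0000000000005678ULL;	/* unmapped GART address */

	uint64_t result = pages_addr_entry;
	result |= addr & ~PAGE_MASK;	/* keep the sub-page offset 0x678 */

	/* prints: 0xabc00678 */
	printf("0x%llx\n", (unsigned long long)result);
	return 0;
}
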
| 371 | |||
| 372 | /** | ||
| 373 | * amdgpu_vm_update_page_directory - make sure that the page directory is valid | ||
| 374 | * | ||
| 375 | * @adev: amdgpu_device pointer | ||
| 376 | * @vm: requested vm | ||
| 379 | * | ||
| 380 | * Allocates new page tables if necessary | ||
| 381 | * and updates the page directory (cayman+). | ||
| 382 | * Returns 0 for success, error for failure. | ||
| 383 | * | ||
| 384 | * Global and local mutex must be locked! | ||
| 385 | */ | ||
| 386 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | ||
| 387 | struct amdgpu_vm *vm) | ||
| 388 | { | ||
| 389 | struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; | ||
| 390 | struct amdgpu_bo *pd = vm->page_directory; | ||
| 391 | uint64_t pd_addr = amdgpu_bo_gpu_offset(pd); | ||
| 392 | uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; | ||
| 393 | uint64_t last_pde = ~0, last_pt = ~0; | ||
| 394 | unsigned count = 0, pt_idx, ndw; | ||
| 395 | struct amdgpu_ib ib; | ||
| 396 | int r; | ||
| 397 | |||
| 398 | /* padding, etc. */ | ||
| 399 | ndw = 64; | ||
| 400 | |||
| 401 | /* assume the worst case */ | ||
| 402 | ndw += vm->max_pde_used * 6; | ||
| 403 | |||
| 404 | /* update too big for an IB */ | ||
| 405 | if (ndw > 0xfffff) | ||
| 406 | return -ENOMEM; | ||
| 407 | |||
| 408 | r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib); | ||
| 409 | if (r) | ||
| 410 | return r; | ||
| 411 | ib.length_dw = 0; | ||
| 412 | |||
| 413 | /* walk over the address space and update the page directory */ | ||
| 414 | for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { | ||
| 415 | struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo; | ||
| 416 | uint64_t pde, pt; | ||
| 417 | |||
| 418 | if (bo == NULL) | ||
| 419 | continue; | ||
| 420 | |||
| 421 | pt = amdgpu_bo_gpu_offset(bo); | ||
| 422 | if (vm->page_tables[pt_idx].addr == pt) | ||
| 423 | continue; | ||
| 424 | vm->page_tables[pt_idx].addr = pt; | ||
| 425 | |||
| 426 | pde = pd_addr + pt_idx * 8; | ||
| 427 | if (((last_pde + 8 * count) != pde) || | ||
| 428 | ((last_pt + incr * count) != pt)) { | ||
| 429 | |||
| 430 | if (count) { | ||
| 431 | amdgpu_vm_update_pages(adev, &ib, last_pde, | ||
| 432 | last_pt, count, incr, | ||
| 433 | AMDGPU_PTE_VALID, 0); | ||
| 434 | } | ||
| 435 | |||
| 436 | count = 1; | ||
| 437 | last_pde = pde; | ||
| 438 | last_pt = pt; | ||
| 439 | } else { | ||
| 440 | ++count; | ||
| 441 | } | ||
| 442 | } | ||
| 443 | |||
| 444 | if (count) | ||
| 445 | amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count, | ||
| 446 | incr, AMDGPU_PTE_VALID, 0); | ||
| 447 | |||
| 448 | if (ib.length_dw != 0) { | ||
| 449 | amdgpu_vm_pad_ib(adev, &ib); | ||
| 450 | amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); | ||
| 451 | WARN_ON(ib.length_dw > ndw); | ||
| 452 | r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM); | ||
| 453 | if (r) { | ||
| 454 | amdgpu_ib_free(adev, &ib); | ||
| 455 | return r; | ||
| 456 | } | ||
| 457 | amdgpu_bo_fence(pd, ib.fence, false); | ||
| 458 | } | ||
| 459 | amdgpu_ib_free(adev, &ib); | ||
| 460 | |||
| 461 | return 0; | ||
| 462 | } | ||
| 463 | |||
| 464 | /** | ||
| 465 | * amdgpu_vm_frag_ptes - add fragment information to PTEs | ||
| 466 | * | ||
| 467 | * @adev: amdgpu_device pointer | ||
| 468 | * @ib: IB for the update | ||
| 469 | * @pe_start: first PTE to handle | ||
| 470 | * @pe_end: last PTE to handle | ||
| 471 | * @addr: addr those PTEs should point to | ||
| 472 | * @flags: hw mapping flags | ||
| 473 | * @gtt_flags: GTT hw mapping flags | ||
| 474 | * | ||
| 475 | * Global and local mutex must be locked! | ||
| 476 | */ | ||
| 477 | static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, | ||
| 478 | struct amdgpu_ib *ib, | ||
| 479 | uint64_t pe_start, uint64_t pe_end, | ||
| 480 | uint64_t addr, uint32_t flags, | ||
| 481 | uint32_t gtt_flags) | ||
| 482 | { | ||
| 483 | /** | ||
| 484 | * The MC L1 TLB supports variable sized pages, based on a fragment | ||
| 485 | * field in the PTE. When this field is set to a non-zero value, page | ||
| 486 | * granularity is increased from 4KB to (1 << (12 + frag)). The PTE | ||
| 487 | * flags are considered valid for all PTEs within the fragment range | ||
| 488 | * and corresponding mappings are assumed to be physically contiguous. | ||
| 489 | * | ||
| 490 | * The L1 TLB can store a single PTE for the whole fragment, | ||
| 491 | * significantly increasing the space available for translation | ||
| 492 | * caching. This leads to large improvements in throughput when the | ||
| 493 | * TLB is under pressure. | ||
| 494 | * | ||
| 495 | * The L2 TLB distributes small and large fragments into two | ||
| 496 | * asymmetric partitions. The large fragment cache is significantly | ||
| 497 | * larger. Thus, we try to use large fragments wherever possible. | ||
| 498 | * Userspace can support this by aligning virtual base address and | ||
| 499 | * allocation size to the fragment size. | ||
| 500 | */ | ||
| 501 | |||
| 502 | /* SI and newer are optimized for 64KB */ | ||
| 503 | uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB; | ||
| 504 | uint64_t frag_align = 0x80; | ||
| 505 | |||
| 506 | uint64_t frag_start = ALIGN(pe_start, frag_align); | ||
| 507 | uint64_t frag_end = pe_end & ~(frag_align - 1); | ||
| 508 | |||
| 509 | unsigned count; | ||
| 510 | |||
| 511 | /* system pages are not contiguous */ | ||
| 512 | if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) || | ||
| 513 | (frag_start >= frag_end)) { | ||
| 514 | |||
| 515 | count = (pe_end - pe_start) / 8; | ||
| 516 | amdgpu_vm_update_pages(adev, ib, pe_start, addr, count, | ||
| 517 | AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); | ||
| 518 | return; | ||
| 519 | } | ||
| 520 | |||
| 521 | /* handle the 4K area at the beginning */ | ||
| 522 | if (pe_start != frag_start) { | ||
| 523 | count = (frag_start - pe_start) / 8; | ||
| 524 | amdgpu_vm_update_pages(adev, ib, pe_start, addr, count, | ||
| 525 | AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); | ||
| 526 | addr += AMDGPU_GPU_PAGE_SIZE * count; | ||
| 527 | } | ||
| 528 | |||
| 529 | /* handle the area in the middle */ | ||
| 530 | count = (frag_end - frag_start) / 8; | ||
| 531 | amdgpu_vm_update_pages(adev, ib, frag_start, addr, count, | ||
| 532 | AMDGPU_GPU_PAGE_SIZE, flags | frag_flags, | ||
| 533 | gtt_flags); | ||
| 534 | |||
| 535 | /* handle the 4K area at the end */ | ||
| 536 | if (frag_end != pe_end) { | ||
| 537 | addr += AMDGPU_GPU_PAGE_SIZE * count; | ||
| 538 | count = (pe_end - frag_end) / 8; | ||
| 539 | amdgpu_vm_update_pages(adev, ib, frag_end, addr, count, | ||
| 540 | AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); | ||
| 541 | } | ||
| 542 | } | ||
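
The split above works on byte addresses of PTEs: with 8-byte entries and 4 KB pages, the 0x80-byte fragment alignment corresponds to 16 PTEs, i.e. 64 KB of virtual address space per fragment. A standalone sketch of the head/middle/tail split with made-up addresses:

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t pe_start = 0x1008, pe_end = 0x1310;	/* byte addrs of PTEs */
	uint64_t frag_align = 0x80;	/* 16 PTEs = 64 KB with 4 KB pages */

	uint64_t frag_start = ALIGN_UP(pe_start, frag_align);	/* 0x1080 */
	uint64_t frag_end = pe_end & ~(frag_align - 1);		/* 0x1300 */

	/* prints: head 15, middle 80, tail 2 PTEs */
	printf("head %llu, middle %llu, tail %llu PTEs\n",
	       (unsigned long long)(frag_start - pe_start) / 8,
	       (unsigned long long)(frag_end - frag_start) / 8,
	       (unsigned long long)(pe_end - frag_end) / 8);
	return 0;
}
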
| 543 | |||
| 544 | /** | ||
| 545 | * amdgpu_vm_update_ptes - make sure that page tables are valid | ||
| 546 | * | ||
| 547 | * @adev: amdgpu_device pointer | ||
| 548 | * @vm: requested vm | ||
| 549 | * @start: start of GPU address range | ||
| 550 | * @end: end of GPU address range | ||
| 551 | * @dst: destination address to map to | ||
| 552 | * @flags: mapping flags | ||
| 553 | * | ||
| 554 | * Update the page tables in the range @start - @end (cayman+). | ||
| 555 | * | ||
| 556 | * Global and local mutex must be locked! | ||
| 557 | */ | ||
| 558 | static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, | ||
| 559 | struct amdgpu_vm *vm, | ||
| 560 | struct amdgpu_ib *ib, | ||
| 561 | uint64_t start, uint64_t end, | ||
| 562 | uint64_t dst, uint32_t flags, | ||
| 563 | uint32_t gtt_flags) | ||
| 564 | { | ||
| 565 | uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; | ||
| 566 | uint64_t last_pte = ~0, last_dst = ~0; | ||
| 567 | unsigned count = 0; | ||
| 568 | uint64_t addr; | ||
| 569 | |||
| 570 | /* walk over the address space and update the page tables */ | ||
| 571 | for (addr = start; addr < end; ) { | ||
| 572 | uint64_t pt_idx = addr >> amdgpu_vm_block_size; | ||
| 573 | struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo; | ||
| 574 | unsigned nptes; | ||
| 575 | uint64_t pte; | ||
| 576 | int r; | ||
| 577 | |||
| 578 | amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, | ||
| 579 | AMDGPU_FENCE_OWNER_VM); | ||
| 580 | r = reservation_object_reserve_shared(pt->tbo.resv); | ||
| 581 | if (r) | ||
| 582 | return r; | ||
| 583 | |||
| 584 | if ((addr & ~mask) == (end & ~mask)) | ||
| 585 | nptes = end - addr; | ||
| 586 | else | ||
| 587 | nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); | ||
| 588 | |||
| 589 | pte = amdgpu_bo_gpu_offset(pt); | ||
| 590 | pte += (addr & mask) * 8; | ||
| 591 | |||
| 592 | if ((last_pte + 8 * count) != pte) { | ||
| 593 | |||
| 594 | if (count) { | ||
| 595 | amdgpu_vm_frag_ptes(adev, ib, last_pte, | ||
| 596 | last_pte + 8 * count, | ||
| 597 | last_dst, flags, | ||
| 598 | gtt_flags); | ||
| 599 | } | ||
| 600 | |||
| 601 | count = nptes; | ||
| 602 | last_pte = pte; | ||
| 603 | last_dst = dst; | ||
| 604 | } else { | ||
| 605 | count += nptes; | ||
| 606 | } | ||
| 607 | |||
| 608 | addr += nptes; | ||
| 609 | dst += nptes * AMDGPU_GPU_PAGE_SIZE; | ||
| 610 | } | ||
| 611 | |||
| 612 | if (count) { | ||
| 613 | amdgpu_vm_frag_ptes(adev, ib, last_pte, | ||
| 614 | last_pte + 8 * count, | ||
| 615 | last_dst, flags, gtt_flags); | ||
| 616 | } | ||
| 617 | |||
| 618 | return 0; | ||
| 619 | } | ||
| 620 | |||
| 621 | /** | ||
| 622 | * amdgpu_vm_fence_pts - fence page tables after an update | ||
| 623 | * | ||
| 624 | * @vm: requested vm | ||
| 625 | * @start: start of GPU address range | ||
| 626 | * @end: end of GPU address range | ||
| 627 | * @fence: fence to use | ||
| 628 | * | ||
| 629 | * Fence the page tables in the range @start - @end (cayman+). | ||
| 630 | * | ||
| 631 | * Global and local mutex must be locked! | ||
| 632 | */ | ||
| 633 | static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm, | ||
| 634 | uint64_t start, uint64_t end, | ||
| 635 | struct amdgpu_fence *fence) | ||
| 636 | { | ||
| 637 | unsigned i; | ||
| 638 | |||
| 639 | start >>= amdgpu_vm_block_size; | ||
| 640 | end >>= amdgpu_vm_block_size; | ||
| 641 | |||
| 642 | for (i = start; i <= end; ++i) | ||
| 643 | amdgpu_bo_fence(vm->page_tables[i].bo, fence, true); | ||
| 644 | } | ||
| 645 | |||
| 646 | /** | ||
| 647 | * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table | ||
| 648 | * | ||
| 649 | * @adev: amdgpu_device pointer | ||
| 650 | * @vm: requested vm | ||
| 651 | * @mapping: mapped range and flags to use for the update | ||
| 652 | * @addr: addr to set the area to | ||
| 653 | * @gtt_flags: flags as they are used for GTT | ||
| 654 | * @fence: optional resulting fence | ||
| 655 | * | ||
| 656 | * Fill in the page table entries for @mapping. | ||
| 657 | * Returns 0 for success, -EINVAL for failure. | ||
| 658 | * | ||
| 659 | * Object has to be reserved and mutex must be locked! | ||
| 660 | */ | ||
| 661 | static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | ||
| 662 | struct amdgpu_vm *vm, | ||
| 663 | struct amdgpu_bo_va_mapping *mapping, | ||
| 664 | uint64_t addr, uint32_t gtt_flags, | ||
| 665 | struct amdgpu_fence **fence) | ||
| 666 | { | ||
| 667 | struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; | ||
| 668 | unsigned nptes, ncmds, ndw; | ||
| 669 | uint32_t flags = gtt_flags; | ||
| 670 | struct amdgpu_ib ib; | ||
| 671 | int r; | ||
| 672 | |||
| 673 | /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here | ||
| 674 | * but in case of something, we filter the flags in first place | ||
| 675 | */ | ||
| 676 | if (!(mapping->flags & AMDGPU_PTE_READABLE)) | ||
| 677 | flags &= ~AMDGPU_PTE_READABLE; | ||
| 678 | if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) | ||
| 679 | flags &= ~AMDGPU_PTE_WRITEABLE; | ||
| 680 | |||
| 681 | trace_amdgpu_vm_bo_update(mapping); | ||
| 682 | |||
| 683 | nptes = mapping->it.last - mapping->it.start + 1; | ||
| 684 | |||
| 685 | /* | ||
| 686 | * reserve space for one command every (1 << BLOCK_SIZE) | ||
| 687 | * entries or 2k dwords (whatever is smaller) | ||
| 688 | */ | ||
| 689 | ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1; | ||
| 690 | |||
| 691 | /* padding, etc. */ | ||
| 692 | ndw = 64; | ||
| 693 | |||
| 694 | if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) { | ||
| 695 | /* only copy commands needed */ | ||
| 696 | ndw += ncmds * 7; | ||
| 697 | |||
| 698 | } else if (flags & AMDGPU_PTE_SYSTEM) { | ||
| 699 | /* header for write data commands */ | ||
| 700 | ndw += ncmds * 4; | ||
| 701 | |||
| 702 | /* body of write data command */ | ||
| 703 | ndw += nptes * 2; | ||
| 704 | |||
| 705 | } else { | ||
| 706 | /* set page commands needed */ | ||
| 707 | ndw += ncmds * 10; | ||
| 708 | |||
| 709 | /* two extra commands for begin/end of fragment */ | ||
| 710 | ndw += 2 * 10; | ||
| 711 | } | ||
| 712 | |||
| 713 | /* update too big for an IB */ | ||
| 714 | if (ndw > 0xfffff) | ||
| 715 | return -ENOMEM; | ||
| 716 | |||
| 717 | r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib); | ||
| 718 | if (r) | ||
| 719 | return r; | ||
| 720 | ib.length_dw = 0; | ||
| 721 | |||
| 722 | if (!(flags & AMDGPU_PTE_VALID)) { | ||
| 723 | unsigned i; | ||
| 724 | |||
| 725 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 726 | struct amdgpu_fence *f = vm->ids[i].last_id_use; | ||
| 727 | amdgpu_sync_fence(&ib.sync, f); | ||
| 728 | } | ||
| 729 | } | ||
| 730 | |||
| 731 | r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start, | ||
| 732 | mapping->it.last + 1, addr + mapping->offset, | ||
| 733 | flags, gtt_flags); | ||
| 734 | |||
| 735 | if (r) { | ||
| 736 | amdgpu_ib_free(adev, &ib); | ||
| 737 | return r; | ||
| 738 | } | ||
| 739 | |||
| 740 | amdgpu_vm_pad_ib(adev, &ib); | ||
| 741 | WARN_ON(ib.length_dw > ndw); | ||
| 742 | |||
| 743 | r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM); | ||
| 744 | if (r) { | ||
| 745 | amdgpu_ib_free(adev, &ib); | ||
| 746 | return r; | ||
| 747 | } | ||
| 748 | amdgpu_vm_fence_pts(vm, mapping->it.start, | ||
| 749 | mapping->it.last + 1, ib.fence); | ||
| 750 | if (fence) { | ||
| 751 | amdgpu_fence_unref(fence); | ||
| 752 | *fence = amdgpu_fence_ref(ib.fence); | ||
| 753 | } | ||
| 754 | amdgpu_ib_free(adev, &ib); | ||
| 755 | |||
| 756 | return 0; | ||
| 757 | } | ||
| 758 | |||
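Before building the IB, the function sizes it: one command per `1 << amdgpu_vm_block_size` entries (chunked at 2k entries, hence `min(..., 11)`), 64 dwords of padding, and a per-command cost that depends on whether GTT copy commands, write-data commands, or set-page commands will be emitted. A self-contained sketch of that budget; the 7/4/10 dword costs mirror the code above, and the 9-bit block size is only an example:

```c
#include <stdio.h>

/* mirrors the IB dword estimate in amdgpu_vm_bo_update_mapping() */
static unsigned ib_budget(unsigned nptes, int system, int same_flags,
			  unsigned block_size)
{
	unsigned shift = block_size < 11 ? block_size : 11;
	unsigned ncmds = (nptes >> shift) + 1;
	unsigned ndw = 64;                      /* padding, etc. */

	if (system && same_flags)
		ndw += ncmds * 7;               /* GTT copy commands only */
	else if (system)
		ndw += ncmds * 4 + nptes * 2;   /* write-data header + body */
	else
		ndw += ncmds * 10 + 2 * 10;     /* set-page + fragment begin/end */

	return ndw;
}

int main(void)
{
	/* 4096 PTEs, VRAM destination, hypothetical 9-bit block size */
	printf("%u dwords needed\n", ib_budget(4096, 0, 0, 9));
	return 0;
}
```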
| 759 | /** | ||
| 760 | * amdgpu_vm_bo_update - update all BO mappings in the vm page table | ||
| 761 | * | ||
| 762 | * @adev: amdgpu_device pointer | ||
| 763 | * @bo_va: requested BO and VM object | ||
| 764 | * @mem: ttm mem | ||
| 765 | * | ||
| 766 | * Fill in the page table entries for @bo_va. | ||
| 767 | * Returns 0 for success, negative error code for failure. | ||
| 768 | * | ||
| 769 | * Objects have to be reserved and the mutex must be locked! | ||
| 770 | */ | ||
| 771 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, | ||
| 772 | struct amdgpu_bo_va *bo_va, | ||
| 773 | struct ttm_mem_reg *mem) | ||
| 774 | { | ||
| 775 | struct amdgpu_vm *vm = bo_va->vm; | ||
| 776 | struct amdgpu_bo_va_mapping *mapping; | ||
| 777 | uint32_t flags; | ||
| 778 | uint64_t addr; | ||
| 779 | int r; | ||
| 780 | |||
| 781 | if (mem) { | ||
| 782 | addr = mem->start << PAGE_SHIFT; | ||
| 783 | if (mem->mem_type != TTM_PL_TT) | ||
| 784 | addr += adev->vm_manager.vram_base_offset; | ||
| 785 | } else { | ||
| 786 | addr = 0; | ||
| 787 | } | ||
| 788 | |||
| 789 | if (addr == bo_va->addr) | ||
| 790 | return 0; | ||
| 791 | |||
| 792 | flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); | ||
| 793 | |||
| 794 | list_for_each_entry(mapping, &bo_va->mappings, list) { | ||
| 795 | r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr, | ||
| 796 | flags, &bo_va->last_pt_update); | ||
| 797 | if (r) | ||
| 798 | return r; | ||
| 799 | } | ||
| 800 | |||
| 801 | bo_va->addr = addr; | ||
| 802 | spin_lock(&vm->status_lock); | ||
| 803 | list_del_init(&bo_va->vm_status); | ||
| 804 | spin_unlock(&vm->status_lock); | ||
| 805 | |||
| 806 | return 0; | ||
| 807 | } | ||
| 808 | |||
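The target address is derived purely from the TTM placement: no placement means address 0 (the PTEs get cleared, since the flags then lack `AMDGPU_PTE_VALID`), a GTT placement keeps the page offset as-is, and anything else is treated as VRAM and rebased by `vram_base_offset`. A small sketch of that selection, with a made-up base offset:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
enum placement { PL_SYSTEM, PL_TT, PL_VRAM };

/* mirrors the address selection at the top of amdgpu_vm_bo_update();
 * the 0x8000000 base is an arbitrary example value */
static uint64_t map_addr(int bound, enum placement type, uint64_t start_pfn)
{
	const uint64_t vram_base_offset = 0x8000000ULL; /* assumed */
	uint64_t addr;

	if (!bound)
		return 0;       /* unmapped: PTEs will be cleared */

	addr = start_pfn << PAGE_SHIFT;
	if (type != PL_TT)
		addr += vram_base_offset;       /* rebase VRAM pages */
	return addr;
}

int main(void)
{
	printf("vram: 0x%llx\n", (unsigned long long)map_addr(1, PL_VRAM, 0x10));
	printf("gtt:  0x%llx\n", (unsigned long long)map_addr(1, PL_TT, 0x10));
	return 0;
}
```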
| 809 | /** | ||
| 810 | * amdgpu_vm_clear_freed - clear freed BOs in the PT | ||
| 811 | * | ||
| 812 | * @adev: amdgpu_device pointer | ||
| 813 | * @vm: requested vm | ||
| 814 | * | ||
| 815 | * Make sure all freed BOs are cleared in the PT. | ||
| 816 | * Returns 0 for success. | ||
| 817 | * | ||
| 818 | * PTs have to be reserved and mutex must be locked! | ||
| 819 | */ | ||
| 820 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | ||
| 821 | struct amdgpu_vm *vm) | ||
| 822 | { | ||
| 823 | struct amdgpu_bo_va_mapping *mapping; | ||
| 824 | int r; | ||
| 825 | |||
| 826 | while (!list_empty(&vm->freed)) { | ||
| 827 | mapping = list_first_entry(&vm->freed, | ||
| 828 | struct amdgpu_bo_va_mapping, list); | ||
| 829 | list_del(&mapping->list); | ||
| 830 | |||
| 831 | r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); | ||
| 832 | kfree(mapping); | ||
| 833 | if (r) | ||
| 834 | return r; | ||
| 835 | |||
| 836 | } | ||
| 837 | return 0; | ||
| 838 | |||
| 839 | } | ||
| 840 | |||
| 841 | /** | ||
| 842 | * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT | ||
| 843 | * | ||
| 844 | * @adev: amdgpu_device pointer | ||
| 845 | * @vm: requested vm | ||
| 846 | * | ||
| 847 | * Make sure all invalidated BOs are cleared in the PT. | ||
| 848 | * Returns 0 for success. | ||
| 849 | * | ||
| 850 | * PTs have to be reserved and mutex must be locked! | ||
| 851 | */ | ||
| 852 | int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, | ||
| 853 | struct amdgpu_vm *vm) | ||
| 854 | { | ||
| 855 | struct amdgpu_bo_va *bo_va; | ||
| 856 | int r; | ||
| 857 | |||
| 858 | spin_lock(&vm->status_lock); | ||
| 859 | while (!list_empty(&vm->invalidated)) { | ||
| 860 | bo_va = list_first_entry(&vm->invalidated, | ||
| 861 | struct amdgpu_bo_va, vm_status); | ||
| 862 | spin_unlock(&vm->status_lock); | ||
| 863 | |||
| 864 | r = amdgpu_vm_bo_update(adev, bo_va, NULL); | ||
| 865 | if (r) | ||
| 866 | return r; | ||
| 867 | |||
| 868 | spin_lock(&vm->status_lock); | ||
| 869 | } | ||
| 870 | spin_unlock(&vm->status_lock); | ||
| 871 | |||
| 872 | return 0; | ||
| 873 | } | ||
| 874 | |||
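`amdgpu_vm_clear_invalids` shows a classic lock-drop loop: the status spinlock protects only the list, so the head is peeked under the lock, the lock is dropped for the potentially sleeping update, and it is re-taken before the next emptiness check. `amdgpu_vm_bo_update` unlinks the entry under the same lock, which is what guarantees forward progress. A userspace sketch of the pattern using pthreads (the list and the `update` stand-in are hypothetical):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *invalidated;

/* stands in for amdgpu_vm_bo_update(): unlinks the node under the
 * lock, then does its potentially sleeping work without it */
static void update(struct node *n)
{
	pthread_mutex_lock(&status_lock);
	invalidated = n->next;
	pthread_mutex_unlock(&status_lock);
	printf("updated bo_va %d\n", n->id);    /* may sleep here */
	free(n);
}

static void clear_invalids(void)
{
	pthread_mutex_lock(&status_lock);
	while (invalidated) {
		struct node *n = invalidated;
		pthread_mutex_unlock(&status_lock); /* drop before sleeping */
		update(n);
		pthread_mutex_lock(&status_lock);
	}
	pthread_mutex_unlock(&status_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = invalidated;
		invalidated = n;
	}
	clear_invalids();
	return 0;
}
```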
| 875 | /** | ||
| 876 | * amdgpu_vm_bo_add - add a bo to a specific vm | ||
| 877 | * | ||
| 878 | * @adev: amdgpu_device pointer | ||
| 879 | * @vm: requested vm | ||
| 880 | * @bo: amdgpu buffer object | ||
| 881 | * | ||
| 882 | * Add @bo into the requested vm. | ||
| 883 | * Add @bo to the list of bos associated with the vm. | ||
| 884 | * Returns the newly added bo_va or NULL for failure. | ||
| 885 | * | ||
| 886 | * Object has to be reserved! | ||
| 887 | */ | ||
| 888 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | ||
| 889 | struct amdgpu_vm *vm, | ||
| 890 | struct amdgpu_bo *bo) | ||
| 891 | { | ||
| 892 | struct amdgpu_bo_va *bo_va; | ||
| 893 | |||
| 894 | bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL); | ||
| 895 | if (bo_va == NULL) { | ||
| 896 | return NULL; | ||
| 897 | } | ||
| 898 | bo_va->vm = vm; | ||
| 899 | bo_va->bo = bo; | ||
| 900 | bo_va->addr = 0; | ||
| 901 | bo_va->ref_count = 1; | ||
| 902 | INIT_LIST_HEAD(&bo_va->bo_list); | ||
| 903 | INIT_LIST_HEAD(&bo_va->mappings); | ||
| 904 | INIT_LIST_HEAD(&bo_va->vm_status); | ||
| 905 | |||
| 906 | mutex_lock(&vm->mutex); | ||
| 907 | list_add_tail(&bo_va->bo_list, &bo->va); | ||
| 908 | mutex_unlock(&vm->mutex); | ||
| 909 | |||
| 910 | return bo_va; | ||
| 911 | } | ||
| 912 | |||
| 913 | /** | ||
| 914 | * amdgpu_vm_bo_map - map bo inside a vm | ||
| 915 | * | ||
| 916 | * @adev: amdgpu_device pointer | ||
| 917 | * @bo_va: bo_va to store the address | ||
| 918 | * @saddr: where to map the BO | ||
| 919 | * @offset: requested offset in the BO | ||
| 920 | * @flags: attributes of pages (read/write/valid/etc.) | ||
| 921 | * | ||
| 922 | * Add a mapping of the BO at the specified addr into the VM. | ||
| 923 | * Returns 0 for success, error for failure. | ||
| 924 | * | ||
| 925 | * Object has to be reserved and gets unreserved by this function! | ||
| 926 | */ | ||
| 927 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | ||
| 928 | struct amdgpu_bo_va *bo_va, | ||
| 929 | uint64_t saddr, uint64_t offset, | ||
| 930 | uint64_t size, uint32_t flags) | ||
| 931 | { | ||
| 932 | struct amdgpu_bo_va_mapping *mapping; | ||
| 933 | struct amdgpu_vm *vm = bo_va->vm; | ||
| 934 | struct interval_tree_node *it; | ||
| 935 | unsigned last_pfn, pt_idx; | ||
| 936 | uint64_t eaddr; | ||
| 937 | int r; | ||
| 938 | |||
| 939 | /* make sure the object fits at this offset */ | ||
| 940 | eaddr = saddr + size; | ||
| 941 | if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) { | ||
| 942 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 943 | return -EINVAL; | ||
| 944 | } | ||
| 945 | |||
| 946 | last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; | ||
| 947 | if (last_pfn > adev->vm_manager.max_pfn) { | ||
| 948 | dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", | ||
| 949 | last_pfn, adev->vm_manager.max_pfn); | ||
| 950 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 951 | return -EINVAL; | ||
| 952 | } | ||
| 953 | |||
| 954 | mutex_lock(&vm->mutex); | ||
| 955 | |||
| 956 | saddr /= AMDGPU_GPU_PAGE_SIZE; | ||
| 957 | eaddr /= AMDGPU_GPU_PAGE_SIZE; | ||
| 958 | |||
| 959 | it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); | ||
| 960 | if (it) { | ||
| 961 | struct amdgpu_bo_va_mapping *tmp; | ||
| 962 | tmp = container_of(it, struct amdgpu_bo_va_mapping, it); | ||
| 963 | /* bo and tmp overlap, invalid addr */ | ||
| 964 | dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " | ||
| 965 | "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, | ||
| 966 | tmp->it.start, tmp->it.last + 1); | ||
| 967 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 968 | r = -EINVAL; | ||
| 969 | goto error_unlock; | ||
| 970 | } | ||
| 971 | |||
| 972 | mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); | ||
| 973 | if (!mapping) { | ||
| 974 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 975 | r = -ENOMEM; | ||
| 976 | goto error_unlock; | ||
| 977 | } | ||
| 978 | |||
| 979 | INIT_LIST_HEAD(&mapping->list); | ||
| 980 | mapping->it.start = saddr; | ||
| 981 | mapping->it.last = eaddr - 1; | ||
| 982 | mapping->offset = offset; | ||
| 983 | mapping->flags = flags; | ||
| 984 | |||
| 985 | list_add(&mapping->list, &bo_va->mappings); | ||
| 986 | interval_tree_insert(&mapping->it, &vm->va); | ||
| 987 | |||
| 988 | /* Make sure the page tables are allocated */ | ||
| 989 | saddr >>= amdgpu_vm_block_size; | ||
| 990 | eaddr >>= amdgpu_vm_block_size; | ||
| 991 | |||
| 992 | BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev)); | ||
| 993 | |||
| 994 | if (eaddr > vm->max_pde_used) | ||
| 995 | vm->max_pde_used = eaddr; | ||
| 996 | |||
| 997 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 998 | |||
| 999 | /* walk over the address space and allocate the page tables */ | ||
| 1000 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { | ||
| 1001 | struct amdgpu_bo *pt; | ||
| 1002 | |||
| 1003 | if (vm->page_tables[pt_idx].bo) | ||
| 1004 | continue; | ||
| 1005 | |||
| 1006 | /* drop mutex to allocate and clear page table */ | ||
| 1007 | mutex_unlock(&vm->mutex); | ||
| 1008 | |||
| 1009 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, | ||
| 1010 | AMDGPU_GPU_PAGE_SIZE, true, | ||
| 1011 | AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt); | ||
| 1012 | if (r) | ||
| 1013 | goto error_free; | ||
| 1014 | |||
| 1015 | r = amdgpu_vm_clear_bo(adev, pt); | ||
| 1016 | if (r) { | ||
| 1017 | amdgpu_bo_unref(&pt); | ||
| 1018 | goto error_free; | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | /* acquire mutex again */ | ||
| 1022 | mutex_lock(&vm->mutex); | ||
| 1023 | if (vm->page_tables[pt_idx].bo) { | ||
| 1024 | /* someone else allocated the pt in the meantime */ | ||
| 1025 | mutex_unlock(&vm->mutex); | ||
| 1026 | amdgpu_bo_unref(&pt); | ||
| 1027 | mutex_lock(&vm->mutex); | ||
| 1028 | continue; | ||
| 1029 | } | ||
| 1030 | |||
| 1031 | vm->page_tables[pt_idx].addr = 0; | ||
| 1032 | vm->page_tables[pt_idx].bo = pt; | ||
| 1033 | } | ||
| 1034 | |||
| 1035 | mutex_unlock(&vm->mutex); | ||
| 1036 | return 0; | ||
| 1037 | |||
| 1038 | error_free: | ||
| 1039 | mutex_lock(&vm->mutex); | ||
| 1040 | list_del(&mapping->list); | ||
| 1041 | interval_tree_remove(&mapping->it, &vm->va); | ||
| 1042 | kfree(mapping); | ||
| 1043 | |||
| 1044 | error_unlock: | ||
| 1045 | mutex_unlock(&vm->mutex); | ||
| 1046 | return r; | ||
| 1047 | } | ||
| 1048 | |||
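Mappings live in an interval tree keyed on GPU page numbers with an inclusive `last` field, which is why the code probes and inserts `[saddr, eaddr - 1]`. A toy stand-in for the inclusive-interval overlap test the tree performs:

```c
#include <stdint.h>
#include <stdio.h>

/* toy stand-in for the kernel interval tree: intervals are inclusive
 * [start, last], matching mapping->it.start / it.last above */
struct range { uint64_t start, last; };

static int overlaps(struct range a, struct range b)
{
	return a.start <= b.last && b.start <= a.last;
}

int main(void)
{
	struct range existing = { .start = 0x100, .last = 0x1ff };
	struct range new_map  = { .start = 0x180, .last = 0x27f };

	if (overlaps(existing, new_map))
		printf("va 0x%llx-0x%llx conflicts with 0x%llx-0x%llx\n",
		       (unsigned long long)new_map.start,
		       (unsigned long long)new_map.last,
		       (unsigned long long)existing.start,
		       (unsigned long long)existing.last);
	return 0;
}
```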
| 1049 | /** | ||
| 1050 | * amdgpu_vm_bo_unmap - remove bo mapping from vm | ||
| 1051 | * | ||
| 1052 | * @adev: amdgpu_device pointer | ||
| 1053 | * @bo_va: bo_va to remove the address from | ||
| 1054 | * @saddr: where the BO is mapped | ||
| 1055 | * | ||
| 1056 | * Remove a mapping of the BO at the specified addr from the VM. | ||
| 1057 | * Returns 0 for success, error for failure. | ||
| 1058 | * | ||
| 1059 | * Object has to be reserved and gets unreserved by this function! | ||
| 1060 | */ | ||
| 1061 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | ||
| 1062 | struct amdgpu_bo_va *bo_va, | ||
| 1063 | uint64_t saddr) | ||
| 1064 | { | ||
| 1065 | struct amdgpu_bo_va_mapping *mapping; | ||
| 1066 | struct amdgpu_vm *vm = bo_va->vm; | ||
| 1067 | |||
| 1068 | list_for_each_entry(mapping, &bo_va->mappings, list) { | ||
| 1069 | if (mapping->it.start == saddr) | ||
| 1070 | break; | ||
| 1071 | } | ||
| 1072 | |||
| 1073 | if (&mapping->list == &bo_va->mappings) { | ||
| 1074 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1075 | return -ENOENT; | ||
| 1076 | } | ||
| 1077 | |||
| 1078 | mutex_lock(&vm->mutex); | ||
| 1079 | list_del(&mapping->list); | ||
| 1080 | interval_tree_remove(&mapping->it, &vm->va); | ||
| 1081 | |||
| 1082 | if (bo_va->addr) { | ||
| 1083 | /* clear the old address */ | ||
| 1084 | list_add(&mapping->list, &vm->freed); | ||
| 1085 | } else { | ||
| 1086 | kfree(mapping); | ||
| 1087 | } | ||
| 1088 | mutex_unlock(&vm->mutex); | ||
| 1089 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1090 | |||
| 1091 | return 0; | ||
| 1092 | } | ||
| 1093 | |||
| 1094 | /** | ||
| 1095 | * amdgpu_vm_bo_rmv - remove a bo from a specific vm | ||
| 1096 | * | ||
| 1097 | * @adev: amdgpu_device pointer | ||
| 1098 | * @bo_va: requested bo_va | ||
| 1099 | * | ||
| 1100 | * Remove @bo_va->bo from the requested vm. | ||
| 1101 | * | ||
| 1102 | * Object has to be reserved! | ||
| 1103 | */ | ||
| 1104 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | ||
| 1105 | struct amdgpu_bo_va *bo_va) | ||
| 1106 | { | ||
| 1107 | struct amdgpu_bo_va_mapping *mapping, *next; | ||
| 1108 | struct amdgpu_vm *vm = bo_va->vm; | ||
| 1109 | |||
| 1110 | list_del(&bo_va->bo_list); | ||
| 1111 | |||
| 1112 | mutex_lock(&vm->mutex); | ||
| 1113 | |||
| 1114 | spin_lock(&vm->status_lock); | ||
| 1115 | list_del(&bo_va->vm_status); | ||
| 1116 | spin_unlock(&vm->status_lock); | ||
| 1117 | |||
| 1118 | list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) { | ||
| 1119 | list_del(&mapping->list); | ||
| 1120 | interval_tree_remove(&mapping->it, &vm->va); | ||
| 1121 | if (bo_va->addr) | ||
| 1122 | list_add(&mapping->list, &vm->freed); | ||
| 1123 | else | ||
| 1124 | kfree(mapping); | ||
| 1125 | } | ||
| 1126 | amdgpu_fence_unref(&bo_va->last_pt_update); | ||
| 1127 | kfree(bo_va); | ||
| 1128 | |||
| 1129 | mutex_unlock(&vm->mutex); | ||
| 1130 | } | ||
| 1131 | |||
| 1132 | /** | ||
| 1133 | * amdgpu_vm_bo_invalidate - mark the bo as invalid | ||
| 1134 | * | ||
| 1135 | * @adev: amdgpu_device pointer | ||
| 1136 | * @bo: amdgpu buffer object | ||
| 1137 | * | ||
| 1138 | * Mark @bo as invalid and move all of its bo_vas | ||
| 1139 | * to the per-VM invalidated list. | ||
| 1140 | */ | ||
| 1141 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, | ||
| 1142 | struct amdgpu_bo *bo) | ||
| 1143 | { | ||
| 1144 | struct amdgpu_bo_va *bo_va; | ||
| 1145 | |||
| 1146 | list_for_each_entry(bo_va, &bo->va, bo_list) { | ||
| 1147 | if (bo_va->addr) { | ||
| 1148 | spin_lock(&bo_va->vm->status_lock); | ||
| 1149 | list_del(&bo_va->vm_status); | ||
| 1150 | list_add(&bo_va->vm_status, &bo_va->vm->invalidated); | ||
| 1151 | spin_unlock(&bo_va->vm->status_lock); | ||
| 1152 | } | ||
| 1153 | } | ||
| 1154 | } | ||
| 1155 | |||
| 1156 | /** | ||
| 1157 | * amdgpu_vm_init - initialize a vm instance | ||
| 1158 | * | ||
| 1159 | * @adev: amdgpu_device pointer | ||
| 1160 | * @vm: requested vm | ||
| 1161 | * | ||
| 1162 | * Init @vm fields. | ||
| 1163 | */ | ||
| 1164 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | ||
| 1165 | { | ||
| 1166 | const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, | ||
| 1167 | AMDGPU_VM_PTE_COUNT * 8); | ||
| 1168 | unsigned pd_size, pd_entries, pts_size; | ||
| 1169 | int i, r; | ||
| 1170 | |||
| 1171 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 1172 | vm->ids[i].id = 0; | ||
| 1173 | vm->ids[i].flushed_updates = NULL; | ||
| 1174 | vm->ids[i].last_id_use = NULL; | ||
| 1175 | } | ||
| 1176 | mutex_init(&vm->mutex); | ||
| 1177 | vm->va = RB_ROOT; | ||
| 1178 | spin_lock_init(&vm->status_lock); | ||
| 1179 | INIT_LIST_HEAD(&vm->invalidated); | ||
| 1180 | INIT_LIST_HEAD(&vm->freed); | ||
| 1181 | |||
| 1182 | pd_size = amdgpu_vm_directory_size(adev); | ||
| 1183 | pd_entries = amdgpu_vm_num_pdes(adev); | ||
| 1184 | |||
| 1185 | /* allocate page table array */ | ||
| 1186 | pts_size = pd_entries * sizeof(struct amdgpu_vm_pt); | ||
| 1187 | vm->page_tables = kzalloc(pts_size, GFP_KERNEL); | ||
| 1188 | if (vm->page_tables == NULL) { | ||
| 1189 | DRM_ERROR("Cannot allocate memory for page table array\n"); | ||
| 1190 | return -ENOMEM; | ||
| 1191 | } | ||
| 1192 | |||
| 1193 | r = amdgpu_bo_create(adev, pd_size, align, true, | ||
| 1194 | AMDGPU_GEM_DOMAIN_VRAM, 0, | ||
| 1195 | NULL, &vm->page_directory); | ||
| 1196 | if (r) | ||
| 1197 | return r; | ||
| 1198 | |||
| 1199 | r = amdgpu_vm_clear_bo(adev, vm->page_directory); | ||
| 1200 | if (r) { | ||
| 1201 | amdgpu_bo_unref(&vm->page_directory); | ||
| 1202 | vm->page_directory = NULL; | ||
| 1203 | return r; | ||
| 1204 | } | ||
| 1205 | |||
| 1206 | return 0; | ||
| 1207 | } | ||
| 1208 | |||
| 1209 | /** | ||
| 1210 | * amdgpu_vm_fini - tear down a vm instance | ||
| 1211 | * | ||
| 1212 | * @adev: amdgpu_device pointer | ||
| 1213 | * @vm: requested vm | ||
| 1214 | * | ||
| 1215 | * Tear down @vm. | ||
| 1216 | * Unbind the VM and remove all bos from the vm bo list. | ||
| 1217 | */ | ||
| 1218 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | ||
| 1219 | { | ||
| 1220 | struct amdgpu_bo_va_mapping *mapping, *tmp; | ||
| 1221 | int i; | ||
| 1222 | |||
| 1223 | if (!RB_EMPTY_ROOT(&vm->va)) { | ||
| 1224 | dev_err(adev->dev, "still active bo inside vm\n"); | ||
| 1225 | } | ||
| 1226 | rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) { | ||
| 1227 | list_del(&mapping->list); | ||
| 1228 | interval_tree_remove(&mapping->it, &vm->va); | ||
| 1229 | kfree(mapping); | ||
| 1230 | } | ||
| 1231 | list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { | ||
| 1232 | list_del(&mapping->list); | ||
| 1233 | kfree(mapping); | ||
| 1234 | } | ||
| 1235 | |||
| 1236 | for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) | ||
| 1237 | amdgpu_bo_unref(&vm->page_tables[i].bo); | ||
| 1238 | kfree(vm->page_tables); | ||
| 1239 | |||
| 1240 | amdgpu_bo_unref(&vm->page_directory); | ||
| 1241 | |||
| 1242 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
| 1243 | amdgpu_fence_unref(&vm->ids[i].flushed_updates); | ||
| 1244 | amdgpu_fence_unref(&vm->ids[i].last_id_use); | ||
| 1245 | } | ||
| 1246 | |||
| 1247 | mutex_destroy(&vm->mutex); | ||
| 1248 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-bits.h b/drivers/gpu/drm/amd/amdgpu/atom-bits.h new file mode 100644 index 000000000000..e8fae5c77514 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/atom-bits.h | |||
| @@ -0,0 +1,48 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Author: Stanislaw Skowronek | ||
| 23 | */ | ||
| 24 | |||
| 25 | #ifndef ATOM_BITS_H | ||
| 26 | #define ATOM_BITS_H | ||
| 27 | |||
| 28 | static inline uint8_t get_u8(void *bios, int ptr) | ||
| 29 | { | ||
| 30 | return ((unsigned char *)bios)[ptr]; | ||
| 31 | } | ||
| 32 | #define U8(ptr) get_u8(ctx->ctx->bios, (ptr)) | ||
| 33 | #define CU8(ptr) get_u8(ctx->bios, (ptr)) | ||
| 34 | static inline uint16_t get_u16(void *bios, int ptr) | ||
| 35 | { | ||
| 36 | return get_u8(bios, ptr) | (((uint16_t)get_u8(bios, ptr+1)) << 8); | ||
| 37 | } | ||
| 38 | #define U16(ptr) get_u16(ctx->ctx->bios, (ptr)) | ||
| 39 | #define CU16(ptr) get_u16(ctx->bios, (ptr)) | ||
| 40 | static inline uint32_t get_u32(void *bios, int ptr) | ||
| 41 | { | ||
| 42 | return get_u16(bios, ptr) | (((uint32_t)get_u16(bios, ptr+2)) << 16); | ||
| 43 | } | ||
| 44 | #define U32(ptr) get_u32(ctx->ctx->bios, (ptr)) | ||
| 45 | #define CU32(ptr) get_u32(ctx->bios, (ptr)) | ||
| 46 | #define CSTR(ptr) (((char *)(ctx->bios))+(ptr)) | ||
| 47 | |||
| 48 | #endif | ||
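These helpers read the BIOS image a byte at a time and assemble wider values by shifting, so they are alignment-safe and return correct little-endian values regardless of host endianness. A quick standalone check of the byte assembly:

```c
#include <stdint.h>
#include <assert.h>

static inline uint8_t get_u8(void *bios, int ptr)
{
	return ((unsigned char *)bios)[ptr];
}
static inline uint16_t get_u16(void *bios, int ptr)
{
	return get_u8(bios, ptr) | (((uint16_t)get_u8(bios, ptr + 1)) << 8);
}
static inline uint32_t get_u32(void *bios, int ptr)
{
	return get_u16(bios, ptr) | (((uint32_t)get_u16(bios, ptr + 2)) << 16);
}

int main(void)
{
	/* bytes of 0x0403 and 0x08070605 stored little-endian */
	unsigned char bios[] = { 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };

	assert(get_u16(bios, 0) == 0x0403);
	assert(get_u32(bios, 2) == 0x08070605);
	return 0;
}
```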
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-names.h b/drivers/gpu/drm/amd/amdgpu/atom-names.h new file mode 100644 index 000000000000..6f907a5ffa5f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/atom-names.h | |||
| @@ -0,0 +1,100 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Author: Stanislaw Skowronek | ||
| 23 | */ | ||
| 24 | |||
| 25 | #ifndef ATOM_NAMES_H | ||
| 26 | #define ATOM_NAMES_H | ||
| 27 | |||
| 28 | #include "atom.h" | ||
| 29 | |||
| 30 | #ifdef ATOM_DEBUG | ||
| 31 | |||
| 32 | #define ATOM_OP_NAMES_CNT 123 | ||
| 33 | static char *atom_op_names[ATOM_OP_NAMES_CNT] = { | ||
| 34 | "RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL", | ||
| 35 | "MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC", | ||
| 36 | "OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG", | ||
| 37 | "SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL", | ||
| 38 | "SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS", | ||
| 39 | "SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG", | ||
| 40 | "MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS", | ||
| 41 | "DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS", | ||
| 42 | "ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB", | ||
| 43 | "SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT", | ||
| 44 | "SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS", | ||
| 45 | "COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH", | ||
| 46 | "JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL", | ||
| 47 | "JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS", | ||
| 48 | "TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC", | ||
| 49 | "CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB", | ||
| 50 | "CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS", | ||
| 51 | "MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG", | ||
| 52 | "RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB", | ||
| 53 | "XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL", | ||
| 54 | "SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC", | ||
| 55 | "DEBUG", "CTB_DS", | ||
| 56 | }; | ||
| 57 | |||
| 58 | #define ATOM_TABLE_NAMES_CNT 74 | ||
| 59 | static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = { | ||
| 60 | "ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit", | ||
| 61 | "VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit", | ||
| 62 | "GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl", | ||
| 63 | "GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock", | ||
| 64 | "DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice", | ||
| 65 | "MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController", | ||
| 66 | "EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange", | ||
| 67 | "DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl", | ||
| 68 | "DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl", | ||
| 69 | "CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl", | ||
| 70 | "TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl", | ||
| 71 | "EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock", | ||
| 72 | "EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing", | ||
| 73 | "SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source", | ||
| 74 | "EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters", | ||
| 75 | "LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock", | ||
| 76 | "GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection", | ||
| 77 | "DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp", | ||
| 78 | "ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C", | ||
| 79 | "ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection", | ||
| 80 | "MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion", | ||
| 81 | "VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining", | ||
| 82 | "EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl", | ||
| 83 | "CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource", | ||
| 84 | "MemoryDeviceInit", "EnableYUV", | ||
| 85 | }; | ||
| 86 | |||
| 87 | #define ATOM_IO_NAMES_CNT 5 | ||
| 88 | static char *atom_io_names[ATOM_IO_NAMES_CNT] = { | ||
| 89 | "MM", "PLL", "MC", "PCIE", "PCIE PORT", | ||
| 90 | }; | ||
| 91 | |||
| 92 | #else | ||
| 93 | |||
| 94 | #define ATOM_OP_NAMES_CNT 0 | ||
| 95 | #define ATOM_TABLE_NAMES_CNT 0 | ||
| 96 | #define ATOM_IO_NAMES_CNT 0 | ||
| 97 | |||
| 98 | #endif | ||
| 99 | |||
| 100 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-types.h b/drivers/gpu/drm/amd/amdgpu/atom-types.h new file mode 100644 index 000000000000..1125b866cdb0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/atom-types.h | |||
| @@ -0,0 +1,42 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Author: Dave Airlie | ||
| 23 | */ | ||
| 24 | |||
| 25 | #ifndef ATOM_TYPES_H | ||
| 26 | #define ATOM_TYPES_H | ||
| 27 | |||
| 28 | /* sync atom types to kernel types */ | ||
| 29 | |||
| 30 | typedef uint16_t USHORT; | ||
| 31 | typedef uint32_t ULONG; | ||
| 32 | typedef uint8_t UCHAR; | ||
| 33 | |||
| 34 | |||
| 35 | #ifndef ATOM_BIG_ENDIAN | ||
| 36 | #if defined(__BIG_ENDIAN) | ||
| 37 | #define ATOM_BIG_ENDIAN 1 | ||
| 38 | #else | ||
| 39 | #define ATOM_BIG_ENDIAN 0 | ||
| 40 | #endif | ||
| 41 | #endif | ||
| 42 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c new file mode 100644 index 000000000000..a0346a90d805 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/atom.c | |||
| @@ -0,0 +1,1408 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Author: Stanislaw Skowronek | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include <linux/module.h> | ||
| 26 | #include <linux/sched.h> | ||
| 27 | #include <linux/slab.h> | ||
| 28 | #include <asm/unaligned.h> | ||
| 29 | |||
| 30 | #define ATOM_DEBUG | ||
| 31 | |||
| 32 | #include "atom.h" | ||
| 33 | #include "atom-names.h" | ||
| 34 | #include "atom-bits.h" | ||
| 35 | #include "amdgpu.h" | ||
| 36 | |||
| 37 | #define ATOM_COND_ABOVE 0 | ||
| 38 | #define ATOM_COND_ABOVEOREQUAL 1 | ||
| 39 | #define ATOM_COND_ALWAYS 2 | ||
| 40 | #define ATOM_COND_BELOW 3 | ||
| 41 | #define ATOM_COND_BELOWOREQUAL 4 | ||
| 42 | #define ATOM_COND_EQUAL 5 | ||
| 43 | #define ATOM_COND_NOTEQUAL 6 | ||
| 44 | |||
| 45 | #define ATOM_PORT_ATI 0 | ||
| 46 | #define ATOM_PORT_PCI 1 | ||
| 47 | #define ATOM_PORT_SYSIO 2 | ||
| 48 | |||
| 49 | #define ATOM_UNIT_MICROSEC 0 | ||
| 50 | #define ATOM_UNIT_MILLISEC 1 | ||
| 51 | |||
| 52 | #define PLL_INDEX 2 | ||
| 53 | #define PLL_DATA 3 | ||
| 54 | |||
| 55 | typedef struct { | ||
| 56 | struct atom_context *ctx; | ||
| 57 | uint32_t *ps, *ws; | ||
| 58 | int ps_shift; | ||
| 59 | uint16_t start; | ||
| 60 | unsigned last_jump; | ||
| 61 | unsigned long last_jump_jiffies; | ||
| 62 | bool abort; | ||
| 63 | } atom_exec_context; | ||
| 64 | |||
| 65 | int amdgpu_atom_debug = 0; | ||
| 66 | static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); | ||
| 67 | int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); | ||
| 68 | |||
| 69 | static uint32_t atom_arg_mask[8] = | ||
| 70 | { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, | ||
| 71 | 0xFF000000 }; | ||
| 72 | static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 }; | ||
| 73 | |||
| 74 | static int atom_dst_to_src[8][4] = { | ||
| 75 | /* translate destination alignment field to the source alignment encoding */ | ||
| 76 | {0, 0, 0, 0}, | ||
| 77 | {1, 2, 3, 0}, | ||
| 78 | {1, 2, 3, 0}, | ||
| 79 | {1, 2, 3, 0}, | ||
| 80 | {4, 5, 6, 7}, | ||
| 81 | {4, 5, 6, 7}, | ||
| 82 | {4, 5, 6, 7}, | ||
| 83 | {4, 5, 6, 7}, | ||
| 84 | }; | ||
| 85 | static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 }; | ||
| 86 | |||
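The eight source alignments address either the whole dword, one of three 16-bit words, or one of four bytes within a 32-bit value; extraction is always `(val & mask) >> shift` using the two tables above. For example (alignment indices 3 and 6 correspond to WORD16 and BYTE16, matching the DEBUG output later in this file):

```c
#include <stdint.h>
#include <assert.h>

static const uint32_t atom_arg_mask[8] = {
	0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000,
	0xFF, 0xFF00, 0xFF0000, 0xFF000000
};
static const int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };

int main(void)
{
	uint32_t val = 0x12345678;

	/* align 3 = WORD16: bits [31:16] */
	assert(((val & atom_arg_mask[3]) >> atom_arg_shift[3]) == 0x1234);
	/* align 6 = BYTE16: bits [23:16] */
	assert(((val & atom_arg_mask[6]) >> atom_arg_shift[6]) == 0x34);
	return 0;
}
```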
| 87 | static int debug_depth = 0; | ||
| 88 | #ifdef ATOM_DEBUG | ||
| 89 | static void debug_print_spaces(int n) | ||
| 90 | { | ||
| 91 | while (n--) | ||
| 92 | printk(" "); | ||
| 93 | } | ||
| 94 | |||
| 95 | #define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0) | ||
| 96 | #define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0) | ||
| 97 | #else | ||
| 98 | #define DEBUG(...) do { } while (0) | ||
| 99 | #define SDEBUG(...) do { } while (0) | ||
| 100 | #endif | ||
| 101 | |||
| 102 | static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | ||
| 103 | uint32_t index, uint32_t data) | ||
| 104 | { | ||
| 105 | uint32_t temp = 0xCDCDCDCD; | ||
| 106 | |||
| 107 | while (1) | ||
| 108 | switch (CU8(base)) { | ||
| 109 | case ATOM_IIO_NOP: | ||
| 110 | base++; | ||
| 111 | break; | ||
| 112 | case ATOM_IIO_READ: | ||
| 113 | temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1)); | ||
| 114 | base += 3; | ||
| 115 | break; | ||
| 116 | case ATOM_IIO_WRITE: | ||
| 117 | ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); | ||
| 118 | base += 3; | ||
| 119 | break; | ||
| 120 | case ATOM_IIO_CLEAR: | ||
| 121 | temp &= | ||
| 122 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << | ||
| 123 | CU8(base + 2)); | ||
| 124 | base += 3; | ||
| 125 | break; | ||
| 126 | case ATOM_IIO_SET: | ||
| 127 | temp |= | ||
| 128 | (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + | ||
| 129 | 2); | ||
| 130 | base += 3; | ||
| 131 | break; | ||
| 132 | case ATOM_IIO_MOVE_INDEX: | ||
| 133 | temp &= | ||
| 134 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << | ||
| 135 | CU8(base + 3)); | ||
| 136 | temp |= | ||
| 137 | ((index >> CU8(base + 2)) & | ||
| 138 | (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + | ||
| 139 | 3); | ||
| 140 | base += 4; | ||
| 141 | break; | ||
| 142 | case ATOM_IIO_MOVE_DATA: | ||
| 143 | temp &= | ||
| 144 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << | ||
| 145 | CU8(base + 3)); | ||
| 146 | temp |= | ||
| 147 | ((data >> CU8(base + 2)) & | ||
| 148 | (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + | ||
| 149 | 3); | ||
| 150 | base += 4; | ||
| 151 | break; | ||
| 152 | case ATOM_IIO_MOVE_ATTR: | ||
| 153 | temp &= | ||
| 154 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << | ||
| 155 | CU8(base + 3)); | ||
| 156 | temp |= | ||
| 157 | ((ctx-> | ||
| 158 | io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - | ||
| 159 | CU8 | ||
| 160 | (base | ||
| 161 | + | ||
| 162 | 1)))) | ||
| 163 | << CU8(base + 3); | ||
| 164 | base += 4; | ||
| 165 | break; | ||
| 166 | case ATOM_IIO_END: | ||
| 167 | return temp; | ||
| 168 | default: | ||
| 169 | printk(KERN_INFO "Unknown IIO opcode.\n"); | ||
| 170 | return 0; | ||
| 171 | } | ||
| 172 | } | ||
| 173 | |||
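`atom_iio_execute` interprets small indirect-IO programs embedded in the BIOS. The three MOVE opcodes all perform the same bitfield splice: build a width-bit mask as `0xFFFFFFFF >> (32 - width)`, clear the destination field in `temp`, and OR in the shifted source. Factored out as a standalone helper (width is assumed to be 1..32, as in the opcodes themselves):

```c
#include <stdint.h>
#include <stdio.h>

/* the bitfield move used by ATOM_IIO_MOVE_INDEX/DATA/ATTR above:
 * copy 'width' bits of src starting at src_off into temp at dst_off */
static uint32_t iio_move(uint32_t temp, uint32_t src,
			 unsigned width, unsigned src_off, unsigned dst_off)
{
	uint32_t mask = 0xFFFFFFFFu >> (32 - width);	/* width in 1..32 */

	temp &= ~(mask << dst_off);
	temp |= ((src >> src_off) & mask) << dst_off;
	return temp;
}

int main(void)
{
	/* move bits [11:4] of the index into bits [23:16] of temp */
	printf("0x%08x\n", iio_move(0xCDCDCDCD, 0x00000AB0, 8, 4, 16));
	return 0;
}
```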
| 174 | static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, | ||
| 175 | int *ptr, uint32_t *saved, int print) | ||
| 176 | { | ||
| 177 | uint32_t idx, val = 0xCDCDCDCD, align, arg; | ||
| 178 | struct atom_context *gctx = ctx->ctx; | ||
| 179 | arg = attr & 7; | ||
| 180 | align = (attr >> 3) & 7; | ||
| 181 | switch (arg) { | ||
| 182 | case ATOM_ARG_REG: | ||
| 183 | idx = U16(*ptr); | ||
| 184 | (*ptr) += 2; | ||
| 185 | if (print) | ||
| 186 | DEBUG("REG[0x%04X]", idx); | ||
| 187 | idx += gctx->reg_block; | ||
| 188 | switch (gctx->io_mode) { | ||
| 189 | case ATOM_IO_MM: | ||
| 190 | val = gctx->card->reg_read(gctx->card, idx); | ||
| 191 | break; | ||
| 192 | case ATOM_IO_PCI: | ||
| 193 | printk(KERN_INFO | ||
| 194 | "PCI registers are not implemented.\n"); | ||
| 195 | return 0; | ||
| 196 | case ATOM_IO_SYSIO: | ||
| 197 | printk(KERN_INFO | ||
| 198 | "SYSIO registers are not implemented.\n"); | ||
| 199 | return 0; | ||
| 200 | default: | ||
| 201 | if (!(gctx->io_mode & 0x80)) { | ||
| 202 | printk(KERN_INFO "Bad IO mode.\n"); | ||
| 203 | return 0; | ||
| 204 | } | ||
| 205 | if (!gctx->iio[gctx->io_mode & 0x7F]) { | ||
| 206 | printk(KERN_INFO | ||
| 207 | "Undefined indirect IO read method %d.\n", | ||
| 208 | gctx->io_mode & 0x7F); | ||
| 209 | return 0; | ||
| 210 | } | ||
| 211 | val = | ||
| 212 | atom_iio_execute(gctx, | ||
| 213 | gctx->iio[gctx->io_mode & 0x7F], | ||
| 214 | idx, 0); | ||
| 215 | } | ||
| 216 | break; | ||
| 217 | case ATOM_ARG_PS: | ||
| 218 | idx = U8(*ptr); | ||
| 219 | (*ptr)++; | ||
| 220 | /* get_unaligned_le32 avoids unaligned accesses from atombios | ||
| 221 | * tables, noticed on a DEC Alpha. */ | ||
| 222 | val = get_unaligned_le32((u32 *)&ctx->ps[idx]); | ||
| 223 | if (print) | ||
| 224 | DEBUG("PS[0x%02X,0x%04X]", idx, val); | ||
| 225 | break; | ||
| 226 | case ATOM_ARG_WS: | ||
| 227 | idx = U8(*ptr); | ||
| 228 | (*ptr)++; | ||
| 229 | if (print) | ||
| 230 | DEBUG("WS[0x%02X]", idx); | ||
| 231 | switch (idx) { | ||
| 232 | case ATOM_WS_QUOTIENT: | ||
| 233 | val = gctx->divmul[0]; | ||
| 234 | break; | ||
| 235 | case ATOM_WS_REMAINDER: | ||
| 236 | val = gctx->divmul[1]; | ||
| 237 | break; | ||
| 238 | case ATOM_WS_DATAPTR: | ||
| 239 | val = gctx->data_block; | ||
| 240 | break; | ||
| 241 | case ATOM_WS_SHIFT: | ||
| 242 | val = gctx->shift; | ||
| 243 | break; | ||
| 244 | case ATOM_WS_OR_MASK: | ||
| 245 | val = 1 << gctx->shift; | ||
| 246 | break; | ||
| 247 | case ATOM_WS_AND_MASK: | ||
| 248 | val = ~(1 << gctx->shift); | ||
| 249 | break; | ||
| 250 | case ATOM_WS_FB_WINDOW: | ||
| 251 | val = gctx->fb_base; | ||
| 252 | break; | ||
| 253 | case ATOM_WS_ATTRIBUTES: | ||
| 254 | val = gctx->io_attr; | ||
| 255 | break; | ||
| 256 | case ATOM_WS_REGPTR: | ||
| 257 | val = gctx->reg_block; | ||
| 258 | break; | ||
| 259 | default: | ||
| 260 | val = ctx->ws[idx]; | ||
| 261 | } | ||
| 262 | break; | ||
| 263 | case ATOM_ARG_ID: | ||
| 264 | idx = U16(*ptr); | ||
| 265 | (*ptr) += 2; | ||
| 266 | if (print) { | ||
| 267 | if (gctx->data_block) | ||
| 268 | DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block); | ||
| 269 | else | ||
| 270 | DEBUG("ID[0x%04X]", idx); | ||
| 271 | } | ||
| 272 | val = U32(idx + gctx->data_block); | ||
| 273 | break; | ||
| 274 | case ATOM_ARG_FB: | ||
| 275 | idx = U8(*ptr); | ||
| 276 | (*ptr)++; | ||
| 277 | if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { | ||
| 278 | DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n", | ||
| 279 | gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); | ||
| 280 | val = 0; | ||
| 281 | } else | ||
| 282 | val = gctx->scratch[(gctx->fb_base / 4) + idx]; | ||
| 283 | if (print) | ||
| 284 | DEBUG("FB[0x%02X]", idx); | ||
| 285 | break; | ||
| 286 | case ATOM_ARG_IMM: | ||
| 287 | switch (align) { | ||
| 288 | case ATOM_SRC_DWORD: | ||
| 289 | val = U32(*ptr); | ||
| 290 | (*ptr) += 4; | ||
| 291 | if (print) | ||
| 292 | DEBUG("IMM 0x%08X\n", val); | ||
| 293 | return val; | ||
| 294 | case ATOM_SRC_WORD0: | ||
| 295 | case ATOM_SRC_WORD8: | ||
| 296 | case ATOM_SRC_WORD16: | ||
| 297 | val = U16(*ptr); | ||
| 298 | (*ptr) += 2; | ||
| 299 | if (print) | ||
| 300 | DEBUG("IMM 0x%04X\n", val); | ||
| 301 | return val; | ||
| 302 | case ATOM_SRC_BYTE0: | ||
| 303 | case ATOM_SRC_BYTE8: | ||
| 304 | case ATOM_SRC_BYTE16: | ||
| 305 | case ATOM_SRC_BYTE24: | ||
| 306 | val = U8(*ptr); | ||
| 307 | (*ptr)++; | ||
| 308 | if (print) | ||
| 309 | DEBUG("IMM 0x%02X\n", val); | ||
| 310 | return val; | ||
| 311 | } | ||
| 312 | return 0; | ||
| 313 | case ATOM_ARG_PLL: | ||
| 314 | idx = U8(*ptr); | ||
| 315 | (*ptr)++; | ||
| 316 | if (print) | ||
| 317 | DEBUG("PLL[0x%02X]", idx); | ||
| 318 | val = gctx->card->pll_read(gctx->card, idx); | ||
| 319 | break; | ||
| 320 | case ATOM_ARG_MC: | ||
| 321 | idx = U8(*ptr); | ||
| 322 | (*ptr)++; | ||
| 323 | if (print) | ||
| 324 | DEBUG("MC[0x%02X]", idx); | ||
| 325 | val = gctx->card->mc_read(gctx->card, idx); | ||
| 326 | break; | ||
| 327 | } | ||
| 328 | if (saved) | ||
| 329 | *saved = val; | ||
| 330 | val &= atom_arg_mask[align]; | ||
| 331 | val >>= atom_arg_shift[align]; | ||
| 332 | if (print) | ||
| 333 | switch (align) { | ||
| 334 | case ATOM_SRC_DWORD: | ||
| 335 | DEBUG(".[31:0] -> 0x%08X\n", val); | ||
| 336 | break; | ||
| 337 | case ATOM_SRC_WORD0: | ||
| 338 | DEBUG(".[15:0] -> 0x%04X\n", val); | ||
| 339 | break; | ||
| 340 | case ATOM_SRC_WORD8: | ||
| 341 | DEBUG(".[23:8] -> 0x%04X\n", val); | ||
| 342 | break; | ||
| 343 | case ATOM_SRC_WORD16: | ||
| 344 | DEBUG(".[31:16] -> 0x%04X\n", val); | ||
| 345 | break; | ||
| 346 | case ATOM_SRC_BYTE0: | ||
| 347 | DEBUG(".[7:0] -> 0x%02X\n", val); | ||
| 348 | break; | ||
| 349 | case ATOM_SRC_BYTE8: | ||
| 350 | DEBUG(".[15:8] -> 0x%02X\n", val); | ||
| 351 | break; | ||
| 352 | case ATOM_SRC_BYTE16: | ||
| 353 | DEBUG(".[23:16] -> 0x%02X\n", val); | ||
| 354 | break; | ||
| 355 | case ATOM_SRC_BYTE24: | ||
| 356 | DEBUG(".[31:24] -> 0x%02X\n", val); | ||
| 357 | break; | ||
| 358 | } | ||
| 359 | return val; | ||
| 360 | } | ||
| 361 | |||
| 362 | static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr) | ||
| 363 | { | ||
| 364 | uint32_t align = (attr >> 3) & 7, arg = attr & 7; | ||
| 365 | switch (arg) { | ||
| 366 | case ATOM_ARG_REG: | ||
| 367 | case ATOM_ARG_ID: | ||
| 368 | (*ptr) += 2; | ||
| 369 | break; | ||
| 370 | case ATOM_ARG_PLL: | ||
| 371 | case ATOM_ARG_MC: | ||
| 372 | case ATOM_ARG_PS: | ||
| 373 | case ATOM_ARG_WS: | ||
| 374 | case ATOM_ARG_FB: | ||
| 375 | (*ptr)++; | ||
| 376 | break; | ||
| 377 | case ATOM_ARG_IMM: | ||
| 378 | switch (align) { | ||
| 379 | case ATOM_SRC_DWORD: | ||
| 380 | (*ptr) += 4; | ||
| 381 | return; | ||
| 382 | case ATOM_SRC_WORD0: | ||
| 383 | case ATOM_SRC_WORD8: | ||
| 384 | case ATOM_SRC_WORD16: | ||
| 385 | (*ptr) += 2; | ||
| 386 | return; | ||
| 387 | case ATOM_SRC_BYTE0: | ||
| 388 | case ATOM_SRC_BYTE8: | ||
| 389 | case ATOM_SRC_BYTE16: | ||
| 390 | case ATOM_SRC_BYTE24: | ||
| 391 | (*ptr)++; | ||
| 392 | return; | ||
| 393 | } | ||
| 394 | return; | ||
| 395 | } | ||
| 396 | } | ||
| 397 | |||
| 398 | static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr) | ||
| 399 | { | ||
| 400 | return atom_get_src_int(ctx, attr, ptr, NULL, 1); | ||
| 401 | } | ||
| 402 | |||
| 403 | static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr) | ||
| 404 | { | ||
| 405 | uint32_t val = 0xCDCDCDCD; | ||
| 406 | |||
| 407 | switch (align) { | ||
| 408 | case ATOM_SRC_DWORD: | ||
| 409 | val = U32(*ptr); | ||
| 410 | (*ptr) += 4; | ||
| 411 | break; | ||
| 412 | case ATOM_SRC_WORD0: | ||
| 413 | case ATOM_SRC_WORD8: | ||
| 414 | case ATOM_SRC_WORD16: | ||
| 415 | val = U16(*ptr); | ||
| 416 | (*ptr) += 2; | ||
| 417 | break; | ||
| 418 | case ATOM_SRC_BYTE0: | ||
| 419 | case ATOM_SRC_BYTE8: | ||
| 420 | case ATOM_SRC_BYTE16: | ||
| 421 | case ATOM_SRC_BYTE24: | ||
| 422 | val = U8(*ptr); | ||
| 423 | (*ptr)++; | ||
| 424 | break; | ||
| 425 | } | ||
| 426 | return val; | ||
| 427 | } | ||
| 428 | |||
| 429 | static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, | ||
| 430 | int *ptr, uint32_t *saved, int print) | ||
| 431 | { | ||
| 432 | return atom_get_src_int(ctx, | ||
| 433 | arg | atom_dst_to_src[(attr >> 3) & | ||
| 434 | 7][(attr >> 6) & 3] << 3, | ||
| 435 | ptr, saved, print); | ||
| 436 | } | ||
| 437 | |||
| 438 | static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr) | ||
| 439 | { | ||
| 440 | atom_skip_src_int(ctx, | ||
| 441 | arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & | ||
| 442 | 3] << 3, ptr); | ||
| 443 | } | ||
| 444 | |||
| 445 | static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, | ||
| 446 | int *ptr, uint32_t val, uint32_t saved) | ||
| 447 | { | ||
| 448 | uint32_t align = | ||
| 449 | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val = | ||
| 450 | val, idx; | ||
| 451 | struct atom_context *gctx = ctx->ctx; | ||
| 452 | old_val &= atom_arg_mask[align] >> atom_arg_shift[align]; | ||
| 453 | val <<= atom_arg_shift[align]; | ||
| 454 | val &= atom_arg_mask[align]; | ||
| 455 | saved &= ~atom_arg_mask[align]; | ||
| 456 | val |= saved; | ||
| 457 | switch (arg) { | ||
| 458 | case ATOM_ARG_REG: | ||
| 459 | idx = U16(*ptr); | ||
| 460 | (*ptr) += 2; | ||
| 461 | DEBUG("REG[0x%04X]", idx); | ||
| 462 | idx += gctx->reg_block; | ||
| 463 | switch (gctx->io_mode) { | ||
| 464 | case ATOM_IO_MM: | ||
| 465 | if (idx == 0) | ||
| 466 | gctx->card->reg_write(gctx->card, idx, | ||
| 467 | val << 2); | ||
| 468 | else | ||
| 469 | gctx->card->reg_write(gctx->card, idx, val); | ||
| 470 | break; | ||
| 471 | case ATOM_IO_PCI: | ||
| 472 | printk(KERN_INFO | ||
| 473 | "PCI registers are not implemented.\n"); | ||
| 474 | return; | ||
| 475 | case ATOM_IO_SYSIO: | ||
| 476 | printk(KERN_INFO | ||
| 477 | "SYSIO registers are not implemented.\n"); | ||
| 478 | return; | ||
| 479 | default: | ||
| 480 | if (!(gctx->io_mode & 0x80)) { | ||
| 481 | printk(KERN_INFO "Bad IO mode.\n"); | ||
| 482 | return; | ||
| 483 | } | ||
| 484 | if (!gctx->iio[gctx->io_mode & 0xFF]) { | ||
| 485 | printk(KERN_INFO | ||
| 486 | "Undefined indirect IO write method %d.\n", | ||
| 487 | gctx->io_mode & 0x7F); | ||
| 488 | return; | ||
| 489 | } | ||
| 490 | atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF], | ||
| 491 | idx, val); | ||
| 492 | } | ||
| 493 | break; | ||
| 494 | case ATOM_ARG_PS: | ||
| 495 | idx = U8(*ptr); | ||
| 496 | (*ptr)++; | ||
| 497 | DEBUG("PS[0x%02X]", idx); | ||
| 498 | ctx->ps[idx] = cpu_to_le32(val); | ||
| 499 | break; | ||
| 500 | case ATOM_ARG_WS: | ||
| 501 | idx = U8(*ptr); | ||
| 502 | (*ptr)++; | ||
| 503 | DEBUG("WS[0x%02X]", idx); | ||
| 504 | switch (idx) { | ||
| 505 | case ATOM_WS_QUOTIENT: | ||
| 506 | gctx->divmul[0] = val; | ||
| 507 | break; | ||
| 508 | case ATOM_WS_REMAINDER: | ||
| 509 | gctx->divmul[1] = val; | ||
| 510 | break; | ||
| 511 | case ATOM_WS_DATAPTR: | ||
| 512 | gctx->data_block = val; | ||
| 513 | break; | ||
| 514 | case ATOM_WS_SHIFT: | ||
| 515 | gctx->shift = val; | ||
| 516 | break; | ||
| 517 | case ATOM_WS_OR_MASK: | ||
| 518 | case ATOM_WS_AND_MASK: | ||
| 519 | break; | ||
| 520 | case ATOM_WS_FB_WINDOW: | ||
| 521 | gctx->fb_base = val; | ||
| 522 | break; | ||
| 523 | case ATOM_WS_ATTRIBUTES: | ||
| 524 | gctx->io_attr = val; | ||
| 525 | break; | ||
| 526 | case ATOM_WS_REGPTR: | ||
| 527 | gctx->reg_block = val; | ||
| 528 | break; | ||
| 529 | default: | ||
| 530 | ctx->ws[idx] = val; | ||
| 531 | } | ||
| 532 | break; | ||
| 533 | case ATOM_ARG_FB: | ||
| 534 | idx = U8(*ptr); | ||
| 535 | (*ptr)++; | ||
| 536 | if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { | ||
| 537 | DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n", | ||
| 538 | gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); | ||
| 539 | } else | ||
| 540 | gctx->scratch[(gctx->fb_base / 4) + idx] = val; | ||
| 541 | DEBUG("FB[0x%02X]", idx); | ||
| 542 | break; | ||
| 543 | case ATOM_ARG_PLL: | ||
| 544 | idx = U8(*ptr); | ||
| 545 | (*ptr)++; | ||
| 546 | DEBUG("PLL[0x%02X]", idx); | ||
| 547 | gctx->card->pll_write(gctx->card, idx, val); | ||
| 548 | break; | ||
| 549 | case ATOM_ARG_MC: | ||
| 550 | idx = U8(*ptr); | ||
| 551 | (*ptr)++; | ||
| 552 | DEBUG("MC[0x%02X]", idx); | ||
| 553 | gctx->card->mc_write(gctx->card, idx, val); | ||
| 554 | return; | ||
| 555 | } | ||
| 556 | switch (align) { | ||
| 557 | case ATOM_SRC_DWORD: | ||
| 558 | DEBUG(".[31:0] <- 0x%08X\n", old_val); | ||
| 559 | break; | ||
| 560 | case ATOM_SRC_WORD0: | ||
| 561 | DEBUG(".[15:0] <- 0x%04X\n", old_val); | ||
| 562 | break; | ||
| 563 | case ATOM_SRC_WORD8: | ||
| 564 | DEBUG(".[23:8] <- 0x%04X\n", old_val); | ||
| 565 | break; | ||
| 566 | case ATOM_SRC_WORD16: | ||
| 567 | DEBUG(".[31:16] <- 0x%04X\n", old_val); | ||
| 568 | break; | ||
| 569 | case ATOM_SRC_BYTE0: | ||
| 570 | DEBUG(".[7:0] <- 0x%02X\n", old_val); | ||
| 571 | break; | ||
| 572 | case ATOM_SRC_BYTE8: | ||
| 573 | DEBUG(".[15:8] <- 0x%02X\n", old_val); | ||
| 574 | break; | ||
| 575 | case ATOM_SRC_BYTE16: | ||
| 576 | DEBUG(".[23:16] <- 0x%02X\n", old_val); | ||
| 577 | break; | ||
| 578 | case ATOM_SRC_BYTE24: | ||
| 579 | DEBUG(".[31:24] <- 0x%02X\n", old_val); | ||
| 580 | break; | ||
| 581 | } | ||
| 582 | } | ||
| 583 | |||
| 584 | static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg) | ||
| 585 | { | ||
| 586 | uint8_t attr = U8((*ptr)++); | ||
| 587 | uint32_t dst, src, saved; | ||
| 588 | int dptr = *ptr; | ||
| 589 | SDEBUG(" dst: "); | ||
| 590 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | ||
| 591 | SDEBUG(" src: "); | ||
| 592 | src = atom_get_src(ctx, attr, ptr); | ||
| 593 | dst += src; | ||
| 594 | SDEBUG(" dst: "); | ||
| 595 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | ||
| 596 | } | ||
| 597 | |||
| 598 | static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg) | ||
| 599 | { | ||
| 600 | uint8_t attr = U8((*ptr)++); | ||
| 601 | uint32_t dst, src, saved; | ||
| 602 | int dptr = *ptr; | ||
| 603 | SDEBUG(" dst: "); | ||
| 604 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | ||
| 605 | SDEBUG(" src: "); | ||
| 606 | src = atom_get_src(ctx, attr, ptr); | ||
| 607 | dst &= src; | ||
| 608 | SDEBUG(" dst: "); | ||
| 609 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | ||
| 610 | } | ||
| 611 | |||
| 612 | static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg) | ||
| 613 | { | ||
| 614 | printk("ATOM BIOS beeped!\n"); | ||
| 615 | } | ||
| 616 | |||
| 617 | static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) | ||
| 618 | { | ||
| 619 | int idx = U8((*ptr)++); | ||
| 620 | int r = 0; | ||
| 621 | |||
| 622 | if (idx < ATOM_TABLE_NAMES_CNT) | ||
| 623 | SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); | ||
| 624 | else | ||
| 625 | SDEBUG(" table: %d\n", idx); | ||
| 626 | if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) | ||
| 627 | r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); | ||
| 628 | if (r) { | ||
| 629 | ctx->abort = true; | ||
| 630 | } | ||
| 631 | } | ||
| 632 | |||
| 633 | static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) | ||
| 634 | { | ||
| 635 | uint8_t attr = U8((*ptr)++); | ||
| 636 | uint32_t saved; | ||
| 637 | int dptr = *ptr; | ||
| 638 | attr &= 0x38; | ||
| 639 | attr |= atom_def_dst[attr >> 3] << 6; | ||
| 640 | atom_get_dst(ctx, arg, attr, ptr, &saved, 0); | ||
| 641 | SDEBUG(" dst: "); | ||
| 642 | atom_put_dst(ctx, arg, attr, &dptr, 0, saved); | ||
| 643 | } | ||
| 644 | |||
| 645 | static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg) | ||
| 646 | { | ||
| 647 | uint8_t attr = U8((*ptr)++); | ||
| 648 | uint32_t dst, src; | ||
| 649 | SDEBUG(" src1: "); | ||
| 650 | dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); | ||
| 651 | SDEBUG(" src2: "); | ||
| 652 | src = atom_get_src(ctx, attr, ptr); | ||
| 653 | ctx->ctx->cs_equal = (dst == src); | ||
| 654 | ctx->ctx->cs_above = (dst > src); | ||
| 655 | SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE", | ||
| 656 | ctx->ctx->cs_above ? "GT" : "LE"); | ||
| 657 | } | ||
| 658 | |||
| 659 | static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) | ||
| 660 | { | ||
| 661 | unsigned count = U8((*ptr)++); | ||
| 662 | SDEBUG(" count: %d\n", count); | ||
| 663 | if (arg == ATOM_UNIT_MICROSEC) | ||
| 664 | udelay(count); | ||
| 665 | else if (!drm_can_sleep()) | ||
| 666 | mdelay(count); | ||
| 667 | else | ||
| 668 | msleep(count); | ||
| 669 | } | ||
| 670 | |||
| 671 | static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg) | ||
| 672 | { | ||
| 673 | uint8_t attr = U8((*ptr)++); | ||
| 674 | uint32_t dst, src; | ||
| 675 | SDEBUG(" src1: "); | ||
| 676 | dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); | ||
| 677 | SDEBUG(" src2: "); | ||
| 678 | src = atom_get_src(ctx, attr, ptr); | ||
| 679 | if (src != 0) { | ||
| 680 | ctx->ctx->divmul[0] = dst / src; | ||
| 681 | ctx->ctx->divmul[1] = dst % src; | ||
| 682 | } else { | ||
| 683 | ctx->ctx->divmul[0] = 0; | ||
| 684 | ctx->ctx->divmul[1] = 0; | ||
| 685 | } | ||
| 686 | } | ||
| 687 | |||
| 688 | static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) | ||
| 689 | { | ||
| 690 | /* functionally, a nop */ | ||
| 691 | } | ||
| 692 | |||
| 693 | static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) | ||
| 694 | { | ||
| 695 | int execute = 0, target = U16(*ptr); | ||
| 696 | unsigned long cjiffies; | ||
| 697 | |||
| 698 | (*ptr) += 2; | ||
| 699 | switch (arg) { | ||
| 700 | case ATOM_COND_ABOVE: | ||
| 701 | execute = ctx->ctx->cs_above; | ||
| 702 | break; | ||
| 703 | case ATOM_COND_ABOVEOREQUAL: | ||
| 704 | execute = ctx->ctx->cs_above || ctx->ctx->cs_equal; | ||
| 705 | break; | ||
| 706 | case ATOM_COND_ALWAYS: | ||
| 707 | execute = 1; | ||
| 708 | break; | ||
| 709 | case ATOM_COND_BELOW: | ||
| 710 | execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal); | ||
| 711 | break; | ||
| 712 | case ATOM_COND_BELOWOREQUAL: | ||
| 713 | execute = !ctx->ctx->cs_above; | ||
| 714 | break; | ||
| 715 | case ATOM_COND_EQUAL: | ||
| 716 | execute = ctx->ctx->cs_equal; | ||
| 717 | break; | ||
| 718 | case ATOM_COND_NOTEQUAL: | ||
| 719 | execute = !ctx->ctx->cs_equal; | ||
| 720 | break; | ||
| 721 | } | ||
| 722 | if (arg != ATOM_COND_ALWAYS) | ||
| 723 | SDEBUG(" taken: %s\n", execute ? "yes" : "no"); | ||
| 724 | SDEBUG(" target: 0x%04X\n", target); | ||
| 725 | if (execute) { | ||
| 726 | if (ctx->last_jump == (ctx->start + target)) { | ||
| 727 | cjiffies = jiffies; | ||
| 728 | if (time_after(cjiffies, ctx->last_jump_jiffies)) { | ||
| 729 | cjiffies -= ctx->last_jump_jiffies; | ||
| 730 | if ((jiffies_to_msecs(cjiffies) > 5000)) { | ||
| 731 | DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n"); | ||
| 732 | ctx->abort = true; | ||
| 733 | } | ||
| 734 | } else { | ||
| 735 | /* jiffies wrapped around; we will just wait a little longer */ | ||
| 736 | ctx->last_jump_jiffies = jiffies; | ||
| 737 | } | ||
| 738 | } else { | ||
| 739 | ctx->last_jump = ctx->start + target; | ||
| 740 | ctx->last_jump_jiffies = jiffies; | ||
| 741 | } | ||
| 742 | *ptr = ctx->start + target; | ||
| 743 | } | ||
| 744 | } | ||
| 745 | |||
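The conditional-jump handler doubles as a watchdog: when the same jump target keeps being re-taken for more than five seconds of wall-clock time, the interpreter assumes the BIOS table is stuck and sets `ctx->abort`. A userspace sketch of the same bookkeeping, with jiffies replaced by `CLOCK_MONOTONIC` (the struct and function names here are made up):

```c
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* sketch of the loop watchdog in atom_op_jump(); the 5000 ms
 * threshold matches the code above */
struct watchdog {
	unsigned last_jump;
	struct timespec last_jump_time;
	bool abort;
};

static long ms_since(const struct timespec *then)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - then->tv_sec) * 1000 +
	       (now.tv_nsec - then->tv_nsec) / 1000000;
}

static void note_jump(struct watchdog *wd, unsigned target)
{
	if (wd->last_jump == target) {
		if (ms_since(&wd->last_jump_time) > 5000) {
			fprintf(stderr, "stuck in loop, aborting\n");
			wd->abort = true;
		}
	} else {
		wd->last_jump = target;	/* new target: restart the clock */
		clock_gettime(CLOCK_MONOTONIC, &wd->last_jump_time);
	}
}

int main(void)
{
	struct watchdog wd = { .last_jump = 0 };

	note_jump(&wd, 0x40);   /* first visit: remembered */
	note_jump(&wd, 0x40);   /* re-taken immediately: well under 5 s */
	return wd.abort;
}
```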
| 746 | static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) | ||
| 747 | { | ||
| 748 | uint8_t attr = U8((*ptr)++); | ||
| 749 | uint32_t dst, mask, src, saved; | ||
| 750 | int dptr = *ptr; | ||
| 751 | SDEBUG(" dst: "); | ||
| 752 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | ||
| 753 | mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); | ||
| 754 | SDEBUG(" mask: 0x%08x", mask); | ||
| 755 | SDEBUG(" src: "); | ||
| 756 | src = atom_get_src(ctx, attr, ptr); | ||
| 757 | dst &= mask; | ||
| 758 | dst |= src; | ||
| 759 | SDEBUG(" dst: "); | ||
| 760 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | ||
| 761 | } | ||
| 762 | |||
| 763 | static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg) | ||
| 764 | { | ||
| 765 | uint8_t attr = U8((*ptr)++); | ||
| 766 | uint32_t src, saved; | ||
| 767 | int dptr = *ptr; | ||
| 768 | if (((attr >> 3) & 7) != ATOM_SRC_DWORD) | ||
| 769 | atom_get_dst(ctx, arg, attr, ptr, &saved, 0); | ||
| 770 | else { | ||
| 771 | atom_skip_dst(ctx, arg, attr, ptr); | ||
| 772 | saved = 0xCDCDCDCD; | ||
| 773 | } | ||
| 774 | SDEBUG(" src: "); | ||
| 775 | src = atom_get_src(ctx, attr, ptr); | ||
| 776 | SDEBUG(" dst: "); | ||
| 777 | atom_put_dst(ctx, arg, attr, &dptr, src, saved); | ||
| 778 | } | ||
| 779 | |||
| 780 | static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg) | ||
| 781 | { | ||
| 782 | uint8_t attr = U8((*ptr)++); | ||
| 783 | uint32_t dst, src; | ||
| 784 | SDEBUG(" src1: "); | ||
| 785 | dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); | ||
| 786 | SDEBUG(" src2: "); | ||
| 787 | src = atom_get_src(ctx, attr, ptr); | ||
| 788 | ctx->ctx->divmul[0] = dst * src; | ||
| 789 | } | ||
| 790 | |||
| 791 | static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg) | ||
| 792 | { | ||
| 793 | /* nothing */ | ||
| 794 | } | ||
| 795 | |||
| 796 | static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg) | ||
| 797 | { | ||
| 798 | uint8_t attr = U8((*ptr)++); | ||
| 799 | uint32_t dst, src, saved; | ||
| 800 | int dptr = *ptr; | ||
| 801 | SDEBUG(" dst: "); | ||
| 802 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | ||
| 803 | SDEBUG(" src: "); | ||
| 804 | src = atom_get_src(ctx, attr, ptr); | ||
| 805 | dst |= src; | ||
| 806 | SDEBUG(" dst: "); | ||
| 807 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | ||
| 808 | } | ||
| 809 | |||
| 810 | static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg) | ||
| 811 | { | ||
| 812 | uint8_t val = U8((*ptr)++); | ||
| 813 | SDEBUG("POST card output: 0x%02X\n", val); | ||
| 814 | } | ||
| 815 | |||
| 816 | static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg) | ||
| 817 | { | ||
| 818 | printk(KERN_INFO "unimplemented!\n"); | ||
| 819 | } | ||
| 820 | |||
| 821 | static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg) | ||
| 822 | { | ||
| 823 | printk(KERN_INFO "unimplemented!\n"); | ||
| 824 | } | ||
| 825 | |||
| 826 | static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg) | ||
| 827 | { | ||
| 828 | printk(KERN_INFO "unimplemented!\n"); | ||
| 829 | } | ||
| 830 | |||
| 831 | static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg) | ||
| 832 | { | ||
| 833 | int idx = U8(*ptr); | ||
| 834 | (*ptr)++; | ||
| 835 | SDEBUG(" block: %d\n", idx); | ||
| 836 | if (!idx) | ||
| 837 | ctx->ctx->data_block = 0; | ||
| 838 | else if (idx == 255) | ||
| 839 | ctx->ctx->data_block = ctx->start; | ||
| 840 | else | ||
| 841 | ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx); | ||
| 842 | SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block); | ||
| 843 | } | ||
| 844 | |||
| 845 | static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg) | ||
| 846 | { | ||
| 847 | uint8_t attr = U8((*ptr)++); | ||
| 848 | SDEBUG(" fb_base: "); | ||
| 849 | ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr); | ||
| 850 | } | ||
| 851 | |||
| 852 | static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg) | ||
| 853 | { | ||
| 854 | int port; | ||
| 855 | switch (arg) { | ||
| 856 | case ATOM_PORT_ATI: | ||
| 857 | port = U16(*ptr); | ||
| 858 | if (port < ATOM_IO_NAMES_CNT) | ||
| 859 | SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]); | ||
| 860 | else | ||
| 861 | SDEBUG(" port: %d\n", port); | ||
| 862 | if (!port) | ||
| 863 | ctx->ctx->io_mode = ATOM_IO_MM; | ||
| 864 | else | ||
| 865 | ctx->ctx->io_mode = ATOM_IO_IIO | port; | ||
| 866 | (*ptr) += 2; | ||
| 867 | break; | ||
| 868 | case ATOM_PORT_PCI: | ||
| 869 | ctx->ctx->io_mode = ATOM_IO_PCI; | ||
| 870 | (*ptr)++; | ||
| 871 | break; | ||
| 872 | case ATOM_PORT_SYSIO: | ||
| 873 | ctx->ctx->io_mode = ATOM_IO_SYSIO; | ||
| 874 | (*ptr)++; | ||
| 875 | break; | ||
| 876 | } | ||
| 877 | } | ||
| 878 | |||
| 879 | static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg) | ||
| 880 | { | ||
| 881 | ctx->ctx->reg_block = U16(*ptr); | ||
| 882 | (*ptr) += 2; | ||
| 883 | SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); | ||
| 884 | } | ||
| 885 | |||
| 886 | static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg) | ||
| 887 | { | ||
| 888 | uint8_t attr = U8((*ptr)++), shift; | ||
| 889 | uint32_t saved, dst; | ||
| 890 | int dptr = *ptr; | ||
| 891 | attr &= 0x38; | ||
| 892 | attr |= atom_def_dst[attr >> 3] << 6; | ||
| 893 | SDEBUG(" dst: "); | ||
| 894 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | ||
| 895 | shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); | ||
| 896 | SDEBUG(" shift: %d\n", shift); | ||
| 897 | dst <<= shift; | ||
| 898 | SDEBUG(" dst: "); | ||
| 899 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | ||
| 900 | } | ||
| 901 | |||
| 902 | static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg) | ||
| 903 | { | ||
| 904 | uint8_t attr = U8((*ptr)++), shift; | ||
| 905 | uint32_t saved, dst; | ||
| 906 | int dptr = *ptr; | ||
| 907 | attr &= 0x38; | ||
| 908 | attr |= atom_def_dst[attr >> 3] << 6; | ||
| 909 | SDEBUG(" dst: "); | ||
| 910 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | ||
| 911 | shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); | ||
| 912 | SDEBUG(" shift: %d\n", shift); | ||
| 913 | dst >>= shift; | ||
| 914 | SDEBUG(" dst: "); | ||
| 915 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | ||
| 916 | } | ||
| 917 | |||
| 918 | static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) | ||
| 919 | { | ||
| 920 | uint8_t attr = U8((*ptr)++), shift; | ||
| 921 | uint32_t saved, dst; | ||
| 922 | int dptr = *ptr; | ||
| 923 | uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; | ||
| 924 | SDEBUG(" dst: "); | ||
| 925 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | ||
| 926 | /* op needs the full dst value */ | ||
| 927 | dst = saved; | ||
| 928 | shift = atom_get_src(ctx, attr, ptr); | ||
| 929 | SDEBUG(" shift: %d\n", shift); | ||
| 930 | dst <<= shift; | ||
| 931 | dst &= atom_arg_mask[dst_align]; | ||
| 932 | dst >>= atom_arg_shift[dst_align]; | ||
| 933 | SDEBUG(" dst: "); | ||
| 934 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | ||
| 935 | } | ||
| 936 | |||
| 937 | static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) | ||
| 938 | { | ||
| 939 | uint8_t attr = U8((*ptr)++), shift; | ||
| 940 | uint32_t saved, dst; | ||
| 941 | int dptr = *ptr; | ||
| 942 | uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; | ||
| 943 | SDEBUG(" dst: "); | ||
| 944 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | ||
| 945 | /* op needs the full dst value */ | ||
| 946 | dst = saved; | ||
| 947 | shift = atom_get_src(ctx, attr, ptr); | ||
| 948 | SDEBUG(" shift: %d\n", shift); | ||
| 949 | dst >>= shift; | ||
| 950 | dst &= atom_arg_mask[dst_align]; | ||
| 951 | dst >>= atom_arg_shift[dst_align]; | ||
| 952 | SDEBUG(" dst: "); | ||
| 953 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | ||
| 954 | } | ||
| 955 | |||
| 956 | static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg) | ||
| 957 | { | ||
| 958 | uint8_t attr = U8((*ptr)++); | ||
| 959 | uint32_t dst, src, saved; | ||
| 960 | int dptr = *ptr; | ||
| 961 | SDEBUG(" dst: "); | ||
| 962 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | ||
| 963 | SDEBUG(" src: "); | ||
| 964 | src = atom_get_src(ctx, attr, ptr); | ||
| 965 | dst -= src; | ||
| 966 | SDEBUG(" dst: "); | ||
| 967 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | ||
| 968 | } | ||
| 969 | |||
| 970 | static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg) | ||
| 971 | { | ||
| 972 | uint8_t attr = U8((*ptr)++); | ||
| 973 | uint32_t src, val, target; | ||
| 974 | SDEBUG(" switch: "); | ||
| 975 | src = atom_get_src(ctx, attr, ptr); | ||
| 976 | while (U16(*ptr) != ATOM_CASE_END) | ||
| 977 | if (U8(*ptr) == ATOM_CASE_MAGIC) { | ||
| 978 | (*ptr)++; | ||
| 979 | SDEBUG(" case: "); | ||
| 980 | val = | ||
| 981 | atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, | ||
| 982 | ptr); | ||
| 983 | target = U16(*ptr); | ||
| 984 | if (val == src) { | ||
| 985 | SDEBUG(" target: %04X\n", target); | ||
| 986 | *ptr = ctx->start + target; | ||
| 987 | return; | ||
| 988 | } | ||
| 989 | (*ptr) += 2; | ||
| 990 | } else { | ||
| 991 | printk(KERN_INFO "Bad case.\n"); | ||
| 992 | return; | ||
| 993 | } | ||
| 994 | (*ptr) += 2; | ||
| 995 | } | ||
| 996 | |||
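Case tables scanned by the switch opcode are a sequence of entries, each introduced by the ATOM_CASE_MAGIC byte (0x63) and closed by the 16-bit ATOM_CASE_END marker (0x5A5A, per atom.h below); a matching case redirects the instruction pointer to ctx->start + target. A simplified standalone walk of such a table, assuming one-byte immediates for brevity (the real interpreter reads the immediate with the width encoded in the attribute byte):

```c
#include <stdint.h>

#define CASE_MAGIC 0x63

/* Each case entry is [0x63][imm value][u16 target, little-endian];
 * the list ends with the 16-bit 0x5A5A marker. Returns the new
 * instruction offset, or -1 on a malformed entry ("Bad case."). */
static int switch_model(const uint8_t *code, int ptr, int start, uint8_t src)
{
	while (!(code[ptr] == 0x5A && code[ptr + 1] == 0x5A)) {
		if (code[ptr] != CASE_MAGIC)
			return -1;                     /* malformed case list */
		uint8_t val = code[ptr + 1];       /* one-byte immediate */
		int target = code[ptr + 2] | (code[ptr + 3] << 8);

		if (val == src)
			return start + target;         /* take this case */
		ptr += 4;                          /* skip to next entry */
	}
	return ptr + 2;  /* no match: resume just past the END marker */
}
```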
| 997 | static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg) | ||
| 998 | { | ||
| 999 | uint8_t attr = U8((*ptr)++); | ||
| 1000 | uint32_t dst, src; | ||
| 1001 | SDEBUG(" src1: "); | ||
| 1002 | dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); | ||
| 1003 | SDEBUG(" src2: "); | ||
| 1004 | src = atom_get_src(ctx, attr, ptr); | ||
| 1005 | ctx->ctx->cs_equal = ((dst & src) == 0); | ||
| 1006 | SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE"); | ||
| 1007 | } | ||
| 1008 | |||
| 1009 | static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg) | ||
| 1010 | { | ||
| 1011 | uint8_t attr = U8((*ptr)++); | ||
| 1012 | uint32_t dst, src, saved; | ||
| 1013 | int dptr = *ptr; | ||
| 1014 | SDEBUG(" dst: "); | ||
| 1015 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | ||
| 1016 | SDEBUG(" src: "); | ||
| 1017 | src = atom_get_src(ctx, attr, ptr); | ||
| 1018 | dst ^= src; | ||
| 1019 | SDEBUG(" dst: "); | ||
| 1020 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | ||
| 1021 | } | ||
| 1022 | |||
| 1023 | static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg) | ||
| 1024 | { | ||
| 1025 | printk(KERN_INFO "unimplemented!\n"); | ||
| 1026 | } | ||
| 1027 | |||
| 1028 | static struct { | ||
| 1029 | void (*func)(atom_exec_context *, int *, int); | ||
| 1030 | int arg; | ||
| 1031 | } opcode_table[ATOM_OP_CNT] = { | ||
| 1032 | { NULL, 0 }, | ||
| 1033 | { atom_op_move, ATOM_ARG_REG }, | ||
| 1034 | { atom_op_move, ATOM_ARG_PS }, | ||
| 1035 | { atom_op_move, ATOM_ARG_WS }, | ||
| 1036 | { atom_op_move, ATOM_ARG_FB }, | ||
| 1037 | { atom_op_move, ATOM_ARG_PLL }, | ||
| 1038 | { atom_op_move, ATOM_ARG_MC }, | ||
| 1039 | { atom_op_and, ATOM_ARG_REG }, | ||
| 1040 | { atom_op_and, ATOM_ARG_PS }, | ||
| 1041 | { atom_op_and, ATOM_ARG_WS }, | ||
| 1042 | { atom_op_and, ATOM_ARG_FB }, | ||
| 1043 | { atom_op_and, ATOM_ARG_PLL }, | ||
| 1044 | { atom_op_and, ATOM_ARG_MC }, | ||
| 1045 | { atom_op_or, ATOM_ARG_REG }, | ||
| 1046 | { atom_op_or, ATOM_ARG_PS }, | ||
| 1047 | { atom_op_or, ATOM_ARG_WS }, | ||
| 1048 | { atom_op_or, ATOM_ARG_FB }, | ||
| 1049 | { atom_op_or, ATOM_ARG_PLL }, | ||
| 1050 | { atom_op_or, ATOM_ARG_MC }, | ||
| 1051 | { atom_op_shift_left, ATOM_ARG_REG }, | ||
| 1052 | { atom_op_shift_left, ATOM_ARG_PS }, | ||
| 1053 | { atom_op_shift_left, ATOM_ARG_WS }, | ||
| 1054 | { atom_op_shift_left, ATOM_ARG_FB }, | ||
| 1055 | { atom_op_shift_left, ATOM_ARG_PLL }, | ||
| 1056 | { atom_op_shift_left, ATOM_ARG_MC }, | ||
| 1057 | { atom_op_shift_right, ATOM_ARG_REG }, | ||
| 1058 | { atom_op_shift_right, ATOM_ARG_PS }, | ||
| 1059 | { atom_op_shift_right, ATOM_ARG_WS }, | ||
| 1060 | { atom_op_shift_right, ATOM_ARG_FB }, | ||
| 1061 | { atom_op_shift_right, ATOM_ARG_PLL }, | ||
| 1062 | { atom_op_shift_right, ATOM_ARG_MC }, | ||
| 1063 | { atom_op_mul, ATOM_ARG_REG }, | ||
| 1064 | { atom_op_mul, ATOM_ARG_PS }, | ||
| 1065 | { atom_op_mul, ATOM_ARG_WS }, | ||
| 1066 | { atom_op_mul, ATOM_ARG_FB }, | ||
| 1067 | { atom_op_mul, ATOM_ARG_PLL }, | ||
| 1068 | { atom_op_mul, ATOM_ARG_MC }, | ||
| 1069 | { atom_op_div, ATOM_ARG_REG }, | ||
| 1070 | { atom_op_div, ATOM_ARG_PS }, | ||
| 1071 | { atom_op_div, ATOM_ARG_WS }, | ||
| 1072 | { atom_op_div, ATOM_ARG_FB }, | ||
| 1073 | { atom_op_div, ATOM_ARG_PLL }, | ||
| 1074 | { atom_op_div, ATOM_ARG_MC }, | ||
| 1075 | { atom_op_add, ATOM_ARG_REG }, | ||
| 1076 | { atom_op_add, ATOM_ARG_PS }, | ||
| 1077 | { atom_op_add, ATOM_ARG_WS }, | ||
| 1078 | { atom_op_add, ATOM_ARG_FB }, | ||
| 1079 | { atom_op_add, ATOM_ARG_PLL }, | ||
| 1080 | { atom_op_add, ATOM_ARG_MC }, | ||
| 1081 | { atom_op_sub, ATOM_ARG_REG }, | ||
| 1082 | { atom_op_sub, ATOM_ARG_PS }, | ||
| 1083 | { atom_op_sub, ATOM_ARG_WS }, | ||
| 1084 | { atom_op_sub, ATOM_ARG_FB }, | ||
| 1085 | { atom_op_sub, ATOM_ARG_PLL }, | ||
| 1086 | { atom_op_sub, ATOM_ARG_MC }, | ||
| 1087 | { atom_op_setport, ATOM_PORT_ATI }, | ||
| 1088 | { atom_op_setport, ATOM_PORT_PCI }, | ||
| 1089 | { atom_op_setport, ATOM_PORT_SYSIO }, | ||
| 1090 | { atom_op_setregblock, 0 }, | ||
| 1091 | { atom_op_setfbbase, 0 }, | ||
| 1092 | { atom_op_compare, ATOM_ARG_REG }, | ||
| 1093 | { atom_op_compare, ATOM_ARG_PS }, | ||
| 1094 | { atom_op_compare, ATOM_ARG_WS }, | ||
| 1095 | { atom_op_compare, ATOM_ARG_FB }, | ||
| 1096 | { atom_op_compare, ATOM_ARG_PLL }, | ||
| 1097 | { atom_op_compare, ATOM_ARG_MC }, | ||
| 1098 | { atom_op_switch, 0 }, | ||
| 1099 | { atom_op_jump, ATOM_COND_ALWAYS }, | ||
| 1100 | { atom_op_jump, ATOM_COND_EQUAL }, | ||
| 1101 | { atom_op_jump, ATOM_COND_BELOW }, | ||
| 1102 | { atom_op_jump, ATOM_COND_ABOVE }, | ||
| 1103 | { atom_op_jump, ATOM_COND_BELOWOREQUAL }, | ||
| 1104 | { atom_op_jump, ATOM_COND_ABOVEOREQUAL }, | ||
| 1105 | { atom_op_jump, ATOM_COND_NOTEQUAL }, | ||
| 1106 | { atom_op_test, ATOM_ARG_REG }, | ||
| 1107 | { atom_op_test, ATOM_ARG_PS }, | ||
| 1108 | { atom_op_test, ATOM_ARG_WS }, | ||
| 1109 | { atom_op_test, ATOM_ARG_FB }, | ||
| 1110 | { atom_op_test, ATOM_ARG_PLL }, | ||
| 1111 | { atom_op_test, ATOM_ARG_MC }, | ||
| 1112 | { atom_op_delay, ATOM_UNIT_MILLISEC }, | ||
| 1113 | { atom_op_delay, ATOM_UNIT_MICROSEC }, | ||
| 1114 | { atom_op_calltable, 0 }, | ||
| 1115 | { atom_op_repeat, 0 }, | ||
| 1116 | { atom_op_clear, ATOM_ARG_REG }, | ||
| 1117 | { atom_op_clear, ATOM_ARG_PS }, | ||
| 1118 | { atom_op_clear, ATOM_ARG_WS }, | ||
| 1119 | { atom_op_clear, ATOM_ARG_FB }, | ||
| 1120 | { atom_op_clear, ATOM_ARG_PLL }, | ||
| 1121 | { atom_op_clear, ATOM_ARG_MC }, | ||
| 1122 | { atom_op_nop, 0 }, | ||
| 1123 | { atom_op_eot, 0 }, | ||
| 1124 | { atom_op_mask, ATOM_ARG_REG }, | ||
| 1125 | { atom_op_mask, ATOM_ARG_PS }, | ||
| 1126 | { atom_op_mask, ATOM_ARG_WS }, | ||
| 1127 | { atom_op_mask, ATOM_ARG_FB }, | ||
| 1128 | { atom_op_mask, ATOM_ARG_PLL }, | ||
| 1129 | { atom_op_mask, ATOM_ARG_MC }, | ||
| 1130 | { atom_op_postcard, 0 }, | ||
| 1131 | { atom_op_beep, 0 }, | ||
| 1132 | { atom_op_savereg, 0 }, | ||
| 1133 | { atom_op_restorereg, 0 }, | ||
| 1134 | { atom_op_setdatablock, 0 }, | ||
| 1135 | { atom_op_xor, ATOM_ARG_REG }, | ||
| 1136 | { atom_op_xor, ATOM_ARG_PS }, | ||
| 1137 | { atom_op_xor, ATOM_ARG_WS }, | ||
| 1138 | { atom_op_xor, ATOM_ARG_FB }, | ||
| 1139 | { atom_op_xor, ATOM_ARG_PLL }, | ||
| 1140 | { atom_op_xor, ATOM_ARG_MC }, | ||
| 1141 | { atom_op_shl, ATOM_ARG_REG }, | ||
| 1142 | { atom_op_shl, ATOM_ARG_PS }, | ||
| 1143 | { atom_op_shl, ATOM_ARG_WS }, | ||
| 1144 | { atom_op_shl, ATOM_ARG_FB }, | ||
| 1145 | { atom_op_shl, ATOM_ARG_PLL }, | ||
| 1146 | { atom_op_shl, ATOM_ARG_MC }, | ||
| 1147 | { atom_op_shr, ATOM_ARG_REG }, | ||
| 1148 | { atom_op_shr, ATOM_ARG_PS }, | ||
| 1149 | { atom_op_shr, ATOM_ARG_WS }, | ||
| 1150 | { atom_op_shr, ATOM_ARG_FB }, | ||
| 1151 | { atom_op_shr, ATOM_ARG_PLL }, | ||
| 1152 | { atom_op_shr, ATOM_ARG_MC }, | ||
| 1153 | { atom_op_debug, 0 }, | ||
| 1154 | }; | ||
| 1155 | |||
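The table above is indexed directly by the opcode byte: most operations occupy six consecutive slots, one per operand address space (REG, PS, WS, FB, PLL, MC), and the per-entry arg tells the shared handler which space to use. A miniature model of the same dispatch scheme (the opcode names and numbering here are illustrative, not the real encoding):

```c
#include <stdint.h>
#include <stdio.h>

enum { OP_INVALID, OP_MOVE_REG, OP_MOVE_PS, OP_EOT, OP_CNT };

typedef void (*op_fn)(int arg);

static void op_move(int arg) { printf("move, operand space %d\n", arg); }
static void op_eot(int arg)  { (void)arg; /* end of table: a nop */ }

/* Entry 0 is invalid; the arg column selects the operand space. */
static const struct { op_fn func; int arg; } table[OP_CNT] = {
	[OP_MOVE_REG] = { op_move, 0 },
	[OP_MOVE_PS]  = { op_move, 1 },
	[OP_EOT]      = { op_eot,  0 },
};

/* Mirror of the fetch/dispatch loop in
 * amdgpu_atom_execute_table_locked below. */
static void run(const uint8_t *code)
{
	int ptr = 0;

	for (;;) {
		uint8_t op = code[ptr++];

		if (op == OP_INVALID || op >= OP_CNT || !table[op].func)
			break;          /* bad opcode: bail out */
		table[op].func(table[op].arg);
		if (op == OP_EOT)
			break;          /* normal end of script */
	}
}

int main(void)
{
	const uint8_t script[] = { OP_MOVE_REG, OP_MOVE_PS, OP_EOT };

	run(script);
	return 0;
}
```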
| 1156 | static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params) | ||
| 1157 | { | ||
| 1158 | int base = CU16(ctx->cmd_table + 4 + 2 * index); | ||
| 1159 | int len, ws, ps, ptr; | ||
| 1160 | unsigned char op; | ||
| 1161 | atom_exec_context ectx; | ||
| 1162 | int ret = 0; | ||
| 1163 | |||
| 1164 | if (!base) | ||
| 1165 | return -EINVAL; | ||
| 1166 | |||
| 1167 | len = CU16(base + ATOM_CT_SIZE_PTR); | ||
| 1168 | ws = CU8(base + ATOM_CT_WS_PTR); | ||
| 1169 | ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK; | ||
| 1170 | ptr = base + ATOM_CT_CODE_PTR; | ||
| 1171 | |||
| 1172 | SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); | ||
| 1173 | |||
| 1174 | ectx.ctx = ctx; | ||
| 1175 | ectx.ps_shift = ps / 4; | ||
| 1176 | ectx.start = base; | ||
| 1177 | ectx.ps = params; | ||
| 1178 | ectx.abort = false; | ||
| 1179 | ectx.last_jump = 0; | ||
| 1180 | ectx.ws = ws ? kzalloc(4 * ws, GFP_KERNEL) : NULL; | ||
| 1181 | /* a workspace was requested but could not be allocated */ | ||
| 1182 | if (ws && !ectx.ws) | ||
| 1183 | return -ENOMEM; | ||
| 1184 | |||
| 1185 | debug_depth++; | ||
| 1186 | while (1) { | ||
| 1187 | op = CU8(ptr++); | ||
| 1188 | if (op < ATOM_OP_NAMES_CNT) | ||
| 1189 | SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); | ||
| 1190 | else | ||
| 1191 | SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); | ||
| 1192 | if (ectx.abort) { | ||
| 1193 | DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n", | ||
| 1194 | base, len, ws, ps, ptr - 1); | ||
| 1195 | ret = -EINVAL; | ||
| 1196 | goto free; | ||
| 1197 | } | ||
| 1198 | |||
| 1199 | if (op < ATOM_OP_CNT && op > 0) | ||
| 1200 | opcode_table[op].func(&ectx, &ptr, | ||
| 1201 | opcode_table[op].arg); | ||
| 1202 | else | ||
| 1203 | break; | ||
| 1204 | |||
| 1205 | if (op == ATOM_OP_EOT) | ||
| 1206 | break; | ||
| 1207 | } | ||
| 1208 | debug_depth--; | ||
| 1209 | SDEBUG("<<\n"); | ||
| 1210 | |||
| 1211 | free: | ||
| 1212 | if (ws) | ||
| 1213 | kfree(ectx.ws); | ||
| 1214 | return ret; | ||
| 1215 | } | ||
| 1216 | |||
| 1217 | int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params) | ||
| 1218 | { | ||
| 1219 | int r; | ||
| 1220 | |||
| 1221 | mutex_lock(&ctx->mutex); | ||
| 1222 | /* reset data block */ | ||
| 1223 | ctx->data_block = 0; | ||
| 1224 | /* reset reg block */ | ||
| 1225 | ctx->reg_block = 0; | ||
| 1226 | /* reset fb window */ | ||
| 1227 | ctx->fb_base = 0; | ||
| 1228 | /* reset io mode */ | ||
| 1229 | ctx->io_mode = ATOM_IO_MM; | ||
| 1230 | /* reset divmul */ | ||
| 1231 | ctx->divmul[0] = 0; | ||
| 1232 | ctx->divmul[1] = 0; | ||
| 1233 | r = amdgpu_atom_execute_table_locked(ctx, index, params); | ||
| 1234 | mutex_unlock(&ctx->mutex); | ||
| 1235 | return r; | ||
| 1236 | } | ||
| 1237 | |||
| 1238 | static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; | ||
| 1239 | |||
| 1240 | static void atom_index_iio(struct atom_context *ctx, int base) | ||
| 1241 | { | ||
| 1242 | ctx->iio = kzalloc(2 * 256, GFP_KERNEL); | ||
| 1243 | if (!ctx->iio) | ||
| 1244 | return; | ||
| 1245 | while (CU8(base) == ATOM_IIO_START) { | ||
| 1246 | ctx->iio[CU8(base + 1)] = base + 2; | ||
| 1247 | base += 2; | ||
| 1248 | while (CU8(base) != ATOM_IIO_END) | ||
| 1249 | base += atom_iio_len[CU8(base)]; | ||
| 1250 | base += 3; | ||
| 1251 | } | ||
| 1252 | } | ||
| 1253 | |||
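atom_index_iio builds a per-port directory of the indirect-IO programs: each program opens with ATOM_IIO_START followed by a one-byte port id, and atom_iio_len[] gives each IIO instruction's size so the scan can hop to the ATOM_IIO_END terminator. A standalone sketch of the same walk (index_iio_model is a hypothetical name; index plays the role of ctx->iio):

```c
#include <stdint.h>
#include <string.h>

#define IIO_START 1
#define IIO_END   9

/* Per-instruction sizes, mirroring atom_iio_len[] above
 * (indexed by IIO opcode). */
static const int iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };

/* Record, for each port id, the offset of its program body (the byte
 * after the START opcode and port id). */
static void index_iio_model(const uint8_t *bios, int base, uint16_t index[256])
{
	memset(index, 0, 256 * sizeof(index[0]));
	while (bios[base] == IIO_START) {
		index[bios[base + 1]] = base + 2;  /* program body start */
		base += 2;
		while (bios[base] != IIO_END)
			base += iio_len[bios[base]];   /* skip one instruction */
		base += 3;  /* the END instruction itself is 3 bytes */
	}
}
```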
| 1254 | struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios) | ||
| 1255 | { | ||
| 1256 | int base; | ||
| 1257 | struct atom_context *ctx = | ||
| 1258 | kzalloc(sizeof(struct atom_context), GFP_KERNEL); | ||
| 1259 | char *str; | ||
| 1260 | char name[512]; | ||
| 1261 | int i; | ||
| 1262 | |||
| 1263 | if (!ctx) | ||
| 1264 | return NULL; | ||
| 1265 | |||
| 1266 | ctx->card = card; | ||
| 1267 | ctx->bios = bios; | ||
| 1268 | |||
| 1269 | if (CU16(0) != ATOM_BIOS_MAGIC) { | ||
| 1270 | printk(KERN_INFO "Invalid BIOS magic.\n"); | ||
| 1271 | kfree(ctx); | ||
| 1272 | return NULL; | ||
| 1273 | } | ||
| 1274 | if (strncmp | ||
| 1275 | (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC, | ||
| 1276 | strlen(ATOM_ATI_MAGIC))) { | ||
| 1277 | printk(KERN_INFO "Invalid ATI magic.\n"); | ||
| 1278 | kfree(ctx); | ||
| 1279 | return NULL; | ||
| 1280 | } | ||
| 1281 | |||
| 1282 | base = CU16(ATOM_ROM_TABLE_PTR); | ||
| 1283 | if (strncmp | ||
| 1284 | (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC, | ||
| 1285 | strlen(ATOM_ROM_MAGIC))) { | ||
| 1286 | printk(KERN_INFO "Invalid ATOM magic.\n"); | ||
| 1287 | kfree(ctx); | ||
| 1288 | return NULL; | ||
| 1289 | } | ||
| 1290 | |||
| 1291 | ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR); | ||
| 1292 | ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR); | ||
| 1293 | atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4); | ||
| 1294 | if (!ctx->iio) { | ||
| 1295 | amdgpu_atom_destroy(ctx); | ||
| 1296 | return NULL; | ||
| 1297 | } | ||
| 1298 | |||
| 1299 | str = CSTR(CU16(base + ATOM_ROM_MSG_PTR)); | ||
| 1300 | while (*str && ((*str == '\n') || (*str == '\r'))) | ||
| 1301 | str++; | ||
| 1302 | /* the name string isn't always NUL terminated */ | ||
| 1303 | for (i = 0; i < 511; i++) { | ||
| 1304 | name[i] = str[i]; | ||
| 1305 | if (name[i] < '.' || name[i] > 'z') { | ||
| 1306 | name[i] = 0; | ||
| 1307 | break; | ||
| 1308 | } | ||
| 1309 | } | ||
| 1310 | printk(KERN_INFO "ATOM BIOS: %s\n", name); | ||
| 1311 | |||
| 1312 | return ctx; | ||
| 1313 | } | ||
| 1314 | |||
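Before trusting a BIOS image, amdgpu_atom_parse checks three signatures: the 0xAA55 option-ROM magic at offset 0, the ATI magic string at 0x30, and the "ATOM" string inside the ROM table located via the pointer at 0x48 (offsets per atom.h below). A self-contained sketch of the same checks over an in-memory image:

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define BIOS_MAGIC      0xAA55
#define ATI_MAGIC_PTR   0x30
#define ATI_MAGIC       " 761295520"
#define ROM_TABLE_PTR   0x48
#define ROM_MAGIC_PTR   4
#define ROM_MAGIC       "ATOM"

static uint16_t rd16(const uint8_t *bios, int off)
{
	return bios[off] | (bios[off + 1] << 8);   /* little-endian */
}

/* Standalone model of the signature checks amdgpu_atom_parse performs
 * before it trusts a BIOS image. */
static bool atom_bios_looks_valid(const uint8_t *bios)
{
	int base;

	if (rd16(bios, 0) != BIOS_MAGIC)
		return false;                       /* not a PC option ROM */
	if (strncmp((const char *)bios + ATI_MAGIC_PTR,
		    ATI_MAGIC, strlen(ATI_MAGIC)))
		return false;                       /* not an ATI BIOS */
	base = rd16(bios, ROM_TABLE_PTR);
	if (strncmp((const char *)bios + base + ROM_MAGIC_PTR,
		    ROM_MAGIC, strlen(ROM_MAGIC)))
		return false;                       /* no ATOM ROM table */
	return true;
}
```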
| 1315 | int amdgpu_atom_asic_init(struct atom_context *ctx) | ||
| 1316 | { | ||
| 1317 | int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR); | ||
| 1318 | uint32_t ps[16]; | ||
| 1319 | int ret; | ||
| 1320 | |||
| 1321 | memset(ps, 0, sizeof(ps)); | ||
| 1322 | |||
| 1323 | ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR)); | ||
| 1324 | ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR)); | ||
| 1325 | if (!ps[0] || !ps[1]) | ||
| 1326 | return 1; | ||
| 1327 | |||
| 1328 | if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) | ||
| 1329 | return 1; | ||
| 1330 | ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps); | ||
| 1331 | if (ret) | ||
| 1332 | return ret; | ||
| 1333 | |||
| 1334 | memset(ps, 0, sizeof(ps)); | ||
| 1335 | |||
| 1336 | return ret; | ||
| 1337 | } | ||
| 1338 | |||
| 1339 | void amdgpu_atom_destroy(struct atom_context *ctx) | ||
| 1340 | { | ||
| 1341 | kfree(ctx->iio); | ||
| 1342 | kfree(ctx); | ||
| 1343 | } | ||
| 1344 | |||
| 1345 | bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, | ||
| 1346 | uint16_t *size, uint8_t *frev, uint8_t *crev, | ||
| 1347 | uint16_t *data_start) | ||
| 1348 | { | ||
| 1349 | int offset = index * 2 + 4; | ||
| 1350 | int idx = CU16(ctx->data_table + offset); | ||
| 1351 | u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4); | ||
| 1352 | |||
| 1353 | if (!mdt[index]) | ||
| 1354 | return false; | ||
| 1355 | |||
| 1356 | if (size) | ||
| 1357 | *size = CU16(idx); | ||
| 1358 | if (frev) | ||
| 1359 | *frev = CU8(idx + 2); | ||
| 1360 | if (crev) | ||
| 1361 | *crev = CU8(idx + 3); | ||
| 1362 | *data_start = idx; | ||
| 1363 | return true; | ||
| 1364 | } | ||
| 1365 | |||
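Both master tables share the same layout: a 4-byte header followed by an array of 16-bit offsets, so entry index lives at table + 4 + 2 * index, and a zero offset means that table is absent; every pointed-to table then begins with a common header of u16 size, u8 frev, u8 crev. A standalone model of the lookup (parse_header_model and rd16 are hypothetical names):

```c
#include <stdbool.h>
#include <stdint.h>

static uint16_t rd16(const uint8_t *bios, int off)
{
	return bios[off] | (bios[off + 1] << 8);
}

/* Model of amdgpu_atom_parse_data_header: fetch the u16 offset for
 * 'index' from the master table, then read the common table header
 * (size, frev, crev) at that offset. */
static bool parse_header_model(const uint8_t *bios, int master, int index,
			       uint16_t *size, uint8_t *frev, uint8_t *crev,
			       uint16_t *start)
{
	int idx = rd16(bios, master + 4 + 2 * index);

	if (!idx)
		return false;   /* table not present in this BIOS */
	*size  = rd16(bios, idx);
	*frev  = bios[idx + 2];
	*crev  = bios[idx + 3];
	*start = idx;
	return true;
}
```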
| 1366 | bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, | ||
| 1367 | uint8_t *crev) | ||
| 1368 | { | ||
| 1369 | int offset = index * 2 + 4; | ||
| 1370 | int idx = CU16(ctx->cmd_table + offset); | ||
| 1371 | u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4); | ||
| 1372 | |||
| 1373 | if (!mct[index]) | ||
| 1374 | return false; | ||
| 1375 | |||
| 1376 | if (frev) | ||
| 1377 | *frev = CU8(idx + 2); | ||
| 1378 | if (crev) | ||
| 1379 | *crev = CU8(idx + 3); | ||
| 1380 | return true; | ||
| 1381 | } | ||
| 1382 | |||
| 1383 | int amdgpu_atom_allocate_fb_scratch(struct atom_context *ctx) | ||
| 1384 | { | ||
| 1385 | int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); | ||
| 1386 | uint16_t data_offset; | ||
| 1387 | int usage_bytes = 0; | ||
| 1388 | struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; | ||
| 1389 | |||
| 1390 | if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { | ||
| 1391 | firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); | ||
| 1392 | |||
| 1393 | DRM_DEBUG("atom firmware requested %08x %dkb\n", | ||
| 1394 | le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware), | ||
| 1395 | le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb)); | ||
| 1396 | |||
| 1397 | usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024; | ||
| 1398 | } | ||
| 1399 | ctx->scratch_size_bytes = 0; | ||
| 1400 | if (usage_bytes == 0) | ||
| 1401 | usage_bytes = 20 * 1024; | ||
| 1402 | /* allocate some scratch memory */ | ||
| 1403 | ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); | ||
| 1404 | if (!ctx->scratch) | ||
| 1405 | return -ENOMEM; | ||
| 1406 | ctx->scratch_size_bytes = usage_bytes; | ||
| 1407 | return 0; | ||
| 1408 | } | ||
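The scratch allocation policy above is: honor the firmware's stated VRAM reservation when the VRAM_UsageByFirmware table exists, and fall back to 20 KiB otherwise. A compact userspace sketch of that sizing rule (usage_kb == 0 stands in for a missing or empty table):

```c
#include <stdint.h>
#include <stdlib.h>

/* Model of amdgpu_atom_allocate_fb_scratch's sizing: firmware-stated
 * reservation if available, 20 KiB default otherwise. */
static void *alloc_atom_scratch(uint16_t usage_kb, size_t *out_bytes)
{
	size_t bytes = (size_t)usage_kb * 1024;

	if (bytes == 0)
		bytes = 20 * 1024;          /* default scratch size */
	*out_bytes = bytes;
	return calloc(1, bytes);            /* kzalloc() equivalent */
}
```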
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h new file mode 100644 index 000000000000..09d0f8230708 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/atom.h | |||
| @@ -0,0 +1,159 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Author: Stanislaw Skowronek | ||
| 23 | */ | ||
| 24 | |||
| 25 | #ifndef ATOM_H | ||
| 26 | #define ATOM_H | ||
| 27 | |||
| 28 | #include <linux/types.h> | ||
| 29 | #include <drm/drmP.h> | ||
| 30 | |||
| 31 | #define ATOM_BIOS_MAGIC 0xAA55 | ||
| 32 | #define ATOM_ATI_MAGIC_PTR 0x30 | ||
| 33 | #define ATOM_ATI_MAGIC " 761295520" | ||
| 34 | #define ATOM_ROM_TABLE_PTR 0x48 | ||
| 35 | |||
| 36 | #define ATOM_ROM_MAGIC "ATOM" | ||
| 37 | #define ATOM_ROM_MAGIC_PTR 4 | ||
| 38 | |||
| 39 | #define ATOM_ROM_MSG_PTR 0x10 | ||
| 40 | #define ATOM_ROM_CMD_PTR 0x1E | ||
| 41 | #define ATOM_ROM_DATA_PTR 0x20 | ||
| 42 | |||
| 43 | #define ATOM_CMD_INIT 0 | ||
| 44 | #define ATOM_CMD_SETSCLK 0x0A | ||
| 45 | #define ATOM_CMD_SETMCLK 0x0B | ||
| 46 | #define ATOM_CMD_SETPCLK 0x0C | ||
| 47 | #define ATOM_CMD_SPDFANCNTL 0x39 | ||
| 48 | |||
| 49 | #define ATOM_DATA_FWI_PTR 0xC | ||
| 50 | #define ATOM_DATA_IIO_PTR 0x32 | ||
| 51 | |||
| 52 | #define ATOM_FWI_DEFSCLK_PTR 8 | ||
| 53 | #define ATOM_FWI_DEFMCLK_PTR 0xC | ||
| 54 | #define ATOM_FWI_MAXSCLK_PTR 0x24 | ||
| 55 | #define ATOM_FWI_MAXMCLK_PTR 0x28 | ||
| 56 | |||
| 57 | #define ATOM_CT_SIZE_PTR 0 | ||
| 58 | #define ATOM_CT_WS_PTR 4 | ||
| 59 | #define ATOM_CT_PS_PTR 5 | ||
| 60 | #define ATOM_CT_PS_MASK 0x7F | ||
| 61 | #define ATOM_CT_CODE_PTR 6 | ||
| 62 | |||
| 63 | #define ATOM_OP_CNT 123 | ||
| 64 | #define ATOM_OP_EOT 91 | ||
| 65 | |||
| 66 | #define ATOM_CASE_MAGIC 0x63 | ||
| 67 | #define ATOM_CASE_END 0x5A5A | ||
| 68 | |||
| 69 | #define ATOM_ARG_REG 0 | ||
| 70 | #define ATOM_ARG_PS 1 | ||
| 71 | #define ATOM_ARG_WS 2 | ||
| 72 | #define ATOM_ARG_FB 3 | ||
| 73 | #define ATOM_ARG_ID 4 | ||
| 74 | #define ATOM_ARG_IMM 5 | ||
| 75 | #define ATOM_ARG_PLL 6 | ||
| 76 | #define ATOM_ARG_MC 7 | ||
| 77 | |||
| 78 | #define ATOM_SRC_DWORD 0 | ||
| 79 | #define ATOM_SRC_WORD0 1 | ||
| 80 | #define ATOM_SRC_WORD8 2 | ||
| 81 | #define ATOM_SRC_WORD16 3 | ||
| 82 | #define ATOM_SRC_BYTE0 4 | ||
| 83 | #define ATOM_SRC_BYTE8 5 | ||
| 84 | #define ATOM_SRC_BYTE16 6 | ||
| 85 | #define ATOM_SRC_BYTE24 7 | ||
| 86 | |||
| 87 | #define ATOM_WS_QUOTIENT 0x40 | ||
| 88 | #define ATOM_WS_REMAINDER 0x41 | ||
| 89 | #define ATOM_WS_DATAPTR 0x42 | ||
| 90 | #define ATOM_WS_SHIFT 0x43 | ||
| 91 | #define ATOM_WS_OR_MASK 0x44 | ||
| 92 | #define ATOM_WS_AND_MASK 0x45 | ||
| 93 | #define ATOM_WS_FB_WINDOW 0x46 | ||
| 94 | #define ATOM_WS_ATTRIBUTES 0x47 | ||
| 95 | #define ATOM_WS_REGPTR 0x48 | ||
| 96 | |||
| 97 | #define ATOM_IIO_NOP 0 | ||
| 98 | #define ATOM_IIO_START 1 | ||
| 99 | #define ATOM_IIO_READ 2 | ||
| 100 | #define ATOM_IIO_WRITE 3 | ||
| 101 | #define ATOM_IIO_CLEAR 4 | ||
| 102 | #define ATOM_IIO_SET 5 | ||
| 103 | #define ATOM_IIO_MOVE_INDEX 6 | ||
| 104 | #define ATOM_IIO_MOVE_ATTR 7 | ||
| 105 | #define ATOM_IIO_MOVE_DATA 8 | ||
| 106 | #define ATOM_IIO_END 9 | ||
| 107 | |||
| 108 | #define ATOM_IO_MM 0 | ||
| 109 | #define ATOM_IO_PCI 1 | ||
| 110 | #define ATOM_IO_SYSIO 2 | ||
| 111 | #define ATOM_IO_IIO 0x80 | ||
| 112 | |||
| 113 | struct card_info { | ||
| 114 | struct drm_device *dev; | ||
| 115 | void (*reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ | ||
| 116 | uint32_t (*reg_read)(struct card_info *, uint32_t); /* filled by driver */ | ||
| 117 | void (*ioreg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ | ||
| 118 | uint32_t (*ioreg_read)(struct card_info *, uint32_t); /* filled by driver */ | ||
| 119 | void (*mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ | ||
| 120 | uint32_t (*mc_read)(struct card_info *, uint32_t); /* filled by driver */ | ||
| 121 | void (*pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ | ||
| 122 | uint32_t (*pll_read)(struct card_info *, uint32_t); /* filled by driver */ | ||
| 123 | }; | ||
| 124 | |||
| 125 | struct atom_context { | ||
| 126 | struct card_info *card; | ||
| 127 | struct mutex mutex; | ||
| 128 | void *bios; | ||
| 129 | uint32_t cmd_table, data_table; | ||
| 130 | uint16_t *iio; | ||
| 131 | |||
| 132 | uint16_t data_block; | ||
| 133 | uint32_t fb_base; | ||
| 134 | uint32_t divmul[2]; | ||
| 135 | uint16_t io_attr; | ||
| 136 | uint16_t reg_block; | ||
| 137 | uint8_t shift; | ||
| 138 | int cs_equal, cs_above; | ||
| 139 | int io_mode; | ||
| 140 | uint32_t *scratch; | ||
| 141 | int scratch_size_bytes; | ||
| 142 | }; | ||
| 143 | |||
| 144 | extern int amdgpu_atom_debug; | ||
| 145 | |||
| 146 | struct atom_context *amdgpu_atom_parse(struct card_info *, void *); | ||
| 147 | int amdgpu_atom_execute_table(struct atom_context *, int, uint32_t *); | ||
| 148 | int amdgpu_atom_asic_init(struct atom_context *); | ||
| 149 | void amdgpu_atom_destroy(struct atom_context *); | ||
| 150 | bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, | ||
| 151 | uint8_t *frev, uint8_t *crev, uint16_t *data_start); | ||
| 152 | bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, | ||
| 153 | uint8_t *frev, uint8_t *crev); | ||
| 154 | int amdgpu_atom_allocate_fb_scratch(struct atom_context *ctx); | ||
| 155 | #include "atom-types.h" | ||
| 156 | #include "atombios.h" | ||
| 157 | #include "ObjectID.h" | ||
| 158 | |||
| 159 | #endif | ||
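The interpreter in atom.c never touches hardware directly: every register, IO-register, MC, and PLL access is routed through the callbacks the driver installs in struct card_info above, which is what lets the same bytecode engine serve different asics. A sketch of a fake backend of the kind one might use to exercise scripts off-hardware (fake_card and the function names are hypothetical, not driver API):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a card_info backend: a plain array acts
 * as the register file, so scripted writes can be traced and read
 * back without a GPU. */
struct fake_card {
	uint32_t regs[0x10000];
};

static uint32_t fake_reg_read(struct fake_card *c, uint32_t reg)
{
	return c->regs[reg & 0xFFFF];
}

static void fake_reg_write(struct fake_card *c, uint32_t reg, uint32_t val)
{
	c->regs[reg & 0xFFFF] = val;
	printf("WREG 0x%04X <- 0x%08X\n", reg & 0xFFFF, val);
}

int main(void)
{
	static struct fake_card card;   /* static: the register file is large */
	uint32_t v;

	fake_reg_write(&card, 0x1740, 0x00FF00FF);
	v = fake_reg_read(&card, 0x1740);
	return v == 0x00FF00FF ? 0 : 1;
}
```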
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c new file mode 100644 index 000000000000..49aa35016653 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | |||
| @@ -0,0 +1,807 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2007-8 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the "Software"), | ||
| 7 | * to deal in the Software without restriction, including without limitation | ||
| 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 10 | * Software is furnished to do so, subject to the following conditions: | ||
| 11 | * | ||
| 12 | * The above copyright notice and this permission notice shall be included in | ||
| 13 | * all copies or substantial portions of the Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 22 | * | ||
| 23 | * Authors: Dave Airlie | ||
| 24 | * Alex Deucher | ||
| 25 | */ | ||
| 26 | #include <drm/drmP.h> | ||
| 27 | #include <drm/drm_crtc_helper.h> | ||
| 28 | #include <drm/amdgpu_drm.h> | ||
| 29 | #include <drm/drm_fixed.h> | ||
| 30 | #include "amdgpu.h" | ||
| 31 | #include "atom.h" | ||
| 32 | #include "atom-bits.h" | ||
| 33 | #include "atombios_encoders.h" | ||
| 34 | #include "amdgpu_atombios.h" | ||
| 35 | #include "amdgpu_pll.h" | ||
| 36 | #include "amdgpu_connectors.h" | ||
| 37 | |||
| 38 | void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc, | ||
| 39 | struct drm_display_mode *mode, | ||
| 40 | struct drm_display_mode *adjusted_mode) | ||
| 41 | { | ||
| 42 | struct drm_device *dev = crtc->dev; | ||
| 43 | struct amdgpu_device *adev = dev->dev_private; | ||
| 44 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 45 | SET_CRTC_OVERSCAN_PS_ALLOCATION args; | ||
| 46 | int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); | ||
| 47 | int a1, a2; | ||
| 48 | |||
| 49 | memset(&args, 0, sizeof(args)); | ||
| 50 | |||
| 51 | args.ucCRTC = amdgpu_crtc->crtc_id; | ||
| 52 | |||
| 53 | switch (amdgpu_crtc->rmx_type) { | ||
| 54 | case RMX_CENTER: | ||
| 55 | args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); | ||
| 56 | args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); | ||
| 57 | args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); | ||
| 58 | args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); | ||
| 59 | break; | ||
| 60 | case RMX_ASPECT: | ||
| 61 | a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; | ||
| 62 | a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; | ||
| 63 | |||
| 64 | if (a1 > a2) { | ||
| 65 | args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); | ||
| 66 | args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); | ||
| 67 | } else if (a2 > a1) { | ||
| 68 | args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); | ||
| 69 | args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); | ||
| 70 | } | ||
| 71 | break; | ||
| 72 | case RMX_FULL: | ||
| 73 | default: | ||
| 74 | args.usOverscanRight = cpu_to_le16(amdgpu_crtc->h_border); | ||
| 75 | args.usOverscanLeft = cpu_to_le16(amdgpu_crtc->h_border); | ||
| 76 | args.usOverscanBottom = cpu_to_le16(amdgpu_crtc->v_border); | ||
| 77 | args.usOverscanTop = cpu_to_le16(amdgpu_crtc->v_border); | ||
| 78 | break; | ||
| 79 | } | ||
| 80 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 81 | } | ||
| 82 | |||
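In the RMX_ASPECT branch above, comparing the cross products a1 = src_v * dst_h and a2 = dst_v * src_h compares the panel and source aspect ratios without any division; whichever dimension has slack gets split evenly into two borders. A worked example with assumed numbers, 1280x1024 content on a 1920x1080 panel:

```c
#include <stdio.h>

/* Worked model of the RMX_ASPECT overscan math. a1 > a2 means the
 * panel is wider than the source, so the left/right edges get equal
 * black borders (pillarbox); a2 > a1 letterboxes instead. */
int main(void)
{
	int src_h = 1280, src_v = 1024;     /* mode->crtc_{h,v}display */
	int dst_h = 1920, dst_v = 1080;     /* adjusted_mode (panel)   */
	int a1 = src_v * dst_h;             /* 1024 * 1920 = 1966080 */
	int a2 = dst_v * src_h;             /* 1080 * 1280 = 1382400 */

	if (a1 > a2)                         /* panel wider: pillarbox */
		printf("left/right overscan: %d\n",
		       (dst_h - a2 / src_v) / 2);   /* (1920 - 1350) / 2 = 285 */
	else if (a2 > a1)                    /* panel taller: letterbox */
		printf("top/bottom overscan: %d\n",
		       (dst_v - a1 / src_h) / 2);
	return 0;
}
```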
| 83 | void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc) | ||
| 84 | { | ||
| 85 | struct drm_device *dev = crtc->dev; | ||
| 86 | struct amdgpu_device *adev = dev->dev_private; | ||
| 87 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 88 | ENABLE_SCALER_PS_ALLOCATION args; | ||
| 89 | int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); | ||
| 90 | |||
| 91 | memset(&args, 0, sizeof(args)); | ||
| 92 | |||
| 93 | args.ucScaler = amdgpu_crtc->crtc_id; | ||
| 94 | |||
| 95 | switch (amdgpu_crtc->rmx_type) { | ||
| 96 | case RMX_FULL: | ||
| 97 | args.ucEnable = ATOM_SCALER_EXPANSION; | ||
| 98 | break; | ||
| 99 | case RMX_CENTER: | ||
| 100 | args.ucEnable = ATOM_SCALER_CENTER; | ||
| 101 | break; | ||
| 102 | case RMX_ASPECT: | ||
| 103 | args.ucEnable = ATOM_SCALER_EXPANSION; | ||
| 104 | break; | ||
| 105 | default: | ||
| 106 | args.ucEnable = ATOM_SCALER_DISABLE; | ||
| 107 | break; | ||
| 108 | } | ||
| 109 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 110 | } | ||
| 111 | |||
| 112 | void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock) | ||
| 113 | { | ||
| 114 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 115 | struct drm_device *dev = crtc->dev; | ||
| 116 | struct amdgpu_device *adev = dev->dev_private; | ||
| 117 | int index = | ||
| 118 | GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters); | ||
| 119 | ENABLE_CRTC_PS_ALLOCATION args; | ||
| 120 | |||
| 121 | memset(&args, 0, sizeof(args)); | ||
| 122 | |||
| 123 | args.ucCRTC = amdgpu_crtc->crtc_id; | ||
| 124 | args.ucEnable = lock; | ||
| 125 | |||
| 126 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 127 | } | ||
| 128 | |||
| 129 | void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state) | ||
| 130 | { | ||
| 131 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 132 | struct drm_device *dev = crtc->dev; | ||
| 133 | struct amdgpu_device *adev = dev->dev_private; | ||
| 134 | int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC); | ||
| 135 | ENABLE_CRTC_PS_ALLOCATION args; | ||
| 136 | |||
| 137 | memset(&args, 0, sizeof(args)); | ||
| 138 | |||
| 139 | args.ucCRTC = amdgpu_crtc->crtc_id; | ||
| 140 | args.ucEnable = state; | ||
| 141 | |||
| 142 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 143 | } | ||
| 144 | |||
| 145 | void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state) | ||
| 146 | { | ||
| 147 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 148 | struct drm_device *dev = crtc->dev; | ||
| 149 | struct amdgpu_device *adev = dev->dev_private; | ||
| 150 | int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC); | ||
| 151 | BLANK_CRTC_PS_ALLOCATION args; | ||
| 152 | |||
| 153 | memset(&args, 0, sizeof(args)); | ||
| 154 | |||
| 155 | args.ucCRTC = amdgpu_crtc->crtc_id; | ||
| 156 | args.ucBlanking = state; | ||
| 157 | |||
| 158 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 159 | } | ||
| 160 | |||
| 161 | void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) | ||
| 162 | { | ||
| 163 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 164 | struct drm_device *dev = crtc->dev; | ||
| 165 | struct amdgpu_device *adev = dev->dev_private; | ||
| 166 | int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); | ||
| 167 | ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; | ||
| 168 | |||
| 169 | memset(&args, 0, sizeof(args)); | ||
| 170 | |||
| 171 | args.ucDispPipeId = amdgpu_crtc->crtc_id; | ||
| 172 | args.ucEnable = state; | ||
| 173 | |||
| 174 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 175 | } | ||
| 176 | |||
| 177 | void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) | ||
| 178 | { | ||
| 179 | int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); | ||
| 180 | ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; | ||
| 181 | |||
| 182 | memset(&args, 0, sizeof(args)); | ||
| 183 | |||
| 184 | args.ucEnable = ATOM_INIT; | ||
| 185 | |||
| 186 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 187 | } | ||
| 188 | |||
| 189 | void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc, | ||
| 190 | struct drm_display_mode *mode) | ||
| 191 | { | ||
| 192 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 193 | struct drm_device *dev = crtc->dev; | ||
| 194 | struct amdgpu_device *adev = dev->dev_private; | ||
| 195 | SET_CRTC_USING_DTD_TIMING_PARAMETERS args; | ||
| 196 | int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming); | ||
| 197 | u16 misc = 0; | ||
| 198 | |||
| 199 | memset(&args, 0, sizeof(args)); | ||
| 200 | args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (amdgpu_crtc->h_border * 2)); | ||
| 201 | args.usH_Blanking_Time = | ||
| 202 | cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (amdgpu_crtc->h_border * 2)); | ||
| 203 | args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (amdgpu_crtc->v_border * 2)); | ||
| 204 | args.usV_Blanking_Time = | ||
| 205 | cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (amdgpu_crtc->v_border * 2)); | ||
| 206 | args.usH_SyncOffset = | ||
| 207 | cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + amdgpu_crtc->h_border); | ||
| 208 | args.usH_SyncWidth = | ||
| 209 | cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start); | ||
| 210 | args.usV_SyncOffset = | ||
| 211 | cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + amdgpu_crtc->v_border); | ||
| 212 | args.usV_SyncWidth = | ||
| 213 | cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); | ||
| 214 | args.ucH_Border = amdgpu_crtc->h_border; | ||
| 215 | args.ucV_Border = amdgpu_crtc->v_border; | ||
| 216 | |||
| 217 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
| 218 | misc |= ATOM_VSYNC_POLARITY; | ||
| 219 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
| 220 | misc |= ATOM_HSYNC_POLARITY; | ||
| 221 | if (mode->flags & DRM_MODE_FLAG_CSYNC) | ||
| 222 | misc |= ATOM_COMPOSITESYNC; | ||
| 223 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
| 224 | misc |= ATOM_INTERLACE; | ||
| 225 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 226 | misc |= ATOM_DOUBLE_CLOCK_MODE; | ||
| 227 | |||
| 228 | args.susModeMiscInfo.usAccess = cpu_to_le16(misc); | ||
| 229 | args.ucCRTC = amdgpu_crtc->crtc_id; | ||
| 230 | |||
| 231 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 232 | } | ||
| 233 | |||
| 234 | union atom_enable_ss { | ||
| 235 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; | ||
| 236 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2; | ||
| 237 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3; | ||
| 238 | }; | ||
| 239 | |||
| 240 | static void amdgpu_atombios_crtc_program_ss(struct amdgpu_device *adev, | ||
| 241 | int enable, | ||
| 242 | int pll_id, | ||
| 243 | int crtc_id, | ||
| 244 | struct amdgpu_atom_ss *ss) | ||
| 245 | { | ||
| 246 | unsigned i; | ||
| 247 | int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); | ||
| 248 | union atom_enable_ss args; | ||
| 249 | |||
| 250 | if (enable) { | ||
| 251 | /* Don't mess with SS if the percentage is 0 or the ss is external. | ||
| 252 | * SS was already disabled earlier, and disabling it | ||
| 253 | * again can cause display problems if the pll is already | ||
| 254 | * programmed. | ||
| 255 | */ | ||
| 256 | if (ss->percentage == 0) | ||
| 257 | return; | ||
| 258 | if (ss->type & ATOM_EXTERNAL_SS_MASK) | ||
| 259 | return; | ||
| 260 | } else { | ||
| 261 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
| 262 | if (adev->mode_info.crtcs[i] && | ||
| 263 | adev->mode_info.crtcs[i]->enabled && | ||
| 264 | i != crtc_id && | ||
| 265 | pll_id == adev->mode_info.crtcs[i]->pll_id) { | ||
| 266 | /* another crtc is using this pll; don't turn | ||
| 267 | * off spread spectrum, as that might turn off | ||
| 268 | * the display on the active crtc | ||
| 269 | */ | ||
| 270 | return; | ||
| 271 | } | ||
| 272 | } | ||
| 273 | } | ||
| 274 | |||
| 275 | memset(&args, 0, sizeof(args)); | ||
| 276 | |||
| 277 | args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0); | ||
| 278 | args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; | ||
| 279 | switch (pll_id) { | ||
| 280 | case ATOM_PPLL1: | ||
| 281 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; | ||
| 282 | break; | ||
| 283 | case ATOM_PPLL2: | ||
| 284 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL; | ||
| 285 | break; | ||
| 286 | case ATOM_DCPLL: | ||
| 287 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL; | ||
| 288 | break; | ||
| 289 | case ATOM_PPLL_INVALID: | ||
| 290 | return; | ||
| 291 | } | ||
| 292 | args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); | ||
| 293 | args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); | ||
| 294 | args.v3.ucEnable = enable; | ||
| 295 | |||
| 296 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 297 | } | ||
| 298 | |||
| 299 | union adjust_pixel_clock { | ||
| 300 | ADJUST_DISPLAY_PLL_PS_ALLOCATION v1; | ||
| 301 | ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3; | ||
| 302 | }; | ||
| 303 | |||
| 304 | static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc, | ||
| 305 | struct drm_display_mode *mode) | ||
| 306 | { | ||
| 307 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 308 | struct drm_device *dev = crtc->dev; | ||
| 309 | struct amdgpu_device *adev = dev->dev_private; | ||
| 310 | struct drm_encoder *encoder = amdgpu_crtc->encoder; | ||
| 311 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 312 | struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); | ||
| 313 | u32 adjusted_clock = mode->clock; | ||
| 314 | int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(encoder); | ||
| 315 | u32 dp_clock = mode->clock; | ||
| 316 | u32 clock = mode->clock; | ||
| 317 | int bpc = amdgpu_crtc->bpc; | ||
| 318 | bool is_duallink = amdgpu_dig_monitor_is_duallink(encoder, mode->clock); | ||
| 319 | union adjust_pixel_clock args; | ||
| 320 | u8 frev, crev; | ||
| 321 | int index; | ||
| 322 | |||
| 323 | amdgpu_crtc->pll_flags = AMDGPU_PLL_USE_FRAC_FB_DIV; | ||
| 324 | |||
| 325 | if ((amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || | ||
| 326 | (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { | ||
| 327 | if (connector) { | ||
| 328 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 329 | struct amdgpu_connector_atom_dig *dig_connector = | ||
| 330 | amdgpu_connector->con_priv; | ||
| 331 | |||
| 332 | dp_clock = dig_connector->dp_clock; | ||
| 333 | } | ||
| 334 | } | ||
| 335 | |||
| 336 | /* use recommended ref_div for ss */ | ||
| 337 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
| 338 | if (amdgpu_crtc->ss_enabled) { | ||
| 339 | if (amdgpu_crtc->ss.refdiv) { | ||
| 340 | amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_REF_DIV; | ||
| 341 | amdgpu_crtc->pll_reference_div = amdgpu_crtc->ss.refdiv; | ||
| 342 | amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV; | ||
| 343 | } | ||
| 344 | } | ||
| 345 | } | ||
| 346 | |||
| 347 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | ||
| 348 | if (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | ||
| 349 | adjusted_clock = mode->clock * 2; | ||
| 350 | if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | ||
| 351 | amdgpu_crtc->pll_flags |= AMDGPU_PLL_PREFER_CLOSEST_LOWER; | ||
| 352 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
| 353 | amdgpu_crtc->pll_flags |= AMDGPU_PLL_IS_LCD; | ||
| 354 | |||
| 355 | |||
| 356 | /* adjust pll for deep color modes */ | ||
| 357 | if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { | ||
| 358 | switch (bpc) { | ||
| 359 | case 8: | ||
| 360 | default: | ||
| 361 | break; | ||
| 362 | case 10: | ||
| 363 | clock = (clock * 5) / 4; | ||
| 364 | break; | ||
| 365 | case 12: | ||
| 366 | clock = (clock * 3) / 2; | ||
| 367 | break; | ||
| 368 | case 16: | ||
| 369 | clock = clock * 2; | ||
| 370 | break; | ||
| 371 | } | ||
| 372 | } | ||
| 373 | |||
| 374 | /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock | ||
| 375 | * accordingly based on the encoder/transmitter to work around | ||
| 376 | * special hw requirements. | ||
| 377 | */ | ||
| 378 | index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); | ||
| 379 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, | ||
| 380 | &crev)) | ||
| 381 | return adjusted_clock; | ||
| 382 | |||
| 383 | memset(&args, 0, sizeof(args)); | ||
| 384 | |||
| 385 | switch (frev) { | ||
| 386 | case 1: | ||
| 387 | switch (crev) { | ||
| 388 | case 1: | ||
| 389 | case 2: | ||
| 390 | args.v1.usPixelClock = cpu_to_le16(clock / 10); | ||
| 391 | args.v1.ucTransmitterID = amdgpu_encoder->encoder_id; | ||
| 392 | args.v1.ucEncodeMode = encoder_mode; | ||
| 393 | if (amdgpu_crtc->ss_enabled && amdgpu_crtc->ss.percentage) | ||
| 394 | args.v1.ucConfig |= | ||
| 395 | ADJUST_DISPLAY_CONFIG_SS_ENABLE; | ||
| 396 | |||
| 397 | amdgpu_atom_execute_table(adev->mode_info.atom_context, | ||
| 398 | index, (uint32_t *)&args); | ||
| 399 | adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; | ||
| 400 | break; | ||
| 401 | case 3: | ||
| 402 | args.v3.sInput.usPixelClock = cpu_to_le16(clock / 10); | ||
| 403 | args.v3.sInput.ucTransmitterID = amdgpu_encoder->encoder_id; | ||
| 404 | args.v3.sInput.ucEncodeMode = encoder_mode; | ||
| 405 | args.v3.sInput.ucDispPllConfig = 0; | ||
| 406 | if (amdgpu_crtc->ss_enabled && amdgpu_crtc->ss.percentage) | ||
| 407 | args.v3.sInput.ucDispPllConfig |= | ||
| 408 | DISPPLL_CONFIG_SS_ENABLE; | ||
| 409 | if (ENCODER_MODE_IS_DP(encoder_mode)) { | ||
| 410 | args.v3.sInput.ucDispPllConfig |= | ||
| 411 | DISPPLL_CONFIG_COHERENT_MODE; | ||
| 412 | /* DP link clock / 10: 16200 or 27000 */ | ||
| 413 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
| 414 | } else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | ||
| 415 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
| 416 | if (dig->coherent_mode) | ||
| 417 | args.v3.sInput.ucDispPllConfig |= | ||
| 418 | DISPPLL_CONFIG_COHERENT_MODE; | ||
| 419 | if (is_duallink) | ||
| 420 | args.v3.sInput.ucDispPllConfig |= | ||
| 421 | DISPPLL_CONFIG_DUAL_LINK; | ||
| 422 | } | ||
| 423 | if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != | ||
| 424 | ENCODER_OBJECT_ID_NONE) | ||
| 425 | args.v3.sInput.ucExtTransmitterID = | ||
| 426 | amdgpu_encoder_get_dp_bridge_encoder_id(encoder); | ||
| 427 | else | ||
| 428 | args.v3.sInput.ucExtTransmitterID = 0; | ||
| 429 | |||
| 430 | amdgpu_atom_execute_table(adev->mode_info.atom_context, | ||
| 431 | index, (uint32_t *)&args); | ||
| 432 | adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; | ||
| 433 | if (args.v3.sOutput.ucRefDiv) { | ||
| 434 | amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV; | ||
| 435 | amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_REF_DIV; | ||
| 436 | amdgpu_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv; | ||
| 437 | } | ||
| 438 | if (args.v3.sOutput.ucPostDiv) { | ||
| 439 | amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV; | ||
| 440 | amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_POST_DIV; | ||
| 441 | amdgpu_crtc->pll_post_div = args.v3.sOutput.ucPostDiv; | ||
| 442 | } | ||
| 443 | break; | ||
| 444 | default: | ||
| 445 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); | ||
| 446 | return adjusted_clock; | ||
| 447 | } | ||
| 448 | break; | ||
| 449 | default: | ||
| 450 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); | ||
| 451 | return adjusted_clock; | ||
| 452 | } | ||
| 453 | |||
| 454 | return adjusted_clock; | ||
| 455 | } | ||
| 456 | |||
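The deep-color adjustment above scales the HDMI pixel clock by bpc/8, since 10-, 12-, and 16-bit channels carry proportionally more data per pixel. A worked sketch using 1080p60's 148500 kHz clock as the assumed input:

```c
#include <stdio.h>

/* Mirrors the bpc switch in amdgpu_atombios_crtc_adjust_pll:
 * the pixel clock grows by the ratio of bits per channel to 8. */
static unsigned deep_color_clock(unsigned clock_khz, int bpc)
{
	switch (bpc) {
	case 10: return clock_khz * 5 / 4;   /* 30-bit pixels */
	case 12: return clock_khz * 3 / 2;   /* 36-bit pixels */
	case 16: return clock_khz * 2;       /* 48-bit pixels */
	default: return clock_khz;           /* 8 bpc: unchanged */
	}
}

int main(void)
{
	printf("%u %u %u\n",
	       deep_color_clock(148500, 8),    /* 148500 */
	       deep_color_clock(148500, 10),   /* 185625 */
	       deep_color_clock(148500, 12));  /* 222750 */
	return 0;
}
```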
| 457 | union set_pixel_clock { | ||
| 458 | SET_PIXEL_CLOCK_PS_ALLOCATION base; | ||
| 459 | PIXEL_CLOCK_PARAMETERS v1; | ||
| 460 | PIXEL_CLOCK_PARAMETERS_V2 v2; | ||
| 461 | PIXEL_CLOCK_PARAMETERS_V3 v3; | ||
| 462 | PIXEL_CLOCK_PARAMETERS_V5 v5; | ||
| 463 | PIXEL_CLOCK_PARAMETERS_V6 v6; | ||
| 464 | }; | ||
| 465 | |||
| 466 | /* on DCE5, make sure the voltage is high enough to support the | ||
| 467 | * required disp clk. | ||
| 468 | */ | ||
| 469 | void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev, | ||
| 470 | u32 dispclk) | ||
| 471 | { | ||
| 472 | u8 frev, crev; | ||
| 473 | int index; | ||
| 474 | union set_pixel_clock args; | ||
| 475 | |||
| 476 | memset(&args, 0, sizeof(args)); | ||
| 477 | |||
| 478 | index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); | ||
| 479 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, | ||
| 480 | &crev)) | ||
| 481 | return; | ||
| 482 | |||
| 483 | switch (frev) { | ||
| 484 | case 1: | ||
| 485 | switch (crev) { | ||
| 486 | case 5: | ||
| 487 | /* if the default dcpll clock is specified, | ||
| 488 | * SetPixelClock provides the dividers | ||
| 489 | */ | ||
| 490 | args.v5.ucCRTC = ATOM_CRTC_INVALID; | ||
| 491 | args.v5.usPixelClock = cpu_to_le16(dispclk); | ||
| 492 | args.v5.ucPpll = ATOM_DCPLL; | ||
| 493 | break; | ||
| 494 | case 6: | ||
| 495 | /* if the default dcpll clock is specified, | ||
| 496 | * SetPixelClock provides the dividers | ||
| 497 | */ | ||
| 498 | args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk); | ||
| 499 | args.v6.ucPpll = ATOM_EXT_PLL1; | ||
| 500 | break; | ||
| 501 | default: | ||
| 502 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); | ||
| 503 | return; | ||
| 504 | } | ||
| 505 | break; | ||
| 506 | default: | ||
| 507 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); | ||
| 508 | return; | ||
| 509 | } | ||
| 510 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 511 | } | ||
| 512 | |||
| 513 | static bool is_pixel_clock_source_from_pll(u32 encoder_mode, int pll_id) | ||
| 514 | { | ||
| 515 | if (ENCODER_MODE_IS_DP(encoder_mode)) { | ||
| 516 | if (pll_id < ATOM_EXT_PLL1) | ||
| 517 | return true; | ||
| 518 | else | ||
| 519 | return false; | ||
| 520 | } else { | ||
| 521 | return true; | ||
| 522 | } | ||
| 523 | } | ||
| 524 | |||
| 525 | void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc, | ||
| 526 | u32 crtc_id, | ||
| 527 | int pll_id, | ||
| 528 | u32 encoder_mode, | ||
| 529 | u32 encoder_id, | ||
| 530 | u32 clock, | ||
| 531 | u32 ref_div, | ||
| 532 | u32 fb_div, | ||
| 533 | u32 frac_fb_div, | ||
| 534 | u32 post_div, | ||
| 535 | int bpc, | ||
| 536 | bool ss_enabled, | ||
| 537 | struct amdgpu_atom_ss *ss) | ||
| 538 | { | ||
| 539 | struct drm_device *dev = crtc->dev; | ||
| 540 | struct amdgpu_device *adev = dev->dev_private; | ||
| 541 | u8 frev, crev; | ||
| 542 | int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); | ||
| 543 | union set_pixel_clock args; | ||
| 544 | |||
| 545 | memset(&args, 0, sizeof(args)); | ||
| 546 | |||
| 547 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, | ||
| 548 | &crev)) | ||
| 549 | return; | ||
| 550 | |||
| 551 | switch (frev) { | ||
| 552 | case 1: | ||
| 553 | switch (crev) { | ||
| 554 | case 1: | ||
| 555 | if (clock == ATOM_DISABLE) | ||
| 556 | return; | ||
| 557 | args.v1.usPixelClock = cpu_to_le16(clock / 10); | ||
| 558 | args.v1.usRefDiv = cpu_to_le16(ref_div); | ||
| 559 | args.v1.usFbDiv = cpu_to_le16(fb_div); | ||
| 560 | args.v1.ucFracFbDiv = frac_fb_div; | ||
| 561 | args.v1.ucPostDiv = post_div; | ||
| 562 | args.v1.ucPpll = pll_id; | ||
| 563 | args.v1.ucCRTC = crtc_id; | ||
| 564 | args.v1.ucRefDivSrc = 1; | ||
| 565 | break; | ||
| 566 | case 2: | ||
| 567 | args.v2.usPixelClock = cpu_to_le16(clock / 10); | ||
| 568 | args.v2.usRefDiv = cpu_to_le16(ref_div); | ||
| 569 | args.v2.usFbDiv = cpu_to_le16(fb_div); | ||
| 570 | args.v2.ucFracFbDiv = frac_fb_div; | ||
| 571 | args.v2.ucPostDiv = post_div; | ||
| 572 | args.v2.ucPpll = pll_id; | ||
| 573 | args.v2.ucCRTC = crtc_id; | ||
| 574 | args.v2.ucRefDivSrc = 1; | ||
| 575 | break; | ||
| 576 | case 3: | ||
| 577 | args.v3.usPixelClock = cpu_to_le16(clock / 10); | ||
| 578 | args.v3.usRefDiv = cpu_to_le16(ref_div); | ||
| 579 | args.v3.usFbDiv = cpu_to_le16(fb_div); | ||
| 580 | args.v3.ucFracFbDiv = frac_fb_div; | ||
| 581 | args.v3.ucPostDiv = post_div; | ||
| 582 | args.v3.ucPpll = pll_id; | ||
| 583 | if (crtc_id == ATOM_CRTC2) | ||
| 584 | args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2; | ||
| 585 | else | ||
| 586 | args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1; | ||
| 587 | if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) | ||
| 588 | args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC; | ||
| 589 | args.v3.ucTransmitterId = encoder_id; | ||
| 590 | args.v3.ucEncoderMode = encoder_mode; | ||
| 591 | break; | ||
| 592 | case 5: | ||
| 593 | args.v5.ucCRTC = crtc_id; | ||
| 594 | args.v5.usPixelClock = cpu_to_le16(clock / 10); | ||
| 595 | args.v5.ucRefDiv = ref_div; | ||
| 596 | args.v5.usFbDiv = cpu_to_le16(fb_div); | ||
| 597 | args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); | ||
| 598 | args.v5.ucPostDiv = post_div; | ||
| 599 | args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */ | ||
| 600 | if ((ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) && | ||
| 601 | (pll_id < ATOM_EXT_PLL1)) | ||
| 602 | args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC; | ||
| 603 | if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { | ||
| 604 | switch (bpc) { | ||
| 605 | case 8: | ||
| 606 | default: | ||
| 607 | args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP; | ||
| 608 | break; | ||
| 609 | case 10: | ||
| 610 | /* yes this is correct, the atom define is wrong */ | ||
| 611 | args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP; | ||
| 612 | break; | ||
| 613 | case 12: | ||
| 614 | /* yes this is correct, the atom define is wrong */ | ||
| 615 | args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP; | ||
| 616 | break; | ||
| 617 | } | ||
| 618 | } | ||
| 619 | args.v5.ucTransmitterID = encoder_id; | ||
| 620 | args.v5.ucEncoderMode = encoder_mode; | ||
| 621 | args.v5.ucPpll = pll_id; | ||
| 622 | break; | ||
| 623 | case 6: | ||
| 624 | args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10); | ||
| 625 | args.v6.ucRefDiv = ref_div; | ||
| 626 | args.v6.usFbDiv = cpu_to_le16(fb_div); | ||
| 627 | args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); | ||
| 628 | args.v6.ucPostDiv = post_div; | ||
| 629 | args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */ | ||
| 630 | if ((ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) && | ||
| 631 | (pll_id < ATOM_EXT_PLL1) && | ||
| 632 | !is_pixel_clock_source_from_pll(encoder_mode, pll_id)) | ||
| 633 | args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC; | ||
| 634 | if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { | ||
| 635 | switch (bpc) { | ||
| 636 | case 8: | ||
| 637 | default: | ||
| 638 | args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP; | ||
| 639 | break; | ||
| 640 | case 10: | ||
| 641 | args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6; | ||
| 642 | break; | ||
| 643 | case 12: | ||
| 644 | args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6; | ||
| 645 | break; | ||
| 646 | case 16: | ||
| 647 | args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP; | ||
| 648 | break; | ||
| 649 | } | ||
| 650 | } | ||
| 651 | args.v6.ucTransmitterID = encoder_id; | ||
| 652 | args.v6.ucEncoderMode = encoder_mode; | ||
| 653 | args.v6.ucPpll = pll_id; | ||
| 654 | break; | ||
| 655 | default: | ||
| 656 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); | ||
| 657 | return; | ||
| 658 | } | ||
| 659 | break; | ||
| 660 | default: | ||
| 661 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); | ||
| 662 | return; | ||
| 663 | } | ||
| 664 | |||
| 665 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 666 | } | ||
| 667 | |||
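One detail worth noting in the v6 branch above: ulDispEngClkFreq multiplexes the CRTC id into its top byte and the pixel clock (in 10 kHz units) into the low bits. A small worked example of that packing:

```c
#include <stdint.h>
#include <stdio.h>

/* v6 packs the CRTC id in bits 31:24 and the clock (10 kHz units)
 * in the low bits of ulDispEngClkFreq. */
static uint32_t pack_disp_eng_clk(uint32_t crtc_id, uint32_t clock_khz)
{
	return (crtc_id << 24) | (clock_khz / 10);
}

int main(void)
{
	/* e.g. CRTC 1 driving a 148500 kHz (148.5 MHz) HDMI mode */
	printf("ulDispEngClkFreq = 0x%08x\n",
	       (unsigned)pack_disp_eng_clk(1, 148500)); /* 0x01003a02 */
	return 0;
}
```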
| 668 | int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc, | ||
| 669 | struct drm_display_mode *mode) | ||
| 670 | { | ||
| 671 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 672 | struct drm_device *dev = crtc->dev; | ||
| 673 | struct amdgpu_device *adev = dev->dev_private; | ||
| 674 | struct amdgpu_encoder *amdgpu_encoder = | ||
| 675 | to_amdgpu_encoder(amdgpu_crtc->encoder); | ||
| 676 | int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder); | ||
| 677 | |||
| 678 | amdgpu_crtc->bpc = 8; | ||
| 679 | amdgpu_crtc->ss_enabled = false; | ||
| 680 | |||
| 681 | if ((amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || | ||
| 682 | (amdgpu_encoder_get_dp_bridge_encoder_id(amdgpu_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) { | ||
| 683 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
| 684 | struct drm_connector *connector = | ||
| 685 | amdgpu_get_connector_for_encoder(amdgpu_crtc->encoder); | ||
| 686 | struct amdgpu_connector *amdgpu_connector = | ||
| 687 | to_amdgpu_connector(connector); | ||
| 688 | struct amdgpu_connector_atom_dig *dig_connector = | ||
| 689 | amdgpu_connector->con_priv; | ||
| 690 | int dp_clock; | ||
| 691 | |||
| 692 | /* Record the mode clock for the HDMI deep color max clock limit check */ | ||
| 693 | amdgpu_connector->pixelclock_for_modeset = mode->clock; | ||
| 694 | amdgpu_crtc->bpc = amdgpu_connector_get_monitor_bpc(connector); | ||
| 695 | |||
| 696 | switch (encoder_mode) { | ||
| 697 | case ATOM_ENCODER_MODE_DP_MST: | ||
| 698 | case ATOM_ENCODER_MODE_DP: | ||
| 699 | /* DP/eDP */ | ||
| 700 | dp_clock = dig_connector->dp_clock / 10; | ||
| 701 | amdgpu_crtc->ss_enabled = | ||
| 702 | amdgpu_atombios_get_asic_ss_info(adev, &amdgpu_crtc->ss, | ||
| 703 | ASIC_INTERNAL_SS_ON_DP, | ||
| 704 | dp_clock); | ||
| 705 | break; | ||
| 706 | case ATOM_ENCODER_MODE_LVDS: | ||
| 707 | amdgpu_crtc->ss_enabled = | ||
| 708 | amdgpu_atombios_get_asic_ss_info(adev, | ||
| 709 | &amdgpu_crtc->ss, | ||
| 710 | dig->lcd_ss_id, | ||
| 711 | mode->clock / 10); | ||
| 712 | break; | ||
| 713 | case ATOM_ENCODER_MODE_DVI: | ||
| 714 | amdgpu_crtc->ss_enabled = | ||
| 715 | amdgpu_atombios_get_asic_ss_info(adev, | ||
| 716 | &amdgpu_crtc->ss, | ||
| 717 | ASIC_INTERNAL_SS_ON_TMDS, | ||
| 718 | mode->clock / 10); | ||
| 719 | break; | ||
| 720 | case ATOM_ENCODER_MODE_HDMI: | ||
| 721 | amdgpu_crtc->ss_enabled = | ||
| 722 | amdgpu_atombios_get_asic_ss_info(adev, | ||
| 723 | &amdgpu_crtc->ss, | ||
| 724 | ASIC_INTERNAL_SS_ON_HDMI, | ||
| 725 | mode->clock / 10); | ||
| 726 | break; | ||
| 727 | default: | ||
| 728 | break; | ||
| 729 | } | ||
| 730 | } | ||
| 731 | |||
| 732 | /* adjust pixel clock as needed */ | ||
| 733 | amdgpu_crtc->adjusted_clock = amdgpu_atombios_crtc_adjust_pll(crtc, mode); | ||
| 734 | |||
| 735 | return 0; | ||
| 736 | } | ||
| 737 | |||
| 738 | void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | ||
| 739 | { | ||
| 740 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
| 741 | struct drm_device *dev = crtc->dev; | ||
| 742 | struct amdgpu_device *adev = dev->dev_private; | ||
| 743 | struct amdgpu_encoder *amdgpu_encoder = | ||
| 744 | to_amdgpu_encoder(amdgpu_crtc->encoder); | ||
| 745 | u32 pll_clock = mode->clock; | ||
| 746 | u32 clock = mode->clock; | ||
| 747 | u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; | ||
| 748 | struct amdgpu_pll *pll; | ||
| 749 | int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder); | ||
| 750 | |||
| 751 | /* pass the actual clock to amdgpu_atombios_crtc_program_pll for HDMI */ | ||
| 752 | if ((encoder_mode == ATOM_ENCODER_MODE_HDMI) && | ||
| 753 | (amdgpu_crtc->bpc > 8)) | ||
| 754 | clock = amdgpu_crtc->adjusted_clock; | ||
| 755 | |||
| 756 | switch (amdgpu_crtc->pll_id) { | ||
| 757 | case ATOM_PPLL1: | ||
| 758 | pll = &adev->clock.ppll[0]; | ||
| 759 | break; | ||
| 760 | case ATOM_PPLL2: | ||
| 761 | pll = &adev->clock.ppll[1]; | ||
| 762 | break; | ||
| 763 | case ATOM_PPLL0: | ||
| 764 | case ATOM_PPLL_INVALID: | ||
| 765 | default: | ||
| 766 | pll = &adev->clock.ppll[2]; | ||
| 767 | break; | ||
| 768 | } | ||
| 769 | |||
| 770 | /* update pll params */ | ||
| 771 | pll->flags = amdgpu_crtc->pll_flags; | ||
| 772 | pll->reference_div = amdgpu_crtc->pll_reference_div; | ||
| 773 | pll->post_div = amdgpu_crtc->pll_post_div; | ||
| 774 | |||
| 775 | amdgpu_pll_compute(pll, amdgpu_crtc->adjusted_clock, &pll_clock, | ||
| 776 | &fb_div, &frac_fb_div, &ref_div, &post_div); | ||
| 777 | |||
| 778 | amdgpu_atombios_crtc_program_ss(adev, ATOM_DISABLE, amdgpu_crtc->pll_id, | ||
| 779 | amdgpu_crtc->crtc_id, &amdgpu_crtc->ss); | ||
| 780 | |||
| 781 | amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, | ||
| 782 | encoder_mode, amdgpu_encoder->encoder_id, clock, | ||
| 783 | ref_div, fb_div, frac_fb_div, post_div, | ||
| 784 | amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss); | ||
| 785 | |||
| 786 | if (amdgpu_crtc->ss_enabled) { | ||
| 787 | /* calculate ss amount and step size */ | ||
| 788 | u32 step_size; | ||
| 789 | u32 amount = (((fb_div * 10) + frac_fb_div) * | ||
| 790 | (u32)amdgpu_crtc->ss.percentage) / | ||
| 791 | (100 * (u32)amdgpu_crtc->ss.percentage_divider); | ||
| 792 | amdgpu_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; | ||
| 793 | amdgpu_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & | ||
| 794 | ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK; | ||
| 795 | if (amdgpu_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) | ||
| 796 | step_size = (4 * amount * ref_div * ((u32)amdgpu_crtc->ss.rate * 2048)) / | ||
| 797 | (125 * 25 * pll->reference_freq / 100); | ||
| 798 | else | ||
| 799 | step_size = (2 * amount * ref_div * ((u32)amdgpu_crtc->ss.rate * 2048)) / | ||
| 800 | (125 * 25 * pll->reference_freq / 100); | ||
| 801 | amdgpu_crtc->ss.step = step_size; | ||
| 802 | |||
| 803 | amdgpu_atombios_crtc_program_ss(adev, ATOM_ENABLE, amdgpu_crtc->pll_id, | ||
| 804 | amdgpu_crtc->crtc_id, &amdgpu_crtc->ss); | ||
| 805 | } | ||
| 806 | } | ||
| 807 | |||
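The spread-spectrum bookkeeping above is pure integer arithmetic on the feedback divider. A user-space sketch of the `amount` computation, with made-up but plausible values (the SS percentage is stored in units of 1/percentage_divider percent):

```c
#include <stdio.h>

int main(void)
{
	unsigned fb_div = 100, frac_fb_div = 5; /* example divider values */
	unsigned percentage = 250;              /* 2.5%, with divider 100 */
	unsigned percentage_divider = 100;

	unsigned amount = ((fb_div * 10 + frac_fb_div) * percentage) /
			  (100 * percentage_divider);
	unsigned whole = amount / 10;
	/* note: the driver computes amount - amount / 10 here, not amount % 10 */
	unsigned frac = amount - amount / 10;

	printf("amount=%u whole=%u frac=%u\n", amount, whole, frac);
	return 0;
}
```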
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h new file mode 100644 index 000000000000..c67083335b13 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h | |||
| @@ -0,0 +1,58 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __ATOMBIOS_CRTC_H__ | ||
| 25 | #define __ATOMBIOS_CRTC_H__ | ||
| 26 | |||
| 27 | void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc, | ||
| 28 | struct drm_display_mode *mode, | ||
| 29 | struct drm_display_mode *adjusted_mode); | ||
| 30 | void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc); | ||
| 31 | void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock); | ||
| 32 | void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state); | ||
| 33 | void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state); | ||
| 34 | void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state); | ||
| 35 | void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev); | ||
| 36 | void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc, | ||
| 37 | struct drm_display_mode *mode); | ||
| 38 | void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev, | ||
| 39 | u32 dispclk); | ||
| 40 | void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc, | ||
| 41 | u32 crtc_id, | ||
| 42 | int pll_id, | ||
| 43 | u32 encoder_mode, | ||
| 44 | u32 encoder_id, | ||
| 45 | u32 clock, | ||
| 46 | u32 ref_div, | ||
| 47 | u32 fb_div, | ||
| 48 | u32 frac_fb_div, | ||
| 49 | u32 post_div, | ||
| 50 | int bpc, | ||
| 51 | bool ss_enabled, | ||
| 52 | struct amdgpu_atom_ss *ss); | ||
| 53 | int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc, | ||
| 54 | struct drm_display_mode *mode); | ||
| 55 | void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, | ||
| 56 | struct drm_display_mode *mode); | ||
| 57 | |||
| 58 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c new file mode 100644 index 000000000000..e00b8adde18d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c | |||
| @@ -0,0 +1,774 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2007-8 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the "Software"), | ||
| 7 | * to deal in the Software without restriction, including without limitation | ||
| 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 10 | * Software is furnished to do so, subject to the following conditions: | ||
| 11 | * | ||
| 12 | * The above copyright notice and this permission notice shall be included in | ||
| 13 | * all copies or substantial portions of the Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 22 | * | ||
| 23 | * Authors: Dave Airlie | ||
| 24 | * Alex Deucher | ||
| 25 | * Jerome Glisse | ||
| 26 | */ | ||
| 27 | #include <drm/drmP.h> | ||
| 28 | #include <drm/amdgpu_drm.h> | ||
| 29 | #include "amdgpu.h" | ||
| 30 | |||
| 31 | #include "atom.h" | ||
| 32 | #include "atom-bits.h" | ||
| 33 | #include "atombios_encoders.h" | ||
| 34 | #include "atombios_dp.h" | ||
| 35 | #include "amdgpu_connectors.h" | ||
| 36 | #include "amdgpu_atombios.h" | ||
| 37 | #include <drm/drm_dp_helper.h> | ||
| 38 | |||
| 39 | /* move these to drm_dp_helper.c/h */ | ||
| 40 | #define DP_LINK_CONFIGURATION_SIZE 9 | ||
| 41 | #define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE | ||
| 42 | |||
| 43 | static const char *voltage_names[] = { | ||
| 44 | "0.4V", "0.6V", "0.8V", "1.2V" | ||
| 45 | }; | ||
| 46 | static const char *pre_emph_names[] = { | ||
| 47 | "0dB", "3.5dB", "6dB", "9.5dB" | ||
| 48 | }; | ||
| 49 | |||
| 50 | /***** amdgpu AUX functions *****/ | ||
| 51 | |||
| 52 | union aux_channel_transaction { | ||
| 53 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; | ||
| 54 | PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; | ||
| 55 | }; | ||
| 56 | |||
| 57 | static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan, | ||
| 58 | u8 *send, int send_bytes, | ||
| 59 | u8 *recv, int recv_size, | ||
| 60 | u8 delay, u8 *ack) | ||
| 61 | { | ||
| 62 | struct drm_device *dev = chan->dev; | ||
| 63 | struct amdgpu_device *adev = dev->dev_private; | ||
| 64 | union aux_channel_transaction args; | ||
| 65 | int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); | ||
| 66 | unsigned char *base; | ||
| 67 | int recv_bytes; | ||
| 68 | int r = 0; | ||
| 69 | |||
| 70 | memset(&args, 0, sizeof(args)); | ||
| 71 | |||
| 72 | mutex_lock(&chan->mutex); | ||
| 73 | |||
| 74 | base = (unsigned char *)(adev->mode_info.atom_context->scratch + 1); | ||
| 75 | |||
| 76 | amdgpu_atombios_copy_swap(base, send, send_bytes, true); | ||
| 77 | |||
| 78 | args.v2.lpAuxRequest = cpu_to_le16((u16)(0 + 4)); | ||
| 79 | args.v2.lpDataOut = cpu_to_le16((u16)(16 + 4)); | ||
| 80 | args.v2.ucDataOutLen = 0; | ||
| 81 | args.v2.ucChannelID = chan->rec.i2c_id; | ||
| 82 | args.v2.ucDelay = delay / 10; | ||
| 83 | args.v2.ucHPD_ID = chan->rec.hpd; | ||
| 84 | |||
| 85 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 86 | |||
| 87 | *ack = args.v2.ucReplyStatus; | ||
| 88 | |||
| 89 | /* timeout */ | ||
| 90 | if (args.v2.ucReplyStatus == 1) { | ||
| 91 | DRM_DEBUG_KMS("dp_aux_ch timeout\n"); | ||
| 92 | r = -ETIMEDOUT; | ||
| 93 | goto done; | ||
| 94 | } | ||
| 95 | |||
| 96 | /* flags not zero */ | ||
| 97 | if (args.v2.ucReplyStatus == 2) { | ||
| 98 | DRM_DEBUG_KMS("dp_aux_ch flags not zero\n"); | ||
| 99 | r = -EIO; | ||
| 100 | goto done; | ||
| 101 | } | ||
| 102 | |||
| 103 | /* error */ | ||
| 104 | if (args.v2.ucReplyStatus == 3) { | ||
| 105 | DRM_DEBUG_KMS("dp_aux_ch error\n"); | ||
| 106 | r = -EIO; | ||
| 107 | goto done; | ||
| 108 | } | ||
| 109 | |||
| 110 | recv_bytes = args.v1.ucDataOutLen; | ||
| 111 | if (recv_bytes > recv_size) | ||
| 112 | recv_bytes = recv_size; | ||
| 113 | |||
| 114 | if (recv && recv_size) | ||
| 115 | amdgpu_atombios_copy_swap(recv, base + 16, recv_bytes, false); | ||
| 116 | |||
| 117 | r = recv_bytes; | ||
| 118 | done: | ||
| 119 | mutex_unlock(&chan->mutex); | ||
| 120 | |||
| 121 | return r; | ||
| 122 | } | ||
| 123 | |||
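The three ucReplyStatus checks above amount to a small status-to-errno table. Extracted on its own, assuming the same status encoding the atom table uses (1 = timeout, 2 = flags not zero, 3 = error):

```c
#include <errno.h>

/* Status byte written back by the ProcessAuxChannelTransaction table,
 * mapped onto errno codes exactly as in the function above. */
static int aux_status_to_errno(unsigned char reply_status)
{
	switch (reply_status) {
	case 1:  return -ETIMEDOUT; /* AUX transaction timed out */
	case 2:  return -EIO;       /* flags not zero */
	case 3:  return -EIO;       /* sink reported an error */
	default: return 0;          /* success: caller returns byte count */
	}
}
```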
| 124 | #define BARE_ADDRESS_SIZE 3 | ||
| 125 | #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1) | ||
| 126 | |||
| 127 | static ssize_t | ||
| 128 | amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) | ||
| 129 | { | ||
| 130 | struct amdgpu_i2c_chan *chan = | ||
| 131 | container_of(aux, struct amdgpu_i2c_chan, aux); | ||
| 132 | int ret; | ||
| 133 | u8 tx_buf[20]; | ||
| 134 | size_t tx_size; | ||
| 135 | u8 ack, delay = 0; | ||
| 136 | |||
| 137 | if (WARN_ON(msg->size > 16)) | ||
| 138 | return -E2BIG; | ||
| 139 | |||
| 140 | tx_buf[0] = msg->address & 0xff; | ||
| 141 | tx_buf[1] = msg->address >> 8; | ||
| 142 | tx_buf[2] = msg->request << 4; | ||
| 143 | tx_buf[3] = msg->size ? (msg->size - 1) : 0; | ||
| 144 | |||
| 145 | switch (msg->request & ~DP_AUX_I2C_MOT) { | ||
| 146 | case DP_AUX_NATIVE_WRITE: | ||
| 147 | case DP_AUX_I2C_WRITE: | ||
| 148 | /* tx_size needs to be 4 even for bare address packets since the atom | ||
| 149 | * table needs the info in tx_buf[3]. | ||
| 150 | */ | ||
| 151 | tx_size = HEADER_SIZE + msg->size; | ||
| 152 | if (msg->size == 0) | ||
| 153 | tx_buf[3] |= BARE_ADDRESS_SIZE << 4; | ||
| 154 | else | ||
| 155 | tx_buf[3] |= tx_size << 4; | ||
| 156 | memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size); | ||
| 157 | ret = amdgpu_atombios_dp_process_aux_ch(chan, | ||
| 158 | tx_buf, tx_size, NULL, 0, delay, &ack); | ||
| 159 | if (ret >= 0) | ||
| 160 | /* Return payload size. */ | ||
| 161 | ret = msg->size; | ||
| 162 | break; | ||
| 163 | case DP_AUX_NATIVE_READ: | ||
| 164 | case DP_AUX_I2C_READ: | ||
| 165 | /* tx_size needs to be 4 even for bare address packets since the atom | ||
| 166 | * table needs the info in tx_buf[3]. | ||
| 167 | */ | ||
| 168 | tx_size = HEADER_SIZE; | ||
| 169 | if (msg->size == 0) | ||
| 170 | tx_buf[3] |= BARE_ADDRESS_SIZE << 4; | ||
| 171 | else | ||
| 172 | tx_buf[3] |= tx_size << 4; | ||
| 173 | ret = amdgpu_atombios_dp_process_aux_ch(chan, | ||
| 174 | tx_buf, tx_size, msg->buffer, msg->size, delay, &ack); | ||
| 175 | break; | ||
| 176 | default: | ||
| 177 | ret = -EINVAL; | ||
| 178 | break; | ||
| 179 | } | ||
| 180 | |||
| 181 | if (ret >= 0) | ||
| 182 | msg->reply = ack >> 4; | ||
| 183 | |||
| 184 | return ret; | ||
| 185 | } | ||
| 186 | |||
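The header bytes assembled into tx_buf[0..3] above carry the AUX address, the request nibble, and a length field encoded as size - 1; the read/write branches then OR the total transfer size into the top nibble of byte 3. A standalone sketch of the base packing (this is the layout the atom table consumes, not the raw on-the-wire DP AUX syntax; names are illustrative):

```c
#include <stddef.h>
#include <stdint.h>

/* Sketch of the 4-byte header mirrored from tx_buf[0..3] above. */
static void pack_aux_header(uint8_t hdr[4], uint16_t address,
			    uint8_t request, size_t size)
{
	hdr[0] = address & 0xff;	/* address low byte */
	hdr[1] = address >> 8;		/* address high byte */
	hdr[2] = request << 4;		/* request type in the top nibble */
	hdr[3] = size ? size - 1 : 0;	/* AUX encodes length as size - 1 */
}
```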
| 187 | void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector) | ||
| 188 | { | ||
| 189 | int ret; | ||
| 190 | |||
| 191 | amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd; | ||
| 192 | amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev; | ||
| 193 | amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer; | ||
| 194 | ret = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux); | ||
| 195 | if (!ret) | ||
| 196 | amdgpu_connector->ddc_bus->has_aux = true; | ||
| 197 | |||
| 198 | WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret); | ||
| 199 | } | ||
| 200 | |||
| 201 | /***** general DP utility functions *****/ | ||
| 202 | |||
| 203 | #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | ||
| 204 | #define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPH_LEVEL_3 | ||
| 205 | |||
| 206 | static void amdgpu_atombios_dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], | ||
| 207 | int lane_count, | ||
| 208 | u8 train_set[4]) | ||
| 209 | { | ||
| 210 | u8 v = 0; | ||
| 211 | u8 p = 0; | ||
| 212 | int lane; | ||
| 213 | |||
| 214 | for (lane = 0; lane < lane_count; lane++) { | ||
| 215 | u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane); | ||
| 216 | u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); | ||
| 217 | |||
| 218 | DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n", | ||
| 219 | lane, | ||
| 220 | voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT], | ||
| 221 | pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]); | ||
| 222 | |||
| 223 | if (this_v > v) | ||
| 224 | v = this_v; | ||
| 225 | if (this_p > p) | ||
| 226 | p = this_p; | ||
| 227 | } | ||
| 228 | |||
| 229 | if (v >= DP_VOLTAGE_MAX) | ||
| 230 | v |= DP_TRAIN_MAX_SWING_REACHED; | ||
| 231 | |||
| 232 | if (p >= DP_PRE_EMPHASIS_MAX) | ||
| 233 | p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; | ||
| 234 | |||
| 235 | DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n", | ||
| 236 | voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT], | ||
| 237 | pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]); | ||
| 238 | |||
| 239 | for (lane = 0; lane < 4; lane++) | ||
| 240 | train_set[lane] = v | p; | ||
| 241 | } | ||
| 242 | |||
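In other words, the adjust step takes the highest voltage swing and pre-emphasis requested across the active lanes, sets the MAX_*_REACHED flags when the caps are hit, and mirrors the result to all four lanes. A compact model, using the DP-spec mask values:

```c
#include <stdint.h>

/* Model of the adjust step above: maximum requested swing/pre-emphasis
 * over the active lanes, caps flagged, result mirrored to all lanes. */
static void adjust_train(const uint8_t *req_v, const uint8_t *req_p,
			 int lanes, uint8_t train_set[4])
{
	uint8_t v = 0, p = 0;

	for (int i = 0; i < lanes; i++) {
		if (req_v[i] > v)
			v = req_v[i];
		if (req_p[i] > p)
			p = req_p[i];
	}
	if (v >= 0x3)		/* DP_TRAIN_VOLTAGE_SWING_LEVEL_3 */
		v |= 0x4;	/* DP_TRAIN_MAX_SWING_REACHED */
	if (p >= 0x18)		/* DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3) */
		p |= 0x20;	/* DP_TRAIN_MAX_PRE_EMPHASIS_REACHED */
	for (int i = 0; i < 4; i++)
		train_set[i] = v | p;
}
```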
| 243 | /* convert bits per color (as reported by the EDID) to bits per pixel; | ||
| 244 |  * a bpc of 0 falls back to the default of 24 bpp */ | ||
| 245 | static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc) | ||
| 246 | { | ||
| 247 | if (bpc == 0) | ||
| 248 | return 24; | ||
| 249 | else | ||
| 250 | return bpc * 3; | ||
| 251 | } | ||
| 252 | |||
| 253 | /* get the max pix clock supported by the link rate and lane num */ | ||
| 254 | static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate, | ||
| 255 | int lane_num, | ||
| 256 | int bpp) | ||
| 257 | { | ||
| 258 | return (link_rate * lane_num * 8) / bpp; | ||
| 259 | } | ||
| 260 | |||
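These two helpers are pure arithmetic, so a worked example makes the units concrete: the link rates used in this file (162000, 270000, 540000) are 8b/10b symbol clocks in kHz, and each symbol carries 8 data bits per lane:

```c
#include <stdio.h>

/* Usable pixel clock in kHz: symbol clock (kHz) * lanes * 8 data bits
 * per symbol, divided by bits per pixel. */
static int max_dp_pix_clock(int link_rate, int lane_num, int bpp)
{
	return (link_rate * lane_num * 8) / bpp;
}

int main(void)
{
	/* HBR (270000 kHz symbol clock), 4 lanes, 24 bpp:
	 * 270000 * 4 * 8 / 24 = 360000 kHz, i.e. a 360 MHz pixel clock */
	printf("%d kHz\n", max_dp_pix_clock(270000, 4, 24));
	return 0;
}
```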
| 261 | /***** amdgpu specific DP functions *****/ | ||
| 262 | |||
| 263 | /* Find the smallest lane count (1, 2, 4) that can carry the pixel clock | ||
| 264 |  * at the sink's maximum link rate, capped at the maximum lane count | ||
| 265 |  * the DP sink supports. | ||
| 266 |  */ | ||
| 267 | static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector, | ||
| 268 | u8 dpcd[DP_DPCD_SIZE], | ||
| 269 | int pix_clock) | ||
| 270 | { | ||
| 271 | int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector)); | ||
| 272 | int max_link_rate = drm_dp_max_link_rate(dpcd); | ||
| 273 | int max_lane_num = drm_dp_max_lane_count(dpcd); | ||
| 274 | int lane_num; | ||
| 275 | int max_dp_pix_clock; | ||
| 276 | |||
| 277 | for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) { | ||
| 278 | max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp); | ||
| 279 | if (pix_clock <= max_dp_pix_clock) | ||
| 280 | break; | ||
| 281 | } | ||
| 282 | |||
| 283 | return lane_num; | ||
| 284 | } | ||
| 285 | |||
| 286 | static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector, | ||
| 287 | u8 dpcd[DP_DPCD_SIZE], | ||
| 288 | int pix_clock) | ||
| 289 | { | ||
| 290 | int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector)); | ||
| 291 | int lane_num, max_pix_clock; | ||
| 292 | |||
| 293 | if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) == | ||
| 294 | ENCODER_OBJECT_ID_NUTMEG) | ||
| 295 | return 270000; | ||
| 296 | |||
| 297 | lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock); | ||
| 298 | max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp); | ||
| 299 | if (pix_clock <= max_pix_clock) | ||
| 300 | return 162000; | ||
| 301 | max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp); | ||
| 302 | if (pix_clock <= max_pix_clock) | ||
| 303 | return 270000; | ||
| 304 | if (amdgpu_connector_is_dp12_capable(connector)) { | ||
| 305 | max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp); | ||
| 306 | if (pix_clock <= max_pix_clock) | ||
| 307 | return 540000; | ||
| 308 | } | ||
| 309 | |||
| 310 | return drm_dp_max_link_rate(dpcd); | ||
| 311 | } | ||
| 312 | |||
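The rate selection above escalates through the standard DP rates and only offers HBR2 on DP 1.2 capable connectors, falling back to the sink's maximum rate if nothing fits. Condensed into a table-driven sketch:

```c
#include <stdio.h>

/* Minimal model of the selection above: try RBR, HBR, then HBR2 when
 * the connector is DP 1.2 capable. */
static int pick_link_rate(int pix_clock, int lane_num, int bpp, int dp12_capable)
{
	static const int rates[] = { 162000, 270000, 540000 };
	int n = dp12_capable ? 3 : 2;

	for (int i = 0; i < n; i++)
		if (pix_clock <= (rates[i] * lane_num * 8) / bpp)
			return rates[i];
	return rates[n - 1]; /* stand-in for drm_dp_max_link_rate(dpcd) */
}

int main(void)
{
	/* 4-lane, 24 bpp, 297 MHz pixel clock fits HBR (360 MHz max) */
	printf("%d\n", pick_link_rate(297000, 4, 24, 1));
	return 0;
}
```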
| 313 | static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev, | ||
| 314 | int action, int dp_clock, | ||
| 315 | u8 ucconfig, u8 lane_num) | ||
| 316 | { | ||
| 317 | DP_ENCODER_SERVICE_PARAMETERS args; | ||
| 318 | int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); | ||
| 319 | |||
| 320 | memset(&args, 0, sizeof(args)); | ||
| 321 | args.ucLinkClock = dp_clock / 10; | ||
| 322 | args.ucConfig = ucconfig; | ||
| 323 | args.ucAction = action; | ||
| 324 | args.ucLaneNum = lane_num; | ||
| 325 | args.ucStatus = 0; | ||
| 326 | |||
| 327 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 328 | return args.ucStatus; | ||
| 329 | } | ||
| 330 | |||
| 331 | u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector) | ||
| 332 | { | ||
| 333 | struct drm_device *dev = amdgpu_connector->base.dev; | ||
| 334 | struct amdgpu_device *adev = dev->dev_private; | ||
| 335 | |||
| 336 | return amdgpu_atombios_dp_encoder_service(adev, ATOM_DP_ACTION_GET_SINK_TYPE, 0, | ||
| 337 | amdgpu_connector->ddc_bus->rec.i2c_id, 0); | ||
| 338 | } | ||
| 339 | |||
| 340 | static void amdgpu_atombios_dp_probe_oui(struct amdgpu_connector *amdgpu_connector) | ||
| 341 | { | ||
| 342 | struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv; | ||
| 343 | u8 buf[3]; | ||
| 344 | |||
| 345 | if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) | ||
| 346 | return; | ||
| 347 | |||
| 348 | if (drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3) | ||
| 349 | DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", | ||
| 350 | buf[0], buf[1], buf[2]); | ||
| 351 | |||
| 352 | if (drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3) | ||
| 353 | DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", | ||
| 354 | buf[0], buf[1], buf[2]); | ||
| 355 | } | ||
| 356 | |||
| 357 | int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector) | ||
| 358 | { | ||
| 359 | struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv; | ||
| 360 | u8 msg[DP_DPCD_SIZE]; | ||
| 361 | int ret; | ||
| 362 | |||
| 363 | ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, msg, | ||
| 364 | DP_DPCD_SIZE); | ||
| 365 | if (ret > 0) { | ||
| 366 | memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); | ||
| 367 | DRM_DEBUG_KMS("DPCD: "); | ||
| 368 | for (i = 0; i < DP_DPCD_SIZE; i++) | ||
| 369 | DRM_DEBUG_KMS("%02x ", msg[i]); | ||
| 370 | DRM_DEBUG_KMS("\n"); | ||
| 371 | |||
| 372 | amdgpu_atombios_dp_probe_oui(amdgpu_connector); | ||
| 373 | |||
| 374 | return 0; | ||
| 375 | } | ||
| 376 | dig_connector->dpcd[0] = 0; | ||
| 377 | return -EINVAL; | ||
| 378 | } | ||
| 379 | |||
| 380 | int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder, | ||
| 381 | struct drm_connector *connector) | ||
| 382 | { | ||
| 383 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 384 | struct amdgpu_connector_atom_dig *dig_connector; | ||
| 385 | int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; | ||
| 386 | u16 dp_bridge = amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector); | ||
| 387 | u8 tmp; | ||
| 388 | |||
| 389 | if (!amdgpu_connector->con_priv) | ||
| 390 | return panel_mode; | ||
| 391 | |||
| 392 | dig_connector = amdgpu_connector->con_priv; | ||
| 393 | |||
| 394 | if (dp_bridge != ENCODER_OBJECT_ID_NONE) { | ||
| 395 | /* DP bridge chips */ | ||
| 396 | if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux, | ||
| 397 | DP_EDP_CONFIGURATION_CAP, &tmp) == 1) { | ||
| 398 | if (tmp & 1) | ||
| 399 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; | ||
| 400 | else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || | ||
| 401 | (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) | ||
| 402 | panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; | ||
| 403 | else | ||
| 404 | panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; | ||
| 405 | } | ||
| 406 | } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | ||
| 407 | /* eDP */ | ||
| 408 | if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux, | ||
| 409 | DP_EDP_CONFIGURATION_CAP, &tmp) == 1) { | ||
| 410 | if (tmp & 1) | ||
| 411 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; | ||
| 412 | } | ||
| 413 | } | ||
| 414 | |||
| 415 | return panel_mode; | ||
| 416 | } | ||
| 417 | |||
| 418 | void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector, | ||
| 419 | const struct drm_display_mode *mode) | ||
| 420 | { | ||
| 421 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 422 | struct amdgpu_connector_atom_dig *dig_connector; | ||
| 423 | |||
| 424 | if (!amdgpu_connector->con_priv) | ||
| 425 | return; | ||
| 426 | dig_connector = amdgpu_connector->con_priv; | ||
| 427 | |||
| 428 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | ||
| 429 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { | ||
| 430 | dig_connector->dp_clock = | ||
| 431 | amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); | ||
| 432 | dig_connector->dp_lane_count = | ||
| 433 | amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock); | ||
| 434 | } | ||
| 435 | } | ||
| 436 | |||
| 437 | int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector, | ||
| 438 | struct drm_display_mode *mode) | ||
| 439 | { | ||
| 440 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 441 | struct amdgpu_connector_atom_dig *dig_connector; | ||
| 442 | int dp_clock; | ||
| 443 | |||
| 444 | if (!amdgpu_connector->con_priv) | ||
| 445 | return MODE_CLOCK_HIGH; | ||
| 446 | dig_connector = amdgpu_connector->con_priv; | ||
| 447 | |||
| 448 | dp_clock = | ||
| 449 | amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); | ||
| 450 | |||
| 451 | if ((dp_clock == 540000) && | ||
| 452 | (!amdgpu_connector_is_dp12_capable(connector))) | ||
| 453 | return MODE_CLOCK_HIGH; | ||
| 454 | |||
| 455 | return MODE_OK; | ||
| 456 | } | ||
| 457 | |||
| 458 | bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connector) | ||
| 459 | { | ||
| 460 | u8 link_status[DP_LINK_STATUS_SIZE]; | ||
| 461 | struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv; | ||
| 462 | |||
| 463 | if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status) | ||
| 464 | <= 0) | ||
| 465 | return false; | ||
| 466 | if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) | ||
| 467 | return false; | ||
| 468 | return true; | ||
| 469 | } | ||
| 470 | |||
| 471 | void amdgpu_atombios_dp_set_rx_power_state(struct drm_connector *connector, | ||
| 472 | u8 power_state) | ||
| 473 | { | ||
| 474 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 475 | struct amdgpu_connector_atom_dig *dig_connector; | ||
| 476 | |||
| 477 | if (!amdgpu_connector->con_priv) | ||
| 478 | return; | ||
| 479 | |||
| 480 | dig_connector = amdgpu_connector->con_priv; | ||
| 481 | |||
| 482 | /* power up/down the sink */ | ||
| 483 | if (dig_connector->dpcd[0] >= 0x11) { | ||
| 484 | drm_dp_dpcd_writeb(&amdgpu_connector->ddc_bus->aux, | ||
| 485 | DP_SET_POWER, power_state); | ||
| 486 | usleep_range(1000, 2000); | ||
| 487 | } | ||
| 488 | } | ||
| 489 | |||
| 490 | struct amdgpu_atombios_dp_link_train_info { | ||
| 491 | struct amdgpu_device *adev; | ||
| 492 | struct drm_encoder *encoder; | ||
| 493 | struct drm_connector *connector; | ||
| 494 | int dp_clock; | ||
| 495 | int dp_lane_count; | ||
| 496 | bool tp3_supported; | ||
| 497 | u8 dpcd[DP_RECEIVER_CAP_SIZE]; | ||
| 498 | u8 train_set[4]; | ||
| 499 | u8 link_status[DP_LINK_STATUS_SIZE]; | ||
| 500 | u8 tries; | ||
| 501 | struct drm_dp_aux *aux; | ||
| 502 | }; | ||
| 503 | |||
| 504 | static void | ||
| 505 | amdgpu_atombios_dp_update_vs_emph(struct amdgpu_atombios_dp_link_train_info *dp_info) | ||
| 506 | { | ||
| 507 | /* set the initial vs/emph on the source */ | ||
| 508 | amdgpu_atombios_encoder_setup_dig_transmitter(dp_info->encoder, | ||
| 509 | ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH, | ||
| 510 | 0, dp_info->train_set[0]); /* sets all lanes at once */ | ||
| 511 | |||
| 512 | /* set the vs/emph on the sink */ | ||
| 513 | drm_dp_dpcd_write(dp_info->aux, DP_TRAINING_LANE0_SET, | ||
| 514 | dp_info->train_set, dp_info->dp_lane_count); | ||
| 515 | } | ||
| 516 | |||
| 517 | static void | ||
| 518 | amdgpu_atombios_dp_set_tp(struct amdgpu_atombios_dp_link_train_info *dp_info, int tp) | ||
| 519 | { | ||
| 520 | int rtp = 0; | ||
| 521 | |||
| 522 | /* set training pattern on the source */ | ||
| 523 | switch (tp) { | ||
| 524 | case DP_TRAINING_PATTERN_1: | ||
| 525 | rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1; | ||
| 526 | break; | ||
| 527 | case DP_TRAINING_PATTERN_2: | ||
| 528 | rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2; | ||
| 529 | break; | ||
| 530 | case DP_TRAINING_PATTERN_3: | ||
| 531 | rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3; | ||
| 532 | break; | ||
| 533 | } | ||
| 534 | amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder, rtp, 0); | ||
| 535 | |||
| 536 | /* enable training pattern on the sink */ | ||
| 537 | drm_dp_dpcd_writeb(dp_info->aux, DP_TRAINING_PATTERN_SET, tp); | ||
| 538 | } | ||
| 539 | |||
| 540 | static int | ||
| 541 | amdgpu_atombios_dp_link_train_init(struct amdgpu_atombios_dp_link_train_info *dp_info) | ||
| 542 | { | ||
| 543 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(dp_info->encoder); | ||
| 544 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
| 545 | u8 tmp; | ||
| 546 | |||
| 547 | /* power up the sink */ | ||
| 548 | amdgpu_atombios_dp_set_rx_power_state(dp_info->connector, DP_SET_POWER_D0); | ||
| 549 | |||
| 550 | /* possibly enable downspread on the sink */ | ||
| 551 | if (dp_info->dpcd[3] & 0x1) | ||
| 552 | drm_dp_dpcd_writeb(dp_info->aux, | ||
| 553 | DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5); | ||
| 554 | else | ||
| 555 | drm_dp_dpcd_writeb(dp_info->aux, | ||
| 556 | DP_DOWNSPREAD_CTRL, 0); | ||
| 557 | |||
| 558 | if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE) | ||
| 559 | drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1); | ||
| 560 | |||
| 561 | /* set the lane count on the sink */ | ||
| 562 | tmp = dp_info->dp_lane_count; | ||
| 563 | if (drm_dp_enhanced_frame_cap(dp_info->dpcd)) | ||
| 564 | tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | ||
| 565 | drm_dp_dpcd_writeb(dp_info->aux, DP_LANE_COUNT_SET, tmp); | ||
| 566 | |||
| 567 | /* set the link rate on the sink */ | ||
| 568 | tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock); | ||
| 569 | drm_dp_dpcd_writeb(dp_info->aux, DP_LINK_BW_SET, tmp); | ||
| 570 | |||
| 571 | /* start training on the source */ | ||
| 572 | amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder, | ||
| 573 | ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0); | ||
| 574 | |||
| 575 | /* disable the training pattern on the sink */ | ||
| 576 | drm_dp_dpcd_writeb(dp_info->aux, | ||
| 577 | DP_TRAINING_PATTERN_SET, | ||
| 578 | DP_TRAINING_PATTERN_DISABLE); | ||
| 579 | |||
| 580 | return 0; | ||
| 581 | } | ||
| 582 | |||
| 583 | static int | ||
| 584 | amdgpu_atombios_dp_link_train_finish(struct amdgpu_atombios_dp_link_train_info *dp_info) | ||
| 585 | { | ||
| 586 | udelay(400); | ||
| 587 | |||
| 588 | /* disable the training pattern on the sink */ | ||
| 589 | drm_dp_dpcd_writeb(dp_info->aux, | ||
| 590 | DP_TRAINING_PATTERN_SET, | ||
| 591 | DP_TRAINING_PATTERN_DISABLE); | ||
| 592 | |||
| 593 | /* disable the training pattern on the source */ | ||
| 594 | amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder, | ||
| 595 | ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0); | ||
| 596 | |||
| 597 | return 0; | ||
| 598 | } | ||
| 599 | |||
| 600 | static int | ||
| 601 | amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_info) | ||
| 602 | { | ||
| 603 | bool clock_recovery; | ||
| 604 | u8 voltage; | ||
| 605 | int i; | ||
| 606 | |||
| 607 | amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1); | ||
| 608 | memset(dp_info->train_set, 0, 4); | ||
| 609 | amdgpu_atombios_dp_update_vs_emph(dp_info); | ||
| 610 | |||
| 611 | udelay(400); | ||
| 612 | |||
| 613 | /* clock recovery loop */ | ||
| 614 | clock_recovery = false; | ||
| 615 | dp_info->tries = 0; | ||
| 616 | voltage = 0xff; | ||
| 617 | while (1) { | ||
| 618 | drm_dp_link_train_clock_recovery_delay(dp_info->dpcd); | ||
| 619 | |||
| 620 | if (drm_dp_dpcd_read_link_status(dp_info->aux, | ||
| 621 | dp_info->link_status) <= 0) { | ||
| 622 | DRM_ERROR("displayport link status failed\n"); | ||
| 623 | break; | ||
| 624 | } | ||
| 625 | |||
| 626 | if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { | ||
| 627 | clock_recovery = true; | ||
| 628 | break; | ||
| 629 | } | ||
| 630 | |||
| 631 | for (i = 0; i < dp_info->dp_lane_count; i++) { | ||
| 632 | if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | ||
| 633 | break; | ||
| 634 | } | ||
| 635 | if (i == dp_info->dp_lane_count) { | ||
| 636 | DRM_ERROR("clock recovery reached max voltage\n"); | ||
| 637 | break; | ||
| 638 | } | ||
| 639 | |||
| 640 | if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { | ||
| 641 | ++dp_info->tries; | ||
| 642 | if (dp_info->tries == 5) { | ||
| 643 | DRM_ERROR("clock recovery tried 5 times\n"); | ||
| 644 | break; | ||
| 645 | } | ||
| 646 | } else | ||
| 647 | dp_info->tries = 0; | ||
| 648 | |||
| 649 | voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | ||
| 650 | |||
| 651 | /* Compute new train_set as requested by sink */ | ||
| 652 | amdgpu_atombios_dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, | ||
| 653 | dp_info->train_set); | ||
| 654 | |||
| 655 | amdgpu_atombios_dp_update_vs_emph(dp_info); | ||
| 656 | } | ||
| 657 | if (!clock_recovery) { | ||
| 658 | DRM_ERROR("clock recovery failed\n"); | ||
| 659 | return -1; | ||
| 660 | } else { | ||
| 661 | DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n", | ||
| 662 | dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, | ||
| 663 | (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >> | ||
| 664 | DP_TRAIN_PRE_EMPHASIS_SHIFT); | ||
| 665 | return 0; | ||
| 666 | } | ||
| 667 | } | ||
| 668 | |||
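A subtlety in the loop above: the retry counter only advances while the sink keeps requesting the same voltage swing; any change of level resets it, so training can keep walking through swing levels without tripping the limit. The bookkeeping in isolation (0x3 standing in for DP_TRAIN_VOLTAGE_SWING_MASK):

```c
#include <stdint.h>

/* Returns nonzero when training has been stuck at one swing level for
 * five consecutive tries, mirroring the loop above. */
static int update_tries(uint8_t new_train_set0, uint8_t *voltage, int *tries)
{
	if ((new_train_set0 & 0x3) == *voltage) {
		if (++(*tries) == 5)
			return -1; /* stuck: give up, as the driver does */
	} else {
		*tries = 0; /* the sink asked for a new level: reset */
	}
	*voltage = new_train_set0 & 0x3;
	return 0;
}
```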
| 669 | static int | ||
| 670 | amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_info) | ||
| 671 | { | ||
| 672 | bool channel_eq; | ||
| 673 | |||
| 674 | if (dp_info->tp3_supported) | ||
| 675 | amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3); | ||
| 676 | else | ||
| 677 | amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2); | ||
| 678 | |||
| 679 | /* channel equalization loop */ | ||
| 680 | dp_info->tries = 0; | ||
| 681 | channel_eq = false; | ||
| 682 | while (1) { | ||
| 683 | drm_dp_link_train_channel_eq_delay(dp_info->dpcd); | ||
| 684 | |||
| 685 | if (drm_dp_dpcd_read_link_status(dp_info->aux, | ||
| 686 | dp_info->link_status) <= 0) { | ||
| 687 | DRM_ERROR("displayport link status failed\n"); | ||
| 688 | break; | ||
| 689 | } | ||
| 690 | |||
| 691 | if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { | ||
| 692 | channel_eq = true; | ||
| 693 | break; | ||
| 694 | } | ||
| 695 | |||
| 696 | /* give up after too many tries */ | ||
| 697 | if (dp_info->tries > 5) { | ||
| 698 | DRM_ERROR("channel eq failed: 5 tries\n"); | ||
| 699 | break; | ||
| 700 | } | ||
| 701 | |||
| 702 | /* Compute new train_set as requested by sink */ | ||
| 703 | amdgpu_atombios_dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, | ||
| 704 | dp_info->train_set); | ||
| 705 | |||
| 706 | amdgpu_atombios_dp_update_vs_emph(dp_info); | ||
| 707 | dp_info->tries++; | ||
| 708 | } | ||
| 709 | |||
| 710 | if (!channel_eq) { | ||
| 711 | DRM_ERROR("channel eq failed\n"); | ||
| 712 | return -1; | ||
| 713 | } else { | ||
| 714 | DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n", | ||
| 715 | dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, | ||
| 716 | (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) | ||
| 717 | >> DP_TRAIN_PRE_EMPHASIS_SHIFT); | ||
| 718 | return 0; | ||
| 719 | } | ||
| 720 | } | ||
| 721 | |||
| 722 | void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder, | ||
| 723 | struct drm_connector *connector) | ||
| 724 | { | ||
| 725 | struct drm_device *dev = encoder->dev; | ||
| 726 | struct amdgpu_device *adev = dev->dev_private; | ||
| 727 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 728 | struct amdgpu_encoder_atom_dig *dig; | ||
| 729 | struct amdgpu_connector *amdgpu_connector; | ||
| 730 | struct amdgpu_connector_atom_dig *dig_connector; | ||
| 731 | struct amdgpu_atombios_dp_link_train_info dp_info; | ||
| 732 | u8 tmp; | ||
| 733 | |||
| 734 | if (!amdgpu_encoder->enc_priv) | ||
| 735 | return; | ||
| 736 | dig = amdgpu_encoder->enc_priv; | ||
| 737 | |||
| 738 | amdgpu_connector = to_amdgpu_connector(connector); | ||
| 739 | if (!amdgpu_connector->con_priv) | ||
| 740 | return; | ||
| 741 | dig_connector = amdgpu_connector->con_priv; | ||
| 742 | |||
| 743 | if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) && | ||
| 744 | (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP)) | ||
| 745 | return; | ||
| 746 | |||
| 747 | if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp) | ||
| 748 | == 1) { | ||
| 749 | if (tmp & DP_TPS3_SUPPORTED) | ||
| 750 | dp_info.tp3_supported = true; | ||
| 751 | else | ||
| 752 | dp_info.tp3_supported = false; | ||
| 753 | } else { | ||
| 754 | dp_info.tp3_supported = false; | ||
| 755 | } | ||
| 756 | |||
| 757 | memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); | ||
| 758 | dp_info.adev = adev; | ||
| 759 | dp_info.encoder = encoder; | ||
| 760 | dp_info.connector = connector; | ||
| 761 | dp_info.dp_lane_count = dig_connector->dp_lane_count; | ||
| 762 | dp_info.dp_clock = dig_connector->dp_clock; | ||
| 763 | dp_info.aux = &amdgpu_connector->ddc_bus->aux; | ||
| 764 | |||
| 765 | if (amdgpu_atombios_dp_link_train_init(&dp_info)) | ||
| 766 | goto done; | ||
| 767 | if (amdgpu_atombios_dp_link_train_cr(&dp_info)) | ||
| 768 | goto done; | ||
| 769 | if (amdgpu_atombios_dp_link_train_ce(&dp_info)) | ||
| 770 | goto done; | ||
| 771 | done: | ||
| 772 | if (amdgpu_atombios_dp_link_train_finish(&dp_info)) | ||
| 773 | return; | ||
| 774 | } | ||
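Stripped of the hardware plumbing, the entry point above is a phase pipeline where every early failure still reaches the common teardown, so the training pattern is always cleared. A compact model of that control flow (the phase() helper stands in for the init/CR/EQ/finish functions):

```c
#include <stdio.h>

struct trainer { int fail_at; int step; };

/* phase() stands in for link_train_init/cr/ce/finish; it returns
 * nonzero when the configured phase "fails". */
static int phase(struct trainer *t)
{
	return ++t->step == t->fail_at;
}

static void link_train(struct trainer *t)
{
	if (phase(t))		/* init: power up sink, set lanes/rate */
		goto done;
	if (phase(t))		/* clock recovery (TPS1) */
		goto done;
	if (phase(t))		/* channel equalization (TPS2/TPS3) */
		goto done;
done:
	phase(t);		/* finish: always clear the training pattern */
}

int main(void)
{
	struct trainer t = { .fail_at = 2, .step = 0 }; /* CR fails */

	link_train(&t);
	printf("phases run: %d\n", t.step); /* init + cr + finish = 3 */
	return 0;
}
```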
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.h b/drivers/gpu/drm/amd/amdgpu/atombios_dp.h new file mode 100644 index 000000000000..f59d85eaddf0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.h | |||
| @@ -0,0 +1,42 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __ATOMBIOS_DP_H__ | ||
| 25 | #define __ATOMBIOS_DP_H__ | ||
| 26 | |||
| 27 | void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector); | ||
| 28 | u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector); | ||
| 29 | int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector); | ||
| 30 | int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder, | ||
| 31 | struct drm_connector *connector); | ||
| 32 | void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector, | ||
| 33 | const struct drm_display_mode *mode); | ||
| 34 | int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector, | ||
| 35 | struct drm_display_mode *mode); | ||
| 36 | bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connector); | ||
| 37 | void amdgpu_atombios_dp_set_rx_power_state(struct drm_connector *connector, | ||
| 38 | u8 power_state); | ||
| 39 | void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder, | ||
| 40 | struct drm_connector *connector); | ||
| 41 | |||
| 42 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c new file mode 100644 index 000000000000..ae8caca61e04 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | |||
| @@ -0,0 +1,2066 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2007-11 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the "Software"), | ||
| 7 | * to deal in the Software without restriction, including without limitation | ||
| 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 10 | * Software is furnished to do so, subject to the following conditions: | ||
| 11 | * | ||
| 12 | * The above copyright notice and this permission notice shall be included in | ||
| 13 | * all copies or substantial portions of the Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 22 | * | ||
| 23 | * Authors: Dave Airlie | ||
| 24 | * Alex Deucher | ||
| 25 | */ | ||
| 26 | #include <drm/drmP.h> | ||
| 27 | #include <drm/drm_crtc_helper.h> | ||
| 28 | #include <drm/amdgpu_drm.h> | ||
| 29 | #include "amdgpu.h" | ||
| 30 | #include "amdgpu_connectors.h" | ||
| 31 | #include "atom.h" | ||
| 32 | #include "atombios_encoders.h" | ||
| 33 | #include "atombios_dp.h" | ||
| 34 | #include <linux/backlight.h> | ||
| 35 | #include "bif/bif_4_1_d.h" | ||
| 36 | |||
| 37 | static u8 | ||
| 38 | amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev) | ||
| 39 | { | ||
| 40 | u8 backlight_level; | ||
| 41 | u32 bios_2_scratch; | ||
| 42 | |||
| 43 | bios_2_scratch = RREG32(mmBIOS_SCRATCH_2); | ||
| 44 | |||
| 45 | backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >> | ||
| 46 | ATOM_S2_CURRENT_BL_LEVEL_SHIFT); | ||
| 47 | |||
| 48 | return backlight_level; | ||
| 49 | } | ||
| 50 | |||
| 51 | static void | ||
| 52 | amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev, | ||
| 53 | u8 backlight_level) | ||
| 54 | { | ||
| 55 | u32 bios_2_scratch; | ||
| 56 | |||
| 57 | bios_2_scratch = RREG32(mmBIOS_SCRATCH_2); | ||
| 58 | |||
| 59 | bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; | ||
| 60 | bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) & | ||
| 61 | ATOM_S2_CURRENT_BL_LEVEL_MASK); | ||
| 62 | |||
| 63 | WREG32(mmBIOS_SCRATCH_2, bios_2_scratch); | ||
| 64 | } | ||
| 65 | |||
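The scratch-register pair above is a plain read-modify-write of one bitfield. Generalized (the mask and shift here are illustrative, not the real ATOM_S2_* values):

```c
#include <stdint.h>
#include <stdio.h>

/* Read-modify-write of a level field inside a 32-bit register image,
 * mirroring the BIOS_SCRATCH_2 update above. */
static uint32_t set_level_field(uint32_t reg, uint32_t level,
				uint32_t mask, unsigned int shift)
{
	reg &= ~mask;                   /* clear the old level */
	reg |= (level << shift) & mask; /* insert the new one, clamped */
	return reg;
}

int main(void)
{
	/* assume an 8-bit level field at bits 15:8 */
	printf("0x%08x\n",
	       (unsigned)set_level_field(0xdeadbe00u, 0x7f, 0x0000ff00u, 8));
	return 0;
}
```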
| 66 | u8 | ||
| 67 | amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder) | ||
| 68 | { | ||
| 69 | struct drm_device *dev = amdgpu_encoder->base.dev; | ||
| 70 | struct amdgpu_device *adev = dev->dev_private; | ||
| 71 | |||
| 72 | if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) | ||
| 73 | return 0; | ||
| 74 | |||
| 75 | return amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); | ||
| 76 | } | ||
| 77 | |||
| 78 | void | ||
| 79 | amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder, | ||
| 80 | u8 level) | ||
| 81 | { | ||
| 82 | struct drm_encoder *encoder = &amdgpu_encoder->base; | ||
| 83 | struct drm_device *dev = amdgpu_encoder->base.dev; | ||
| 84 | struct amdgpu_device *adev = dev->dev_private; | ||
| 85 | struct amdgpu_encoder_atom_dig *dig; | ||
| 86 | |||
| 87 | if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) | ||
| 88 | return; | ||
| 89 | |||
| 90 | if ((amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) && | ||
| 91 | amdgpu_encoder->enc_priv) { | ||
| 92 | dig = amdgpu_encoder->enc_priv; | ||
| 93 | dig->backlight_level = level; | ||
| 94 | amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, dig->backlight_level); | ||
| 95 | |||
| 96 | switch (amdgpu_encoder->encoder_id) { | ||
| 97 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
| 98 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
| 99 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 100 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 101 | if (dig->backlight_level == 0) | ||
| 102 | amdgpu_atombios_encoder_setup_dig_transmitter(encoder, | ||
| 103 | ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); | ||
| 104 | else { | ||
| 105 | amdgpu_atombios_encoder_setup_dig_transmitter(encoder, | ||
| 106 | ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0); | ||
| 107 | amdgpu_atombios_encoder_setup_dig_transmitter(encoder, | ||
| 108 | ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); | ||
| 109 | } | ||
| 110 | break; | ||
| 111 | default: | ||
| 112 | break; | ||
| 113 | } | ||
| 114 | } | ||
| 115 | } | ||
| 116 | |||
| 117 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) | ||
| 118 | |||
| 119 | static u8 amdgpu_atombios_encoder_backlight_level(struct backlight_device *bd) | ||
| 120 | { | ||
| 121 | u8 level; | ||
| 122 | |||
| 123 | /* Convert brightness to hardware level */ | ||
| 124 | if (bd->props.brightness < 0) | ||
| 125 | level = 0; | ||
| 126 | else if (bd->props.brightness > AMDGPU_MAX_BL_LEVEL) | ||
| 127 | level = AMDGPU_MAX_BL_LEVEL; | ||
| 128 | else | ||
| 129 | level = bd->props.brightness; | ||
| 130 | |||
| 131 | return level; | ||
| 132 | } | ||
| 133 | |||
| 134 | static int amdgpu_atombios_encoder_update_backlight_status(struct backlight_device *bd) | ||
| 135 | { | ||
| 136 | struct amdgpu_backlight_privdata *pdata = bl_get_data(bd); | ||
| 137 | struct amdgpu_encoder *amdgpu_encoder = pdata->encoder; | ||
| 138 | |||
| 139 | amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, | ||
| 140 | amdgpu_atombios_encoder_backlight_level(bd)); | ||
| 141 | |||
| 142 | return 0; | ||
| 143 | } | ||
| 144 | |||
| 145 | static int | ||
| 146 | amdgpu_atombios_encoder_get_backlight_brightness(struct backlight_device *bd) | ||
| 147 | { | ||
| 148 | struct amdgpu_backlight_privdata *pdata = bl_get_data(bd); | ||
| 149 | struct amdgpu_encoder *amdgpu_encoder = pdata->encoder; | ||
| 150 | struct drm_device *dev = amdgpu_encoder->base.dev; | ||
| 151 | struct amdgpu_device *adev = dev->dev_private; | ||
| 152 | |||
| 153 | return amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); | ||
| 154 | } | ||
| 155 | |||
| 156 | static const struct backlight_ops amdgpu_atombios_encoder_backlight_ops = { | ||
| 157 | .get_brightness = amdgpu_atombios_encoder_get_backlight_brightness, | ||
| 158 | .update_status = amdgpu_atombios_encoder_update_backlight_status, | ||
| 159 | }; | ||
| 160 | |||
| 161 | void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encoder, | ||
| 162 | struct drm_connector *drm_connector) | ||
| 163 | { | ||
| 164 | struct drm_device *dev = amdgpu_encoder->base.dev; | ||
| 165 | struct amdgpu_device *adev = dev->dev_private; | ||
| 166 | struct backlight_device *bd; | ||
| 167 | struct backlight_properties props; | ||
| 168 | struct amdgpu_backlight_privdata *pdata; | ||
| 169 | struct amdgpu_encoder_atom_dig *dig; | ||
| 170 | u8 backlight_level; | ||
| 171 | char bl_name[16]; | ||
| 172 | |||
| 173 | /* Mac laptops with multiple GPUs use the gmux driver for backlight | ||
| 174 | * so don't register a backlight device | ||
| 175 | */ | ||
| 176 | if ((adev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && | ||
| 177 | (adev->pdev->device == 0x6741)) | ||
| 178 | return; | ||
| 179 | |||
| 180 | if (!amdgpu_encoder->enc_priv) | ||
| 181 | return; | ||
| 182 | |||
| 183 | if (!adev->is_atom_bios) | ||
| 184 | return; | ||
| 185 | |||
| 186 | if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) | ||
| 187 | return; | ||
| 188 | |||
| 189 | pdata = kmalloc(sizeof(struct amdgpu_backlight_privdata), GFP_KERNEL); | ||
| 190 | if (!pdata) { | ||
| 191 | DRM_ERROR("Memory allocation failed\n"); | ||
| 192 | goto error; | ||
| 193 | } | ||
| 194 | |||
| 195 | memset(&props, 0, sizeof(props)); | ||
| 196 | props.max_brightness = AMDGPU_MAX_BL_LEVEL; | ||
| 197 | props.type = BACKLIGHT_RAW; | ||
| 198 | snprintf(bl_name, sizeof(bl_name), | ||
| 199 | "amdgpu_bl%d", dev->primary->index); | ||
| 200 | bd = backlight_device_register(bl_name, drm_connector->kdev, | ||
| 201 | pdata, &amdgpu_atombios_encoder_backlight_ops, &props); | ||
| 202 | if (IS_ERR(bd)) { | ||
| 203 | DRM_ERROR("Backlight registration failed\n"); | ||
| 204 | goto error; | ||
| 205 | } | ||
| 206 | |||
| 207 | pdata->encoder = amdgpu_encoder; | ||
| 208 | |||
| 209 | backlight_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); | ||
| 210 | |||
| 211 | dig = amdgpu_encoder->enc_priv; | ||
| 212 | dig->bl_dev = bd; | ||
| 213 | |||
| 214 | bd->props.brightness = amdgpu_atombios_encoder_get_backlight_brightness(bd); | ||
| 215 | bd->props.power = FB_BLANK_UNBLANK; | ||
| 216 | backlight_update_status(bd); | ||
| 217 | |||
| 218 | DRM_INFO("amdgpu atom DIG backlight initialized\n"); | ||
| 219 | |||
| 220 | return; | ||
| 221 | |||
| 222 | error: | ||
| 223 | kfree(pdata); | ||
| 224 | return; | ||
| 225 | } | ||
| 226 | |||
| 227 | void | ||
| 228 | amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder) | ||
| 229 | { | ||
| 230 | struct drm_device *dev = amdgpu_encoder->base.dev; | ||
| 231 | struct amdgpu_device *adev = dev->dev_private; | ||
| 232 | struct backlight_device *bd = NULL; | ||
| 233 | struct amdgpu_encoder_atom_dig *dig; | ||
| 234 | |||
| 235 | if (!amdgpu_encoder->enc_priv) | ||
| 236 | return; | ||
| 237 | |||
| 238 | if (!adev->is_atom_bios) | ||
| 239 | return; | ||
| 240 | |||
| 241 | if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) | ||
| 242 | return; | ||
| 243 | |||
| 244 | dig = amdgpu_encoder->enc_priv; | ||
| 245 | bd = dig->bl_dev; | ||
| 246 | dig->bl_dev = NULL; | ||
| 247 | |||
| 248 | if (bd) { | ||
| 249 | struct amdgpu_backlight_privdata *pdata; /* matches the type registered in init_backlight */ | ||
| 250 | |||
| 251 | pdata = bl_get_data(bd); | ||
| 252 | backlight_device_unregister(bd); | ||
| 253 | kfree(pdata); | ||
| 254 | |||
| 255 | DRM_INFO("amdgpu atom DIG backlight unloaded\n"); | ||
| 256 | } | ||
| 257 | } | ||
| 258 | |||
| 259 | #else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */ | ||
| 260 | |||
| 261 | void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *encoder, struct drm_connector *drm_connector) | ||
| 262 | { | ||
| 263 | } | ||
| 264 | |||
| 265 | void amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *encoder) | ||
| 266 | { | ||
| 267 | } | ||
| 268 | |||
| 269 | #endif | ||
| 270 | |||
| 271 | bool amdgpu_atombios_encoder_is_digital(struct drm_encoder *encoder) | ||
| 272 | { | ||
| 273 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 274 | switch (amdgpu_encoder->encoder_id) { | ||
| 275 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
| 276 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
| 277 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 278 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 279 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | ||
| 280 | return true; | ||
| 281 | default: | ||
| 282 | return false; | ||
| 283 | } | ||
| 284 | } | ||
| 285 | |||
| 286 | bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder, | ||
| 287 | const struct drm_display_mode *mode, | ||
| 288 | struct drm_display_mode *adjusted_mode) | ||
| 289 | { | ||
| 290 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 291 | |||
| 292 | /* set the active encoder to connector routing */ | ||
| 293 | amdgpu_encoder_set_active_device(encoder); | ||
| 294 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
| 295 | |||
| 296 | /* hw bug */ | ||
| 297 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
| 298 | && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) | ||
| 299 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; | ||
| 300 | |||
| 301 | /* get the native mode for scaling; LCDs and scaled modes both need the panel fixup */ | ||
| 302 | if ((amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) || | ||
| 303 | (amdgpu_encoder->rmx_type != RMX_OFF)) | ||
| 304 | amdgpu_panel_mode_fixup(encoder, adjusted_mode); | ||
| 306 | |||
| 307 | if ((amdgpu_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || | ||
| 308 | (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { | ||
| 309 | struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); | ||
| 310 | amdgpu_atombios_dp_set_link_config(connector, adjusted_mode); | ||
| 311 | } | ||
| 312 | |||
| 313 | return true; | ||
| 314 | } | ||
| 315 | |||
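| | /* Enable/disable the analog DAC through the DAC1/DAC2EncoderControl tables. */ | ||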
| 316 | static void | ||
| 317 | amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action) | ||
| 318 | { | ||
| 319 | struct drm_device *dev = encoder->dev; | ||
| 320 | struct amdgpu_device *adev = dev->dev_private; | ||
| 321 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 322 | DAC_ENCODER_CONTROL_PS_ALLOCATION args; | ||
| 323 | int index = 0; | ||
| 324 | |||
| 325 | memset(&args, 0, sizeof(args)); | ||
| 326 | |||
| 327 | switch (amdgpu_encoder->encoder_id) { | ||
| 328 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | ||
| 329 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
| 330 | index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl); | ||
| 331 | break; | ||
| 332 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | ||
| 333 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
| 334 | index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl); | ||
| 335 | break; | ||
| 336 | } | ||
| 337 | |||
| 338 | args.ucAction = action; | ||
| 339 | args.ucDacStandard = ATOM_DAC1_PS2; | ||
| 340 | args.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); | ||
| 341 | |||
| 342 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 343 | |||
| 344 | } | ||
| 345 | |||
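| | /* Map the CRTC's bpc to the ATOM PANEL_*BIT_PER_COLOR encoding (8 bpc default). */ | ||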
| 346 | static u8 amdgpu_atombios_encoder_get_bpc(struct drm_encoder *encoder) | ||
| 347 | { | ||
| 348 | int bpc = 8; | ||
| 349 | |||
| 350 | if (encoder->crtc) { | ||
| 351 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); | ||
| 352 | bpc = amdgpu_crtc->bpc; | ||
| 353 | } | ||
| 354 | |||
| 355 | switch (bpc) { | ||
| 356 | case 0: | ||
| 357 | return PANEL_BPC_UNDEFINE; | ||
| 358 | case 6: | ||
| 359 | return PANEL_6BIT_PER_COLOR; | ||
| 360 | case 8: | ||
| 361 | default: | ||
| 362 | return PANEL_8BIT_PER_COLOR; | ||
| 363 | case 10: | ||
| 364 | return PANEL_10BIT_PER_COLOR; | ||
| 365 | case 12: | ||
| 366 | return PANEL_12BIT_PER_COLOR; | ||
| 367 | case 16: | ||
| 368 | return PANEL_16BIT_PER_COLOR; | ||
| 369 | } | ||
| 370 | } | ||
| 371 | |||
| 372 | union dvo_encoder_control { | ||
| 373 | ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; | ||
| 374 | DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; | ||
| 375 | DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3; | ||
| 376 | DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 dvo_v4; | ||
| 377 | }; | ||
| 378 | |||
| 379 | static void | ||
| 380 | amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action) | ||
| 381 | { | ||
| 382 | struct drm_device *dev = encoder->dev; | ||
| 383 | struct amdgpu_device *adev = dev->dev_private; | ||
| 384 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 385 | union dvo_encoder_control args; | ||
| 386 | int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); | ||
| 387 | uint8_t frev, crev; | ||
| 388 | |||
| 389 | memset(&args, 0, sizeof(args)); | ||
| 390 | |||
| 391 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) | ||
| 392 | return; | ||
| 393 | |||
| 394 | switch (frev) { | ||
| 395 | case 1: | ||
| 396 | switch (crev) { | ||
| 397 | case 1: | ||
| 398 | /* R4xx, R5xx */ | ||
| 399 | args.ext_tmds.sXTmdsEncoder.ucEnable = action; | ||
| 400 | |||
| 401 | if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 402 | args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL; | ||
| 403 | |||
| 404 | args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB; | ||
| 405 | break; | ||
| 406 | case 2: | ||
| 407 | /* RS600/690/740 */ | ||
| 408 | args.dvo.sDVOEncoder.ucAction = action; | ||
| 409 | args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); | ||
| 410 | /* DFP1, CRT1, TV1 depending on the type of port */ | ||
| 411 | args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX; | ||
| 412 | |||
| 413 | if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 414 | args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL; | ||
| 415 | break; | ||
| 416 | case 3: | ||
| 417 | /* R6xx */ | ||
| 418 | args.dvo_v3.ucAction = action; | ||
| 419 | args.dvo_v3.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); | ||
| 420 | args.dvo_v3.ucDVOConfig = 0; /* XXX */ | ||
| 421 | break; | ||
| 422 | case 4: | ||
| 423 | /* DCE8 */ | ||
| 424 | args.dvo_v4.ucAction = action; | ||
| 425 | args.dvo_v4.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); | ||
| 426 | args.dvo_v4.ucDVOConfig = 0; /* XXX */ | ||
| 427 | args.dvo_v4.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder); | ||
| 428 | break; | ||
| 429 | default: | ||
| 430 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
| 431 | break; | ||
| 432 | } | ||
| 433 | break; | ||
| 434 | default: | ||
| 435 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
| 436 | break; | ||
| 437 | } | ||
| 438 | |||
| 439 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 440 | } | ||
| 441 | |||
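| | /* Derive the ATOM encoder mode from the connector type, the sink's EDID | ||
| | * (HDMI vs. DVI) and the amdgpu_audio module parameter. | ||
| | */ | ||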
| 442 | int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder) | ||
| 443 | { | ||
| 444 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 445 | struct drm_connector *connector; | ||
| 446 | struct amdgpu_connector *amdgpu_connector; | ||
| 447 | struct amdgpu_connector_atom_dig *dig_connector; | ||
| 448 | |||
| 449 | /* dp bridges are always DP */ | ||
| 450 | if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) | ||
| 451 | return ATOM_ENCODER_MODE_DP; | ||
| 452 | |||
| 453 | /* DVO is always DVO */ | ||
| 454 | if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) || | ||
| 455 | (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)) | ||
| 456 | return ATOM_ENCODER_MODE_DVO; | ||
| 457 | |||
| 458 | connector = amdgpu_get_connector_for_encoder(encoder); | ||
| 459 | /* if we don't have an active device yet, just use one of | ||
| 460 | * the connectors tied to the encoder. | ||
| 461 | */ | ||
| 462 | if (!connector) | ||
| 463 | connector = amdgpu_get_connector_for_encoder_init(encoder); | ||
| 464 | amdgpu_connector = to_amdgpu_connector(connector); | ||
| 465 | |||
| 466 | switch (connector->connector_type) { | ||
| 467 | case DRM_MODE_CONNECTOR_DVII: | ||
| 468 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ | ||
| 469 | if (amdgpu_audio != 0) { | ||
| 470 | if (amdgpu_connector->use_digital && | ||
| 471 | (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)) | ||
| 472 | return ATOM_ENCODER_MODE_HDMI; | ||
| 473 | else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) && | ||
| 474 | (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO)) | ||
| 475 | return ATOM_ENCODER_MODE_HDMI; | ||
| 476 | else if (amdgpu_connector->use_digital) | ||
| 477 | return ATOM_ENCODER_MODE_DVI; | ||
| 478 | else | ||
| 479 | return ATOM_ENCODER_MODE_CRT; | ||
| 480 | } else if (amdgpu_connector->use_digital) { | ||
| 481 | return ATOM_ENCODER_MODE_DVI; | ||
| 482 | } else { | ||
| 483 | return ATOM_ENCODER_MODE_CRT; | ||
| 484 | } | ||
| 485 | break; | ||
| 486 | case DRM_MODE_CONNECTOR_DVID: | ||
| 487 | case DRM_MODE_CONNECTOR_HDMIA: | ||
| 488 | default: | ||
| 489 | if (amdgpu_audio != 0) { | ||
| 490 | if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE) | ||
| 491 | return ATOM_ENCODER_MODE_HDMI; | ||
| 492 | else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) && | ||
| 493 | (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO)) | ||
| 494 | return ATOM_ENCODER_MODE_HDMI; | ||
| 495 | else | ||
| 496 | return ATOM_ENCODER_MODE_DVI; | ||
| 497 | } else { | ||
| 498 | return ATOM_ENCODER_MODE_DVI; | ||
| 499 | } | ||
| 500 | break; | ||
| 501 | case DRM_MODE_CONNECTOR_LVDS: | ||
| 502 | return ATOM_ENCODER_MODE_LVDS; | ||
| 503 | break; | ||
| 504 | case DRM_MODE_CONNECTOR_DisplayPort: | ||
| 505 | dig_connector = amdgpu_connector->con_priv; | ||
| 506 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | ||
| 507 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { | ||
| 508 | return ATOM_ENCODER_MODE_DP; | ||
| 509 | } else if (amdgpu_audio != 0) { | ||
| 510 | if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE) | ||
| 511 | return ATOM_ENCODER_MODE_HDMI; | ||
| 512 | else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) && | ||
| 513 | (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO)) | ||
| 514 | return ATOM_ENCODER_MODE_HDMI; | ||
| 515 | else | ||
| 516 | return ATOM_ENCODER_MODE_DVI; | ||
| 517 | } else { | ||
| 518 | return ATOM_ENCODER_MODE_DVI; | ||
| 519 | } | ||
| 520 | break; | ||
| 521 | case DRM_MODE_CONNECTOR_eDP: | ||
| 522 | return ATOM_ENCODER_MODE_DP; | ||
| 523 | case DRM_MODE_CONNECTOR_DVIA: | ||
| 524 | case DRM_MODE_CONNECTOR_VGA: | ||
| 525 | return ATOM_ENCODER_MODE_CRT; | ||
| 526 | break; | ||
| 527 | case DRM_MODE_CONNECTOR_Composite: | ||
| 528 | case DRM_MODE_CONNECTOR_SVIDEO: | ||
| 529 | case DRM_MODE_CONNECTOR_9PinDIN: | ||
| 530 | /* fix me */ | ||
| 531 | return ATOM_ENCODER_MODE_TV; | ||
| 532 | /*return ATOM_ENCODER_MODE_CV;*/ | ||
| 533 | break; | ||
| 534 | } | ||
| 535 | } | ||
| 536 | |||
| 537 | /* | ||
| 538 | * DIG Encoder/Transmitter Setup | ||
| 539 | * | ||
| 540 | * DCE 6.0 | ||
| 541 | * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). | ||
| 542 | * Supports up to 6 digital outputs | ||
| 543 | * - 6 DIG encoder blocks. | ||
| 544 | * - DIG to PHY mapping is hardcoded | ||
| 545 | * DIG1 drives UNIPHY0 link A, A+B | ||
| 546 | * DIG2 drives UNIPHY0 link B | ||
| 547 | * DIG3 drives UNIPHY1 link A, A+B | ||
| 548 | * DIG4 drives UNIPHY1 link B | ||
| 549 | * DIG5 drives UNIPHY2 link A, A+B | ||
| 550 | * DIG6 drives UNIPHY2 link B | ||
| 551 | * | ||
| 552 | * Routing | ||
| 553 | * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) | ||
| 554 | * Examples: | ||
| 555 | * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI | ||
| 556 | * crtc1 -> dig1 -> UNIPHY0 link B -> DP | ||
| 557 | * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS | ||
| 558 | * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI | ||
| 559 | */ | ||
| 560 | |||
| 561 | union dig_encoder_control { | ||
| 562 | DIG_ENCODER_CONTROL_PS_ALLOCATION v1; | ||
| 563 | DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; | ||
| 564 | DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; | ||
| 565 | DIG_ENCODER_CONTROL_PARAMETERS_V4 v4; | ||
| 566 | }; | ||
| 567 | |||
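| | /* Program the DIG encoder through DIGxEncoderControl: encoder mode, lane | ||
| | * count (4, or 8 for dual-link) and DP link rate, per table revision. | ||
| | */ | ||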
| 568 | void | ||
| 569 | amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder, | ||
| 570 | int action, int panel_mode) | ||
| 571 | { | ||
| 572 | struct drm_device *dev = encoder->dev; | ||
| 573 | struct amdgpu_device *adev = dev->dev_private; | ||
| 574 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 575 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
| 576 | struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); | ||
| 577 | union dig_encoder_control args; | ||
| 578 | int index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl); | ||
| 579 | uint8_t frev, crev; | ||
| 580 | int dp_clock = 0; | ||
| 581 | int dp_lane_count = 0; | ||
| 582 | int hpd_id = AMDGPU_HPD_NONE; | ||
| 583 | |||
| 584 | if (connector) { | ||
| 585 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 586 | struct amdgpu_connector_atom_dig *dig_connector = | ||
| 587 | amdgpu_connector->con_priv; | ||
| 588 | |||
| 589 | dp_clock = dig_connector->dp_clock; | ||
| 590 | dp_lane_count = dig_connector->dp_lane_count; | ||
| 591 | hpd_id = amdgpu_connector->hpd.hpd; | ||
| 592 | } | ||
| 593 | |||
| 594 | /* no dig encoder assigned */ | ||
| 595 | if (dig->dig_encoder == -1) | ||
| 596 | return; | ||
| 597 | |||
| 598 | memset(&args, 0, sizeof(args)); | ||
| 599 | |||
| 600 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) | ||
| 601 | return; | ||
| 602 | |||
| 603 | switch (frev) { | ||
| 604 | case 1: | ||
| 605 | switch (crev) { | ||
| 606 | case 1: | ||
| 607 | args.v1.ucAction = action; | ||
| 608 | args.v1.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); | ||
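| | /* v1 has no panel-mode field; SETUP_PANEL_MODE goes through the v3 view, | ||
| | * which appears to alias the same byte in the parameter union | ||
| | */ | ||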
| 609 | if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) | ||
| 610 | args.v3.ucPanelMode = panel_mode; | ||
| 611 | else | ||
| 612 | args.v1.ucEncoderMode = amdgpu_atombios_encoder_get_encoder_mode(encoder); | ||
| 613 | |||
| 614 | if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) | ||
| 615 | args.v1.ucLaneNum = dp_lane_count; | ||
| 616 | else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 617 | args.v1.ucLaneNum = 8; | ||
| 618 | else | ||
| 619 | args.v1.ucLaneNum = 4; | ||
| 620 | |||
| 621 | if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000)) | ||
| 622 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; | ||
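| | /* XXX: the assignments below overwrite ucConfig and appear to drop the | ||
| | * link-rate bit OR'ed in above | ||
| | */ | ||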
| 623 | switch (amdgpu_encoder->encoder_id) { | ||
| 624 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
| 625 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; | ||
| 626 | break; | ||
| 627 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 628 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
| 629 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2; | ||
| 630 | break; | ||
| 631 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 632 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; | ||
| 633 | break; | ||
| 634 | } | ||
| 635 | if (dig->linkb) | ||
| 636 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; | ||
| 637 | else | ||
| 638 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; | ||
| 639 | break; | ||
| 640 | case 2: | ||
| 641 | case 3: | ||
| 642 | args.v3.ucAction = action; | ||
| 643 | args.v3.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); | ||
| 644 | if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) | ||
| 645 | args.v3.ucPanelMode = panel_mode; | ||
| 646 | else | ||
| 647 | args.v3.ucEncoderMode = amdgpu_atombios_encoder_get_encoder_mode(encoder); | ||
| 648 | |||
| 649 | if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode)) | ||
| 650 | args.v3.ucLaneNum = dp_lane_count; | ||
| 651 | else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 652 | args.v3.ucLaneNum = 8; | ||
| 653 | else | ||
| 654 | args.v3.ucLaneNum = 4; | ||
| 655 | |||
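| | /* the config byte is written through the v1 view; it appears to share | ||
| | * its offset with the v3 layout | ||
| | */ | ||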
| 656 | if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000)) | ||
| 657 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; | ||
| 658 | args.v3.acConfig.ucDigSel = dig->dig_encoder; | ||
| 659 | args.v3.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder); | ||
| 660 | break; | ||
| 661 | case 4: | ||
| 662 | args.v4.ucAction = action; | ||
| 663 | args.v4.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); | ||
| 664 | if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) | ||
| 665 | args.v4.ucPanelMode = panel_mode; | ||
| 666 | else | ||
| 667 | args.v4.ucEncoderMode = amdgpu_atombios_encoder_get_encoder_mode(encoder); | ||
| 668 | |||
| 669 | if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) | ||
| 670 | args.v4.ucLaneNum = dp_lane_count; | ||
| 671 | else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 672 | args.v4.ucLaneNum = 8; | ||
| 673 | else | ||
| 674 | args.v4.ucLaneNum = 4; | ||
| 675 | |||
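| | /* as above, the link rate is set through the v1 view of the config byte */ | ||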
| 676 | if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) { | ||
| 677 | if (dp_clock == 540000) | ||
| 678 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; | ||
| 679 | else if (dp_clock == 324000) | ||
| 680 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ; | ||
| 681 | else if (dp_clock == 270000) | ||
| 682 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ; | ||
| 683 | else | ||
| 684 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ; | ||
| 685 | } | ||
| 686 | args.v4.acConfig.ucDigSel = dig->dig_encoder; | ||
| 687 | args.v4.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder); | ||
| 688 | if (hpd_id == AMDGPU_HPD_NONE) | ||
| 689 | args.v4.ucHPD_ID = 0; | ||
| 690 | else | ||
| 691 | args.v4.ucHPD_ID = hpd_id + 1; | ||
| 692 | break; | ||
| 693 | default: | ||
| 694 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
| 695 | break; | ||
| 696 | } | ||
| 697 | break; | ||
| 698 | default: | ||
| 699 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
| 700 | break; | ||
| 701 | } | ||
| 702 | |||
| 703 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 704 | |||
| 705 | } | ||
| 706 | |||
| 707 | union dig_transmitter_control { | ||
| 708 | DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; | ||
| 709 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; | ||
| 710 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3; | ||
| 711 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4; | ||
| 712 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5; | ||
| 713 | }; | ||
| 714 | |||
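| | /* Program the UNIPHY/LVTMA transmitter via the ATOM transmitter control | ||
| | * tables; DP links run at dp_clock, dual-link TMDS halves the pixel clock. | ||
| | */ | ||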
| 715 | void | ||
| 716 | amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int action, | ||
| 717 | uint8_t lane_num, uint8_t lane_set) | ||
| 718 | { | ||
| 719 | struct drm_device *dev = encoder->dev; | ||
| 720 | struct amdgpu_device *adev = dev->dev_private; | ||
| 721 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 722 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
| 723 | struct drm_connector *connector; | ||
| 724 | union dig_transmitter_control args; | ||
| 725 | int index = 0; | ||
| 726 | uint8_t frev, crev; | ||
| 727 | bool is_dp = false; | ||
| 728 | int pll_id = 0; | ||
| 729 | int dp_clock = 0; | ||
| 730 | int dp_lane_count = 0; | ||
| 731 | int connector_object_id = 0; | ||
| 732 | int igp_lane_info = 0; | ||
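| | /* XXX: igp_lane_info is never filled in, so the IGP lane selection in the | ||
| | * v1 path below is effectively a no-op | ||
| | */ | ||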
| 733 | int dig_encoder = dig->dig_encoder; | ||
| 734 | int hpd_id = AMDGPU_HPD_NONE; | ||
| 735 | |||
| 736 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | ||
| 737 | connector = amdgpu_get_connector_for_encoder_init(encoder); | ||
| 738 | /* only set to avoid bailing in the dig encoder check below; the | ||
| 739 | * encoder isn't actually used for init | ||
| 740 | */ | ||
| 741 | dig_encoder = 0; | ||
| 742 | } else | ||
| 743 | connector = amdgpu_get_connector_for_encoder(encoder); | ||
| 744 | |||
| 745 | if (connector) { | ||
| 746 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 747 | struct amdgpu_connector_atom_dig *dig_connector = | ||
| 748 | amdgpu_connector->con_priv; | ||
| 749 | |||
| 750 | hpd_id = amdgpu_connector->hpd.hpd; | ||
| 751 | dp_clock = dig_connector->dp_clock; | ||
| 752 | dp_lane_count = dig_connector->dp_lane_count; | ||
| 753 | connector_object_id = | ||
| 754 | (amdgpu_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
| 755 | } | ||
| 756 | |||
| 757 | if (encoder->crtc) { | ||
| 758 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); | ||
| 759 | pll_id = amdgpu_crtc->pll_id; | ||
| 760 | } | ||
| 761 | |||
| 762 | /* no dig encoder assigned */ | ||
| 763 | if (dig_encoder == -1) | ||
| 764 | return; | ||
| 765 | |||
| 766 | if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder))) | ||
| 767 | is_dp = true; | ||
| 768 | |||
| 769 | memset(&args, 0, sizeof(args)); | ||
| 770 | |||
| 771 | switch (amdgpu_encoder->encoder_id) { | ||
| 772 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
| 773 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); | ||
| 774 | break; | ||
| 775 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
| 776 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 777 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 778 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | ||
| 779 | index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); | ||
| 780 | break; | ||
| 781 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
| 782 | index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl); | ||
| 783 | break; | ||
| 784 | } | ||
| 785 | |||
| 786 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) | ||
| 787 | return; | ||
| 788 | |||
| 789 | switch (frev) { | ||
| 790 | case 1: | ||
| 791 | switch (crev) { | ||
| 792 | case 1: | ||
| 793 | args.v1.ucAction = action; | ||
| 794 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | ||
| 795 | args.v1.usInitInfo = cpu_to_le16(connector_object_id); | ||
| 796 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { | ||
| 797 | args.v1.asMode.ucLaneSel = lane_num; | ||
| 798 | args.v1.asMode.ucLaneSet = lane_set; | ||
| 799 | } else { | ||
| 800 | if (is_dp) | ||
| 801 | args.v1.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
| 802 | else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 803 | args.v1.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10); | ||
| 804 | else | ||
| 805 | args.v1.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); | ||
| 806 | } | ||
| 807 | |||
| 808 | args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; | ||
| 809 | |||
| 810 | if (dig_encoder) | ||
| 811 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; | ||
| 812 | else | ||
| 813 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; | ||
| 814 | |||
| 815 | if ((adev->flags & AMDGPU_IS_APU) && | ||
| 816 | (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { | ||
| 817 | if (is_dp || | ||
| 818 | !amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) { | ||
| 819 | if (igp_lane_info & 0x1) | ||
| 820 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; | ||
| 821 | else if (igp_lane_info & 0x2) | ||
| 822 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; | ||
| 823 | else if (igp_lane_info & 0x4) | ||
| 824 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; | ||
| 825 | else if (igp_lane_info & 0x8) | ||
| 826 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; | ||
| 827 | } else { | ||
| 828 | if (igp_lane_info & 0x3) | ||
| 829 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; | ||
| 830 | else if (igp_lane_info & 0xc) | ||
| 831 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; | ||
| 832 | } | ||
| 833 | } | ||
| 834 | |||
| 835 | if (dig->linkb) | ||
| 836 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; | ||
| 837 | else | ||
| 838 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; | ||
| 839 | |||
| 840 | if (is_dp) | ||
| 841 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; | ||
| 842 | else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | ||
| 843 | if (dig->coherent_mode) | ||
| 844 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; | ||
| 845 | if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 846 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; | ||
| 847 | } | ||
| 848 | break; | ||
| 849 | case 2: | ||
| 850 | args.v2.ucAction = action; | ||
| 851 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | ||
| 852 | args.v2.usInitInfo = cpu_to_le16(connector_object_id); | ||
| 853 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { | ||
| 854 | args.v2.asMode.ucLaneSel = lane_num; | ||
| 855 | args.v2.asMode.ucLaneSet = lane_set; | ||
| 856 | } else { | ||
| 857 | if (is_dp) | ||
| 858 | args.v2.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
| 859 | else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 860 | args.v2.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10); | ||
| 861 | else | ||
| 862 | args.v2.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); | ||
| 863 | } | ||
| 864 | |||
| 865 | args.v2.acConfig.ucEncoderSel = dig_encoder; | ||
| 866 | if (dig->linkb) | ||
| 867 | args.v2.acConfig.ucLinkSel = 1; | ||
| 868 | |||
| 869 | switch (amdgpu_encoder->encoder_id) { | ||
| 870 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
| 871 | args.v2.acConfig.ucTransmitterSel = 0; | ||
| 872 | break; | ||
| 873 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 874 | args.v2.acConfig.ucTransmitterSel = 1; | ||
| 875 | break; | ||
| 876 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 877 | args.v2.acConfig.ucTransmitterSel = 2; | ||
| 878 | break; | ||
| 879 | } | ||
| 880 | |||
| 881 | if (is_dp) { | ||
| 882 | args.v2.acConfig.fCoherentMode = 1; | ||
| 883 | args.v2.acConfig.fDPConnector = 1; | ||
| 884 | } else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | ||
| 885 | if (dig->coherent_mode) | ||
| 886 | args.v2.acConfig.fCoherentMode = 1; | ||
| 887 | if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 888 | args.v2.acConfig.fDualLinkConnector = 1; | ||
| 889 | } | ||
| 890 | break; | ||
| 891 | case 3: | ||
| 892 | args.v3.ucAction = action; | ||
| 893 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | ||
| 894 | args.v3.usInitInfo = cpu_to_le16(connector_object_id); | ||
| 895 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { | ||
| 896 | args.v3.asMode.ucLaneSel = lane_num; | ||
| 897 | args.v3.asMode.ucLaneSet = lane_set; | ||
| 898 | } else { | ||
| 899 | if (is_dp) | ||
| 900 | args.v3.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
| 901 | else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 902 | args.v3.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10); | ||
| 903 | else | ||
| 904 | args.v3.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); | ||
| 905 | } | ||
| 906 | |||
| 907 | if (is_dp) | ||
| 908 | args.v3.ucLaneNum = dp_lane_count; | ||
| 909 | else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 910 | args.v3.ucLaneNum = 8; | ||
| 911 | else | ||
| 912 | args.v3.ucLaneNum = 4; | ||
| 913 | |||
| 914 | if (dig->linkb) | ||
| 915 | args.v3.acConfig.ucLinkSel = 1; | ||
| 916 | if (dig_encoder & 1) | ||
| 917 | args.v3.acConfig.ucEncoderSel = 1; | ||
| 918 | |||
| 919 | /* Select the PLL for the PHY | ||
| 920 | * DP PHY should be clocked from external src if there is | ||
| 921 | * one. | ||
| 922 | */ | ||
| 923 | /* On DCE4, if there is an external clock, it generates the DP ref clock */ | ||
| 924 | if (is_dp && adev->clock.dp_extclk) | ||
| 925 | args.v3.acConfig.ucRefClkSource = 2; /* external src */ | ||
| 926 | else | ||
| 927 | args.v3.acConfig.ucRefClkSource = pll_id; | ||
| 928 | |||
| 929 | switch (amdgpu_encoder->encoder_id) { | ||
| 930 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
| 931 | args.v3.acConfig.ucTransmitterSel = 0; | ||
| 932 | break; | ||
| 933 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 934 | args.v3.acConfig.ucTransmitterSel = 1; | ||
| 935 | break; | ||
| 936 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 937 | args.v3.acConfig.ucTransmitterSel = 2; | ||
| 938 | break; | ||
| 939 | } | ||
| 940 | |||
| 941 | if (is_dp) | ||
| 942 | args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */ | ||
| 943 | else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | ||
| 944 | if (dig->coherent_mode) | ||
| 945 | args.v3.acConfig.fCoherentMode = 1; | ||
| 946 | if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 947 | args.v3.acConfig.fDualLinkConnector = 1; | ||
| 948 | } | ||
| 949 | break; | ||
| 950 | case 4: | ||
| 951 | args.v4.ucAction = action; | ||
| 952 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | ||
| 953 | args.v4.usInitInfo = cpu_to_le16(connector_object_id); | ||
| 954 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { | ||
| 955 | args.v4.asMode.ucLaneSel = lane_num; | ||
| 956 | args.v4.asMode.ucLaneSet = lane_set; | ||
| 957 | } else { | ||
| 958 | if (is_dp) | ||
| 959 | args.v4.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
| 960 | else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 961 | args.v4.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10); | ||
| 962 | else | ||
| 963 | args.v4.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); | ||
| 964 | } | ||
| 965 | |||
| 966 | if (is_dp) | ||
| 967 | args.v4.ucLaneNum = dp_lane_count; | ||
| 968 | else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 969 | args.v4.ucLaneNum = 8; | ||
| 970 | else | ||
| 971 | args.v4.ucLaneNum = 4; | ||
| 972 | |||
| 973 | if (dig->linkb) | ||
| 974 | args.v4.acConfig.ucLinkSel = 1; | ||
| 975 | if (dig_encoder & 1) | ||
| 976 | args.v4.acConfig.ucEncoderSel = 1; | ||
| 977 | |||
| 978 | /* Select the PLL for the PHY | ||
| 979 | * DP PHY should be clocked from external src if there is | ||
| 980 | * one. | ||
| 981 | */ | ||
| 982 | /* On DCE5 DCPLL usually generates the DP ref clock */ | ||
| 983 | if (is_dp) { | ||
| 984 | if (adev->clock.dp_extclk) | ||
| 985 | args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK; | ||
| 986 | else | ||
| 987 | args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL; | ||
| 988 | } else | ||
| 989 | args.v4.acConfig.ucRefClkSource = pll_id; | ||
| 990 | |||
| 991 | switch (amdgpu_encoder->encoder_id) { | ||
| 992 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
| 993 | args.v4.acConfig.ucTransmitterSel = 0; | ||
| 994 | break; | ||
| 995 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 996 | args.v4.acConfig.ucTransmitterSel = 1; | ||
| 997 | break; | ||
| 998 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 999 | args.v4.acConfig.ucTransmitterSel = 2; | ||
| 1000 | break; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | if (is_dp) | ||
| 1004 | args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */ | ||
| 1005 | else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | ||
| 1006 | if (dig->coherent_mode) | ||
| 1007 | args.v4.acConfig.fCoherentMode = 1; | ||
| 1008 | if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 1009 | args.v4.acConfig.fDualLinkConnector = 1; | ||
| 1010 | } | ||
| 1011 | break; | ||
| 1012 | case 5: | ||
| 1013 | args.v5.ucAction = action; | ||
| 1014 | if (is_dp) | ||
| 1015 | args.v5.usSymClock = cpu_to_le16(dp_clock / 10); | ||
| 1016 | else | ||
| 1017 | args.v5.usSymClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); | ||
| 1018 | |||
| 1019 | switch (amdgpu_encoder->encoder_id) { | ||
| 1020 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
| 1021 | if (dig->linkb) | ||
| 1022 | args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYB; | ||
| 1023 | else | ||
| 1024 | args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYA; | ||
| 1025 | break; | ||
| 1026 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 1027 | if (dig->linkb) | ||
| 1028 | args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYD; | ||
| 1029 | else | ||
| 1030 | args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYC; | ||
| 1031 | break; | ||
| 1032 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 1033 | if (dig->linkb) | ||
| 1034 | args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYF; | ||
| 1035 | else | ||
| 1036 | args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE; | ||
| 1037 | break; | ||
| 1038 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | ||
| 1039 | args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYG; | ||
| 1040 | break; | ||
| 1041 | } | ||
| 1042 | if (is_dp) | ||
| 1043 | args.v5.ucLaneNum = dp_lane_count; | ||
| 1044 | else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 1045 | args.v5.ucLaneNum = 8; | ||
| 1046 | else | ||
| 1047 | args.v5.ucLaneNum = 4; | ||
| 1048 | args.v5.ucConnObjId = connector_object_id; | ||
| 1049 | args.v5.ucDigMode = amdgpu_atombios_encoder_get_encoder_mode(encoder); | ||
| 1050 | |||
| 1051 | if (is_dp && adev->clock.dp_extclk) | ||
| 1052 | args.v5.asConfig.ucPhyClkSrcId = ENCODER_REFCLK_SRC_EXTCLK; | ||
| 1053 | else | ||
| 1054 | args.v5.asConfig.ucPhyClkSrcId = pll_id; | ||
| 1055 | |||
| 1056 | if (is_dp) | ||
| 1057 | args.v5.asConfig.ucCoherentMode = 1; /* DP requires coherent */ | ||
| 1058 | else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | ||
| 1059 | if (dig->coherent_mode) | ||
| 1060 | args.v5.asConfig.ucCoherentMode = 1; | ||
| 1061 | } | ||
| 1062 | if (hpd_id == AMDGPU_HPD_NONE) | ||
| 1063 | args.v5.asConfig.ucHPDSel = 0; | ||
| 1064 | else | ||
| 1065 | args.v5.asConfig.ucHPDSel = hpd_id + 1; | ||
| 1066 | args.v5.ucDigEncoderSel = 1 << dig_encoder; | ||
| 1067 | args.v5.ucDPLaneSet = lane_set; | ||
| 1068 | break; | ||
| 1069 | default: | ||
| 1070 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
| 1071 | break; | ||
| 1072 | } | ||
| 1073 | break; | ||
| 1074 | default: | ||
| 1075 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
| 1076 | break; | ||
| 1077 | } | ||
| 1078 | |||
| 1079 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1080 | } | ||
| 1081 | |||
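| | /* Toggle the eDP panel power rail via UNIPHYTransmitterControl; on power-on, | ||
| | * poll HPD for up to 300 ms so the panel is up before link training starts. | ||
| | */ | ||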
| 1082 | bool | ||
| 1083 | amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector, | ||
| 1084 | int action) | ||
| 1085 | { | ||
| 1086 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 1087 | struct drm_device *dev = amdgpu_connector->base.dev; | ||
| 1088 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1089 | union dig_transmitter_control args; | ||
| 1090 | int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); | ||
| 1091 | uint8_t frev, crev; | ||
| 1092 | |||
| 1093 | if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) | ||
| 1094 | goto done; | ||
| 1095 | |||
| 1096 | if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && | ||
| 1097 | (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) | ||
| 1098 | goto done; | ||
| 1099 | |||
| 1100 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) | ||
| 1101 | goto done; | ||
| 1102 | |||
| 1103 | memset(&args, 0, sizeof(args)); | ||
| 1104 | |||
| 1105 | args.v1.ucAction = action; | ||
| 1106 | |||
| 1107 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1108 | |||
| 1109 | /* wait for the panel to power up */ | ||
| 1110 | if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) { | ||
| 1111 | int i; | ||
| 1112 | |||
| 1113 | for (i = 0; i < 300; i++) { | ||
| 1114 | if (amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) | ||
| 1115 | return true; | ||
| 1116 | mdelay(1); | ||
| 1117 | } | ||
| 1118 | return false; | ||
| 1119 | } | ||
| 1120 | done: | ||
| 1121 | return true; | ||
| 1122 | } | ||
| 1123 | |||
| 1124 | union external_encoder_control { | ||
| 1125 | EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1; | ||
| 1126 | EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3; | ||
| 1127 | }; | ||
| 1128 | |||
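| | /* Configure an external encoder (e.g. a DP bridge) through | ||
| | * ExternalEncoderControl; frev 2 crev 3 selects the encoder by object enum. | ||
| | */ | ||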
| 1129 | static void | ||
| 1130 | amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder, | ||
| 1131 | struct drm_encoder *ext_encoder, | ||
| 1132 | int action) | ||
| 1133 | { | ||
| 1134 | struct drm_device *dev = encoder->dev; | ||
| 1135 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1136 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 1137 | struct amdgpu_encoder *ext_amdgpu_encoder = to_amdgpu_encoder(ext_encoder); | ||
| 1138 | union external_encoder_control args; | ||
| 1139 | struct drm_connector *connector; | ||
| 1140 | int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); | ||
| 1141 | u8 frev, crev; | ||
| 1142 | int dp_clock = 0; | ||
| 1143 | int dp_lane_count = 0; | ||
| 1144 | int connector_object_id = 0; | ||
| 1145 | u32 ext_enum = (ext_amdgpu_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; | ||
| 1146 | |||
| 1147 | if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) | ||
| 1148 | connector = amdgpu_get_connector_for_encoder_init(encoder); | ||
| 1149 | else | ||
| 1150 | connector = amdgpu_get_connector_for_encoder(encoder); | ||
| 1151 | |||
| 1152 | if (connector) { | ||
| 1153 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 1154 | struct amdgpu_connector_atom_dig *dig_connector = | ||
| 1155 | amdgpu_connector->con_priv; | ||
| 1156 | |||
| 1157 | dp_clock = dig_connector->dp_clock; | ||
| 1158 | dp_lane_count = dig_connector->dp_lane_count; | ||
| 1159 | connector_object_id = | ||
| 1160 | (amdgpu_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
| 1161 | } | ||
| 1162 | |||
| 1163 | memset(&args, 0, sizeof(args)); | ||
| 1164 | |||
| 1165 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) | ||
| 1166 | return; | ||
| 1167 | |||
| 1168 | switch (frev) { | ||
| 1169 | case 1: | ||
| 1170 | /* no params on frev 1 */ | ||
| 1171 | break; | ||
| 1172 | case 2: | ||
| 1173 | switch (crev) { | ||
| 1174 | case 1: | ||
| 1175 | case 2: | ||
| 1176 | args.v1.sDigEncoder.ucAction = action; | ||
| 1177 | args.v1.sDigEncoder.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); | ||
| 1178 | args.v1.sDigEncoder.ucEncoderMode = | ||
| 1179 | amdgpu_atombios_encoder_get_encoder_mode(encoder); | ||
| 1180 | |||
| 1181 | if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) { | ||
| 1182 | if (dp_clock == 270000) | ||
| 1183 | args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; | ||
| 1184 | args.v1.sDigEncoder.ucLaneNum = dp_lane_count; | ||
| 1185 | } else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 1186 | args.v1.sDigEncoder.ucLaneNum = 8; | ||
| 1187 | else | ||
| 1188 | args.v1.sDigEncoder.ucLaneNum = 4; | ||
| 1189 | break; | ||
| 1190 | case 3: | ||
| 1191 | args.v3.sExtEncoder.ucAction = action; | ||
| 1192 | if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) | ||
| 1193 | args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id); | ||
| 1194 | else | ||
| 1195 | args.v3.sExtEncoder.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); | ||
| 1196 | args.v3.sExtEncoder.ucEncoderMode = | ||
| 1197 | amdgpu_atombios_encoder_get_encoder_mode(encoder); | ||
| 1198 | |||
| 1199 | if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) { | ||
| 1200 | if (dp_clock == 270000) | ||
| 1201 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; | ||
| 1202 | else if (dp_clock == 540000) | ||
| 1203 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ; | ||
| 1204 | args.v3.sExtEncoder.ucLaneNum = dp_lane_count; | ||
| 1205 | } else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) | ||
| 1206 | args.v3.sExtEncoder.ucLaneNum = 8; | ||
| 1207 | else | ||
| 1208 | args.v3.sExtEncoder.ucLaneNum = 4; | ||
| 1209 | switch (ext_enum) { | ||
| 1210 | case GRAPH_OBJECT_ENUM_ID1: | ||
| 1211 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1; | ||
| 1212 | break; | ||
| 1213 | case GRAPH_OBJECT_ENUM_ID2: | ||
| 1214 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2; | ||
| 1215 | break; | ||
| 1216 | case GRAPH_OBJECT_ENUM_ID3: | ||
| 1217 | args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3; | ||
| 1218 | break; | ||
| 1219 | } | ||
| 1220 | args.v3.sExtEncoder.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder); | ||
| 1221 | break; | ||
| 1222 | default: | ||
| 1223 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); | ||
| 1224 | return; | ||
| 1225 | } | ||
| 1226 | break; | ||
| 1227 | default: | ||
| 1228 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); | ||
| 1229 | return; | ||
| 1230 | } | ||
| 1231 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1232 | } | ||
| 1233 | |||
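| | /* Enable order: DIG encoder setup and panel mode, eDP panel power, transmitter | ||
| | * enable, then DP link training and video-on; disable runs roughly in reverse. | ||
| | */ | ||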
| 1234 | static void | ||
| 1235 | amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action) | ||
| 1236 | { | ||
| 1237 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 1238 | struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder); | ||
| 1239 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
| 1240 | struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); | ||
| 1241 | struct amdgpu_connector *amdgpu_connector = NULL; | ||
| 1242 | struct amdgpu_connector_atom_dig *amdgpu_dig_connector = NULL; | ||
| 1243 | |||
| 1244 | if (connector) { | ||
| 1245 | amdgpu_connector = to_amdgpu_connector(connector); | ||
| 1246 | amdgpu_dig_connector = amdgpu_connector->con_priv; | ||
| 1247 | } | ||
| 1248 | |||
| 1249 | if (action == ATOM_ENABLE) { | ||
| 1250 | if (!connector) | ||
| 1251 | dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; | ||
| 1252 | else | ||
| 1253 | dig->panel_mode = amdgpu_atombios_dp_get_panel_mode(encoder, connector); | ||
| 1254 | |||
| 1255 | /* setup and enable the encoder */ | ||
| 1256 | amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_SETUP, 0); | ||
| 1257 | amdgpu_atombios_encoder_setup_dig_encoder(encoder, | ||
| 1258 | ATOM_ENCODER_CMD_SETUP_PANEL_MODE, | ||
| 1259 | dig->panel_mode); | ||
| 1260 | if (ext_encoder) | ||
| 1261 | amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, | ||
| 1262 | EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); | ||
| 1263 | if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) && | ||
| 1264 | connector) { | ||
| 1265 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | ||
| 1266 | amdgpu_atombios_encoder_set_edp_panel_power(connector, | ||
| 1267 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
| 1268 | amdgpu_dig_connector->edp_on = true; | ||
| 1269 | } | ||
| 1270 | } | ||
| 1271 | /* enable the transmitter */ | ||
| 1272 | amdgpu_atombios_encoder_setup_dig_transmitter(encoder, | ||
| 1273 | ATOM_TRANSMITTER_ACTION_ENABLE, | ||
| 1274 | 0, 0); | ||
| 1275 | if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) && | ||
| 1276 | connector) { | ||
| 1277 | /* DP_SET_POWER_D0 is set in amdgpu_atombios_dp_link_train */ | ||
| 1278 | amdgpu_atombios_dp_link_train(encoder, connector); | ||
| 1279 | amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); | ||
| 1280 | } | ||
| 1281 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
| 1282 | amdgpu_atombios_encoder_setup_dig_transmitter(encoder, | ||
| 1283 | ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); | ||
| 1284 | if (ext_encoder) | ||
| 1285 | amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); | ||
| 1286 | } else { | ||
| 1287 | if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) && | ||
| 1288 | connector) | ||
| 1289 | amdgpu_atombios_encoder_setup_dig_encoder(encoder, | ||
| 1290 | ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); | ||
| 1291 | if (ext_encoder) | ||
| 1292 | amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_DISABLE); | ||
| 1293 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
| 1294 | amdgpu_atombios_encoder_setup_dig_transmitter(encoder, | ||
| 1295 | ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); | ||
| 1296 | |||
| 1297 | if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) && | ||
| 1298 | connector) | ||
| 1299 | amdgpu_atombios_dp_set_rx_power_state(connector, DP_SET_POWER_D3); | ||
| 1300 | /* disable the transmitter */ | ||
| 1301 | amdgpu_atombios_encoder_setup_dig_transmitter(encoder, | ||
| 1302 | ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | ||
| 1303 | if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) && | ||
| 1304 | connector) { | ||
| 1305 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | ||
| 1306 | amdgpu_atombios_encoder_set_edp_panel_power(connector, | ||
| 1307 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | ||
| 1308 | amdgpu_dig_connector->edp_on = false; | ||
| 1309 | } | ||
| 1310 | } | ||
| 1311 | } | ||
| 1312 | } | ||
| 1313 | |||
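| | /* Route DPMS requests to the DIG/DVO/DAC helpers for this encoder type. */ | ||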
| 1314 | void | ||
| 1315 | amdgpu_atombios_encoder_dpms(struct drm_encoder *encoder, int mode) | ||
| 1316 | { | ||
| 1317 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 1318 | |||
| 1319 | DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", | ||
| 1320 | amdgpu_encoder->encoder_id, mode, amdgpu_encoder->devices, | ||
| 1321 | amdgpu_encoder->active_device); | ||
| 1322 | switch (amdgpu_encoder->encoder_id) { | ||
| 1323 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
| 1324 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 1325 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 1326 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | ||
| 1327 | switch (mode) { | ||
| 1328 | case DRM_MODE_DPMS_ON: | ||
| 1329 | amdgpu_atombios_encoder_setup_dig(encoder, ATOM_ENABLE); | ||
| 1330 | break; | ||
| 1331 | case DRM_MODE_DPMS_STANDBY: | ||
| 1332 | case DRM_MODE_DPMS_SUSPEND: | ||
| 1333 | case DRM_MODE_DPMS_OFF: | ||
| 1334 | amdgpu_atombios_encoder_setup_dig(encoder, ATOM_DISABLE); | ||
| 1335 | break; | ||
| 1336 | } | ||
| 1337 | break; | ||
| 1338 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
| 1339 | switch (mode) { | ||
| 1340 | case DRM_MODE_DPMS_ON: | ||
| 1341 | amdgpu_atombios_encoder_setup_dvo(encoder, ATOM_ENABLE); | ||
| 1342 | break; | ||
| 1343 | case DRM_MODE_DPMS_STANDBY: | ||
| 1344 | case DRM_MODE_DPMS_SUSPEND: | ||
| 1345 | case DRM_MODE_DPMS_OFF: | ||
| 1346 | amdgpu_atombios_encoder_setup_dvo(encoder, ATOM_DISABLE); | ||
| 1347 | break; | ||
| 1348 | } | ||
| 1349 | break; | ||
| 1350 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
| 1351 | switch (mode) { | ||
| 1352 | case DRM_MODE_DPMS_ON: | ||
| 1353 | amdgpu_atombios_encoder_setup_dac(encoder, ATOM_ENABLE); | ||
| 1354 | break; | ||
| 1355 | case DRM_MODE_DPMS_STANDBY: | ||
| 1356 | case DRM_MODE_DPMS_SUSPEND: | ||
| 1357 | case DRM_MODE_DPMS_OFF: | ||
| 1358 | amdgpu_atombios_encoder_setup_dac(encoder, ATOM_DISABLE); | ||
| 1359 | break; | ||
| 1360 | } | ||
| 1361 | break; | ||
| 1362 | default: | ||
| 1363 | return; | ||
| 1364 | } | ||
| 1365 | } | ||
| 1366 | |||
| 1367 | union crtc_source_param { | ||
| 1368 | SELECT_CRTC_SOURCE_PS_ALLOCATION v1; | ||
| 1369 | SELECT_CRTC_SOURCE_PARAMETERS_V2 v2; | ||
| 1370 | SELECT_CRTC_SOURCE_PARAMETERS_V3 v3; | ||
| 1371 | }; | ||
| 1372 | |||
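| | /* Tell the firmware which CRTC feeds this encoder (SelectCRTC_Source); | ||
| | * newer table revisions also carry the encoder mode and destination bpc. | ||
| | */ | ||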
| 1373 | void | ||
| 1374 | amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder) | ||
| 1375 | { | ||
| 1376 | struct drm_device *dev = encoder->dev; | ||
| 1377 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1378 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 1379 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); | ||
| 1380 | union crtc_source_param args; | ||
| 1381 | int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); | ||
| 1382 | uint8_t frev, crev; | ||
| 1383 | struct amdgpu_encoder_atom_dig *dig; | ||
| 1384 | |||
| 1385 | memset(&args, 0, sizeof(args)); | ||
| 1386 | |||
| 1387 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) | ||
| 1388 | return; | ||
| 1389 | |||
| 1390 | switch (frev) { | ||
| 1391 | case 1: | ||
| 1392 | switch (crev) { | ||
| 1393 | case 1: | ||
| 1394 | default: | ||
| 1395 | args.v1.ucCRTC = amdgpu_crtc->crtc_id; | ||
| 1396 | switch (amdgpu_encoder->encoder_id) { | ||
| 1397 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | ||
| 1398 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
| 1399 | args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX; | ||
| 1400 | break; | ||
| 1401 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | ||
| 1402 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
| 1403 | if (amdgpu_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) | ||
| 1404 | args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX; | ||
| 1405 | else | ||
| 1406 | args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX; | ||
| 1407 | break; | ||
| 1408 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | ||
| 1409 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | ||
| 1410 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
| 1411 | args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX; | ||
| 1412 | break; | ||
| 1413 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | ||
| 1414 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
| 1415 | if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | ||
| 1416 | args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX; | ||
| 1417 | else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
| 1418 | args.v1.ucDevice = ATOM_DEVICE_CV_INDEX; | ||
| 1419 | else | ||
| 1420 | args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX; | ||
| 1421 | break; | ||
| 1422 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | ||
| 1423 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
| 1424 | if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | ||
| 1425 | args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX; | ||
| 1426 | else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
| 1427 | args.v1.ucDevice = ATOM_DEVICE_CV_INDEX; | ||
| 1428 | else | ||
| 1429 | args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX; | ||
| 1430 | break; | ||
| 1431 | } | ||
| 1432 | break; | ||
| 1433 | case 2: | ||
| 1434 | args.v2.ucCRTC = amdgpu_crtc->crtc_id; | ||
| 1435 | if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) { | ||
| 1436 | struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); | ||
| 1437 | |||
| 1438 | if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) | ||
| 1439 | args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; | ||
| 1440 | else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA) | ||
| 1441 | args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; | ||
| 1442 | else | ||
| 1443 | args.v2.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder); | ||
| 1444 | } else if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
| 1445 | args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; | ||
| 1446 | } else { | ||
| 1447 | args.v2.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder); | ||
| 1448 | } | ||
| 1449 | switch (amdgpu_encoder->encoder_id) { | ||
| 1450 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
| 1451 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 1452 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 1453 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | ||
| 1454 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
| 1455 | dig = amdgpu_encoder->enc_priv; | ||
| 1456 | switch (dig->dig_encoder) { | ||
| 1457 | case 0: | ||
| 1458 | args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; | ||
| 1459 | break; | ||
| 1460 | case 1: | ||
| 1461 | args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; | ||
| 1462 | break; | ||
| 1463 | case 2: | ||
| 1464 | args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID; | ||
| 1465 | break; | ||
| 1466 | case 3: | ||
| 1467 | args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID; | ||
| 1468 | break; | ||
| 1469 | case 4: | ||
| 1470 | args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID; | ||
| 1471 | break; | ||
| 1472 | case 5: | ||
| 1473 | args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID; | ||
| 1474 | break; | ||
| 1475 | case 6: | ||
| 1476 | args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID; | ||
| 1477 | break; | ||
| 1478 | } | ||
| 1479 | break; | ||
| 1480 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
| 1481 | args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; | ||
| 1482 | break; | ||
| 1483 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
| 1484 | if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | ||
| 1485 | args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; | ||
| 1486 | else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
| 1487 | args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; | ||
| 1488 | else | ||
| 1489 | args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID; | ||
| 1490 | break; | ||
| 1491 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
| 1492 | if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | ||
| 1493 | args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; | ||
| 1494 | else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
| 1495 | args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; | ||
| 1496 | else | ||
| 1497 | args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID; | ||
| 1498 | break; | ||
| 1499 | } | ||
| 1500 | break; | ||
| 1501 | case 3: | ||
| 1502 | args.v3.ucCRTC = amdgpu_crtc->crtc_id; | ||
| 1503 | if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) { | ||
| 1504 | struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); | ||
| 1505 | |||
| 1506 | if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) | ||
| 1507 | args.v3.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; | ||
| 1508 | else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA) | ||
| 1509 | args.v3.ucEncodeMode = ATOM_ENCODER_MODE_CRT; | ||
| 1510 | else | ||
| 1511 | args.v3.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder); | ||
| 1512 | } else if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
| 1513 | args.v3.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; | ||
| 1514 | } else { | ||
| 1515 | args.v3.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder); | ||
| 1516 | } | ||
| 1517 | args.v3.ucDstBpc = amdgpu_atombios_encoder_get_bpc(encoder); | ||
| 1518 | switch (amdgpu_encoder->encoder_id) { | ||
| 1519 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
| 1520 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 1521 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 1522 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | ||
| 1523 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
| 1524 | dig = amdgpu_encoder->enc_priv; | ||
| 1525 | switch (dig->dig_encoder) { | ||
| 1526 | case 0: | ||
| 1527 | args.v3.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; | ||
| 1528 | break; | ||
| 1529 | case 1: | ||
| 1530 | args.v3.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; | ||
| 1531 | break; | ||
| 1532 | case 2: | ||
| 1533 | args.v3.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID; | ||
| 1534 | break; | ||
| 1535 | case 3: | ||
| 1536 | args.v3.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID; | ||
| 1537 | break; | ||
| 1538 | case 4: | ||
| 1539 | args.v3.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID; | ||
| 1540 | break; | ||
| 1541 | case 5: | ||
| 1542 | args.v3.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID; | ||
| 1543 | break; | ||
| 1544 | case 6: | ||
| 1545 | args.v3.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID; | ||
| 1546 | break; | ||
| 1547 | } | ||
| 1548 | break; | ||
| 1549 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
| 1550 | args.v3.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; | ||
| 1551 | break; | ||
| 1552 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
| 1553 | if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | ||
| 1554 | args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID; | ||
| 1555 | else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
| 1556 | args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID; | ||
| 1557 | else | ||
| 1558 | args.v3.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID; | ||
| 1559 | break; | ||
| 1560 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
| 1561 | if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | ||
| 1562 | args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID; | ||
| 1563 | else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
| 1564 | args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID; | ||
| 1565 | else | ||
| 1566 | args.v3.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID; | ||
| 1567 | break; | ||
| 1568 | } | ||
| 1569 | break; | ||
| 1570 | } | ||
| 1571 | break; | ||
| 1572 | default: | ||
| 1573 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); | ||
| 1574 | return; | ||
| 1575 | } | ||
| 1576 | |||
| 1577 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1578 | } | ||
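
The function above follows the standard AtomBIOS command-table idiom: parse the command table's header out of the VBIOS to learn its major/minor revision, fill the matching member of a versioned parameter union, and hand the packed arguments to the bytecode interpreter. A minimal sketch of that idiom, with hypothetical table and struct names standing in for the real AtomBIOS definitions:

```c
/* EXAMPLE_* names are hypothetical stand-ins for real AtomBIOS structs. */
typedef struct { uint8_t ucAction; uint8_t ucPadding[3]; } EXAMPLE_PARAMETERS_V1;
typedef struct { uint8_t ucAction; uint8_t ucFlags; uint8_t ucPadding[2]; } EXAMPLE_PARAMETERS_V2;

union example_params {
	EXAMPLE_PARAMETERS_V1 v1;
	EXAMPLE_PARAMETERS_V2 v2;
};

static void example_exec_table(struct amdgpu_device *adev, int index)
{
	union example_params args;
	uint8_t frev, crev;

	memset(&args, 0, sizeof(args));

	/* 1. read the table header to learn which revision the VBIOS carries */
	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context,
					  index, &frev, &crev))
		return;

	/* 2. fill the union member matching the table revision */
	switch (crev) {
	case 1:
		args.v1.ucAction = 1;
		break;
	case 2:
		args.v2.ucAction = 1;
		break;
	default:
		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
		return;
	}

	/* 3. run the table's bytecode against the packed arguments */
	amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
				  (uint32_t *)&args);
}
```
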
| 1579 | |||
| 1580 | /* This only needs to be called once at startup */ | ||
| 1581 | void | ||
| 1582 | amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev) | ||
| 1583 | { | ||
| 1584 | struct drm_device *dev = adev->ddev; | ||
| 1585 | struct drm_encoder *encoder; | ||
| 1586 | |||
| 1587 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
| 1588 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 1589 | struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder); | ||
| 1590 | |||
| 1591 | switch (amdgpu_encoder->encoder_id) { | ||
| 1592 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
| 1593 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 1594 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 1595 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | ||
| 1596 | amdgpu_atombios_encoder_setup_dig_transmitter(encoder, ATOM_TRANSMITTER_ACTION_INIT, | ||
| 1597 | 0, 0); | ||
| 1598 | break; | ||
| 1599 | } | ||
| 1600 | |||
| 1601 | if (ext_encoder) | ||
| 1602 | amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, | ||
| 1603 | EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT); | ||
| 1604 | } | ||
| 1605 | } | ||
| 1606 | |||
| 1607 | static bool | ||
| 1608 | amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder, | ||
| 1609 | struct drm_connector *connector) | ||
| 1610 | { | ||
| 1611 | struct drm_device *dev = encoder->dev; | ||
| 1612 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1613 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 1614 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 1615 | |||
| 1616 | if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | | ||
| 1617 | ATOM_DEVICE_CV_SUPPORT | | ||
| 1618 | ATOM_DEVICE_CRT_SUPPORT)) { | ||
| 1619 | DAC_LOAD_DETECTION_PS_ALLOCATION args; | ||
| 1620 | int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection); | ||
| 1621 | uint8_t frev, crev; | ||
| 1622 | |||
| 1623 | memset(&args, 0, sizeof(args)); | ||
| 1624 | |||
| 1625 | if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) | ||
| 1626 | return false; | ||
| 1627 | |||
| 1628 | args.sDacload.ucMisc = 0; | ||
| 1629 | |||
| 1630 | if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) || | ||
| 1631 | (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1)) | ||
| 1632 | args.sDacload.ucDacType = ATOM_DAC_A; | ||
| 1633 | else | ||
| 1634 | args.sDacload.ucDacType = ATOM_DAC_B; | ||
| 1635 | |||
| 1636 | if (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) | ||
| 1637 | args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT); | ||
| 1638 | else if (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) | ||
| 1639 | args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT); | ||
| 1640 | else if (amdgpu_connector->devices & ATOM_DEVICE_CV_SUPPORT) { | ||
| 1641 | args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT); | ||
| 1642 | if (crev >= 3) | ||
| 1643 | args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; | ||
| 1644 | } else if (amdgpu_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { | ||
| 1645 | args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT); | ||
| 1646 | if (crev >= 3) | ||
| 1647 | args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; | ||
| 1648 | } | ||
| 1649 | |||
| 1650 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 1651 | |||
| 1652 | return true; | ||
| 1653 | } else | ||
| 1654 | return false; | ||
| 1655 | } | ||
| 1656 | |||
| 1657 | enum drm_connector_status | ||
| 1658 | amdgpu_atombios_encoder_dac_detect(struct drm_encoder *encoder, | ||
| 1659 | struct drm_connector *connector) | ||
| 1660 | { | ||
| 1661 | struct drm_device *dev = encoder->dev; | ||
| 1662 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1663 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 1664 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 1665 | uint32_t bios_0_scratch; | ||
| 1666 | |||
| 1667 | if (!amdgpu_atombios_encoder_dac_load_detect(encoder, connector)) { | ||
| 1668 | DRM_DEBUG_KMS("detect returned false\n"); | ||
| 1669 | return connector_status_unknown; | ||
| 1670 | } | ||
| 1671 | |||
| 1672 | bios_0_scratch = RREG32(mmBIOS_SCRATCH_0); | ||
| 1673 | |||
| 1674 | DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, amdgpu_encoder->devices); | ||
| 1675 | if (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { | ||
| 1676 | if (bios_0_scratch & ATOM_S0_CRT1_MASK) | ||
| 1677 | return connector_status_connected; | ||
| 1678 | } | ||
| 1679 | if (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { | ||
| 1680 | if (bios_0_scratch & ATOM_S0_CRT2_MASK) | ||
| 1681 | return connector_status_connected; | ||
| 1682 | } | ||
| 1683 | if (amdgpu_connector->devices & ATOM_DEVICE_CV_SUPPORT) { | ||
| 1684 | if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) | ||
| 1685 | return connector_status_connected; | ||
| 1686 | } | ||
| 1687 | if (amdgpu_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { | ||
| 1688 | if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) | ||
| 1689 | return connector_status_connected; /* CTV */ | ||
| 1690 | else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) | ||
| 1691 | return connector_status_connected; /* STV */ | ||
| 1692 | } | ||
| 1693 | return connector_status_disconnected; | ||
| 1694 | } | ||
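
Detection is two-phase: amdgpu_atombios_encoder_dac_load_detect() fires the DAC_LoadDetection command table, and the BIOS then latches the result into scratch register 0 for the driver to decode. A minimal sketch, assuming a connector that only exposes CRT1:

```c
/* Hedged sketch: with CRT1 the detect path above reduces to a single
 * scratch-bit test once DAC_LoadDetection has run. */
static enum drm_connector_status
crt1_status_from_scratch(struct amdgpu_device *adev)
{
	u32 scratch = RREG32(mmBIOS_SCRATCH_0);

	return (scratch & ATOM_S0_CRT1_MASK) ?
		connector_status_connected : connector_status_disconnected;
}
```
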
| 1695 | |||
| 1696 | enum drm_connector_status | ||
| 1697 | amdgpu_atombios_encoder_dig_detect(struct drm_encoder *encoder, | ||
| 1698 | struct drm_connector *connector) | ||
| 1699 | { | ||
| 1700 | struct drm_device *dev = encoder->dev; | ||
| 1701 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1702 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 1703 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
| 1704 | struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder); | ||
| 1705 | u32 bios_0_scratch; | ||
| 1706 | |||
| 1707 | if (!ext_encoder) | ||
| 1708 | return connector_status_unknown; | ||
| 1709 | |||
| 1710 | if ((amdgpu_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0) | ||
| 1711 | return connector_status_unknown; | ||
| 1712 | |||
| 1713 | /* load detect on the dp bridge */ | ||
| 1714 | amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, | ||
| 1715 | EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION); | ||
| 1716 | |||
| 1717 | bios_0_scratch = RREG32(mmBIOS_SCRATCH_0); | ||
| 1718 | |||
| 1719 | DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, amdgpu_encoder->devices); | ||
| 1720 | if (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { | ||
| 1721 | if (bios_0_scratch & ATOM_S0_CRT1_MASK) | ||
| 1722 | return connector_status_connected; | ||
| 1723 | } | ||
| 1724 | if (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { | ||
| 1725 | if (bios_0_scratch & ATOM_S0_CRT2_MASK) | ||
| 1726 | return connector_status_connected; | ||
| 1727 | } | ||
| 1728 | if (amdgpu_connector->devices & ATOM_DEVICE_CV_SUPPORT) { | ||
| 1729 | if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) | ||
| 1730 | return connector_status_connected; | ||
| 1731 | } | ||
| 1732 | if (amdgpu_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { | ||
| 1733 | if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) | ||
| 1734 | return connector_status_connected; /* CTV */ | ||
| 1735 | else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) | ||
| 1736 | return connector_status_connected; /* STV */ | ||
| 1737 | } | ||
| 1738 | return connector_status_disconnected; | ||
| 1739 | } | ||
| 1740 | |||
| 1741 | void | ||
| 1742 | amdgpu_atombios_encoder_setup_ext_encoder_ddc(struct drm_encoder *encoder) | ||
| 1743 | { | ||
| 1744 | struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder); | ||
| 1745 | |||
| 1746 | if (ext_encoder) | ||
| 1747 | /* ddc_setup on the dp bridge */ | ||
| 1748 | amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, | ||
| 1749 | EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP); | ||
| 1750 | |||
| 1751 | } | ||
| 1752 | |||
| 1753 | void | ||
| 1754 | amdgpu_atombios_encoder_set_bios_scratch_regs(struct drm_connector *connector, | ||
| 1755 | struct drm_encoder *encoder, | ||
| 1756 | bool connected) | ||
| 1757 | { | ||
| 1758 | struct drm_device *dev = connector->dev; | ||
| 1759 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1760 | struct amdgpu_connector *amdgpu_connector = | ||
| 1761 | to_amdgpu_connector(connector); | ||
| 1762 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
| 1763 | uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch; | ||
| 1764 | |||
| 1765 | bios_0_scratch = RREG32(mmBIOS_SCRATCH_0); | ||
| 1766 | bios_3_scratch = RREG32(mmBIOS_SCRATCH_3); | ||
| 1767 | bios_6_scratch = RREG32(mmBIOS_SCRATCH_6); | ||
| 1768 | |||
| 1769 | if ((amdgpu_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) && | ||
| 1770 | (amdgpu_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) { | ||
| 1771 | if (connected) { | ||
| 1772 | DRM_DEBUG_KMS("LCD1 connected\n"); | ||
| 1773 | bios_0_scratch |= ATOM_S0_LCD1; | ||
| 1774 | bios_3_scratch |= ATOM_S3_LCD1_ACTIVE; | ||
| 1775 | bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1; | ||
| 1776 | } else { | ||
| 1777 | DRM_DEBUG_KMS("LCD1 disconnected\n"); | ||
| 1778 | bios_0_scratch &= ~ATOM_S0_LCD1; | ||
| 1779 | bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE; | ||
| 1780 | bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1; | ||
| 1781 | } | ||
| 1782 | } | ||
| 1783 | if ((amdgpu_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) && | ||
| 1784 | (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) { | ||
| 1785 | if (connected) { | ||
| 1786 | DRM_DEBUG_KMS("CRT1 connected\n"); | ||
| 1787 | bios_0_scratch |= ATOM_S0_CRT1_COLOR; | ||
| 1788 | bios_3_scratch |= ATOM_S3_CRT1_ACTIVE; | ||
| 1789 | bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1; | ||
| 1790 | } else { | ||
| 1791 | DRM_DEBUG_KMS("CRT1 disconnected\n"); | ||
| 1792 | bios_0_scratch &= ~ATOM_S0_CRT1_MASK; | ||
| 1793 | bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE; | ||
| 1794 | bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1; | ||
| 1795 | } | ||
| 1796 | } | ||
| 1797 | if ((amdgpu_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) && | ||
| 1798 | (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) { | ||
| 1799 | if (connected) { | ||
| 1800 | DRM_DEBUG_KMS("CRT2 connected\n"); | ||
| 1801 | bios_0_scratch |= ATOM_S0_CRT2_COLOR; | ||
| 1802 | bios_3_scratch |= ATOM_S3_CRT2_ACTIVE; | ||
| 1803 | bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2; | ||
| 1804 | } else { | ||
| 1805 | DRM_DEBUG_KMS("CRT2 disconnected\n"); | ||
| 1806 | bios_0_scratch &= ~ATOM_S0_CRT2_MASK; | ||
| 1807 | bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE; | ||
| 1808 | bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2; | ||
| 1809 | } | ||
| 1810 | } | ||
| 1811 | if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) && | ||
| 1812 | (amdgpu_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) { | ||
| 1813 | if (connected) { | ||
| 1814 | DRM_DEBUG_KMS("DFP1 connected\n"); | ||
| 1815 | bios_0_scratch |= ATOM_S0_DFP1; | ||
| 1816 | bios_3_scratch |= ATOM_S3_DFP1_ACTIVE; | ||
| 1817 | bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1; | ||
| 1818 | } else { | ||
| 1819 | DRM_DEBUG_KMS("DFP1 disconnected\n"); | ||
| 1820 | bios_0_scratch &= ~ATOM_S0_DFP1; | ||
| 1821 | bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE; | ||
| 1822 | bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1; | ||
| 1823 | } | ||
| 1824 | } | ||
| 1825 | if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) && | ||
| 1826 | (amdgpu_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) { | ||
| 1827 | if (connected) { | ||
| 1828 | DRM_DEBUG_KMS("DFP2 connected\n"); | ||
| 1829 | bios_0_scratch |= ATOM_S0_DFP2; | ||
| 1830 | bios_3_scratch |= ATOM_S3_DFP2_ACTIVE; | ||
| 1831 | bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2; | ||
| 1832 | } else { | ||
| 1833 | DRM_DEBUG_KMS("DFP2 disconnected\n"); | ||
| 1834 | bios_0_scratch &= ~ATOM_S0_DFP2; | ||
| 1835 | bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE; | ||
| 1836 | bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2; | ||
| 1837 | } | ||
| 1838 | } | ||
| 1839 | if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) && | ||
| 1840 | (amdgpu_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) { | ||
| 1841 | if (connected) { | ||
| 1842 | DRM_DEBUG_KMS("DFP3 connected\n"); | ||
| 1843 | bios_0_scratch |= ATOM_S0_DFP3; | ||
| 1844 | bios_3_scratch |= ATOM_S3_DFP3_ACTIVE; | ||
| 1845 | bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3; | ||
| 1846 | } else { | ||
| 1847 | DRM_DEBUG_KMS("DFP3 disconnected\n"); | ||
| 1848 | bios_0_scratch &= ~ATOM_S0_DFP3; | ||
| 1849 | bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE; | ||
| 1850 | bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3; | ||
| 1851 | } | ||
| 1852 | } | ||
| 1853 | if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) && | ||
| 1854 | (amdgpu_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) { | ||
| 1855 | if (connected) { | ||
| 1856 | DRM_DEBUG_KMS("DFP4 connected\n"); | ||
| 1857 | bios_0_scratch |= ATOM_S0_DFP4; | ||
| 1858 | bios_3_scratch |= ATOM_S3_DFP4_ACTIVE; | ||
| 1859 | bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4; | ||
| 1860 | } else { | ||
| 1861 | DRM_DEBUG_KMS("DFP4 disconnected\n"); | ||
| 1862 | bios_0_scratch &= ~ATOM_S0_DFP4; | ||
| 1863 | bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE; | ||
| 1864 | bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4; | ||
| 1865 | } | ||
| 1866 | } | ||
| 1867 | if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) && | ||
| 1868 | (amdgpu_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) { | ||
| 1869 | if (connected) { | ||
| 1870 | DRM_DEBUG_KMS("DFP5 connected\n"); | ||
| 1871 | bios_0_scratch |= ATOM_S0_DFP5; | ||
| 1872 | bios_3_scratch |= ATOM_S3_DFP5_ACTIVE; | ||
| 1873 | bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5; | ||
| 1874 | } else { | ||
| 1875 | DRM_DEBUG_KMS("DFP5 disconnected\n"); | ||
| 1876 | bios_0_scratch &= ~ATOM_S0_DFP5; | ||
| 1877 | bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE; | ||
| 1878 | bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5; | ||
| 1879 | } | ||
| 1880 | } | ||
| 1881 | if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) && | ||
| 1882 | (amdgpu_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) { | ||
| 1883 | if (connected) { | ||
| 1884 | DRM_DEBUG_KMS("DFP6 connected\n"); | ||
| 1885 | bios_0_scratch |= ATOM_S0_DFP6; | ||
| 1886 | bios_3_scratch |= ATOM_S3_DFP6_ACTIVE; | ||
| 1887 | bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6; | ||
| 1888 | } else { | ||
| 1889 | DRM_DEBUG_KMS("DFP6 disconnected\n"); | ||
| 1890 | bios_0_scratch &= ~ATOM_S0_DFP6; | ||
| 1891 | bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE; | ||
| 1892 | bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6; | ||
| 1893 | } | ||
| 1894 | } | ||
| 1895 | |||
| 1896 | WREG32(mmBIOS_SCRATCH_0, bios_0_scratch); | ||
| 1897 | WREG32(mmBIOS_SCRATCH_3, bios_3_scratch); | ||
| 1898 | WREG32(mmBIOS_SCRATCH_6, bios_6_scratch); | ||
| 1899 | } | ||
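
Every branch above performs the same read-modify-write over the (S0, S3, S6) scratch-register triple; only the mask set differs per device. A hedged refactoring sketch of that idiom (helper name hypothetical; it simplifies the CRT cases, which set the narrower *_COLOR bit but clear the wider *_MASK):

```c
/* Hypothetical helper capturing the read-modify-write pattern above. */
static void scratch_update(struct amdgpu_device *adev,
			   u32 s0_mask, u32 s3_mask, u32 s6_mask,
			   bool connected)
{
	u32 s0 = RREG32(mmBIOS_SCRATCH_0);
	u32 s3 = RREG32(mmBIOS_SCRATCH_3);
	u32 s6 = RREG32(mmBIOS_SCRATCH_6);

	if (connected) {
		s0 |= s0_mask;
		s3 |= s3_mask;
		s6 |= s6_mask;
	} else {
		s0 &= ~s0_mask;
		s3 &= ~s3_mask;
		s6 &= ~s6_mask;
	}

	WREG32(mmBIOS_SCRATCH_0, s0);
	WREG32(mmBIOS_SCRATCH_3, s3);
	WREG32(mmBIOS_SCRATCH_6, s6);
}
```
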
| 1900 | |||
| 1901 | union lvds_info { | ||
| 1902 | struct _ATOM_LVDS_INFO info; | ||
| 1903 | struct _ATOM_LVDS_INFO_V12 info_12; | ||
| 1904 | }; | ||
| 1905 | |||
| 1906 | struct amdgpu_encoder_atom_dig * | ||
| 1907 | amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder) | ||
| 1908 | { | ||
| 1909 | struct drm_device *dev = encoder->base.dev; | ||
| 1910 | struct amdgpu_device *adev = dev->dev_private; | ||
| 1911 | struct amdgpu_mode_info *mode_info = &adev->mode_info; | ||
| 1912 | int index = GetIndexIntoMasterTable(DATA, LVDS_Info); | ||
| 1913 | uint16_t data_offset, misc; | ||
| 1914 | union lvds_info *lvds_info; | ||
| 1915 | uint8_t frev, crev; | ||
| 1916 | struct amdgpu_encoder_atom_dig *lvds = NULL; | ||
| 1917 | int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; | ||
| 1918 | |||
| 1919 | if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
| 1920 | &frev, &crev, &data_offset)) { | ||
| 1921 | lvds_info = | ||
| 1922 | (union lvds_info *)(mode_info->atom_context->bios + data_offset); | ||
| 1923 | lvds = | ||
| 1924 | kzalloc(sizeof(struct amdgpu_encoder_atom_dig), GFP_KERNEL); | ||
| 1925 | |||
| 1926 | if (!lvds) | ||
| 1927 | return NULL; | ||
| 1928 | |||
| 1929 | lvds->native_mode.clock = | ||
| 1930 | le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10; | ||
| 1931 | lvds->native_mode.hdisplay = | ||
| 1932 | le16_to_cpu(lvds_info->info.sLCDTiming.usHActive); | ||
| 1933 | lvds->native_mode.vdisplay = | ||
| 1934 | le16_to_cpu(lvds_info->info.sLCDTiming.usVActive); | ||
| 1935 | lvds->native_mode.htotal = lvds->native_mode.hdisplay + | ||
| 1936 | le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time); | ||
| 1937 | lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + | ||
| 1938 | le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset); | ||
| 1939 | lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + | ||
| 1940 | le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth); | ||
| 1941 | lvds->native_mode.vtotal = lvds->native_mode.vdisplay + | ||
| 1942 | le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); | ||
| 1943 | lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + | ||
| 1944 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); | ||
| 1945 | lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + | ||
| 1946 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); | ||
| 1947 | lvds->panel_pwr_delay = | ||
| 1948 | le16_to_cpu(lvds_info->info.usOffDelayInMs); | ||
| 1949 | lvds->lcd_misc = lvds_info->info.ucLVDS_Misc; | ||
| 1950 | |||
| 1951 | misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess); | ||
| 1952 | if (misc & ATOM_VSYNC_POLARITY) | ||
| 1953 | lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC; | ||
| 1954 | if (misc & ATOM_HSYNC_POLARITY) | ||
| 1955 | lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC; | ||
| 1956 | if (misc & ATOM_COMPOSITESYNC) | ||
| 1957 | lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC; | ||
| 1958 | if (misc & ATOM_INTERLACE) | ||
| 1959 | lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE; | ||
| 1960 | if (misc & ATOM_DOUBLE_CLOCK_MODE) | ||
| 1961 | lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; | ||
| 1962 | |||
| 1963 | lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize); | ||
| 1964 | lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize); | ||
| 1965 | |||
| 1966 | /* set crtc values */ | ||
| 1967 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); | ||
| 1968 | |||
| 1969 | lvds->lcd_ss_id = lvds_info->info.ucSS_Id; | ||
| 1970 | |||
| 1971 | encoder->native_mode = lvds->native_mode; | ||
| 1972 | |||
| 1973 | if (encoder_enum == 2) | ||
| 1974 | lvds->linkb = true; | ||
| 1975 | else | ||
| 1976 | lvds->linkb = false; | ||
| 1977 | |||
| 1978 | /* parse the lcd record table */ | ||
| 1979 | if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) { | ||
| 1980 | ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; | ||
| 1981 | ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; | ||
| 1982 | bool bad_record = false; | ||
| 1983 | u8 *record; | ||
| 1984 | |||
| 1985 | if ((frev == 1) && (crev < 2)) | ||
| 1986 | /* absolute */ | ||
| 1987 | record = (u8 *)(mode_info->atom_context->bios + | ||
| 1988 | le16_to_cpu(lvds_info->info.usModePatchTableOffset)); | ||
| 1989 | else | ||
| 1990 | /* relative */ | ||
| 1991 | record = (u8 *)(mode_info->atom_context->bios + | ||
| 1992 | data_offset + | ||
| 1993 | le16_to_cpu(lvds_info->info.usModePatchTableOffset)); | ||
| 1994 | while (*record != ATOM_RECORD_END_TYPE) { | ||
| 1995 | switch (*record) { | ||
| 1996 | case LCD_MODE_PATCH_RECORD_MODE_TYPE: | ||
| 1997 | record += sizeof(ATOM_PATCH_RECORD_MODE); | ||
| 1998 | break; | ||
| 1999 | case LCD_RTS_RECORD_TYPE: | ||
| 2000 | record += sizeof(ATOM_LCD_RTS_RECORD); | ||
| 2001 | break; | ||
| 2002 | case LCD_CAP_RECORD_TYPE: | ||
| 2003 | record += sizeof(ATOM_LCD_MODE_CONTROL_CAP); | ||
| 2004 | break; | ||
| 2005 | case LCD_FAKE_EDID_PATCH_RECORD_TYPE: | ||
| 2006 | fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; | ||
| 2007 | if (fake_edid_record->ucFakeEDIDLength) { | ||
| 2008 | struct edid *edid; | ||
| 2009 | int edid_size = | ||
| 2010 | max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength); | ||
| 2011 | edid = kmalloc(edid_size, GFP_KERNEL); | ||
| 2012 | if (edid) { | ||
| 2013 | memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], | ||
| 2014 | fake_edid_record->ucFakeEDIDLength); | ||
| 2015 | |||
| 2016 | if (drm_edid_is_valid(edid)) { | ||
| 2017 | adev->mode_info.bios_hardcoded_edid = edid; | ||
| 2018 | adev->mode_info.bios_hardcoded_edid_size = edid_size; | ||
| 2019 | } else | ||
| 2020 | kfree(edid); | ||
| 2021 | } | ||
| 2022 | } | ||
| 2023 | record += fake_edid_record->ucFakeEDIDLength ? | ||
| 2024 | fake_edid_record->ucFakeEDIDLength + 2 : | ||
| 2025 | sizeof(ATOM_FAKE_EDID_PATCH_RECORD); | ||
| 2026 | break; | ||
| 2027 | case LCD_PANEL_RESOLUTION_RECORD_TYPE: | ||
| 2028 | panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; | ||
| 2029 | lvds->native_mode.width_mm = panel_res_record->usHSize; | ||
| 2030 | lvds->native_mode.height_mm = panel_res_record->usVSize; | ||
| 2031 | record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD); | ||
| 2032 | break; | ||
| 2033 | default: | ||
| 2034 | DRM_ERROR("Bad LCD record %d\n", *record); | ||
| 2035 | bad_record = true; | ||
| 2036 | break; | ||
| 2037 | } | ||
| 2038 | if (bad_record) | ||
| 2039 | break; | ||
| 2040 | } | ||
| 2041 | } | ||
| 2042 | } | ||
| 2043 | return lvds; | ||
| 2044 | } | ||
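
The native mode is reconstructed from the panel's blanking and sync-offset fields rather than from stored totals, and the pixel-clock field is in 10 kHz units. A worked example with hypothetical panel values:

```c
/* Hypothetical 1366x768 LVDS record:
 *   usPixClk = 7608            ->  clock = 76080 kHz (field * 10)
 *   usHActive = 1366, usHBlanking_Time = 160
 *   usHSyncOffset = 48, usHSyncWidth = 32
 *
 * The derivation above then yields:
 *   htotal      = 1366 + 160 = 1526
 *   hsync_start = 1366 + 48  = 1414
 *   hsync_end   = 1414 + 32  = 1446
 * and the vertical timings follow the same pattern.
 */
```
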
| 2045 | |||
| 2046 | struct amdgpu_encoder_atom_dig * | ||
| 2047 | amdgpu_atombios_encoder_get_dig_info(struct amdgpu_encoder *amdgpu_encoder) | ||
| 2048 | { | ||
| 2049 | int encoder_enum = (amdgpu_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; | ||
| 2050 | struct amdgpu_encoder_atom_dig *dig = kzalloc(sizeof(struct amdgpu_encoder_atom_dig), GFP_KERNEL); | ||
| 2051 | |||
| 2052 | if (!dig) | ||
| 2053 | return NULL; | ||
| 2054 | |||
| 2055 | /* coherent mode by default */ | ||
| 2056 | dig->coherent_mode = true; | ||
| 2057 | dig->dig_encoder = -1; | ||
| 2058 | |||
| 2059 | if (encoder_enum == 2) | ||
| 2060 | dig->linkb = true; | ||
| 2061 | else | ||
| 2062 | dig->linkb = false; | ||
| 2063 | |||
| 2064 | return dig; | ||
| 2065 | } | ||
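
Both allocation helpers derive the transmitter link from the encoder enumeration in the same way, shown here with illustrative values:

```c
/* (encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT == 1  ->  linkb = false (link A)
 *                                                 == 2  ->  linkb = true  (link B)
 */
```
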
| 2066 | |||
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h new file mode 100644 index 000000000000..2bdec40515ce --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h | |||
| @@ -0,0 +1,73 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __ATOMBIOS_ENCODER_H__ | ||
| 25 | #define __ATOMBIOS_ENCODER_H__ | ||
| 26 | |||
| 27 | u8 | ||
| 28 | amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder); | ||
| 29 | void | ||
| 30 | amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder, | ||
| 31 | u8 level); | ||
| 32 | void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encoder, | ||
| 33 | struct drm_connector *drm_connector); | ||
| 34 | void | ||
| 35 | amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder); | ||
| 36 | bool amdgpu_atombios_encoder_is_digital(struct drm_encoder *encoder); | ||
| 37 | bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder, | ||
| 38 | const struct drm_display_mode *mode, | ||
| 39 | struct drm_display_mode *adjusted_mode); | ||
| 40 | int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder); | ||
| 41 | void | ||
| 42 | amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder, | ||
| 43 | int action, int panel_mode); | ||
| 44 | void | ||
| 45 | amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int action, | ||
| 46 | uint8_t lane_num, uint8_t lane_set); | ||
| 47 | bool | ||
| 48 | amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector, | ||
| 49 | int action); | ||
| 50 | void | ||
| 51 | amdgpu_atombios_encoder_dpms(struct drm_encoder *encoder, int mode); | ||
| 52 | void | ||
| 53 | amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder); | ||
| 54 | void | ||
| 55 | amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev); | ||
| 56 | enum drm_connector_status | ||
| 57 | amdgpu_atombios_encoder_dac_detect(struct drm_encoder *encoder, | ||
| 58 | struct drm_connector *connector); | ||
| 59 | enum drm_connector_status | ||
| 60 | amdgpu_atombios_encoder_dig_detect(struct drm_encoder *encoder, | ||
| 61 | struct drm_connector *connector); | ||
| 62 | void | ||
| 63 | amdgpu_atombios_encoder_setup_ext_encoder_ddc(struct drm_encoder *encoder); | ||
| 64 | void | ||
| 65 | amdgpu_atombios_encoder_set_bios_scratch_regs(struct drm_connector *connector, | ||
| 66 | struct drm_encoder *encoder, | ||
| 67 | bool connected); | ||
| 68 | struct amdgpu_encoder_atom_dig * | ||
| 69 | amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder); | ||
| 70 | struct amdgpu_encoder_atom_dig * | ||
| 71 | amdgpu_atombios_encoder_get_dig_info(struct amdgpu_encoder *amdgpu_encoder); | ||
| 72 | |||
| 73 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c new file mode 100644 index 000000000000..13cdb01e9b45 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c | |||
| @@ -0,0 +1,158 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2011 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Alex Deucher | ||
| 23 | * | ||
| 24 | */ | ||
| 25 | #include <drm/drmP.h> | ||
| 26 | #include <drm/amdgpu_drm.h> | ||
| 27 | #include "amdgpu.h" | ||
| 28 | #include "atom.h" | ||
| 29 | #include "amdgpu_atombios.h" | ||
| 30 | |||
| 31 | #define TARGET_HW_I2C_CLOCK 50 | ||
| 32 | |||
| 33 | /* these are a limitation of ProcessI2cChannelTransaction, not the hw */ | ||
| 34 | #define ATOM_MAX_HW_I2C_WRITE 3 | ||
| 35 | #define ATOM_MAX_HW_I2C_READ 255 | ||
| 36 | |||
| 37 | static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan, | ||
| 38 | u8 slave_addr, u8 flags, | ||
| 39 | u8 *buf, u8 num) | ||
| 40 | { | ||
| 41 | struct drm_device *dev = chan->dev; | ||
| 42 | struct amdgpu_device *adev = dev->dev_private; | ||
| 43 | PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; | ||
| 44 | int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); | ||
| 45 | unsigned char *base; | ||
| 46 | u16 out = cpu_to_le16(0); | ||
| 47 | int r = 0; | ||
| 48 | |||
| 49 | memset(&args, 0, sizeof(args)); | ||
| 50 | |||
| 51 | mutex_lock(&chan->mutex); | ||
| 52 | |||
| 53 | base = (unsigned char *)adev->mode_info.atom_context->scratch; | ||
| 54 | |||
| 55 | if (flags & HW_I2C_WRITE) { | ||
| 56 | if (num > ATOM_MAX_HW_I2C_WRITE) { | ||
| 57 | DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num); | ||
| 58 | r = -EINVAL; | ||
| 59 | goto done; | ||
| 60 | } | ||
| 61 | if (buf == NULL) | ||
| 62 | args.ucRegIndex = 0; | ||
| 63 | else | ||
| 64 | args.ucRegIndex = buf[0]; | ||
| 65 | if (num) | ||
| 66 | num--; | ||
| 67 | if (num) | ||
| 68 | memcpy(&out, &buf[1], num); | ||
| 69 | args.lpI2CDataOut = cpu_to_le16(out); | ||
| 70 | } else { | ||
| 71 | if (num > ATOM_MAX_HW_I2C_READ) { | ||
| 72 | DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num); | ||
| 73 | r = -EINVAL; | ||
| 74 | goto done; | ||
| 75 | } | ||
| 76 | args.ucRegIndex = 0; | ||
| 77 | args.lpI2CDataOut = 0; | ||
| 78 | } | ||
| 79 | |||
| 80 | args.ucFlag = flags; | ||
| 81 | args.ucI2CSpeed = TARGET_HW_I2C_CLOCK; | ||
| 82 | args.ucTransBytes = num; | ||
| 83 | args.ucSlaveAddr = slave_addr << 1; | ||
| 84 | args.ucLineNumber = chan->rec.i2c_id; | ||
| 85 | |||
| 86 | amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 87 | |||
| 88 | /* error */ | ||
| 89 | if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) { | ||
| 90 | DRM_DEBUG_KMS("hw_i2c error\n"); | ||
| 91 | r = -EIO; | ||
| 92 | goto done; | ||
| 93 | } | ||
| 94 | |||
| 95 | if (!(flags & HW_I2C_WRITE)) | ||
| 96 | amdgpu_atombios_copy_swap(buf, base, num, false); | ||
| 97 | |||
| 98 | done: | ||
| 99 | mutex_unlock(&chan->mutex); | ||
| 100 | |||
| 101 | return r; | ||
| 102 | } | ||
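
On the write path the first buffer byte is consumed as the register index and at most two further bytes ride in the 16-bit lpI2CDataOut field, which is why ATOM_MAX_HW_I2C_WRITE is 3. A hypothetical transfer, assuming a little-endian host:

```c
/* buf = { 0x10, 0xAA, 0xBB }, num = 3, slave_addr = 0x50:
 *   args.ucRegIndex   = 0x10    (buf[0])
 *   out               = 0xBBAA  (buf[1..2] memcpy'd into the u16)
 *   args.ucTransBytes = 2       (num was decremented for the index byte)
 *   args.ucSlaveAddr  = 0xA0    (0x50 << 1, 8-bit address convention)
 */
```
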
| 103 | |||
| 104 | int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap, | ||
| 105 | struct i2c_msg *msgs, int num) | ||
| 106 | { | ||
| 107 | struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | ||
| 108 | struct i2c_msg *p; | ||
| 109 | int i, remaining, current_count, buffer_offset, max_bytes, ret; | ||
| 110 | u8 flags; | ||
| 111 | |||
| 112 | /* check for bus probe */ | ||
| 113 | p = &msgs[0]; | ||
| 114 | if ((num == 1) && (p->len == 0)) { | ||
| 115 | ret = amdgpu_atombios_i2c_process_i2c_ch(i2c, | ||
| 116 | p->addr, HW_I2C_WRITE, | ||
| 117 | NULL, 0); | ||
| 118 | if (ret) | ||
| 119 | return ret; | ||
| 120 | else | ||
| 121 | return num; | ||
| 122 | } | ||
| 123 | |||
| 124 | for (i = 0; i < num; i++) { | ||
| 125 | p = &msgs[i]; | ||
| 126 | remaining = p->len; | ||
| 127 | buffer_offset = 0; | ||
| 128 | /* max_bytes is a limitation of ProcessI2cChannelTransaction, not the hw */ | ||
| 129 | if (p->flags & I2C_M_RD) { | ||
| 130 | max_bytes = ATOM_MAX_HW_I2C_READ; | ||
| 131 | flags = HW_I2C_READ; | ||
| 132 | } else { | ||
| 133 | max_bytes = ATOM_MAX_HW_I2C_WRITE; | ||
| 134 | flags = HW_I2C_WRITE; | ||
| 135 | } | ||
| 136 | while (remaining) { | ||
| 137 | if (remaining > max_bytes) | ||
| 138 | current_count = max_bytes; | ||
| 139 | else | ||
| 140 | current_count = remaining; | ||
| 141 | ret = amdgpu_atombios_i2c_process_i2c_ch(i2c, | ||
| 142 | p->addr, flags, | ||
| 143 | &p->buf[buffer_offset], current_count); | ||
| 144 | if (ret) | ||
| 145 | return ret; | ||
| 146 | remaining -= current_count; | ||
| 147 | buffer_offset += current_count; | ||
| 148 | } | ||
| 149 | } | ||
| 150 | |||
| 151 | return num; | ||
| 152 | } | ||
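
Messages longer than the per-call limit are split transparently by the inner loop; for example, a hypothetical 600-byte read (max_bytes = 255) is issued as three table calls:

```c
/* p->len = 600, I2C_M_RD set:
 *   call 1: buffer_offset =   0, current_count = 255
 *   call 2: buffer_offset = 255, current_count = 255
 *   call 3: buffer_offset = 510, current_count =  90
 */
```
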
| 153 | |||
| 154 | u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap) | ||
| 155 | { | ||
| 156 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; | ||
| 157 | } | ||
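
The two exported functions are shaped to slot straight into the kernel's i2c core. A minimal sketch of the expected wiring (the adapter registration itself lives elsewhere in the driver):

```c
#include <linux/i2c.h>

/* Sketch: an i2c_algorithm built from the callbacks defined above. */
static const struct i2c_algorithm amdgpu_atombios_i2c_algo = {
	.master_xfer   = amdgpu_atombios_i2c_xfer,
	.functionality = amdgpu_atombios_i2c_func,
};
```
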
| 158 | |||
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h new file mode 100644 index 000000000000..d6128d9de56e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h | |||
| @@ -0,0 +1,31 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __ATOMBIOS_I2C_H__ | ||
| 25 | #define __ATOMBIOS_I2C_H__ | ||
| 26 | |||
| 27 | int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap, | ||
| 28 | struct i2c_msg *msgs, int num); | ||
| 29 | u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap); | ||
| 30 | |||
| 31 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h new file mode 100644 index 000000000000..11828e2cdf34 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cikd.h | |||
| @@ -0,0 +1,550 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2012 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Alex Deucher | ||
| 23 | */ | ||
| 24 | #ifndef CIK_H | ||
| 25 | #define CIK_H | ||
| 26 | |||
| 27 | #define MC_SEQ_MISC0__GDDR5__SHIFT 0x1c | ||
| 28 | #define MC_SEQ_MISC0__GDDR5_MASK 0xf0000000 | ||
| 29 | #define MC_SEQ_MISC0__GDDR5_VALUE 5 | ||
| 30 | |||
| 31 | #define CP_ME_TABLE_SIZE 96 | ||
| 32 | |||
| 33 | /* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */ | ||
| 34 | #define CRTC0_REGISTER_OFFSET (0x1b7c - 0x1b7c) | ||
| 35 | #define CRTC1_REGISTER_OFFSET (0x1e7c - 0x1b7c) | ||
| 36 | #define CRTC2_REGISTER_OFFSET (0x417c - 0x1b7c) | ||
| 37 | #define CRTC3_REGISTER_OFFSET (0x447c - 0x1b7c) | ||
| 38 | #define CRTC4_REGISTER_OFFSET (0x477c - 0x1b7c) | ||
| 39 | #define CRTC5_REGISTER_OFFSET (0x4a7c - 0x1b7c) | ||
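
The display-controller blocks repeat at fixed strides from the CRTC0 base, so a single register definition can serve all six instances; e.g. CRTC2_REGISTER_OFFSET works out to 0x417c - 0x1b7c = 0x2600 dword registers. A hedged sketch of the access pattern (register name hypothetical):

```c
static u32 crtc_read(struct amdgpu_device *adev, u32 reg, u32 crtc_offset)
{
	/* e.g. crtc_read(adev, mmSOME_CRTC_REG, CRTC2_REGISTER_OFFSET) */
	return RREG32(reg + crtc_offset);
}
```
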
| 40 | |||
| 41 | #define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001 | ||
| 42 | #define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003 | ||
| 43 | |||
| 44 | #define CIK_RB_BITMAP_WIDTH_PER_SH 2 | ||
| 45 | #define HAWAII_RB_BITMAP_WIDTH_PER_SH 4 | ||
| 46 | |||
| 47 | #define AMDGPU_NUM_OF_VMIDS 8 | ||
| 48 | |||
| 49 | #define PIPEID(x) ((x) << 0) | ||
| 50 | #define MEID(x) ((x) << 2) | ||
| 51 | #define VMID(x) ((x) << 4) | ||
| 52 | #define QUEUEID(x) ((x) << 8) | ||
| 53 | |||
| 54 | #define mmCC_DRM_ID_STRAPS 0x1559 | ||
| 55 | #define CC_DRM_ID_STRAPS__ATI_REV_ID_MASK 0xf0000000 | ||
| 56 | |||
| 57 | #define mmCHUB_CONTROL 0x619 | ||
| 58 | #define BYPASS_VM (1 << 0) | ||
| 59 | |||
| 60 | #define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5) | ||
| 61 | |||
| 62 | #define mmGRPH_LUT_10BIT_BYPASS_CONTROL 0x1a02 | ||
| 63 | #define LUT_10BIT_BYPASS_EN (1 << 8) | ||
| 64 | |||
| 65 | # define CURSOR_MONO 0 | ||
| 66 | # define CURSOR_24_1 1 | ||
| 67 | # define CURSOR_24_8_PRE_MULT 2 | ||
| 68 | # define CURSOR_24_8_UNPRE_MULT 3 | ||
| 69 | # define CURSOR_URGENT_ALWAYS 0 | ||
| 70 | # define CURSOR_URGENT_1_8 1 | ||
| 71 | # define CURSOR_URGENT_1_4 2 | ||
| 72 | # define CURSOR_URGENT_3_8 3 | ||
| 73 | # define CURSOR_URGENT_1_2 4 | ||
| 74 | |||
| 75 | # define GRPH_DEPTH_8BPP 0 | ||
| 76 | # define GRPH_DEPTH_16BPP 1 | ||
| 77 | # define GRPH_DEPTH_32BPP 2 | ||
| 78 | /* 8 BPP */ | ||
| 79 | # define GRPH_FORMAT_INDEXED 0 | ||
| 80 | /* 16 BPP */ | ||
| 81 | # define GRPH_FORMAT_ARGB1555 0 | ||
| 82 | # define GRPH_FORMAT_ARGB565 1 | ||
| 83 | # define GRPH_FORMAT_ARGB4444 2 | ||
| 84 | # define GRPH_FORMAT_AI88 3 | ||
| 85 | # define GRPH_FORMAT_MONO16 4 | ||
| 86 | # define GRPH_FORMAT_BGRA5551 5 | ||
| 87 | /* 32 BPP */ | ||
| 88 | # define GRPH_FORMAT_ARGB8888 0 | ||
| 89 | # define GRPH_FORMAT_ARGB2101010 1 | ||
| 90 | # define GRPH_FORMAT_32BPP_DIG 2 | ||
| 91 | # define GRPH_FORMAT_8B_ARGB2101010 3 | ||
| 92 | # define GRPH_FORMAT_BGRA1010102 4 | ||
| 93 | # define GRPH_FORMAT_8B_BGRA1010102 5 | ||
| 94 | # define GRPH_FORMAT_RGB111110 6 | ||
| 95 | # define GRPH_FORMAT_BGR101111 7 | ||
| 96 | # define ADDR_SURF_MACRO_TILE_ASPECT_1 0 | ||
| 97 | # define ADDR_SURF_MACRO_TILE_ASPECT_2 1 | ||
| 98 | # define ADDR_SURF_MACRO_TILE_ASPECT_4 2 | ||
| 99 | # define ADDR_SURF_MACRO_TILE_ASPECT_8 3 | ||
| 100 | # define GRPH_ARRAY_LINEAR_GENERAL 0 | ||
| 101 | # define GRPH_ARRAY_LINEAR_ALIGNED 1 | ||
| 102 | # define GRPH_ARRAY_1D_TILED_THIN1 2 | ||
| 103 | # define GRPH_ARRAY_2D_TILED_THIN1 4 | ||
| 104 | # define DISPLAY_MICRO_TILING 0 | ||
| 105 | # define THIN_MICRO_TILING 1 | ||
| 106 | # define DEPTH_MICRO_TILING 2 | ||
| 107 | # define ROTATED_MICRO_TILING 4 | ||
| 108 | # define GRPH_ENDIAN_NONE 0 | ||
| 109 | # define GRPH_ENDIAN_8IN16 1 | ||
| 110 | # define GRPH_ENDIAN_8IN32 2 | ||
| 111 | # define GRPH_ENDIAN_8IN64 3 | ||
| 112 | # define GRPH_RED_SEL_R 0 | ||
| 113 | # define GRPH_RED_SEL_G 1 | ||
| 114 | # define GRPH_RED_SEL_B 2 | ||
| 115 | # define GRPH_RED_SEL_A 3 | ||
| 116 | # define GRPH_GREEN_SEL_G 0 | ||
| 117 | # define GRPH_GREEN_SEL_B 1 | ||
| 118 | # define GRPH_GREEN_SEL_A 2 | ||
| 119 | # define GRPH_GREEN_SEL_R 3 | ||
| 120 | # define GRPH_BLUE_SEL_B 0 | ||
| 121 | # define GRPH_BLUE_SEL_A 1 | ||
| 122 | # define GRPH_BLUE_SEL_R 2 | ||
| 123 | # define GRPH_BLUE_SEL_G 3 | ||
| 124 | # define GRPH_ALPHA_SEL_A 0 | ||
| 125 | # define GRPH_ALPHA_SEL_R 1 | ||
| 126 | # define GRPH_ALPHA_SEL_G 2 | ||
| 127 | # define GRPH_ALPHA_SEL_B 3 | ||
| 128 | # define INPUT_GAMMA_USE_LUT 0 | ||
| 129 | # define INPUT_GAMMA_BYPASS 1 | ||
| 130 | # define INPUT_GAMMA_SRGB_24 2 | ||
| 131 | # define INPUT_GAMMA_XVYCC_222 3 | ||
| 132 | |||
| 133 | # define INPUT_CSC_BYPASS 0 | ||
| 134 | # define INPUT_CSC_PROG_COEFF 1 | ||
| 135 | # define INPUT_CSC_PROG_SHARED_MATRIXA 2 | ||
| 136 | |||
| 137 | # define OUTPUT_CSC_BYPASS 0 | ||
| 138 | # define OUTPUT_CSC_TV_RGB 1 | ||
| 139 | # define OUTPUT_CSC_YCBCR_601 2 | ||
| 140 | # define OUTPUT_CSC_YCBCR_709 3 | ||
| 141 | # define OUTPUT_CSC_PROG_COEFF 4 | ||
| 142 | # define OUTPUT_CSC_PROG_SHARED_MATRIXB 5 | ||
| 143 | |||
| 144 | # define DEGAMMA_BYPASS 0 | ||
| 145 | # define DEGAMMA_SRGB_24 1 | ||
| 146 | # define DEGAMMA_XVYCC_222 2 | ||
| 147 | # define GAMUT_REMAP_BYPASS 0 | ||
| 148 | # define GAMUT_REMAP_PROG_COEFF 1 | ||
| 149 | # define GAMUT_REMAP_PROG_SHARED_MATRIXA 2 | ||
| 150 | # define GAMUT_REMAP_PROG_SHARED_MATRIXB 3 | ||
| 151 | |||
| 152 | # define REGAMMA_BYPASS 0 | ||
| 153 | # define REGAMMA_SRGB_24 1 | ||
| 154 | # define REGAMMA_XVYCC_222 2 | ||
| 155 | # define REGAMMA_PROG_A 3 | ||
| 156 | # define REGAMMA_PROG_B 4 | ||
| 157 | |||
| 158 | # define FMT_CLAMP_6BPC 0 | ||
| 159 | # define FMT_CLAMP_8BPC 1 | ||
| 160 | # define FMT_CLAMP_10BPC 2 | ||
| 161 | |||
| 162 | # define HDMI_24BIT_DEEP_COLOR 0 | ||
| 163 | # define HDMI_30BIT_DEEP_COLOR 1 | ||
| 164 | # define HDMI_36BIT_DEEP_COLOR 2 | ||
| 165 | # define HDMI_ACR_HW 0 | ||
| 166 | # define HDMI_ACR_32 1 | ||
| 167 | # define HDMI_ACR_44 2 | ||
| 168 | # define HDMI_ACR_48 3 | ||
| 169 | # define HDMI_ACR_X1 1 | ||
| 170 | # define HDMI_ACR_X2 2 | ||
| 171 | # define HDMI_ACR_X4 4 | ||
| 172 | # define AFMT_AVI_INFO_Y_RGB 0 | ||
| 173 | # define AFMT_AVI_INFO_Y_YCBCR422 1 | ||
| 174 | # define AFMT_AVI_INFO_Y_YCBCR444 2 | ||
| 175 | |||
| 176 | #define NO_AUTO 0 | ||
| 177 | #define ES_AUTO 1 | ||
| 178 | #define GS_AUTO 2 | ||
| 179 | #define ES_AND_GS_AUTO 3 | ||
| 180 | |||
| 181 | # define ARRAY_MODE(x) ((x) << 2) | ||
| 182 | # define PIPE_CONFIG(x) ((x) << 6) | ||
| 183 | # define TILE_SPLIT(x) ((x) << 11) | ||
| 184 | # define MICRO_TILE_MODE_NEW(x) ((x) << 22) | ||
| 185 | # define SAMPLE_SPLIT(x) ((x) << 25) | ||
| 186 | # define BANK_WIDTH(x) ((x) << 0) | ||
| 187 | # define BANK_HEIGHT(x) ((x) << 2) | ||
| 188 | # define MACRO_TILE_ASPECT(x) ((x) << 4) | ||
| 189 | # define NUM_BANKS(x) ((x) << 6) | ||
| 190 | |||
| 191 | #define MSG_ENTER_RLC_SAFE_MODE 1 | ||
| 192 | #define MSG_EXIT_RLC_SAFE_MODE 0 | ||
| 193 | |||
| 194 | /* | ||
| 195 | * PM4 | ||
| 196 | */ | ||
| 197 | #define PACKET_TYPE0 0 | ||
| 198 | #define PACKET_TYPE1 1 | ||
| 199 | #define PACKET_TYPE2 2 | ||
| 200 | #define PACKET_TYPE3 3 | ||
| 201 | |||
| 202 | #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) | ||
| 203 | #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) | ||
| 204 | #define CP_PACKET0_GET_REG(h) ((h) & 0xFFFF) | ||
| 205 | #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) | ||
| 206 | #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ | ||
| 207 | ((reg) & 0xFFFF) | \ | ||
| 208 | ((n) & 0x3FFF) << 16) | ||
| 209 | #define CP_PACKET2 0x80000000 | ||
| 210 | #define PACKET2_PAD_SHIFT 0 | ||
| 211 | #define PACKET2_PAD_MASK (0x3fffffff << 0) | ||
| 212 | |||
| 213 | #define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) | ||
| 214 | |||
| 215 | #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ | ||
| 216 | (((op) & 0xFF) << 8) | \ | ||
| 217 | ((n) & 0x3FFF) << 16) | ||
| 218 | |||
| 219 | #define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1) | ||
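
The compose and decode macros round-trip cleanly; a worked example for a type-3 NOP header:

```c
/* PACKET3(PACKET3_NOP, 0) = (3 << 30) | (0x10 << 8) | (0 << 16)
 *                         = 0xC0001000
 *
 * CP_PACKET_GET_TYPE(0xC0001000)    == 3    (type-3 packet)
 * CP_PACKET3_GET_OPCODE(0xC0001000) == 0x10 (PACKET3_NOP, defined below)
 * CP_PACKET_GET_COUNT(0xC0001000)   == 0
 *
 * PACKET3_COMPUTE() additionally sets header bit 1 (the shader-type
 * bit) to mark the packet for the compute engine.
 */
```
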
| 220 | |||
| 221 | /* Packet 3 types */ | ||
| 222 | #define PACKET3_NOP 0x10 | ||
| 223 | #define PACKET3_SET_BASE 0x11 | ||
| 224 | #define PACKET3_BASE_INDEX(x) ((x) << 0) | ||
| 225 | #define CE_PARTITION_BASE 3 | ||
| 226 | #define PACKET3_CLEAR_STATE 0x12 | ||
| 227 | #define PACKET3_INDEX_BUFFER_SIZE 0x13 | ||
| 228 | #define PACKET3_DISPATCH_DIRECT 0x15 | ||
| 229 | #define PACKET3_DISPATCH_INDIRECT 0x16 | ||
| 230 | #define PACKET3_ATOMIC_GDS 0x1D | ||
| 231 | #define PACKET3_ATOMIC_MEM 0x1E | ||
| 232 | #define PACKET3_OCCLUSION_QUERY 0x1F | ||
| 233 | #define PACKET3_SET_PREDICATION 0x20 | ||
| 234 | #define PACKET3_REG_RMW 0x21 | ||
| 235 | #define PACKET3_COND_EXEC 0x22 | ||
| 236 | #define PACKET3_PRED_EXEC 0x23 | ||
| 237 | #define PACKET3_DRAW_INDIRECT 0x24 | ||
| 238 | #define PACKET3_DRAW_INDEX_INDIRECT 0x25 | ||
| 239 | #define PACKET3_INDEX_BASE 0x26 | ||
| 240 | #define PACKET3_DRAW_INDEX_2 0x27 | ||
| 241 | #define PACKET3_CONTEXT_CONTROL 0x28 | ||
| 242 | #define PACKET3_INDEX_TYPE 0x2A | ||
| 243 | #define PACKET3_DRAW_INDIRECT_MULTI 0x2C | ||
| 244 | #define PACKET3_DRAW_INDEX_AUTO 0x2D | ||
| 245 | #define PACKET3_NUM_INSTANCES 0x2F | ||
| 246 | #define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30 | ||
| 247 | #define PACKET3_INDIRECT_BUFFER_CONST 0x33 | ||
| 248 | #define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 | ||
| 249 | #define PACKET3_DRAW_INDEX_OFFSET_2 0x35 | ||
| 250 | #define PACKET3_DRAW_PREAMBLE 0x36 | ||
| 251 | #define PACKET3_WRITE_DATA 0x37 | ||
| 252 | #define WRITE_DATA_DST_SEL(x) ((x) << 8) | ||
| 253 | /* 0 - register | ||
| 254 | * 1 - memory (sync - via GRBM) | ||
| 255 | * 2 - gl2 | ||
| 256 | * 3 - gds | ||
| 257 | * 4 - reserved | ||
| 258 | * 5 - memory (async - direct) | ||
| 259 | */ | ||
| 260 | #define WR_ONE_ADDR (1 << 16) | ||
| 261 | #define WR_CONFIRM (1 << 20) | ||
| 262 | #define WRITE_DATA_CACHE_POLICY(x) ((x) << 25) | ||
| 263 | /* 0 - LRU | ||
| 264 | * 1 - Stream | ||
| 265 | */ | ||
| 266 | #define WRITE_DATA_ENGINE_SEL(x) ((x) << 30) | ||
| 267 | /* 0 - me | ||
| 268 | * 1 - pfp | ||
| 269 | * 2 - ce | ||
| 270 | */ | ||
| 271 | #define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38 | ||
| 272 | #define PACKET3_MEM_SEMAPHORE 0x39 | ||
| 273 | # define PACKET3_SEM_USE_MAILBOX (0x1 << 16) | ||
| 274 | # define PACKET3_SEM_SEL_SIGNAL_TYPE (0x1 << 20) /* 0 = increment, 1 = write 1 */ | ||
| 275 | # define PACKET3_SEM_CLIENT_CODE(x) ((x) << 24) /* 0 = CP, 1 = CB, 2 = DB */ | ||
| 276 | # define PACKET3_SEM_SEL_SIGNAL (0x6 << 29) | ||
| 277 | # define PACKET3_SEM_SEL_WAIT (0x7 << 29) | ||
| 278 | #define PACKET3_COPY_DW 0x3B | ||
| 279 | #define PACKET3_WAIT_REG_MEM 0x3C | ||
| 280 | #define WAIT_REG_MEM_FUNCTION(x) ((x) << 0) | ||
| 281 | /* 0 - always | ||
| 282 | * 1 - < | ||
| 283 | * 2 - <= | ||
| 284 | * 3 - == | ||
| 285 | * 4 - != | ||
| 286 | * 5 - >= | ||
| 287 | * 6 - > | ||
| 288 | */ | ||
| 289 | #define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4) | ||
| 290 | /* 0 - reg | ||
| 291 | * 1 - mem | ||
| 292 | */ | ||
| 293 | #define WAIT_REG_MEM_OPERATION(x) ((x) << 6) | ||
| 294 | /* 0 - wait_reg_mem | ||
| 295 | * 1 - wr_wait_wr_reg | ||
| 296 | */ | ||
| 297 | #define WAIT_REG_MEM_ENGINE(x) ((x) << 8) | ||
| 298 | /* 0 - me | ||
| 299 | * 1 - pfp | ||
| 300 | */ | ||
| 301 | #define PACKET3_INDIRECT_BUFFER 0x3F | ||
| 302 | #define INDIRECT_BUFFER_TCL2_VOLATILE (1 << 22) | ||
| 303 | #define INDIRECT_BUFFER_VALID (1 << 23) | ||
| 304 | #define INDIRECT_BUFFER_CACHE_POLICY(x) ((x) << 28) | ||
| 305 | /* 0 - LRU | ||
| 306 | * 1 - Stream | ||
| 307 | * 2 - Bypass | ||
| 308 | */ | ||
| 309 | #define PACKET3_COPY_DATA 0x40 | ||
| 310 | #define PACKET3_PFP_SYNC_ME 0x42 | ||
| 311 | #define PACKET3_SURFACE_SYNC 0x43 | ||
| 312 | # define PACKET3_DEST_BASE_0_ENA (1 << 0) | ||
| 313 | # define PACKET3_DEST_BASE_1_ENA (1 << 1) | ||
| 314 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) | ||
| 315 | # define PACKET3_CB1_DEST_BASE_ENA (1 << 7) | ||
| 316 | # define PACKET3_CB2_DEST_BASE_ENA (1 << 8) | ||
| 317 | # define PACKET3_CB3_DEST_BASE_ENA (1 << 9) | ||
| 318 | # define PACKET3_CB4_DEST_BASE_ENA (1 << 10) | ||
| 319 | # define PACKET3_CB5_DEST_BASE_ENA (1 << 11) | ||
| 320 | # define PACKET3_CB6_DEST_BASE_ENA (1 << 12) | ||
| 321 | # define PACKET3_CB7_DEST_BASE_ENA (1 << 13) | ||
| 322 | # define PACKET3_DB_DEST_BASE_ENA (1 << 14) | ||
| 323 | # define PACKET3_TCL1_VOL_ACTION_ENA (1 << 15) | ||
| 324 | # define PACKET3_TC_VOL_ACTION_ENA (1 << 16) /* L2 */ | ||
| 325 | # define PACKET3_TC_WB_ACTION_ENA (1 << 18) /* L2 */ | ||
| 326 | # define PACKET3_DEST_BASE_2_ENA (1 << 19) | ||
| 327 | # define PACKET3_DEST_BASE_3_ENA (1 << 21) | ||
| 328 | # define PACKET3_TCL1_ACTION_ENA (1 << 22) | ||
| 329 | # define PACKET3_TC_ACTION_ENA (1 << 23) /* L2 */ | ||
| 330 | # define PACKET3_CB_ACTION_ENA (1 << 25) | ||
| 331 | # define PACKET3_DB_ACTION_ENA (1 << 26) | ||
| 332 | # define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27) | ||
| 333 | # define PACKET3_SH_KCACHE_VOL_ACTION_ENA (1 << 28) | ||
| 334 | # define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29) | ||
| 335 | #define PACKET3_COND_WRITE 0x45 | ||
| 336 | #define PACKET3_EVENT_WRITE 0x46 | ||
| 337 | #define EVENT_TYPE(x) ((x) << 0) | ||
| 338 | #define EVENT_INDEX(x) ((x) << 8) | ||
| 339 | /* 0 - any non-TS event | ||
| 340 | * 1 - ZPASS_DONE, PIXEL_PIPE_STAT_* | ||
| 341 | * 2 - SAMPLE_PIPELINESTAT | ||
| 342 | * 3 - SAMPLE_STREAMOUTSTAT* | ||
| 343 | * 4 - *S_PARTIAL_FLUSH | ||
| 344 | * 5 - EOP events | ||
| 345 | * 6 - EOS events | ||
| 346 | */ | ||
| 347 | #define PACKET3_EVENT_WRITE_EOP 0x47 | ||
| 348 | #define EOP_TCL1_VOL_ACTION_EN (1 << 12) | ||
| 349 | #define EOP_TC_VOL_ACTION_EN (1 << 13) /* L2 */ | ||
| 350 | #define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */ | ||
| 351 | #define EOP_TCL1_ACTION_EN (1 << 16) | ||
| 352 | #define EOP_TC_ACTION_EN (1 << 17) /* L2 */ | ||
| 353 | #define EOP_TCL2_VOLATILE (1 << 24) | ||
| 354 | #define EOP_CACHE_POLICY(x) ((x) << 25) | ||
| 355 | /* 0 - LRU | ||
| 356 | * 1 - Stream | ||
| 357 | * 2 - Bypass | ||
| 358 | */ | ||
| 359 | #define DATA_SEL(x) ((x) << 29) | ||
| 360 | /* 0 - discard | ||
| 361 | * 1 - send low 32bit data | ||
| 362 | * 2 - send 64bit data | ||
| 363 | * 3 - send 64bit GPU counter value | ||
| 364 | * 4 - send 64bit sys counter value | ||
| 365 | */ | ||
| 366 | #define INT_SEL(x) ((x) << 24) | ||
| 367 | /* 0 - none | ||
| 368 | * 1 - interrupt only (DATA_SEL = 0) | ||
| 369 | * 2 - interrupt when data write is confirmed | ||
| 370 | */ | ||
| 371 | #define DST_SEL(x) ((x) << 16) | ||
| 372 | /* 0 - MC | ||
| 373 | * 1 - TC/L2 | ||
| 374 | */ | ||
| 375 | #define PACKET3_EVENT_WRITE_EOS 0x48 | ||
| 376 | #define PACKET3_RELEASE_MEM 0x49 | ||
| 377 | #define PACKET3_PREAMBLE_CNTL 0x4A | ||
| 378 | # define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28) | ||
| 379 | # define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28) | ||
| 380 | #define PACKET3_DMA_DATA 0x50 | ||
| 381 | /* 1. header | ||
| 382 | * 2. CONTROL | ||
| 383 | * 3. SRC_ADDR_LO or DATA [31:0] | ||
| 384 | * 4. SRC_ADDR_HI [31:0] | ||
| 385 | * 5. DST_ADDR_LO [31:0] | ||
| 386 | * 6. DST_ADDR_HI [7:0] | ||
| 387 | * 7. COMMAND [30:21] | BYTE_COUNT [20:0] | ||
| 388 | */ | ||
| 389 | /* CONTROL */ | ||
| 390 | # define PACKET3_DMA_DATA_ENGINE(x) ((x) << 0) | ||
| 391 | /* 0 - ME | ||
| 392 | * 1 - PFP | ||
| 393 | */ | ||
| 394 | # define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13) | ||
| 395 | /* 0 - LRU | ||
| 396 | * 1 - Stream | ||
| 397 | * 2 - Bypass | ||
| 398 | */ | ||
| 399 | # define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15) | ||
| 400 | # define PACKET3_DMA_DATA_DST_SEL(x) ((x) << 20) | ||
| 401 | /* 0 - DST_ADDR using DAS | ||
| 402 | * 1 - GDS | ||
| 403 | * 3 - DST_ADDR using L2 | ||
| 404 | */ | ||
| 405 | # define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25) | ||
| 406 | /* 0 - LRU | ||
| 407 | * 1 - Stream | ||
| 408 | * 2 - Bypass | ||
| 409 | */ | ||
| 410 | # define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27) | ||
| 411 | # define PACKET3_DMA_DATA_SRC_SEL(x) ((x) << 29) | ||
| 412 | /* 0 - SRC_ADDR using SAS | ||
| 413 | * 1 - GDS | ||
| 414 | * 2 - DATA | ||
| 415 | * 3 - SRC_ADDR using L2 | ||
| 416 | */ | ||
| 417 | # define PACKET3_DMA_DATA_CP_SYNC (1 << 31) | ||
| 418 | /* COMMAND */ | ||
| 419 | # define PACKET3_DMA_DATA_DIS_WC (1 << 21) | ||
| 420 | # define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22) | ||
| 421 | /* 0 - none | ||
| 422 | * 1 - 8 in 16 | ||
| 423 | * 2 - 8 in 32 | ||
| 424 | * 3 - 8 in 64 | ||
| 425 | */ | ||
| 426 | # define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24) | ||
| 427 | /* 0 - none | ||
| 428 | * 1 - 8 in 16 | ||
| 429 | * 2 - 8 in 32 | ||
| 430 | * 3 - 8 in 64 | ||
| 431 | */ | ||
| 432 | # define PACKET3_DMA_DATA_CMD_SAS (1 << 26) | ||
| 433 | /* 0 - memory | ||
| 434 | * 1 - register | ||
| 435 | */ | ||
| 436 | # define PACKET3_DMA_DATA_CMD_DAS (1 << 27) | ||
| 437 | /* 0 - memory | ||
| 438 | * 1 - register | ||
| 439 | */ | ||
| 440 | # define PACKET3_DMA_DATA_CMD_SAIC (1 << 28) | ||
| 441 | # define PACKET3_DMA_DATA_CMD_DAIC (1 << 29) | ||
| 442 | # define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30) | ||
| 443 | #define PACKET3_AQUIRE_MEM 0x58 | ||
| 444 | #define PACKET3_REWIND 0x59 | ||
| 445 | #define PACKET3_LOAD_UCONFIG_REG 0x5E | ||
| 446 | #define PACKET3_LOAD_SH_REG 0x5F | ||
| 447 | #define PACKET3_LOAD_CONFIG_REG 0x60 | ||
| 448 | #define PACKET3_LOAD_CONTEXT_REG 0x61 | ||
| 449 | #define PACKET3_SET_CONFIG_REG 0x68 | ||
| 450 | #define PACKET3_SET_CONFIG_REG_START 0x00002000 | ||
| 451 | #define PACKET3_SET_CONFIG_REG_END 0x00002c00 | ||
| 452 | #define PACKET3_SET_CONTEXT_REG 0x69 | ||
| 453 | #define PACKET3_SET_CONTEXT_REG_START 0x0000a000 | ||
| 454 | #define PACKET3_SET_CONTEXT_REG_END 0x0000a400 | ||
| 455 | #define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 | ||
| 456 | #define PACKET3_SET_SH_REG 0x76 | ||
| 457 | #define PACKET3_SET_SH_REG_START 0x00002c00 | ||
| 458 | #define PACKET3_SET_SH_REG_END 0x00003000 | ||
| 459 | #define PACKET3_SET_SH_REG_OFFSET 0x77 | ||
| 460 | #define PACKET3_SET_QUEUE_REG 0x78 | ||
| 461 | #define PACKET3_SET_UCONFIG_REG 0x79 | ||
| 462 | #define PACKET3_SET_UCONFIG_REG_START 0x0000c000 | ||
| 463 | #define PACKET3_SET_UCONFIG_REG_END 0x0000c400 | ||
| 464 | #define PACKET3_SCRATCH_RAM_WRITE 0x7D | ||
| 465 | #define PACKET3_SCRATCH_RAM_READ 0x7E | ||
| 466 | #define PACKET3_LOAD_CONST_RAM 0x80 | ||
| 467 | #define PACKET3_WRITE_CONST_RAM 0x81 | ||
| 468 | #define PACKET3_DUMP_CONST_RAM 0x83 | ||
| 469 | #define PACKET3_INCREMENT_CE_COUNTER 0x84 | ||
| 470 | #define PACKET3_INCREMENT_DE_COUNTER 0x85 | ||
| 471 | #define PACKET3_WAIT_ON_CE_COUNTER 0x86 | ||
| 472 | #define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88 | ||
| 473 | #define PACKET3_SWITCH_BUFFER 0x8B | ||
| 474 | |||
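
The PACKET3_DMA_DATA layout and field defines above are enough to assemble a complete seven-dword packet. Below is a minimal, hypothetical sketch of how such a packet might be written into an indirect buffer: the PACKET3() header macro is reproduced locally in the usual CIK type-3 encoding (opcode in bits [15:8], dword count minus two in bits [29:16]) so the fragment stands alone, and emit_dma_data() is an illustrative helper, not part of this commit.

```c
#include <stdint.h>

/* Local copy of the type-3 header encoding (assumption: matches the
 * PACKET3() macro defined earlier in this header). */
#define PACKET_TYPE3 3
#define PACKET3(op, n) (((uint32_t)PACKET_TYPE3 << 30) | \
			(((n) & 0x3FFF) << 16) |         \
			(((op) & 0xFF) << 8))

static void emit_dma_data(uint32_t *ib, unsigned int *idx,
			  uint64_t src, uint64_t dst, uint32_t bytes)
{
	/* 7 dwords total, so the type-3 count field is 7 - 2 = 5 */
	ib[(*idx)++] = PACKET3(PACKET3_DMA_DATA, 5);
	/* CONTROL: ME engine, src and dst are plain addresses,
	 * CP_SYNC stalls the CP until the transfer completes */
	ib[(*idx)++] = PACKET3_DMA_DATA_ENGINE(0) |
		       PACKET3_DMA_DATA_DST_SEL(0) |
		       PACKET3_DMA_DATA_SRC_SEL(0) |
		       PACKET3_DMA_DATA_CP_SYNC;
	ib[(*idx)++] = (uint32_t)src;		/* SRC_ADDR_LO */
	ib[(*idx)++] = (uint32_t)(src >> 32);	/* SRC_ADDR_HI */
	ib[(*idx)++] = (uint32_t)dst;		/* DST_ADDR_LO */
	ib[(*idx)++] = (uint32_t)(dst >> 32);	/* DST_ADDR_HI */
	/* COMMAND [30:21] | BYTE_COUNT [20:0]; no COMMAND bits set */
	ib[(*idx)++] = bytes & 0x1FFFFF;
}
```

On the SET_*_REG defines: the *_START/*_END pairs above bound the register range each SET_* packet may address, and emit code conventionally writes the register offset relative to the range base (e.g. reg - PACKET3_SET_SH_REG_START for a PACKET3_SET_SH_REG packet) rather than the absolute register address.
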
| 475 | /* SDMA - first instance at 0xd000, second at 0xd800 */ | ||
| 476 | #define SDMA0_REGISTER_OFFSET 0x0 /* not a register */ | ||
| 477 | #define SDMA1_REGISTER_OFFSET 0x200 /* not a register */ | ||
| 478 | #define SDMA_MAX_INSTANCE 2 | ||
| 479 | |||
| 480 | #define SDMA_PACKET(op, sub_op, e) ((((e) & 0xFFFF) << 16) | \ | ||
| 481 | (((sub_op) & 0xFF) << 8) | \ | ||
| 482 | (((op) & 0xFF) << 0)) | ||
| 483 | /* SDMA opcodes */ | ||
| 484 | #define SDMA_OPCODE_NOP 0 | ||
| 485 | #define SDMA_OPCODE_COPY 1 | ||
| 486 | # define SDMA_COPY_SUB_OPCODE_LINEAR 0 | ||
| 487 | # define SDMA_COPY_SUB_OPCODE_TILED 1 | ||
| 488 | # define SDMA_COPY_SUB_OPCODE_SOA 3 | ||
| 489 | # define SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW 4 | ||
| 490 | # define SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW 5 | ||
| 491 | # define SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW 6 | ||
| 492 | #define SDMA_OPCODE_WRITE 2 | ||
| 493 | # define SDMA_WRITE_SUB_OPCODE_LINEAR 0 | ||
| 494 | # define SDMA_WRITE_SUB_OPCODE_TILED 1 | ||
| 495 | #define SDMA_OPCODE_INDIRECT_BUFFER 4 | ||
| 496 | #define SDMA_OPCODE_FENCE 5 | ||
| 497 | #define SDMA_OPCODE_TRAP 6 | ||
| 498 | #define SDMA_OPCODE_SEMAPHORE 7 | ||
| 499 | # define SDMA_SEMAPHORE_EXTRA_O (1 << 13) | ||
| 500 | /* 0 - increment | ||
| 501 | * 1 - write 1 | ||
| 502 | */ | ||
| 503 | # define SDMA_SEMAPHORE_EXTRA_S (1 << 14) | ||
| 504 | /* 0 - wait | ||
| 505 | * 1 - signal | ||
| 506 | */ | ||
| 507 | # define SDMA_SEMAPHORE_EXTRA_M (1 << 15) | ||
| 508 | /* mailbox */ | ||
| 509 | #define SDMA_OPCODE_POLL_REG_MEM 8 | ||
| 510 | # define SDMA_POLL_REG_MEM_EXTRA_OP(x) ((x) << 10) | ||
| 511 | /* 0 - wait_reg_mem | ||
| 512 | * 1 - wr_wait_wr_reg | ||
| 513 | */ | ||
| 514 | # define SDMA_POLL_REG_MEM_EXTRA_FUNC(x) ((x) << 12) | ||
| 515 | /* 0 - always | ||
| 516 | * 1 - < | ||
| 517 | * 2 - <= | ||
| 518 | * 3 - == | ||
| 519 | * 4 - != | ||
| 520 | * 5 - >= | ||
| 521 | * 6 - > | ||
| 522 | */ | ||
| 523 | # define SDMA_POLL_REG_MEM_EXTRA_M (1 << 15) | ||
| 524 | /* 0 - register | ||
| 525 | * 1 - memory | ||
| 526 | */ | ||
| 527 | #define SDMA_OPCODE_COND_EXEC 9 | ||
| 528 | #define SDMA_OPCODE_CONSTANT_FILL 11 | ||
| 529 | # define SDMA_CONSTANT_FILL_EXTRA_SIZE(x) ((x) << 14) | ||
| 530 | /* 0 - byte fill | ||
| 531 | * 2 - DW fill | ||
| 532 | */ | ||
| 533 | #define SDMA_OPCODE_GENERATE_PTE_PDE 12 | ||
| 534 | #define SDMA_OPCODE_TIMESTAMP 13 | ||
| 535 | # define SDMA_TIMESTAMP_SUB_OPCODE_SET_LOCAL 0 | ||
| 536 | # define SDMA_TIMESTAMP_SUB_OPCODE_GET_LOCAL 1 | ||
| 537 | # define SDMA_TIMESTAMP_SUB_OPCODE_GET_GLOBAL 2 | ||
| 538 | #define SDMA_OPCODE_SRBM_WRITE 14 | ||
| 539 | # define SDMA_SRBM_WRITE_EXTRA_BYTE_ENABLE(x) ((x) << 12) | ||
| 540 | /* byte mask */ | ||
| 541 | |||
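
The SDMA_PACKET() macro above packs the opcode, sub-opcode, and any packet-specific extra bits into a single header dword, and the two *_REGISTER_OFFSET values let one code path drive either engine instance. A hypothetical sketch follows; the helper name and raw-array IB representation are illustrative, while the dword layout follows the CIK SDMA linear-copy packet used elsewhere in this driver.

```c
#include <stdint.h>

/* Per-instance addressing: SDMA1's registers sit 0x200 register
 * slots above SDMA0's, so driver code indexes a small offset table
 * instead of duplicating every register define. */
static const uint32_t sdma_offsets[SDMA_MAX_INSTANCE] = {
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET,
};

/* Emit a linear copy: header, byte count, parameter dword, then the
 * 64-bit source and destination addresses split into lo/hi dwords. */
static void sdma_emit_copy_linear(uint32_t *ib, unsigned int *idx,
				  uint64_t src, uint64_t dst,
				  uint32_t bytes)
{
	ib[(*idx)++] = SDMA_PACKET(SDMA_OPCODE_COPY,
				   SDMA_COPY_SUB_OPCODE_LINEAR, 0);
	ib[(*idx)++] = bytes;
	ib[(*idx)++] = 0;			/* no byte swapping */
	ib[(*idx)++] = (uint32_t)src;		/* src address low  */
	ib[(*idx)++] = (uint32_t)(src >> 32);	/* src address high */
	ib[(*idx)++] = (uint32_t)dst;		/* dst address low  */
	ib[(*idx)++] = (uint32_t)(dst >> 32);	/* dst address high */
}
```

Register accesses then take the form reg + sdma_offsets[i], with i selecting the engine, so the same programming sequence serves both instances.
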
| 542 | #define VCE_CMD_NO_OP 0x00000000 | ||
| 543 | #define VCE_CMD_END 0x00000001 | ||
| 544 | #define VCE_CMD_IB 0x00000002 | ||
| 545 | #define VCE_CMD_FENCE 0x00000003 | ||
| 546 | #define VCE_CMD_TRAP 0x00000004 | ||
| 547 | #define VCE_CMD_IB_AUTO 0x00000005 | ||
| 548 | #define VCE_CMD_SEMAPHORE 0x00000006 | ||
| 549 | |||
| 550 | #endif | ||
