author     Dave Airlie <airlied@redhat.com>   2016-09-27 21:27:05 -0400
committer  Dave Airlie <airlied@redhat.com>   2016-09-27 21:27:05 -0400
commit     9f4ef05bcdcfdf911b056b471dd3c6a4f331b644 (patch)
tree       ba8dfba87b4fe5295598f5438881822b6d3395f0 /drivers/gpu/drm/amd/amdgpu
parent     81c5d6aa3983662b6b48b504fe3a0a4c640f6a84 (diff)
parent     beb86f29c9c7f2d04f9a42c4c61cc469c3689779 (diff)
Merge branch 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux into drm-next
Last set of radeon and amdgpu changes for 4.9. This is mostly just the
powerplay cleanup for dGPUs. Beyond that, just misc code cleanups and
bug fixes.

* 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux: (49 commits)
  drm/amd/amdgpu: Clean up afmt allocation in DCEv6. (v2)
  drm/amd/amdgpu: Remove division from vblank_wait
  drm/radeon/atif: Send a hotplug event when we get dgpu display request
  drm/radeon/atpx: check for ATIF dGPU wake for display events support
  drm/amdgpu/atif: Send a hotplug event when we get dgpu display request
  drm/amdgpu/atpx: check for ATIF dGPU wake for display events support
  drm/amdgpu: bump version for new vce packet support
  drm/amdgpu/vce: allow the clock table packet
  drm/amdgpu:cleanup virt related define
  drm/amdgpu: use powerplay module for dgpu in Vi.
  drm/amdgpu: set gfx clock gating for tonga/polaris.
  drm/amdgpu: set system clock gating for tonga/polaris.
  drm/amd/powerplay: export function to help to set cg by smu.
  drm/amdgpu: avoid out of bounds access on array interrupt_status_offsets
  drm/amdgpu: mark symbols static where possible
  drm/amdgpu: remove unused functions
  drm/amd/powerplay: Replace per-asic print_performance with generic
  drm/radeon: narrow asic_init for virtualization
  drm/amdgpu:add fw version entry to info
  drm/amdgpu:determine if vPost is needed indeed
  ...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 118
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 62
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h (renamed from drivers/gpu/drm/amd/amdgpu/iceland_smum.h) | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_smc.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_dpm.c | 186
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_smc.c | 863
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_smum.h | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 131
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 160
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 245
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | 200
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_smc.c | 677
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | 186
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_smc.c | 862
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_smum.h | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 91
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vid.h | 37
35 files changed, 1004 insertions, 3199 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index dc6df075bafc..d15e9b080ce1 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -52,10 +52,7 @@ amdgpu-y += \
 amdgpu-y += \
 	amdgpu_dpm.o \
 	amdgpu_powerplay.o \
-	cz_smc.o cz_dpm.o \
-	tonga_smc.o tonga_dpm.o \
-	fiji_smc.o fiji_dpm.o \
-	iceland_smc.o iceland_dpm.o
+	cz_smc.o cz_dpm.o
 
 # add DCE block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ee45d9f7f3dc..9d79e4ba0213 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -57,6 +57,7 @@
 #include "amdgpu_acp.h"
 
 #include "gpu_scheduler.h"
+#include "amdgpu_virt.h"
 
 /*
  * Modules parameters.
@@ -1827,6 +1828,7 @@ struct amdgpu_asic_funcs {
 	bool (*read_disabled_bios)(struct amdgpu_device *adev);
 	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
 				   u8 *bios, u32 length_bytes);
+	void (*detect_hw_virtualization) (struct amdgpu_device *adev);
 	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
 			     u32 sh_num, u32 reg_offset, u32 *value);
 	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
@@ -1836,8 +1838,6 @@ struct amdgpu_asic_funcs {
 	/* MM block clocks */
 	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
 	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
-	/* query virtual capabilities */
-	u32 (*get_virtual_caps)(struct amdgpu_device *adev);
 	/* static power management */
 	int (*get_pcie_lanes)(struct amdgpu_device *adev);
 	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
@@ -1933,16 +1933,6 @@ struct amdgpu_atcs {
 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
 
-
-/* GPU virtualization */
-#define AMDGPU_VIRT_CAPS_SRIOV_EN	(1 << 0)
-#define AMDGPU_VIRT_CAPS_IS_VF		(1 << 1)
-struct amdgpu_virtualization {
-	bool supports_sr_iov;
-	bool is_virtual;
-	u32 caps;
-};
-
 /*
  * Core structure, functions and helpers.
  */
@@ -2260,12 +2250,12 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
-#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
 #define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
 #define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
+#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
@@ -2323,6 +2313,11 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
 #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
 
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+	((adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+		-EINVAL)
+
 #define amdgpu_dpm_get_temperature(adev) \
 	((adev)->pp_enabled ? \
 	      (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
@@ -2374,11 +2369,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 	      (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
 	      (adev)->pm.funcs->powergate_vce((adev), (g)))
 
-#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
-	((adev)->pp_enabled ? \
-	      (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
-	      (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))
-
 #define amdgpu_dpm_get_current_power_state(adev) \
 	      (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
 
@@ -2460,11 +2450,13 @@ void amdgpu_register_atpx_handler(void);
 void amdgpu_unregister_atpx_handler(void);
 bool amdgpu_has_atpx_dgpu_power_cntl(void);
 bool amdgpu_is_atpx_hybrid(void);
+bool amdgpu_atpx_dgpu_req_power_for_displays(void);
 #else
 static inline void amdgpu_register_atpx_handler(void) {}
 static inline void amdgpu_unregister_atpx_handler(void) {}
 static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
 static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
+static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
 #endif
 
 /*
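
The amdgpu_dpm_read_sensor() macro added above becomes the single entry point for powerplay sensor queries (the per-asic debugfs print path it replaces is removed in the same file). A minimal kernel-side sketch of a caller, assuming the AMDGPU_PP_SENSOR_* enum from the powerplay headers; the helper name is hypothetical:

	/* Hedged sketch: query the current graphics clock via the new macro.
	 * The debugfs code later in this diff divides the raw value by 100
	 * to print MHz, so the sensor evidently reports in 10 kHz units.
	 */
	static int example_query_sclk(struct amdgpu_device *adev, u32 *sclk_mhz)
	{
		int32_t value;
		int r;

		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value);
		if (r)	/* -EINVAL when powerplay is not enabled for this asic */
			return r;

		*sclk_mhz = value / 100;
		return 0;
	}
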
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 5cd7b736a9de..5796539a0bcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -25,6 +25,7 @@
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <linux/power_supply.h>
+#include <linux/pm_runtime.h>
 #include <acpi/video.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
@@ -333,6 +334,16 @@ int amdgpu_atif_handler(struct amdgpu_device *adev,
 #endif
 		}
 	}
+	if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+		if ((adev->flags & AMD_IS_PX) &&
+		    amdgpu_atpx_dgpu_req_power_for_displays()) {
+			pm_runtime_get_sync(adev->ddev->dev);
+			/* Just fire off a uevent and let userspace tell us what to do */
+			drm_helper_hpd_irq_event(adev->ddev);
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
+		}
+	}
 	/* TODO: check other events */
 
 	/* We've handled the event, stop the notifier chain. The ACPI interface
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d080d0807a5b..dba8a5b25e66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -143,14 +143,6 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
 	return r;
 }
 
-u32 pool_to_domain(enum kgd_memory_pool p)
-{
-	switch (p) {
-	case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
-	default: return AMDGPU_GEM_DOMAIN_GTT;
-	}
-}
-
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 			void **mem_obj, uint64_t *gpu_addr,
 			void **cpu_ptr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 49de92600074..550c5ee704ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -29,6 +29,7 @@ struct amdgpu_atpx {
 	acpi_handle handle;
 	struct amdgpu_atpx_functions functions;
 	bool is_hybrid;
+	bool dgpu_req_power_for_displays;
 };
 
 static struct amdgpu_atpx_priv {
@@ -73,6 +74,10 @@ bool amdgpu_is_atpx_hybrid(void) {
 	return amdgpu_atpx_priv.atpx.is_hybrid;
 }
 
+bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
+	return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
+}
+
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -213,6 +218,10 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 		atpx->is_hybrid = true;
 	}
 
+	atpx->dgpu_req_power_for_displays = false;
+	if (valid_bits & ATPX_DGPU_REQ_POWER_FOR_DISPLAYS)
+		atpx->dgpu_req_power_for_displays = true;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index f1c53a2b09c6..7a8bfa34682f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -711,6 +711,47 @@ static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode
 	return -EINVAL;
 }
 
+static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
+					enum cgs_ucode_id type)
+{
+	CGS_FUNC_ADEV;
+	uint16_t fw_version;
+
+	switch (type) {
+		case CGS_UCODE_ID_SDMA0:
+			fw_version = adev->sdma.instance[0].fw_version;
+			break;
+		case CGS_UCODE_ID_SDMA1:
+			fw_version = adev->sdma.instance[1].fw_version;
+			break;
+		case CGS_UCODE_ID_CP_CE:
+			fw_version = adev->gfx.ce_fw_version;
+			break;
+		case CGS_UCODE_ID_CP_PFP:
+			fw_version = adev->gfx.pfp_fw_version;
+			break;
+		case CGS_UCODE_ID_CP_ME:
+			fw_version = adev->gfx.me_fw_version;
+			break;
+		case CGS_UCODE_ID_CP_MEC:
+			fw_version = adev->gfx.mec_fw_version;
+			break;
+		case CGS_UCODE_ID_CP_MEC_JT1:
+			fw_version = adev->gfx.mec_fw_version;
+			break;
+		case CGS_UCODE_ID_CP_MEC_JT2:
+			fw_version = adev->gfx.mec_fw_version;
+			break;
+		case CGS_UCODE_ID_RLC_G:
+			fw_version = adev->gfx.rlc_fw_version;
+			break;
+		default:
+			DRM_ERROR("firmware type %d do not have version\n", type);
+			fw_version = 0;
+	}
+	return fw_version;
+}
+
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 					enum cgs_ucode_id type,
 					struct cgs_firmware_info *info)
@@ -741,6 +782,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		info->mc_addr = gpu_addr;
 		info->image_size = data_size;
 		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
 		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
 	} else {
 		char fw_name[30] = {0};
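
With the new fw_version field, CGS clients (primarily the powerplay module that this merge switches the dGPUs over to) can see the driver-tracked firmware version instead of only the raw ucode header version. A hedged sketch of a consumer, assuming the cgs_get_firmware_info() wrapper from the CGS common headers:

	/* Hedged sketch: a CGS client picking up the new fw_version field. */
	struct cgs_firmware_info info = {0};
	int err = cgs_get_firmware_info(cgs_device, CGS_UCODE_ID_SDMA0, &info);

	if (!err)
		pr_info("SDMA0 ucode version %u, fw_version %u\n",
			info.version, info.fw_version);
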
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 319a5e1d9389..decbba5ad438 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1545,7 +1545,8 @@ static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-int amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
+static int
+amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
 {
 	return 0;
 }
@@ -1557,7 +1558,8 @@ amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
 	return connector_status_connected;
 }
 
-int amdgpu_connector_virtual_set_property(struct drm_connector *connector,
+static int
+amdgpu_connector_virtual_set_property(struct drm_connector *connector,
 					  struct drm_property *property,
 					  uint64_t val)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3ddae5ff41bb..99a15cad6789 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -50,6 +50,7 @@
 #include "vi.h"
 #include "bif/bif_4_1_d.h"
 #include <linux/pci.h>
+#include <linux/firmware.h>
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
@@ -110,7 +111,7 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		    bool always_indirect)
 {
 	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-	
+
 	if ((reg * 4) < adev->rmmio_size && !always_indirect)
 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 	else {
@@ -651,6 +652,46 @@ bool amdgpu_card_posted(struct amdgpu_device *adev)
 
 }
 
+static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
+{
+	if (amdgpu_sriov_vf(adev))
+		return false;
+
+	if (amdgpu_passthrough(adev)) {
+		/* for FIJI: In whole GPU pass-through virtualization case
+		 * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH)
+		 * so amdgpu_card_posted return false and driver will incorrectly skip vPost.
+		 * but if we force vPost do in pass-through case, the driver reload will hang.
+		 * whether doing vPost depends on amdgpu_card_posted if smc version is above
+		 * 00160e00 for FIJI.
+		 */
+		if (adev->asic_type == CHIP_FIJI) {
+			int err;
+			uint32_t fw_ver;
+			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
+			/* force vPost if error occured */
+			if (err)
+				return true;
+
+			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
+			if (fw_ver >= 0x00160e00)
+				return !amdgpu_card_posted(adev);
+		}
+	} else {
+		/* in bare-metal case, amdgpu_card_posted return false
+		 * after system reboot/boot, and return true if driver
+		 * reloaded.
+		 * we shouldn't do vPost after driver reload otherwise GPU
+		 * could hang.
+		 */
+		if (amdgpu_card_posted(adev))
+			return false;
+	}
+
+	/* we assume vPost is neede for all other cases */
+	return true;
+}
+
 /**
  * amdgpu_dummy_page_init - init dummy page used by the driver
  *
@@ -1485,13 +1526,10 @@ static int amdgpu_resume(struct amdgpu_device *adev)
 	return 0;
 }
 
-static bool amdgpu_device_is_virtual(void)
+static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 {
-#ifdef CONFIG_X86
-	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
-#else
-	return false;
-#endif
+	if (amdgpu_atombios_has_gpu_virtualization_table(adev))
+		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
 }
 
 /**
@@ -1648,25 +1686,24 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		goto failed;
 	}
 
-	/* See if the asic supports SR-IOV */
-	adev->virtualization.supports_sr_iov =
-		amdgpu_atombios_has_gpu_virtualization_table(adev);
-
-	/* Check if we are executing in a virtualized environment */
-	adev->virtualization.is_virtual = amdgpu_device_is_virtual();
-	adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+	/* detect if we are with an SRIOV vbios */
+	amdgpu_device_detect_sriov_bios(adev);
 
 	/* Post card if necessary */
-	if (!amdgpu_card_posted(adev) ||
-	    (adev->virtualization.is_virtual &&
-	     !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
+	if (amdgpu_vpost_needed(adev)) {
 		if (!adev->bios) {
-			dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
+			dev_err(adev->dev, "no vBIOS found\n");
 			r = -EINVAL;
 			goto failed;
 		}
-		DRM_INFO("GPU not posted. posting now...\n");
-		amdgpu_atom_asic_init(adev->mode_info.atom_context);
+		DRM_INFO("GPU posting now...\n");
+		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+		if (r) {
+			dev_err(adev->dev, "gpu post error!\n");
+			goto failed;
+		}
+	} else {
+		DRM_INFO("GPU post is not needed\n");
 	}
 
 	/* Initialize clocks */
@@ -1842,8 +1879,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 
 	adev = dev->dev_private;
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
-	    dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
 	drm_kms_helper_poll_disable(dev);
@@ -1928,8 +1964,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	struct drm_crtc *crtc;
 	int r;
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
-	    dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
 	if (fbcon)
@@ -2043,7 +2078,7 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
 	return asic_hang;
 }
 
-int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
 {
 	int i, r = 0;
 
@@ -2714,7 +2749,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
-	config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
 	if (!config)
 		return -ENOMEM;
 
@@ -2773,6 +2808,29 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 	return result;
 }
 
+static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
+					size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	int idx, r;
+	int32_t value;
+
+	if (size != 4 || *pos & 0x3)
+		return -EINVAL;
+
+	/* convert offset to sensor number */
+	idx = *pos >> 2;
+
+	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+		r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
+	else
+		return -EINVAL;
+
+	if (!r)
+		r = put_user(value, (int32_t *)buf);
+
+	return !r ? 4 : r;
+}
 
 static const struct file_operations amdgpu_debugfs_regs_fops = {
 	.owner = THIS_MODULE,
@@ -2805,12 +2863,19 @@ static const struct file_operations amdgpu_debugfs_gca_config_fops = {
 	.llseek = default_llseek
 };
 
+static const struct file_operations amdgpu_debugfs_sensors_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_debugfs_sensor_read,
+	.llseek = default_llseek
+};
+
 static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_regs_fops,
 	&amdgpu_debugfs_regs_didt_fops,
 	&amdgpu_debugfs_regs_pcie_fops,
 	&amdgpu_debugfs_regs_smc_fops,
 	&amdgpu_debugfs_gca_config_fops,
+	&amdgpu_debugfs_sensors_fops,
 };
 
 static const char *debugfs_regs_names[] = {
@@ -2819,6 +2884,7 @@ static const char *debugfs_regs_names[] = {
2819 "amdgpu_regs_pcie", 2884 "amdgpu_regs_pcie",
2820 "amdgpu_regs_smc", 2885 "amdgpu_regs_smc",
2821 "amdgpu_gca_config", 2886 "amdgpu_gca_config",
2887 "amdgpu_sensors",
2822}; 2888};
2823 2889
2824static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) 2890static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
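
The read handler above maps the file offset to a sensor index (idx = offset / 4) and returns one 32-bit value per 4-byte read. A hedged userspace sketch; the /sys/kernel/debug/dri/0 path and the sensor index are assumptions, while the amdgpu_sensors file name comes from the table above:

	/* Hedged sketch: read one sensor value from the new debugfs file. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int32_t value;
		int idx = 0;	/* sensor index; the offset selects it (idx * 4) */
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_sensors", O_RDONLY);

		if (fd < 0)
			return 1;
		if (pread(fd, &value, sizeof(value), (off_t)idx * 4) == sizeof(value))
			printf("sensor %d = %d\n", idx, value);
		close(fd);
		return 0;
	}
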
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 596362624610..7dbc7727e32b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -56,9 +56,10 @@
  * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
  * - 3.5.0 - Add support for new UVD_NO_OP register.
  * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
+ * - 3.7.0 - Add support for VCE clock list packet
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	6
+#define KMS_DRIVER_MINOR	7
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -485,7 +486,7 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
 	/* if we are running in a VM, make sure the device
 	 * torn down properly on reboot/shutdown
 	 */
-	if (adev->virtualization.is_virtual)
+	if (amdgpu_passthrough(adev))
 		amdgpu_pci_remove(pdev);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index d4ec3cb187a5..accc908bdc88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1322,6 +1322,64 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
  */
 #if defined(CONFIG_DEBUG_FS)
 
+static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
+{
+	int32_t value;
+
+	/* sanity check PP is enabled */
+	if (!(adev->powerplay.pp_funcs &&
+	      adev->powerplay.pp_funcs->read_sensor))
+	      return -EINVAL;
+
+	/* GPU Clocks */
+	seq_printf(m, "GFX Clocks and Power:\n");
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, &value))
+		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value))
+		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, &value))
+		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, &value))
+		seq_printf(m, "\t%u mV (VDDNB)\n", value);
+	seq_printf(m, "\n");
+
+	/* GPU Temp */
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, &value))
+		seq_printf(m, "GPU Temperature: %u C\n", value/1000);
+
+	/* GPU Load */
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value))
+		seq_printf(m, "GPU Load: %u %%\n", value);
+	seq_printf(m, "\n");
+
+	/* UVD clocks */
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, &value)) {
+		if (!value) {
+			seq_printf(m, "UVD: Disabled\n");
+		} else {
+			seq_printf(m, "UVD: Enabled\n");
+			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, &value))
+				seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, &value))
+				seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+		}
+	}
+	seq_printf(m, "\n");
+
+	/* VCE clocks */
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, &value)) {
+		if (!value) {
+			seq_printf(m, "VCE: Disabled\n");
+		} else {
+			seq_printf(m, "VCE: Enabled\n");
+			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, &value))
+				seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+		}
+	}
+
+	return 0;
+}
+
 static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1337,11 +1395,11 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 		   (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
 		seq_printf(m, "PX asic powered off\n");
 	} else if (adev->pp_enabled) {
-		amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+		return amdgpu_debugfs_pm_info_pp(m, adev);
 	} else {
 		mutex_lock(&adev->pm.mutex);
 		if (adev->pm.funcs->debugfs_print_current_performance_level)
-			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+			adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
 		else
 			seq_printf(m, "Debugfs support not implemented for this asic\n");
 		mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 1e7f160f23d8..68ad24101a36 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -80,15 +80,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
 		amd_pp->ip_funcs = &kv_dpm_ip_funcs;
 		break;
 #endif
-	case CHIP_TOPAZ:
-		amd_pp->ip_funcs = &iceland_dpm_ip_funcs;
-		break;
-	case CHIP_TONGA:
-		amd_pp->ip_funcs = &tonga_dpm_ip_funcs;
-		break;
-	case CHIP_FIJI:
-		amd_pp->ip_funcs = &fiji_dpm_ip_funcs;
-		break;
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 		amd_pp->ip_funcs = &cz_dpm_ip_funcs;
@@ -110,11 +101,11 @@ static int amdgpu_pp_early_init(void *handle)
 	switch (adev->asic_type) {
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
-		adev->pp_enabled = true;
-		break;
 	case CHIP_TONGA:
 	case CHIP_FIJI:
 	case CHIP_TOPAZ:
+		adev->pp_enabled = true;
+		break;
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 		adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 2c9ea9b50f48..06b94c13c2c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -691,6 +691,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 	case 0x04000008: /* rdo */
 	case 0x04000009: /* vui */
 	case 0x05000002: /* auxiliary buffer */
+	case 0x05000009: /* clock table */
 		break;
 
 	case 0x03000001: /* encode */
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 5983e3150cc5..2c37a374917f 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2016 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -19,23 +19,39 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
+ * Author: Monk.liu@amd.com
  */
+#ifndef AMDGPU_VIRT_H
+#define AMDGPU_VIRT_H
 
-#ifndef ICELAND_SMUM_H
-#define ICELAND_SMUM_H
+#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS  (1 << 0) /* vBIOS is sr-iov ready */
+#define AMDGPU_SRIOV_CAPS_ENABLE_IOV   (1 << 1) /* sr-iov is enabled on this GPU */
+#define AMDGPU_SRIOV_CAPS_IS_VF        (1 << 2) /* this GPU is a virtual function */
+#define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* thw whole GPU is pass through for VM */
+/* GPU virtualization */
+struct amdgpu_virtualization {
+	uint32_t virtual_caps;
+};
 
-#include "ppsmc.h"
+#define amdgpu_sriov_enabled(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
 
-extern int iceland_smu_init(struct amdgpu_device *adev);
-extern int iceland_smu_fini(struct amdgpu_device *adev);
-extern int iceland_smu_start(struct amdgpu_device *adev);
+#define amdgpu_sriov_vf(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF)
 
-struct iceland_smu_private_data
-{
-	uint8_t *header;
-	uint8_t *mec_image;
-	uint32_t header_addr_high;
-	uint32_t header_addr_low;
-};
+#define amdgpu_sriov_bios(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)
 
+#define amdgpu_passthrough(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE)
+
+static inline bool is_virtual_machine(void)
+{
+#ifdef CONFIG_X86
+	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+	return false;
 #endif
+}
+
+#endif
\ No newline at end of file
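
The renamed header replaces the old supports_sr_iov/is_virtual bool pair with a single virtual_caps bitmask plus accessor macros. A hedged sketch of how the pieces compose at init time, inferred from the amdgpu_device.c and cik.c hunks in this diff (the wrapper function is illustrative, not part of the merge):

	/* Hedged sketch: how the caps bits fit together during early init.
	 * The per-asic detect_hw_virtualization() hook (see the cik.c hunk
	 * below) sets AMDGPU_PASSTHROUGH_MODE and/or the SR-IOV bits, the
	 * vBIOS check adds AMDGPU_SRIOV_CAPS_SRIOV_VBIOS, and consumers
	 * then simply test the mask.
	 */
	static void example_virt_setup(struct amdgpu_device *adev)
	{
		amdgpu_asic_detect_hw_virtualization(adev);

		if (amdgpu_atombios_has_gpu_virtualization_table(adev))
			adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;

		if (amdgpu_passthrough(adev))
			DRM_INFO("GPU passed through to a VM\n");
		if (amdgpu_sriov_vf(adev))
			DRM_INFO("running as an SR-IOV virtual function\n");
	}
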
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 825de800b798..a845b6a93b79 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -963,12 +963,6 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
 	return true;
 }
 
-static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
-{
-	/* CIK does not support SR-IOV */
-	return 0;
-}
-
 static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
 	{mmGRBM_STATUS, false},
 	{mmGB_ADDR_CONFIG, false},
@@ -1641,6 +1635,12 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
1641 >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; 1635 >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
1642} 1636}
1643 1637
1638static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
1639{
1640 if (is_virtual_machine()) /* passthrough mode */
1641 adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
1642}
1643
1644static const struct amdgpu_ip_block_version bonaire_ip_blocks[] = 1644static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
1645{ 1645{
1646 /* ORDER MATTERS! */ 1646 /* ORDER MATTERS! */
@@ -2384,13 +2384,13 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
 {
 	.read_disabled_bios = &cik_read_disabled_bios,
 	.read_bios_from_rom = &cik_read_bios_from_rom,
+	.detect_hw_virtualization = cik_detect_hw_virtualization,
 	.read_register = &cik_read_register,
 	.reset = &cik_asic_reset,
 	.set_vga_state = &cik_vga_set_state,
 	.get_xclk = &cik_get_xclk,
 	.set_uvd_clocks = &cik_set_uvd_clocks,
 	.set_vce_clocks = &cik_set_vce_clocks,
-	.get_virtual_caps = &cik_get_virtual_caps,
 };
 
 static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index c4f6f00d62bc..8659852aea9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -562,4 +562,40 @@ enum {
 	MTYPE_NONCACHED = 3
 };
 
+/* mmPA_SC_RASTER_CONFIG mask */
+#define RB_MAP_PKR0(x)				((x) << 0)
+#define RB_MAP_PKR0_MASK			(0x3 << 0)
+#define RB_MAP_PKR1(x)				((x) << 2)
+#define RB_MAP_PKR1_MASK			(0x3 << 2)
+#define RB_XSEL2(x)				((x) << 4)
+#define RB_XSEL2_MASK				(0x3 << 4)
+#define RB_XSEL					(1 << 6)
+#define RB_YSEL					(1 << 7)
+#define PKR_MAP(x)				((x) << 8)
+#define PKR_MAP_MASK				(0x3 << 8)
+#define PKR_XSEL(x)				((x) << 10)
+#define PKR_XSEL_MASK				(0x3 << 10)
+#define PKR_YSEL(x)				((x) << 12)
+#define PKR_YSEL_MASK				(0x3 << 12)
+#define SC_MAP(x)				((x) << 16)
+#define SC_MAP_MASK				(0x3 << 16)
+#define SC_XSEL(x)				((x) << 18)
+#define SC_XSEL_MASK				(0x3 << 18)
+#define SC_YSEL(x)				((x) << 20)
+#define SC_YSEL_MASK				(0x3 << 20)
+#define SE_MAP(x)				((x) << 24)
+#define SE_MAP_MASK				(0x3 << 24)
+#define SE_XSEL(x)				((x) << 26)
+#define SE_XSEL_MASK				(0x3 << 26)
+#define SE_YSEL(x)				((x) << 28)
+#define SE_YSEL_MASK				(0x3 << 28)
+
+/* mmPA_SC_RASTER_CONFIG_1 mask */
+#define SE_PAIR_MAP(x)				((x) << 0)
+#define SE_PAIR_MAP_MASK			(0x3 << 0)
+#define SE_PAIR_XSEL(x)				((x) << 2)
+#define SE_PAIR_XSEL_MASK			(0x3 << 2)
+#define SE_PAIR_YSEL(x)				((x) << 4)
+#define SE_PAIR_YSEL_MASK			(0x3 << 4)
+
 #endif
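
Each field macro shifts a small value into its slot of the 32-bit raster config word, with a matching mask for read-modify-write updates. A hedged illustration with placeholder values (not a real CIK configuration):

	/* Hedged sketch: composing and updating a raster config word. */
	u32 raster_config = RB_MAP_PKR0(2) | PKR_MAP(1) | SC_MAP(1) | SE_MAP(2);

	raster_config &= ~SE_MAP_MASK;	/* clear the SE_MAP field */
	raster_config |= SE_MAP(1);	/* then program a new value */
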
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index 95887e484c51..aed7033c0973 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -101,13 +101,6 @@ int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
 	return 0;
 }
 
-int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
-						u16 msg, u32 parameter)
-{
-	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
-	return cz_send_msg_to_smc_async(adev, msg);
-}
-
 int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
 						u16 msg, u32 parameter)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index bc5bb4eb9625..9d38fe0519e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -221,7 +221,7 @@ static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
  */
 static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 {
-	unsigned i = 0;
+	unsigned i = 100;
 
 	if (crtc >= adev->mode_info.num_crtc)
 		return;
@@ -233,14 +233,16 @@ static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 	 * wait for another frame.
 	 */
 	while (dce_v10_0_is_in_vblank(adev, crtc)) {
-		if (i++ % 100 == 0) {
+		if (i++ == 100) {
+			i = 0;
 			if (!dce_v10_0_is_counter_moving(adev, crtc))
 				break;
 		}
 	}
 
 	while (!dce_v10_0_is_in_vblank(adev, crtc)) {
-		if (i++ % 100 == 0) {
+		if (i++ == 100) {
+			i = 0;
 			if (!dce_v10_0_is_counter_moving(adev, crtc))
 				break;
 		}
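
This is the "Remove division from vblank_wait" change from the shortlog: instead of computing i % 100 on every spin, the counter starts at 100 (so the stall check still runs on the first pass) and is reset to 0 after each check. The same pattern repeats in the DCE6 and DCE8 hunks below. A minimal standalone sketch; still_waiting() and expensive_check() are stand-ins for the is_in_vblank/is_counter_moving helpers:

	/* Hedged sketch of the pattern: a periodic check without a division. */
	unsigned i = 100;	/* forces a check on the first iteration */

	while (still_waiting()) {
		if (i++ == 100) {	/* roughly every 100 spins, no '%' */
			i = 0;
			if (!expensive_check())
				break;
		}
	}
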
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index d3512f381e53..eb8f96a61491 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -146,7 +146,7 @@ static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
  */
 static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 {
-	unsigned i = 0;
+	unsigned i = 100;
 
 	if (crtc >= adev->mode_info.num_crtc)
 		return;
@@ -158,14 +158,16 @@ static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 	 * wait for another frame.
 	 */
 	while (dce_v6_0_is_in_vblank(adev, crtc)) {
-		if (i++ % 100 == 0) {
+		if (i++ == 100) {
+			i = 0;
 			if (!dce_v6_0_is_counter_moving(adev, crtc))
 				break;
 		}
 	}
 
 	while (!dce_v6_0_is_in_vblank(adev, crtc)) {
-		if (i++ % 100 == 0) {
+		if (i++ == 100) {
+			i = 0;
 			if (!dce_v6_0_is_counter_moving(adev, crtc))
 				break;
 		}
@@ -185,7 +187,7 @@ static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
 	unsigned i;
 
 	/* Enable pflip interrupts */
-	for (i = 0; i <= adev->mode_info.num_crtc; i++)
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
 		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
 }
 
@@ -194,7 +196,7 @@ static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
 	unsigned i;
 
 	/* Disable pflip interrupts */
-	for (i = 0; i <= adev->mode_info.num_crtc; i++)
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
 		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
 }
 
@@ -1420,21 +1422,29 @@ static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
 }
 
-static void dce_v6_0_afmt_init(struct amdgpu_device *adev)
+static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
 {
-	int i;
+	int i, j;
 
 	for (i = 0; i < adev->mode_info.num_dig; i++)
 		adev->mode_info.afmt[i] = NULL;
 
-	/* DCE8 has audio blocks tied to DIG encoders */
+	/* DCE6 has audio blocks tied to DIG encoders */
 	for (i = 0; i < adev->mode_info.num_dig; i++) {
 		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
 		if (adev->mode_info.afmt[i]) {
 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
 			adev->mode_info.afmt[i]->id = i;
+		} else {
+			for (j = 0; j < i; j++) {
+				kfree(adev->mode_info.afmt[j]);
+				adev->mode_info.afmt[j] = NULL;
+			}
+			DRM_ERROR("Out of memory allocating afmt table\n");
+			return -ENOMEM;
 		}
 	}
+	return 0;
 }
 
@@ -2397,7 +2407,9 @@ static int dce_v6_0_sw_init(void *handle)
 		return -EINVAL;
 
 	/* setup afmt */
-	dce_v6_0_afmt_init(adev);
+	r = dce_v6_0_afmt_init(adev);
+	if (r)
+		return r;
 
 	r = dce_v6_0_audio_init(adev);
 	if (r)
@@ -2782,7 +2794,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
 	uint32_t disp_int, mask, int_control, tmp;
 	unsigned hpd;
 
-	if (entry->src_data > 6) {
+	if (entry->src_data >= adev->mode_info.num_hpd) {
 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
 		return 0;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index abd5213dfe18..a7decf977b5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -170,7 +170,7 @@ static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
  */
 static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 {
-	unsigned i = 0;
+	unsigned i = 100;
 
 	if (crtc >= adev->mode_info.num_crtc)
 		return;
@@ -182,14 +182,16 @@ static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 	 * wait for another frame.
 	 */
 	while (dce_v8_0_is_in_vblank(adev, crtc)) {
-		if (i++ % 100 == 0) {
+		if (i++ == 100) {
+			i = 0;
 			if (!dce_v8_0_is_counter_moving(adev, crtc))
 				break;
 		}
 	}
 
 	while (!dce_v8_0_is_in_vblank(adev, crtc)) {
-		if (i++ % 100 == 0) {
+		if (i++ == 100) {
+			i = 0;
 			if (!dce_v8_0_is_counter_moving(adev, crtc))
 				break;
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 619b604ab8ae..30badd261269 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -95,7 +95,7 @@ static bool dce_virtual_is_display_hung(struct amdgpu_device *adev)
 	return false;
 }
 
-void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
+static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
 			      struct amdgpu_mode_mc_save *save)
 {
 	switch (adev->asic_type) {
@@ -127,13 +127,13 @@ void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
 
 	return;
 }
-void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
+static void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
 				struct amdgpu_mode_mc_save *save)
 {
 	return;
 }
 
-void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
+static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
 				    bool render)
 {
 	return;
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
deleted file mode 100644
index ed03b75175d4..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
+++ /dev/null
@@ -1,186 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "fiji_smum.h"
28
29MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
30
31static void fiji_dpm_set_funcs(struct amdgpu_device *adev);
32
33static int fiji_dpm_early_init(void *handle)
34{
35 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
36
37 fiji_dpm_set_funcs(adev);
38
39 return 0;
40}
41
42static int fiji_dpm_init_microcode(struct amdgpu_device *adev)
43{
44 char fw_name[30] = "amdgpu/fiji_smc.bin";
45 int err;
46
47 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
48 if (err)
49 goto out;
50 err = amdgpu_ucode_validate(adev->pm.fw);
51
52out:
53 if (err) {
54 DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
55 release_firmware(adev->pm.fw);
56 adev->pm.fw = NULL;
57 }
58 return err;
59}
60
61static int fiji_dpm_sw_init(void *handle)
62{
63 int ret;
64 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
65
66 ret = fiji_dpm_init_microcode(adev);
67 if (ret)
68 return ret;
69
70 return 0;
71}
72
73static int fiji_dpm_sw_fini(void *handle)
74{
75 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
76
77 release_firmware(adev->pm.fw);
78 adev->pm.fw = NULL;
79
80 return 0;
81}
82
83static int fiji_dpm_hw_init(void *handle)
84{
85 int ret;
86 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
87
88 mutex_lock(&adev->pm.mutex);
89
90 ret = fiji_smu_init(adev);
91 if (ret) {
92 DRM_ERROR("SMU initialization failed\n");
93 goto fail;
94 }
95
96 ret = fiji_smu_start(adev);
97 if (ret) {
98 DRM_ERROR("SMU start failed\n");
99 goto fail;
100 }
101
102 mutex_unlock(&adev->pm.mutex);
103 return 0;
104
105fail:
106 adev->firmware.smu_load = false;
107 mutex_unlock(&adev->pm.mutex);
108 return -EINVAL;
109}
110
111static int fiji_dpm_hw_fini(void *handle)
112{
113 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
114 mutex_lock(&adev->pm.mutex);
115 fiji_smu_fini(adev);
116 mutex_unlock(&adev->pm.mutex);
117 return 0;
118}
119
120static int fiji_dpm_suspend(void *handle)
121{
122 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
123
124 fiji_dpm_hw_fini(adev);
125
126 return 0;
127}
128
129static int fiji_dpm_resume(void *handle)
130{
131 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
132
133 fiji_dpm_hw_init(adev);
134
135 return 0;
136}
137
138static int fiji_dpm_set_clockgating_state(void *handle,
139 enum amd_clockgating_state state)
140{
141 return 0;
142}
143
144static int fiji_dpm_set_powergating_state(void *handle,
145 enum amd_powergating_state state)
146{
147 return 0;
148}
149
150const struct amd_ip_funcs fiji_dpm_ip_funcs = {
151 .name = "fiji_dpm",
152 .early_init = fiji_dpm_early_init,
153 .late_init = NULL,
154 .sw_init = fiji_dpm_sw_init,
155 .sw_fini = fiji_dpm_sw_fini,
156 .hw_init = fiji_dpm_hw_init,
157 .hw_fini = fiji_dpm_hw_fini,
158 .suspend = fiji_dpm_suspend,
159 .resume = fiji_dpm_resume,
160 .is_idle = NULL,
161 .wait_for_idle = NULL,
162 .soft_reset = NULL,
163 .set_clockgating_state = fiji_dpm_set_clockgating_state,
164 .set_powergating_state = fiji_dpm_set_powergating_state,
165};
166
167static const struct amdgpu_dpm_funcs fiji_dpm_funcs = {
168 .get_temperature = NULL,
169 .pre_set_power_state = NULL,
170 .set_power_state = NULL,
171 .post_set_power_state = NULL,
172 .display_configuration_changed = NULL,
173 .get_sclk = NULL,
174 .get_mclk = NULL,
175 .print_power_state = NULL,
176 .debugfs_print_current_performance_level = NULL,
177 .force_performance_level = NULL,
178 .vblank_too_short = NULL,
179 .powergate_uvd = NULL,
180};
181
182static void fiji_dpm_set_funcs(struct amdgpu_device *adev)
183{
184	if (!adev->pm.funcs)
185 adev->pm.funcs = &fiji_dpm_funcs;
186}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
deleted file mode 100644
index b3e19ba4c57f..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ /dev/null
@@ -1,863 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "fiji_ppsmc.h"
28#include "fiji_smum.h"
29#include "smu_ucode_xfer_vi.h"
30#include "amdgpu_ucode.h"
31
32#include "smu/smu_7_1_3_d.h"
33#include "smu/smu_7_1_3_sh_mask.h"
34
35#define FIJI_SMC_SIZE 0x20000
36
37static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
38{
39 uint32_t val;
40
41 if (smc_address & 3)
42 return -EINVAL;
43
44 if ((smc_address + 3) > limit)
45 return -EINVAL;
46
47 WREG32(mmSMC_IND_INDEX_0, smc_address);
48
49 val = RREG32(mmSMC_IND_ACCESS_CNTL);
50 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
51 WREG32(mmSMC_IND_ACCESS_CNTL, val);
52
53 return 0;
54}
55
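/*
 * Bounds-check note (illustrative): the "+ 3" above covers the whole dword,
 * so with limit = FIJI_SMC_SIZE (0x20000) the last accepted address is
 * 0x1fffc:
 *
 *	smc_address = 0x1fffc -> 0x1ffff > 0x20000 is false -> accepted
 *	smc_address = 0x20000 -> 0x20003 > 0x20000 is true  -> -EINVAL
 */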
56static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
57{
58 uint32_t addr;
59 uint32_t data, orig_data;
60 int result = 0;
61 uint32_t extra_shift;
62 unsigned long flags;
63
64 if (smc_start_address & 3)
65 return -EINVAL;
66
67 if ((smc_start_address + byte_count) > limit)
68 return -EINVAL;
69
70 addr = smc_start_address;
71
72 spin_lock_irqsave(&adev->smc_idx_lock, flags);
73 while (byte_count >= 4) {
74		/* Bytes are written into the SMC address space with the MSB first */
75 data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
76
77 result = fiji_set_smc_sram_address(adev, addr, limit);
78
79 if (result)
80 goto out;
81
82 WREG32(mmSMC_IND_DATA_0, data);
83
84 src += 4;
85 byte_count -= 4;
86 addr += 4;
87 }
88
89	if (byte_count) {
90 /* Now write odd bytes left, do a read modify write cycle */
91 data = 0;
92
93 result = fiji_set_smc_sram_address(adev, addr, limit);
94 if (result)
95 goto out;
96
97 orig_data = RREG32(mmSMC_IND_DATA_0);
98 extra_shift = 8 * (4 - byte_count);
99
100 while (byte_count > 0) {
101 data = (data << 8) + *src++;
102 byte_count--;
103 }
104
105 data <<= extra_shift;
106 data |= (orig_data & ~((~0UL) << extra_shift));
107
108 result = fiji_set_smc_sram_address(adev, addr, limit);
109 if (result)
110 goto out;
111
112 WREG32(mmSMC_IND_DATA_0, data);
113 }
114
115out:
116 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
117 return result;
118}
119
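/*
 * Illustrative refactor (not in the original file) of the tail handling in
 * fiji_copy_bytes_to_smc() above: the last 1-3 bytes are merged into the
 * dword already present in SMC SRAM so the neighbouring bytes survive.
 * "orig" stands in for the RREG32(mmSMC_IND_DATA_0) readback.
 */
static uint32_t fiji_merge_tail_bytes(uint32_t orig, const uint8_t *src,
				      uint32_t byte_count)
{
	uint32_t extra_shift = 8 * (4 - byte_count);	/* byte_count is 1..3 */
	uint32_t data = 0;

	while (byte_count--)
		data = (data << 8) + *src++;

	data <<= extra_shift;				 /* new bytes take the MSBs */
	return data | (orig & ~((~0UL) << extra_shift)); /* keep the old LSBs */
}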
120static int fiji_program_jump_on_start(struct amdgpu_device *adev)
121{
122 static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
123	fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); /* write the jump-on-start sequence at SMC address 0 */
124
125 return 0;
126}
127
128static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
129{
130 uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
131 val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
132
133 return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
134}
135
136static int wait_smu_response(struct amdgpu_device *adev)
137{
138 int i;
139 uint32_t val;
140
141 for (i = 0; i < adev->usec_timeout; i++) {
142 val = RREG32(mmSMC_RESP_0);
143 if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
144 break;
145 udelay(1);
146 }
147
148 if (i == adev->usec_timeout)
149 return -EINVAL;
150
151 return 0;
152}
153
154static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
155{
156 if (wait_smu_response(adev)) {
157 DRM_ERROR("Failed to send previous message\n");
158 return -EINVAL;
159 }
160
161 WREG32(mmSMC_MSG_ARG_0, 0x20000);
162 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
163
164 if (wait_smu_response(adev)) {
165 DRM_ERROR("Failed to send message\n");
166 return -EINVAL;
167 }
168
169 return 0;
170}
171
172static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
173{
174 if (!fiji_is_smc_ram_running(adev))
175 {
176 return -EINVAL;
177 }
178
179 if (wait_smu_response(adev)) {
180 DRM_ERROR("Failed to send previous message\n");
181 return -EINVAL;
182 }
183
184 WREG32(mmSMC_MESSAGE_0, msg);
185
186 if (wait_smu_response(adev)) {
187 DRM_ERROR("Failed to send message\n");
188 return -EINVAL;
189 }
190
191 return 0;
192}
193
194static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
195 PPSMC_Msg msg)
196{
197 if (wait_smu_response(adev)) {
198 DRM_ERROR("Failed to send previous message\n");
199 return -EINVAL;
200 }
201
202 WREG32(mmSMC_MESSAGE_0, msg);
203
204 return 0;
205}
206
207static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
208 PPSMC_Msg msg,
209 uint32_t parameter)
210{
211 if (!fiji_is_smc_ram_running(adev))
212 return -EINVAL;
213
214 if (wait_smu_response(adev)) {
215 DRM_ERROR("Failed to send previous message\n");
216 return -EINVAL;
217 }
218
219 WREG32(mmSMC_MSG_ARG_0, parameter);
220
221 return fiji_send_msg_to_smc(adev, msg);
222}
223
224static int fiji_send_msg_to_smc_with_parameter_without_waiting(
225 struct amdgpu_device *adev,
226 PPSMC_Msg msg, uint32_t parameter)
227{
228 if (wait_smu_response(adev)) {
229 DRM_ERROR("Failed to send previous message\n");
230 return -EINVAL;
231 }
232
233 WREG32(mmSMC_MSG_ARG_0, parameter);
234
235 return fiji_send_msg_to_smc_without_waiting(adev, msg);
236}
237
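/*
 * Consolidated sketch (not in the original file) of the four send helpers
 * above: the SMU mailbox protocol is "drain the previous response, write the
 * argument, write the message, optionally wait for SMC_RESP_0". The
 * *_without_waiting variants skip the final poll; the RAM-running check of
 * fiji_send_msg_to_smc() is omitted here for brevity.
 */
static int fiji_smc_send_sketch(struct amdgpu_device *adev, PPSMC_Msg msg,
				uint32_t parameter, bool wait)
{
	if (wait_smu_response(adev)) {
		DRM_ERROR("Failed to send previous message\n");
		return -EINVAL;
	}

	WREG32(mmSMC_MSG_ARG_0, parameter);
	WREG32(mmSMC_MESSAGE_0, msg);

	return wait ? wait_smu_response(adev) : 0;
}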
238#if 0 /* not used yet */
239static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
240{
241 int i;
242 uint32_t val;
243
244 if (!fiji_is_smc_ram_running(adev))
245 return -EINVAL;
246
247 for (i = 0; i < adev->usec_timeout; i++) {
248 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
249 if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
250 break;
251 udelay(1);
252 }
253
254 if (i == adev->usec_timeout)
255 return -EINVAL;
256
257 return 0;
258}
259#endif
260
261static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
262{
263 const struct smc_firmware_header_v1_0 *hdr;
264 uint32_t ucode_size;
265 uint32_t ucode_start_address;
266 const uint8_t *src;
267 uint32_t val;
268 uint32_t byte_count;
269 uint32_t *data;
270 unsigned long flags;
271
272 if (!adev->pm.fw)
273 return -EINVAL;
274
275 /* Skip SMC ucode loading on SR-IOV capable boards.
276 * vbios does this for us in asic_init in that case.
277 */
278 if (adev->virtualization.supports_sr_iov)
279 return 0;
280
281 hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
282 amdgpu_ucode_print_smc_hdr(&hdr->header);
283
284 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
285 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
286 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
287 src = (const uint8_t *)
288 (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
289
290 if (ucode_size & 3) {
291 DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
292 return -EINVAL;
293 }
294
295 if (ucode_size > FIJI_SMC_SIZE) {
296 DRM_ERROR("SMC address is beyond the SMC RAM area\n");
297 return -EINVAL;
298 }
299
300 spin_lock_irqsave(&adev->smc_idx_lock, flags);
301 WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
302
303 val = RREG32(mmSMC_IND_ACCESS_CNTL);
304 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
305 WREG32(mmSMC_IND_ACCESS_CNTL, val);
306
307 byte_count = ucode_size;
308 data = (uint32_t *)src;
309 for (; byte_count >= 4; data++, byte_count -= 4)
310 WREG32(mmSMC_IND_DATA_0, data[0]);
311
312 val = RREG32(mmSMC_IND_ACCESS_CNTL);
313 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
314 WREG32(mmSMC_IND_ACCESS_CNTL, val);
315 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
316
317 return 0;
318}
319
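/*
 * Note on the two SMC_IND access modes used in this file:
 * fiji_set_smc_sram_address() clears AUTO_INCREMENT_IND_0 for random dword
 * access, while the upload above sets it so every WREG32(mmSMC_IND_DATA_0)
 * advances the SRAM address by 4 on its own. For a full 0x20000-byte image
 * that is one index write plus 32768 back-to-back data writes instead of
 * 32768 index/data pairs.
 */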
320#if 0 /* not used yet */
321static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
322 uint32_t smc_address,
323 uint32_t *value,
324 uint32_t limit)
325{
326 int result;
327 unsigned long flags;
328
329 spin_lock_irqsave(&adev->smc_idx_lock, flags);
330 result = fiji_set_smc_sram_address(adev, smc_address, limit);
331 if (result == 0)
332 *value = RREG32(mmSMC_IND_DATA_0);
333 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
334 return result;
335}
336
337static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
338 uint32_t smc_address,
339 uint32_t value,
340 uint32_t limit)
341{
342 int result;
343 unsigned long flags;
344
345 spin_lock_irqsave(&adev->smc_idx_lock, flags);
346 result = fiji_set_smc_sram_address(adev, smc_address, limit);
347 if (result == 0)
348 WREG32(mmSMC_IND_DATA_0, value);
349 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
350 return result;
351}
352
353static int fiji_smu_stop_smc(struct amdgpu_device *adev)
354{
355 uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
356 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
357 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
358
359 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
360 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
361 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
362
363 return 0;
364}
365#endif
366
367static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
368{
369 switch (fw_type) {
370 case UCODE_ID_SDMA0:
371 return AMDGPU_UCODE_ID_SDMA0;
372 case UCODE_ID_SDMA1:
373 return AMDGPU_UCODE_ID_SDMA1;
374 case UCODE_ID_CP_CE:
375 return AMDGPU_UCODE_ID_CP_CE;
376 case UCODE_ID_CP_PFP:
377 return AMDGPU_UCODE_ID_CP_PFP;
378 case UCODE_ID_CP_ME:
379 return AMDGPU_UCODE_ID_CP_ME;
380 case UCODE_ID_CP_MEC:
381 case UCODE_ID_CP_MEC_JT1:
382 case UCODE_ID_CP_MEC_JT2:
383 return AMDGPU_UCODE_ID_CP_MEC1;
384 case UCODE_ID_RLC_G:
385 return AMDGPU_UCODE_ID_RLC_G;
386 default:
387 DRM_ERROR("ucode type is out of range!\n");
388 return AMDGPU_UCODE_ID_MAXIMUM;
389 }
390}
391
392static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
393 uint32_t fw_type,
394 struct SMU_Entry *entry)
395{
396 enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
397 struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
398 const struct gfx_firmware_header_v1_0 *header = NULL;
399 uint64_t gpu_addr;
400 uint32_t data_size;
401
402 if (ucode->fw == NULL)
403 return -EINVAL;
404 gpu_addr = ucode->mc_addr;
405 header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
406 data_size = le32_to_cpu(header->header.ucode_size_bytes);
407
408 if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
409 (fw_type == UCODE_ID_CP_MEC_JT2)) {
410 gpu_addr += le32_to_cpu(header->jt_offset) << 2;
411 data_size = le32_to_cpu(header->jt_size) << 2;
412 }
413
414 entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
415 entry->id = (uint16_t)fw_type;
416 entry->image_addr_high = upper_32_bits(gpu_addr);
417 entry->image_addr_low = lower_32_bits(gpu_addr);
418 entry->meta_data_addr_high = 0;
419 entry->meta_data_addr_low = 0;
420 entry->data_size_byte = data_size;
421 entry->num_register_entries = 0;
422
423 if (fw_type == UCODE_ID_RLC_G)
424 entry->flags = 1;
425 else
426 entry->flags = 0;
427
428 return 0;
429}
430
431static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
432{
433 struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
434 struct SMU_DRAMData_TOC *toc;
435 uint32_t fw_to_load;
436
437 WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
438
439 fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
440 fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
441
442 toc = (struct SMU_DRAMData_TOC *)private->header;
443 toc->num_entries = 0;
444 toc->structure_version = 1;
445
446 if (!adev->firmware.smu_load)
447 return 0;
448
449 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
450 &toc->entry[toc->num_entries++])) {
451 DRM_ERROR("Failed to get firmware entry for RLC\n");
452 return -EINVAL;
453 }
454
455 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
456 &toc->entry[toc->num_entries++])) {
457 DRM_ERROR("Failed to get firmware entry for CE\n");
458 return -EINVAL;
459 }
460
461 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
462 &toc->entry[toc->num_entries++])) {
463 DRM_ERROR("Failed to get firmware entry for PFP\n");
464 return -EINVAL;
465 }
466
467 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
468 &toc->entry[toc->num_entries++])) {
469 DRM_ERROR("Failed to get firmware entry for ME\n");
470 return -EINVAL;
471 }
472
473 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
474 &toc->entry[toc->num_entries++])) {
475 DRM_ERROR("Failed to get firmware entry for MEC\n");
476 return -EINVAL;
477 }
478
479 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
480 &toc->entry[toc->num_entries++])) {
481 DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
482 return -EINVAL;
483 }
484
485 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
486 &toc->entry[toc->num_entries++])) {
487 DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
488 return -EINVAL;
489 }
490
491 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
492 &toc->entry[toc->num_entries++])) {
493 DRM_ERROR("Failed to get firmware entry for SDMA0\n");
494 return -EINVAL;
495 }
496
497 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
498 &toc->entry[toc->num_entries++])) {
499 DRM_ERROR("Failed to get firmware entry for SDMA1\n");
500 return -EINVAL;
501 }
502
503 fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
504 fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
505
506 fw_to_load = UCODE_ID_RLC_G_MASK |
507 UCODE_ID_SDMA0_MASK |
508 UCODE_ID_SDMA1_MASK |
509 UCODE_ID_CP_CE_MASK |
510 UCODE_ID_CP_ME_MASK |
511 UCODE_ID_CP_PFP_MASK |
512 UCODE_ID_CP_MEC_MASK;
513
514 if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
515 DRM_ERROR("Fail to request SMU load ucode\n");
516 return -EINVAL;
517 }
518
519 return 0;
520}
521
522static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
523{
524 switch (fw_type) {
525 case AMDGPU_UCODE_ID_SDMA0:
526 return UCODE_ID_SDMA0_MASK;
527 case AMDGPU_UCODE_ID_SDMA1:
528 return UCODE_ID_SDMA1_MASK;
529 case AMDGPU_UCODE_ID_CP_CE:
530 return UCODE_ID_CP_CE_MASK;
531 case AMDGPU_UCODE_ID_CP_PFP:
532 return UCODE_ID_CP_PFP_MASK;
533 case AMDGPU_UCODE_ID_CP_ME:
534 return UCODE_ID_CP_ME_MASK;
535 case AMDGPU_UCODE_ID_CP_MEC1:
536 return UCODE_ID_CP_MEC_MASK;
537 case AMDGPU_UCODE_ID_CP_MEC2:
538 return UCODE_ID_CP_MEC_MASK;
539 case AMDGPU_UCODE_ID_RLC_G:
540 return UCODE_ID_RLC_G_MASK;
541 default:
542 DRM_ERROR("ucode type is out of range!\n");
543 return 0;
544 }
545}
546
547static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
548 uint32_t fw_type)
549{
550 uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
551 int i;
552
553 for (i = 0; i < adev->usec_timeout; i++) {
554 if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
555 break;
556 udelay(1);
557 }
558
559 if (i == adev->usec_timeout) {
560 DRM_ERROR("check firmware loading failed\n");
561 return -EINVAL;
562 }
563
564 return 0;
565}
566
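/*
 * Usage sketch (not in the original file): fiji_smu_request_load_fw() only
 * queues the load, so a consumer pairs it with the poll above before using
 * the engine, e.g. waiting for the MEC ucode:
 */
static int fiji_smu_load_and_wait_sketch(struct amdgpu_device *adev)
{
	int r;

	r = fiji_smu_request_load_fw(adev);
	if (r)
		return r;

	return fiji_smu_check_fw_load_finish(adev, AMDGPU_UCODE_ID_CP_MEC1);
}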
567static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
568{
569 int result;
570 uint32_t val;
571 int i;
572
573 /* Assert reset */
574 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
575 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
576 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
577
578 result = fiji_smu_upload_firmware_image(adev);
579 if (result)
580 return result;
581
582 /* Clear status */
583 WREG32_SMC(ixSMU_STATUS, 0);
584
585 /* Enable clock */
586 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
587 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
588 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
589
590 /* De-assert reset */
591 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
592 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
593 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
594
595 /* Set SMU Auto Start */
596 val = RREG32_SMC(ixSMU_INPUT_DATA);
597 val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
598 WREG32_SMC(ixSMU_INPUT_DATA, val);
599
600 /* Clear firmware interrupt enable flag */
601 WREG32_SMC(ixFIRMWARE_FLAGS, 0);
602
603 for (i = 0; i < adev->usec_timeout; i++) {
604 val = RREG32_SMC(ixRCU_UC_EVENTS);
605 if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
606 break;
607 udelay(1);
608 }
609
610 if (i == adev->usec_timeout) {
611 DRM_ERROR("Interrupt is not enabled by firmware\n");
612 return -EINVAL;
613 }
614
615 /* Call Test SMU message with 0x20000 offset
616 * to trigger SMU start
617 */
618 fiji_send_msg_to_smc_offset(adev);
619 DRM_INFO("[FM]try triger smu start\n");
620 /* Wait for done bit to be set */
621 for (i = 0; i < adev->usec_timeout; i++) {
622 val = RREG32_SMC(ixSMU_STATUS);
623 if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
624 break;
625 udelay(1);
626 }
627
628 if (i == adev->usec_timeout) {
629 DRM_ERROR("Timeout for SMU start\n");
630 return -EINVAL;
631 }
632
633	/* Check the pass/fail indicator */
634 val = RREG32_SMC(ixSMU_STATUS);
635 if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
636 DRM_ERROR("SMU Firmware start failed\n");
637 return -EINVAL;
638 }
639 DRM_INFO("[FM]smu started\n");
640 /* Wait for firmware to initialize */
641 for (i = 0; i < adev->usec_timeout; i++) {
642 val = RREG32_SMC(ixFIRMWARE_FLAGS);
643		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
644 break;
645 udelay(1);
646 }
647
648 if (i == adev->usec_timeout) {
649 DRM_ERROR("SMU firmware initialization failed\n");
650 return -EINVAL;
651 }
652 DRM_INFO("[FM]smu initialized\n");
653
654 return 0;
655}
656
657static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
658{
659 int i, result;
660 uint32_t val;
661
662 /* wait for smc boot up */
663 for (i = 0; i < adev->usec_timeout; i++) {
664 val = RREG32_SMC(ixRCU_UC_EVENTS);
665 val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
666 if (val)
667 break;
668 udelay(1);
669 }
670
671 if (i == adev->usec_timeout) {
672 DRM_ERROR("SMC boot sequence is not completed\n");
673 return -EINVAL;
674 }
675
676 /* Clear firmware interrupt enable flag */
677 WREG32_SMC(ixFIRMWARE_FLAGS, 0);
678
679 /* Assert reset */
680 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
681 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
682 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
683
684 result = fiji_smu_upload_firmware_image(adev);
685 if (result)
686 return result;
687
688	/* Set the SMC instruction start point at 0x0 */
689 fiji_program_jump_on_start(adev);
690
691 /* Enable clock */
692 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
693 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
694 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
695
696 /* De-assert reset */
697 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
698 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
699 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
700
701 /* Wait for firmware to initialize */
702 for (i = 0; i < adev->usec_timeout; i++) {
703 val = RREG32_SMC(ixFIRMWARE_FLAGS);
704 if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
705 break;
706 udelay(1);
707 }
708
709 if (i == adev->usec_timeout) {
710 DRM_ERROR("Timeout for SMC firmware initialization\n");
711 return -EINVAL;
712 }
713
714 return 0;
715}
716
717int fiji_smu_start(struct amdgpu_device *adev)
718{
719 int result;
720 uint32_t val;
721
722 if (!fiji_is_smc_ram_running(adev)) {
723 val = RREG32_SMC(ixSMU_FIRMWARE);
724 if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
725 DRM_INFO("[FM]start smu in nonprotection mode\n");
726 result = fiji_smu_start_in_non_protection_mode(adev);
727 if (result)
728 return result;
729 } else {
730 DRM_INFO("[FM]start smu in protection mode\n");
731 result = fiji_smu_start_in_protection_mode(adev);
732 if (result)
733 return result;
734 }
735 }
736
737 return fiji_smu_request_load_fw(adev);
738}
739
740static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
741 .check_fw_load_finish = fiji_smu_check_fw_load_finish,
742 .request_smu_load_fw = NULL,
743 .request_smu_specific_fw = NULL,
744};
745
746int fiji_smu_init(struct amdgpu_device *adev)
747{
748 struct fiji_smu_private_data *private;
749 uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
750 uint32_t smu_internal_buffer_size = 200*4096;
751 struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
752 struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
753 uint64_t mc_addr;
754 void *toc_buf_ptr;
755 void *smu_buf_ptr;
756 int ret;
757
758 private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
759	if (!private)
760 return -ENOMEM;
761
762 /* allocate firmware buffers */
763 if (adev->firmware.smu_load)
764 amdgpu_ucode_init_bo(adev);
765
766 adev->smu.priv = private;
767 adev->smu.fw_flags = 0;
768
769 /* Allocate FW image data structure and header buffer */
770 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
771 true, AMDGPU_GEM_DOMAIN_VRAM,
772 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
773 NULL, NULL, toc_buf);
774 if (ret) {
775 DRM_ERROR("Failed to allocate memory for TOC buffer\n");
776 return -ENOMEM;
777 }
778
779 /* Allocate buffer for SMU internal buffer */
780 ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
781 true, AMDGPU_GEM_DOMAIN_VRAM,
782 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
783 NULL, NULL, smu_buf);
784 if (ret) {
785 DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
786 return -ENOMEM;
787 }
788
789 /* Retrieve GPU address for header buffer and internal buffer */
790 ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
791 if (ret) {
792 amdgpu_bo_unref(&adev->smu.toc_buf);
793 DRM_ERROR("Failed to reserve the TOC buffer\n");
794 return -EINVAL;
795 }
796
797 ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
798 if (ret) {
799 amdgpu_bo_unreserve(adev->smu.toc_buf);
800 amdgpu_bo_unref(&adev->smu.toc_buf);
801 DRM_ERROR("Failed to pin the TOC buffer\n");
802 return -EINVAL;
803 }
804
805 ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
806 if (ret) {
807 amdgpu_bo_unreserve(adev->smu.toc_buf);
808 amdgpu_bo_unref(&adev->smu.toc_buf);
809 DRM_ERROR("Failed to map the TOC buffer\n");
810 return -EINVAL;
811 }
812
813 amdgpu_bo_unreserve(adev->smu.toc_buf);
814 private->header_addr_low = lower_32_bits(mc_addr);
815 private->header_addr_high = upper_32_bits(mc_addr);
816 private->header = toc_buf_ptr;
817
818 ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
819 if (ret) {
820 amdgpu_bo_unref(&adev->smu.smu_buf);
821 amdgpu_bo_unref(&adev->smu.toc_buf);
822 DRM_ERROR("Failed to reserve the SMU internal buffer\n");
823 return -EINVAL;
824 }
825
826 ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
827 if (ret) {
828 amdgpu_bo_unreserve(adev->smu.smu_buf);
829 amdgpu_bo_unref(&adev->smu.smu_buf);
830 amdgpu_bo_unref(&adev->smu.toc_buf);
831 DRM_ERROR("Failed to pin the SMU internal buffer\n");
832 return -EINVAL;
833 }
834
835 ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
836 if (ret) {
837 amdgpu_bo_unreserve(adev->smu.smu_buf);
838 amdgpu_bo_unref(&adev->smu.smu_buf);
839 amdgpu_bo_unref(&adev->smu.toc_buf);
840 DRM_ERROR("Failed to map the SMU internal buffer\n");
841 return -EINVAL;
842 }
843
844 amdgpu_bo_unreserve(adev->smu.smu_buf);
845 private->smu_buffer_addr_low = lower_32_bits(mc_addr);
846 private->smu_buffer_addr_high = upper_32_bits(mc_addr);
847
848 adev->smu.smumgr_funcs = &fiji_smumgr_funcs;
849
850 return 0;
851}
852
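/*
 * Sizing note for fiji_smu_init() above (illustrative arithmetic): the TOC
 * allocation rounds up with ((size / 4096) + 1) * 4096, which always adds a
 * full extra page even when size is already 4096-aligned; ALIGN(size, 4096)
 * would be the tight bound. The SMU internal buffer is a fixed
 * 200 * 4096 = 800 KiB.
 */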
853int fiji_smu_fini(struct amdgpu_device *adev)
854{
855 amdgpu_bo_unref(&adev->smu.toc_buf);
856 amdgpu_bo_unref(&adev->smu.smu_buf);
857 kfree(adev->smu.priv);
858 adev->smu.priv = NULL;
859 if (adev->firmware.fw_buf)
860 amdgpu_ucode_fini_bo(adev);
861
862 return 0;
863}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h b/drivers/gpu/drm/amd/amdgpu/fiji_smum.h
deleted file mode 100644
index 1cef03deeac3..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef FIJI_SMUMGR_H
25#define FIJI_SMUMGR_H
26
27#include "fiji_ppsmc.h"
28
29int fiji_smu_init(struct amdgpu_device *adev);
30int fiji_smu_fini(struct amdgpu_device *adev);
31int fiji_smu_start(struct amdgpu_device *adev);
32
33struct fiji_smu_private_data
34{
35 uint8_t *header;
36 uint32_t smu_buffer_addr_high;
37 uint32_t smu_buffer_addr_low;
38 uint32_t header_addr_high;
39 uint32_t header_addr_low;
40};
41
42#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 410b29c05671..40abb6b81c09 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -931,6 +931,123 @@ static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
931 return data & mask; 931 return data & mask;
932} 932}
933 933
934static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
935{
936 switch (adev->asic_type) {
937 case CHIP_TAHITI:
938 case CHIP_PITCAIRN:
939 *rconf |= RB_XSEL2(2) | RB_XSEL | PKR_MAP(2) | PKR_YSEL(1) |
940 SE_MAP(2) | SE_XSEL(2) | SE_YSEL(2);
941 break;
942 case CHIP_VERDE:
943 *rconf |= RB_XSEL | PKR_MAP(2) | PKR_YSEL(1);
944 break;
945 case CHIP_OLAND:
946 *rconf |= RB_YSEL;
947 break;
948 case CHIP_HAINAN:
949 *rconf |= 0x0;
950 break;
951 default:
952 DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
953 break;
954 }
955}
956
957static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
958 u32 raster_config, unsigned rb_mask,
959 unsigned num_rb)
960{
961 unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
962 unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
963 unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
964 unsigned rb_per_se = num_rb / num_se;
965 unsigned se_mask[4];
966 unsigned se;
967
968 se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
969 se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
970 se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
971 se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
972
973 WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
974 WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
975 WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
976
977 for (se = 0; se < num_se; se++) {
978 unsigned raster_config_se = raster_config;
979 unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
980 unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
981 int idx = (se / 2) * 2;
982
983 if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
984 raster_config_se &= ~SE_MAP_MASK;
985
986 if (!se_mask[idx]) {
987 raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
988 } else {
989 raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
990 }
991 }
992
993 pkr0_mask &= rb_mask;
994 pkr1_mask &= rb_mask;
995 if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
996 raster_config_se &= ~PKR_MAP_MASK;
997
998 if (!pkr0_mask) {
999 raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
1000 } else {
1001 raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
1002 }
1003 }
1004
1005 if (rb_per_se >= 2) {
1006 unsigned rb0_mask = 1 << (se * rb_per_se);
1007 unsigned rb1_mask = rb0_mask << 1;
1008
1009 rb0_mask &= rb_mask;
1010 rb1_mask &= rb_mask;
1011 if (!rb0_mask || !rb1_mask) {
1012 raster_config_se &= ~RB_MAP_PKR0_MASK;
1013
1014 if (!rb0_mask) {
1015 raster_config_se |=
1016 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
1017 } else {
1018 raster_config_se |=
1019 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
1020 }
1021 }
1022
1023 if (rb_per_se > 2) {
1024 rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
1025 rb1_mask = rb0_mask << 1;
1026 rb0_mask &= rb_mask;
1027 rb1_mask &= rb_mask;
1028 if (!rb0_mask || !rb1_mask) {
1029 raster_config_se &= ~RB_MAP_PKR1_MASK;
1030
1031 if (!rb0_mask) {
1032 raster_config_se |=
1033 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
1034 } else {
1035 raster_config_se |=
1036 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
1037 }
1038 }
1039 }
1040 }
1041
1042 /* GRBM_GFX_INDEX has a different offset on SI */
1043 gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
1044 WREG32(PA_SC_RASTER_CONFIG, raster_config_se);
1045 }
1046
1047 /* GRBM_GFX_INDEX has a different offset on SI */
1048 gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1049}
1050
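/*
 * Worked example (not part of the patch) for the masks computed above: with
 * num_se = 2, rb_per_se = 4 and rb_mask = 0x3f (the two high RBs of SE1
 * harvested):
 *
 *	se_mask[0] = ((1 << 4) - 1) & 0x3f = 0x0f	(SE0 fully populated)
 *	se_mask[1] = (0x0f << 4)   & 0x3f = 0x30	(SE1 keeps RB4/RB5)
 *
 * so the per-SE loop leaves SE0 on the default mapping and only rewrites the
 * PKR/RB fields of the partially harvested SE1.
 */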
1051static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
1052			      u32 se_num, u32 sh_per_se,
1053			      u32 max_rb_num_per_se)
@@ -939,6 +1056,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
1056	u32 data, mask;
1057	u32 disabled_rbs = 0;
1058	u32 enabled_rbs = 0;
1059	unsigned num_rb_pipes;
1060
1061	mutex_lock(&adev->grbm_idx_mutex);
1062	for (i = 0; i < se_num; i++) {
@@ -961,6 +1079,9 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
961 adev->gfx.config.backend_enable_mask = enabled_rbs; 1079 adev->gfx.config.backend_enable_mask = enabled_rbs;
962 adev->gfx.config.num_rbs = hweight32(enabled_rbs); 1080 adev->gfx.config.num_rbs = hweight32(enabled_rbs);
963 1081
1082 num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
1083 adev->gfx.config.max_shader_engines, 16);
1084
964 mutex_lock(&adev->grbm_idx_mutex); 1085 mutex_lock(&adev->grbm_idx_mutex);
965 for (i = 0; i < se_num; i++) { 1086 for (i = 0; i < se_num; i++) {
966 gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff); 1087 gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
@@ -980,7 +1101,15 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
1101			}
1102			enabled_rbs >>= 2;
1103		}
-983		WREG32(PA_SC_RASTER_CONFIG, data);
1104		gfx_v6_0_raster_config(adev, &data);
1105
1106		if (!adev->gfx.config.backend_enable_mask ||
1107		    adev->gfx.config.num_rbs >= num_rb_pipes)
1108			WREG32(PA_SC_RASTER_CONFIG, data);
1109		else
1110			gfx_v6_0_write_harvested_raster_configs(adev, data,
1111							adev->gfx.config.backend_enable_mask,
1112							num_rb_pipes);
1113	}
1114	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1115	mutex_unlock(&adev->grbm_idx_mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 90102f123bb8..32a676291e67 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1645,6 +1645,147 @@ static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1645 return (~data) & mask; 1645 return (~data) & mask;
1646} 1646}
1647 1647
1648static void
1649gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
1650{
1651 switch (adev->asic_type) {
1652 case CHIP_BONAIRE:
1653 *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
1654 SE_XSEL(1) | SE_YSEL(1);
1655 *rconf1 |= 0x0;
1656 break;
1657 case CHIP_HAWAII:
1658 *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
1659 RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
1660 PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
1661 SE_YSEL(3);
1662 *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
1663 SE_PAIR_YSEL(2);
1664 break;
1665 case CHIP_KAVERI:
1666 *rconf |= RB_MAP_PKR0(2);
1667 *rconf1 |= 0x0;
1668 break;
1669 case CHIP_KABINI:
1670 case CHIP_MULLINS:
1671 *rconf |= 0x0;
1672 *rconf1 |= 0x0;
1673 break;
1674 default:
1675 DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
1676 break;
1677 }
1678}
1679
1680static void
1681gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
1682 u32 raster_config, u32 raster_config_1,
1683 unsigned rb_mask, unsigned num_rb)
1684{
1685 unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
1686 unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
1687 unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
1688 unsigned rb_per_se = num_rb / num_se;
1689 unsigned se_mask[4];
1690 unsigned se;
1691
1692 se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
1693 se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
1694 se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
1695 se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
1696
1697 WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
1698 WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
1699 WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
1700
1701 if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
1702 (!se_mask[2] && !se_mask[3]))) {
1703 raster_config_1 &= ~SE_PAIR_MAP_MASK;
1704
1705 if (!se_mask[0] && !se_mask[1]) {
1706 raster_config_1 |=
1707 SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
1708 } else {
1709 raster_config_1 |=
1710 SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
1711 }
1712 }
1713
1714 for (se = 0; se < num_se; se++) {
1715 unsigned raster_config_se = raster_config;
1716 unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
1717 unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
1718 int idx = (se / 2) * 2;
1719
1720 if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
1721 raster_config_se &= ~SE_MAP_MASK;
1722
1723 if (!se_mask[idx]) {
1724 raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
1725 } else {
1726 raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
1727 }
1728 }
1729
1730 pkr0_mask &= rb_mask;
1731 pkr1_mask &= rb_mask;
1732 if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
1733 raster_config_se &= ~PKR_MAP_MASK;
1734
1735 if (!pkr0_mask) {
1736 raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
1737 } else {
1738 raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
1739 }
1740 }
1741
1742 if (rb_per_se >= 2) {
1743 unsigned rb0_mask = 1 << (se * rb_per_se);
1744 unsigned rb1_mask = rb0_mask << 1;
1745
1746 rb0_mask &= rb_mask;
1747 rb1_mask &= rb_mask;
1748 if (!rb0_mask || !rb1_mask) {
1749 raster_config_se &= ~RB_MAP_PKR0_MASK;
1750
1751 if (!rb0_mask) {
1752 raster_config_se |=
1753 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
1754 } else {
1755 raster_config_se |=
1756 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
1757 }
1758 }
1759
1760 if (rb_per_se > 2) {
1761 rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
1762 rb1_mask = rb0_mask << 1;
1763 rb0_mask &= rb_mask;
1764 rb1_mask &= rb_mask;
1765 if (!rb0_mask || !rb1_mask) {
1766 raster_config_se &= ~RB_MAP_PKR1_MASK;
1767
1768 if (!rb0_mask) {
1769 raster_config_se |=
1770 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
1771 } else {
1772 raster_config_se |=
1773 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
1774 }
1775 }
1776 }
1777 }
1778
1779 /* GRBM_GFX_INDEX has a different offset on CI+ */
1780 gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
1781 WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
1782 WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1783 }
1784
1785 /* GRBM_GFX_INDEX has a different offset on CI+ */
1786 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1787}
1788
1789/**
1790 * gfx_v7_0_setup_rb - setup the RBs on the asic
1791 *
@@ -1658,9 +1799,11 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
1799{
1800	int i, j;
1801	u32 data;
1802	u32 raster_config = 0, raster_config_1 = 0;
1803	u32 active_rbs = 0;
1804	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1805					adev->gfx.config.max_sh_per_se;
1806	unsigned num_rb_pipes;
1807
1808	mutex_lock(&adev->grbm_idx_mutex);
1809	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -1672,10 +1815,25 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
1815		}
1816	}
1817	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-1675	mutex_unlock(&adev->grbm_idx_mutex);
1818
1819	adev->gfx.config.backend_enable_mask = active_rbs;
1820	adev->gfx.config.num_rbs = hweight32(active_rbs);
1821
1822	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
1823			     adev->gfx.config.max_shader_engines, 16);
1824
1825	gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
1826
1827	if (!adev->gfx.config.backend_enable_mask ||
1828	    adev->gfx.config.num_rbs >= num_rb_pipes) {
1829		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
1830		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1831	} else {
1832		gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
1833							adev->gfx.config.backend_enable_mask,
1834							num_rb_pipes);
1835	}
1836	mutex_unlock(&adev->grbm_idx_mutex);
1837}
1838
1839/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 47e270ad4fe3..6c6ff57b1c95 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3492,13 +3492,163 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
3492	return (~data) & mask;
3493}
3494
3495static void
3496gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
3497{
3498 switch (adev->asic_type) {
3499 case CHIP_FIJI:
3500 *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
3501 RB_XSEL2(1) | PKR_MAP(2) |
3502 PKR_XSEL(1) | PKR_YSEL(1) |
3503 SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
3504 *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
3505 SE_PAIR_YSEL(2);
3506 break;
3507 case CHIP_TONGA:
3508 case CHIP_POLARIS10:
3509 *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3510 SE_XSEL(1) | SE_YSEL(1);
3511 *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
3512 SE_PAIR_YSEL(2);
3513 break;
3514 case CHIP_TOPAZ:
3515 case CHIP_CARRIZO:
3516 *rconf |= RB_MAP_PKR0(2);
3517 *rconf1 |= 0x0;
3518 break;
3519 case CHIP_POLARIS11:
3520 *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3521 SE_XSEL(1) | SE_YSEL(1);
3522 *rconf1 |= 0x0;
3523 break;
3524 case CHIP_STONEY:
3525 *rconf |= 0x0;
3526 *rconf1 |= 0x0;
3527 break;
3528 default:
3529 DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
3530 break;
3531 }
3532}
3533
3534static void
3535gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
3536 u32 raster_config, u32 raster_config_1,
3537 unsigned rb_mask, unsigned num_rb)
3538{
3539 unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
3540 unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
3541 unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
3542 unsigned rb_per_se = num_rb / num_se;
3543 unsigned se_mask[4];
3544 unsigned se;
3545
3546 se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
3547 se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
3548 se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
3549 se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
3550
3551 WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
3552 WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
3553 WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
3554
3555 if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
3556 (!se_mask[2] && !se_mask[3]))) {
3557 raster_config_1 &= ~SE_PAIR_MAP_MASK;
3558
3559 if (!se_mask[0] && !se_mask[1]) {
3560 raster_config_1 |=
3561 SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
3562 } else {
3563 raster_config_1 |=
3564 SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
3565 }
3566 }
3567
3568 for (se = 0; se < num_se; se++) {
3569 unsigned raster_config_se = raster_config;
3570 unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
3571 unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
3572 int idx = (se / 2) * 2;
3573
3574 if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
3575 raster_config_se &= ~SE_MAP_MASK;
3576
3577 if (!se_mask[idx]) {
3578 raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
3579 } else {
3580 raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
3581 }
3582 }
3583
3584 pkr0_mask &= rb_mask;
3585 pkr1_mask &= rb_mask;
3586 if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
3587 raster_config_se &= ~PKR_MAP_MASK;
3588
3589 if (!pkr0_mask) {
3590 raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
3591 } else {
3592 raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
3593 }
3594 }
3595
3596 if (rb_per_se >= 2) {
3597 unsigned rb0_mask = 1 << (se * rb_per_se);
3598 unsigned rb1_mask = rb0_mask << 1;
3599
3600 rb0_mask &= rb_mask;
3601 rb1_mask &= rb_mask;
3602 if (!rb0_mask || !rb1_mask) {
3603 raster_config_se &= ~RB_MAP_PKR0_MASK;
3604
3605 if (!rb0_mask) {
3606 raster_config_se |=
3607 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
3608 } else {
3609 raster_config_se |=
3610 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
3611 }
3612 }
3613
3614 if (rb_per_se > 2) {
3615 rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
3616 rb1_mask = rb0_mask << 1;
3617 rb0_mask &= rb_mask;
3618 rb1_mask &= rb_mask;
3619 if (!rb0_mask || !rb1_mask) {
3620 raster_config_se &= ~RB_MAP_PKR1_MASK;
3621
3622 if (!rb0_mask) {
3623 raster_config_se |=
3624 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
3625 } else {
3626 raster_config_se |=
3627 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
3628 }
3629 }
3630 }
3631 }
3632
3633 /* GRBM_GFX_INDEX has a different offset on VI */
3634 gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
3635 WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
3636 WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
3637 }
3638
3639 /* GRBM_GFX_INDEX has a different offset on VI */
3640 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3641}
3642
3643static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
3644{
3645	int i, j;
3646	u32 data;
3647	u32 raster_config = 0, raster_config_1 = 0;
3648	u32 active_rbs = 0;
3649	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
3650					adev->gfx.config.max_sh_per_se;
3651	unsigned num_rb_pipes;
3652
3653	mutex_lock(&adev->grbm_idx_mutex);
3654	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -3510,10 +3660,26 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
3660		}
3661	}
3662	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-3513	mutex_unlock(&adev->grbm_idx_mutex);
3663
3664	adev->gfx.config.backend_enable_mask = active_rbs;
3665	adev->gfx.config.num_rbs = hweight32(active_rbs);
3666
3667 num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
3668 adev->gfx.config.max_shader_engines, 16);
3669
3670 gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
3671
3672 if (!adev->gfx.config.backend_enable_mask ||
3673 adev->gfx.config.num_rbs >= num_rb_pipes) {
3674 WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
3675 WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
3676 } else {
3677 gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
3678 adev->gfx.config.backend_enable_mask,
3679 num_rb_pipes);
3680 }
3681
3682 mutex_unlock(&adev->grbm_idx_mutex);
3683}
3684
3685/**
@@ -5817,6 +5983,76 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5983	return 0;
5984}
5985
5986static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
5987 enum amd_clockgating_state state)
5988{
5989 uint32_t msg_id, pp_state;
5990 void *pp_handle = adev->powerplay.pp_handle;
5991
5992 if (state == AMD_CG_STATE_UNGATE)
5993 pp_state = 0;
5994 else
5995 pp_state = PP_STATE_CG | PP_STATE_LS;
5996
5997 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5998 PP_BLOCK_GFX_CG,
5999 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
6000 pp_state);
6001 amd_set_clockgating_by_smu(pp_handle, msg_id);
6002
6003 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6004 PP_BLOCK_GFX_MG,
6005 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
6006 pp_state);
6007 amd_set_clockgating_by_smu(pp_handle, msg_id);
6008
6009 return 0;
6010}
6011
6012static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
6013 enum amd_clockgating_state state)
6014{
6015 uint32_t msg_id, pp_state;
6016 void *pp_handle = adev->powerplay.pp_handle;
6017
6018 if (state == AMD_CG_STATE_UNGATE)
6019 pp_state = 0;
6020 else
6021 pp_state = PP_STATE_CG | PP_STATE_LS;
6022
6023 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6024 PP_BLOCK_GFX_CG,
6025 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
6026 pp_state);
6027 amd_set_clockgating_by_smu(pp_handle, msg_id);
6028
6029 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6030 PP_BLOCK_GFX_3D,
6031 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
6032 pp_state);
6033 amd_set_clockgating_by_smu(pp_handle, msg_id);
6034
6035 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6036 PP_BLOCK_GFX_MG,
6037 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
6038 pp_state);
6039 amd_set_clockgating_by_smu(pp_handle, msg_id);
6040
6041 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6042 PP_BLOCK_GFX_RLC,
6043 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
6044 pp_state);
6045 amd_set_clockgating_by_smu(pp_handle, msg_id);
6046
6047 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6048 PP_BLOCK_GFX_CP,
6049 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
6050 pp_state);
6051 amd_set_clockgating_by_smu(pp_handle, msg_id);
6052
6053 return 0;
6054}
6055
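/*
 * Sketch (not part of the patch): the Tonga and Polaris helpers above differ
 * only in which PP_BLOCK_GFX_* blocks they message, so a table-driven
 * variant could share the loop:
 */
static void gfx_v8_0_set_cg_blocks_sketch(void *pp_handle, uint32_t pp_state,
					  const uint32_t *blocks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		amd_set_clockgating_by_smu(pp_handle,
				PP_CG_MSG_ID(PP_GROUP_GFX, blocks[i],
					     PP_STATE_SUPPORT_CG |
					     PP_STATE_SUPPORT_LS,
					     pp_state));
}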
6056static int gfx_v8_0_set_clockgating_state(void *handle,
6057					  enum amd_clockgating_state state)
6058{
@@ -5829,6 +6065,13 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
6065		gfx_v8_0_update_gfx_clock_gating(adev,
6066				state == AMD_CG_STATE_GATE ? true : false);
6067		break;
6068 case CHIP_TONGA:
6069 gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
6070 break;
6071 case CHIP_POLARIS10:
6072 case CHIP_POLARIS11:
6073 gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
6074 break;
6075	default:
6076		break;
6077	}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 84c10d5117a9..1b319f5bc696 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -269,8 +269,10 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
269
270	/* Skip MC ucode loading on SR-IOV capable boards.
271	 * vbios does this for us in asic_init in that case.
272	 * Skip MC ucode loading on VF, because the hypervisor will do that
273	 * for this adapter.
274	 */
-273	if (adev->virtualization.supports_sr_iov)
275	if (amdgpu_sriov_bios(adev))
276		return 0;
277
278	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
deleted file mode 100644
index 2f078ad6095c..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ /dev/null
@@ -1,200 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "iceland_smum.h"
28
29MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
30
31static void iceland_dpm_set_funcs(struct amdgpu_device *adev);
32
33static int iceland_dpm_early_init(void *handle)
34{
35 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
36
37 iceland_dpm_set_funcs(adev);
38
39 return 0;
40}
41
42static int iceland_dpm_init_microcode(struct amdgpu_device *adev)
43{
44 char fw_name[30] = "amdgpu/topaz_smc.bin";
45 int err;
46
47 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
48 if (err)
49 goto out;
50 err = amdgpu_ucode_validate(adev->pm.fw);
51
52out:
53 if (err) {
54 DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
55 release_firmware(adev->pm.fw);
56 adev->pm.fw = NULL;
57 }
58 return err;
59}
60
61static int iceland_dpm_sw_init(void *handle)
62{
63 int ret;
64 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
65
66 ret = iceland_dpm_init_microcode(adev);
67 if (ret)
68 return ret;
69
70 return 0;
71}
72
73static int iceland_dpm_sw_fini(void *handle)
74{
75 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
76
77 release_firmware(adev->pm.fw);
78 adev->pm.fw = NULL;
79
80 return 0;
81}
82
83static int iceland_dpm_hw_init(void *handle)
84{
85 int ret;
86 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
87
88 mutex_lock(&adev->pm.mutex);
89
90 /* smu init only needs to be called at startup, not resume.
91 * It should be in sw_init, but requires the fw info gathered
92 * in sw_init from other IP modules.
93 */
94 ret = iceland_smu_init(adev);
95 if (ret) {
96 DRM_ERROR("SMU initialization failed\n");
97 goto fail;
98 }
99
100 ret = iceland_smu_start(adev);
101 if (ret) {
102 DRM_ERROR("SMU start failed\n");
103 goto fail;
104 }
105
106 mutex_unlock(&adev->pm.mutex);
107 return 0;
108
109fail:
110 adev->firmware.smu_load = false;
111 mutex_unlock(&adev->pm.mutex);
112 return -EINVAL;
113}
114
115static int iceland_dpm_hw_fini(void *handle)
116{
117 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
118
119 mutex_lock(&adev->pm.mutex);
120 /* smu fini only needs to be called at teardown, not suspend.
121 * It should be in sw_fini, but we put it here for symmetry
122 * with smu init.
123 */
124 iceland_smu_fini(adev);
125 mutex_unlock(&adev->pm.mutex);
126 return 0;
127}
128
129static int iceland_dpm_suspend(void *handle)
130{
131 return 0;
132}
133
134static int iceland_dpm_resume(void *handle)
135{
136 int ret;
137 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
138
139 mutex_lock(&adev->pm.mutex);
140
141 ret = iceland_smu_start(adev);
142 if (ret) {
143 DRM_ERROR("SMU start failed\n");
144 goto fail;
145 }
146
147fail:
148 mutex_unlock(&adev->pm.mutex);
149 return ret;
150}
151
152static int iceland_dpm_set_clockgating_state(void *handle,
153 enum amd_clockgating_state state)
154{
155 return 0;
156}
157
158static int iceland_dpm_set_powergating_state(void *handle,
159 enum amd_powergating_state state)
160{
161 return 0;
162}
163
164const struct amd_ip_funcs iceland_dpm_ip_funcs = {
165 .name = "iceland_dpm",
166 .early_init = iceland_dpm_early_init,
167 .late_init = NULL,
168 .sw_init = iceland_dpm_sw_init,
169 .sw_fini = iceland_dpm_sw_fini,
170 .hw_init = iceland_dpm_hw_init,
171 .hw_fini = iceland_dpm_hw_fini,
172 .suspend = iceland_dpm_suspend,
173 .resume = iceland_dpm_resume,
174 .is_idle = NULL,
175 .wait_for_idle = NULL,
176 .soft_reset = NULL,
177 .set_clockgating_state = iceland_dpm_set_clockgating_state,
178 .set_powergating_state = iceland_dpm_set_powergating_state,
179};
180
181static const struct amdgpu_dpm_funcs iceland_dpm_funcs = {
182 .get_temperature = NULL,
183 .pre_set_power_state = NULL,
184 .set_power_state = NULL,
185 .post_set_power_state = NULL,
186 .display_configuration_changed = NULL,
187 .get_sclk = NULL,
188 .get_mclk = NULL,
189 .print_power_state = NULL,
190 .debugfs_print_current_performance_level = NULL,
191 .force_performance_level = NULL,
192 .vblank_too_short = NULL,
193 .powergate_uvd = NULL,
194};
195
196static void iceland_dpm_set_funcs(struct amdgpu_device *adev)
197{
198	if (!adev->pm.funcs)
199 adev->pm.funcs = &iceland_dpm_funcs;
200}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
deleted file mode 100644
index ef7c27d7356a..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ /dev/null
@@ -1,677 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "ppsmc.h"
28#include "iceland_smum.h"
29#include "smu_ucode_xfer_vi.h"
30#include "amdgpu_ucode.h"
31
32#include "smu/smu_7_1_1_d.h"
33#include "smu/smu_7_1_1_sh_mask.h"
34
35#define ICELAND_SMC_SIZE 0x20000
36
37static int iceland_set_smc_sram_address(struct amdgpu_device *adev,
38 uint32_t smc_address, uint32_t limit)
39{
40 uint32_t val;
41
42 if (smc_address & 3)
43 return -EINVAL;
44
45 if ((smc_address + 3) > limit)
46 return -EINVAL;
47
48 WREG32(mmSMC_IND_INDEX_0, smc_address);
49
50 val = RREG32(mmSMC_IND_ACCESS_CNTL);
51 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
52 WREG32(mmSMC_IND_ACCESS_CNTL, val);
53
54 return 0;
55}
56
57static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev,
58 uint32_t smc_start_address,
59 const uint8_t *src,
60 uint32_t byte_count, uint32_t limit)
61{
62 uint32_t addr;
63 uint32_t data, orig_data;
64 int result = 0;
65 uint32_t extra_shift;
66 unsigned long flags;
67
68 if (smc_start_address & 3)
69 return -EINVAL;
70
71 if ((smc_start_address + byte_count) > limit)
72 return -EINVAL;
73
74 addr = smc_start_address;
75
76 spin_lock_irqsave(&adev->smc_idx_lock, flags);
77 while (byte_count >= 4) {
78 /* Bytes are written into the SMC address space with the MSB first */
79 data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
80
81 result = iceland_set_smc_sram_address(adev, addr, limit);
82
83 if (result)
84 goto out;
85
86 WREG32(mmSMC_IND_DATA_0, data);
87
88 src += 4;
89 byte_count -= 4;
90 addr += 4;
91 }
92
93 if (0 != byte_count) {
94 /* Now write the odd bytes left, using a read-modify-write cycle */
95 data = 0;
96
97 result = iceland_set_smc_sram_address(adev, addr, limit);
98 if (result)
99 goto out;
100
101 orig_data = RREG32(mmSMC_IND_DATA_0);
102 extra_shift = 8 * (4 - byte_count);
103
104 while (byte_count > 0) {
105 data = (data << 8) + *src++;
106 byte_count--;
107 }
108
109 data <<= extra_shift;
110 data |= (orig_data & ~((~0UL) << extra_shift));
111
112 result = iceland_set_smc_sram_address(adev, addr, limit);
113 if (result)
114 goto out;
115
116 WREG32(mmSMC_IND_DATA_0, data);
117 }
118
119out:
120 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
121 return result;
122}
123
124static void iceland_start_smc(struct amdgpu_device *adev)
125{
126 uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
127
128 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
129 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
130}
131
132static void iceland_reset_smc(struct amdgpu_device *adev)
133{
134 uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
135
136 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
137 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
138}
139
140static int iceland_program_jump_on_start(struct amdgpu_device *adev)
141{
142 static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
143 iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
144
145 return 0;
146}
147
148static void iceland_stop_smc_clock(struct amdgpu_device *adev)
149{
150 uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
151
152 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
153 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
154}
155
156static void iceland_start_smc_clock(struct amdgpu_device *adev)
157{
158 uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
159
160 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
161 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
162}
163
164static bool iceland_is_smc_ram_running(struct amdgpu_device *adev)
165{
166 uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
167 val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
168
169 return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
170}
171
172static int wait_smu_response(struct amdgpu_device *adev)
173{
174 int i;
175 uint32_t val;
176
177 for (i = 0; i < adev->usec_timeout; i++) {
178 val = RREG32(mmSMC_RESP_0);
179 if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
180 break;
181 udelay(1);
182 }
183
184 if (i == adev->usec_timeout)
185 return -EINVAL;
186
187 return 0;
188}
189
190static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
191{
192 if (!iceland_is_smc_ram_running(adev))
193 return -EINVAL;
194
195 if (wait_smu_response(adev)) {
196 DRM_ERROR("Failed to send previous message\n");
197 return -EINVAL;
198 }
199
200 WREG32(mmSMC_MESSAGE_0, msg);
201
202 if (wait_smu_response(adev)) {
203 DRM_ERROR("Failed to send message\n");
204 return -EINVAL;
205 }
206
207 return 0;
208}
209
210static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
211 PPSMC_Msg msg)
212{
213 if (!iceland_is_smc_ram_running(adev))
214 return -EINVAL;
215
216 if (wait_smu_response(adev)) {
217 DRM_ERROR("Failed to send previous message\n");
218 return -EINVAL;
219 }
220
221 WREG32(mmSMC_MESSAGE_0, msg);
222
223 return 0;
224}
225
226static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
227 PPSMC_Msg msg,
228 uint32_t parameter)
229{
230 WREG32(mmSMC_MSG_ARG_0, parameter);
231
232 return iceland_send_msg_to_smc(adev, msg);
233}
234
235static int iceland_send_msg_to_smc_with_parameter_without_waiting(
236 struct amdgpu_device *adev,
237 PPSMC_Msg msg, uint32_t parameter)
238{
239 WREG32(mmSMC_MSG_ARG_0, parameter);
240
241 return iceland_send_msg_to_smc_without_waiting(adev, msg);
242}
243
244#if 0 /* not used yet */
245static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev)
246{
247 int i;
248 uint32_t val;
249
250 if (!iceland_is_smc_ram_running(adev))
251 return -EINVAL;
252
253 for (i = 0; i < adev->usec_timeout; i++) {
254 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
255 if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
256 break;
257 udelay(1);
258 }
259
260 if (i == adev->usec_timeout)
261 return -EINVAL;
262
263 return 0;
264}
265#endif
266
267static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev)
268{
269 const struct smc_firmware_header_v1_0 *hdr;
270 uint32_t ucode_size;
271 uint32_t ucode_start_address;
272 const uint8_t *src;
273 uint32_t val;
274 uint32_t byte_count;
275 uint32_t data;
276 unsigned long flags;
277 int i;
278
279 if (!adev->pm.fw)
280 return -EINVAL;
281
282 /* Skip SMC ucode loading on SR-IOV capable boards.
283 * vbios does this for us in asic_init in that case.
284 */
285 if (adev->virtualization.supports_sr_iov)
286 return 0;
287
288 hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
289 amdgpu_ucode_print_smc_hdr(&hdr->header);
290
291 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
292 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
293 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
294 src = (const uint8_t *)
295 (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
296
297 if (ucode_size & 3) {
298 DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
299 return -EINVAL;
300 }
301
302 if (ucode_size > ICELAND_SMC_SIZE) {
303 DRM_ERROR("SMC address is beyond the SMC RAM area\n");
304 return -EINVAL;
305 }
306
307 for (i = 0; i < adev->usec_timeout; i++) {
308 val = RREG32_SMC(ixRCU_UC_EVENTS);
309 if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0)
310 break;
311 udelay(1);
312 }
313 val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL);
314 WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1);
315
316 iceland_stop_smc_clock(adev);
317 iceland_reset_smc(adev);
318
319 spin_lock_irqsave(&adev->smc_idx_lock, flags);
320 WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
321
322 val = RREG32(mmSMC_IND_ACCESS_CNTL);
323 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
324 WREG32(mmSMC_IND_ACCESS_CNTL, val);
325
326 byte_count = ucode_size;
327 while (byte_count >= 4) {
328 data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
329 WREG32(mmSMC_IND_DATA_0, data);
330 src += 4;
331 byte_count -= 4;
332 }
333 val = RREG32(mmSMC_IND_ACCESS_CNTL);
334 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
335 WREG32(mmSMC_IND_ACCESS_CNTL, val);
336 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
337
338 return 0;
339}
340
341#if 0 /* not used yet */
342static int iceland_read_smc_sram_dword(struct amdgpu_device *adev,
343 uint32_t smc_address,
344 uint32_t *value,
345 uint32_t limit)
346{
347 int result;
348 unsigned long flags;
349
350 spin_lock_irqsave(&adev->smc_idx_lock, flags);
351 result = iceland_set_smc_sram_address(adev, smc_address, limit);
352 if (result == 0)
353 *value = RREG32(mmSMC_IND_DATA_0);
354 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
355 return result;
356}
357
358static int iceland_write_smc_sram_dword(struct amdgpu_device *adev,
359 uint32_t smc_address,
360 uint32_t value,
361 uint32_t limit)
362{
363 int result;
364 unsigned long flags;
365
366 spin_lock_irqsave(&adev->smc_idx_lock, flags);
367 result = iceland_set_smc_sram_address(adev, smc_address, limit);
368 if (result == 0)
369 WREG32(mmSMC_IND_DATA_0, value);
370 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
371 return result;
372}
373
374static int iceland_smu_stop_smc(struct amdgpu_device *adev)
375{
376 iceland_reset_smc(adev);
377 iceland_stop_smc_clock(adev);
378
379 return 0;
380}
381#endif
382
383static int iceland_smu_start_smc(struct amdgpu_device *adev)
384{
385 int i;
386 uint32_t val;
387
388 iceland_program_jump_on_start(adev);
389 iceland_start_smc_clock(adev);
390 iceland_start_smc(adev);
391
392 for (i = 0; i < adev->usec_timeout; i++) {
393 val = RREG32_SMC(ixFIRMWARE_FLAGS);
394 if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1)
395 break;
396 udelay(1);
397 }
398 return 0;
399}
400
401static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type)
402{
403 switch (fw_type) {
404 case UCODE_ID_SDMA0:
405 return AMDGPU_UCODE_ID_SDMA0;
406 case UCODE_ID_SDMA1:
407 return AMDGPU_UCODE_ID_SDMA1;
408 case UCODE_ID_CP_CE:
409 return AMDGPU_UCODE_ID_CP_CE;
410 case UCODE_ID_CP_PFP:
411 return AMDGPU_UCODE_ID_CP_PFP;
412 case UCODE_ID_CP_ME:
413 return AMDGPU_UCODE_ID_CP_ME;
414 case UCODE_ID_CP_MEC:
415 case UCODE_ID_CP_MEC_JT1:
416 return AMDGPU_UCODE_ID_CP_MEC1;
417 case UCODE_ID_CP_MEC_JT2:
418 return AMDGPU_UCODE_ID_CP_MEC2;
419 case UCODE_ID_RLC_G:
420 return AMDGPU_UCODE_ID_RLC_G;
421 default:
422 DRM_ERROR("ucode type is out of range!\n");
423 return AMDGPU_UCODE_ID_MAXIMUM;
424 }
425}
426
427static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
428{
429 switch (fw_type) {
430 case AMDGPU_UCODE_ID_SDMA0:
431 return UCODE_ID_SDMA0_MASK;
432 case AMDGPU_UCODE_ID_SDMA1:
433 return UCODE_ID_SDMA1_MASK;
434 case AMDGPU_UCODE_ID_CP_CE:
435 return UCODE_ID_CP_CE_MASK;
436 case AMDGPU_UCODE_ID_CP_PFP:
437 return UCODE_ID_CP_PFP_MASK;
438 case AMDGPU_UCODE_ID_CP_ME:
439 return UCODE_ID_CP_ME_MASK;
440 case AMDGPU_UCODE_ID_CP_MEC1:
441 return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
442 case AMDGPU_UCODE_ID_CP_MEC2:
443 return UCODE_ID_CP_MEC_MASK;
444 case AMDGPU_UCODE_ID_RLC_G:
445 return UCODE_ID_RLC_G_MASK;
446 default:
447 DRM_ERROR("ucode type is out of range!\n");
448 return 0;
449 }
450}
451
452static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
453 uint32_t fw_type,
454 struct SMU_Entry *entry)
455{
456 enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type);
457 struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
458 const struct gfx_firmware_header_v1_0 *header = NULL;
459 uint64_t gpu_addr;
460 uint32_t data_size;
461
462 if (ucode->fw == NULL)
463 return -EINVAL;
464
465 gpu_addr = ucode->mc_addr;
466 header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
467 data_size = le32_to_cpu(header->header.ucode_size_bytes);
468
469 entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
470 entry->id = (uint16_t)fw_type;
471 entry->image_addr_high = upper_32_bits(gpu_addr);
472 entry->image_addr_low = lower_32_bits(gpu_addr);
473 entry->meta_data_addr_high = 0;
474 entry->meta_data_addr_low = 0;
475 entry->data_size_byte = data_size;
476 entry->num_register_entries = 0;
477 entry->flags = 0;
478
479 return 0;
480}
481
482static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
483{
484 struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv;
485 struct SMU_DRAMData_TOC *toc;
486 uint32_t fw_to_load;
487
488 toc = (struct SMU_DRAMData_TOC *)private->header;
489 toc->num_entries = 0;
490 toc->structure_version = 1;
491
492 if (!adev->firmware.smu_load)
493 return 0;
494
495 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
496 &toc->entry[toc->num_entries++])) {
497 DRM_ERROR("Failed to get firmware entry for RLC\n");
498 return -EINVAL;
499 }
500
501 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
502 &toc->entry[toc->num_entries++])) {
503 DRM_ERROR("Failed to get firmware entry for CE\n");
504 return -EINVAL;
505 }
506
507 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
508 &toc->entry[toc->num_entries++])) {
509 DRM_ERROR("Failed to get firmware entry for PFP\n");
510 return -EINVAL;
511 }
512
513 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
514 &toc->entry[toc->num_entries++])) {
515 DRM_ERROR("Failed to get firmware entry for ME\n");
516 return -EINVAL;
517 }
518
519 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
520 &toc->entry[toc->num_entries++])) {
521 DRM_ERROR("Failed to get firmware entry for MEC\n");
522 return -EINVAL;
523 }
524
525 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
526 &toc->entry[toc->num_entries++])) {
527 DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
528 return -EINVAL;
529 }
530
531 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
532 &toc->entry[toc->num_entries++])) {
533 DRM_ERROR("Failed to get firmware entry for SDMA0\n");
534 return -EINVAL;
535 }
536
537 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
538 &toc->entry[toc->num_entries++])) {
539 DRM_ERROR("Failed to get firmware entry for SDMA1\n");
540 return -EINVAL;
541 }
542
543 iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
544 iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
545
546 fw_to_load = UCODE_ID_RLC_G_MASK |
547 UCODE_ID_SDMA0_MASK |
548 UCODE_ID_SDMA1_MASK |
549 UCODE_ID_CP_CE_MASK |
550 UCODE_ID_CP_ME_MASK |
551 UCODE_ID_CP_PFP_MASK |
552 UCODE_ID_CP_MEC_MASK |
553 UCODE_ID_CP_MEC_JT1_MASK;
554
555
556 if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
557 DRM_ERROR("Fail to request SMU load ucode\n");
558 return -EINVAL;
559 }
560
561 return 0;
562}
563
564static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev,
565 uint32_t fw_type)
566{
567 uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type);
568 int i;
569
570 for (i = 0; i < adev->usec_timeout; i++) {
571 if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask))
572 break;
573 udelay(1);
574 }
575
576 if (i == adev->usec_timeout) {
577 DRM_ERROR("check firmware loading failed\n");
578 return -EINVAL;
579 }
580
581 return 0;
582}
583
584int iceland_smu_start(struct amdgpu_device *adev)
585{
586 int result;
587
588 result = iceland_smu_upload_firmware_image(adev);
589 if (result)
590 return result;
591 result = iceland_smu_start_smc(adev);
592 if (result)
593 return result;
594
595 return iceland_smu_request_load_fw(adev);
596}
597
598static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = {
599 .check_fw_load_finish = iceland_smu_check_fw_load_finish,
600 .request_smu_load_fw = NULL,
601 .request_smu_specific_fw = NULL,
602};
603
604int iceland_smu_init(struct amdgpu_device *adev)
605{
606 struct iceland_smu_private_data *private;
607 uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
608 struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
609 uint64_t mc_addr;
610 void *toc_buf_ptr;
611 int ret;
612
613 private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL);
614 if (NULL == private)
615 return -ENOMEM;
616
617 /* allocate firmware buffers */
618 if (adev->firmware.smu_load)
619 amdgpu_ucode_init_bo(adev);
620
621 adev->smu.priv = private;
622 adev->smu.fw_flags = 0;
623
624 /* Allocate FW image data structure and header buffer */
625 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
626 true, AMDGPU_GEM_DOMAIN_VRAM,
627 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
628 NULL, NULL, toc_buf);
629 if (ret) {
630 DRM_ERROR("Failed to allocate memory for TOC buffer\n");
631 return -ENOMEM;
632 }
633
634 /* Retrieve GPU address for header buffer and internal buffer */
635 ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
636 if (ret) {
637 amdgpu_bo_unref(&adev->smu.toc_buf);
638 DRM_ERROR("Failed to reserve the TOC buffer\n");
639 return -EINVAL;
640 }
641
642 ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
643 if (ret) {
644 amdgpu_bo_unreserve(adev->smu.toc_buf);
645 amdgpu_bo_unref(&adev->smu.toc_buf);
646 DRM_ERROR("Failed to pin the TOC buffer\n");
647 return -EINVAL;
648 }
649
650 ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
651 if (ret) {
652 amdgpu_bo_unreserve(adev->smu.toc_buf);
653 amdgpu_bo_unref(&adev->smu.toc_buf);
654 DRM_ERROR("Failed to map the TOC buffer\n");
655 return -EINVAL;
656 }
657
658 amdgpu_bo_unreserve(adev->smu.toc_buf);
659 private->header_addr_low = lower_32_bits(mc_addr);
660 private->header_addr_high = upper_32_bits(mc_addr);
661 private->header = toc_buf_ptr;
662
663 adev->smu.smumgr_funcs = &iceland_smumgr_funcs;
664
665 return 0;
666}
667
668int iceland_smu_fini(struct amdgpu_device *adev)
669{
670 amdgpu_bo_unref(&adev->smu.toc_buf);
671 kfree(adev->smu.priv);
672 adev->smu.priv = NULL;
673 if (adev->firmware.fw_buf)
674 amdgpu_ucode_fini_bo(adev);
675
676 return 0;
677}
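
The odd-byte tail in iceland_copy_bytes_to_smc() above is the subtle part: the leftover 1-3 bytes are packed MSB-first and merged over the dword already present in SMC RAM. A minimal host-side model of that arithmetic, with plain variables standing in for the SMC index/data registers (names illustrative):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t smc_merge_tail(uint32_t orig_data, const uint8_t *src,
				       uint32_t byte_count /* 1..3 */)
	{
		uint32_t data = 0;
		uint32_t extra_shift = 8 * (4 - byte_count);

		while (byte_count--)
			data = (data << 8) + *src++;	/* big-endian pack */

		data <<= extra_shift;			/* align to the MSB end */
		/* keep the low bytes the caller did not supply */
		return data | (orig_data & ~((~0UL) << extra_shift));
	}

	int main(void)
	{
		const uint8_t tail[2] = { 0xAA, 0xBB };

		/* 0x11223344 overlaid with a 2-byte tail -> 0xAABB3344 */
		printf("0x%08x\n", smc_merge_tail(0x11223344, tail, 2));
		return 0;
	}
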
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index fee76b8a536f..dc9511c5ecb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -952,12 +952,6 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
 }
 
-static u32 si_get_virtual_caps(struct amdgpu_device *adev)
-{
-	/* SI does not support SR-IOV */
-	return 0;
-}
-
 static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
 	{GRBM_STATUS, false},
 	{GB_ADDR_CONFIG, false},
@@ -1124,16 +1118,22 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
 	return 0;
 }
 
+static void si_detect_hw_virtualization(struct amdgpu_device *adev)
+{
+	if (is_virtual_machine()) /* passthrough mode */
+		adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
 static const struct amdgpu_asic_funcs si_asic_funcs =
 {
 	.read_disabled_bios = &si_read_disabled_bios,
+	.detect_hw_virtualization = si_detect_hw_virtualization,
 	.read_register = &si_read_register,
 	.reset = &si_asic_reset,
 	.set_vga_state = &si_vga_set_state,
 	.get_xclk = &si_get_xclk,
 	.set_uvd_clocks = &si_set_uvd_clocks,
 	.set_vce_clocks = NULL,
-	.get_virtual_caps = &si_get_virtual_caps,
 };
 
 static uint32_t si_get_rev_id(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
deleted file mode 100644
index f06f6f4dc3a8..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ /dev/null
@@ -1,186 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "tonga_smum.h"
28
29MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
30
31static void tonga_dpm_set_funcs(struct amdgpu_device *adev);
32
33static int tonga_dpm_early_init(void *handle)
34{
35 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
36
37 tonga_dpm_set_funcs(adev);
38
39 return 0;
40}
41
42static int tonga_dpm_init_microcode(struct amdgpu_device *adev)
43{
44 char fw_name[30] = "amdgpu/tonga_smc.bin";
45 int err;
46 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
47 if (err)
48 goto out;
49 err = amdgpu_ucode_validate(adev->pm.fw);
50
51out:
52 if (err) {
53 DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
54 release_firmware(adev->pm.fw);
55 adev->pm.fw = NULL;
56 }
57 return err;
58}
59
60static int tonga_dpm_sw_init(void *handle)
61{
62 int ret;
63 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
64
65 ret = tonga_dpm_init_microcode(adev);
66 if (ret)
67 return ret;
68
69 return 0;
70}
71
72static int tonga_dpm_sw_fini(void *handle)
73{
74 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
75
76 release_firmware(adev->pm.fw);
77 adev->pm.fw = NULL;
78
79 return 0;
80}
81
82static int tonga_dpm_hw_init(void *handle)
83{
84 int ret;
85 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
86
87 mutex_lock(&adev->pm.mutex);
88
89 /* smu init only needs to be called at startup, not resume.
90 * It should be in sw_init, but requires the fw info gathered
91 * in sw_init from other IP modules.
92 */
93 ret = tonga_smu_init(adev);
94 if (ret) {
95 DRM_ERROR("SMU initialization failed\n");
96 goto fail;
97 }
98
99 ret = tonga_smu_start(adev);
100 if (ret) {
101 DRM_ERROR("SMU start failed\n");
102 goto fail;
103 }
104
105 mutex_unlock(&adev->pm.mutex);
106 return 0;
107
108fail:
109 adev->firmware.smu_load = false;
110 mutex_unlock(&adev->pm.mutex);
111 return -EINVAL;
112}
113
114static int tonga_dpm_hw_fini(void *handle)
115{
116 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
117
118 mutex_lock(&adev->pm.mutex);
119 /* smu fini only needs to be called at teardown, not suspend.
120 * It should be in sw_fini, but we put it here for symmetry
121 * with smu init.
122 */
123 tonga_smu_fini(adev);
124 mutex_unlock(&adev->pm.mutex);
125 return 0;
126}
127
128static int tonga_dpm_suspend(void *handle)
129{
130 return tonga_dpm_hw_fini(handle);
131}
132
133static int tonga_dpm_resume(void *handle)
134{
135 return tonga_dpm_hw_init(handle);
136}
137
138static int tonga_dpm_set_clockgating_state(void *handle,
139 enum amd_clockgating_state state)
140{
141 return 0;
142}
143
144static int tonga_dpm_set_powergating_state(void *handle,
145 enum amd_powergating_state state)
146{
147 return 0;
148}
149
150const struct amd_ip_funcs tonga_dpm_ip_funcs = {
151 .name = "tonga_dpm",
152 .early_init = tonga_dpm_early_init,
153 .late_init = NULL,
154 .sw_init = tonga_dpm_sw_init,
155 .sw_fini = tonga_dpm_sw_fini,
156 .hw_init = tonga_dpm_hw_init,
157 .hw_fini = tonga_dpm_hw_fini,
158 .suspend = tonga_dpm_suspend,
159 .resume = tonga_dpm_resume,
160 .is_idle = NULL,
161 .wait_for_idle = NULL,
162 .soft_reset = NULL,
163 .set_clockgating_state = tonga_dpm_set_clockgating_state,
164 .set_powergating_state = tonga_dpm_set_powergating_state,
165};
166
167static const struct amdgpu_dpm_funcs tonga_dpm_funcs = {
168 .get_temperature = NULL,
169 .pre_set_power_state = NULL,
170 .set_power_state = NULL,
171 .post_set_power_state = NULL,
172 .display_configuration_changed = NULL,
173 .get_sclk = NULL,
174 .get_mclk = NULL,
175 .print_power_state = NULL,
176 .debugfs_print_current_performance_level = NULL,
177 .force_performance_level = NULL,
178 .vblank_too_short = NULL,
179 .powergate_uvd = NULL,
180};
181
182static void tonga_dpm_set_funcs(struct amdgpu_device *adev)
183{
184 if (NULL == adev->pm.funcs)
185 adev->pm.funcs = &tonga_dpm_funcs;
186}
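
Tables like tonga_dpm_ip_funcs above are not called directly; the device core walks its list of IP blocks and drives each stage in order. A hedged sketch of that dispatch, assuming the 4.9-era amdgpu_device layout (loop simplified, error unwinding omitted):

	/* inside a hypothetical amdgpu hw-init pass */
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
		if (r)
			return r;	/* later blocks are never started */
	}

Note the lifecycle choice here: tonga_dpm_suspend()/resume() simply reuse hw_fini()/hw_init(), so the Tonga SMU is torn down and restarted across suspend, whereas the Iceland variant earlier in this diff keeps the SMU allocated and only restarts it on resume.
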
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
deleted file mode 100644
index 940de1836f8f..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ /dev/null
@@ -1,862 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "tonga_ppsmc.h"
28#include "tonga_smum.h"
29#include "smu_ucode_xfer_vi.h"
30#include "amdgpu_ucode.h"
31
32#include "smu/smu_7_1_2_d.h"
33#include "smu/smu_7_1_2_sh_mask.h"
34
35#define TONGA_SMC_SIZE 0x20000
36
37static int tonga_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
38{
39 uint32_t val;
40
41 if (smc_address & 3)
42 return -EINVAL;
43
44 if ((smc_address + 3) > limit)
45 return -EINVAL;
46
47 WREG32(mmSMC_IND_INDEX_0, smc_address);
48
49 val = RREG32(mmSMC_IND_ACCESS_CNTL);
50 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
51 WREG32(mmSMC_IND_ACCESS_CNTL, val);
52
53 return 0;
54}
55
56static int tonga_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
57{
58 uint32_t addr;
59 uint32_t data, orig_data;
60 int result = 0;
61 uint32_t extra_shift;
62 unsigned long flags;
63
64 if (smc_start_address & 3)
65 return -EINVAL;
66
67 if ((smc_start_address + byte_count) > limit)
68 return -EINVAL;
69
70 addr = smc_start_address;
71
72 spin_lock_irqsave(&adev->smc_idx_lock, flags);
73 while (byte_count >= 4) {
74 /* Bytes are written into the SMC address space with the MSB first */
75 data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
76
77 result = tonga_set_smc_sram_address(adev, addr, limit);
78
79 if (result)
80 goto out;
81
82 WREG32(mmSMC_IND_DATA_0, data);
83
84 src += 4;
85 byte_count -= 4;
86 addr += 4;
87 }
88
89 if (0 != byte_count) {
90 /* Now write the odd bytes left, using a read-modify-write cycle */
91 data = 0;
92
93 result = tonga_set_smc_sram_address(adev, addr, limit);
94 if (result)
95 goto out;
96
97 orig_data = RREG32(mmSMC_IND_DATA_0);
98 extra_shift = 8 * (4 - byte_count);
99
100 while (byte_count > 0) {
101 data = (data << 8) + *src++;
102 byte_count--;
103 }
104
105 data <<= extra_shift;
106 data |= (orig_data & ~((~0UL) << extra_shift));
107
108 result = tonga_set_smc_sram_address(adev, addr, limit);
109 if (result)
110 goto out;
111
112 WREG32(mmSMC_IND_DATA_0, data);
113 }
114
115out:
116 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
117 return result;
118}
119
120static int tonga_program_jump_on_start(struct amdgpu_device *adev)
121{
122 static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
123 tonga_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
124
125 return 0;
126}
127
128static bool tonga_is_smc_ram_running(struct amdgpu_device *adev)
129{
130 uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
131 val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
132
133 return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
134}
135
136static int wait_smu_response(struct amdgpu_device *adev)
137{
138 int i;
139 uint32_t val;
140
141 for (i = 0; i < adev->usec_timeout; i++) {
142 val = RREG32(mmSMC_RESP_0);
143 if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
144 break;
145 udelay(1);
146 }
147
148 if (i == adev->usec_timeout)
149 return -EINVAL;
150
151 return 0;
152}
153
154static int tonga_send_msg_to_smc_offset(struct amdgpu_device *adev)
155{
156 if (wait_smu_response(adev)) {
157 DRM_ERROR("Failed to send previous message\n");
158 return -EINVAL;
159 }
160
161 WREG32(mmSMC_MSG_ARG_0, 0x20000);
162 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
163
164 if (wait_smu_response(adev)) {
165 DRM_ERROR("Failed to send message\n");
166 return -EINVAL;
167 }
168
169 return 0;
170}
171
172static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
173{
174 if (!tonga_is_smc_ram_running(adev))
175 {
176 return -EINVAL;
177 }
178
179 if (wait_smu_response(adev)) {
180 DRM_ERROR("Failed to send previous message\n");
181 return -EINVAL;
182 }
183
184 WREG32(mmSMC_MESSAGE_0, msg);
185
186 if (wait_smu_response(adev)) {
187 DRM_ERROR("Failed to send message\n");
188 return -EINVAL;
189 }
190
191 return 0;
192}
193
194static int tonga_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
195 PPSMC_Msg msg)
196{
197 if (wait_smu_response(adev)) {
198 DRM_ERROR("Failed to send previous message\n");
199 return -EINVAL;
200 }
201
202 WREG32(mmSMC_MESSAGE_0, msg);
203
204 return 0;
205}
206
207static int tonga_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
208 PPSMC_Msg msg,
209 uint32_t parameter)
210{
211 if (!tonga_is_smc_ram_running(adev))
212 return -EINVAL;
213
214 if (wait_smu_response(adev)) {
215 DRM_ERROR("Failed to send previous message\n");
216 return -EINVAL;
217 }
218
219 WREG32(mmSMC_MSG_ARG_0, parameter);
220
221 return tonga_send_msg_to_smc(adev, msg);
222}
223
224static int tonga_send_msg_to_smc_with_parameter_without_waiting(
225 struct amdgpu_device *adev,
226 PPSMC_Msg msg, uint32_t parameter)
227{
228 if (wait_smu_response(adev)) {
229 DRM_ERROR("Failed to send previous message\n");
230 return -EINVAL;
231 }
232
233 WREG32(mmSMC_MSG_ARG_0, parameter);
234
235 return tonga_send_msg_to_smc_without_waiting(adev, msg);
236}
237
238#if 0 /* not used yet */
239static int tonga_wait_for_smc_inactive(struct amdgpu_device *adev)
240{
241 int i;
242 uint32_t val;
243
244 if (!tonga_is_smc_ram_running(adev))
245 return -EINVAL;
246
247 for (i = 0; i < adev->usec_timeout; i++) {
248 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
249 if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
250 break;
251 udelay(1);
252 }
253
254 if (i == adev->usec_timeout)
255 return -EINVAL;
256
257 return 0;
258}
259#endif
260
261static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev)
262{
263 const struct smc_firmware_header_v1_0 *hdr;
264 uint32_t ucode_size;
265 uint32_t ucode_start_address;
266 const uint8_t *src;
267 uint32_t val;
268 uint32_t byte_count;
269 uint32_t *data;
270 unsigned long flags;
271
272 if (!adev->pm.fw)
273 return -EINVAL;
274
275 /* Skip SMC ucode loading on SR-IOV capable boards.
276 * vbios does this for us in asic_init in that case.
277 */
278 if (adev->virtualization.supports_sr_iov)
279 return 0;
280
281 hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
282 amdgpu_ucode_print_smc_hdr(&hdr->header);
283
284 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
285 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
286 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
287 src = (const uint8_t *)
288 (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
289
290 if (ucode_size & 3) {
291 DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
292 return -EINVAL;
293 }
294
295 if (ucode_size > TONGA_SMC_SIZE) {
296 DRM_ERROR("SMC address is beyond the SMC RAM area\n");
297 return -EINVAL;
298 }
299
300 spin_lock_irqsave(&adev->smc_idx_lock, flags);
301 WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
302
303 val = RREG32(mmSMC_IND_ACCESS_CNTL);
304 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
305 WREG32(mmSMC_IND_ACCESS_CNTL, val);
306
307 byte_count = ucode_size;
308 data = (uint32_t *)src;
309 for (; byte_count >= 4; data++, byte_count -= 4)
310 WREG32(mmSMC_IND_DATA_0, data[0]);
311
312 val = RREG32(mmSMC_IND_ACCESS_CNTL);
313 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
314 WREG32(mmSMC_IND_ACCESS_CNTL, val);
315 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
316
317 return 0;
318}
319
320#if 0 /* not used yet */
321static int tonga_read_smc_sram_dword(struct amdgpu_device *adev,
322 uint32_t smc_address,
323 uint32_t *value,
324 uint32_t limit)
325{
326 int result;
327 unsigned long flags;
328
329 spin_lock_irqsave(&adev->smc_idx_lock, flags);
330 result = tonga_set_smc_sram_address(adev, smc_address, limit);
331 if (result == 0)
332 *value = RREG32(mmSMC_IND_DATA_0);
333 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
334 return result;
335}
336
337static int tonga_write_smc_sram_dword(struct amdgpu_device *adev,
338 uint32_t smc_address,
339 uint32_t value,
340 uint32_t limit)
341{
342 int result;
343 unsigned long flags;
344
345 spin_lock_irqsave(&adev->smc_idx_lock, flags);
346 result = tonga_set_smc_sram_address(adev, smc_address, limit);
347 if (result == 0)
348 WREG32(mmSMC_IND_DATA_0, value);
349 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
350 return result;
351}
352
353static int tonga_smu_stop_smc(struct amdgpu_device *adev)
354{
355 uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
356 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
357 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
358
359 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
360 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
361 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
362
363 return 0;
364}
365#endif
366
367static enum AMDGPU_UCODE_ID tonga_convert_fw_type(uint32_t fw_type)
368{
369 switch (fw_type) {
370 case UCODE_ID_SDMA0:
371 return AMDGPU_UCODE_ID_SDMA0;
372 case UCODE_ID_SDMA1:
373 return AMDGPU_UCODE_ID_SDMA1;
374 case UCODE_ID_CP_CE:
375 return AMDGPU_UCODE_ID_CP_CE;
376 case UCODE_ID_CP_PFP:
377 return AMDGPU_UCODE_ID_CP_PFP;
378 case UCODE_ID_CP_ME:
379 return AMDGPU_UCODE_ID_CP_ME;
380 case UCODE_ID_CP_MEC:
381 case UCODE_ID_CP_MEC_JT1:
382 return AMDGPU_UCODE_ID_CP_MEC1;
383 case UCODE_ID_CP_MEC_JT2:
384 return AMDGPU_UCODE_ID_CP_MEC2;
385 case UCODE_ID_RLC_G:
386 return AMDGPU_UCODE_ID_RLC_G;
387 default:
388 DRM_ERROR("ucode type is out of range!\n");
389 return AMDGPU_UCODE_ID_MAXIMUM;
390 }
391}
392
393static int tonga_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
394 uint32_t fw_type,
395 struct SMU_Entry *entry)
396{
397 enum AMDGPU_UCODE_ID id = tonga_convert_fw_type(fw_type);
398 struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
399 const struct gfx_firmware_header_v1_0 *header = NULL;
400 uint64_t gpu_addr;
401 uint32_t data_size;
402
403 if (ucode->fw == NULL)
404 return -EINVAL;
405
406 gpu_addr = ucode->mc_addr;
407 header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
408 data_size = le32_to_cpu(header->header.ucode_size_bytes);
409
410 if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
411 (fw_type == UCODE_ID_CP_MEC_JT2)) {
412 gpu_addr += le32_to_cpu(header->jt_offset) << 2;
413 data_size = le32_to_cpu(header->jt_size) << 2;
414 }
415
416 entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
417 entry->id = (uint16_t)fw_type;
418 entry->image_addr_high = upper_32_bits(gpu_addr);
419 entry->image_addr_low = lower_32_bits(gpu_addr);
420 entry->meta_data_addr_high = 0;
421 entry->meta_data_addr_low = 0;
422 entry->data_size_byte = data_size;
423 entry->num_register_entries = 0;
424
425 if (fw_type == UCODE_ID_RLC_G)
426 entry->flags = 1;
427 else
428 entry->flags = 0;
429
430 return 0;
431}
432
433static int tonga_smu_request_load_fw(struct amdgpu_device *adev)
434{
435 struct tonga_smu_private_data *private = (struct tonga_smu_private_data *)adev->smu.priv;
436 struct SMU_DRAMData_TOC *toc;
437 uint32_t fw_to_load;
438
439 WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
440
441 tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
442 tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
443
444 toc = (struct SMU_DRAMData_TOC *)private->header;
445 toc->num_entries = 0;
446 toc->structure_version = 1;
447
448 if (!adev->firmware.smu_load)
449 return 0;
450
451 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
452 &toc->entry[toc->num_entries++])) {
453 DRM_ERROR("Failed to get firmware entry for RLC\n");
454 return -EINVAL;
455 }
456
457 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
458 &toc->entry[toc->num_entries++])) {
459 DRM_ERROR("Failed to get firmware entry for CE\n");
460 return -EINVAL;
461 }
462
463 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
464 &toc->entry[toc->num_entries++])) {
465 DRM_ERROR("Failed to get firmware entry for PFP\n");
466 return -EINVAL;
467 }
468
469 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
470 &toc->entry[toc->num_entries++])) {
471 DRM_ERROR("Failed to get firmware entry for ME\n");
472 return -EINVAL;
473 }
474
475 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
476 &toc->entry[toc->num_entries++])) {
477 DRM_ERROR("Failed to get firmware entry for MEC\n");
478 return -EINVAL;
479 }
480
481 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
482 &toc->entry[toc->num_entries++])) {
483 DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
484 return -EINVAL;
485 }
486
487 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
488 &toc->entry[toc->num_entries++])) {
489 DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
490 return -EINVAL;
491 }
492
493 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
494 &toc->entry[toc->num_entries++])) {
495 DRM_ERROR("Failed to get firmware entry for SDMA0\n");
496 return -EINVAL;
497 }
498
499 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
500 &toc->entry[toc->num_entries++])) {
501 DRM_ERROR("Failed to get firmware entry for SDMA1\n");
502 return -EINVAL;
503 }
504
505 tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
506 tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
507
508 fw_to_load = UCODE_ID_RLC_G_MASK |
509 UCODE_ID_SDMA0_MASK |
510 UCODE_ID_SDMA1_MASK |
511 UCODE_ID_CP_CE_MASK |
512 UCODE_ID_CP_ME_MASK |
513 UCODE_ID_CP_PFP_MASK |
514 UCODE_ID_CP_MEC_MASK;
515
516 if (tonga_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
517 DRM_ERROR("Fail to request SMU load ucode\n");
518 return -EINVAL;
519 }
520
521 return 0;
522}
523
524static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type)
525{
526 switch (fw_type) {
527 case AMDGPU_UCODE_ID_SDMA0:
528 return UCODE_ID_SDMA0_MASK;
529 case AMDGPU_UCODE_ID_SDMA1:
530 return UCODE_ID_SDMA1_MASK;
531 case AMDGPU_UCODE_ID_CP_CE:
532 return UCODE_ID_CP_CE_MASK;
533 case AMDGPU_UCODE_ID_CP_PFP:
534 return UCODE_ID_CP_PFP_MASK;
535 case AMDGPU_UCODE_ID_CP_ME:
536 return UCODE_ID_CP_ME_MASK;
537 case AMDGPU_UCODE_ID_CP_MEC1:
538 return UCODE_ID_CP_MEC_MASK;
539 case AMDGPU_UCODE_ID_CP_MEC2:
540 return UCODE_ID_CP_MEC_MASK;
541 case AMDGPU_UCODE_ID_RLC_G:
542 return UCODE_ID_RLC_G_MASK;
543 default:
544 DRM_ERROR("ucode type is out of range!\n");
545 return 0;
546 }
547}
548
549static int tonga_smu_check_fw_load_finish(struct amdgpu_device *adev,
550 uint32_t fw_type)
551{
552 uint32_t fw_mask = tonga_smu_get_mask_for_fw_type(fw_type);
553 int i;
554
555 for (i = 0; i < adev->usec_timeout; i++) {
556 if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
557 break;
558 udelay(1);
559 }
560
561 if (i == adev->usec_timeout) {
562 DRM_ERROR("check firmware loading failed\n");
563 return -EINVAL;
564 }
565
566 return 0;
567}
568
569static int tonga_smu_start_in_protection_mode(struct amdgpu_device *adev)
570{
571 int result;
572 uint32_t val;
573 int i;
574
575 /* Assert reset */
576 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
577 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
578 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
579
580 result = tonga_smu_upload_firmware_image(adev);
581 if (result)
582 return result;
583
584 /* Clear status */
585 WREG32_SMC(ixSMU_STATUS, 0);
586
587 /* Enable clock */
588 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
589 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
590 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
591
592 /* De-assert reset */
593 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
594 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
595 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
596
597 /* Set SMU Auto Start */
598 val = RREG32_SMC(ixSMU_INPUT_DATA);
599 val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
600 WREG32_SMC(ixSMU_INPUT_DATA, val);
601
602 /* Clear firmware interrupt enable flag */
603 WREG32_SMC(ixFIRMWARE_FLAGS, 0);
604
605 for (i = 0; i < adev->usec_timeout; i++) {
606 val = RREG32_SMC(ixRCU_UC_EVENTS);
607 if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
608 break;
609 udelay(1);
610 }
611
612 if (i == adev->usec_timeout) {
613 DRM_ERROR("Interrupt is not enabled by firmware\n");
614 return -EINVAL;
615 }
616
617 /* Call Test SMU message with 0x20000 offset
618 * to trigger SMU start
619 */
620 tonga_send_msg_to_smc_offset(adev);
621
622 /* Wait for done bit to be set */
623 for (i = 0; i < adev->usec_timeout; i++) {
624 val = RREG32_SMC(ixSMU_STATUS);
625 if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
626 break;
627 udelay(1);
628 }
629
630 if (i == adev->usec_timeout) {
631 DRM_ERROR("Timeout for SMU start\n");
632 return -EINVAL;
633 }
634
635 /* Check pass/failed indicator */
636 val = RREG32_SMC(ixSMU_STATUS);
637 if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
638 DRM_ERROR("SMU Firmware start failed\n");
639 return -EINVAL;
640 }
641
642 /* Wait for firmware to initialize */
643 for (i = 0; i < adev->usec_timeout; i++) {
644 val = RREG32_SMC(ixFIRMWARE_FLAGS);
645 if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
646 break;
647 udelay(1);
648 }
649
650 if (i == adev->usec_timeout) {
651 DRM_ERROR("SMU firmware initialization failed\n");
652 return -EINVAL;
653 }
654
655 return 0;
656}
657
658static int tonga_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
659{
660 int i, result;
661 uint32_t val;
662
663 /* wait for smc boot up */
664 for (i = 0; i < adev->usec_timeout; i++) {
665 val = RREG32_SMC(ixRCU_UC_EVENTS);
666 val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
667 if (val)
668 break;
669 udelay(1);
670 }
671
672 if (i == adev->usec_timeout) {
673 DRM_ERROR("SMC boot sequence is not completed\n");
674 return -EINVAL;
675 }
676
677 /* Clear firmware interrupt enable flag */
678 WREG32_SMC(ixFIRMWARE_FLAGS, 0);
679
680 /* Assert reset */
681 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
682 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
683 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
684
685 result = tonga_smu_upload_firmware_image(adev);
686 if (result)
687 return result;
688
689 /* Set the SMC instruction start point at 0x0 */
690 tonga_program_jump_on_start(adev);
691
692 /* Enable clock */
693 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
694 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
695 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
696
697 /* De-assert reset */
698 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
699 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
700 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
701
702 /* Wait for firmware to initialize */
703 for (i = 0; i < adev->usec_timeout; i++) {
704 val = RREG32_SMC(ixFIRMWARE_FLAGS);
705 if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
706 break;
707 udelay(1);
708 }
709
710 if (i == adev->usec_timeout) {
711 DRM_ERROR("Timeout for SMC firmware initialization\n");
712 return -EINVAL;
713 }
714
715 return 0;
716}
717
718int tonga_smu_start(struct amdgpu_device *adev)
719{
720 int result;
721 uint32_t val;
722
723 if (!tonga_is_smc_ram_running(adev)) {
724 val = RREG32_SMC(ixSMU_FIRMWARE);
725 if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
726 result = tonga_smu_start_in_non_protection_mode(adev);
727 if (result)
728 return result;
729 } else {
730 result = tonga_smu_start_in_protection_mode(adev);
731 if (result)
732 return result;
733 }
734 }
735
736 return tonga_smu_request_load_fw(adev);
737}
738
739static const struct amdgpu_smumgr_funcs tonga_smumgr_funcs = {
740 .check_fw_load_finish = tonga_smu_check_fw_load_finish,
741 .request_smu_load_fw = NULL,
742 .request_smu_specific_fw = NULL,
743};
744
745int tonga_smu_init(struct amdgpu_device *adev)
746{
747 struct tonga_smu_private_data *private;
748 uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
749 uint32_t smu_internal_buffer_size = 200*4096;
750 struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
751 struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
752 uint64_t mc_addr;
753 void *toc_buf_ptr;
754 void *smu_buf_ptr;
755 int ret;
756
757 private = kzalloc(sizeof(struct tonga_smu_private_data), GFP_KERNEL);
758 if (NULL == private)
759 return -ENOMEM;
760
761 /* allocate firmware buffers */
762 if (adev->firmware.smu_load)
763 amdgpu_ucode_init_bo(adev);
764
765 adev->smu.priv = private;
766 adev->smu.fw_flags = 0;
767
768 /* Allocate FW image data structure and header buffer */
769 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
770 true, AMDGPU_GEM_DOMAIN_VRAM,
771 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
772 NULL, NULL, toc_buf);
773 if (ret) {
774 DRM_ERROR("Failed to allocate memory for TOC buffer\n");
775 return -ENOMEM;
776 }
777
778 /* Allocate buffer for SMU internal buffer */
779 ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
780 true, AMDGPU_GEM_DOMAIN_VRAM,
781 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
782 NULL, NULL, smu_buf);
783 if (ret) {
784 DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
785 return -ENOMEM;
786 }
787
788 /* Retrieve GPU address for header buffer and internal buffer */
789 ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
790 if (ret) {
791 amdgpu_bo_unref(&adev->smu.toc_buf);
792 DRM_ERROR("Failed to reserve the TOC buffer\n");
793 return -EINVAL;
794 }
795
796 ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
797 if (ret) {
798 amdgpu_bo_unreserve(adev->smu.toc_buf);
799 amdgpu_bo_unref(&adev->smu.toc_buf);
800 DRM_ERROR("Failed to pin the TOC buffer\n");
801 return -EINVAL;
802 }
803
804 ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
805 if (ret) {
806 amdgpu_bo_unreserve(adev->smu.toc_buf);
807 amdgpu_bo_unref(&adev->smu.toc_buf);
808 DRM_ERROR("Failed to map the TOC buffer\n");
809 return -EINVAL;
810 }
811
812 amdgpu_bo_unreserve(adev->smu.toc_buf);
813 private->header_addr_low = lower_32_bits(mc_addr);
814 private->header_addr_high = upper_32_bits(mc_addr);
815 private->header = toc_buf_ptr;
816
817 ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
818 if (ret) {
819 amdgpu_bo_unref(&adev->smu.smu_buf);
820 amdgpu_bo_unref(&adev->smu.toc_buf);
821 DRM_ERROR("Failed to reserve the SMU internal buffer\n");
822 return -EINVAL;
823 }
824
825 ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
826 if (ret) {
827 amdgpu_bo_unreserve(adev->smu.smu_buf);
828 amdgpu_bo_unref(&adev->smu.smu_buf);
829 amdgpu_bo_unref(&adev->smu.toc_buf);
830 DRM_ERROR("Failed to pin the SMU internal buffer\n");
831 return -EINVAL;
832 }
833
834 ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
835 if (ret) {
836 amdgpu_bo_unreserve(adev->smu.smu_buf);
837 amdgpu_bo_unref(&adev->smu.smu_buf);
838 amdgpu_bo_unref(&adev->smu.toc_buf);
839 DRM_ERROR("Failed to map the SMU internal buffer\n");
840 return -EINVAL;
841 }
842
843 amdgpu_bo_unreserve(adev->smu.smu_buf);
844 private->smu_buffer_addr_low = lower_32_bits(mc_addr);
845 private->smu_buffer_addr_high = upper_32_bits(mc_addr);
846
847 adev->smu.smumgr_funcs = &tonga_smumgr_funcs;
848
849 return 0;
850}
851
852int tonga_smu_fini(struct amdgpu_device *adev)
853{
854 amdgpu_bo_unref(&adev->smu.toc_buf);
855 amdgpu_bo_unref(&adev->smu.smu_buf);
856 kfree(adev->smu.priv);
857 adev->smu.priv = NULL;
858 if (adev->firmware.fw_buf)
859 amdgpu_ucode_fini_bo(adev);
860
861 return 0;
862}
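
tonga_smu_check_fw_load_finish() above uses a poll-until-mask-set idiom that recurs throughout these files. A condensed sketch of the pattern, where read_status() is a hypothetical stand-in for RREG32_SMC(ixSOFT_REGISTERS_TABLE_28):

	static int wait_for_mask(struct amdgpu_device *adev, uint32_t mask)
	{
		int i;

		for (i = 0; i < adev->usec_timeout; i++) {
			/* done only when every requested bit is set */
			if ((read_status(adev) & mask) == mask)
				return 0;
			udelay(1);
		}
		return -EINVAL;	/* timed out, as the code above reports */
	}

Because the SMU flags each ucode with its own bit, a caller can wait on a single engine (e.g. UCODE_ID_CP_CE_MASK) or on a combined mask in one call.
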
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h b/drivers/gpu/drm/amd/amdgpu/tonga_smum.h
deleted file mode 100644
index c031ff99fe3e..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef TONGA_SMUMGR_H
25#define TONGA_SMUMGR_H
26
27#include "tonga_ppsmc.h"
28
29int tonga_smu_init(struct amdgpu_device *adev);
30int tonga_smu_fini(struct amdgpu_device *adev);
31int tonga_smu_start(struct amdgpu_device *adev);
32
33struct tonga_smu_private_data
34{
35 uint8_t *header;
36 uint32_t smu_buffer_addr_high;
37 uint32_t smu_buffer_addr_low;
38 uint32_t header_addr_high;
39 uint32_t header_addr_low;
40};
41
42#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index b688e2f77419..c0d9aad7126f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -79,6 +79,9 @@
 #endif
 #include "dce_virtual.h"
 
+MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
+MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
@@ -445,18 +448,21 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
 	return true;
 }
 
-static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
 {
-	u32 caps = 0;
-	u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+	/* bit0: 0 means pf and 1 means vf */
+	/* bit31: 0 means disable IOV and 1 means enable */
+	if (reg & 1)
+		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;
 
-	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
-		caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
+	if (reg & 0x80000000)
+		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
 
-	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
-		caps |= AMDGPU_VIRT_CAPS_IS_VF;
-
-	return caps;
+	if (reg == 0) {
+		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
+			adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+	}
 }
 
 static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
@@ -1521,13 +1527,13 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
 {
 	.read_disabled_bios = &vi_read_disabled_bios,
 	.read_bios_from_rom = &vi_read_bios_from_rom,
+	.detect_hw_virtualization = vi_detect_hw_virtualization,
 	.read_register = &vi_read_register,
 	.reset = &vi_asic_reset,
 	.set_vga_state = &vi_vga_set_state,
 	.get_xclk = &vi_get_xclk,
 	.set_uvd_clocks = &vi_set_uvd_clocks,
 	.set_vce_clocks = &vi_set_vce_clocks,
-	.get_virtual_caps = &vi_get_virtual_caps,
 };
 
 static int vi_common_early_init(void *handle)
@@ -1657,6 +1663,10 @@ static int vi_common_early_init(void *handle)
 		return -EINVAL;
 	}
 
+	/* in early init stage, vbios code won't work */
+	if (adev->asic_funcs->detect_hw_virtualization)
+		amdgpu_asic_detect_hw_virtualization(adev);
+
 	if (amdgpu_smc_load_fw && smc_enabled)
 		adev->firmware.smu_load = true;
 
@@ -1800,6 +1810,63 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
 	WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
 }
 
+static int vi_common_set_clockgating_state_by_smu(void *handle,
+			enum amd_clockgating_state state)
+{
+	uint32_t msg_id, pp_state;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	void *pp_handle = adev->powerplay.pp_handle;
+
+	if (state == AMD_CG_STATE_UNGATE)
+		pp_state = 0;
+	else
+		pp_state = PP_STATE_CG | PP_STATE_LS;
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+		       PP_BLOCK_SYS_MC,
+		       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+		       pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+		       PP_BLOCK_SYS_SDMA,
+		       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+		       pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+		       PP_BLOCK_SYS_HDP,
+		       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+		       pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+		       PP_BLOCK_SYS_BIF,
+		       PP_STATE_SUPPORT_LS,
+		       pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+		       PP_BLOCK_SYS_BIF,
+		       PP_STATE_SUPPORT_CG,
+		       pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+		       PP_BLOCK_SYS_DRM,
+		       PP_STATE_SUPPORT_LS,
+		       pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+		       PP_BLOCK_SYS_ROM,
+		       PP_STATE_SUPPORT_CG,
+		       pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	return 0;
+}
+
 static int vi_common_set_clockgating_state(void *handle,
 			enum amd_clockgating_state state)
 {
@@ -1825,6 +1892,10 @@ static int vi_common_set_clockgating_state(void *handle,
 		vi_update_hdp_light_sleep(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
 		break;
+	case CHIP_TONGA:
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
+		vi_common_set_clockgating_state_by_smu(adev, state);
 	default:
 		break;
 	}
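
For reference, the three states vi_detect_hw_virtualization() distinguishes, written out as a decode table (bit meanings taken from the comments in the hunk above):

	/*
	 * mmBIF_IOV_FUNC_IDENTIFIER decode, as used above:
	 *   bit 0 set  -> this function is a VF  (AMDGPU_SRIOV_CAPS_IS_VF)
	 *   bit 31 set -> SR-IOV is enabled      (AMDGPU_SRIOV_CAPS_ENABLE_IOV)
	 *   reg == 0   -> no SR-IOV at all; if is_virtual_machine() the whole
	 *                 device is passed through (AMDGPU_PASSTHROUGH_MODE)
	 */
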
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index f62b261660d4..11746f22d0c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -373,4 +373,41 @@
 #define VCE_CMD_WAIT_GE		0x00000106
 #define VCE_CMD_UPDATE_PTB	0x00000107
 #define VCE_CMD_FLUSH_TLB	0x00000108
+
+/* mmPA_SC_RASTER_CONFIG mask */
+#define RB_MAP_PKR0(x)		((x) << 0)
+#define RB_MAP_PKR0_MASK	(0x3 << 0)
+#define RB_MAP_PKR1(x)		((x) << 2)
+#define RB_MAP_PKR1_MASK	(0x3 << 2)
+#define RB_XSEL2(x)		((x) << 4)
+#define RB_XSEL2_MASK		(0x3 << 4)
+#define RB_XSEL			(1 << 6)
+#define RB_YSEL			(1 << 7)
+#define PKR_MAP(x)		((x) << 8)
+#define PKR_MAP_MASK		(0x3 << 8)
+#define PKR_XSEL(x)		((x) << 10)
+#define PKR_XSEL_MASK		(0x3 << 10)
+#define PKR_YSEL(x)		((x) << 12)
+#define PKR_YSEL_MASK		(0x3 << 12)
+#define SC_MAP(x)		((x) << 16)
+#define SC_MAP_MASK		(0x3 << 16)
+#define SC_XSEL(x)		((x) << 18)
+#define SC_XSEL_MASK		(0x3 << 18)
+#define SC_YSEL(x)		((x) << 20)
+#define SC_YSEL_MASK		(0x3 << 20)
+#define SE_MAP(x)		((x) << 24)
+#define SE_MAP_MASK		(0x3 << 24)
+#define SE_XSEL(x)		((x) << 26)
+#define SE_XSEL_MASK		(0x3 << 26)
+#define SE_YSEL(x)		((x) << 28)
+#define SE_YSEL_MASK		(0x3 << 28)
+
+/* mmPA_SC_RASTER_CONFIG_1 mask */
+#define SE_PAIR_MAP(x)		((x) << 0)
+#define SE_PAIR_MAP_MASK	(0x3 << 0)
+#define SE_PAIR_XSEL(x)		((x) << 2)
+#define SE_PAIR_XSEL_MASK	(0x3 << 2)
+#define SE_PAIR_YSEL(x)		((x) << 4)
+#define SE_PAIR_YSEL_MASK	(0x3 << 4)
+
 #endif
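
The new PA_SC_RASTER_CONFIG helpers are plain shift/mask field builders. A short usage sketch showing how a field is composed and later rewritten in place (the values are illustrative only):

	uint32_t raster_config = RB_MAP_PKR0(2) | PKR_MAP(1) | SE_MAP(3);

	/* replace just the SE_MAP field, leaving the others untouched */
	raster_config &= ~SE_MAP_MASK;
	raster_config |= SE_MAP(1);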